preprop_dataset.py
#!/usr/bin/env python
"""Read WAV files, compute spectrograms and save them alongside the source audio."""

__author__ = 'Erdene-Ochir Tuguldur'

import sys
import argparse

import numpy as np
from tqdm import tqdm
from torch.utils.data import ConcatDataset

from datasets import Compose, LoadAudio, ComputeMagSpectrogram
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--dataset",
                    choices=['librispeech', 'mbspeech', 'bolorspeech', 'kazakh20h', 'backgroundsounds', 'aihub', 'uaihub'],
                    default='bolorspeech', help='dataset name')
parser.add_argument("--path", type=str, default=None)
args = parser.parse_args()
if args.dataset == 'mbspeech':
    from datasets.mb_speech import MBSpeech
    dataset = MBSpeech()
elif args.dataset == 'librispeech':
    from datasets.libri_speech import LibriSpeech
    dataset = ConcatDataset([
        LibriSpeech(name='train-clean-100'),
        LibriSpeech(name='train-clean-360'),
        LibriSpeech(name='train-other-500'),
        LibriSpeech(name='dev-clean')
    ])
elif args.dataset == 'backgroundsounds':
    from datasets.background_sounds import BackgroundSounds
    dataset = BackgroundSounds(is_random=False)
elif args.dataset == 'bolorspeech':
    from datasets.bolor_speech import BolorSpeech
    dataset = ConcatDataset([
        BolorSpeech(name='train'),
        BolorSpeech(name='train2'),
        BolorSpeech(name='test'),
        BolorSpeech(name='demo'),
        BolorSpeech(name='annotation'),
        BolorSpeech(name='annotation-1111')
    ])
elif args.dataset == 'kazakh20h':
    from datasets.kazakh20h_speech import Kazakh20hSpeech
    dataset = ConcatDataset([
        Kazakh20hSpeech(name='test'),
        Kazakh20hSpeech(name='train')
    ])
elif args.dataset == 'aihub':
    from datasets.aihub_speech import AihubSpeech
    dataset = ConcatDataset([
        AihubSpeech(name='test'),
        AihubSpeech(name='train')
    ])
elif args.dataset == 'uaihub':
    from datasets.uaihub_speech import AihubSpeech
    dataset = ConcatDataset([
        AihubSpeech(name='test'),
        AihubSpeech(name='train')
    ])
else:
    print("unknown dataset!")
    sys.exit(1)
# Load each audio file and compute its magnitude spectrogram.
transform = Compose([LoadAudio(), ComputeMagSpectrogram()])
for data in tqdm(dataset):
    fname = data['fname']
    data = transform(data)
    mag_spectrogram = data['input']
    # Cache the spectrogram next to the original WAV file.
    np.save(fname.replace('.wav', '.npy'), mag_spectrogram)
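
# --- Usage sketch (illustrative addition, not part of the original script) ---
# Typical invocation, assuming the dataset files are already in place:
#
#     python preprop_dataset.py --dataset bolorspeech
#
# A downstream loader could then reuse the cached spectrogram instead of
# recomputing it from the WAV file, e.g.:
#
#     import numpy as np
#     spectrogram = np.load(fname.replace('.wav', '.npy'))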