import json
import random
import torch
import torchaudio
from torch.utils.data import Dataset


class AudioTextDataset(Dataset):
    """Dataset that samples (caption, waveform) pairs from audio-text JSON datafiles.

    Each datafile is a JSON file with a top-level 'data' list whose entries
    contain a 'wav' key (path to the audio file) and a 'caption' key (text).

    Params:
        datafiles: list of paths to JSON datafiles
        sampling_rate: target audio sampling rate
        max_clip_len: max length (seconds) of an audio clip to be sampled
    """

    def __init__(
        self,
        datafiles=[''],
        sampling_rate=32000,
        max_clip_len=5,
    ):
        # merge the 'data' entries from all JSON datafiles into one list
        all_data_json = []
        for datafile in datafiles:
            with open(datafile, 'r') as fp:
                data_json = json.load(fp)['data']
                all_data_json.extend(data_json)
        self.all_data_json = all_data_json

        self.sampling_rate = sampling_rate
        self.max_length = max_clip_len * sampling_rate  # max clip length in samples

    def __len__(self):
        return len(self.all_data_json)

    def _cut_or_randomcrop(self, waveform):
        # waveform: [1, samples]
        if waveform.size(1) > self.max_length:
            # randomly crop a max_length window from the clip
            random_idx = random.randint(0, waveform.size(1) - self.max_length)
            waveform = waveform[:, random_idx:random_idx + self.max_length]
        else:
            # zero-pad the clip up to max_length
            temp_wav = torch.zeros(1, self.max_length)
            temp_wav[:, 0:waveform.size(1)] = waveform
            waveform = temp_wav

        assert waveform.size(1) == self.max_length, \
            f"number of audio samples is {waveform.size(1)}"

        return waveform

    def _read_audio(self, index):
        audio_path = self.all_data_json[index]['wav']
        try:
            audio_data, audio_rate = torchaudio.load(audio_path, channels_first=True)
            text = self.all_data_json[index]['caption']

            # drop utterances shorter than one second
            if audio_data.size(1) < self.sampling_rate * 1:
                raise Exception(f'{audio_path} is too short, drop it ...')
            return text, audio_data, audio_rate
        except Exception as e:
            print(f'error: {e} occurred when loading {audio_path}, retrying with a random clip')
            random_index = random.randint(0, len(self.all_data_json) - 1)
            return self._read_audio(index=random_index)

    def __getitem__(self, index):
        # load a (caption, waveform) pair
        text, audio_data, audio_rate = self._read_audio(index)
        audio_len = audio_data.shape[1] / audio_rate

        # convert stereo (or multi-channel) audio to a single channel
        if audio_data.shape[0] > 1:
            audio_data = audio_data.mean(dim=0)
        else:
            audio_data = audio_data.squeeze(0)
        # audio_data: [samples]

        # resample the clip to the target sampling rate
        if audio_rate != self.sampling_rate:
            audio_data = torchaudio.functional.resample(
                audio_data, orig_freq=audio_rate, new_freq=self.sampling_rate
            )

        audio_data = audio_data.unsqueeze(0)  # [1, samples]
        audio_data = self._cut_or_randomcrop(audio_data)

        data_dict = {
            'text': text,
            'waveform': audio_data,
            'modality': 'audio_text'
        }

        return data_dict
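
# --------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the original file). It assumes a
# hypothetical datafile 'train.json' of the form
#   {"data": [{"wav": "/path/to/clip.wav", "caption": "a dog barks"}, ...]}
# and shows how the dataset could be wrapped in a standard PyTorch DataLoader.
#
# from torch.utils.data import DataLoader
#
# dataset = AudioTextDataset(
#     datafiles=['train.json'],
#     sampling_rate=32000,
#     max_clip_len=5,
# )
# loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
#
# batch = next(iter(loader))
# # batch['waveform'] has shape [4, 1, 160000]  (5 s * 32000 Hz per clip)
# # batch['text'] is a list of 4 caption strings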