# yodas2/yodas2.py
import json
from collections import OrderedDict
from pathlib import Path

import datasets

from .meta import lang2shard_cnt


class Yodas2Config(datasets.BuilderConfig):
    """BuilderConfig for Yodas2."""

    def __init__(self, lang, version, **kwargs):
        self.language = lang
        self.base_data_path = f"data/{lang}"
        description = f"YouTube speech-to-text dataset in {self.language}."
        super().__init__(
            name=lang,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )

DEFAULT_CONFIG_NAME = "all"
LANGS = list(lang2shard_cnt.keys())
VERSION = "1.0.0"


class Yodas2(datasets.GeneratorBasedBuilder):
    """YODAS2 dataset builder."""

    BUILDER_CONFIGS = [Yodas2Config(lang, version=VERSION) for lang in LANGS]

    VERSION = datasets.Version("1.0.1")
    def _info(self):
        return datasets.DatasetInfo(
            description="YODAS2: YouTube speech-to-text dataset with long-form audio and utterance-level transcripts.",
            features=datasets.Features(
                OrderedDict(
                    [
                        ("id", datasets.Value("string")),
                        ("video_id", datasets.Value("string")),
                        ("duration", datasets.Value("float")),
                        ("audio", datasets.Audio(sampling_rate=24_000)),
                        (
                            "utterances",
                            datasets.Sequence(
                                feature={
                                    "utt_id": datasets.Value(dtype="string"),
                                    "text": datasets.Value(dtype="string"),
                                    "start": datasets.Value(dtype="float"),
                                    "end": datasets.Value(dtype="float"),
                                }
                            ),
                        ),
                    ]
                )
            ),
            supervised_keys=None,
            homepage="",  # TODO
            citation="",  # TODO
        )
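
    # Per-language data layout expected by `_split_generators` below, inferred from the
    # download paths it builds (shard indices are zero-padded to eight digits):
    #
    #   data/{lang}/audio/00000000.tar.gz      audio files for one shard, as a tarball
    #   data/{lang}/text/00000000.json         utterance transcripts for that shard
    #   data/{lang}/duration/00000000.txt      "<video_id> <duration_seconds>" per line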
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        total_cnt = lang2shard_cnt[self.config.name]

        audio_tar_files = dl_manager.download(
            [f"{self.config.base_data_path}/audio/{i:08d}.tar.gz" for i in range(total_cnt)]
        )
        text_files = dl_manager.download(
            [f"{self.config.base_data_path}/text/{i:08d}.json" for i in range(total_cnt)]
        )
        duration_files = dl_manager.download(
            [f"{self.config.base_data_path}/duration/{i:08d}.txt" for i in range(total_cnt)]
        )

        if dl_manager.is_streaming:
            # In streaming mode, iterate over the audio tarballs without extracting them.
            audio_archives = [dl_manager.iter_archive(audio_tar_file) for audio_tar_file in audio_tar_files]
            text_archives = [dl_manager.extract(text_file) for text_file in text_files]
            duration_archives = [dl_manager.extract(duration_file) for duration_file in duration_files]
        else:
            # In non-streaming mode, extract every audio tarball to a local directory
            # and pass the text/duration files through unchanged.
            audio_archives = dl_manager.extract(audio_tar_files)
            text_archives = text_files
            duration_archives = duration_files

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "is_streaming": dl_manager.is_streaming,
                    "audio_archives": audio_archives,
                    "text_archives": text_archives,
                    "duration_archives": duration_archives,
                },
            ),
        ]
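
    # Shape of each text JSON shard assumed by `_generate_examples` below (a sketch
    # inferred from its parsing code, not an authoritative spec): a list of objects,
    # one per video, whose utterance keys end in start/end timestamps in centiseconds.
    #
    #   [
    #     {"audio_id": "<video_id>",
    #      "text": {"<prefix>-<start_centiseconds>-<end_centiseconds>": "utterance text", ...}},
    #     ...
    #   ]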
    def _generate_examples(self, is_streaming, audio_archives, text_archives, duration_archives):
        """Yields examples."""

        def load_video2text(text_file):
            # Map each video id to its list of utterances. Utterance keys encode the
            # start/end timestamps in centiseconds as the last two '-'-separated fields.
            video2text = {}
            with open(text_file, "r", encoding="utf-8") as f:
                json_obj_lst = json.load(f)
            for json_obj in json_obj_lst:
                video_id = json_obj["audio_id"]
                video2text[video_id] = []
                for utt_id, text in sorted(json_obj["text"].items()):
                    fields = utt_id.split("-")
                    start_timestamp = float(fields[-2]) / 100
                    end_timestamp = float(fields[-1]) / 100
                    video2text[video_id].append(
                        {"utt_id": utt_id, "text": text, "start": start_timestamp, "end": end_timestamp}
                    )
            return video2text

        def load_video2duration(duration_file):
            # Each line of the duration file is "<video_id> <duration_in_seconds>".
            video2duration = {}
            with open(duration_file, encoding="utf-8") as f:
                for row in f:
                    fields = row.strip().split()
                    video2duration[fields[0]] = float(fields[1])
            return video2duration

        global_id = 0

        if is_streaming:
            for tar_file, text_file, duration_file in zip(audio_archives, text_archives, duration_archives):
                video2text = load_video2text(text_file)
                video2duration = load_video2duration(duration_file)
                for path, audio_f in tar_file:
                    video_id = Path(path).stem
                    if video_id in video2text and video_id in video2duration:
                        result = {
                            "id": str(global_id),  # the `id` feature is declared as a string
                            "video_id": video_id,
                            "audio": {"path": None, "bytes": audio_f.read()},
                            "duration": video2duration[video_id],
                            "utterances": video2text[video_id],
                        }
                        yield global_id, result
                        global_id += 1
        else:
            for extracted_dir, text_file, duration_file in zip(audio_archives, text_archives, duration_archives):
                video2text = load_video2text(text_file)
                video2duration = load_video2duration(duration_file)
                for audio_file in Path(extracted_dir).glob("*"):
                    video_id = audio_file.stem
                    if video_id in video2text and video_id in video2duration:
                        result = {
                            "id": str(global_id),  # the `id` feature is declared as a string
                            "video_id": video_id,
                            "duration": video2duration[video_id],
                            "audio": {"path": str(audio_file.absolute()), "bytes": audio_file.read_bytes()},
                            "utterances": video2text[video_id],
                        }
                        yield global_id, result
                        global_id += 1
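

# Minimal usage sketch, kept as a comment so this module stays importable only through
# `datasets` (the relative import of meta.py prevents running the file directly). The
# repository id "espnet/yodas2" and the config name "en000" are illustrative assumptions;
# valid config names are the keys of `lang2shard_cnt` in meta.py, and recent `datasets`
# releases may also require `trust_remote_code=True` for script-based datasets.
#
#   import datasets
#   ds = datasets.load_dataset("espnet/yodas2", name="en000", streaming=True)
#   sample = next(iter(ds["train"]))
#   print(sample["video_id"], sample["duration"])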