"""Magic Hub Filipino Scripted Speech Corpus dataset."""


import os

import datasets


_DATA_URL = "./fil_tsv.tar.gz"

_DESCRIPTION = """\
This dataset is part of Magic Hub's initiative to help teach machines how real people speak.
It provides structured, transcribed speech data so that enthusiasts and researchers can spend
more time training models rather than cleaning and structuring data.
"""

_HOMEPAGE = "https://magichub.com/datasets/filipino-scripted-speech-corpus-daily-use-sentence/"

_LICENSE = "https://magichub.com/magic-data-open-source-license/"

_LANGUAGES = {
    "fil": {
        "Language": "Filipino",
        "Date": "2022-4-20",
        "Size": "414 MB",
        "Version": "ab_1h_2020-12-11",
        "Validated_Hr_Total": 4.58,
        "Overall_Hr_Total": 4.58,
        "Number_Of_Voice": 10,
    },
}


class FilipinoVoiceConfig(datasets.BuilderConfig):
    """BuilderConfig for Filipino Speech."""

    def __init__(self, name, sub_version, **kwargs):
        """
        Args:
            name: `string`, name of the dataset config.
            sub_version: `string`, version identifier of the data snapshot.
            **kwargs: keyword arguments forwarded to super; the metadata keys
                `language`, `date`, `size`, `val_hrs`, `total_hrs`, and
                `num_of_voice` are popped off first.
        """
        self.sub_version = sub_version
        self.language = kwargs.pop("language", None)
        self.date_of_snapshot = kwargs.pop("date", None)
        self.size = kwargs.pop("size", None)
        self.validated_hr_total = kwargs.pop("val_hrs", None)
        self.total_hr_total = kwargs.pop("total_hrs", None)
        self.num_of_voice = kwargs.pop("num_of_voice", None)
        description = (
            f"Magic Hub speech-to-text dataset in {self.language}, version {self.sub_version} "
            f"of {self.date_of_snapshot}. The dataset comprises {self.validated_hr_total} hours "
            f"of validated transcribed speech from {self.num_of_voice} speakers and has a size "
            f"of {self.size}."
        )
        super(FilipinoVoiceConfig, self).__init__(
            name=name, version=datasets.Version("1.0.0", ""), description=description, **kwargs
        )


class FilipinoVoice(datasets.GeneratorBasedBuilder):
    """Loader for the Magic Hub Filipino scripted speech corpus."""

    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [
        FilipinoVoiceConfig(
            name=lang_id,
            language=_LANGUAGES[lang_id]["Language"],
            sub_version=_LANGUAGES[lang_id]["Version"],
            date=_LANGUAGES[lang_id]["Date"],
            size=_LANGUAGES[lang_id]["Size"],
            val_hrs=_LANGUAGES[lang_id]["Validated_Hr_Total"],
            total_hrs=_LANGUAGES[lang_id]["Overall_Hr_Total"],
            num_of_voice=_LANGUAGES[lang_id]["Number_Of_Voice"],
        )
        for lang_id in _LANGUAGES.keys()
    ]

    def _info(self):
        features = datasets.Features(
            {
                "CHANNEL": datasets.Value("string"),
                "UTTRANS_ID": datasets.Value("string"),
                "SPEAKER_ID": datasets.Value("string"),
                "PROMPT": datasets.Value("string"),
                "TRANSCRIPTION": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        archive_path = dl_manager.download(_DATA_URL)

        path_to_data = "fil"
        path_to_clips = "/".join([path_to_data, "clips"])
        metadata_filepaths = {
            split: "/".join([path_to_data, f"{split}.txt"]) for split in ["train", "test"]
        }

        # Extract locally only in non-streaming mode; when streaming, audio
        # bytes are read directly from the tar via `iter_archive` below.
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": metadata_filepaths["train"],
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": metadata_filepaths["test"],
                    "path_to_clips": path_to_clips,
                },
            ),
        ]

    def _generate_examples(self, local_extracted_archive, archive_iterator, metadata_filepath, path_to_clips):
        """Yields examples."""
        data_fields = list(self._info().features.keys())
        data_fields.remove("audio")
        path_idx = data_fields.index("UTTRANS_ID")

        all_field_values = {}
        metadata_found = False

        # The tar is iterated exactly once: the tab-separated metadata file is
        # expected to precede the audio clips, so its rows are indexed by clip
        # path first and each clip is then matched against that index.
        for path, f in archive_iterator:
            if path == metadata_filepath:
                metadata_found = True
                lines = f.readlines()
                headline = lines[0].decode("utf-8")
                column_names = headline.strip().split("\t")
                assert (
                    column_names == data_fields
                ), f"The file should have {data_fields} as column names, but has {column_names}"
                for line in lines[1:]:
                    field_values = line.decode("utf-8").strip().split("\t")
                    audio_path = "/".join([path_to_clips, field_values[path_idx]])
                    all_field_values[audio_path] = field_values
            elif path.startswith(path_to_clips):
                assert metadata_found, "Found audio clips before the metadata TSV file."
                if not all_field_values:
                    break
                if path in all_field_values:
                    field_values = all_field_values[path]
                    # Pad short rows so zipping with data_fields stays aligned.
                    if len(field_values) < len(data_fields):
                        field_values += (len(data_fields) - len(field_values)) * ["''"]
                    result = {key: value for key, value in zip(data_fields, field_values)}
                    result["audio"] = {"path": path, "bytes": f.read()}
                    # In non-streaming mode, point the audio path at the file
                    # extracted on disk rather than the in-archive path.
                    if local_extracted_archive:
                        result["audio"]["path"] = os.path.join(local_extracted_archive, path)
                    yield path, result
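

if __name__ == "__main__":
    # Minimal local smoke test: a sketch, assuming `fil_tsv.tar.gz` sits next
    # to this file (as _DATA_URL expects) and that your `datasets` version
    # still supports loading a dataset script by path (newer releases may
    # additionally require passing `trust_remote_code=True`).
    from datasets import load_dataset

    ds = load_dataset(__file__, "fil", split="train")
    print(ds)
    print(ds[0]["TRANSCRIPTION"])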