# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NLPGhana Voice Dataset"""

from __future__ import absolute_import, division, print_function

import os

import datasets


# _DATA_URL = "https://zenodo.org/record/4641533/files/ak.tar.gz?download=1"
# _DATA_URL = "https://www.dropbox.com/s/o6k13voiy8kdhhk/ak.tar.gz?dl=1"
_DATA_URL = "ak.tar.gz"

_CITATION = """\
"""

_DESCRIPTION = """\
This dataset comprises audio data of Twi, a low-resource language spoken by the Akan people of Ghana. It has been adapted by NLPGhana.
"""

_HOMEPAGE = "https://ghananlp.org/"

_LICENSE = ""

_LANGUAGES = {
    "ak": {
        "Language": "Twi",
        "Date": "2023-07-08",
        "Size": "753 MB",
        "Version": "tw_05_2023-07-08",
    },
}


class NLPGhanaVoiceConfig(datasets.BuilderConfig):
    """BuilderConfig for NLPGhanaVoice."""

    def __init__(self, name, sub_version, **kwargs):
        """
        Args:
            name: `string`, language identifier of the configuration (e.g. "ak")
            sub_version: `string`, version string of the data snapshot
            **kwargs: keyword arguments forwarded to super; `language`, `date`
                and `size` are popped and stored on the config.
        """
        self.sub_version = sub_version
        self.language = kwargs.pop("language", None)
        self.date_of_snapshot = kwargs.pop("date", None)
        self.size = kwargs.pop("size", None)
        description = (
            f"NLPGhana speech-to-text dataset in {self.language}, "
            f"version {self.sub_version} of {self.date_of_snapshot}. "
            f"The dataset has a size of {self.size}."
        )
        super(NLPGhanaVoiceConfig, self).__init__(
            name=name, version=datasets.Version("1.0.5", ""), description=description, **kwargs
        )


class NLPGhanaVoice(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        NLPGhanaVoiceConfig(
            name=lang_id,
            language=_LANGUAGES[lang_id]["Language"],
            sub_version=_LANGUAGES[lang_id]["Version"],
            date=_LANGUAGES[lang_id]["Date"],
            size=_LANGUAGES[lang_id]["Size"],
        )
        for lang_id in _LANGUAGES.keys()
    ]

    def _info(self):
        features = datasets.Features(
            {
                "user_id": datasets.Value("string"),
                "path": datasets.Value("string"),
                "text": datasets.Value("string"),
                "durationMsec": datasets.Value("int64"),
                "sampleRate": datasets.Value("int64"),
                "speaker_gender": datasets.Value("string"),
                "mother_tongue": datasets.Value("string"),
                "date": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download and extract the archive; the per-language data lives in a
        # subfolder named after the config (e.g. "ak"), with audio under "clips".
        dl_path = dl_manager.download_and_extract(_DATA_URL)
        abs_path_to_data = os.path.join(dl_path, self.config.name)
        abs_path_to_clips = os.path.join(abs_path_to_data, "clips")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(abs_path_to_data, "train.tsv"),
                    "path_to_clips": abs_path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(abs_path_to_data, "test.tsv"),
                    "path_to_clips": abs_path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(abs_path_to_data, "validation.tsv"),
                    "path_to_clips": abs_path_to_clips,
                },
            ),
        ]

    def _generate_examples(self, filepath, path_to_clips):
        """Yields examples."""
        data_fields = list(self._info().features.keys())
        path_idx = data_fields.index("path")

        with open(filepath, encoding="utf-8") as f:
            lines = f.readlines()
            headline = lines[0]

            # The TSV header must match the feature names declared in _info().
            column_names = headline.strip().split("\t")
            assert (
                column_names == data_fields
            ), f"The file should have {data_fields} as column names, but has {column_names}"

            for id_, line in enumerate(lines[1:]):
                field_values = line.strip().split("\t")

                # set absolute path for mp3 audio file
                field_values[path_idx] = os.path.join(path_to_clips, field_values[path_idx])

                # if data is incomplete, fill with empty values
                if len(field_values) < len(data_fields):
                    field_values += (len(data_fields) - len(field_values)) * [""]

                yield id_, {key: value for key, value in zip(data_fields, field_values)}
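# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the loading script proper): how this
# builder would typically be invoked through the `datasets` library. The local
# filename `nlpghana_voice.py` is an assumption; adjust it to wherever this
# script is saved, and make sure the `ak.tar.gz` archive referenced by
# _DATA_URL is available relative to it.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("./nlpghana_voice.py", "ak", split="train")
#   print(ds[0]["text"], ds[0]["path"], ds[0]["durationMsec"])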