# coding=utf-8 # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """NST: Nordic Language Technology""" import io import json import tarfile import datasets from datasets.tasks import AutomaticSpeechRecognition _CITATION = """\ @inproceedings{, title={}, author={}, booktitle={}, year={2022}, url={https://arxiv.org/abs/} } """ _DESCRIPTION = """\ This database was created by Nordic Language Technology for the development of automatic speech recognition and dictation in Norwegian. In this version, the organization of the data have been altered to improve the usefulness of the database. The acoustic databases described below were developed by the firm Nordisk språkteknologi holding AS (NST), which went bankrupt in 2003. In 2006, a consortium consisting of the University of Oslo, the University of Bergen, the Norwegian University of Science and Technology, the Norwegian Language Council and IBM bought the bankruptcy estate of NST, in order to ensure that the language resources developed by NST were preserved. In 2009, the Norwegian Ministry of Culture charged the National Library of Norway with the task of creating a Norwegian language bank, which they initiated in 2010. The resources from NST were transferred to the National Library in May 2011, and are now made available in Språkbanken, for the time being without any further modification. Språkbanken is open for feedback from users about how the resources can be improved, and we are also interested in improved versions of the databases that users wish to share with other users. Please send response and feedback to sprakbanken@nb.no. """ _HOMEPAGE = "https://www.nb.no/sprakbanken/en/resource-catalogue/oai-nb-no-sbr-54/" # Example: https://huggingface.co/datasets/NbAiLab/NST/resolve/main/data/train/nst_no_train_distant-0005-of-0009.tar.gz _DATA_URL = "https://huggingface.co/datasets/NbAiLab/NST/resolve/main/data/{split}/nst_{lang_code}_{split}_{mic}-{shard_idx:04d}-of-{shard_total:04d}.tar.gz" # Example: https://huggingface.co/datasets/NbAiLab/NST/resolve/main/data/test/nst_no_test_close-0002-of-0003.json _METADATA_URL = "https://huggingface.co/datasets/NbAiLab/NST/resolve/main/data/{split}/nst_{lang_code}_{split}_{mic}-{shard_idx:04d}-of-{shard_total:04d}.json" _SHARDS = { "no": { "test": 3, "train": 9, }, } _METADATA_MAPPING = { "pid": "pid", "age": "Age", "region_of_birth": "Region_of_Birth", "region_of_youth": "Region_of_Youth", "remarks": "Remarks", "sex": "Sex", "speaker_id": "Speaker_ID", "directory": "Directory", "imported_sheet_file": "Imported_sheet_file", "number_of_recordings": "Number_of_recordings", "recording_date": "RecDate", "recording_time": "RecTime", "recording_duration": "Record_duration", "recording_session": "Record_session", "sheet_number": "Sheet_number", "ansi_codepage": "ANSI_Codepage", "board": "Board", "byte_format": "ByteFormat", "channels": "Channels", "character_set": "CharacterSet", "coding": "Coding", "dos_codepage": "DOS_Codepage", "delimiter": "Delimiter", "frequency": "Frequency", "memo": "Memo", "script": "Script", "version": "Version", "dst": "DST", "noi": "NOI", "qua": "QUA", "snd": "SND", "spc": "SPC", "utt": "UTT", "file": "file", "t0": "t0", "t1": "t1", "t2": "t2", "text": "text", "type": "type", } class NstConfig(datasets.BuilderConfig): """BuilderConfig for NST.""" def __init__(self, *args, **kwargs): """BuilderConfig for NST. Args: **kwargs: keyword arguments forwarded to super. """ super(NstConfig, self).__init__(*args, **kwargs) class Nst(datasets.GeneratorBasedBuilder): """NST dataset.""" DEFAULT_WRITER_BATCH_SIZE = 1000 BUILDER_CONFIGS = [ NstConfig( name="no-both", version=datasets.Version("1.0.0"), description="NST Norwegian (both close and distant microphones)", ), NstConfig( name="no-close", version=datasets.Version("1.0.0"), description="NST Norwegian (close microphone)", ), NstConfig( name="no-distant", version=datasets.Version("1.0.0"), description="NST Norwegian (distant microphone)", ), ] def _info(self): sampling_rate = 16_000 return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features({ "id": datasets.Value("string"), "audio": datasets.features.Audio(sampling_rate=sampling_rate), "lang_code": datasets.Value("string"), "microphone_position": datasets.Value("string"), # Metadata "pid": datasets.Value("string"), # "no14xx31-24012000-1820", "age": datasets.Value("int32"), # 18, "region_of_birth": datasets.Value("string"), # "Voss og omland", "region_of_youth": datasets.Value("string"), # "Voss og omland", "remarks": datasets.Value("string"), # "Some text", "sex": datasets.Value("string"), # "female", "speaker_id": datasets.Value("string"), # 31, "directory": datasets.Value("string"), # "c:\\adb_0464\\data\\scr0464\\17\\04641701\\r4640006", "imported_sheet_file": datasets.Value("string"), # "c:\\adb\\dsdr\\scripts\\nor464\\nor464.psh", "number_of_recordings": datasets.Value("int32"), # 987, "recording_date": datasets.Value("string"), # "24 jan 2000", "recording_time": datasets.Value("string"), # "18:20:52", "recording_duration": datasets.Value("string"), # "232' 15\"", "recording_session": datasets.Value("int32"), # "6", "sheet_number": datasets.Value("int32"), # "31", "ansi_codepage": datasets.Value("string"), # "1252", "board": datasets.Value("string"), # "2;NI DSP2200", "byte_format": datasets.Value("string"), # "01", "channels": datasets.Value("int32"), # 2, "character_set": datasets.Value("string"), # "ANSI", "coding": datasets.Value("string"), # "PCM;Linear", "dos_codepage": datasets.Value("string"), # "850", "delimiter": datasets.Value("string"), # ">-<", "frequency": datasets.Value("int32"), # 16000, "memo": datasets.Value("string"), # "", "script": datasets.Value("string"), # "464", "version": datasets.Value("string"), # "0001_1", "dst": datasets.Value("string"), # "0", "noi": datasets.Value("string"), # "0", "qua": datasets.Value("string"), # "X", "snd": datasets.Value("string"), # "0", "spc": datasets.Value("string"), # "0", "utt": datasets.Value("string"), # "0", "file": datasets.Value("string"), # "u0006733.wav", "t0": datasets.Value("int32"), # 417024, "t1": datasets.Value("int32"), # 249777024, "t2": datasets.Value("int32"), # 250193024, "text": datasets.Value("string"), # "Selv rett utenfor administrasjonsbygningen er det anlagt en liten fotballbane.", "type": datasets.Value("string"), # "ISp71", }), supervised_keys=None, homepage=_HOMEPAGE, citation=_CITATION, task_templates=[ AutomaticSpeechRecognition( audio_column="audio", transcription_column="text" ) ], ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" data_urls = {} lang_code, mic = self.config.name.split("-") if mic == "both": mics = ["close", "distant"] else: mics = [mic] for split in ["train", "test"]: data_urls[split] = [] for mic in mics: shard_total = _SHARDS[lang_code][split] for shard_idx in range(1, shard_total + 1): # .../data/{split}/nst_{lang_code}_{split}_{mic}-{shard_idx}-of-{shard_total} string_formating = dict( split=split, lang_code=lang_code, mic=mic, shard_idx=shard_idx, shard_total=shard_total ) data_urls[split] += [( _METADATA_URL.format(**string_formating), _DATA_URL.format(**string_formating) )] train_downloaded_data = dl_manager.download(data_urls["train"]) test_downloaded_data = dl_manager.download(data_urls["test"]) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={ "filepaths": train_downloaded_data, } ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={ "filepaths": test_downloaded_data, } ), ] def _generate_examples(self, filepaths): """Yields examples.""" lang_code, _ = self.config.name.split("-") data_fields = list(self._info().features.keys()) data_fields.remove("id") data_fields.remove("audio") data_fields.remove("lang_code") data_fields.remove("microphone_position") for metadata_path, archive_path in filepaths: mic = "close" if metadata_path else "distant" metadata = {} with open(metadata_path) as metadata_file: for line in metadata_file.read().split("\n"): if line: metadata_object = json.loads(line) metadata_key = f'{metadata_object["pid"]}_{metadata_object["file"].replace(".wav", "")}_{mic}' metadata[metadata_key] = metadata_object with open(archive_path, "rb") as archive_fs: archive_bytes = io.BytesIO(archive_fs.read()) with tarfile.open(fileobj=archive_bytes, mode="r") as tar: for audio_file in tar.getmembers(): if audio_file.isfile() and audio_file.name.endswith(".mp3"): metadata_key = f'{audio_file.name.replace(".mp3", "")}_{mic}' audio_bytes = tar.extractfile(audio_file).read() audio_dict = {"bytes": audio_bytes, "path": audio_file.name} fields = {key: metadata[metadata_key].get(_METADATA_MAPPING[key], "") for key in data_fields} fields["channels"] = 1 fields["file"] = fields["file"].replace(".wav", ".mp3") fields["frequency"] = 16_000 # Fixing some errors in the metadata if "," in fields["age"]: fields["age"] = fields["age"].split(",")[0].strip() if "+" in fields["age"]: fields["age"] = fields["age"].split("+")[0].strip() if " " in fields["age"].strip(): fields["age"] = fields["age"].strip().split(" ")[0].strip() if fields["age"].lower() in ("slethei", "ramslie") or not fields["age"].strip(): fields["age"] = 0 for int32_field in ("number_of_recordings", "recording_session", "sheet_number", "t0", "t1", "t2"): if not fields[int32_field].strip(): fields[int32_field] = 0 yield metadata_key, { "id": metadata_key, "audio": audio_dict, "lang_code": lang_code, "microphone_position": mic, **fields }