import os
import re

import datasets
import requests
from datasets.tasks import AutomaticSpeechRecognition

_DATA_URLS = [
    "https://sprogtek-ressources.digst.govcloud.dk/nota/Inspiration%202016%20-%202021/",
    "https://sprogtek-ressources.digst.govcloud.dk/nota/Inspiration%202008%20-%202016/",
    "https://sprogtek-ressources.digst.govcloud.dk/nota/Radio-TV%20program%202007%20-%202012/",
    "https://sprogtek-ressources.digst.govcloud.dk/nota/Radio-TV%20Program%202013%20-%202015/",
    "https://sprogtek-ressources.digst.govcloud.dk/nota/Radio-TV%20Program%202016%20-%202018/",
    "https://sprogtek-ressources.digst.govcloud.dk/nota/Radio-TV%20Program%202019%20-%202022/",
]

_DESCRIPTION = """\
Nota lyd- og tekstdata
Datasættet indeholder både tekst- og taledata fra udvalgte dele af Nota's lydbogsbiblotek. Datasættet består af
over 500 timers oplæsninger og medfølgende transkriptioner på dansk. Al lyddata er i .wav-format, mens tekstdata
er i .txt-format.
I data indgår indlæsninger af Notas eget blad "Inspiration" og "Radio/TV", som er udgivet i perioden 2007 til 2022.
Nota krediteres for arbejdet med at strukturere data, således at tekst og lyd stemmer overens.
Nota er en institution under Kulturministeriet, der gør trykte tekster tilgængelige i digitale formater til personer
med synshandicap og læsevanskeligheder, fx via produktion af lydbøger og oplæsning af aviser, magasiner, mv.
"""
_HOMEPAGE = "https://sprogteknologi.dk/dataset/notalyd-ogtekstdata"

_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"


def extract_file_links():
    """
    Extracts the web locations of the zip files containing the data.

    :return: list of download URLs
    """
    download_paths = []
    download_files_regex = re.compile(r'<a href="(.+?)">')
    for download_root in _DATA_URLS:
        r = requests.get(download_root)
        all_files = download_files_regex.findall(r.text)
        # Ignore the parent-directory link and the Readme file
        all_files_filtered = filter(lambda x: x != "Readme.txt" and x != "/nota/", all_files)
        for download_file in all_files_filtered:
            # Skip INSL20210003.zip: the archive is empty
            if "INSL20210003.zip" in download_file:
                continue
            # Because of wget behaviour, the URL-encoded %20 has to be replaced with a literal space
            full_download_path = download_root + download_file
            full_download_path = full_download_path.replace("%20", " ")
            download_paths.append(full_download_path)
    return download_paths


class NotaDanishSoundAndTextDataset(datasets.GeneratorBasedBuilder):
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        features = datasets.Features(
            {
                "audio": datasets.Audio(sampling_rate=44_100),
                "sentence": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentence")],
        )

    def _split_generators(self, dl_manager):
        download_urls = extract_file_links()
        dl_path = dl_manager.download_and_extract(download_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "dl_path": dl_path,
                },
            )
        ]

    @staticmethod
    def _extract_transcript(file_path):
        with open(file_path, "r", encoding="utf-8") as f:
            data = f.read()
        return data

    def _generate_examples(self, dl_path):
        key = 0
        transcripts = {}
        # dl_path is a list of local directories, one per downloaded and extracted archive
        for parent_directory in dl_path:
            for sub_directory in os.listdir(parent_directory):
                data_directory_path = os.path.join(parent_directory, sub_directory)
                data_files = os.listdir(data_directory_path)
                # Pair each .wav file with its .txt transcript via the shared file stem
                for data_file in data_files:
                    file_type = data_file[-3:]
                    file_id = data_file[:-4]
                    if file_id not in transcripts:
                        transcripts[file_id] = {}
                    if file_type == "wav":
                        transcripts[file_id]["audio_path"] = os.path.join(data_directory_path, data_file)
                    elif file_type == "txt":
                        transcripts[file_id]["sentence"] = self._extract_transcript(
                            os.path.join(data_directory_path, data_file)
                        )
                for sample_id, info in transcripts.items():
                    audio = {"path": info["audio_path"]}
                    yield key, {"audio": audio, "sentence": info["sentence"]}
                    key += 1
                transcripts = {}
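

# Minimal usage sketch: loading the dataset through this script with the `datasets` library. Note that
# load_dataset downloads and extracts every archive listed in _DATA_URLS (500+ hours of audio), so running
# this needs considerable time and disk space.
if __name__ == "__main__":
    nota = datasets.load_dataset(__file__, split="train")
    print(nota)                 # dataset summary: features and number of rows
    print(nota[0]["sentence"])  # transcript of the first example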