# banc-trawsgrifiadau-bangor.py
import os
import datasets
from pathlib import Path
from tqdm import tqdm
from datasets.tasks import AutomaticSpeechRecognition
logger = datasets.utils.logging.get_logger(__name__)
#
_DESCRIPTION = """
Dyma fanc o 25 awr 34 munud a 24 eiliad o segmentau o leferydd naturiol dros hanner cant o gyfranwyr ar ffurf ffeiliau mp3, ynghyd â thrawsgrifiadau 'verbatim' cyfatebol o’r lleferydd ar ffurf ffeil .tsv. Mae'r mwyafrif o'r lleferydd yn leferydd digymell, naturiol. Dosbarthwn y deunydd hwn o dan drwydded agored CC0.
This resource is a bank of 25 hours 34 minutes and 24 seconds of segments of natural speech from over 50 contributors in mp3 file format, together with corresponding 'verbatim' transcripts of the speech in .tsv file format. The majority of the speech is spontaneous, natural speech. We distribute this material under a CC0 open license.
"""
# TODO: Add BibTeX citation
_CITATION = """
"""
_HOMEPAGE = "https://git.techiaith.bangor.ac.uk/data-porth-technolegau-iaith/banc-trawsgrifiadau-bangor"
_LICENSE = "Creative Commons Zero v1.0 Universal"
_DL_URL = "https://huggingface.co/datasets/DewiBrynJones/banc-trawsgrifiadau-bangor/resolve/main"
#
# 23.6 = 23.06.0
#
class BancTrawsgrifiadauBangor(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("23.6.0", "")

    def _info(self):
        # clips.tsv columns: audio_filename, audio_filesize, transcript, duration
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=48_000),
                "sentence": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[
                AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentence")
            ],
        )
    def _split_generators(self, dl_manager):
        # Download and extract the audio archive once; all splits share the same clips directory.
        clips_data_dir = os.path.join(
            dl_manager.download_and_extract(os.path.join(_DL_URL, "clips.zip")), "clips"
        )

        return [
            datasets.SplitGenerator(
                name="clips",
                gen_kwargs={
                    "metadata_filepath": dl_manager.download(f"{_DL_URL}/clips.tsv"),
                    "path_to_clips": clips_data_dir,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "metadata_filepath": dl_manager.download(f"{_DL_URL}/train.tsv"),
                    "path_to_clips": clips_data_dir,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "metadata_filepath": dl_manager.download(f"{_DL_URL}/test.tsv"),
                    "path_to_clips": clips_data_dir,
                },
            ),
        ]
    def _generate_examples(self, metadata_filepath, path_to_clips):
        with open(metadata_filepath, encoding="utf-8") as f:
            lines = f.readlines()

        # Skip the header row; each remaining row holds
        # audio_filename <tab> audio_filesize <tab> transcript <tab> duration
        for id_, line in tqdm(enumerate(lines[1:]), total=len(lines) - 1):
            field_values = line.strip().split("\t")

            # Skip malformed rows that lack a transcript column.
            if len(field_values) < 3:
                continue

            audio_file_path = os.path.join(path_to_clips, field_values[0])
            transcript = field_values[2]

            row = {"path": audio_file_path, "sentence": transcript}
            with open(audio_file_path, "rb") as audio_file:
                row["audio"] = {"path": audio_file_path, "bytes": audio_file.read()}

            yield id_, row
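

# A minimal usage sketch (assumes this file is resolved as the loading script for
# the dataset hosted at https://huggingface.co/datasets/DewiBrynJones/banc-trawsgrifiadau-bangor;
# newer versions of the datasets library may also require trust_remote_code=True):
#
#     from datasets import load_dataset
#
#     banc = load_dataset("DewiBrynJones/banc-trawsgrifiadau-bangor", split="train")
#     sample = banc[0]
#     print(sample["sentence"])                 # verbatim transcript
#     print(sample["audio"]["sampling_rate"])   # 48000, as declared in _info()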