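"""HuggingFace Datasets loading script for the Large Corpus of Czech Parliament Plenary Hearings."""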
import os

import datasets
import librosa
from datasets import GeneratorBasedBuilder

_LICENSE = "https://creativecommons.org/licenses/by/4.0/"
_HOMEPAGE = "https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-3126"
_DATASET_URL = "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3126/snemovna.tar.xz"
_DESCRIPTION = (
    "Large corpus of Czech parliament plenary sessions, originally released 2019-11-29 "
    "by Kratochvíl Jonáš, Polák Peter and Bojar Ondřej. "
    "The dataset consists of 444 hours of transcribed speech audio snippets 1 to 40 seconds long. "
    "Original dataset transcriptions were converted to true case from uppercase using the spacy library."
)
_CITATION = r"""
@misc{11234/1-3126,
    title = {Large Corpus of Czech Parliament Plenary Hearings},
    author = {Kratochv{\'{\i}}l, Jon{\'a}{\v s} and Pol{\'a}k, Peter and Bojar, Ond{\v r}ej},
    url = {http://hdl.handle.net/11234/1-3126},
    note = {{LINDAT}/{CLARIAH}-{CZ} digital library at the Institute of Formal and Applied Linguistics ({{\'U}FAL}), Faculty of Mathematics and Physics, Charles University},
    copyright = {Creative Commons - Attribution 4.0 International ({CC} {BY} 4.0)},
    year = {2019},
}
"""
class CzechParliamentPlenaryHearings(GeneratorBasedBuilder):
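    """Builder for the Czech Parliament Plenary Hearings corpus (444 hours of 16 kHz Czech speech)."""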
def _info(self):
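        """Declare dataset metadata and the features of a single example."""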
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"audio": datasets.features.Audio(sampling_rate=16000),
"transcription": datasets.Value("string")
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
license=_LICENSE
)
def _split_generators(self, dl_manager):
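        """Download and extract the archive, then define the train/validation/test splits."""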
data_dir = dl_manager.download_and_extract(_DATASET_URL)
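        # The extracted archive keeps everything under a top-level ASR_DATA/ directory.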
data_dir = os.path.join(data_dir, 'ASR_DATA')
splits = ("train", "dev", "test")
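        # Map the on-disk split directory names onto the canonical datasets split names.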
split_names = {
"train": datasets.Split.TRAIN,
"dev": datasets.Split.VALIDATION,
"test": datasets.Split.TEST,
}
split_generators = []
for split in splits:
split_generators.append(
datasets.SplitGenerator(
name=split_names.get(split, split),
gen_kwargs={'split': split, 'data_dir': data_dir}
)
)
return split_generators
    def _generate_examples(self, split, data_dir):
        """Walk `<data_dir>/<split>/<session folder>` and yield one example per WAV file."""
        split_dir = os.path.join(data_dir, split)
        with os.scandir(split_dir) as folders:
            for folder in folders:
                if not folder.is_dir():
                    continue
                with os.scandir(folder.path) as files:
                    for entry in files:
                        if not (entry.is_file() and entry.name.endswith('.wav')):
                            continue
                        audio_path = entry.path
                        # The transcription sits next to the audio as `<name>.wav.trn`.
                        transcription_path = audio_path + '.trn'
                        with open(transcription_path, encoding='utf-8') as f:
                            transcription = f.read().strip()
                        # Decode and resample to 16 kHz to match the declared Audio feature.
                        audio, sr = librosa.load(audio_path, sr=16000)
                        example_id = f"{folder.name}/{entry.name}"
                        yield example_id, {
                            'id': example_id,
                            'audio': {'path': audio_path, 'array': audio, 'sampling_rate': sr},
                            'transcription': transcription,
                        }
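# Usage sketch (assumptions: this script is saved locally as
# `czech_parliament_plenary_hearings.py`; recent versions of `datasets` may
# require `trust_remote_code=True` or may no longer support loading scripts):
#
#     from datasets import load_dataset
#     ds = load_dataset("./czech_parliament_plenary_hearings.py", split="train")
#     print(ds[0]["transcription"])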