kabyle_asr / kabyle_asr.py
import csv
import os
import datasets
from tqdm import tqdm
import tarfile

_CITATION = """\
@inproceedings{commonvoice:2020,
  author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
  title = {Common Voice: A Massively-Multilingual Speech Corpus},
  booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
  pages = {4211--4215},
  year = 2020
}
"""

# Short description used by _info(); the wording is a minimal placeholder
# inferred from the homepage and citation above.
_DESCRIPTION = """\
Kabyle automatic speech recognition dataset with audio clips and transcriptions
collected through the Mozilla Common Voice project.
"""

_HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"

_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"

_BASE_URL = "https://huggingface.co/datasets/TifinLab/kabyle_asr/raw/main/"

_AUDIO_URL = _BASE_URL + "data/{split}.tar"

_TRANSCRIPT_URL = _BASE_URL + "text/{split}.csv"
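
# Expected repository layout (inferred from the URL templates above and from the
# column names read in _generate_examples below):
#
#   data/train.tar, data/test.tar  -> tar archives containing the audio clips
#   text/train.csv, text/test.csv  -> ';'-separated files with columns Path;Text;Licence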


class KabyleAsr(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "id": datasets.Value("int64"),
                "path": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=48000),
                "text": datasets.Value("string"),
                "licence": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        splits = {
            "train": _AUDIO_URL.format(split="train"),
            "test": _AUDIO_URL.format(split="test"),
        }
        # dl_manager.download() returns local cached paths to the tar archives;
        # these paths are passed on to _generate_examples via gen_kwargs.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_paths": dl_manager.download(splits["train"]),
                    "split": "train",
                    "transcript_path": dl_manager.download_and_extract(_TRANSCRIPT_URL.format(split="train")),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_paths": dl_manager.download(splits["test"]),
                    "split": "test",
                    "transcript_path": dl_manager.download_and_extract(_TRANSCRIPT_URL.format(split="test")),
                },
            ),
        ]

    def _generate_examples(self, audio_paths, split, transcript_path):
        with open(transcript_path, encoding="utf-8") as f:
            # csv.DictReader makes it easy to process each row of the CSV file
            reader = csv.DictReader(f, delimiter=";", quoting=csv.QUOTE_NONE)
            # dl_manager.download() already returns the local path of the .tar archive
            with tarfile.open(audio_paths, "r") as tar:
                # Map each audio file name to its file object inside the archive
                audio_file_dict = {
                    member.name: tar.extractfile(member)
                    for member in tar.getmembers()
                    if member.isfile()
                }
                # Iterate over the metadata rows to generate examples
                for row_id, row in enumerate(tqdm(reader, desc=f"Generating examples for {split}")):
                    audio_filename = row["Path"]
                    sentence = row["Text"]
                    licenseR = row["Licence"]
                    # Only yield rows whose audio file is present in the archive
                    if audio_filename in audio_file_dict:
                        # Write the extracted audio to a temporary path
                        audio_extracted_path = os.path.join("temp", audio_filename)
                        os.makedirs(os.path.dirname(audio_extracted_path), exist_ok=True)
                        audio_bytes = audio_file_dict[audio_filename].read()
                        with open(audio_extracted_path, "wb") as audio_out:
                            audio_out.write(audio_bytes)
                        yield row_id, {
                            "id": row_id,
                            "path": audio_filename,
                            "audio": {"path": audio_extracted_path, "bytes": audio_bytes},
                            "text": sentence,
                            "licence": licenseR,
                        }
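
# Usage sketch (assumes this script is published in the TifinLab/kabyle_asr
# dataset repository referenced by _BASE_URL; not part of the loading script itself):
#
#   from datasets import load_dataset
#   ds = load_dataset("TifinLab/kabyle_asr", trust_remote_code=True)
#   print(ds["train"][0]["text"])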