# WER_Evaluation_For_TTS / WER_Evaluation_For_TTS.py
# Uploaded by Idrizorg — "Update WER_Evaluation_For_TTS.py" (commit 78d62b5)
import csv
import os
from pathlib import Path
import datasets
from datasets.tasks import AutomaticSpeechRecognition
# BibTeX citation for the SOMOS dataset (Interspeech 2022 paper).
_CITATION = """\
@inproceedings{maniati22_interspeech,
author={Georgia Maniati and Alexandra Vioni and Nikolaos Ellinas and Karolos Nikitaras and Konstantinos Klapsas and June Sig Sung and Gunu Jho and Aimilios Chalamandaris and Pirros Tsiakoulis},
title={{SOMOS: The Samsung Open MOS Dataset for the Evaluation of Neural Text-to-Speech Synthesis}},
year=2022,
booktitle={Proc. Interspeech 2022},
pages={2388--2392},
doi={10.21437/Interspeech.2022-10922}
}
"""
# Human-readable summary surfaced through DatasetInfo / the dataset card.
_DESCRIPTION = """\
The SOMOS dataset contains 20,000 synthetic utterances (wavs), 100 natural utterances and 374,955 naturalness evaluations (human-assigned scores in the range 1-5). The synthetic utterances are single-speaker, generated by training several Tacotron-like acoustic models and an LPCNet vocoder on the LJ Speech voice public dataset. 2,000 text sentences were synthesized, selected from Blizzard Challenge texts of years 2007-2016, the LJ Speech corpus as well as Wikipedia and general domain data from the Internet.
Naturalness evaluations were collected via crowdsourcing a listening test on Amazon Mechanical Turk in the US, GB and CA locales. The records of listening test participants (workers) are fully anonymized. Statistics on the reliability of the scores assigned by the workers are also included, generated through processing the scores and validation controls per submission page.
"""
# Zenodo record hosting the official SOMOS release.
_URL = "https://zenodo.org/record/7119400"
# Hard-coded local data location ("Téléchargements" is French for "Downloads").
# NOTE(review): this makes the script machine-specific — presumably intended
# for a single local setup; confirm before reuse on another machine.
home = Path.home()
destination_folder = Path(home, "Téléchargements","somos","Aud","audiosdestwer")
class SOMOS(datasets.GeneratorBasedBuilder):
    """Loader for a local subset of the SOMOS speech dataset.

    Audio files live in ``data.tar.gz`` under ``destination_folder`` and the
    transcripts in a sibling ``metadata.csv`` whose rows are
    ``relative/path.wav,transcript``.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="main", version=VERSION, description="A piece of SOMOS Speech dataset"),
    ]

    def _info(self):
        """Return the dataset metadata: features, homepage and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # 22.05 kHz matches the LJ Speech-based SOMOS audio.
                    "audio": datasets.Audio(sampling_rate=22050),
                    "file": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Extract the local archive and wire its path into the generator."""
        wav_path = dl_manager.extract(str(destination_folder / "data.tar.gz"))
        csv_path = Path(str(destination_folder / "metadata.csv"))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"wav_path": wav_path, "csv_path": csv_path},
            ),
        ]

    def _generate_examples(self, wav_path, csv_path):
        """Yield ``(key, example)`` pairs from the metadata CSV.

        Args:
            wav_path: directory where ``data.tar.gz`` was extracted.
            csv_path: path to ``metadata.csv`` (``relative/path.wav,transcript``).

        Fixes over the previous version: ``audio`` is now resolved inside the
        extracted archive so the ``datasets.Audio`` feature can actually decode
        the file (the old code stored only the bare filename and never used
        ``wav_path``); the dead commented-out code was removed; examples are
        streamed instead of being buffered in a list first.
        """
        with open(csv_path, encoding="utf-8") as csv_file:
            key = 0
            for row in csv.reader(csv_file):
                if not row:
                    # Skip blank lines defensively.
                    continue
                rel_path = str(row[0])
                # NOTE(review): assumes the tar extracts the wavs flat into
                # wav_path — the original indexed split("/")[1], i.e. the
                # basename of a two-component path; confirm archive layout.
                basename = rel_path.split("/")[-1]
                yield key, {
                    "file": rel_path,
                    "audio": os.path.join(wav_path, basename),
                    "text": row[1],
                }
                key += 1