# coding=utf-8
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Common Voice Dataset"""
import json
import os
from copy import deepcopy
from more_itertools import windowed
import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
Error-correction dataset built from ASR transcriptions of CORAAL (the Corpus of Regional African American Language).
"""
_HOMEPAGE = ""
_LICENSE = ""
URLS = {
"v1": {
"text": "https://huggingface.co/datasets/Padomin/coraal-asr/resolve/main/coraal-asr.tar.gz",
},
"v2": {
"text": "https://huggingface.co/datasets/Padomin/coraal-asr/resolve/main/coraal-asr-v2.tar.gz",
},
"ctc-large": {
"text": "https://huggingface.co/datasets/Padomin/coraal-asr/resolve/main/coraal-ctc-large.tar.gz",
},
"xlsr": {
"text": "https://huggingface.co/datasets/Padomin/coraal-asr/resolve/main/coraal-xlsr.tar.gz",
}
}
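# Usage sketch (illustrative): each config name above selects a different
# archive of pre-computed ASR transcripts. Assuming this script is hosted in
# the Padomin/coraal-asr Hub repository, a config can be loaded with:
#
#   from datasets import load_dataset
#   ds = load_dataset("Padomin/coraal-asr", name="xlsr")
#   print(ds["train"][0]["src"])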
class coraal_asr_config(datasets.BuilderConfig):
    def __init__(self, n_fronts=0, n_bodies=1, n_rears=0, front_prefix='front:\n', body_prefix='body:\n', rear_prefix='rear:\n', **kwargs):
        super(coraal_asr_config, self).__init__(**kwargs)
        # Numbers of preceding (front) and following (rear) context utterances
        # attached to each window, and of body utterances to be corrected.
        self.n_fronts = n_fronts
        self.n_bodies = n_bodies
        self.n_rears = n_rears
        # Prefixes that delimit the three regions of the assembled source string.
        self.front_prefix = front_prefix
        self.body_prefix = body_prefix
        self.rear_prefix = rear_prefix
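# Worked example (illustrative): with n_fronts=1, n_bodies=1, n_rears=1 and the
# default prefixes, an ASR window ("prev", "cur", "next") is rendered by
# _generate_examples below as the source string
#
#   front:
#   prev
#   body:
#   cur
#   rear:
#   next
#
# while the target is the reference transcript of "cur".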
class coraal_asr(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.2.0")
BUILDER_CONFIGS = [
coraal_asr_config(name="v1", version=VERSION),
coraal_asr_config(name="v2", version=VERSION),
coraal_asr_config(name="ctc-large", version=VERSION),
coraal_asr_config(name="xlsr", version=VERSION),
]
    DEFAULT_CONFIG_NAME = "ctc-large"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
BUILDER_CONFIG_CLASS = coraal_asr_config
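    # Because BUILDER_CONFIG_CLASS is set, the window parameters can be
    # overridden at load time through config kwargs (sketch, assuming the
    # Padomin/coraal-asr repository):
    #
    #   ds = load_dataset("Padomin/coraal-asr", name="v2", n_fronts=1, n_rears=1)
    #
    # which attaches one utterance of front and rear context to every body.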
def _info(self):
        feature_dict = {
            "text": datasets.Value("string"),      # reference transcript of the anchor utterance
            "text_asr": datasets.Value("string"),  # ASR hypothesis of the anchor utterance
            "src": datasets.Value("string"),       # prefixed ASR window (model input)
            "tgt": datasets.Value("string"),       # reference text of the window body (model target)
            "id": datasets.Value("string"),        # id of the source document
        }
features = datasets.Features(feature_dict)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
if "v1" in self.config.name:
urls = deepcopy(URLS["v1"])
if "v2" in self.config.name:
urls = deepcopy(URLS["v2"])
if "ctc-large" in self.config.name:
urls = deepcopy(URLS["ctc-large"])
if "xlsr" in self.config.name:
urls = deepcopy(URLS["xlsr"])
dl_path = dl_manager.download_and_extract(urls)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filepath": os.path.join(dl_path["text"], "train.jsonl"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"filepath": os.path.join(dl_path["text"], "test.jsonl"),
"split": "test",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"filepath": os.path.join(dl_path["text"], "validation.jsonl"),
"split": "validation",
},
),
]
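    # Expected layout of each *.jsonl file (inferred from _generate_examples
    # below): one JSON object per line, shaped roughly as
    #   {"id": "...", "utterances": [{"text": "...", "asr": "..."}, ...]}
    # where "text" is the reference transcript and "asr" the ASR hypothesis.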
def _generate_examples(self, filepath, split):
"""Yields examples."""
id_ = 0
with open(filepath, encoding="utf-8") as f:
for line in f:
doc = json.loads(line)
utterances = doc['utterances']
                # Separate the ASR hypotheses from the reference transcripts.
texts_asr = [utt['asr'] for utt in utterances]
texts = [utt['text'] for utt in utterances]
                # Build sliding windows so that each body keeps n_fronts utterances
                # of preceding and n_rears utterances of following context; the
                # document boundaries are padded with empty strings.
                window_size = self.config.n_fronts + self.config.n_bodies + self.config.n_rears
                padded_texts_asr = [''] * self.config.n_fronts + texts_asr + [''] * self.config.n_rears
                if split == "train":
                    # Training windows advance one utterance at a time.
                    windowed_texts_asr = windowed(padded_texts_asr, window_size)
                    windowed_texts = windowed(texts, self.config.n_bodies)
                else:
                    # Eval windows advance by n_bodies, so each utterance is
                    # corrected exactly once.
                    windowed_texts_asr = windowed(padded_texts_asr, window_size, fillvalue='', step=self.config.n_bodies)
                    windowed_texts = windowed(texts, self.config.n_bodies, fillvalue='', step=self.config.n_bodies)
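                # Worked example (illustrative): with n_fronts=1, n_bodies=2,
                # n_rears=1 and texts_asr = [a, b, c, d], the padded sequence is
                # ['', a, b, c, d, ''] and the window size is 4. Training yields
                # ('', a, b, c), (a, b, c, d), (b, c, d, ''); eval yields
                # ('', a, b, c), (b, c, d, '').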
                for text_asr, text, utt in zip(windowed_texts_asr, windowed_texts, utterances):
                    # Assemble the source: optional front context, the body to
                    # be corrected, then optional rear context.
                    src = ''
                    if self.config.n_fronts > 0:
                        src += self.config.front_prefix
                        src += '\n'.join(text_asr[:self.config.n_fronts])
                        src += '\n'
                    src += self.config.body_prefix
                    src += '\n'.join(text_asr[self.config.n_fronts:self.config.n_fronts + self.config.n_bodies])
                    if self.config.n_rears > 0:
                        src += '\n' + self.config.rear_prefix
                        src += '\n'.join(text_asr[self.config.n_fronts + self.config.n_bodies:])
                    tgt = '\n'.join(text)
                    data = {
                        # Note: `utt` follows the zip index, so "text"/"text_asr"
                        # only coincide with the window body when windows advance
                        # one utterance at a time (train) or when n_bodies == 1.
                        "text": utt["text"],
                        "text_asr": utt["asr"],
                        "src": src,
                        "tgt": tgt,
                        "id": doc["id"],
                    }
                    yield id_, data
                    id_ += 1
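# Minimal smoke-test sketch (assumptions: network access to the Hugging Face
# Hub and a `datasets` version that still loads standalone dataset scripts).
# The __main__ guard keeps importing this module side-effect free.
if __name__ == "__main__":
    from datasets import load_dataset

    # Load the default "ctc-large" config via this script and print one example.
    ds = load_dataset(__file__, name="ctc-large")
    sample = ds["validation"][0]
    print(sample["src"])
    print("---")
    print(sample["tgt"])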