SonishMaharjan commited on
Commit
92c8a84
1 Parent(s): 220ba07

Delete asr_nepali.py

Browse files
Files changed (1) hide show
  1. asr_nepali.py +0 -162
asr_nepali.py DELETED
@@ -1,162 +0,0 @@
1
- import csv
2
- import os
3
- import json
4
-
5
- import datasets
6
- from datasets.utils.py_utils import size_str
7
- from tqdm import tqdm
8
-
9
- from .languages import LANGUAGES
10
-
11
-
12
-
13
- _CITATION = """\
14
- @inproceedings{khadka2023tts,
15
- title={Nepali Text-to-Speech Synthesis using Tacotron2 for Melspectrogram Generation},
16
- author={Khadka, Supriya and G.C., Ranju and Paudel, Prabin and Shah, Rahul and Joshi, Basanta},
17
- booktitle={SIGUL 2023, 2nd Annual Meeting of the Special Interest Group on Under-resourced Languages: a Satellite Workshop of Interspeech 2023},
18
- year={2023}
19
- }
20
- """
21
-
22
- _HOMEPAGE = "https://https://www.openslr.org/143/"
23
-
24
- _LICENSE = "Attribution-ShareAlike 4.0 (CC BY-NC-SA 4.0)"
25
-
26
-
27
-
28
- _AUDIO_URL = "https://huggingface.co/datasets/SonishMaharjan/asr_nepali_0/resolve/main/audio/asr_nepali/{split}/asr_nepali_{split}.tar"
29
- # "https://huggingface.co/datasets/SonishMaharjan/asr_nepali_0/resolve/main/audio/{lang}/{split}/{lang}_{split}_{shard_idx}.tar"
30
-
31
- _TRANSCRIPT_URL = "https://huggingface.co/datasets/SonishMaharjan/asr_nepali_0/raw/main/transcript/asr_nepali_{split}.tsv"
32
- # "https://huggingface.co/datasets/SonishMaharjan/asr_nepali_0/raw/main/transcript/{lang}/{split}.tsv"
33
-
34
- _N_SHARDS_URL = "https://huggingface.co/datasets/SonishMaharjan/asr_nepali_0/raw/main/n_shards.json"
35
-
36
-
37
class CommonVoiceConfig(datasets.BuilderConfig):
    """BuilderConfig for CommonVoice."""

    def __init__(self, name, version, **kwargs):
        """Build a config, consuming optional corpus statistics from kwargs.

        Any of language / release_date / num_clips / num_speakers /
        validated_hr / total_hr / size_bytes may be supplied; missing
        ones default to None. Remaining kwargs pass through to
        datasets.BuilderConfig.
        """
        # Pop the optional statistics so they are not forwarded to the base class.
        optional_stats = (
            "language",
            "release_date",
            "num_clips",
            "num_speakers",
            "validated_hr",
            "total_hr",
            "size_bytes",
        )
        for attr in optional_stats:
            setattr(self, attr, kwargs.pop(attr, None))
        self.size_human = size_str(self.size_bytes)
        # Human-readable summary used as the config description.
        description = (
            f"Common Voice speech to text dataset in {self.language} released on {self.release_date}. "
            f"The dataset comprises {self.validated_hr} hours of validated transcribed speech data "
            f"out of {self.total_hr} hours in total from {self.num_speakers} speakers. "
            f"The dataset contains {self.num_clips} audio clips and has a size of {self.size_human}."
        )
        super().__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )
61
-
62
-
63
class CommonVoice(datasets.GeneratorBasedBuilder):
    """Builder for the Nepali ASR dataset hosted on the Hugging Face Hub.

    Downloads per-split tar archives of audio plus TSV transcripts and
    yields (key, example) pairs joining the two by audio filename.
    """

    # Batch size for the Arrow writer when materializing examples.
    DEFAULT_WRITER_BATCH_SIZE = 1000

    BUILDER_CONFIGS = [
        CommonVoiceConfig(
            name="asr_nepali",
            version="0.0.01",
            language="asr_nepali",
            release_date="22nov2023",
        )
    ]

    def _info(self):
        """Return the DatasetInfo (description, features, license, citation)."""
        description = (
            "This dataset comprises text and speech data in Nepali, featuring both female and male voices. The dataset includes .wav files and two separate .tsv files for male and female audio. Each .tsv file contains audio_id and corresponding sentences, aligning with the audio filenames. The dataset underwent manual quality checks, although the possibility of errors remains. It was recorded to facilitate Nepali Text-to-Speech Synthesis research during the fine-tuning phase. "
        )
        # NOTE(review): description mentions .wav files, but _generate_examples
        # forces a ".flac" suffix on transcript paths — confirm the real format.
        features = datasets.Features(
            {

                "file_path": datasets.Value("string"),
                "audio": datasets.features.Audio(sampling_rate=48_000),
                "transcription": datasets.Value("string"),

            }
        )

        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Download shard archives and transcripts, build train/test splits."""
        lang = self.config.name  # NOTE(review): unused below; URLs hard-code "asr_nepali".
        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
        with open(n_shards_path, encoding="utf-8") as f:
            n_shards = json.load(f)
        audio_urls = {}
        splits = ("train", "test")
        for split in splits:
            # NOTE(review): _AUDIO_URL has no shard-index placeholder, so every
            # loop iteration formats the SAME url; with n_shards > 1 this would
            # download duplicates rather than distinct shards — confirm the
            # shard count in n_shards.json is 1 per split.
            audio_urls[split] = [
                _AUDIO_URL.format(split=split) for i in range(n_shards['asr_nepali'][split])
            ]
        archive_paths = dl_manager.download(audio_urls)
        # In streaming mode archives are iterated in place, so nothing is extracted.
        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}

        meta_urls = {split: _TRANSCRIPT_URL.format(split=split) for split in splits}
        meta_paths = dl_manager.download_and_extract(meta_urls)

        split_generators = []
        split_names = {
            "train": datasets.Split.TRAIN,
            "test": datasets.Split.TEST,
        }
        for split in splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=split_names.get(split, split),
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
                        "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
                        "meta_path": meta_paths[split],
                    },
                ),
            )

        return split_generators

    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
        """Yield (key, example) pairs by joining archive audio with TSV rows.

        local_extracted_archive_paths: per-shard extraction dirs, or falsy when streaming.
        archives: one iter_archive iterator per shard yielding (path, file_obj).
        meta_path: local path to the split's TSV; must contain a "path" column.
        """
        data_fields = list(self._info().features.keys())
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading metadata..."):
                # Archive members are matched by filename carrying a .flac suffix.
                if not row["path"].endswith(".flac"):
                    row["path"] += ".flac"
                # accent -> accents in CV 8.0
                if "accents" in row:
                    row["accent"] = row["accents"]
                    del row["accents"]
                # if data is incomplete, fill with empty values
                for field in data_fields:
                    if field not in row:
                        row[field] = ""
                metadata[row["path"]] = row

        for i, audio_archive in enumerate(archives):
            for path, file in audio_archive:
                _, filename = os.path.split(path)
                if filename in metadata:
                    result = dict(metadata[filename])
                    # set the audio feature and the path to the extracted file
                    path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
                    result["audio"] = {"path": path, "bytes": file.read()}
                    # NOTE(review): "path" is not among the declared features
                    # (file_path / audio / transcription), and extra TSV columns
                    # also survive into `result` — confirm the builder version
                    # in use tolerates undeclared keys.
                    result["path"] = path
                    yield path, result