Achitha committed on
Commit 53221d3
1 Parent(s): 072eb6c

Create 10th_science_tamil_to_english.py

Files changed (1)
  1. 10th_science_tamil_to_english.py +129 -0
10th_science_tamil_to_english.py ADDED
@@ -0,0 +1,129 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Simple sentences dataset - roughly 90 minutes of Tamil speech with transcripts."""
+
+ import json
+ import os
+
+ import datasets
+
+ _CITATION = """\
+ @misc{simpledata_1,
+     title = {Whisper model for tamil-to-eng translation},
+     publisher = {Achitha},
+     year = {2022},
+ }
+ @misc{simpledata_2,
+     title = {Fine-tuning whisper model},
+     publisher = {Achitha},
+     year = {2022},
+ }
+ """
+
+ _DESCRIPTION = """\
+ The data contains roughly one and a half hours of audio and transcripts in the Tamil language.
+ """
+
+ _HOMEPAGE = ""
+
+ _LICENSE = "MIT"
+
+ # Paths are relative to this dataset repository.
+ _METADATA_URLS = {
+     "train": "data/train.jsonl",
+     "test": "data/test.jsonl",
+ }
+ _URLS = {
+     "train": "data/train.tar.gz",
+     "test": "data/test.tar.gz",
+ }
+
+
+ class SimpleData(datasets.GeneratorBasedBuilder):
+     """Tamil speech dataset with sentence-level transcripts."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "audio": datasets.Audio(sampling_rate=16_000),
+                 "path": datasets.Value("string"),
+                 "sentence": datasets.Value("string"),
+                 "length": datasets.Value("float32"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         metadata_paths = dl_manager.download(_METADATA_URLS)
+         train_archive = dl_manager.download(_URLS["train"])
+         test_archive = dl_manager.download(_URLS["test"])
+         # In streaming mode the archives are iterated directly and never extracted.
+         local_extracted_train_archive = dl_manager.extract(train_archive) if not dl_manager.is_streaming else None
+         local_extracted_test_archive = dl_manager.extract(test_archive) if not dl_manager.is_streaming else None
+         train_dir = "train"
+         test_dir = "test"
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "metadata_path": metadata_paths["train"],
+                     "local_extracted_archive": local_extracted_train_archive,
+                     "path_to_clips": train_dir,
+                     "audio_files": dl_manager.iter_archive(train_archive),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "metadata_path": metadata_paths["test"],
+                     "local_extracted_archive": local_extracted_test_archive,
+                     "path_to_clips": test_dir,
+                     "audio_files": dl_manager.iter_archive(test_archive),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, metadata_path, local_extracted_archive, path_to_clips, audio_files):
+         """Yields examples as (key, example) tuples."""
+         # Index the JSONL metadata by clip path so audio can be matched while iterating the archive.
+         examples = {}
+         with open(metadata_path, encoding="utf-8") as f:
+             for row in f:
+                 data = json.loads(row)
+                 examples[data["path"]] = data
+         inside_clips_dir = False
+         id_ = 0
+         for path, f in audio_files:
+             if path.startswith(path_to_clips):
+                 inside_clips_dir = True
+                 if path in examples:
+                     result = examples[path]
+                     path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
+                     result["audio"] = {"path": path, "bytes": f.read()}
+                     result["path"] = path
+                     yield id_, result
+                     id_ += 1
+             elif inside_clips_dir:
+                 break
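
For reference, a minimal usage sketch (not part of the commit). It assumes the data/ layout referenced by _METADATA_URLS and _URLS above, and that each line of train.jsonl / test.jsonl is a JSON object whose "path" matches a member path inside the corresponding tar archive; the clip name below is made up for illustration, and recent datasets versions may additionally require trust_remote_code=True to run a loading script.

# Example metadata line (illustrative values only):
#   {"path": "train/clip_0001.wav", "sentence": "...", "length": 4.2}

from datasets import load_dataset

# Stream the local script so the tar archives are read without being extracted.
ds = load_dataset("./10th_science_tamil_to_english.py", streaming=True)

sample = next(iter(ds["train"]))
print(sample["path"], sample["length"])
print(sample["sentence"])
print(sample["audio"]["sampling_rate"])  # 16000, decoded on the fly by the Audio feature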