mict-zhaw committed on
Commit
2fcf121
1 Parent(s): 627c12e

Initial Commit

Browse files
Files changed (2) hide show
  1. README.md +126 -0
  2. chall.py +215 -0
README.md ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ TODO: Add YAML tags here. Copy-paste the tags obtained with the online tagging app: https://huggingface.co/spaces/huggingface/datasets-tagging
3
+ ---
4
+
5
+ # Dataset Card for [Dataset Name]
6
+
7
+ ## Table of Contents
8
+ - [Table of Contents](#table-of-contents)
9
+ - [Dataset Description](#dataset-description)
10
+ - [Dataset Summary](#dataset-summary)
11
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
12
+ - [Languages](#languages)
13
+ - [Dataset Structure](#dataset-structure)
14
+ - [Data Instances](#data-instances)
15
+ - [Data Fields](#data-fields)
16
+ - [Data Splits](#data-splits)
17
+ - [Dataset Creation](#dataset-creation)
18
+ - [Curation Rationale](#curation-rationale)
19
+ - [Source Data](#source-data)
20
+ - [Annotations](#annotations)
21
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
22
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
23
+ - [Social Impact of Dataset](#social-impact-of-dataset)
24
+ - [Discussion of Biases](#discussion-of-biases)
25
+ - [Other Known Limitations](#other-known-limitations)
26
+ - [Additional Information](#additional-information)
27
+ - [Dataset Curators](#dataset-curators)
28
+ - [Licensing Information](#licensing-information)
29
+ - [Citation Information](#citation-information)
30
+ - [Contributions](#contributions)
31
+
32
+ ## Dataset Description
33
+
34
+ - **Homepage:**
35
+ - **Repository:**
36
+ - **Paper:**
37
+ - **Leaderboard:**
38
+ - **Point of Contact:**
39
+
40
+ ### Dataset Summary
41
+
42
+ [More Information Needed]
43
+
44
+ ### Supported Tasks and Leaderboards
45
+
46
+ [More Information Needed]
47
+
48
+ ### Languages
49
+
50
+ [More Information Needed]
51
+
52
+ ## Dataset Structure
53
+
54
+ ### Data Instances
55
+
56
+ [More Information Needed]
57
+
58
+ ### Data Fields
59
+
60
+ [More Information Needed]
61
+
62
+ ### Data Splits
63
+
64
+ [More Information Needed]
65
+
66
+ ## Dataset Creation
67
+
68
+ ### Curation Rationale
69
+
70
+ [More Information Needed]
71
+
72
+ ### Source Data
73
+
74
+ #### Initial Data Collection and Normalization
75
+
76
+ [More Information Needed]
77
+
78
+ #### Who are the source language producers?
79
+
80
+ [More Information Needed]
81
+
82
+ ### Annotations
83
+
84
+ #### Annotation process
85
+
86
+ [More Information Needed]
87
+
88
+ #### Who are the annotators?
89
+
90
+ [More Information Needed]
91
+
92
+ ### Personal and Sensitive Information
93
+
94
+ [More Information Needed]
95
+
96
+ ## Considerations for Using the Data
97
+
98
+ ### Social Impact of Dataset
99
+
100
+ [More Information Needed]
101
+
102
+ ### Discussion of Biases
103
+
104
+ [More Information Needed]
105
+
106
+ ### Other Known Limitations
107
+
108
+ [More Information Needed]
109
+
110
+ ## Additional Information
111
+
112
+ ### Dataset Curators
113
+
114
+ [More Information Needed]
115
+
116
+ ### Licensing Information
117
+
118
+ [More Information Needed]
119
+
120
+ ### Citation Information
121
+
122
+ [More Information Needed]
123
+
124
+ ### Contributions
125
+
126
+ Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
chall.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import datasets
4
+ import soundfile as sf
5
+
6
+
7
+ _DESCRIPTION = "tbd"
8
+ _CITATION = "tbd"
9
+
10
+ _META_FILE = "chall_data.jsonl"
11
+
12
+
13
+ logger = datasets.logging.get_logger(__name__)
14
+
15
+
16
+ class ChallConfig(datasets.BuilderConfig):
17
+
18
+ split_into_utterances: bool = False
19
+
20
+ def __init__(self, split_into_utterances: bool, **kwargs):
21
+ super(ChallConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
22
+ self.split_into_utterances = split_into_utterances
23
+
24
+
25
+ class Chall(datasets.GeneratorBasedBuilder):
26
+
27
+ VERSION = datasets.Version("1.0.0")
28
+
29
+ DEFAULT_CONFIG_NAME = "chall_data"
30
+
31
+ BUILDER_CONFIGS = [
32
+ ChallConfig(
33
+ name="chall_data",
34
+ split_into_utterances=False
35
+ ),
36
+ ChallConfig(
37
+ name="asr",
38
+ split_into_utterances=True
39
+ )
40
+ ]
41
+
42
+ max_chunk_length: int = int
43
+
44
+ def __init__(self, *args, max_chunk_length=12, **kwargs):
45
+ super().__init__(*args, **kwargs)
46
+ self.max_chunk_length = max_chunk_length # max chunk length in seconds
47
+
48
+ @property
49
+ def manual_download_instructions(self):
50
+ return (
51
+ "To use the chall dataset you have to download it manually. "
52
+ "TBD Download Instructions. " # todo
53
+ "Please extract all files in one folder and load the dataset with: "
54
+ "`datasets.load_dataset('chall', data_dir='path/to/folder/folder_name')`"
55
+ )
56
+
57
+ def _info(self):
58
+
59
+ if self.config.split_into_utterances:
60
+ features = datasets.Features({
61
+ "audio_id": datasets.Value("string"), # todo maybe shorten to id
62
+ "intervention": datasets.Value("int32"),
63
+ "school_grade": datasets.Value("string"),
64
+ "area_of_school_code": datasets.Value("int32"),
65
+ "background_noise": datasets.Value("bool"),
66
+ "speaker": datasets.Value("string"),
67
+ "words": datasets.features.Sequence(
68
+ {
69
+ "start": datasets.Value("float"),
70
+ "end": datasets.Value("float"),
71
+ "duration": datasets.Value("float"),
72
+ "text": datasets.Value("string"),
73
+ }
74
+ ),
75
+ "audio": datasets.Audio(sampling_rate=16_000)
76
+ })
77
+ else:
78
+ features = datasets.Features({
79
+ "audio_id": datasets.Value("string"), # todo maybe shorten to id
80
+ "intervention": datasets.Value("int32"),
81
+ "school_grade": datasets.Value("string"),
82
+ "area_of_school_code": datasets.Value("int32"),
83
+ "participants": datasets.features.Sequence(
84
+ {
85
+ "pseudonym": datasets.Value("string"),
86
+ "gender": datasets.Value("string"),
87
+ "year_of_birth": datasets.Value("int32"),
88
+ "school_grade": datasets.Value("int32"),
89
+ "languages": datasets.Value("string"),
90
+ "estimated_l2_proficiency": datasets.Value("string")
91
+ }, length=-1
92
+ ),
93
+ "background_noise": datasets.Value("bool"),
94
+ "speakers": datasets.features.Sequence(
95
+ {
96
+ "spkid": datasets.Value("string"),
97
+ "name": datasets.Value("string")
98
+ }
99
+ ),
100
+ "segments": datasets.features.Sequence(
101
+ {
102
+ "speaker": datasets.Value("string"),
103
+ "words": datasets.features.Sequence(
104
+ {
105
+ "start": datasets.Value("float"),
106
+ "end": datasets.Value("float"),
107
+ "duration": datasets.Value("float"),
108
+ "text": datasets.Value("string"),
109
+ }
110
+ ),
111
+ }
112
+ ),
113
+ "audio": datasets.Audio(sampling_rate=16_000)
114
+ })
115
+
116
+ return datasets.DatasetInfo(
117
+ description=_DESCRIPTION,
118
+ features=features,
119
+ # todo No default supervised_keys (as we have to pass both question and context as input).
120
+ supervised_keys=None,
121
+ homepage="",
122
+ citation=_CITATION,
123
+ )
124
+
125
+ def _split_generators(self, dl_manager):
126
+
127
+ print("_split_generators")
128
+
129
+ # todo define splits?
130
+
131
+ data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
132
+
133
+ print(data_dir)
134
+
135
+ # todo read ids for splits as we do not separate them by folder
136
+
137
+ if not os.path.exists(data_dir):
138
+ raise FileNotFoundError(
139
+ f"{data_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('chall', data_dir=...)` "
140
+ f"that includes files unzipped from the chall zip. Manual download instructions: {self.manual_download_instructions}"
141
+ )
142
+ return [
143
+ datasets.SplitGenerator(
144
+ name=datasets.Split.TRAIN,
145
+ gen_kwargs={"filepath": os.path.join(data_dir, "data"), "metafile": os.path.join(data_dir, _META_FILE)},
146
+ ),
147
+ # datasets.SplitGenerator(
148
+ # name=datasets.Split.TEST,
149
+ # gen_kwargs={"filepath": os.path.join(data_dir, "data"), "metafile": os.path.join(data_dir, _META_FILE)},
150
+ # ),
151
+ # datasets.SplitGenerator(
152
+ # name=datasets.Split.VALIDATION,
153
+ # gen_kwargs={"filepath": os.path.join(data_dir, "data"), "metafile": os.path.join(data_dir, _META_FILE)},
154
+ # ),
155
+ ]
156
+
157
+ def _generate_examples(self, filepath, metafile):
158
+
159
+ logger.info("generating examples from = %s", filepath) # todo define logger?
160
+ print("_generate_examples")
161
+
162
+ with open(metafile, 'r') as file:
163
+ for line in file:
164
+ data = json.loads(line)
165
+
166
+ # load json
167
+ transcript_file = os.path.join(filepath, data["transcript_file"])
168
+ with open(transcript_file, 'r') as transcript:
169
+ transcript = json.load(transcript)
170
+
171
+ audio_id = data['audio_id']
172
+ audio_file_path = os.path.join(filepath, data["audio_file"])
173
+
174
+ if self.config.name == "asr":
175
+
176
+ for segment_i, segment in enumerate(transcript["segments"]):
177
+
178
+ id_ = f"{audio_id}_{str(segment_i).rjust(3, '0')}"
179
+
180
+ data["audio_id"] = id_
181
+ data["speaker_id"] = segment["speaker"]
182
+ data["words"] = segment["words"]
183
+
184
+ track = sf.SoundFile(audio_file_path)
185
+
186
+ can_seek = track.seekable()
187
+ if not can_seek:
188
+ raise ValueError("Not compatible with seeking")
189
+
190
+ sr = track.samplerate
191
+ start_time = segment["words"][0]["start"]
192
+ end_time = segment["words"][-1]["end"]
193
+
194
+ start_frame = int(sr * start_time)
195
+ frames_to_read = int(sr * (end_time - start_time))
196
+
197
+ # Seek to the start frame
198
+ track.seek(start_frame)
199
+
200
+ # Read the desired frames
201
+ audio = track.read(frames_to_read)
202
+
203
+ data["audio"] = {"path": audio_file_path, "array": audio, "sampling_rate": sr}
204
+
205
+ yield id_, data
206
+ else:
207
+
208
+ id_ = data["audio_id"]
209
+ data["speakers"] = transcript["speakers"]
210
+ data["segments"] = transcript["segments"]
211
+
212
+ audio, samplerate = sf.read(audio_file_path)
213
+ data["audio"] = {"path": audio_file_path, "array": audio, "sampling_rate": samplerate}
214
+
215
+ yield id_, data