Anjaly committed
Commit ee48466
1 Parent(s): cb87621
Files changed (1)
  1. snow-mountain.py +15 -4
snow-mountain.py CHANGED
@@ -95,36 +95,42 @@ class Test(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
 
         downloaded_files = dl_manager.download(_FILES[self.config.name])
+        path_to_audios = "/".join(["data/cleaned", self.config.name])
 
         train_splits = [
             datasets.SplitGenerator(
                 name="train_500",
                 gen_kwargs={
                     "filepath": downloaded_files["train_500"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
             datasets.SplitGenerator(
                 name="train_1000",
                 gen_kwargs={
                     "filepath": downloaded_files["train_1000"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
             datasets.SplitGenerator(
                 name="train_2500",
                 gen_kwargs={
                     "filepath": downloaded_files["train_2500"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
             datasets.SplitGenerator(
                 name="train_short",
                 gen_kwargs={
                     "filepath": downloaded_files["train_short"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
             datasets.SplitGenerator(
                 name="train_full",
                 gen_kwargs={
                     "filepath": downloaded_files["train_full"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
         ]
@@ -134,30 +140,35 @@ class Test(datasets.GeneratorBasedBuilder):
                 name="val_500",
                 gen_kwargs={
                     "filepath": downloaded_files["val_500"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
             datasets.SplitGenerator(
                 name="val_1000",
                 gen_kwargs={
                     "filepath": downloaded_files["val_1000"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
             datasets.SplitGenerator(
                 name="val_2500",
                 gen_kwargs={
                     "filepath": downloaded_files["val_2500"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
             datasets.SplitGenerator(
                 name="val_short",
                 gen_kwargs={
                     "filepath": downloaded_files["val_short"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
             datasets.SplitGenerator(
                 name="val_full",
                 gen_kwargs={
                     "filepath": downloaded_files["val_full"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
         ]
@@ -167,21 +178,21 @@ class Test(datasets.GeneratorBasedBuilder):
                 name="test_common",
                 gen_kwargs={
                     "filepath": downloaded_files["test_common"],
+                    "path_to_audios": path_to_audios,
                 },
             ),
         ]
         return train_splits + dev_splits + test_splits
 
 
-    def _generate_examples(self, filepath):
+    def _generate_examples(self, filepath, path_to_audios):
         key = 0
-        print("----------------------------------------------")
-        print(os.listdir('data/'))
         with open(filepath) as f:
             data_df = pd.read_csv(f,sep=',')
             transcripts = []
             for index,row in data_df.iterrows():
-                samplerate, audio_data = wavfile.read(row["path"])
+                downloaded_audio = dl_manager.download(row["path"])
+                samplerate, audio_data = wavfile.read(downloaded_audio)
                 yield key, {
                     "sentence": row["sentence"],
                     "path": row["path"],