Datasets:

ArXiv:
License:
anjalyjayakrishnan commited on
Commit
87ebac7
1 Parent(s): c3f036f

Keep large CSV files as tar archives and modify the script to support them

Browse files
Files changed (1) hide show
  1. snow-mountain.py +49 -22
snow-mountain.py CHANGED
@@ -62,8 +62,8 @@ for lang in _LANGUAGES:
62
  "train_full": f"data/experiments/{lang}/train_full.csv",
63
  "val_full": f"data/experiments/{lang}/val_full.csv",
64
  "test_common": f"data/experiments/{lang}/test_common.csv",
65
- "all_verses": f"data/cleaned/{lang}/all_verses.csv",
66
- "short_verses": f"data/cleaned/{lang}/short_verses.csv",
67
  }
68
  _FILES[lang] = file_dic
69
 
@@ -75,7 +75,8 @@ OT_BOOKS = ['GEN', 'EXO', 'LEV', 'NUM', 'DEU', 'JOS', 'JDG', 'RUT', '1SA', '2SA'
75
 
76
  BOOKS_DIC = {'hindi':OT_BOOKS, 'bhadrawahi':NT_BOOKS, 'bilaspuri':NT_BOOKS, 'dogri':NT_BOOKS, 'gaddi':
77
  NT_BOOKS, 'haryanvi':NT_BOOKS, 'kangri':NT_BOOKS, 'kulvi':NT_BOOKS, 'kulvi_outer_seraji':NT_BOOKS
78
- , 'mandeali':NT_BOOKS, 'pahari_mahasui':NT_BOOKS}
 
79
 
80
  class Test(datasets.GeneratorBasedBuilder):
81
 
@@ -110,6 +111,7 @@ class Test(datasets.GeneratorBasedBuilder):
110
 
111
  downloaded_files = dl_manager.download(_FILES[self.config.name])
112
 
 
113
  audio_data = {}
114
  for book in BOOKS_DIC[self.config.name]:
115
  archive_url = f"data/cleaned/{self.config.name}/{book}.tar.gz"
@@ -118,8 +120,7 @@ class Test(datasets.GeneratorBasedBuilder):
118
  audio_ = path.split('/')[-1]
119
  if audio_ not in audio_data:
120
  content = file.read()
121
- audio_data[audio_] = content
122
-
123
 
124
  data_size = ['500', '1000', '2500', 'short', 'full']
125
 
@@ -131,6 +132,7 @@ class Test(datasets.GeneratorBasedBuilder):
131
  gen_kwargs={
132
  "filepath": downloaded_files[f"train_{size}"],
133
  "audio_data": audio_data,
 
134
  },
135
  )
136
  )
@@ -140,6 +142,7 @@ class Test(datasets.GeneratorBasedBuilder):
140
  gen_kwargs={
141
  "filepath": downloaded_files[f"val_{size}"],
142
  "audio_data": audio_data,
 
143
  },
144
  )
145
  )
@@ -149,6 +152,7 @@ class Test(datasets.GeneratorBasedBuilder):
149
  gen_kwargs={
150
  "filepath": downloaded_files["test_common"],
151
  "audio_data": audio_data,
 
152
  },
153
  )
154
  )
@@ -158,6 +162,7 @@ class Test(datasets.GeneratorBasedBuilder):
158
  gen_kwargs={
159
  "filepath": downloaded_files["all_verses"],
160
  "audio_data": audio_data,
 
161
  },
162
  )
163
  )
@@ -167,27 +172,49 @@ class Test(datasets.GeneratorBasedBuilder):
167
  gen_kwargs={
168
  "filepath": downloaded_files["short_verses"],
169
  "audio_data": audio_data,
 
170
  },
171
  )
172
  )
173
  return splits
174
 
175
 
176
- def _generate_examples(self, filepath, audio_data):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
177
  key = 0
178
- #print(list(audio_data.keys()))
179
- with open(filepath) as f:
180
- data_df = pd.read_csv(f,sep=',')
181
- for index,row in data_df.iterrows():
182
- audio = row['path'].split('/')[-1]
183
- content = ''
184
- if audio in list(audio_data.keys()):
185
- content = audio_data[audio]
186
- else:
187
- print(f"*********** Couldn't find audio: {audio} **************")
188
- yield key, {
189
- "sentence": row["sentence"],
190
- "path": row["path"],
191
- "audio":{"path": row["path"], "bytes": content}
192
- }
193
- key+=1
 
62
  "train_full": f"data/experiments/{lang}/train_full.csv",
63
  "val_full": f"data/experiments/{lang}/val_full.csv",
64
  "test_common": f"data/experiments/{lang}/test_common.csv",
65
+ "all_verses": f"data/cleaned/{lang}/all_verses.tar.gz",
66
+ "short_verses": f"data/cleaned/{lang}/short_verses.tar.gz",
67
  }
68
  _FILES[lang] = file_dic
69
 
 
75
 
76
# Which Bible books each language's recordings cover: Hindi has the Old
# Testament, the Himachali languages have the New Testament, and the four
# Dravidian languages have both.
_NT_ONLY_LANGS = ['bhadrawahi', 'bilaspuri', 'dogri', 'gaddi', 'haryanvi',
                  'kangri', 'kulvi', 'kulvi_outer_seraji', 'mandeali',
                  'pahari_mahasui']
_FULL_BIBLE_LANGS = ['malayalam', 'tamil', 'kannada', 'telugu']
BOOKS_DIC = {'hindi': OT_BOOKS}
BOOKS_DIC.update({lang: NT_BOOKS for lang in _NT_ONLY_LANGS})
BOOKS_DIC.update({lang: OT_BOOKS + NT_BOOKS for lang in _FULL_BIBLE_LANGS})
80
 
81
  class Test(datasets.GeneratorBasedBuilder):
82
 
 
111
 
112
  downloaded_files = dl_manager.download(_FILES[self.config.name])
113
 
114
+ '''Downloads full audio here'''
115
  audio_data = {}
116
  for book in BOOKS_DIC[self.config.name]:
117
  archive_url = f"data/cleaned/{self.config.name}/{book}.tar.gz"
 
120
  audio_ = path.split('/')[-1]
121
  if audio_ not in audio_data:
122
  content = file.read()
123
+ audio_data[audio_] = content
 
124
 
125
  data_size = ['500', '1000', '2500', 'short', 'full']
126
 
 
132
  gen_kwargs={
133
  "filepath": downloaded_files[f"train_{size}"],
134
  "audio_data": audio_data,
135
+ "dl_manager":dl_manager,
136
  },
137
  )
138
  )
 
142
  gen_kwargs={
143
  "filepath": downloaded_files[f"val_{size}"],
144
  "audio_data": audio_data,
145
+ "dl_manager":dl_manager,
146
  },
147
  )
148
  )
 
152
  gen_kwargs={
153
  "filepath": downloaded_files["test_common"],
154
  "audio_data": audio_data,
155
+ "dl_manager":dl_manager,
156
  },
157
  )
158
  )
 
162
  gen_kwargs={
163
  "filepath": downloaded_files["all_verses"],
164
  "audio_data": audio_data,
165
+ "dl_manager":dl_manager,
166
  },
167
  )
168
  )
 
172
  gen_kwargs={
173
  "filepath": downloaded_files["short_verses"],
174
  "audio_data": audio_data,
175
+ "dl_manager":dl_manager,
176
  },
177
  )
178
  )
179
  return splits
180
 
181
 
182
+ def _generate_examples(self, filepath, audio_data, dl_manager):
183
+
184
+ '''Function for parsing large csv archives (all_verses, short_verses)'''
185
+ def parse_archive(archive):
186
+ temp_df = pd.DataFrame()
187
+ for path, file in dl_manager.iter_archive(archive):
188
+ if path.endswith('_verses.csv'):
189
+ verses_filepath = file
190
+ verses_lines = file.readlines()
191
+ verses_lines = [line.decode("utf-8") for line in verses_lines]
192
+ column_names = verses_lines[0].strip().split(",")
193
+ rows = [row.split(',') for row in verses_lines[1:]]
194
+ rows = [[i[0], i[1], ','.join(i[2:])]for i in rows]
195
+ temp_df = pd.DataFrame(rows, columns =column_names)
196
+ break
197
+ return temp_df
198
+
199
+ if filepath.endswith('all_verses.tar.gz'):
200
+ data_df = parse_archive(filepath)
201
+ elif filepath.endswith('short_verses.tar.gz'):
202
+ data_df = parse_archive(filepath)
203
+ else:
204
+ with open(filepath) as f:
205
+ data_df = pd.read_csv(f,sep=',')
206
+
207
  key = 0
208
+ for index,row in data_df.iterrows():
209
+ audio = row['path'].split('/')[-1]
210
+ content = ''
211
+ if audio in list(audio_data.keys()):
212
+ content = audio_data[audio]
213
+ else:
214
+ print(f"*********** Couldn't find audio: {audio} **************")
215
+ yield key, {
216
+ "sentence": row["sentence"],
217
+ "path": row["path"],
218
+ "audio":{"path": row["path"], "bytes": content}
219
+ }
220
+ key+=1