anjalyjayakrishnan committed
Commit 695efb7
Parent(s): 8a28fc8

improved loading time

Files changed (1):
  1. snow-mountain.py +31 -17
snow-mountain.py CHANGED
@@ -66,6 +66,15 @@ for lang in _LANGUAGES:
     }
     _FILES[lang] = file_dic
 
+NT_BOOKS = ['MAT', 'MRK', 'LUK', 'JHN', 'ACT', 'ROM', '1CO', '2CO', 'GAL', 'EPH', 'PHP', 'COL', '1TH',
+            '2TH', '1TI', '2TI', 'TIT', 'PHM', 'HEB', 'JAS', '1PE', '2PE', '1JN', '2JN', '3JN', 'JUD', 'REV']
+OT_BOOKS = ['GEN', 'EXO', 'LEV', 'NUM', 'DEU', 'JOS', 'JDG', 'RUT', '1SA', '2SA', '1KI', '2KI', '1CH',
+            '2CH', 'EZR', 'NEH', 'EST', 'JOB', 'PSA', 'PRO', 'ECC', 'SNG', 'ISA', 'JER', 'LAM', 'EZK',
+            'DAN', 'HOS', 'JOL', 'AMO', 'OBA', 'JON', 'MIC', 'NAM', 'HAB', 'ZEP', 'HAG', 'ZEC', 'MAL']
+
+BOOKS_DIC = {'hindi':OT_BOOKS, 'bhadrawahi':NT_BOOKS, 'bilaspuri':NT_BOOKS, 'dogri':NT_BOOKS, 'gaddi':
+             NT_BOOKS, 'haryanvi':NT_BOOKS, 'kangri':NT_BOOKS, 'kulvi':NT_BOOKS, 'kulvi_outer_seraji':NT_BOOKS
+             , 'mandeali':NT_BOOKS, 'pahari_mahasui':NT_BOOKS}
 
 class Test(datasets.GeneratorBasedBuilder):
 
@@ -100,6 +109,18 @@ class Test(datasets.GeneratorBasedBuilder):
 
         downloaded_files = dl_manager.download(_FILES[self.config.name])
 
+        audio_data = {}
+        for book in BOOKS_DIC[self.config.name]:
+            archive_url = f"data/cleaned/{self.config.name}/{book}.tar.gz"
+            # archive_url = '/'.join(row["path"].split('/')[:-1])+'.tar.gz'
+            archive_path = dl_manager.download(archive_url)
+            for path, file in dl_manager.iter_archive(archive_path):
+                audio_ = path.split('/')[-1]
+                if audio_ not in audio_data:
+                    content = file.read()
+                    audio_data[audio_] = content
+
+
         data_size = ['500', '1000', '2500', 'short', 'full']
 
         splits = []
@@ -109,7 +130,7 @@ class Test(datasets.GeneratorBasedBuilder):
                     name=f"train_{size}",
                     gen_kwargs={
                         "filepath": downloaded_files[f"train_{size}"],
-                        "dl_manager": dl_manager,
+                        "audio_data": audio_data,
                     },
                 )
             )
@@ -118,7 +139,7 @@ class Test(datasets.GeneratorBasedBuilder):
                     name=f"val_{size}",
                     gen_kwargs={
                         "filepath": downloaded_files[f"val_{size}"],
-                        "dl_manager": dl_manager,
+                        "audio_data": audio_data,
                     },
                 )
             )
@@ -127,7 +148,7 @@ class Test(datasets.GeneratorBasedBuilder):
                 name="test_common",
                 gen_kwargs={
                     "filepath": downloaded_files["test_common"],
-                    "dl_manager": dl_manager,
+                    "audio_data": audio_data,
                 },
             )
         )
@@ -136,7 +157,7 @@ class Test(datasets.GeneratorBasedBuilder):
                 name="all_verses",
                 gen_kwargs={
                     "filepath": downloaded_files["all_verses"],
-                    "dl_manager": dl_manager,
+                    "audio_data": audio_data,
                 },
             )
         )
@@ -145,32 +166,25 @@ class Test(datasets.GeneratorBasedBuilder):
                 name="short_verses",
                 gen_kwargs={
                     "filepath": downloaded_files["short_verses"],
-                    "dl_manager": dl_manager,
+                    "audio_data": audio_data,
                 },
             )
         )
         return splits
 
 
-    def _generate_examples(self, filepath, dl_manager):
+    def _generate_examples(self, filepath, audio_data):
         key = 0
+        #print(list(audio_data.keys()))
         with open(filepath) as f:
             data_df = pd.read_csv(f,sep=',')
-        audio_data = {}
         for index,row in data_df.iterrows():
             audio = row['path'].split('/')[-1]
-            if audio in audio_data:
+            content = ''
+            if audio in list(audio_data.keys()):
                 content = audio_data[audio]
             else:
-                archive_url = '/'.join(row["path"].split('/')[:-1])+'.tar.gz'
-                archive_path = dl_manager.download(archive_url)
-                for path, file in dl_manager.iter_archive(archive_path):
-                    audio_ = path.split('/')[-1]
-                    if audio_ not in audio_data:
-                        content = file.read()
-                        audio_data[audio_] = content
-                if audio in audio_data:
-                    content = audio_data[audio]
+                print(f"*********** Couldn't find audio: {audio} **************")
             yield key, {
                 "sentence": row["sentence"],
                 "path": row["path"],