Committed by system (HF staff)
Commit: 48c7741
Parent: 9f18076

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2):
  1. README.md +1 -0
  2. air_dialogue.py +69 -67
README.md CHANGED

@@ -1,4 +1,5 @@
 ---
+pretty_name: AirDialogue
 annotations_creators:
 - human-annotated
 language_creators:
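For context, `pretty_name` is one of the fields in the YAML metadata block at the top of the dataset card. As a side note, that block can be read back programmatically; the following is a minimal sketch, assuming a local copy of README.md and PyYAML being installed (both are assumptions, not part of this commit):

import yaml  # PyYAML, assumed to be available


def read_card_metadata(readme_path="README.md"):
    # The dataset card starts with a YAML block fenced by two "---" lines.
    with open(readme_path, encoding="utf-8") as f:
        _, frontmatter, _ = f.read().split("---", 2)
    return yaml.safe_load(frontmatter)


metadata = read_card_metadata()
print(metadata.get("pretty_name"))  # "AirDialogue" after this commit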
air_dialogue.py CHANGED

@@ -16,7 +16,6 @@
 
 
 import json
-import os
 
 import datasets
 
@@ -185,7 +184,7 @@ class AirDialogue(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
+        archive = dl_manager.download(my_urls)
         if self.config.name == "air_dialogue_data":
             train = "airdialogue_data/airdialogue/train_data.json"
             dev = "airdialogue_data/airdialogue/dev_data.json"
@@ -198,90 +197,93 @@ class AirDialogue(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, train),
-                    "split": "train",
+                    "filepath": train,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, dev),
-                    "split": "dev",
+                    "filepath": dev,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath, files):
         """Yields examples."""
         # TODO: This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
         # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
         # The key is not important, it's more here for legacy reason (legacy from tfds)
 
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "air_dialogue_data":
-
-                    intent = {
-                        "return_month": data["intent"]["return_month"],
-                        "return_day": data["intent"]["return_day"],
-                        "max_price": data["intent"]["max_price"],
-                        "departure_airport": data["intent"]["departure_airport"],
-                        "max_connections": data["intent"].get("max_connections", -1),
-                        "departure_day": data["intent"]["departure_day"],
-                        "goal": data["intent"]["goal"],
-                        "departure_month": data["intent"]["departure_month"],
-                        "name": data["intent"]["name"],
-                        "return_airport": data["intent"]["return_airport"],
-                    }
-
-                    search_info = (
-                        []
-                        if "search_info" not in data
-                        else [
-                            {
-                                "button_name": search_info.get("button_name", ""),
-                                "field_name": search_info.get("field_name", ""),
-                                "field_value": search_info.get("field_value", ""),
-                                "timestmamp": search_info["timestmamp"],
-                            }
-                            for search_info in data["search_info"]
-                        ]
-                    )
-
-                    yield id_, {
-                        "action": {key: data["action"][key] for key in data["action"]},
-                        "intent": intent,
-                        "timestamps": data["timestamps"],
-                        "dialogue": data["dialogue"],
-                        "expected_action": {key: data["expected_action"][key] for key in data["expected_action"]},
-                        "search_info": search_info,
-                        "correct_sample": data["correct_sample"],
-                    }
-
-                else:
-
-                    kb = [
-                        {
-                            "airline": kb["airline"],
-                            "class": kb["class"],
-                            "departure_airport": kb["departure_airport"],
-                            "departure_day": kb["departure_day"],
-                            "departure_month": kb["departure_month"],
-                            "departure_time_num": kb["departure_time_num"],
-                            "flight_number": kb["flight_number"],
-                            "num_connections": kb["num_connections"],
-                            "price": kb["price"],
-                            "return_airport": kb["return_airport"],
-                            "return_day": kb["return_day"],
-                            "return_month": kb["return_month"],
-                            "return_time_num": kb["return_time_num"],
-                        }
-                        for kb in data["kb"]
-                    ]
-
-                    yield id_, {
-                        "kb": kb,
-                        "reservation": data["reservation"],
-                    }
+        for path, f in files:
+            if path == filepath:
+                for id_, row in enumerate(f):
+                    row = row.decode("utf-8")
+                    data = json.loads(row)
+                    if self.config.name == "air_dialogue_data":
+
+                        intent = {
+                            "return_month": data["intent"]["return_month"],
+                            "return_day": data["intent"]["return_day"],
+                            "max_price": data["intent"]["max_price"],
+                            "departure_airport": data["intent"]["departure_airport"],
+                            "max_connections": data["intent"].get("max_connections", -1),
+                            "departure_day": data["intent"]["departure_day"],
+                            "goal": data["intent"]["goal"],
+                            "departure_month": data["intent"]["departure_month"],
+                            "name": data["intent"]["name"],
+                            "return_airport": data["intent"]["return_airport"],
+                        }
+
+                        search_info = (
+                            []
+                            if "search_info" not in data
+                            else [
+                                {
+                                    "button_name": search_info.get("button_name", ""),
+                                    "field_name": search_info.get("field_name", ""),
+                                    "field_value": search_info.get("field_value", ""),
+                                    "timestmamp": search_info["timestmamp"],
+                                }
+                                for search_info in data["search_info"]
+                            ]
+                        )
+
+                        yield id_, {
+                            "action": {key: data["action"][key] for key in data["action"]},
+                            "intent": intent,
+                            "timestamps": data["timestamps"],
+                            "dialogue": data["dialogue"],
+                            "expected_action": {key: data["expected_action"][key] for key in data["expected_action"]},
+                            "search_info": search_info,
+                            "correct_sample": data["correct_sample"],
+                        }
+
+                    else:
+
+                        kb = [
+                            {
+                                "airline": kb["airline"],
+                                "class": kb["class"],
+                                "departure_airport": kb["departure_airport"],
+                                "departure_day": kb["departure_day"],
+                                "departure_month": kb["departure_month"],
+                                "departure_time_num": kb["departure_time_num"],
+                                "flight_number": kb["flight_number"],
+                                "num_connections": kb["num_connections"],
+                                "price": kb["price"],
+                                "return_airport": kb["return_airport"],
+                                "return_day": kb["return_day"],
+                                "return_month": kb["return_month"],
+                                "return_time_num": kb["return_time_num"],
+                            }
+                            for kb in data["kb"]
+                        ]
+
+                        yield id_, {
+                            "kb": kb,
+                            "reservation": data["reservation"],
+                        }
+                break
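The substance of this change is that the loading script no longer extracts the archive to disk: `dl_manager.download` fetches the tarball and `dl_manager.iter_archive` streams `(path, file object)` pairs out of it, which is what allows the dataset to be used in streaming mode. Below is a minimal, self-contained sketch of that pattern, emulating `iter_archive` with Python's tarfile module; the archive filename is a hypothetical stand-in, while the member path is the one used in the script above.

import json
import tarfile


def iter_archive(archive_path):
    # Rough emulation of dl_manager.iter_archive: walk the tar members in order
    # and yield (path_inside_archive, binary file object) without extracting to disk.
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)


def generate_examples(filepath, files):
    # Same contract as the updated _generate_examples: scan the archive stream,
    # pick out the requested member, and parse one JSON record per line.
    for path, f in files:
        if path == filepath:
            for id_, row in enumerate(f):
                data = json.loads(row.decode("utf-8"))  # archive members yield bytes, not str
                yield id_, data
            break  # target file found, stop scanning the rest of the archive


if __name__ == "__main__":
    archive = "airdialogue_data.tar.gz"  # hypothetical local copy of the dataset archive
    train = "airdialogue_data/airdialogue/train_data.json"
    for key, example in generate_examples(train, iter_archive(archive)):
        print(key, sorted(example)[:5])
        break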