Commit 1dd1ef9 (1 parent: 6e0eb0d), committed by system (HF staff)

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2):
  1. README.md +1 -0
  2. qasc.py +39 -30
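
The substantive change is in qasc.py: the loading script stops extracting the archive with download_and_extract and os.path.join, and instead iterates over the downloaded archive with dl_manager.iter_archive. Assuming a datasets release at or above 1.16.0, that is also the pattern that makes a script usable in streaming mode; a minimal usage sketch (not part of this commit):

    from datasets import load_dataset

    # Regular loading: downloads the QASC archive and materializes the splits.
    ds = load_dataset("qasc", split="train")

    # Streaming loading: reads examples lazily out of the archive.
    stream = load_dataset("qasc", split="train", streaming=True)
    print(next(iter(stream))["formatted_question"])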
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: Question Answering via Sentence Composition (QASC)
 languages:
 - en
 paperswithcode_id: qasc
qasc.py CHANGED
@@ -2,7 +2,6 @@
 
 
 import json
-import os
 
 import datasets
 
@@ -66,49 +65,59 @@ class Qasc(datasets.GeneratorBasedBuilder):
         # TODO(qasc): Downloads the data and defines the splits
         # dl_manager is a datasets.download.DownloadManager that can be used to
         # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URl)
-        data_dir = os.path.join(dl_dir, "QASC_Dataset")
+        archive = dl_manager.download(_URl)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "train.jsonl")},
+                gen_kwargs={
+                    "filepath": "/".join(["QASC_Dataset", "train.jsonl"]),
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "test.jsonl")},
+                gen_kwargs={
+                    "filepath": "/".join(["QASC_Dataset", "test.jsonl"]),
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "dev.jsonl")},
+                gen_kwargs={
+                    "filepath": "/".join(["QASC_Dataset", "dev.jsonl"]),
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
         ]
 
-    def _generate_examples(self, filepath):
+    def _generate_examples(self, filepath, files):
         """Yields examples."""
         # TODO(qasc): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            for row in f:
-                data = json.loads(row)
-                answerkey = data.get("answerKey", "")
-                id_ = data["id"]
-                question = data["question"]["stem"]
-                choices = data["question"]["choices"]
-                text_choices = [choice["text"] for choice in choices]
-                label_choices = [choice["label"] for choice in choices]
-                fact1 = data.get("fact1", "")
-                fact2 = data.get("fact2", "")
-                combined_fact = data.get("combinedfact", "")
-                formatted_question = data.get("formatted_question", "")
-                yield id_, {
-                    "id": id_,
-                    "answerKey": answerkey,
-                    "question": question,
-                    "choices": {"text": text_choices, "label": label_choices},
-                    "fact1": fact1,
-                    "fact2": fact2,
-                    "combinedfact": combined_fact,
-                    "formatted_question": formatted_question,
-                }
+        for path, f in files:
+            if path == filepath:
+                for row in f:
+                    data = json.loads(row.decode("utf-8"))
+                    answerkey = data.get("answerKey", "")
+                    id_ = data["id"]
+                    question = data["question"]["stem"]
+                    choices = data["question"]["choices"]
+                    text_choices = [choice["text"] for choice in choices]
+                    label_choices = [choice["label"] for choice in choices]
+                    fact1 = data.get("fact1", "")
+                    fact2 = data.get("fact2", "")
+                    combined_fact = data.get("combinedfact", "")
+                    formatted_question = data.get("formatted_question", "")
+                    yield id_, {
+                        "id": id_,
+                        "answerKey": answerkey,
+                        "question": question,
+                        "choices": {"text": text_choices, "label": label_choices},
+                        "fact1": fact1,
+                        "fact2": fact2,
+                        "combinedfact": combined_fact,
+                        "formatted_question": formatted_question,
+                    }
+                break
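
The rewritten _generate_examples no longer opens a local file: it walks the (path, file object) pairs yielded by dl_manager.iter_archive, reads only the archive member matching the requested split, and breaks out afterwards, which is why each row arrives as bytes and is decoded with .decode("utf-8"). A rough standalone approximation of that iteration, sketched with Python's tarfile module rather than the datasets internals (the local archive filename below is hypothetical):

    import tarfile

    def iter_archive(archive_path):
        # Yield (path_inside_archive, binary_file_object) pairs in order,
        # without extracting the archive to disk.
        with tarfile.open(archive_path) as tar:
            for member in tar:
                if member.isfile():
                    yield member.name, tar.extractfile(member)

    # Mirror _generate_examples: match the member path against the split file.
    for path, f in iter_archive("QASC_Dataset.tgz"):  # hypothetical filename
        if path == "QASC_Dataset/train.jsonl":
            first_row = f.readline().decode("utf-8")  # rows come back as bytes
            break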