Datasets:

Multilinguality:
monolingual
Size Categories:
1K<n<10K
Language Creators:
expert-generated
Annotations Creators:
no-annotation
Source Datasets:
original
License:
system HF staff committed on
Commit
2131957
1 Parent(s): e0d9502

Update files from the datasets library (from 1.7.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.7.0

Files changed (2) hide show
  1. README.md +2 -1
  2. head_qa.py +4 -3
README.md CHANGED
@@ -20,6 +20,7 @@ task_categories:
20
  - question-answering
21
  task_ids:
22
  - multiple-choice-qa
 
23
  ---
24
 
25
  # Dataset Card for HEAD-QA
@@ -27,7 +28,7 @@ task_ids:
27
  ## Table of Contents
28
  - [Dataset Description](#dataset-description)
29
  - [Dataset Summary](#dataset-summary)
30
- - [Supported Tasks](#supported-tasks-and-leaderboards)
31
  - [Languages](#languages)
32
  - [Dataset Structure](#dataset-structure)
33
  - [Data Instances](#data-instances)
 
20
  - question-answering
21
  task_ids:
22
  - multiple-choice-qa
23
+ paperswithcode_id: headqa
24
  ---
25
 
26
  # Dataset Card for HEAD-QA
 
28
  ## Table of Contents
29
  - [Dataset Description](#dataset-description)
30
  - [Dataset Summary](#dataset-summary)
31
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
32
  - [Languages](#languages)
33
  - [Dataset Structure](#dataset-structure)
34
  - [Data Instances](#data-instances)
head_qa.py CHANGED
@@ -120,13 +120,13 @@ class HeadQA(datasets.GeneratorBasedBuilder):
120
  """Yields examples."""
121
  with open(filepath, encoding="utf-8") as f:
122
  head_qa = json.load(f)
123
- for exam in head_qa["exams"]:
124
  content = head_qa["exams"][exam]
125
  name = content["name"].strip()
126
  year = content["year"].strip()
127
  category = content["category"].strip()
128
  for question in content["data"]:
129
- id_ = int(question["qid"].strip())
130
  qtext = question["qtext"].strip()
131
  ra = int(question["ra"].strip())
132
  image = question["image"].strip()
@@ -135,11 +135,12 @@ class HeadQA(datasets.GeneratorBasedBuilder):
135
  atexts = [answer["atext"].strip() for answer in question["answers"]]
136
  answers = [{"aid": aid, "atext": atext} for aid, atext in zip(aids, atexts)]
137
 
 
138
  yield id_, {
139
  "name": name,
140
  "year": year,
141
  "category": category,
142
- "qid": id_,
143
  "qtext": qtext,
144
  "ra": ra,
145
  "image": image,
 
120
  """Yields examples."""
121
  with open(filepath, encoding="utf-8") as f:
122
  head_qa = json.load(f)
123
+ for exam_id, exam in enumerate(head_qa["exams"]):
124
  content = head_qa["exams"][exam]
125
  name = content["name"].strip()
126
  year = content["year"].strip()
127
  category = content["category"].strip()
128
  for question in content["data"]:
129
+ qid = int(question["qid"].strip())
130
  qtext = question["qtext"].strip()
131
  ra = int(question["ra"].strip())
132
  image = question["image"].strip()
 
135
  atexts = [answer["atext"].strip() for answer in question["answers"]]
136
  answers = [{"aid": aid, "atext": atext} for aid, atext in zip(aids, atexts)]
137
 
138
+ id_ = f"{exam_id}_{qid}"
139
  yield id_, {
140
  "name": name,
141
  "year": year,
142
  "category": category,
143
+ "qid": qid,
144
  "qtext": qtext,
145
  "ra": ra,
146
  "image": image,