Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: expert-generated
Source Datasets: original
Commit 05d5c34 by albertvillanova (HF staff)
Parent: e37ee6f

Delete loading script

Files changed (1):
  1. race.py +0 -111
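
This commit removes the Python loading script, presumably as part of the Hub-wide migration from script-based datasets to plain data files. Assuming the repository keeps the same three configurations ("high", "middle", "all") as data files, loading should be unchanged from the user's side; a minimal sketch:

    from datasets import load_dataset

    # With the script gone, load_dataset() is expected to read the data files
    # hosted in the repository directly instead of executing race.py.
    # The config names ("high", "middle", "all") are assumed to be unchanged.
    race = load_dataset("race", "middle")
    print(race)              # DatasetDict with train/validation/test splits
    print(race["train"][0])  # example_id, article, question, options, answer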
race.py DELETED
@@ -1,111 +0,0 @@
- """RACE: ReAding Comprehension dataset collected from English examinations in China."""
-
-
- import json
-
- import datasets
-
-
- _CITATION = """\
- @article{lai2017large,
-     title={RACE: Large-scale ReAding Comprehension Dataset From Examinations},
-     author={Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard},
-     journal={arXiv preprint arXiv:1704.04683},
-     year={2017}
- }
- """
-
- _DESCRIPTION = """\
- RACE is a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The
- dataset is collected from English examinations in China, which are designed for middle school and high school students.
- The dataset can serve as training and test sets for machine reading comprehension.
- """
-
- _URL = "http://www.cs.cmu.edu/~glai1/data/race/RACE.tar.gz"
-
-
- class Race(datasets.GeneratorBasedBuilder):
-     """ReAding Comprehension Dataset From Examinations, from CMU."""
-
-     VERSION = datasets.Version("0.1.0")
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="high", description="Exams designed for high school students", version=VERSION),
-         datasets.BuilderConfig(
-             name="middle", description="Exams designed for middle school students", version=VERSION
-         ),
-         datasets.BuilderConfig(
-             name="all", description="Exams designed for both high school and middle school students", version=VERSION
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # Each example is one question about an article, with its answer options.
-             features=datasets.Features(
-                 {
-                     "example_id": datasets.Value("string"),
-                     "article": datasets.Value("string"),
-                     "answer": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "options": datasets.features.Sequence(datasets.Value("string")),
-                 }
-             ),
-             # There is no canonical (input, target) pair, so as_supervised=True
-             # in builder.as_dataset is not supported.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage="http://www.cs.cmu.edu/~glai1/data/race/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download the archive; files are then streamed from it without extraction.
-         archive = dl_manager.download(_URL)
-         # The config name doubles as the sub-directory inside the archive
-         # ("high" or "middle"); "all" matches both.
-         case = str(self.config.name)
-         if case == "all":
-             case = ""
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"train_test_or_eval": f"RACE/test/{case}", "files": dl_manager.iter_archive(archive)},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"train_test_or_eval": f"RACE/train/{case}", "files": dl_manager.iter_archive(archive)},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"train_test_or_eval": f"RACE/dev/{case}", "files": dl_manager.iter_archive(archive)},
-             ),
-         ]
-
-     def _generate_examples(self, train_test_or_eval, files):
-         """Yields examples."""
-         for file_idx, (path, f) in enumerate(files):
-             # Each .txt file under the requested split holds one JSON passage
-             # with parallel lists of questions, answers and options.
-             if path.startswith(train_test_or_eval) and path.endswith(".txt"):
-                 data = json.loads(f.read().decode("utf-8"))
-                 questions = data["questions"]
-                 answers = data["answers"]
-                 options = data["options"]
-                 for i in range(len(questions)):
-                     question = questions[i]
-                     answer = answers[i]
-                     option = options[i]
-                     yield f"{file_idx}_{i}", {
-                         "example_id": data["id"],
-                         "article": data["article"],
-                         "question": question,
-                         "answer": answer,
-                         "options": option,
-                     }
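
For reference, the deleted script expects each RACE .txt file to contain a single JSON object with parallel question/answer/option lists. A hypothetical example, inferred only from the fields `_generate_examples` reads (the values are invented for illustration):

    import json

    # Illustrative shape of one RACE .txt file; key names come from the
    # deleted _generate_examples, the contents below are made up.
    sample = {
        "id": "high1001.txt",
        "article": "Last week our class visited the science museum ...",
        "questions": ["Where did the class go last week?"],
        "options": [["The zoo", "The science museum", "The library", "The park"]],
        "answers": ["B"],
    }
    raw = json.dumps(sample)  # each .txt file holds one such JSON object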