system (HF staff) committed
Commit fdca929 (0 parents)

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
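
For context (not part of the commit): each line above routes files matching a gitignore-style glob through Git LFS. A rough sketch of which filenames match, approximated in Python with fnmatch (an assumption: fnmatch only approximates Git's pattern rules, e.g. for saved_model/**/*):

    # A rough sketch; fnmatch only approximates Git's glob semantics.
    import fnmatch

    lfs_patterns = ["*.7z", "*.arrow", "*.zip", "*.parquet", "*tfevents*"]  # subset of the rules above

    def tracked_by_lfs(filename):
        return any(fnmatch.fnmatch(filename, pattern) for pattern in lfs_patterns)

    assert tracked_by_lfs("dummy_data.zip")
    assert not tracked_by_lfs("reclor.py")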
dummy/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f304d5119a93a37a3cb44de9c33977fbeb6c290c54ad0319474381a5385384a2
+ size 2654
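
For context (not part of the commit): the three lines above are a Git LFS pointer, not the zip itself; the real bytes live in LFS storage, addressed by the sha256 oid. A minimal sketch of reading such a pointer:

    # A minimal sketch: parse a Git LFS pointer file into key/value fields.
    def parse_lfs_pointer(text):
        fields = {}
        for line in text.strip().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    pointer = parse_lfs_pointer(
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:f304d5119a93a37a3cb44de9c33977fbeb6c290c54ad0319474381a5385384a2\n"
        "size 2654\n"
    )
    assert pointer["size"] == "2654"  # dummy_data.zip is 2654 bytes in LFS storage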
reclor.py ADDED
@@ -0,0 +1,114 @@
+ """TODO(reclor): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO(reclor): BibTeX citation
+ _CITATION = """\
+ @inproceedings{yu2020reclor,
+     author = {Yu, Weihao and Jiang, Zihang and Dong, Yanfei and Feng, Jiashi},
+     title = {ReClor: A Reading Comprehension Dataset Requiring Logical Reasoning},
+     booktitle = {International Conference on Learning Representations (ICLR)},
+     month = {April},
+     year = {2020}
+ }
+
+ """
+
+ # TODO(reclor):
+ _DESCRIPTION = """\
+ Logical reasoning is an important ability to examine, analyze, and critically evaluate arguments as they occur in ordinary
+ language, as defined by LSAC. ReClor is a dataset extracted from logical reasoning questions of standardized graduate
+ admission examinations. Empirical results show that state-of-the-art models struggle on ReClor with poor performance,
+ indicating that more research is needed to substantially enhance the logical reasoning ability of current models. We hope
+ this dataset can help push Machine Reading Comprehension (MRC) towards more complicated reasoning.
+ """
+
+
+ class Reclor(datasets.GeneratorBasedBuilder):
+     """TODO(reclor): Short description of my dataset."""
+
+     # TODO(reclor): Set up version.
+     VERSION = datasets.Version("0.1.0")
+
+     @property
+     def manual_download_instructions(self):
+         return """\
+ To use ReClor you need to download it manually. Please go to its homepage (http://whyu.me/reclor/), fill out the Google
+ form, and you will receive a download link and a password to extract the archive. Please extract all files into one folder
+ and pass that folder's path to datasets.load_dataset('reclor', data_dir='path/to/folder/folder_name').
+ """
+
+     def _info(self):
+         # TODO(reclor): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     # These are the features of your dataset like images, labels ...
+                     "context": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answers": datasets.features.Sequence(datasets.Value("string")),
+                     "label": datasets.Value("string"),
+                     "id_string": datasets.Value("string"),
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="http://whyu.me/reclor/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(reclor): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
+
+         if not os.path.exists(data_dir):
+             raise FileNotFoundError(
+                 "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('reclor', data_dir=...)` that includes files unzipped from the reclor zip. Manual download instructions: {}".format(
+                     data_dir, self.manual_download_instructions
+                 )
+             )
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, "train.json")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, "test.json")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={"filepath": os.path.join(data_dir, "val.json")},
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(reclor): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             data = json.load(f)
+             for id_, row in enumerate(data):
+                 yield id_, {
+                     "context": row["context"],
+                     "question": row["question"],
+                     "answers": row["answers"],
+                     # The test split ships without labels; fall back to "".
+                     "label": str(row.get("label", "")),
+                     "id_string": row["id_string"],
+                 }
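
Since the script declares manual_download_instructions, loading follows the pattern those instructions name. A minimal usage sketch, assuming the manually downloaded ReClor archive has been extracted to ./reclor_data (a placeholder path):

    import datasets

    # data_dir must point at the folder holding train.json, val.json, test.json.
    reclor = datasets.load_dataset("reclor", data_dir="./reclor_data")

    # Records follow the features declared in _info(); the test split has no
    # labels, so "label" is an empty string there.
    example = reclor["train"][0]
    print(example["question"])
    print(example["answers"])  # list of answer-option strings
    print(example["label"])    # stringified label index, e.g. "0"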
urls_checksums/checksums.txt ADDED
@@ -0,0 +1,3 @@
+ # TODO(reclor): If your dataset downloads files, then the checksums will be
+ # automatically added here when running the download_and_prepare script
+ # with --register_checksums.