system HF staff committed on
Commit
e9c1607
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4) hide show
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/0.1.0/dummy_data.zip +3 -0
  4. quartz.py +159 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"default": {"description": "QuaRTz is a crowdsourced dataset of 3864 multiple-choice questions about open domain qualitative relationships. Each \nquestion is paired with one of 405 different background sentences (sometimes short paragraphs).\nThe QuaRTz dataset V1 contains 3864 questions about open domain qualitative relationships. Each question is paired with \none of 405 different background sentences (sometimes short paragraphs).\n\nThe dataset is split into train (2696), dev (384) and test (784). A background sentence will only appear in a single split.\n", "citation": "@InProceedings{quartz,\n author = {Oyvind Tafjord and Matt Gardner and Kevin Lin and Peter Clark},\n title = {\"QUARTZ: An Open-Domain Dataset of Qualitative Relationship\nQuestions\"},\n \n year = {\"2019\"},\n}\n", "homepage": "https://allenai.org/data/quartz", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "choices": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answerKey": {"dtype": "string", "id": null, "_type": "Value"}, "para": {"dtype": "string", "id": null, "_type": "Value"}, "para_id": {"dtype": "string", "id": null, "_type": "Value"}, "para_anno": {"effect_prop": {"dtype": "string", "id": null, "_type": "Value"}, "cause_dir_str": {"dtype": "string", "id": null, "_type": "Value"}, "effect_dir_str": {"dtype": "string", "id": null, "_type": "Value"}, "cause_dir_sign": {"dtype": "string", "id": null, "_type": "Value"}, "effect_dir_sign": {"dtype": "string", "id": null, "_type": "Value"}, "cause_prop": {"dtype": "string", "id": null, "_type": "Value"}}, "question_anno": {"more_effect_dir": {"dtype": "string", "id": null, "_type": "Value"}, "less_effect_dir": {"dtype": "string", "id": null, "_type": "Value"}, "less_cause_prop": {"dtype": "string", "id": 
null, "_type": "Value"}, "more_effect_prop": {"dtype": "string", "id": null, "_type": "Value"}, "less_effect_prop": {"dtype": "string", "id": null, "_type": "Value"}, "less_cause_dir": {"dtype": "string", "id": null, "_type": "Value"}}}, "supervised_keys": null, "builder_name": "quartz", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 351374, "num_examples": 784, "dataset_name": "quartz"}, "train": {"name": "train", "num_bytes": 1197525, "num_examples": 2696, "dataset_name": "quartz"}, "validation": {"name": "validation", "num_bytes": 175871, "num_examples": 384, "dataset_name": "quartz"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/ai2-website/data/quartz-dataset-v1-aug2019.zip": {"num_bytes": 497354, "checksum": "e86ed35153c6c3fb6dc5991b6a3b520a2c154c42266cb6b4edc7ed526fa4b5a8"}}, "download_size": 497354, "dataset_size": 1724770, "size_in_bytes": 2222124}}
dummy/0.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0b0b938b6d5c30169f733030b6cd19a34960b7d1a8b81c077fdeed6248f2009
3
+ size 2695
quartz.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """TODO(quartz): Add a description here."""
2
+
3
+ from __future__ import absolute_import, division, print_function
4
+
5
+ import json
6
+ import os
7
+
8
+ import datasets
9
+
10
+
11
+ # TODO(quartz): BibTeX citation
12
+ _CITATION = """\
13
+ @InProceedings{quartz,
14
+ author = {Oyvind Tafjord and Matt Gardner and Kevin Lin and Peter Clark},
15
+ title = {"QUARTZ: An Open-Domain Dataset of Qualitative Relationship
16
+ Questions"},
17
+ year = {"2019"},
18
+ }
19
+ """
20
+
21
+ # TODO(quartz):
22
+ _DESCRIPTION = """\
23
+ QuaRTz is a crowdsourced dataset of 3864 multiple-choice questions about open domain qualitative relationships. Each
24
+ question is paired with one of 405 different background sentences (sometimes short paragraphs).
25
+ The QuaRTz dataset V1 contains 3864 questions about open domain qualitative relationships. Each question is paired with
26
+ one of 405 different background sentences (sometimes short paragraphs).
27
+ The dataset is split into train (2696), dev (384) and test (784). A background sentence will only appear in a single split.
28
+ """
29
+
30
+ _URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/quartz-dataset-v1-aug2019.zip"
31
+
32
+
33
+ class Quartz(datasets.GeneratorBasedBuilder):
34
+ """TODO(quartz): Short description of my dataset."""
35
+
36
+ # TODO(quartz): Set up version.
37
+ VERSION = datasets.Version("0.1.0")
38
+
39
+ def _info(self):
40
+ # TODO(quartz): Specifies the datasets.DatasetInfo object
41
+ return datasets.DatasetInfo(
42
+ # This is the description that will appear on the datasets page.
43
+ description=_DESCRIPTION,
44
+ # datasets.features.FeatureConnectors
45
+ features=datasets.Features(
46
+ {
47
+ # These are the features of your dataset like images, labels ...
48
+ "id": datasets.Value("string"),
49
+ "question": datasets.Value("string"),
50
+ "choices": datasets.features.Sequence(
51
+ {"text": datasets.Value("string"), "label": datasets.Value("string")}
52
+ ),
53
+ "answerKey": datasets.Value("string"),
54
+ "para": datasets.Value("string"),
55
+ "para_id": datasets.Value("string"),
56
+ "para_anno": {
57
+ "effect_prop": datasets.Value("string"),
58
+ "cause_dir_str": datasets.Value("string"),
59
+ "effect_dir_str": datasets.Value("string"),
60
+ "cause_dir_sign": datasets.Value("string"),
61
+ "effect_dir_sign": datasets.Value("string"),
62
+ "cause_prop": datasets.Value("string"),
63
+ },
64
+ "question_anno": {
65
+ "more_effect_dir": datasets.Value("string"),
66
+ "less_effect_dir": datasets.Value("string"),
67
+ "less_cause_prop": datasets.Value("string"),
68
+ "more_effect_prop": datasets.Value("string"),
69
+ "less_effect_prop": datasets.Value("string"),
70
+ "less_cause_dir": datasets.Value("string"),
71
+ },
72
+ }
73
+ ),
74
+ # If there's a common (input, target) tuple from the features,
75
+ # specify them here. They'll be used if as_supervised=True in
76
+ # builder.as_dataset.
77
+ supervised_keys=None,
78
+ # Homepage of the dataset for documentation
79
+ homepage="https://allenai.org/data/quartz",
80
+ citation=_CITATION,
81
+ )
82
+
83
+ def _split_generators(self, dl_manager):
84
+ """Returns SplitGenerators."""
85
+ # TODO(quartz): Downloads the data and defines the splits
86
+ # dl_manager is a datasets.download.DownloadManager that can be used to
87
+ # download and extract URLs
88
+ dl_dir = dl_manager.download_and_extract(_URL)
89
+ data_dir = os.path.join(dl_dir, "quartz-dataset-v1-aug2019")
90
+ return [
91
+ datasets.SplitGenerator(
92
+ name=datasets.Split.TRAIN,
93
+ # These kwargs will be passed to _generate_examples
94
+ gen_kwargs={"filepath": os.path.join(data_dir, "train.jsonl")},
95
+ ),
96
+ datasets.SplitGenerator(
97
+ name=datasets.Split.TEST,
98
+ # These kwargs will be passed to _generate_examples
99
+ gen_kwargs={"filepath": os.path.join(data_dir, "test.jsonl")},
100
+ ),
101
+ datasets.SplitGenerator(
102
+ name=datasets.Split.VALIDATION,
103
+ # These kwargs will be passed to _generate_examples
104
+ gen_kwargs={"filepath": os.path.join(data_dir, "dev.jsonl")},
105
+ ),
106
+ ]
107
+
108
+ def _generate_examples(self, filepath):
109
+ """Yields examples."""
110
+ # TODO(quartz): Yields (key, example) tuples from the dataset
111
+ with open(filepath, encoding="utf-8") as f:
112
+ for row in f:
113
+ data = json.loads(row)
114
+ id_ = data["id"]
115
+ question = data["question"]["stem"]
116
+ answerKey = data["answerKey"]
117
+ choices = data["question"]["choices"]
118
+ choice_text = [choice["text"] for choice in choices]
119
+ choice_label = [choice["label"] for choice in choices]
120
+ para_id = data["para_id"]
121
+ para = data["para"]
122
+ para_ano = data["para_anno"]
123
+ effect_prop = para_ano.get("effect_prop", "")
124
+ cause_dir_str = para_ano.get("cause_dir_str", "")
125
+ effect_dir_str = para_ano.get("effect_dir_str", "")
126
+ cause_dir_sign = para_ano.get("cause_dir_sign", "")
127
+ effect_dir_sign = para_ano.get("effect_dir_sign", "")
128
+ cause_prop = para_ano.get("cause_prop", "")
129
+ question_anno = data["question_anno"]
130
+ more_effect_dir = "" if not question_anno else question_anno.get("more_effect_dir", "")
131
+ less_effect_dir = "" if not question_anno else question_anno.get("less_effect_dir", "")
132
+ less_cause_prop = "" if not question_anno else question_anno.get("less_cause_prop", "")
133
+ more_effect_prop = "" if not question_anno else question_anno.get("more_effect_prop", "")
134
+ less_effect_prop = "" if not question_anno else question_anno.get("less_effect_prop", "")
135
+ less_cause_dir = "" if not question_anno else question_anno.get("less_effect_prop", "")
136
+ yield id_, {
137
+ "id": id_,
138
+ "question": question,
139
+ "choices": {"text": choice_text, "label": choice_label},
140
+ "answerKey": answerKey,
141
+ "para": para,
142
+ "para_id": para_id,
143
+ "para_anno": {
144
+ "effect_prop": effect_prop,
145
+ "cause_dir_str": cause_dir_str,
146
+ "effect_dir_str": effect_dir_str,
147
+ "cause_dir_sign": cause_dir_sign,
148
+ "effect_dir_sign": effect_dir_sign,
149
+ "cause_prop": cause_prop,
150
+ },
151
+ "question_anno": {
152
+ "more_effect_dir": more_effect_dir,
153
+ "less_effect_dir": less_effect_dir,
154
+ "less_cause_prop": less_cause_prop,
155
+ "more_effect_prop": more_effect_prop,
156
+ "less_effect_prop": less_effect_prop,
157
+ "less_cause_dir": less_cause_dir,
158
+ },
159
+ }