albertvillanova (HF staff) committed
Commit 28c1dbb
Parent: d09d2e1

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (76b601c821b066397098c65c1ac75b10c6f11949)
- Delete loading script (1da014ce7ff4fd6edb88d0fef62bbac63e867259)
- Delete legacy dataset_infos.json (319e71e380040f70c98a21661a5bc174ecd80c6b)
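After this commit the dataset is served directly from the Parquet files added below instead of executing `quartz.py`. A minimal usage sketch, assuming the dataset keeps the Hub id `quartz` (the canonical copy may also be reachable as `allenai/quartz`) and a recent `datasets` release:

```python
from datasets import load_dataset

# Loads the Parquet shards declared under `configs` in README.md;
# no repository loading script is executed after this change.
ds = load_dataset("quartz")  # hypothetical id; adjust if the repo moved, e.g. "allenai/quartz"

# Split sizes should match the metadata in the diff below:
# train=2696, test=784, validation=384 examples.
for split_name, split in ds.items():
    print(split_name, split.num_rows)
```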

README.md CHANGED
@@ -67,17 +67,26 @@ dataset_info:
   - name: less_cause_dir
     dtype: string
   splits:
-  - name: test
-    num_bytes: 351374
-    num_examples: 784
   - name: train
-    num_bytes: 1197525
+    num_bytes: 1188342
     num_examples: 2696
+  - name: test
+    num_bytes: 348644
+    num_examples: 784
   - name: validation
-    num_bytes: 175871
+    num_bytes: 174491
     num_examples: 384
-  download_size: 497354
-  dataset_size: 1724770
+  download_size: 569255
+  dataset_size: 1711477
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
+  - split: validation
+    path: data/validation-*
 ---
 
 # Dataset Card for "quartz"
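The new `configs` block maps each split to a glob over the committed Parquet shards, so the files can also be read directly without the `datasets` library. A sketch assuming the single-shard file names added in this commit have been fetched locally (e.g. via `git lfs pull`) and `pandas` with `pyarrow` is installed:

```python
import pandas as pd

# Paths follow the `data_files` globs in the YAML above; with one shard
# per split each glob resolves to a single file.
splits = {
    "train": "data/train-00000-of-00001.parquet",
    "test": "data/test-00000-of-00001.parquet",
    "validation": "data/validation-00000-of-00001.parquet",
}

for name, path in splits.items():
    df = pd.read_parquet(path)  # requires pyarrow or fastparquet
    print(name, len(df))        # expected: 2696 / 784 / 384 rows
```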
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78c4c77cfc1ad0e65dbdfeec4ed788117e73484ad44f6a2aebf43e08c7fba2e8
+size 99764
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:edeb1fa716d945ee198c9f5ce5ab6cad4d847e714ff7463834dee422c8450035
+size 415195
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91afc8bbdb1b6b67f35f05ea4b4dbc1ba718ec2469be665a386c46b4648a1fb4
+size 54296
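Each Parquet shard is committed as a Git LFS pointer carrying the object's SHA-256 and byte size, which makes locally downloaded files easy to verify. A small sketch (the local path is an assumption; the hash and size are copied from the validation pointer above):

```python
import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_sha256: str, expected_size: int) -> bool:
    """Check a locally fetched file against its Git LFS pointer."""
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_sha256

# Values from data/validation-00000-of-00001.parquet's pointer.
print(verify_lfs_object(
    "data/validation-00000-of-00001.parquet",
    "91afc8bbdb1b6b67f35f05ea4b4dbc1ba718ec2469be665a386c46b4648a1fb4",
    54296,
))
```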
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "QuaRTz is a crowdsourced dataset of 3864 multiple-choice questions about open domain qualitative relationships. Each \nquestion is paired with one of 405 different background sentences (sometimes short paragraphs).\nThe QuaRTz dataset V1 contains 3864 questions about open domain qualitative relationships. Each question is paired with \none of 405 different background sentences (sometimes short paragraphs).\n\nThe dataset is split into train (2696), dev (384) and test (784). A background sentence will only appear in a single split.\n", "citation": "@InProceedings{quartz,\n author = {Oyvind Tafjord and Matt Gardner and Kevin Lin and Peter Clark},\n title = {\"QUARTZ: An Open-Domain Dataset of Qualitative Relationship\nQuestions\"},\n \n year = {\"2019\"},\n}\n", "homepage": "https://allenai.org/data/quartz", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "choices": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "answerKey": {"dtype": "string", "id": null, "_type": "Value"}, "para": {"dtype": "string", "id": null, "_type": "Value"}, "para_id": {"dtype": "string", "id": null, "_type": "Value"}, "para_anno": {"effect_prop": {"dtype": "string", "id": null, "_type": "Value"}, "cause_dir_str": {"dtype": "string", "id": null, "_type": "Value"}, "effect_dir_str": {"dtype": "string", "id": null, "_type": "Value"}, "cause_dir_sign": {"dtype": "string", "id": null, "_type": "Value"}, "effect_dir_sign": {"dtype": "string", "id": null, "_type": "Value"}, "cause_prop": {"dtype": "string", "id": null, "_type": "Value"}}, "question_anno": {"more_effect_dir": {"dtype": "string", "id": null, "_type": "Value"}, "less_effect_dir": {"dtype": "string", "id": null, "_type": "Value"}, "less_cause_prop": {"dtype": "string", "id": null, "_type": "Value"}, "more_effect_prop": {"dtype": "string", "id": null, "_type": "Value"}, "less_effect_prop": {"dtype": "string", "id": null, "_type": "Value"}, "less_cause_dir": {"dtype": "string", "id": null, "_type": "Value"}}}, "supervised_keys": null, "builder_name": "quartz", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 351374, "num_examples": 784, "dataset_name": "quartz"}, "train": {"name": "train", "num_bytes": 1197525, "num_examples": 2696, "dataset_name": "quartz"}, "validation": {"name": "validation", "num_bytes": 175871, "num_examples": 384, "dataset_name": "quartz"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/ai2-website/data/quartz-dataset-v1-aug2019.zip": {"num_bytes": 497354, "checksum": "e86ed35153c6c3fb6dc5991b6a3b520a2c154c42266cb6b4edc7ed526fa4b5a8"}}, "download_size": 497354, "dataset_size": 1724770, "size_in_bytes": 2222124}}
 
quartz.py DELETED
@@ -1,158 +0,0 @@
-"""TODO(quartz): Add a description here."""
-
-
-import json
-import os
-
-import datasets
-
-
-# TODO(quartz): BibTeX citation
-_CITATION = """\
-@InProceedings{quartz,
-     author = {Oyvind Tafjord and Matt Gardner and Kevin Lin and Peter Clark},
-     title = {"QUARTZ: An Open-Domain Dataset of Qualitative Relationship
-Questions"},
-     year = {"2019"},
-}
-"""
-
-# TODO(quartz):
-_DESCRIPTION = """\
-QuaRTz is a crowdsourced dataset of 3864 multiple-choice questions about open domain qualitative relationships. Each
-question is paired with one of 405 different background sentences (sometimes short paragraphs).
-The QuaRTz dataset V1 contains 3864 questions about open domain qualitative relationships. Each question is paired with
-one of 405 different background sentences (sometimes short paragraphs).
-The dataset is split into train (2696), dev (384) and test (784). A background sentence will only appear in a single split.
-"""
-
-_URL = "https://s3-us-west-2.amazonaws.com/ai2-website/data/quartz-dataset-v1-aug2019.zip"
-
-
-class Quartz(datasets.GeneratorBasedBuilder):
-    """TODO(quartz): Short description of my dataset."""
-
-    # TODO(quartz): Set up version.
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        # TODO(quartz): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    # These are the features of your dataset like images, labels ...
-                    "id": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "choices": datasets.features.Sequence(
-                        {"text": datasets.Value("string"), "label": datasets.Value("string")}
-                    ),
-                    "answerKey": datasets.Value("string"),
-                    "para": datasets.Value("string"),
-                    "para_id": datasets.Value("string"),
-                    "para_anno": {
-                        "effect_prop": datasets.Value("string"),
-                        "cause_dir_str": datasets.Value("string"),
-                        "effect_dir_str": datasets.Value("string"),
-                        "cause_dir_sign": datasets.Value("string"),
-                        "effect_dir_sign": datasets.Value("string"),
-                        "cause_prop": datasets.Value("string"),
-                    },
-                    "question_anno": {
-                        "more_effect_dir": datasets.Value("string"),
-                        "less_effect_dir": datasets.Value("string"),
-                        "less_cause_prop": datasets.Value("string"),
-                        "more_effect_prop": datasets.Value("string"),
-                        "less_effect_prop": datasets.Value("string"),
-                        "less_cause_dir": datasets.Value("string"),
-                    },
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://allenai.org/data/quartz",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(quartz): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URL)
-        data_dir = os.path.join(dl_dir, "quartz-dataset-v1-aug2019")
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "train.jsonl")},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "test.jsonl")},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "dev.jsonl")},
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(quartz): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            for row in f:
-                data = json.loads(row)
-                id_ = data["id"]
-                question = data["question"]["stem"]
-                answerKey = data["answerKey"]
-                choices = data["question"]["choices"]
-                choice_text = [choice["text"] for choice in choices]
-                choice_label = [choice["label"] for choice in choices]
-                para_id = data["para_id"]
-                para = data["para"]
-                para_ano = data["para_anno"]
-                effect_prop = para_ano.get("effect_prop", "")
-                cause_dir_str = para_ano.get("cause_dir_str", "")
-                effect_dir_str = para_ano.get("effect_dir_str", "")
-                cause_dir_sign = para_ano.get("cause_dir_sign", "")
-                effect_dir_sign = para_ano.get("effect_dir_sign", "")
-                cause_prop = para_ano.get("cause_prop", "")
-                question_anno = data["question_anno"]
-                more_effect_dir = "" if not question_anno else question_anno.get("more_effect_dir", "")
-                less_effect_dir = "" if not question_anno else question_anno.get("less_effect_dir", "")
-                less_cause_prop = "" if not question_anno else question_anno.get("less_cause_prop", "")
-                more_effect_prop = "" if not question_anno else question_anno.get("more_effect_prop", "")
-                less_effect_prop = "" if not question_anno else question_anno.get("less_effect_prop", "")
-                less_cause_dir = "" if not question_anno else question_anno.get("less_cause_dir", "")
-                yield id_, {
-                    "id": id_,
-                    "question": question,
-                    "choices": {"text": choice_text, "label": choice_label},
-                    "answerKey": answerKey,
-                    "para": para,
-                    "para_id": para_id,
-                    "para_anno": {
-                        "effect_prop": effect_prop,
-                        "cause_dir_str": cause_dir_str,
-                        "effect_dir_str": effect_dir_str,
-                        "cause_dir_sign": cause_dir_sign,
-                        "effect_dir_sign": effect_dir_sign,
-                        "cause_prop": cause_prop,
-                    },
-                    "question_anno": {
-                        "more_effect_dir": more_effect_dir,
-                        "less_effect_dir": less_effect_dir,
-                        "less_cause_prop": less_cause_prop,
-                        "more_effect_prop": more_effect_prop,
-                        "less_effect_prop": less_effect_prop,
-                        "less_cause_dir": less_cause_dir,
-                    },
-                }
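The deleted builder flattened each JSONL record into the nested `choices`, `para_anno`, and `question_anno` fields shown above, and the Parquet export preserves that layout, so downstream code keeps working unchanged. A short usage sketch with field names taken from the script (dataset id assumed as before):

```python
from datasets import load_dataset

train = load_dataset("quartz", split="train")  # hypothetical id

ex = train[0]
# Structure produced by the old _generate_examples and kept in Parquet:
print(ex["question"], ex["answerKey"])
print(list(zip(ex["choices"]["label"], ex["choices"]["text"])))
print(ex["para_anno"]["cause_prop"], ex["para_anno"]["effect_prop"])
```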