albertvillanova (HF staff) committed
Commit: df6c96b
1 Parent(s): 1ac0124

Convert dataset to Parquet (#4)


- Convert dataset to Parquet (fa77a3ebbc7b0faf51d2dce2e129c2096d41d97c)
- Delete loading script (9d57a64ed979df32e9b9ac1215b2024cff3d67b7)
- Delete legacy dataset_infos.json (33cb53c8f6621261e940cab55a90e03b2ccbe573)
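With the data stored as Parquet, the dataset loads directly from the repository files instead of executing a loading script. A minimal sketch, assuming the Hub dataset id is `art` (the id used on the dataset card below; adjust if the repo lives under a namespace):

```python
from datasets import load_dataset

# Loads the "anli" config straight from the Parquet shards; no repo code runs.
ds = load_dataset("art", "anli")

print(ds["train"].num_rows)   # expected: 169654, per the updated metadata
print(ds["validation"][0])    # observations, hypotheses, and a class label
```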

README.md CHANGED
@@ -23,6 +23,7 @@ pretty_name: Abductive Reasoning in narrative Text
 tags:
 - abductive-natural-language-inference
 dataset_info:
+  config_name: anli
   features:
   - name: observation_1
     dtype: string
@@ -39,16 +40,23 @@ dataset_info:
           '0': '0'
           '1': '1'
           '2': '2'
-  config_name: anli
   splits:
   - name: validation
-    num_bytes: 312314
+    num_bytes: 311146
     num_examples: 1532
   - name: train
-    num_bytes: 34046304
+    num_bytes: 33918790
     num_examples: 169654
-  download_size: 5118294
-  dataset_size: 34358618
+  download_size: 9191805
+  dataset_size: 34229936
+configs:
+- config_name: anli
+  data_files:
+  - split: validation
+    path: anli/validation-*
+  - split: train
+    path: anli/train-*
+  default: true
 ---
 
 # Dataset Card for "art"
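The new `configs` block above is what maps each split to its Parquet shards, and `default: true` makes `anli` the config chosen when none is requested. A client-side sketch of that resolution, under the same `art` dataset-id assumption:

```python
from datasets import get_dataset_config_names, load_dataset

print(get_dataset_config_names("art"))   # expected: ['anli']

# Because anli is the default config, these two calls are equivalent.
val = load_dataset("art", split="validation")
val = load_dataset("art", "anli", split="validation")
assert len(val) == 1532   # num_examples from the YAML above
```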
anli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ed3125b054c0918b77e84e5babb0cc41d3636093b7ba66e135017a564c129c8
+size 8983169
anli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:466c1f53fe13a20101d612a5b7d837ec5b5101bf7a0f8c64e32ea0277b754f61
+size 208636
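Both added `.parquet` entries are git-lfs pointer files (spec version, sha256 oid, byte size); the actual Parquet blobs live in LFS storage and are fetched transparently through the Hub. A sketch of reading one shard directly, again assuming the `art` dataset id:

```python
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Downloads the real Parquet blob (~209 KB), not the 3-line pointer file.
path = hf_hub_download(
    repo_id="art",
    filename="anli/validation-00000-of-00001.parquet",
    repo_type="dataset",
)
table = pq.read_table(path)
print(table.num_rows, table.column_names)
```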
art.py DELETED
@@ -1,116 +0,0 @@
-"""TODO(art): Add a description here."""
-
-
-import json
-import os
-
-import datasets
-
-
-# TODO(art): BibTeX citation
-_CITATION = """\
-@InProceedings{anli,
-    author = {Chandra, Bhagavatula and Ronan, Le Bras and Chaitanya, Malaviya and Keisuke, Sakaguchi and Ari, Holtzman
-              and Hannah, Rashkin and Doug, Downey and Scott, Wen-tau Yih and Yejin, Choi},
-    title = {Abductive Commonsense Reasoning},
-    year = {2020}
-}"""
-
-# TODO(art):
-_DESCRIPTION = """\
-the Abductive Natural Language Inference Dataset from AI2
-"""
-_DATA_URL = "https://storage.googleapis.com/ai2-mosaic/public/alphanli/alphanli-train-dev.zip"
-
-
-class ArtConfig(datasets.BuilderConfig):
-    """BuilderConfig for Art."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for Art.
-        Args:
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(ArtConfig, self).__init__(version=datasets.Version("0.1.0", ""), **kwargs)
-
-
-class Art(datasets.GeneratorBasedBuilder):
-    """TODO(art): Short description of my dataset."""
-
-    # TODO(art): Set up version.
-    VERSION = datasets.Version("0.1.0")
-    BUILDER_CONFIGS = [
-        ArtConfig(
-            name="anli",
-            description="""\
-            the Abductive Natural Language Inference Dataset from AI2.
-            """,
-        ),
-    ]
-
-    def _info(self):
-        # TODO(art): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    "observation_1": datasets.Value("string"),
-                    "observation_2": datasets.Value("string"),
-                    "hypothesis_1": datasets.Value("string"),
-                    "hypothesis_2": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(num_classes=3)
-                    # These are the features of your dataset like images, labels ...
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://leaderboard.allenai.org/anli/submissions/get-started",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(art): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_DATA_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": os.path.join(dl_dir, "dev.jsonl"),
-                    "labelpath": os.path.join(dl_dir, "dev-labels.lst"),
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(dl_dir, "train.jsonl"),
-                    "labelpath": os.path.join(dl_dir, "train-labels.lst"),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, labelpath):
-        """Yields examples."""
-        # TODO(art): Yields (key, example) tuples from the dataset
-        data = []
-        for line in open(filepath, encoding="utf-8"):
-            data.append(json.loads(line))
-        labels = []
-        with open(labelpath, encoding="utf-8") as f:
-            for word in f:
-                labels.append(word)
-        for idx, row in enumerate(data):
-            yield idx, {
-                "observation_1": row["obs1"],
-                "observation_2": row["obs2"],
-                "hypothesis_1": row["hyp1"],
-                "hypothesis_2": row["hyp2"],
-                "label": labels[idx],
-            }
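The deleted builder paired each line of `*.jsonl` with the matching line of `*-labels.lst`. For anyone who still needs the raw alphanli archive, a standalone sketch of that same pairing (note the whitespace-stripping `.split()` on the label file; the original script appended each label with its trailing newline intact):

```python
import io
import json
import urllib.request
import zipfile

URL = "https://storage.googleapis.com/ai2-mosaic/public/alphanli/alphanli-train-dev.zip"

with urllib.request.urlopen(URL) as resp:
    archive = zipfile.ZipFile(io.BytesIO(resp.read()))

def read_split(name):
    # Pair each story in <name>.jsonl with its label in <name>-labels.lst.
    rows = [json.loads(line)
            for line in archive.read(f"{name}.jsonl").decode("utf-8").splitlines()]
    labels = archive.read(f"{name}-labels.lst").decode("utf-8").split()
    return [
        {
            "observation_1": row["obs1"],
            "observation_2": row["obs2"],
            "hypothesis_1": row["hyp1"],
            "hypothesis_2": row["hyp2"],
            "label": label,
        }
        for row, label in zip(rows, labels)
    ]

dev = read_split("dev")
print(len(dev), dev[0]["label"])
```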
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"anli": {"description": "the Abductive Natural Language Inference Dataset from AI2\n", "citation": "@InProceedings{anli,\n author = \"Chandra, Bhagavatula\n and Ronan, Le Bras\n and Chaitanya, Malaviya\n and Keisuke, Sakaguchi\n and Ari, Holtzman\n and Hannah, Rashkin\n and Doug, Downey\n and Scott, Wen-tau Yih\n and Yejin, Choi\",\n title = \"Abductive Commonsense Reasoning\",\n year = \"2020\",\n}", "homepage": "https://leaderboard.allenai.org/anli/submissions/get-started", "license": "", "features": {"observation_1": {"dtype": "string", "id": null, "_type": "Value"}, "observation_2": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis_1": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis_2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["0", "1", "2"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "supervised_keys": null, "builder_name": "art", "config_name": "anli", "version": {"version_str": "0.1.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 34046304, "num_examples": 169654, "dataset_name": "art"}, "validation": {"name": "validation", "num_bytes": 312314, "num_examples": 1532, "dataset_name": "art"}}, "download_checksums": {"https://storage.googleapis.com/ai2-mosaic/public/alphanli/alphanli-train-dev.zip": {"num_bytes": 5118294, "checksum": "24840b27553e93ec625ae020dbf78d92daeae4be31ebbd469a0c9f6f99ed1c8d"}}, "download_size": 5118294, "dataset_size": 34358618, "size_in_bytes": 39476912}}