albertvillanova committed
Commit 8e4813d
Parent: bf20683

Convert dataset to Parquet (#5)


- Convert dataset to Parquet (ab481543fcf139fd7c3307a8097a58f0e10cc165)
- Delete loading script (6bcf0d7f654492d8e7ee2f9988842ee03ce84cd7)
- Delete legacy dataset_infos.json (b83b1452ab817eac4dcb36625e7c1fa95025093d)
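
With the Parquet shards in place, the dataset loads exactly as before; `datasets` now reads the shards directly instead of running the loading script. A minimal sanity check (assuming a `datasets` release recent enough to resolve the `configs` mapping from the README):

```python
from datasets import load_dataset

# Resolves plain_text/dev_r1-*.parquet through the data_files mapping below
ds = load_dataset("anli", split="dev_r1")
print(ds.features["label"].names)  # ['entailment', 'neutral', 'contradiction']
print(ds[0]["uid"], ds[0]["hypothesis"])
```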

README.md CHANGED
@@ -23,6 +23,7 @@ task_ids:
 paperswithcode_id: anli
 pretty_name: Adversarial NLI
 dataset_info:
+  config_name: plain_text
   features:
   - name: uid
     dtype: string
@@ -39,37 +40,58 @@ dataset_info:
           '2': contradiction
   - name: reason
     dtype: string
-  config_name: plain_text
   splits:
   - name: train_r1
-    num_bytes: 8006920
+    num_bytes: 8006888
     num_examples: 16946
   - name: dev_r1
-    num_bytes: 573444
+    num_bytes: 573428
     num_examples: 1000
   - name: test_r1
-    num_bytes: 574933
+    num_bytes: 574917
     num_examples: 1000
   - name: train_r2
-    num_bytes: 20801661
+    num_bytes: 20801581
     num_examples: 45460
   - name: dev_r2
-    num_bytes: 556082
+    num_bytes: 556066
     num_examples: 1000
   - name: test_r2
-    num_bytes: 572655
+    num_bytes: 572639
     num_examples: 1000
   - name: train_r3
-    num_bytes: 44720895
+    num_bytes: 44720719
     num_examples: 100459
   - name: dev_r3
-    num_bytes: 663164
+    num_bytes: 663148
     num_examples: 1200
   - name: test_r3
-    num_bytes: 657602
+    num_bytes: 657586
     num_examples: 1200
-  download_size: 18621352
-  dataset_size: 77127356
+  download_size: 26286748
+  dataset_size: 77126972
+configs:
+- config_name: plain_text
+  data_files:
+  - split: train_r1
+    path: plain_text/train_r1-*
+  - split: dev_r1
+    path: plain_text/dev_r1-*
+  - split: test_r1
+    path: plain_text/test_r1-*
+  - split: train_r2
+    path: plain_text/train_r2-*
+  - split: dev_r2
+    path: plain_text/dev_r2-*
+  - split: test_r2
+    path: plain_text/test_r2-*
+  - split: train_r3
+    path: plain_text/train_r3-*
+  - split: dev_r3
+    path: plain_text/dev_r3-*
+  - split: test_r3
+    path: plain_text/test_r3-*
+  default: true
 ---
 
 # Dataset Card for "anli"
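
The `configs.data_files` globs added above map each split to its Parquet shards, so the shards can also be read directly without the `datasets` loader. A sketch with pandas (assumes `huggingface_hub` is installed, which registers the `hf://` filesystem; the path mirrors this repository's layout):

```python
import pandas as pd

# Read one shard straight from the Hub over the hf:// fsspec protocol
df = pd.read_parquet("hf://datasets/anli/plain_text/dev_r1-00000-of-00001.parquet")
print(df.columns.tolist())  # ['uid', 'premise', 'hypothesis', 'label', 'reason']
print(len(df))              # 1000
```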
anli.py DELETED
@@ -1,152 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""The Adversarial NLI Corpus."""
-
-
-import json
-import os
-
-import datasets
-
-
-_CITATION = """\
-@InProceedings{nie2019adversarial,
-    title={Adversarial NLI: A New Benchmark for Natural Language Understanding},
-    author={Nie, Yixin
-        and Williams, Adina
-        and Dinan, Emily
-        and Bansal, Mohit
-        and Weston, Jason
-        and Kiela, Douwe},
-    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
-    year = "2020",
-    publisher = "Association for Computational Linguistics",
-}
-"""
-
-_DESCRIPTION = """\
-The Adversarial Natural Language Inference (ANLI) is a new large-scale NLI benchmark dataset,
-The dataset is collected via an iterative, adversarial human-and-model-in-the-loop procedure.
-ANLI is much more difficult than its predecessors including SNLI and MNLI.
-It contains three rounds. Each round has train/dev/test splits.
-"""
-
-stdnli_label = {
-    "e": "entailment",
-    "n": "neutral",
-    "c": "contradiction",
-}
-
-
-class ANLIConfig(datasets.BuilderConfig):
-    """BuilderConfig for ANLI."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for ANLI.
-
-        Args:
-            .
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(ANLIConfig, self).__init__(version=datasets.Version("0.1.0", ""), **kwargs)
-
-
-class ANLI(datasets.GeneratorBasedBuilder):
-    """ANLI: The ANLI Dataset."""
-
-    BUILDER_CONFIGS = [
-        ANLIConfig(
-            name="plain_text",
-            description="Plain text",
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "uid": datasets.Value("string"),
-                    "premise": datasets.Value("string"),
-                    "hypothesis": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
-                    "reason": datasets.Value("string"),
-                }
-            ),
-            # No default supervised_keys (as we have to pass both premise
-            # and hypothesis as input).
-            supervised_keys=None,
-            homepage="https://github.com/facebookresearch/anli/",
-            citation=_CITATION,
-        )
-
-    def _vocab_text_gen(self, filepath):
-        for _, ex in self._generate_examples(filepath):
-            yield " ".join([ex["premise"], ex["hypothesis"]])
-
-    def _split_generators(self, dl_manager):
-
-        downloaded_dir = dl_manager.download_and_extract("https://dl.fbaipublicfiles.com/anli/anli_v0.1.zip")
-
-        anli_path = os.path.join(downloaded_dir, "anli_v0.1")
-
-        path_dict = dict()
-        for round_tag in ["R1", "R2", "R3"]:
-            path_dict[round_tag] = dict()
-            for split_name in ["train", "dev", "test"]:
-                path_dict[round_tag][split_name] = os.path.join(anli_path, round_tag, f"{split_name}.jsonl")
-
-        return [
-            # Round 1
-            datasets.SplitGenerator(name="train_r1", gen_kwargs={"filepath": path_dict["R1"]["train"]}),
-            datasets.SplitGenerator(name="dev_r1", gen_kwargs={"filepath": path_dict["R1"]["dev"]}),
-            datasets.SplitGenerator(name="test_r1", gen_kwargs={"filepath": path_dict["R1"]["test"]}),
-            # Round 2
-            datasets.SplitGenerator(name="train_r2", gen_kwargs={"filepath": path_dict["R2"]["train"]}),
-            datasets.SplitGenerator(name="dev_r2", gen_kwargs={"filepath": path_dict["R2"]["dev"]}),
-            datasets.SplitGenerator(name="test_r2", gen_kwargs={"filepath": path_dict["R2"]["test"]}),
-            # Round 3
-            datasets.SplitGenerator(name="train_r3", gen_kwargs={"filepath": path_dict["R3"]["train"]}),
-            datasets.SplitGenerator(name="dev_r3", gen_kwargs={"filepath": path_dict["R3"]["dev"]}),
-            datasets.SplitGenerator(name="test_r3", gen_kwargs={"filepath": path_dict["R3"]["test"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Generate ANLI examples.
-
-        Args:
-            filepath: a string
-
-        Yields:
-            dictionaries containing "premise", "hypothesis" and "label" strings
-        """
-        for idx, line in enumerate(open(filepath, "rb")):
-            if line is not None:
-                line = line.strip().decode("utf-8")
-                item = json.loads(line)
-
-                reason_text = ""
-                if "reason" in item:
-                    reason_text = item["reason"]
-
-                yield item["uid"], {
-                    "uid": item["uid"],
-                    "premise": item["context"],
-                    "hypothesis": item["hypothesis"],
-                    "label": stdnli_label[item["label"]],
-                    "reason": reason_text,
-                }
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"plain_text": {"description": "The Adversarial Natural Language Inference (ANLI) is a new large-scale NLI benchmark dataset, \nThe dataset is collected via an iterative, adversarial human-and-model-in-the-loop procedure.\nANLI is much more difficult than its predecessors including SNLI and MNLI.\nIt contains three rounds. Each round has train/dev/test splits.\n", "citation": "@InProceedings{nie2019adversarial,\n title={Adversarial NLI: A New Benchmark for Natural Language Understanding},\n author={Nie, Yixin \n and Williams, Adina \n and Dinan, Emily \n and Bansal, Mohit \n and Weston, Jason \n and Kiela, Douwe},\n booktitle = \"Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics\",\n year = \"2020\",\n publisher = \"Association for Computational Linguistics\",\n}\n", "homepage": "https://github.com/facebookresearch/anli/", "license": "", "features": {"uid": {"dtype": "string", "id": null, "_type": "Value"}, "premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "reason": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "anli", "config_name": "plain_text", "version": {"version_str": "0.1.0", "description": "", "datasets_version_to_prepare": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train_r1": {"name": "train_r1", "num_bytes": 8006920, "num_examples": 16946, "dataset_name": "anli"}, "dev_r1": {"name": "dev_r1", "num_bytes": 573444, "num_examples": 1000, "dataset_name": "anli"}, "test_r1": {"name": "test_r1", "num_bytes": 574933, "num_examples": 1000, "dataset_name": "anli"}, "train_r2": {"name": "train_r2", "num_bytes": 20801661, "num_examples": 45460, "dataset_name": "anli"}, "dev_r2": {"name": "dev_r2", "num_bytes": 556082, "num_examples": 1000, "dataset_name": "anli"}, "test_r2": {"name": "test_r2", "num_bytes": 572655, "num_examples": 1000, "dataset_name": "anli"}, "train_r3": {"name": "train_r3", "num_bytes": 44720895, "num_examples": 100459, "dataset_name": "anli"}, "dev_r3": {"name": "dev_r3", "num_bytes": 663164, "num_examples": 1200, "dataset_name": "anli"}, "test_r3": {"name": "test_r3", "num_bytes": 657602, "num_examples": 1200, "dataset_name": "anli"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/anli/anli_v0.1.zip": {"num_bytes": 18621352, "checksum": "16ac929a7e90ecf9093deaec89cc81fe86a379265a5320a150028efe50c5cde8"}}, "download_size": 18621352, "dataset_size": 77127356, "size_in_bytes": 95748708}}
 
plain_text/dev_r1-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72e27463177b4363be80f1fc6ccdaab44ddaeb65db58c2280f94690e15468334
+size 351479
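
The nine `.parquet` entries in this commit are Git LFS pointer files: `oid` is the SHA-256 of the actual shard and `size` its byte count. A downloaded shard can be verified against the pointer like so (the local filename is illustrative):

```python
import hashlib

# The LFS pointer's oid must equal the sha256 digest of the downloaded shard
with open("dev_r1-00000-of-00001.parquet", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "72e27463177b4363be80f1fc6ccdaab44ddaeb65db58c2280f94690e15468334"
```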
plain_text/dev_r2-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43e4673665decf0b0e8487e55f98285423cb356b985e206fe5998defae2e38fa
+size 350606
plain_text/dev_r3-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61775ec09351f6011ce4dc9ea313f457bba6e11d7665d34d95c111665023a83e
+size 434044
plain_text/test_r1-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c4a3d304c4671941d6bad5a07632a79713c5a1be485ccf75b81b6df93f61045e
+size 353376
plain_text/test_r2-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df5daccdd5623cfcaa34be0100721783485f4181a42796b1d0ac0cd7601e7acb
+size 361549
plain_text/test_r3-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3232c4217979da00b2cd6ed97d099a8a8edf04530193ea52e3c8d69190de92a2
+size 434550
plain_text/train_r1-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de2d038ae67f1fb1872073490b9e7685e9114d5f278ddd4631905fe0a4ecbcff
+size 3140120
plain_text/train_r2-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:209f4a15bf77224c62ffbde5f150fda928a7e2f5175366f4cacc3c7588aab13d
+size 6527557
plain_text/train_r3-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1d3f614d673888ac56b9ab62324e21583c98a11c4fef84e938d0f8fc414b29a
+size 14333467
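
To reproduce a conversion like this for another dataset, the usual route is to load it once via its legacy loading script and push it back as Parquet; `push_to_hub` writes the shards and refreshes the `dataset_info`/`configs` YAML in the README. A rough sketch; the target repo id is a placeholder:

```python
from datasets import load_dataset

# Load all splits, then upload Parquet shards plus updated README metadata
ds = load_dataset("anli")  # DatasetDict: train/dev/test for rounds r1-r3
ds.push_to_hub("your-username/anli-parquet", config_name="plain_text")
```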