Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: crowdsourced, found
Annotations Creators: crowdsourced
Source Datasets: original
albertvillanova (HF staff) committed
Commit da70db2 (1 parent: 39383d4)

Convert dataset to Parquet (#5)


- Convert dataset to Parquet (c4f4109e71f2fd9522a4f6ff12615c89354d1b18)
- Delete loading script (6dc77dbfe0540d1ca2238cf3fd3bb69d23f297d4)
- Delete legacy dataset_infos.json (4a17b6c1fdf9e24b0585dd756de7ea489051e0a1)
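
With the splits stored as Parquet, the dataset now loads straight from the data/ files and no loading script is executed. A minimal usage sketch, assuming the Hub repository id "multi_nli" (adjust the id if the dataset lives under a namespace):

# Minimal sketch: load the Parquet-backed splits with the datasets library.
# The repository id "multi_nli" is an assumption about where this repo is hosted.
from datasets import load_dataset

ds = load_dataset("multi_nli")

# Splits declared in the README: train, validation_matched, validation_mismatched
print(ds)
print(ds["train"][0]["premise"], "->", ds["train"][0]["hypothesis"])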

README.md CHANGED
@@ -54,16 +54,25 @@ dataset_info:
           '2': contradiction
   splits:
   - name: train
-    num_bytes: 410211586
+    num_bytes: 410210306
     num_examples: 392702
   - name: validation_matched
-    num_bytes: 10063939
+    num_bytes: 10063907
     num_examples: 9815
   - name: validation_mismatched
-    num_bytes: 10610221
+    num_bytes: 10610189
     num_examples: 9832
-  download_size: 226850426
-  dataset_size: 430885746
+  download_size: 224005223
+  dataset_size: 430884402
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation_matched
+    path: data/validation_matched-*
+  - split: validation_mismatched
+    path: data/validation_mismatched-*
 ---
 
 # Dataset Card for Multi-Genre Natural Language Inference (MultiNLI)
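
The configs block added above maps each split to a glob over the Parquet shards under data/. Spelled out by hand with the generic parquet builder, the same mapping looks roughly like the sketch below; the hf:// paths are an assumption about how the shards resolve, and load_dataset("multi_nli") normally performs this resolution automatically:

# Rough, hand-written equivalent of the configs/data_files mapping above.
# The hf:// URLs assume the repository id "multi_nli"; the README uses globs
# (data/train-*) so that additional shards would be picked up automatically.
from datasets import load_dataset

data_files = {
    "train": "hf://datasets/multi_nli/data/train-00000-of-00001.parquet",
    "validation_matched": "hf://datasets/multi_nli/data/validation_matched-00000-of-00001.parquet",
    "validation_mismatched": "hf://datasets/multi_nli/data/validation_mismatched-00000-of-00001.parquet",
}
ds = load_dataset("parquet", data_files=data_files)
print({split: d.num_rows for split, d in ds.items()})
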
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c1de03640b168e410aabfca19e7cc2f3dfcd7f0e126e935674e56fb102c4529
+size 213961663

data/validation_matched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:350c26950b55f460b50d36c76aef87d64b49c78812d7abf7bf97e5fede10f186
+size 4938568

data/validation_mismatched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b0e1231cebedd255000a7d1732af37ad0500902239db625e231ed77c0c8f2f8
+size 5104992
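
Each file added above is a Git LFS pointer to one Parquet shard. A shard can also be fetched and inspected directly; a small sketch, assuming the repository id "multi_nli" and that pandas has a Parquet engine (pyarrow) installed:

# Sketch: download the train shard and inspect it with pandas.
# repo_id is an assumption; the filename matches the file added above.
import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="multi_nli",
    repo_type="dataset",
    filename="data/train-00000-of-00001.parquet",
)
df = pd.read_parquet(path)
print(len(df))                  # expected 392702 rows per the README split metadata
print(df.columns.tolist())      # promptID, pairID, premise, ..., genre, label
print(df["label"].value_counts())
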
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "The Multi-Genre Natural Language Inference (MultiNLI) corpus is a\ncrowd-sourced collection of 433k sentence pairs annotated with textual\nentailment information. The corpus is modeled on the SNLI corpus, but differs in\nthat covers a range of genres of spoken and written text, and supports a\ndistinctive cross-genre generalization evaluation. The corpus served as the\nbasis for the shared task of the RepEval 2017 Workshop at EMNLP in Copenhagen.\n", "citation": "@InProceedings{N18-1101,\n author = {Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel},\n title = {A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference},\n booktitle = {Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)},\n year = {2018},\n publisher = {Association for Computational Linguistics},\n pages = {1112--1122},\n location = {New Orleans, Louisiana},\n url = {http://aclweb.org/anthology/N18-1101}\n}\n", "homepage": "https://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"promptID": {"dtype": "int32", "id": null, "_type": "Value"}, "pairID": {"dtype": "string", "id": null, "_type": "Value"}, "premise": {"dtype": "string", "id": null, "_type": "Value"}, "premise_binary_parse": {"dtype": "string", "id": null, "_type": "Value"}, "premise_parse": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis_binary_parse": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis_parse": {"dtype": "string", "id": null, "_type": "Value"}, "genre": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "multi_nli", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 410211586, "num_examples": 392702, "dataset_name": "multi_nli"}, "validation_matched": {"name": "validation_matched", "num_bytes": 10063939, "num_examples": 9815, "dataset_name": "multi_nli"}, "validation_mismatched": {"name": "validation_mismatched", "num_bytes": 10610221, "num_examples": 9832, "dataset_name": "multi_nli"}}, "download_checksums": {"https://cims.nyu.edu/~sbowman/multinli/multinli_1.0.zip": {"num_bytes": 226850426, "checksum": "049f507b9e36b1fcb756cfd5aeb3b7a0cfcb84bf023793652987f7e7e0957822"}}, "download_size": 226850426, "post_processing_size": null, "dataset_size": 430885746, "size_in_bytes": 657736172}}
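
With dataset_infos.json removed, the split sizes and feature schema recorded in that file now live in the README's dataset_info YAML block, and they stay queryable from code. A small sketch, again assuming the repository id "multi_nli":

# Sketch: read the metadata that used to live in dataset_infos.json.
# It is now derived from the README's dataset_info YAML block.
from datasets import load_dataset_builder

builder = load_dataset_builder("multi_nli")  # repository id assumed
print(builder.info.features)
for name, split in builder.info.splits.items():
    print(name, split.num_examples, split.num_bytes)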
 
multi_nli.py DELETED
@@ -1,118 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""The Multi-Genre NLI Corpus."""
-
-
-import json
-import os
-
-import datasets
-
-
-_CITATION = """\
-@InProceedings{N18-1101,
-  author = {Williams, Adina
-            and Nangia, Nikita
-            and Bowman, Samuel},
-  title = {A Broad-Coverage Challenge Corpus for
-           Sentence Understanding through Inference},
-  booktitle = {Proceedings of the 2018 Conference of
-               the North American Chapter of the
-               Association for Computational Linguistics:
-               Human Language Technologies, Volume 1 (Long
-               Papers)},
-  year = {2018},
-  publisher = {Association for Computational Linguistics},
-  pages = {1112--1122},
-  location = {New Orleans, Louisiana},
-  url = {http://aclweb.org/anthology/N18-1101}
-}
-"""
-
-_DESCRIPTION = """\
-The Multi-Genre Natural Language Inference (MultiNLI) corpus is a
-crowd-sourced collection of 433k sentence pairs annotated with textual
-entailment information. The corpus is modeled on the SNLI corpus, but differs in
-that covers a range of genres of spoken and written text, and supports a
-distinctive cross-genre generalization evaluation. The corpus served as the
-basis for the shared task of the RepEval 2017 Workshop at EMNLP in Copenhagen.
-"""
-
-
-class MultiNli(datasets.GeneratorBasedBuilder):
-    """MultiNLI: The Stanford Question Answering Dataset. Version 1.1."""
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "promptID": datasets.Value("int32"),
-                    "pairID": datasets.Value("string"),
-                    "premise": datasets.Value("string"),
-                    "premise_binary_parse": datasets.Value("string"),  # parses in unlabeled binary-branching format
-                    "premise_parse": datasets.Value("string"),  # sentence as parsed by the Stanford PCFG Parser 3.5.2
-                    "hypothesis": datasets.Value("string"),
-                    "hypothesis_binary_parse": datasets.Value("string"),  # parses in unlabeled binary-branching format
-                    "hypothesis_parse": datasets.Value(
-                        "string"
-                    ),  # sentence as parsed by the Stanford PCFG Parser 3.5.2
-                    "genre": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
-                }
-            ),
-            # No default supervised_keys (as we have to pass both premise
-            # and hypothesis as input).
-            supervised_keys=None,
-            homepage="https://www.nyu.edu/projects/bowman/multinli/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-
-        downloaded_dir = dl_manager.download_and_extract("https://cims.nyu.edu/~sbowman/multinli/multinli_1.0.zip")
-        mnli_path = os.path.join(downloaded_dir, "multinli_1.0")
-        train_path = os.path.join(mnli_path, "multinli_1.0_train.jsonl")
-        matched_validation_path = os.path.join(mnli_path, "multinli_1.0_dev_matched.jsonl")
-        mismatched_validation_path = os.path.join(mnli_path, "multinli_1.0_dev_mismatched.jsonl")
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
-            datasets.SplitGenerator(name="validation_matched", gen_kwargs={"filepath": matched_validation_path}),
-            datasets.SplitGenerator(name="validation_mismatched", gen_kwargs={"filepath": mismatched_validation_path}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Generate mnli examples"""
-
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                data = json.loads(row)
-                if data["gold_label"] == "-":
-                    continue
-                yield id_, {
-                    "promptID": data["promptID"],
-                    "pairID": data["pairID"],
-                    "premise": data["sentence1"],
-                    "premise_binary_parse": data["sentence1_binary_parse"],
-                    "premise_parse": data["sentence1_parse"],
-                    "hypothesis": data["sentence2"],
-                    "hypothesis_binary_parse": data["sentence2_binary_parse"],
-                    "hypothesis_parse": data["sentence2_parse"],
-                    "genre": data["genre"],
-                    "label": data["gold_label"],
-                }
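
The deleted script downloaded multinli_1.0.zip, walked the JSONL files, renamed the sentence1*/sentence2* fields to premise*/hypothesis*, and skipped pairs whose gold_label is "-". The Parquet shards added above replace that on-the-fly work. For reference, an equivalent offline conversion could look roughly like the sketch below; this is not the exact tooling used for the Hub's Parquet export, just an illustration with pandas (a Parquet engine such as pyarrow is assumed):

# Illustrative sketch only: rebuild one Parquet split from the original JSONL,
# mirroring what the deleted loading script used to do at load time.
import json

import pandas as pd

COLUMN_MAP = {
    "sentence1": "premise",
    "sentence1_binary_parse": "premise_binary_parse",
    "sentence1_parse": "premise_parse",
    "sentence2": "hypothesis",
    "sentence2_binary_parse": "hypothesis_binary_parse",
    "sentence2_parse": "hypothesis_parse",
}
# ClassLabel order from the schema: entailment, neutral, contradiction
LABELS = {"entailment": 0, "neutral": 1, "contradiction": 2}


def jsonl_to_parquet(jsonl_path, parquet_path):
    rows = []
    with open(jsonl_path, encoding="utf-8") as f:
        for line in f:
            data = json.loads(line)
            if data["gold_label"] == "-":  # unlabeled pairs are dropped, as before
                continue
            rows.append(
                {
                    "promptID": data["promptID"],
                    "pairID": data["pairID"],
                    **{new: data[old] for old, new in COLUMN_MAP.items()},
                    "genre": data["genre"],
                    "label": LABELS[data["gold_label"]],
                }
            )
    pd.DataFrame(rows).to_parquet(parquet_path, index=False)


# Example call (paths are placeholders):
# jsonl_to_parquet("multinli_1.0/multinli_1.0_train.jsonl", "train-00000-of-00001.parquet")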