Modalities: Text · Formats: parquet · Languages: English · Libraries: Datasets, pandas
albertvillanova (HF staff) committed
Commit: 5611843
Parent: 5e5f553

Delete loading script

Files changed (1):
snli.py +0 -110
snli.py DELETED
@@ -1,110 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- """The Stanford Natural Language Inference (SNLI) Corpus."""
-
-
- import csv
- import os
-
- import datasets
-
-
- _CITATION = """\
- @inproceedings{bowman-etal-2015-large,
-     title = "A large annotated corpus for learning natural language inference",
-     author = "Bowman, Samuel R. and
-       Angeli, Gabor and
-       Potts, Christopher and
-       Manning, Christopher D.",
-     editor = "M{\\`a}rquez, Llu{\\'\\i}s and
-       Callison-Burch, Chris and
-       Su, Jian",
-     booktitle = "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing",
-     month = sep,
-     year = "2015",
-     address = "Lisbon, Portugal",
-     publisher = "Association for Computational Linguistics",
-     url = "https://aclanthology.org/D15-1075",
-     doi = "10.18653/v1/D15-1075",
-     pages = "632--642",
- }
- """
-
- _DESCRIPTION = """\
- The SNLI corpus (version 1.0) is a collection of 570k human-written English
- sentence pairs manually labeled for balanced classification with the labels
- entailment, contradiction, and neutral, supporting the task of natural language
- inference (NLI), also known as recognizing textual entailment (RTE).
- """
-
- _DATA_URL = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
-
-
- class Snli(datasets.GeneratorBasedBuilder):
-     """The Stanford Natural Language Inference (SNLI) Corpus."""
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="plain_text",
-             version=datasets.Version("1.0.0", ""),
-             description="Plain text import of SNLI",
-         )
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "premise": datasets.Value("string"),
-                     "hypothesis": datasets.Value("string"),
-                     "label": datasets.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
-                 }
-             ),
-             # No default supervised_keys (as we have to pass both premise
-             # and hypothesis as input).
-             supervised_keys=None,
-             homepage="https://nlp.stanford.edu/projects/snli/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         dl_dir = dl_manager.download_and_extract(_DATA_URL)
-         data_dir = os.path.join(dl_dir, "snli_1.0")
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_test.txt")}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_dev.txt")}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, "snli_1.0_train.txt")}
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         """This function returns the examples in the raw (text) form."""
-         with open(filepath, encoding="utf-8") as f:
-             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-             for idx, row in enumerate(reader):
-                 label = -1 if row["gold_label"] == "-" else row["gold_label"]
-                 yield idx, {
-                     "premise": row["sentence1"],
-                     "hypothesis": row["sentence2"],
-                     "label": label,
-                 }
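
With the script deleted, consumers no longer execute snli.py; a minimal sketch of the equivalent usage, assuming the repository's parquet data files (per the metadata above) are what the library now reads:

# A minimal sketch of loading SNLI after this commit, assuming the Hub serves
# the parquet data files in place of the deleted snli.py loading script.
from datasets import load_dataset

snli = load_dataset("snli")  # splits: train, validation, test

example = snli["train"][0]
print(example["premise"])     # str
print(example["hypothesis"])  # str
print(example["label"])       # int: 0=entailment, 1=neutral, 2=contradiction, -1=no gold label

# The ClassLabel feature defined by the old script survives in the dataset info:
label = snli["train"].features["label"]
print(label.names)               # ["entailment", "neutral", "contradiction"]
print(label.str2int("neutral"))  # 1

The -1 labels correspond to rows whose gold_label was "-" in the source TSVs, exactly as mapped in the deleted _generate_examples above.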