Datasets:

Languages:
English
Multilinguality:
monolingual
Size Categories:
10K<n<100K
Language Creators:
found
Annotations Creators:
crowdsourced
Source Datasets:
original
Tags:
License:
albertvillanova (HF staff) committed on
Commit
283dc80
1 Parent(s): f817a2b

Delete loading script

Browse files
Files changed (1) hide show
  1. sst2.py +0 -105
sst2.py DELETED
@@ -1,105 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """SST-2 (Stanford Sentiment Treebank v2) dataset."""
15
-
16
-
17
- import csv
18
- import os
19
-
20
- import datasets
21
-
22
-
23
- _CITATION = """\
24
- @inproceedings{socher2013recursive,
25
- title={Recursive deep models for semantic compositionality over a sentiment treebank},
26
- author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
27
- booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
28
- pages={1631--1642},
29
- year={2013}
30
- }
31
- """
32
-
33
- _DESCRIPTION = """\
34
- The Stanford Sentiment Treebank consists of sentences from movie reviews and
35
- human annotations of their sentiment. The task is to predict the sentiment of a
36
- given sentence. We use the two-way (positive/negative) class split, and use only
37
- sentence-level labels.
38
- """
39
-
40
- _HOMEPAGE = "https://nlp.stanford.edu/sentiment/"
41
-
42
- _LICENSE = "Unknown"
43
-
44
- _URL = "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip"
45
-
46
-
47
- class Sst2(datasets.GeneratorBasedBuilder):
48
- """SST-2 dataset."""
49
-
50
- VERSION = datasets.Version("2.0.0")
51
-
52
- def _info(self):
53
- features = datasets.Features(
54
- {
55
- "idx": datasets.Value("int32"),
56
- "sentence": datasets.Value("string"),
57
- "label": datasets.features.ClassLabel(names=["negative", "positive"]),
58
- }
59
- )
60
- return datasets.DatasetInfo(
61
- description=_DESCRIPTION,
62
- features=features,
63
- homepage=_HOMEPAGE,
64
- license=_LICENSE,
65
- citation=_CITATION,
66
- )
67
-
68
- def _split_generators(self, dl_manager):
69
- dl_dir = dl_manager.download_and_extract(_URL)
70
- return [
71
- datasets.SplitGenerator(
72
- name=datasets.Split.TRAIN,
73
- gen_kwargs={
74
- "file_paths": dl_manager.iter_files(dl_dir),
75
- "data_filename": "train.tsv",
76
- },
77
- ),
78
- datasets.SplitGenerator(
79
- name=datasets.Split.VALIDATION,
80
- gen_kwargs={
81
- "file_paths": dl_manager.iter_files(dl_dir),
82
- "data_filename": "dev.tsv",
83
- },
84
- ),
85
- datasets.SplitGenerator(
86
- name=datasets.Split.TEST,
87
- gen_kwargs={
88
- "file_paths": dl_manager.iter_files(dl_dir),
89
- "data_filename": "test.tsv",
90
- },
91
- ),
92
- ]
93
-
94
- def _generate_examples(self, file_paths, data_filename):
95
- for file_path in file_paths:
96
- filename = os.path.basename(file_path)
97
- if filename == data_filename:
98
- with open(file_path, encoding="utf8") as f:
99
- reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
100
- for idx, row in enumerate(reader):
101
- yield idx, {
102
- "idx": row["index"] if "index" in row else idx,
103
- "sentence": row["sentence"],
104
- "label": int(row["label"]) if "label" in row else -1,
105
- }