Remove dataset script

#6
by lhoestq HF staff - opened
Files changed (5) hide show
  1. README.md +4 -0
  2. rotten_tomatoes.py +0 -121
  3. test.parquet +3 -0
  4. train.parquet +3 -0
  5. validation.parquet +3 -0
README.md CHANGED
@@ -173,6 +173,10 @@ The data fields are the same among all splits.
173
 
174
  ### Data Splits
175
 
 
 
 
 
176
  | name |train|validation|test|
177
  |-------|----:|---------:|---:|
178
  |default| 8530| 1066|1066|
 
173
 
174
  ### Data Splits
175
 
176
+ The Rotten Tomatoes sentences are split into 80% train, 10% validation, and 10% test, following the practice set out in
177
+
178
+ Jinfeng Li et al., "TEXTBUGGER: Generating Adversarial Text Against Real-world Applications."
179
+
180
  | name |train|validation|test|
181
  |-------|----:|---------:|---:|
182
  |default| 8530| 1066|1066|
rotten_tomatoes.py DELETED
@@ -1,121 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """Rotten tomatoes movie reviews dataset."""
18
-
19
- import datasets
20
- from datasets.tasks import TextClassification
21
-
22
-
23
- _DESCRIPTION = """\
24
- Movie Review Dataset.
25
- This is a dataset of containing 5,331 positive and 5,331 negative processed
26
- sentences from Rotten Tomatoes movie reviews. This data was first used in Bo
27
- Pang and Lillian Lee, ``Seeing stars: Exploiting class relationships for
28
- sentiment categorization with respect to rating scales.'', Proceedings of the
29
- ACL, 2005.
30
- """
31
-
32
- _CITATION = """\
33
- @InProceedings{Pang+Lee:05a,
34
- author = {Bo Pang and Lillian Lee},
35
- title = {Seeing stars: Exploiting class relationships for sentiment
36
- categorization with respect to rating scales},
37
- booktitle = {Proceedings of the ACL},
38
- year = 2005
39
- }
40
- """
41
-
42
- _DOWNLOAD_URL = "https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz"
43
-
44
-
45
- class RottenTomatoesMovieReview(datasets.GeneratorBasedBuilder):
46
- """Cornell Rotten Tomatoes movie reviews dataset."""
47
-
48
- VERSION = datasets.Version("1.0.0")
49
-
50
- def _info(self):
51
- return datasets.DatasetInfo(
52
- description=_DESCRIPTION,
53
- features=datasets.Features(
54
- {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["neg", "pos"])}
55
- ),
56
- supervised_keys=[""],
57
- homepage="http://www.cs.cornell.edu/people/pabo/movie-review-data/",
58
- citation=_CITATION,
59
- task_templates=[TextClassification(text_column="text", label_column="label")],
60
- )
61
-
62
- def _split_generators(self, dl_manager):
63
- """Downloads Rotten Tomatoes sentences."""
64
- archive = dl_manager.download(_DOWNLOAD_URL)
65
- return [
66
- datasets.SplitGenerator(
67
- name=datasets.Split.TRAIN,
68
- gen_kwargs={"split_key": "train", "files": dl_manager.iter_archive(archive)},
69
- ),
70
- datasets.SplitGenerator(
71
- name=datasets.Split.VALIDATION,
72
- gen_kwargs={"split_key": "validation", "files": dl_manager.iter_archive(archive)},
73
- ),
74
- datasets.SplitGenerator(
75
- name=datasets.Split.TEST,
76
- gen_kwargs={"split_key": "test", "files": dl_manager.iter_archive(archive)},
77
- ),
78
- ]
79
-
80
- def _get_examples_from_split(self, split_key, files):
81
- """Reads Rotten Tomatoes sentences and splits into 80% train,
82
- 10% validation, and 10% test, as is the practice set out in Jinfeng
83
- Li, ``TEXTBUGGER: Generating Adversarial Text Against Real-world
84
- Applications.''
85
- """
86
- data_dir = "rt-polaritydata/"
87
- pos_samples, neg_samples = None, None
88
- for path, f in files:
89
- if path == data_dir + "rt-polarity.pos":
90
- pos_samples = [line.decode("latin-1").strip() for line in f]
91
- elif path == data_dir + "rt-polarity.neg":
92
- neg_samples = [line.decode("latin-1").strip() for line in f]
93
- if pos_samples is not None and neg_samples is not None:
94
- break
95
-
96
- # 80/10/10 split
97
- i1 = int(len(pos_samples) * 0.8 + 0.5)
98
- i2 = int(len(pos_samples) * 0.9 + 0.5)
99
- train_samples = pos_samples[:i1] + neg_samples[:i1]
100
- train_labels = (["pos"] * i1) + (["neg"] * i1)
101
- validation_samples = pos_samples[i1:i2] + neg_samples[i1:i2]
102
- validation_labels = (["pos"] * (i2 - i1)) + (["neg"] * (i2 - i1))
103
- test_samples = pos_samples[i2:] + neg_samples[i2:]
104
- test_labels = (["pos"] * (len(pos_samples) - i2)) + (["neg"] * (len(pos_samples) - i2))
105
-
106
- if split_key == "train":
107
- return (train_samples, train_labels)
108
- if split_key == "validation":
109
- return (validation_samples, validation_labels)
110
- if split_key == "test":
111
- return (test_samples, test_labels)
112
- else:
113
- raise ValueError(f"Invalid split key {split_key}")
114
-
115
- def _generate_examples(self, split_key, files):
116
- """Yields examples for a given split of MR."""
117
- split_text, split_labels = self._get_examples_from_split(split_key, files)
118
- for text, label in zip(split_text, split_labels):
119
- data_key = split_key + "_" + text
120
- feature_dict = {"text": text, "label": label}
121
- yield data_key, feature_dict
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5427a2b59d1b9bed1ea5cc3f963843bd13ea7443f32d27e74a957a3d181cc545
3
+ size 92206
train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f899e3cb8124a12b7d82c30ba5cd35d27eb6575d1b10d65f731a60348454a959
3
+ size 698845
validation.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ea894e394cd24413b781790683924a2598507d146da9b4ad0a2a01830c77b00
3
+ size 90001