albertvillanova HF staff committed on
Commit
8d51e7e
1 Parent(s): f295b0d

Convert dataset to Parquet (#7)

- Convert dataset to Parquet (f817a2b0170bf1822262d6198427436189c07d04)
- Delete loading script (283dc807270f854f56ed56a1d5ef27109ef8c917)
- Delete legacy dataset_infos.json (703426cda46e0d65db9fe8143397ecee1ec16cc0)

README.md CHANGED
@@ -33,16 +33,25 @@ dataset_info:
           '1': positive
   splits:
   - name: train
-    num_bytes: 4690022
+    num_bytes: 4681603
     num_examples: 67349
   - name: validation
-    num_bytes: 106361
+    num_bytes: 106252
     num_examples: 872
   - name: test
-    num_bytes: 216868
+    num_bytes: 216640
     num_examples: 1821
-  download_size: 7439277
-  dataset_size: 5013251
+  download_size: 3331058
+  dataset_size: 5004495
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for [Dataset Name]
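With the `configs` mapping added above, the Hub serves this dataset straight from the Parquet shards and no loading script runs on the user's machine. A minimal sketch of loading it with the `datasets` library (split names and row counts taken from the YAML above):

```python
from datasets import load_dataset

# The configs block maps each split to data/<split>-*.parquet,
# so load_dataset reads the Parquet shards directly.
ds = load_dataset("sst2", split="train")
print(ds.num_rows)        # 67349, matching num_examples above
print(ds[0]["sentence"])  # first review sentence
```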
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20d27a86c0c59acb746a41a481ebb1fc71edb72d94b5ccee7f23b9041b17adcf
+size 147787
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7921283b75a42e685f50edecb96798607ea0fcbfd0739ee8975f22c12d55f09
+size 3110458
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb00fe008f6828f86ba2beda8415a4cf5da0c884f21c5f238c87131b5aa19529
+size 72813
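Each of the three additions is a Git LFS pointer file: the repository itself stores only the `oid` (the SHA-256 of the blob) and its `size`, while the Parquet bytes live in LFS storage. A shard can also be read outside the `datasets` library; a sketch using pandas over the `hf://` filesystem (assumes `huggingface_hub` and `fsspec` are installed):

```python
import pandas as pd

# hf:// paths resolve through huggingface_hub's fsspec integration;
# the filename matches the LFS pointer added in this commit.
df = pd.read_parquet("hf://datasets/sst2/data/validation-00000-of-00001.parquet")
print(df.shape)  # expected (872, 3): idx, sentence, label
```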
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"default": {"description": "The Stanford Sentiment Treebank consists of sentences from movie reviews and\nhuman annotations of their sentiment. The task is to predict the sentiment of a\ngiven sentence. We use the two-way (positive/negative) class split, and use only\nsentence-level labels.\n", "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n", "homepage": "https://nlp.stanford.edu/sentiment/", "license": "Unknown", "features": {"idx": {"dtype": "int32", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["negative", "positive"], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "sst2", "config_name": "default", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4690022, "num_examples": 67349, "dataset_name": "sst2"}, "validation": {"name": "validation", "num_bytes": 106361, "num_examples": 872, "dataset_name": "sst2"}, "test": {"name": "test", "num_bytes": 216868, "num_examples": 1821, "dataset_name": "sst2"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/SST-2.zip": {"num_bytes": 7439277, "checksum": "d67e16fb55739c1b32cdce9877596db1c127dc322d93c082281f64057c16deaa"}}, "download_size": 7439277, "post_processing_size": null, "dataset_size": 5013251, "size_in_bytes": 12452528}}
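Deleting the legacy `dataset_infos.json` is safe because the same metadata (features, split sizes, download and dataset sizes) now lives in the README's `dataset_info` YAML block, which the library parses. A quick way to check, as a sketch:

```python
from datasets import load_dataset_builder

# Split metadata formerly read from dataset_infos.json is now parsed
# from the dataset_info YAML block in README.md.
builder = load_dataset_builder("sst2")
print(builder.info.splits["train"].num_examples)  # 67349
print(builder.info.dataset_size)                  # 5004495 per the YAML
```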
sst2.py DELETED
@@ -1,105 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""SST-2 (Stanford Sentiment Treebank v2) dataset."""
-
-
-import csv
-import os
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{socher2013recursive,
-  title={Recursive deep models for semantic compositionality over a sentiment treebank},
-  author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
-  booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
-  pages={1631--1642},
-  year={2013}
-}
-"""
-
-_DESCRIPTION = """\
-The Stanford Sentiment Treebank consists of sentences from movie reviews and
-human annotations of their sentiment. The task is to predict the sentiment of a
-given sentence. We use the two-way (positive/negative) class split, and use only
-sentence-level labels.
-"""
-
-_HOMEPAGE = "https://nlp.stanford.edu/sentiment/"
-
-_LICENSE = "Unknown"
-
-_URL = "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip"
-
-
-class Sst2(datasets.GeneratorBasedBuilder):
-    """SST-2 dataset."""
-
-    VERSION = datasets.Version("2.0.0")
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "idx": datasets.Value("int32"),
-                "sentence": datasets.Value("string"),
-                "label": datasets.features.ClassLabel(names=["negative", "positive"]),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        dl_dir = dl_manager.download_and_extract(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "file_paths": dl_manager.iter_files(dl_dir),
-                    "data_filename": "train.tsv",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "file_paths": dl_manager.iter_files(dl_dir),
-                    "data_filename": "dev.tsv",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "file_paths": dl_manager.iter_files(dl_dir),
-                    "data_filename": "test.tsv",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, file_paths, data_filename):
-        for file_path in file_paths:
-            filename = os.path.basename(file_path)
-            if filename == data_filename:
-                with open(file_path, encoding="utf8") as f:
-                    reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-                    for idx, row in enumerate(reader):
-                        yield idx, {
-                            "idx": row["index"] if "index" in row else idx,
-                            "sentence": row["sentence"],
-                            "label": int(row["label"]) if "label" in row else -1,
-                        }
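With the Parquet shards and `configs` mapping in place, the builder script above is dead code: everything it did on the fly (download the GLUE zip, walk the archive, parse the TSVs) has been materialized once into Parquet. For reference, a hypothetical re-creation of the conversion step (the actual commit was produced by the Hub's conversion tooling, not this snippet):

```python
from datasets import load_dataset

# Hypothetical sketch: materialize each split as a single Parquet shard,
# mirroring the data/<split>-00000-of-00001.parquet layout in this commit.
for split in ("train", "validation", "test"):
    ds = load_dataset("sst2", split=split)
    ds.to_parquet(f"data/{split}-00000-of-00001.parquet")
```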