system HF staff committed on
Commit 9fd6e32
0 Parent(s):

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
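
These rules route matching files through Git LFS (filter=lfs diff=lfs merge=lfs) and mark them as binary (-text). As an illustrative sketch only, not part of the commit, the LFS-tracked patterns can be listed by parsing the file; the standard "<pattern> <attribute>..." gitattributes layout shown above is the only assumption:

    # List the patterns that .gitattributes routes through Git LFS.
    # Illustrative sketch; assumes the "<pattern> key=value ..." layout above.
    def lfs_patterns(path=".gitattributes"):
        patterns = []
        with open(path, encoding="utf-8") as f:
            for line in f:
                parts = line.split()
                if len(parts) > 1 and "filter=lfs" in parts[1:]:
                    patterns.append(parts[0])
        return patterns

    if __name__ == "__main__":
        for pattern in lfs_patterns():
            print(pattern)
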
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"plain_text": {"description": "Large Movie Review Dataset.\nThis is a dataset for binary sentiment classification containing substantially more data than previous benchmark datasets. We provide a set of 25,000 highly polar movie reviews for training, and 25,000 for testing. There is additional unlabeled data for use as well.", "citation": "@InProceedings{maas-EtAl:2011:ACL-HLT2011,\n author = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},\n title = {Learning Word Vectors for Sentiment Analysis},\n booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},\n month = {June},\n year = {2011},\n address = {Portland, Oregon, USA},\n publisher = {Association for Computational Linguistics},\n pages = {142--150},\n url = {http://www.aclweb.org/anthology/P11-1015}\n}\n", "homepage": "http://ai.stanford.edu/~amaas/data/sentiment/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["neg", "pos"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "supervised_keys": null, "builder_name": "imdb", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 32660064, "num_examples": 25000, "dataset_name": "imdb"}, "train": {"name": "train", "num_bytes": 33442202, "num_examples": 25000, "dataset_name": "imdb"}, "unsupervised": {"name": "unsupervised", "num_bytes": 67125548, "num_examples": 50000, "dataset_name": "imdb"}}, "download_checksums": {"http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz": {"num_bytes": 84125825, "checksum": "c40f74a18d3b61f90feba1e17730e0d38e8b97c05fde7008942e91923d1658fe"}}, "download_size": 84125825, "dataset_size": 133227814, "size_in_bytes": 217353639}}
dummy/plain_text/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:570a8f885827a2f340aec4a9f8b3452d037ee361ae00aa97c12d85bf3fc59e6a
+ size 4699
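
The three lines above are a Git LFS pointer, not the zip archive itself: a spec version, the SHA-256 of the real content, and its size in bytes. A small sketch, assuming only the spec-v1 "key value" per-line layout shown here, that parses such a pointer:

    # Parse a Git LFS pointer file into its key/value fields.
    # Sketch assuming the spec-v1 layout above: one "key value" pair per line.
    def read_lfs_pointer(path):
        fields = {}
        with open(path, encoding="utf-8") as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    # read_lfs_pointer("dummy/plain_text/1.0.0/dummy_data.zip") would return
    # {"version": "https://git-lfs.github.com/spec/v1",
    #  "oid": "sha256:570a8f88...", "size": "4699"}
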
imdb.py ADDED
@@ -0,0 +1,122 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """IMDB movie reviews dataset."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import os
+
+ import datasets
+
+
+ _DESCRIPTION = """\
+ Large Movie Review Dataset.
+ This is a dataset for binary sentiment classification containing substantially \
+ more data than previous benchmark datasets. We provide a set of 25,000 highly \
+ polar movie reviews for training, and 25,000 for testing. There is additional \
+ unlabeled data for use as well.\
+ """
+
+ _CITATION = """\
+ @InProceedings{maas-EtAl:2011:ACL-HLT2011,
+   author    = {Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher},
+   title     = {Learning Word Vectors for Sentiment Analysis},
+   booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},
+   month     = {June},
+   year      = {2011},
+   address   = {Portland, Oregon, USA},
+   publisher = {Association for Computational Linguistics},
+   pages     = {142--150},
+   url       = {http://www.aclweb.org/anthology/P11-1015}
+ }
+ """
+
+ _DOWNLOAD_URL = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
+
+
+ class IMDBReviewsConfig(datasets.BuilderConfig):
+     """BuilderConfig for IMDBReviews."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for IMDBReviews.
+
+         Args:
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(IMDBReviewsConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+
+
+ class Imdb(datasets.GeneratorBasedBuilder):
+     """IMDB movie reviews dataset."""
+
+     BUILDER_CONFIGS = [
+         IMDBReviewsConfig(
+             name="plain_text",
+             description="Plain text",
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {"text": datasets.Value("string"), "label": datasets.features.ClassLabel(names=["neg", "pos"])}
+             ),
+             supervised_keys=None,
+             homepage="http://ai.stanford.edu/~amaas/data/sentiment/",
+             citation=_CITATION,
+         )
+
+     def _vocab_text_gen(self, archive):
+         for _, ex in self._generate_examples(archive, os.path.join("aclImdb", "train")):
+             yield ex["text"]
+
+     def _split_generators(self, dl_manager):
+         arch_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
+         data_dir = os.path.join(arch_path, "aclImdb")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN, gen_kwargs={"directory": os.path.join(data_dir, "train")}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST, gen_kwargs={"directory": os.path.join(data_dir, "test")}
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split("unsupervised"),
+                 gen_kwargs={"directory": os.path.join(data_dir, "train"), "labeled": False},
+             ),
+         ]
+
+     def _generate_examples(self, directory, labeled=True):
+         """Generate IMDB examples."""
+         # For labeled examples, extract the label from the path.
+         if labeled:
+             files = {
+                 "pos": sorted(os.listdir(os.path.join(directory, "pos"))),
+                 "neg": sorted(os.listdir(os.path.join(directory, "neg"))),
+             }
+             for key in files:
+                 for id_, file in enumerate(files[key]):
+                     filepath = os.path.join(directory, key, file)
+                     with open(filepath, encoding="UTF-8") as f:
+                         yield key + "_" + str(id_), {"text": f.read(), "label": key}
+         else:
+             unsup_files = sorted(os.listdir(os.path.join(directory, "unsup")))
+             for id_, file in enumerate(unsup_files):
+                 filepath = os.path.join(directory, "unsup", file)
+                 with open(filepath, encoding="UTF-8") as f:
+                     yield id_, {"text": f.read(), "label": -1}
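
With the script in place, the dataset loads through the standard datasets API. A minimal usage sketch, assuming datasets >= 1.0.0 is installed and the Stanford download URL is reachable (the expected sizes come from dataset_infos.json above):

    from datasets import load_dataset

    # First call downloads aclImdb_v1.tar.gz and builds the three splits
    # defined in _split_generators; later calls reuse the cached Arrow files.
    imdb = load_dataset("imdb")  # or load_dataset("./imdb.py") for a local copy of the script

    print(imdb)  # expected: train/test with 25,000 examples each, unsupervised with 50,000
    example = imdb["train"][0]
    print(example["text"][:100])
    # ClassLabel stores labels as ints; int2str maps back to "neg"/"pos".
    print(imdb["train"].features["label"].int2str(example["label"]))
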