Languages: English
Multilinguality: monolingual
Size Categories: 1M<n<10M
Language Creators: crowdsourced
Annotations Creators: no-annotation
Source Datasets: original
system (HF staff) committed on
Commit f180f01
0 Parent(s):

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0
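This commit makes WikiText loadable through the `datasets` library. As a minimal usage sketch (config names are taken from the wikitext.py script added below; the printed output is illustrative, not verified here):

import datasets

# Downloads and prepares the chosen WikiText configuration.
dataset = datasets.load_dataset("wikitext", "wikitext-2-raw-v1")

# Each split exposes a single "text" column, one line per example.
print(dataset["train"][0]["text"])
print({split: dataset[split].num_rows for split in dataset})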

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"wikitext-103-raw-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified \n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License.\n", "citation": "@InProceedings{wikitext,\n author={Stephen, Merity and Caiming ,Xiong and James, Bradbury and Richard Socher}\n year=2016\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "wikitext", "config_name": "wikitext-103-raw-v1", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1306182, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 546951363, "num_examples": 1801350, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1160232, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip": {"num_bytes": 191984949, "checksum": "91c00ae287f0d699e18605c84afc9e45c192bc6b7797ff8837e5474655a33794"}}, "download_size": 191984949, "dataset_size": 549417777, "size_in_bytes": 741402726}, "wikitext-2-raw-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified \n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License.\n", "citation": "@InProceedings{wikitext,\n author={Stephen, Merity and Caiming ,Xiong and James, Bradbury and Richard Socher}\n year=2016\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "wikitext", "config_name": "wikitext-2-raw-v1", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1306182, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 11070901, "num_examples": 36718, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1160232, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip": {"num_bytes": 4721645, "checksum": "ef7edb566e3e2b2d31b29c1fdb0c89a4cc683597484c3dc2517919c615435a11"}}, "download_size": 4721645, "dataset_size": 13537315, "size_in_bytes": 18258960}, "wikitext-103-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified \n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License.\n", "citation": "@InProceedings{wikitext,\n author={Stephen, Merity and Caiming ,Xiong and James, Bradbury and Richard Socher}\n year=2016\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "wikitext", "config_name": "wikitext-103-v1", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1296669, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 545592329, "num_examples": 1801350, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1155695, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip": {"num_bytes": 190229076, "checksum": "242ba0f20b329cfdf1ccc61e9e9e5b59becf189db7f7a81cd2a0e2fc31539590"}}, "download_size": 190229076, "dataset_size": 548044693, "size_in_bytes": 738273769}, "wikitext-2-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified \n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License.\n", "citation": "@InProceedings{wikitext,\n author={Stephen, Merity and Caiming ,Xiong and James, Bradbury and Richard Socher}\n year=2016\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "wikitext", "config_name": "wikitext-2-v1", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1272041, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 10927302, "num_examples": 36718, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1135067, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip": {"num_bytes": 4475746, "checksum": "92675f1d63015c1c8b51f1656a52d5bdbc33aafa60cc47a218a66e7ee817488c"}}, "download_size": 4475746, "dataset_size": 13334410, "size_in_bytes": 17810156}}
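The dataset_infos.json above records one entry per configuration (description, citation, features, splits, download checksums and sizes). A small illustrative sketch for inspecting it, assuming a local checkout of this repository where the file sits next to the script:

import json

# Load the metadata written by the datasets library for each configuration.
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

# Print the number of examples per split for every configuration.
for config_name, info in infos.items():
    sizes = {split: meta["num_examples"] for split, meta in info["splits"].items()}
    print(config_name, sizes)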
dummy/wikitext-103-raw-v1/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:842dca49e6c854b3a07251a812d1355bca3a45a92478c3807a252542d573a127
+ size 3285
dummy/wikitext-103-v1/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ccfed190561a2336322f8e409a210dd6cd8f6861596a0680a596347dff8cf23
+ size 2912
dummy/wikitext-2-raw-v1/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3af2d3ce8ca55f82c972ff50be65b30f56f81dd1dee1c1da7733f61eeb1bc837
+ size 3269
dummy/wikitext-2-v1/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83bededc8b8da9343474957690e17aac2d1fbfd89fb60529bd6296cc1fc63908
+ size 2896
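The dummy_data.zip entries above are not the archives themselves but Git LFS pointer files (version, oid, size); git-lfs resolves them to the real content on checkout. Purely as an illustrative sketch, a hypothetical helper that parses the pointer text shown in this diff:

# Hypothetical helper for illustration; real resolution is done by git-lfs itself.
def parse_lfs_pointer(text):
    # Each pointer line is "<key> <value>", e.g. "size 3285".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].split(":", 1)[1],
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:842dca49e6c854b3a07251a812d1355bca3a45a92478c3807a252542d573a127
size 3285"""
print(parse_lfs_pointer(pointer))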
wikitext.py ADDED
@@ -0,0 +1,189 @@
+ """TODO(wikitext): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import os
+
+ import datasets
+
+
+ # TODO(wikitext): BibTeX citation
+ _CITATION = """\
+ @InProceedings{wikitext,
+     author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},
+     year={2016}
+ }
+ """
+
+ # TODO(wikitext):
+ _DESCRIPTION = """\
+ The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified
+ Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License.
+ """
+ _URL = "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/"
+ _DATA_URL = "https://s3.amazonaws.com/research.metamind.io/wikitext"
+
+
+ class WikitextConfig(datasets.BuilderConfig):
+     """BuilderConfig for WikiText."""
+
+     def __init__(self, data_url, **kwargs):
+         """BuilderConfig for WikiText.
+
+         Args:
+             data_url: `string`, url to the dataset (word or raw level)
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(WikitextConfig, self).__init__(
+             version=datasets.Version(
+                 "1.0.0",
+             ),
+             **kwargs,
+         )
+         self.data_url = data_url
+
+
+ class Wikitext(datasets.GeneratorBasedBuilder):
+     """TODO(wikitext_103): Short description of my dataset."""
+
+     # TODO(wikitext_103): Set up version.
+     VERSION = datasets.Version("0.1.0")
+     BUILDER_CONFIGS = [
+         WikitextConfig(
+             name="wikitext-103-raw-v1",
+             data_url=_DATA_URL + "/" + "wikitext-103-raw-v1.zip",
+             description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
+             "They should only be used for character level work or for creating newly derived datasets.",
+         ),
+         WikitextConfig(
+             name="wikitext-2-raw-v1",
+             data_url=_DATA_URL + "/" + "wikitext-2-raw-v1.zip",
+             description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
+             "They should only be used for character level work or for creating newly derived datasets.",
+         ),
+         WikitextConfig(
+             name="wikitext-103-v1",
+             data_url=_DATA_URL + "/" + "wikitext-103-v1.zip",
+             description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
+         ),
+         WikitextConfig(
+             name="wikitext-2-v1",
+             data_url=_DATA_URL + "/" + "wikitext-2-v1.zip",
+             description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
+         ),
+     ]
+
+     def _info(self):
+         # TODO(wikitext): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string")
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(wikitext): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         if self.config.name == "wikitext-103-v1":
+             data_file = dl_manager.download_and_extract(self.config.data_url)
+             data_dir = os.path.join(data_file, "wikitext-103")
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.tokens"), "split": "train"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.tokens"), "split": "valid"},
+                 ),
+             ]
+         elif self.config.name == "wikitext-103-raw-v1":
+             data_file = dl_manager.download_and_extract(self.config.data_url)
+             data_dir = os.path.join(data_file, "wikitext-103-raw")
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"},
+                 ),
+             ]
+         elif self.config.name == "wikitext-2-raw-v1":
+             data_file = dl_manager.download_and_extract(self.config.data_url)
+             data_dir = os.path.join(data_file, "wikitext-2-raw")
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"},
+                 ),
+             ]
+         elif self.config.name == "wikitext-2-v1":
+             data_file = dl_manager.download_and_extract(self.config.data_url)
+             data_dir = os.path.join(data_file, "wikitext-2")
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.tokens"), "split": "train"},
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.tokens"), "split": "valid"},
+                 ),
+             ]
+
+     def _generate_examples(self, data_file, split):
+         """Yields examples."""
+         # TODO(wikitext): Yields (key, example) tuples from the dataset
+         with open(data_file, encoding="utf-8") as f:
+             for idx, row in enumerate(f):
+                 if row.strip():
+                     yield idx, {"text": row}
+                 else:
+                     yield idx, {"text": ""}
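
With this generator, every line of the source file becomes one example and blank lines are kept as empty strings. A minimal sketch of how that surfaces when loading (the config name and printed counts are illustrative, not verified here):

import datasets

# Load a single split of one configuration produced by the script above.
dataset = datasets.load_dataset("wikitext", "wikitext-103-v1", split="validation")

# Blank lines in the source files show up as examples with text == "".
empty = sum(1 for example in dataset if example["text"] == "")
print(f"{dataset.num_rows} rows, {empty} of them empty lines")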