Commit 7800097
Parent(s): 927b4b3
Update parquet files

Files changed:
- README.md +0 -3
- dataset_infos.json +0 -1
- default/train/0000.parquet +0 -0
- gitattributes.txt +0 -27
- vocab.txt +0 -0
- wikitext.py +0 -192
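
For context: this commit removes the wikitext.py loading script and its metadata and replaces them with a pre-converted Parquet shard, so the data can be read directly from the repository. Below is a minimal sketch of what loading looks like after such a conversion; the repo id and config name are assumptions taken from the files in this diff, not something this commit states.

    # Minimal sketch (assumed repo id / config name); reads the converted Parquet
    # data instead of executing the deleted wikitext.py script.
    from datasets import load_dataset

    ds = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
    print(ds[0]["text"])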

README.md
DELETED
@@ -1,3 +0,0 @@
The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike License.

Compared to the preprocessed version of Penn Treebank (PTB), WikiText-2 is over 2 times larger and WikiText-103 is over 110 times larger. The WikiText dataset also features a far larger vocabulary and retains the original case, punctuation and numbers - all of which are removed in PTB. As it is composed of full articles, the dataset is well suited for models that can take advantage of long term dependencies.

dataset_infos.json
DELETED
@@ -1 +0,0 @@
{"wikitext-103-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext", "config_name": "wikitext-103-v1", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1295579, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 545142639, "num_examples": 1801350, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1154755, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip": {"num_bytes": 190229076, "checksum": "242ba0f20b329cfdf1ccc61e9e9e5b59becf189db7f7a81cd2a0e2fc31539590"}}, "download_size": 190229076, "post_processing_size": null, "dataset_size": 547592973, "size_in_bytes": 737822049}, "wikitext-2-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext", "config_name": "wikitext-2-v1", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1270951, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 10918134, "num_examples": 36718, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1134127, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip": {"num_bytes": 4475746, "checksum": "92675f1d63015c1c8b51f1656a52d5bdbc33aafa60cc47a218a66e7ee817488c"}}, "download_size": 4475746, "post_processing_size": null, "dataset_size": 13323212, "size_in_bytes": 17798958}, "wikitext-103-raw-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext", "config_name": "wikitext-103-raw-v1", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1305092, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 546501673, "num_examples": 1801350, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1159292, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip": {"num_bytes": 191984949, "checksum": "91c00ae287f0d699e18605c84afc9e45c192bc6b7797ff8837e5474655a33794"}}, "download_size": 191984949, "post_processing_size": null, "dataset_size": 548966057, "size_in_bytes": 740951006}, "wikitext-2-raw-v1": {"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n", "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/", "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "wikitext", "config_name": "wikitext-2-raw-v1", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1305092, "num_examples": 4358, "dataset_name": "wikitext"}, "train": {"name": "train", "num_bytes": 11061733, "num_examples": 36718, "dataset_name": "wikitext"}, "validation": {"name": "validation", "num_bytes": 1159292, "num_examples": 3760, "dataset_name": "wikitext"}}, "download_checksums": {"https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip": {"num_bytes": 4721645, "checksum": "ef7edb566e3e2b2d31b29c1fdb0c89a4cc683597484c3dc2517919c615435a11"}}, "download_size": 4721645, "post_processing_size": null, "dataset_size": 13526117, "size_in_bytes": 18247762}}
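
The JSON above records per-split example counts, byte sizes, and download checksums that the datasets library exposed as DatasetInfo. A small sketch of reading the same metadata programmatically; the dataset and config names are assumptions mirroring the file above, and it assumes split metadata is still published for the dataset.

    # Sketch only: list split names, example counts and byte sizes without
    # downloading the data. Dataset/config names mirror dataset_infos.json above.
    from datasets import load_dataset_builder

    builder = load_dataset_builder("wikitext", "wikitext-103-v1")
    for name, info in (builder.info.splits or {}).items():
        print(name, info.num_examples, info.num_bytes)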

default/train/0000.parquet
ADDED
Binary file (266 kB).
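
Because the added file is an ordinary Parquet shard whose schema (a single string column named "text") follows the features declared in dataset_infos.json, it can also be read with standard Parquet tooling once downloaded. A sketch, with the local path assumed:

    # Sketch: read the new shard directly (requires pandas with pyarrow installed).
    # The local path is an assumption; the "text" column follows the declared features.
    import pandas as pd

    df = pd.read_parquet("default/train/0000.parquet")
    print(df.columns.tolist())  # expected: ["text"]
    print(df.head())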

gitattributes.txt
DELETED
@@ -1,27 +0,0 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

vocab.txt
DELETED
The diff for this file is too large to render.

wikitext.py
DELETED
@@ -1,192 +0,0 @@
"""TODO(wikitext): Add a description here."""


import os

import datasets


_CITATION = """\
@misc{merity2016pointer,
    title={Pointer Sentinel Mixture Models},
    author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},
    year={2016},
    eprint={1609.07843},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
 The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified
 Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike
 License.
"""
_HOMEPAGE = "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/"
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
_DATA_URL = "https://s3.amazonaws.com/research.metamind.io/wikitext"


class WikitextConfig(datasets.BuilderConfig):
    """BuilderConfig for GLUE."""

    def __init__(self, data_url, **kwargs):
        """BuilderConfig for Wikitext

        Args:
          data_url: `string`, url to the dataset (word or raw level)
          **kwargs: keyword arguments forwarded to super.
        """
        super(WikitextConfig, self).__init__(
            version=datasets.Version(
                "1.0.0",
            ),
            **kwargs,
        )
        self.data_url = data_url


class Wikitext(datasets.GeneratorBasedBuilder):
    """TODO(wikitext_103): Short description of my dataset."""

    # TODO(wikitext_103): Set up version.
    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        WikitextConfig(
            name="wikitext-103-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        WikitextConfig(
            name="wikitext-2-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        WikitextConfig(
            name="wikitext-103-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
        WikitextConfig(
            name="wikitext-2-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
    ]

    def _info(self):
        # TODO(wikitext): Specifies the datasets.DatasetInfo object
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # datasets.features.FeatureConnectors
            features=datasets.Features(
                {
                    "text": datasets.Value("string")
                    # These are the features of your dataset like images, labels ...
                }
            ),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # TODO(wikitext): Downloads the data and defines the splits
        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs
        if self.config.name == "wikitext-103-v1":
            data_file = dl_manager.download_and_extract(self.config.data_url)
            data_dir = os.path.join(data_file, "wikitext-103")
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.tokens"), "split": "train"},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.tokens"), "split": "valid"},
                ),
            ]
        else:
            if self.config.name == "wikitext-103-raw-v1":
                data_file = dl_manager.download_and_extract(self.config.data_url)
                data_dir = os.path.join(data_file, "wikitext-103-raw")
                return [
                    datasets.SplitGenerator(
                        name=datasets.Split.TEST,
                        gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"},
                    ),
                    datasets.SplitGenerator(
                        name=datasets.Split.TRAIN,
                        gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"},
                    ),
                    datasets.SplitGenerator(
                        name=datasets.Split.VALIDATION,
                        gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"},
                    ),
                ]
            else:
                if self.config.name == "wikitext-2-raw-v1":
                    data_file = dl_manager.download_and_extract(self.config.data_url)
                    data_dir = os.path.join(data_file, "wikitext-2-raw")
                    return [
                        datasets.SplitGenerator(
                            name=datasets.Split.TEST,
                            gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.raw"), "split": "test"},
                        ),
                        datasets.SplitGenerator(
                            name=datasets.Split.TRAIN,
                            gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train.raw"), "split": "train"},
                        ),
                        datasets.SplitGenerator(
                            name=datasets.Split.VALIDATION,
                            gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid.raw"), "split": "valid"},
                        ),
                    ]
                else:
                    if self.config.name == "wikitext-2-v1":
                        data_file = dl_manager.download_and_extract(self.config.data_url)
                        data_dir = os.path.join(data_file, "wikitext-2")
                        return [
                            datasets.SplitGenerator(
                                name=datasets.Split.TEST,
                                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test.tokens"), "split": "test"},
                            ),
                            datasets.SplitGenerator(
                                name=datasets.Split.TRAIN,
                                gen_kwargs={
                                    "data_file": os.path.join(data_dir, "wiki.train.tokens"),
                                    "split": "train",
                                },
                            ),
                            datasets.SplitGenerator(
                                name=datasets.Split.VALIDATION,
                                gen_kwargs={
                                    "data_file": os.path.join(data_dir, "wiki.valid.tokens"),
                                    "split": "valid",
                                },
                            ),
                        ]

    def _generate_examples(self, data_file, split):

        """Yields examples."""
        # TODO(wikitext): Yields (key, example) tuples from the dataset
        with open(data_file, encoding="utf-8") as f:
            for idx, row in enumerate(f):
                if row.strip():
                    yield idx, {"text": row}
                else:
                    yield idx, {"text": ""}
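
The deleted script downloaded the original zip archives and yielded one {"text": ...} record per line of the raw files; the Parquet shard added in this commit stores the same records in columnar form. A rough sketch of how such a shard could be regenerated from the old script (not necessarily the pipeline used for this commit; the local script path, config choice, and output path are assumptions):

    # Rough sketch: run the old loading script once and write the train split to Parquet.
    # Depending on the datasets version, loading a script may also require
    # trust_remote_code=True.
    from datasets import load_dataset

    ds = load_dataset("wikitext.py", "wikitext-2-raw-v1", split="train")
    ds.to_parquet("default/train/0000.parquet")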