Commit 499cebb0
Update files from the datasets library (from 1.1.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.1.0
- .gitattributes +27 -0
- dataset_infos.json +1 -0
- dummy/plain_text/1.0.0/dummy_data.zip +3 -0
- openwebtext.py +87 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
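These attribute rules route large binary artifacts (archives, model weights, Arrow/Parquet data, TensorBoard event files) through Git LFS, so the repository stores small pointer files rather than the blobs themselves; the dummy_data.zip entry later in this commit is exactly such a pointer.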
dataset_infos.json
ADDED
@@ -0,0 +1 @@
+{"plain_text": {"description": "An open-source replication of the WebText dataset from OpenAI.\n", "citation": "@misc{Gokaslan2019OpenWeb, \n\ttitle={OpenWebText Corpus},\n\tauthor={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},\n\thowpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}}, \n\tyear={2019}\n}\n", "homepage": "https://skylion007.github.io/OpenWebTextCorpus/", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "openwebtext", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 39769494896, "num_examples": 8013769, "dataset_name": "openwebtext"}}, "download_checksums": {"https://zenodo.org/record/3834942/files/openwebtext.tar.xz": {"num_bytes": 12880027468, "checksum": "9fe39d154c5bc67da8c359415372b79510eb1e2edb0d035fe4f7fc3a732b9336"}}, "download_size": 12880027468, "post_processing_size": null, "dataset_size": 39769494896, "size_in_bytes": 52649522364}}
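This metadata file records the split sizes and download checksums for the plain_text config. As a minimal sketch (assuming the file sits in the current directory), it can be inspected before committing to the download:

    import json

    # Read the metadata written by the `datasets` library for this dataset.
    with open("dataset_infos.json", encoding="utf-8") as f:
        infos = json.load(f)

    info = infos["plain_text"]
    print(info["version"]["version_str"])           # 1.0.0
    print(info["splits"]["train"]["num_examples"])  # 8013769 documents
    print(info["download_size"])                    # 12880027468 bytes (the .tar.xz)
    print(info["dataset_size"])                     # 39769494896 bytes once extracted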
dummy/plain_text/1.0.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a86b562b3452a6bf2588ba6cd4184c3f636d473db15024a00b1e5fa7c6f69bfe
+size 9786
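What is committed here is the Git LFS pointer, not the 9786-byte archive itself, which lives in LFS storage keyed by the sha256 oid. A minimal sketch of decoding such a pointer (parse_lfs_pointer is a hypothetical helper, not part of any library):

    # Each pointer line is "key value"; split on the first space.
    def parse_lfs_pointer(path):
        fields = {}
        with open(path, encoding="utf-8") as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    # For this file: {'version': '...', 'oid': 'sha256:a86b5...', 'size': '9786'}
    print(parse_lfs_pointer("dummy/plain_text/1.0.0/dummy_data.zip"))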
openwebtext.py
ADDED
@@ -0,0 +1,87 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""The Open WebText Corpus"""
+
+from __future__ import absolute_import, division, print_function
+
+import os
+import re
+from itertools import chain
+
+import datasets
+
+
+_CITATION = """\
+@misc{Gokaslan2019OpenWeb,
+    title={OpenWebText Corpus},
+    author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
+    howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
+    year={2019}
+}
+"""
+
+_DESCRIPTION = """\
+An open-source replication of the WebText dataset from OpenAI.
+"""
+
+_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
+
+
+class Openwebtext(datasets.GeneratorBasedBuilder):
+    """The Open WebText dataset."""
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="plain_text",
+            description="Plain text",
+            version=datasets.Version("1.0.0"),
+        )
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features({"text": datasets.Value("string")}),
+            homepage="https://skylion007.github.io/OpenWebTextCorpus/",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        dl_dir = dl_manager.download_and_extract(_URL)
+        owt_dir = os.path.join(dl_dir, "openwebtext")
+        subset_xzs = [
+            os.path.join(owt_dir, file_name)
+            for file_name in sorted(os.listdir(owt_dir))
+            if file_name.endswith("xz")  # filter out ...xz.lock
+        ]
+        # Decompress the per-subset archives in parallel on ~75% of the cores.
+        ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
+        nested_txt_files = [
+            [
+                os.path.join(ex_dir, txt_file_name)
+                for txt_file_name in sorted(os.listdir(ex_dir))
+                if txt_file_name.endswith("txt")
+            ]
+            for ex_dir in ex_dirs
+        ]
+        # Flatten the per-subset lists into a single iterable of .txt paths.
+        txt_files = chain(*nested_txt_files)
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
+        ]
+
+    def _generate_examples(self, txt_files):
+        """Yields examples: one document per .txt file, with runs of blank lines collapsed."""
+        for idx, filepath in enumerate(txt_files):
+            with open(filepath, encoding="utf-8") as f:
+                yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
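As a quick usage sketch (assuming a datasets release that ships this script), loading the builder above triggers the download_and_extract of _URL plus the per-subset extraction in _split_generators, and yields the single train split:

    from datasets import load_dataset

    # Budget roughly 12 GB for the .tar.xz download and ~40 GB for the
    # extracted plain text before caching completes.
    ds = load_dataset("openwebtext", split="train")
    print(ds[0]["text"][:200])  # first 200 characters of the first document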