Dataset: bookcorpusopen

Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
ArXiv:
License:
system (HF staff) committed
Commit 4c153aa
0 Parent(s):

Update files from the datasets library (from 1.1.3)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.1.3

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
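
These .gitattributes rules route any file matching one of the patterns through Git LFS, so the repository stores a small pointer file instead of the binary itself. The snippet below is a minimal, illustrative Python sketch of checking a path against such patterns; it relies on fnmatch, which only approximates git's wildmatch rules (notably for patterns like saved_model/**/*), so treat it as a sketch rather than a faithful reimplementation.

# Rough sketch: would a given path be routed through Git LFS by these patterns?
# NOTE: fnmatch only approximates git's wildmatch semantics; "**" and
# directory-anchored patterns behave differently in git itself.
import fnmatch
import posixpath

LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.gz", "*.tar.*", "*.zip", "*.zstandard"]

def tracked_by_lfs(path):
    # Patterns without a slash are matched against the file name only.
    basename = posixpath.basename(path)
    return any(fnmatch.fnmatch(basename, pattern) for pattern in LFS_PATTERNS)

print(tracked_by_lfs("dummy/plain_text/1.0.0/dummy_data.zip"))  # True, matches *.zip
print(tracked_by_lfs("bookcorpusopen.py"))                      # False, stored as plain text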
bookcorpusopen.py ADDED
@@ -0,0 +1,106 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """The BookCorpus dataset based on Shawn Presser's work https://github.com/soskek/bookcorpus/issues/27 """
+
+ from __future__ import absolute_import, division, print_function
+
+ import glob
+ import os
+ import pathlib
+
+ import datasets
+
+
+ _DESCRIPTION = """\
+ Books are a rich source of both fine-grained information, how a character, \
+ an object or a scene looks like, as well as high-level semantics, what \
+ someone is thinking, feeling and how these states evolve through a story.\
+
+ This version of bookcorpus has 17868 dataset items (books). Each item contains \
+ two fields: title and text. The title is the name of the book (just the file name) \
+ while text contains unprocessed book text. The bookcorpus has been prepared by \
+ Shawn Presser and is generously hosted by The-Eye. The-Eye is a non-profit, community \
+ driven platform dedicated to the archiving and long-term preservation of any and \
+ all data including but by no means limited to... websites, books, games, software, \
+ video, audio, other digital-obscura and ideas.
+ """
+
+ _CITATION = """\
+ @InProceedings{Zhu_2015_ICCV,
+     title = {Aligning Books and Movies: Towards Story-Like Visual Explanations by Watching Movies and Reading Books},
+     author = {Zhu, Yukun and Kiros, Ryan and Zemel, Rich and Salakhutdinov, Ruslan and Urtasun, Raquel and Torralba, Antonio and Fidler, Sanja},
+     booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
+     month = {December},
+     year = {2015}
+ }
+ """
+ _PROJECT_URL = "https://github.com/soskek/bookcorpus/issues/27"
+ _DOWNLOAD_URL = "https://the-eye.eu/public/AI/pile_preliminary_components/books1.tar.gz"
+
+
+ class BookCorpusOpenConfig(datasets.BuilderConfig):
+     """BuilderConfig for BookCorpus."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for BookCorpus.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(BookCorpusOpenConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
+
+
+ class BookCorpusOpen(datasets.GeneratorBasedBuilder):
+     """BookCorpus dataset."""
+
+     BUILDER_CONFIGS = [
+         BookCorpusOpenConfig(
+             name="plain_text",
+             description="Plain text",
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "title": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_PROJECT_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         arch_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"directory": arch_path}),
+         ]
+
+     def _generate_examples(self, directory):
+         glob_target = os.path.join(directory, "**/*.epub.txt")
+         book_files = glob.glob(glob_target, recursive=True)
+         book_files = sorted(book_files)
+         _id = 0
+         for book_file_path in book_files:
+             path = pathlib.PurePath(book_file_path)
+             with open(book_file_path, mode="r", encoding="utf-8") as f:
+                 yield _id, {"title": str(path.name), "text": f.read()},
+             _id += 1
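
For reference, a builder script like the one above is normally consumed through datasets.load_dataset rather than instantiated directly. A minimal usage sketch, assuming the script resolves under the bookcorpusopen dataset id and that the roughly 2.4 GB source archive can be downloaded and extracted locally:

# Minimal usage sketch for the builder defined above (the download is large:
# ~2.4 GB compressed, ~6.6 GB of text once generated).
from datasets import load_dataset

books = load_dataset("bookcorpusopen", "plain_text", split="train")

print(books.column_names)   # ['title', 'text']
print(books[0]["title"])    # file name of the first book
print(len(books))           # 17868 books in this version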
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"plain_text": {"description": "Books are a rich source of both fine-grained information, how a character, an object or a scene looks like, as well as high-level semantics, what someone is thinking, feeling and how these states evolve through a story.\nThis version of bookcorpus has 17868 dataset items (books). Each item contains two fields: title and text. The title is the name of the book (just the file name) while text contains unprocessed book text. The bookcorpus has been prepared by Shawn Presser and is generously hosted by The-Eye. The-Eye is a non-profit, community driven platform dedicated to the archiving and long-term preservation of any and all data including but by no means limited to... websites, books, games, software, video, audio, other digital-obscura and ideas.\n", "citation": "@InProceedings{Zhu_2015_ICCV,\n title = {Aligning Books and Movies: Towards Story-Like Visual Explanations by Watching Movies and Reading Books},\n author = {Zhu, Yukun and Kiros, Ryan and Zemel, Rich and Salakhutdinov, Ruslan and Urtasun, Raquel and Torralba, Antonio and Fidler, Sanja},\n booktitle = {The IEEE International Conference on Computer Vision (ICCV)},\n month = {December},\n year = {2015}\n}\n", "homepage": "https://github.com/soskek/bookcorpus/issues/27", "license": "", "features": {"title": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "book_corpus_open", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6644158829, "num_examples": 17868, "dataset_name": "book_corpus_open"}}, "download_checksums": {"https://the-eye.eu/public/AI/pile_preliminary_components/books1.tar.gz": {"num_bytes": 2404269430, "checksum": "e3c993cc825df2bdf0f78ef592f5c09236f0b9cd6bb1877142281acc50f446f9"}}, "download_size": 2404269430, "post_processing_size": null, "dataset_size": 6644158829, "size_in_bytes": 9048428259}}
dummy/plain_text/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b8343a455886433f43a58368db069d1820d42f393c3e04a8e5e138eb41aa16a
+ size 862302
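
The dummy_data.zip entry is stored as a Git LFS pointer: a three-line text stub holding the spec version, the SHA-256 object id, and the byte size of the real blob, which LFS swaps for the actual file on checkout. A minimal sketch of reading such a pointer, using the exact lines shown above:

# Parse a Git LFS pointer file into its key/value fields.
def parse_lfs_pointer(text):
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:6b8343a455886433f43a58368db069d1820d42f393c3e04a8e5e138eb41aa16a\n"
    "size 862302\n"
)

info = parse_lfs_pointer(pointer)
print(info["oid"])        # sha256:6b83...
print(int(info["size"]))  # 862302 bytes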