Datasets: Update hebrew_projectbenyehuda.py #2
opened by imvladikon

hebrew_projectbenyehuda.py (+51 -66) CHANGED
@@ -1,10 +1,12 @@
 """Public domain texts from Project Ben-Yehuda- a set of books extracted from the Project BenYehuda library"""
 
-
 import csv
+csv.field_size_limit(1000000000)
 
-import datasets
+from pathlib import Path
 
+import datasets
+import logging
 
 _CITATION = """\
 @article{,
@@ -14,7 +16,6 @@ _CITATION = """\
   url = {https://github.com/projectbenyehuda/public_domain_dump},
   year = {2020},
 }
-
 """
 
 _DESCRIPTION = """\
@@ -23,28 +24,26 @@ All these works are in the public domain, so you are free to make any use of the
 There are 10078 files, 3181136 lines
 """
 
-
-_STORAGE_API_ROOT_URL = "https://raw.githubusercontent.com/projectbenyehuda/public_domain_dump/master/txt/"
-
-# download one by one file from github is too slow
-
-_METADATA_URL = "https://raw.githubusercontent.com/projectbenyehuda/public_domain_dump/master/pseudocatalogue.csv"
+logger = logging.getLogger(__name__)
+
+URLS = dict(
+    html="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/html.zip",
+    catalogue="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/pseudocatalogue.csv",
+    txt="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/txt.zip",
+    txt_stripped="https://github.com/projectbenyehuda/public_domain_dump/releases/download/2022-12/txt_stripped.zip",
+)
 
 
 class HebrewProjectbenyehuda(datasets.GeneratorBasedBuilder):
-
-
-    VERSION = datasets.Version("0.1.0")
+    VERSION = datasets.Version("0.2.0")
 
     def _info(self):
         return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
             features=datasets.Features(
                 {
                     "id": datasets.Value("int32"),
-                    "url": datasets.Value("string"),
+                    "path": datasets.Value("string"),
                     "title": datasets.Value("string"),
                     "authors": datasets.Value("string"),
                     "translators": datasets.Value("string"),
@@ -52,49 +51,38 @@ class HebrewProjectbenyehuda(datasets.GeneratorBasedBuilder):
                     "genre": datasets.Value("string"),
                     "source_edition": datasets.Value("string"),
                     "text": datasets.Value("string"),
-
+                    "txt_stripped": datasets.Value("string"),
+                    "html": datasets.Value("string"),
                 }
             ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
             supervised_keys=None,
-            # Homepage of the dataset for documentation
             homepage="https://github.com/projectbenyehuda/public_domain_dump",
             citation=_CITATION,
         )
 
     def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-
-        metadata = dl_manager.download({"metadata": _METADATA_URL})
 
-        urls_to_download = {}
-        ids = list()
-        with open(metadata["metadata"], encoding="utf-8") as csv_file:
-            for row in csv.DictReader(csv_file):
-                ids.append(row["ID"])
-                urls_to_download[row["ID"]] = _STORAGE_API_ROOT_URL + row["path"].strip("/") + ".txt"
+        paths = {}
+        for key, url in URLS.items():
+            logger.info("Downloading %s", url)
+            paths[key] = dl_manager.download_and_extract(url)
 
-        downloaded_files = dl_manager.download(urls_to_download)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "ids": ids,
-                    "metadata_filepath": metadata["metadata"],
-                    "filepaths": downloaded_files,
+                    "filepaths": paths,
                 },
             )
         ]
 
-    def _generate_examples(self, ids, metadata_filepath, filepaths):
-        """Yields examples."""
+    def _generate_examples(self, filepaths):
+        catalogue_path = filepaths["catalogue"]
+        html_path = Path(filepaths["html"]) / "html"
+        txt_path = Path(filepaths["txt"]) / "txt"
+        txt_stripped_path = Path(filepaths["txt_stripped"]) / "txt_stripped"
 
-        with open(metadata_filepath, encoding="utf-8") as f:
+        with open(catalogue_path, encoding="utf-8") as f:
             metadata_dict = csv.DictReader(
                 f,
                 fieldnames=[
@@ -108,33 +96,30 @@ class HebrewProjectbenyehuda(datasets.GeneratorBasedBuilder):
                     "source_edition",
                 ],
             )
-            indexed_metadata = {str(row["_id"]): row for row in metadata_dict}
-
-        for _id in ids:
-            data = indexed_metadata[_id]
-            filepath = filepaths[_id]
+            for data in metadata_dict:
+                if data["path"] == "path":
+                    continue
+
+                yield data["_id"], {
+                    "id": data["_id"],
+                    "title": data["title"],
+                    "path": data["path"],
+                    "authors": data["authors"],
+                    "translators": data["translators"],
+                    "original_language": data["original_language"],
+                    "genre": data["genre"],
+                    "source_edition": data["source_edition"],
+                    "text": self.read_file(txt_path / f"{data['path'].strip('/')}.txt"),
+                    "txt_stripped": self.read_file(
+                        txt_stripped_path / f"{data['path'].strip('/')}.txt"
+                    ),
+                    "html": self.read_file(html_path / f"{data['path'].strip('/')}.html"),
+                }
 
+    def read_file(self, filepath):
+        filepath = Path(filepath)
+        if filepath.exists():
             with open(filepath, encoding="utf-8") as f:
-                text = f.read()
-
-
-            title = data["title"]
-            url = data["path"].strip("/")
-            url = _STORAGE_API_ROOT_URL + url + ".txt"
-            authors = data["authors"]
-            translators = data["translators"]
-            original_language = data["original_language"]
-            genre = data["genre"]
-            source_edition = data["source_edition"]
-
-            yield _id, {
-                "id": _id,
-                "title": title,
-                "url": url,
-                "authors": authors,
-                "translators": translators,
-                "original_language": original_language,
-                "genre": genre,
-                "source_edition": source_edition,
-                "text": text,
-            }
+                return f.read()
+        else:
+            return None
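A note on the csv.DictReader construction in the new _generate_examples: because explicit fieldnames are passed, the reader does not consume the catalogue's header row, so the header arrives as the first record. That is exactly what the `if data["path"] == "path": continue` guard filters out. A minimal sketch of the behavior, with made-up catalogue values:

import csv
import io

# Stand-in for pseudocatalogue.csv: a header row plus one (made-up) record.
catalogue = io.StringIO("_id,path,title\n70,/p/70,Some Title\n")

reader = csv.DictReader(catalogue, fieldnames=["_id", "path", "title"])
for row in reader:
    # With explicit fieldnames, the real header row comes back as data,
    # so it has to be skipped by hand, as the updated script does.
    if row["path"] == "path":
        continue
    print(row["_id"], row["title"])  # prints: 70 Some Title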
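Similarly, the paths handed to _generate_examples come from dl_manager.download_and_extract, which returns a local file path for the plain CSV and an extraction directory for each zip; the script then expects each archive to unpack into a top-level folder named after it (txt/, txt_stripped/, html/). A sketch of the resulting path arithmetic, with hypothetical cache locations standing in for whatever the download manager actually returns:

from pathlib import Path

# Hypothetical return values of dl_manager.download_and_extract():
# the CSV stays a file, each zip becomes an extraction directory.
filepaths = {
    "catalogue": "/cache/downloads/pseudocatalogue.csv",
    "txt": "/cache/extracted/1a2b3c",
    "txt_stripped": "/cache/extracted/4d5e6f",
    "html": "/cache/extracted/7a8b9c",
}

txt_path = Path(filepaths["txt"]) / "txt"  # the archive's top-level folder
path_field = "/p/70"  # made-up value of the catalogue's "path" column

print(txt_path / f"{path_field.strip('/')}.txt")
# -> /cache/extracted/1a2b3c/txt/p/70.txt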
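For completeness, a usage sketch once the change lands, assuming the updated script is saved locally as hebrew_projectbenyehuda.py (a Hub repo id works the same way, and newer datasets releases may additionally ask for trust_remote_code=True):

from datasets import load_dataset

ds = load_dataset("hebrew_projectbenyehuda.py", split="train")

example = ds[0]
print(example["title"], example["authors"], example["genre"])

# read_file returns None when a work is missing from one of the dumps,
# so guard before slicing the text fields.
if example["text"]:
    print(example["text"][:200])
if example["html"]:
    print(example["html"][:200])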