imvladikon committed on
Commit
f39d4cb
1 Parent(s): dbd611e

Delete hebrew_projectbenyehuda.py

Files changed (1)
  1. hebrew_projectbenyehuda.py +0 -140
hebrew_projectbenyehuda.py DELETED
@@ -1,140 +0,0 @@
- """Public domain texts from Project Ben-Yehuda, a set of books extracted from the Project Ben-Yehuda library."""
-
-
- import csv
-
- import datasets
-
-
- _CITATION = """\
- @article{,
-     author = {},
-     title = {Public domain texts from Project Ben-Yehuda},
-     journal = {},
-     url = {https://github.com/projectbenyehuda/public_domain_dump},
-     year = {2020},
- }
-
- """
-
- _DESCRIPTION = """\
- This repository contains a dump of thousands of public domain works in Hebrew from Project Ben-Yehuda, in plaintext UTF-8 files, with and without diacritics (nikkud). The metadata file (pseudocatalogue.csv) is a list of titles, authors, genres, and file paths, to help you process the dump.
- All these works are in the public domain, so you are free to make any use of them and do not need to ask for permission.
- There are 10078 files and 3181136 lines in total.
- """
-
- _ASSET_ROOT_URL = "https://raw.githubusercontent.com/projectbenyehuda/public_domain_dump/master/"
- _STORAGE_API_ROOT_URL = "https://raw.githubusercontent.com/projectbenyehuda/public_domain_dump/master/txt/"
-
- # Downloading the text files one by one from GitHub is too slow, so all URLs
- # are collected from the metadata catalogue and handed to the download manager
- # as a single batch below.
-
- _METADATA_URL = _ASSET_ROOT_URL + "pseudocatalogue.csv"
-
-
- class HebrewProjectbenyehuda(datasets.GeneratorBasedBuilder):
-     """Project Ben-Yehuda dataset: books as plain text extracted from the Project Ben-Yehuda library."""
-
-     VERSION = datasets.Version("0.1.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # These are the features of the dataset.
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("int32"),
-                     "url": datasets.Value("string"),
-                     "title": datasets.Value("string"),
-                     "authors": datasets.Value("string"),
-                     "translators": datasets.Value("string"),
-                     "original_language": datasets.Value("string"),
-                     "genre": datasets.Value("string"),
-                     "source_edition": datasets.Value("string"),
-                     "text": datasets.Value("string"),
-                 }
-             ),
-             # If there were a common (input, target) tuple in the features,
-             # it would be specified here and used when as_supervised=True is
-             # passed to builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset, for documentation.
-             homepage="https://github.com/projectbenyehuda/public_domain_dump",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # dl_manager is a datasets.download.DownloadManager that can be used
-         # to download and extract URLs.
-         metadata = dl_manager.download({"metadata": _METADATA_URL})
-
-         # Collect every work's ID and raw-text URL from the metadata catalogue.
-         urls_to_download = dict()
-         ids = list()
-         with open(metadata["metadata"], encoding="utf-8") as csv_file:
-             for row in csv.DictReader(csv_file):
-                 ids.append(row["ID"])
-                 urls_to_download[row["ID"]] = _STORAGE_API_ROOT_URL + row["path"].strip("/") + ".txt"
-
-         downloaded_files = dl_manager.download(urls_to_download)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "ids": ids,
-                     "metadata_filepath": metadata["metadata"],
-                     "filepaths": downloaded_files,
-                 },
-             )
-         ]
-
-     def _generate_examples(self, ids, metadata_filepath, filepaths):
-         """Yields examples."""
-         with open(metadata_filepath, encoding="utf-8") as f:
-             # Explicit fieldnames are supplied below, so DictReader would
-             # otherwise treat the CSV's header row as data; skip it first.
-             next(f)
-             metadata_dict = csv.DictReader(
-                 f,
-                 fieldnames=[
-                     "_id",
-                     "path",
-                     "title",
-                     "authors",
-                     "translators",
-                     "original_language",
-                     "genre",
-                     "source_edition",
-                 ],
-             )
-             indexed_metadata = {str(row["_id"]): row for row in metadata_dict}
-
-         for _id in ids:
-             data = indexed_metadata[_id]
-             filepath = filepaths[_id]
-
-             with open(filepath, encoding="utf-8") as f:
-                 text = f.read()
-
-             _id = data["_id"]
-             title = data["title"]
-             url = _STORAGE_API_ROOT_URL + data["path"].strip("/") + ".txt"
-             authors = data["authors"]
-             translators = data["translators"]
-             original_language = data["original_language"]
-             genre = data["genre"]
-             source_edition = data["source_edition"]
-
-             yield _id, {
-                 "id": _id,
-                 "title": title,
-                 "url": url,
-                 "authors": authors,
-                 "translators": translators,
-                 "original_language": original_language,
-                 "genre": genre,
-                 "source_edition": source_edition,
-                 "text": text,
-             }
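
With the script deleted, the dump itself remains reachable at the URLs hardcoded above. The following is a minimal sketch of the same metadata-driven URL construction that _split_generators performed: fetch pseudocatalogue.csv and derive one work's raw-text URL. The "ID" and "path" columns mirror the code above; the requests dependency is an assumption (any HTTP client works), and this is an illustration, not a supported interface.

    # Minimal sketch, mirroring _split_generators above.
    # `requests` is an assumed HTTP client, not part of the original script.
    import csv
    import io

    import requests

    ROOT = "https://raw.githubusercontent.com/projectbenyehuda/public_domain_dump/master/"
    catalogue = csv.DictReader(io.StringIO(requests.get(ROOT + "pseudocatalogue.csv").text))
    first = next(catalogue)  # first work in the catalogue
    text_url = ROOT + "txt/" + first["path"].strip("/") + ".txt"
    print(first["ID"], text_url)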
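For context, here is a hedged example of how a script-based dataset like this one was typically loaded before the deletion. The repository id "imvladikon/hebrew_projectbenyehuda" is an assumption inferred from the committer's namespace, and running dataset scripts requires a datasets version that still supports trust_remote_code.

    # Hypothetical usage; the dataset id and script-loading support are assumptions.
    from datasets import load_dataset

    ds = load_dataset("imvladikon/hebrew_projectbenyehuda", split="train", trust_remote_code=True)
    print(ds[0]["title"], len(ds[0]["text"]))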