mariosasko committed on
Commit
907091c
1 Parent(s): 7bde1fb

Delete wiki40b.py

Files changed (1)
  1. wiki40b.py +0 -182
wiki40b.py DELETED
@@ -1,182 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """Wiki40B: A clean Wikipedia dataset for 40+ languages."""
-
-
- import datasets
-
-
- logger = datasets.logging.get_logger(__name__)
-
-
- _CITATION = """
- """
-
- _DESCRIPTION = """
- Clean-up text for 40+ Wikipedia language editions of pages that
- correspond to entities. The datasets have train/dev/test splits per language.
- The dataset is cleaned up by page filtering to remove disambiguation pages,
- redirect pages, deleted pages, and non-entity pages. Each example contains the
- wikidata id of the entity, and the full Wikipedia article after page processing
- that removes non-content sections and structured objects.
- """
-
- _LICENSE = """
- This work is licensed under the Creative Commons Attribution-ShareAlike
- 3.0 Unported License. To view a copy of this license, visit
- http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to
- Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
- """
-
- _URL = "https://research.google/pubs/pub49029/"
-
- _DATA_DIRECTORY = "gs://tfds-data/downloads/wiki40b/tfrecord_prod"
-
- WIKIPEDIA_LANGUAGES = [
-     "en",
-     "ar",
-     "zh-cn",
-     "zh-tw",
-     "nl",
-     "fr",
-     "de",
-     "it",
-     "ja",
-     "ko",
-     "pl",
-     "pt",
-     "ru",
-     "es",
-     "th",
-     "tr",
-     "bg",
-     "ca",
-     "cs",
-     "da",
-     "el",
-     "et",
-     "fa",
-     "fi",
-     "he",
-     "hi",
-     "hr",
-     "hu",
-     "id",
-     "lt",
-     "lv",
-     "ms",
-     "no",
-     "ro",
-     "sk",
-     "sl",
-     "sr",
-     "sv",
-     "tl",
-     "uk",
-     "vi",
- ]
-
-
- class Wiki40bConfig(datasets.BuilderConfig):
-     """BuilderConfig for Wiki40B."""
-
-     def __init__(self, language=None, **kwargs):
-         """BuilderConfig for Wiki40B.
-
-         Args:
-           language: string, the language code for the Wiki40B dataset to use.
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(Wiki40bConfig, self).__init__(
-             name=str(language), description=f"Wiki40B dataset for {language}.", **kwargs
-         )
-         self.language = language
-
-
- _VERSION = datasets.Version("1.1.0")
-
-
- class Wiki40b(datasets.BeamBasedBuilder):
-     """Wiki40B: A Clean Wikipedia Dataset for Multilingual Language Modeling."""
-
-     BUILDER_CONFIGS = [
-         Wiki40bConfig(
-             version=_VERSION,
-             language=lang,
-         )  # pylint:disable=g-complex-comprehension
-         for lang in WIKIPEDIA_LANGUAGES
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "wikidata_id": datasets.Value("string"),
-                     "text": datasets.Value("string"),
-                     "version_id": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_URL,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-
-         lang = self.config.language
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepaths": f"{_DATA_DIRECTORY}/train/{lang}_examples-*"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"filepaths": f"{_DATA_DIRECTORY}/dev/{lang}_examples-*"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"filepaths": f"{_DATA_DIRECTORY}/test/{lang}_examples-*"},
-             ),
-         ]
-
-     def _build_pcollection(self, pipeline, filepaths):
-         """Build PCollection of examples."""
-         import apache_beam as beam
-         import tensorflow as tf
-
-         logger.info("generating examples from = %s", filepaths)
-
-         def _extract_content(example):
-             """Extracts content from a TFExample."""
-             wikidata_id = example.features.feature["wikidata_id"].bytes_list.value[0].decode("utf-8")
-             text = example.features.feature["text"].bytes_list.value[0].decode("utf-8")
-             version_id = example.features.feature["version_id"].bytes_list.value[0].decode("utf-8")
-
-             # wikidata_id could be duplicated with different texts.
-             yield wikidata_id + text, {
-                 "wikidata_id": wikidata_id,
-                 "text": text,
-                 "version_id": version_id,
-             }
-
-         return (
-             pipeline
-             | beam.io.ReadFromTFRecord(filepaths, coder=beam.coders.ProtoCoder(tf.train.Example))
-             | beam.FlatMap(_extract_content)
-         )
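
With the loading script deleted, the dataset would be loaded straight from data files hosted in the repository rather than through this Beam-based builder. A minimal usage sketch follows; the repository id "wiki40b", the "en" config name, and the column names are assumptions carried over from the deleted script's language list and feature schema, not something stated in this commit.

    from datasets import load_dataset

    # Assumption: the Hub repo "wiki40b" now ships pre-converted data files,
    # so load_dataset works without a loading script. Config names mirror the
    # WIKIPEDIA_LANGUAGES list from the deleted builder.
    wiki = load_dataset("wiki40b", "en", split="train")

    print(wiki.features)           # expected columns: wikidata_id, text, version_id
    print(wiki[0]["wikidata_id"])  # a Wikidata identifier for the page's entity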