system (HF staff) committed
Commit eab50b5 (0 parents)

Update files from the datasets library (from 1.0.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (3)
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. wiki40b.py +183 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
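
These .gitattributes rules route binary and archive artifacts through Git LFS instead of storing them directly in the repository. As a rough illustration of how such glob patterns classify filenames, a minimal Python sketch follows; it uses fnmatch, which only approximates gitattributes matching (patterns like saved_model/**/* behave differently), and the pattern list is an abbreviated subset of the file above.

# Minimal sketch: approximate the "does this file go through LFS?" decision
# with fnmatch. gitattributes globbing is richer than fnmatch, so this is
# only an approximation; the pattern list is a subset of the file above.
from fnmatch import fnmatch

LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.h5", "*.parquet", "*.zip", "*tfevents*"]

def routed_through_lfs(filename: str) -> bool:
    return any(fnmatch(filename, pattern) for pattern in LFS_PATTERNS)

print(routed_through_lfs("wiki40b-train.arrow"))  # True
print(routed_through_lfs("wiki40b.py"))           # False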
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"en": {"description": "\nCleaned-up text for 40+ Wikipedia language editions of pages\ncorresponding to entities. The datasets have train/dev/test splits per language.\nThe dataset is cleaned up by page filtering to remove disambiguation pages,\nredirect pages, deleted pages, and non-entity pages. Each example contains the\nwikidata id of the entity, and the full Wikipedia article after page processing\nthat removes non-content sections and structured objects.\n", "citation": "\n", "homepage": "https://research.google/pubs/pub49029/", "license": "", "features": {"wikidata_id": {"dtype": "string", "id": null, "_type": "Value"}, "text": {"dtype": "string", "id": null, "_type": "Value"}, "version_id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "wiki40b", "config_name": "en", "version": {"version_str": "1.1.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 9423623904, "num_examples": 2926536, "dataset_name": "wiki40b"}, "validation": {"name": "validation", "num_bytes": 527383016, "num_examples": 163597, "dataset_name": "wiki40b"}, "test": {"name": "test", "num_bytes": 522219464, "num_examples": 162274, "dataset_name": "wiki40b"}}, "download_checksums": {}, "download_size": 0, "dataset_size": 10473226384, "size_in_bytes": 10473226384}}
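
The JSON above records, per config, the features, version, and split sizes produced by the builder. A minimal sketch for inspecting the recorded split sizes from a local copy of this file (the local path is assumed for illustration; adjust as needed):

# Minimal sketch: read dataset_infos.json and print the recorded split sizes
# for the "en" config. The local file path is an assumption.
import json

with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

for split_name, split_meta in infos["en"]["splits"].items():
    print(split_name, split_meta["num_examples"], "examples,", split_meta["num_bytes"], "bytes")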
wiki40b.py ADDED
@@ -0,0 +1,183 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Wiki40B: A clean Wikipedia dataset for 40+ languages."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import logging
+ import os
+
+ import datasets
+
+
+ _CITATION = """
+ """
+
+ _DESCRIPTION = """
+ Cleaned-up text for 40+ Wikipedia language editions of pages
+ corresponding to entities. The datasets have train/dev/test splits per language.
+ The dataset is cleaned up by page filtering to remove disambiguation pages,
+ redirect pages, deleted pages, and non-entity pages. Each example contains the
+ wikidata id of the entity, and the full Wikipedia article after page processing
+ that removes non-content sections and structured objects.
+ """
+
+ _LICENSE = """
+ This work is licensed under the Creative Commons Attribution-ShareAlike
+ 3.0 Unported License. To view a copy of this license, visit
+ http://creativecommons.org/licenses/by-sa/3.0/ or send a letter to
+ Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
+ """
+
+ _URL = "https://research.google/pubs/pub49029/"
+
+ _DATA_DIRECTORY = "gs://tfds-data/downloads/wiki40b/tfrecord_prod"
+
+ WIKIPEDIA_LANGUAGES = [
+     "en",
+     "ar",
+     "zh-cn",
+     "zh-tw",
+     "nl",
+     "fr",
+     "de",
+     "it",
+     "ja",
+     "ko",
+     "pl",
+     "pt",
+     "ru",
+     "es",
+     "th",
+     "tr",
+     "bg",
+     "ca",
+     "cs",
+     "da",
+     "el",
+     "et",
+     "fa",
+     "fi",
+     "he",
+     "hi",
+     "hr",
+     "hu",
+     "id",
+     "lt",
+     "lv",
+     "ms",
+     "no",
+     "ro",
+     "sk",
+     "sl",
+     "sr",
+     "sv",
+     "tl",
+     "uk",
+     "vi",
+ ]
+
+
+ class Wiki40bConfig(datasets.BuilderConfig):
+     """BuilderConfig for Wiki40B."""
+
+     def __init__(self, language=None, **kwargs):
+         """BuilderConfig for Wiki40B.
+
+         Args:
+           language: string, the language code for the Wiki40B dataset to use.
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(Wiki40bConfig, self).__init__(
+             name=str(language), description="Wiki40B dataset for {0}.".format(language), **kwargs
+         )
+         self.language = language
+
+
+ _VERSION = datasets.Version("1.1.0")
+
+
+ class Wiki40b(datasets.BeamBasedBuilder):
+     """Wiki40B: A Clean Wikipedia Dataset for Multilingual Language Modeling."""
+
+     BUILDER_CONFIGS = [
+         Wiki40bConfig(
+             version=_VERSION,
+             language=lang,
+         )  # pylint:disable=g-complex-comprehension
+         for lang in WIKIPEDIA_LANGUAGES
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "wikidata_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "version_id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+
+         lang = self.config.language
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepaths": os.path.join(_DATA_DIRECTORY, "train", "{}_examples-*".format(lang))},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepaths": os.path.join(_DATA_DIRECTORY, "dev", "{}_examples-*".format(lang))},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepaths": os.path.join(_DATA_DIRECTORY, "test", "{}_examples-*".format(lang))},
+             ),
+         ]
+
+     def _build_pcollection(self, pipeline, filepaths):
+         """Build PCollection of examples."""
+         import apache_beam as beam
+         import tensorflow as tf
+
+         logging.info("generating examples from = %s", filepaths)
+
+         def _extract_content(example):
+             """Extracts content from a TFExample."""
+             wikidata_id = example.features.feature["wikidata_id"].bytes_list.value[0].decode("utf-8")
+             text = example.features.feature["text"].bytes_list.value[0].decode("utf-8")
+             version_id = example.features.feature["version_id"].bytes_list.value[0].decode("utf-8")
+
+             # wikidata_id could be duplicated with different texts.
+             yield wikidata_id + text, {
+                 "wikidata_id": wikidata_id,
+                 "text": text,
+                 "version_id": version_id,
+             }
+
+         return (
+             pipeline
+             | beam.io.ReadFromTFRecord(filepaths, coder=beam.coders.ProtoCoder(tf.train.Example))
+             | beam.FlatMap(_extract_content)
+         )
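
wiki40b.py is a Beam-based builder: _split_generators points each split at TFRecord shards under _DATA_DIRECTORY, and _build_pcollection reads them with Apache Beam, decoding each tf.train.Example into the wikidata_id, text, and version_id features declared in _info(). A minimal usage sketch, assuming the datasets 1.0.0-era behavior where Beam-based builders take a beam_runner argument (DirectRunner is shown only for illustration; preparing the full English split this way is impractical):

# Minimal usage sketch. Assumes a datasets 1.0.0-era install with apache_beam
# and tensorflow available; DirectRunner is illustrative only.
from datasets import load_dataset

wiki = load_dataset("wiki40b", "en", beam_runner="DirectRunner")

# Each example has the three string features declared in _info().
example = wiki["train"][0]
print(example["wikidata_id"], example["version_id"])
print(example["text"][:200])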