jimypbr committed on
Commit
055937b
1 Parent(s): 6deb9ae

Delete wikipedia-bert-512.py


Don't need dataset conversion script anymore

Files changed (1)
  1. wikipedia-bert-512.py +0 -164
wikipedia-bert-512.py DELETED
@@ -1,164 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """TODO: Add a description here."""
-
-
- import csv
- import json
- import os
-
- import numpy as np
- from pathlib import Path
- from tfrecord.reader import tfrecord_loader
- import datasets
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {A great new dataset},
- author={huggingface, Inc.
- },
- year={2020}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = ""
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace dataset library don't host the datasets but only point to the original files
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLs = {
-     'pretraining': "https://huggingface.co/great-new-dataset-first_domain.zip",
-     # 'second_domain': "https://huggingface.co/great-new-dataset-second_domain.zip",
- }
-
-
- # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
- class WikipediaBERT128(datasets.GeneratorBasedBuilder):
-     """TODO: Short description of my dataset."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="pretraining", version=VERSION, description="This part of my dataset covers a first domain"),
-     ]
-
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-         print(self.config.name)
-         features = datasets.Features(
-             {
-                 "input_ids": datasets.Sequence(datasets.Value("int64")),
-                 "attention_mask": datasets.Sequence(datasets.Value("int64")),
-                 "token_type_ids": datasets.Sequence(datasets.Value("int64")),
-                 "labels": datasets.Sequence(datasets.Value("int64")),
-                 "next_sentence_label": datasets.Value("int64"),
-                 # These are the features of your dataset like images, labels ...
-             }
-         )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "data_files": self.config.data_files["train"],
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, data_files):
-         """ Yields examples as (key, example) tuples. """
-         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is here for legacy reason (tfds) and is not important in itself.
-
-         # Convert from tfrecord files
-         TFRECORD_KEYS = (  # Torch Model Keys
-             'input_ids',            # input_ids : tokens after masking
-             'input_mask',           # attention_mask : 1 if padding token, 0 otherwise
-             'segment_ids',          # token_type_ids : sentence 0 or 1
-             'masked_lm_positions',  # masked_lm_positions : position of masked tokens in input_ids
-             'masked_lm_ids',        # masked_lm_labels=None : label of masked tokens with padding as 0.
-             'next_sentence_labels'  # next_sentence_label=None : 1 if next sentence, 0 otherwise
-         )
-         highest_id_ = -1
-         for rec in data_files:
-             reader = tfrecord_loader(rec, None, list(TFRECORD_KEYS))
-             for id_, d in enumerate(reader, start=highest_id_+1):
-                 highest_id_ = id_
-                 input_ids = d["input_ids"]
-                 labels = np.ones_like(input_ids) * -100
-                 masked_lm_positions = d["masked_lm_positions"]
-                 masked_lm_labels = d["masked_lm_ids"]
-                 masked_lm_positions_ = masked_lm_positions[masked_lm_positions != 0]
-                 masked_lm_labels_ = masked_lm_labels[:len(masked_lm_positions_)]
-                 labels[masked_lm_positions_] = masked_lm_labels_
-
-                 yield id_, {
-                     "input_ids": input_ids,
-                     "attention_mask": d["input_mask"],
-                     "token_type_ids": d["segment_ids"],
-                     "labels": labels,
-                     "next_sentence_label": d["next_sentence_labels"]
-                 }
-
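For context, the non-template part of the deleted script is `_generate_examples`: it reads BERT pretraining TFRecords and rebuilds the masked-LM `labels` column by filling a full-length array with -100 (the value ignored by the Hugging Face cross-entropy loss), dropping the zero padding from `masked_lm_positions`, and scattering the corresponding `masked_lm_ids` back into place. A minimal standalone sketch of that step, using hypothetical toy values rather than real record contents:

import numpy as np

# Toy stand-ins for one TFRecord example (values are illustrative, not real data).
input_ids = np.array([101, 2054, 103, 2003, 103, 102, 0, 0])  # 103 = [MASK], 0 = padding
masked_lm_positions = np.array([2, 4, 0])   # zero-padded positions of masked tokens
masked_lm_ids = np.array([2023, 1037, 0])   # zero-padded original token ids

# Start with -100 everywhere so unmasked positions are ignored by the MLM loss.
labels = np.ones_like(input_ids) * -100

# Drop the zero padding, then scatter the true token ids into their positions.
positions = masked_lm_positions[masked_lm_positions != 0]
labels[positions] = masked_lm_ids[: len(positions)]

print(labels)  # [-100 -100 2023 -100 1037 -100 -100 -100]

Before this commit, the script would have been used as a local loading script, roughly `datasets.load_dataset("wikipedia-bert-512.py", "pretraining", data_files={"train": [...]})` (a plausible invocation, not shown in the commit), which is the conversion step the commit message says is no longer needed.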