jimypbr committed on
Commit
e017345
1 Parent(s): f0635ff

Delete wikipedia-bert-128.py


Don't need a data conversion script anymore

Files changed (1)
  1. wikipedia-bert-128.py +0 -140
wikipedia-bert-128.py DELETED
@@ -1,140 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """TODO: Add a description here."""
-
-
- import csv
- import glob
- import json
- import os
-
- import datasets
- import numpy as np
- from pathlib import Path
- import pyarrow as pa
- import pyarrow.parquet as pq
-
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {A great new dataset},
- author={huggingface, Inc.
- },
- year={2020}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = ""
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace dataset library don't host the datasets but only point to the original files
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLs = {
-     'pretraining': "https://huggingface.co/great-new-dataset-first_domain.zip",
-     # 'second_domain': "https://huggingface.co/great-new-dataset-second_domain.zip",
- }
-
-
- # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
- class WikipediaBERT128(datasets.GeneratorBasedBuilder):
-     """TODO: Short description of my dataset."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="pretraining", version=VERSION, description="This part of my dataset covers a first domain"),
-     ]
-
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-         print(self.config.name)
-         features = datasets.Features(
-             {
-                 "input_ids": datasets.Sequence(datasets.Value("int64")),
-                 "attention_mask": datasets.Sequence(datasets.Value("int64")),
-                 "token_type_ids": datasets.Sequence(datasets.Value("int64")),
-                 "labels": datasets.Sequence(datasets.Value("int64")),
-                 "next_sentence_label": datasets.Value("int64"),
-                 # These are the features of your dataset like images, labels ...
-             }
-         )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         files_to_download = list(glob.glob(os.path.join("data", "*.parquet")))
-         downloaded_files = dl_manager.download(files_to_download)
-         print(files_to_download)
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"data_files": downloaded_files}),
-         ]
-
-     def _generate_examples(self, data_files):
-         """ Yields examples as (key, example) tuples. """
-         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is here for legacy reason (tfds) and is not important in itself.
-         print(data_files)
-         for file in data_files:
-             with open(file, "rb") as f:
-                 parquet_file = pq.ParquetFile(f)
-                 for record_batch in parquet_file.iter_batches():
-                     batch: dict = pa.Table.from_batches([record_batch]).to_pydict()
-                     yield batch
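
With the pre-tokenized shards already stored as Parquet under data/, the repository presumably no longer needs this custom loading script: the datasets library ships a generic "parquet" builder that reads such files directly. A minimal sketch of the replacement workflow, assuming the data/*.parquet layout from the deleted _split_generators above and a single "train" split (both assumptions, not taken from this commit):

from datasets import load_dataset

# Load the Parquet shards directly with the built-in "parquet" builder;
# no repository-specific loading script is required.
dataset = load_dataset("parquet", data_files={"train": "data/*.parquet"})

# Columns should match the deleted script's features:
# input_ids, attention_mask, token_type_ids, labels, next_sentence_label.
print(dataset["train"].features)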