Datasets: asi / wikitext_fr

Task Categories: sequence-modeling
Languages: fr-FR
Multilinguality: monolingual
Size Categories: unknown
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
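
A minimal loading sketch (the asi/wikitext_fr identifier is inferred from this page's owner and script name, and the config names are taken from the script below; adjust the identifier if the repository is named differently):

from datasets import load_dataset

# "wikitext-35" covers quality articles only; "wikitext-72" also includes good articles.
dataset = load_dataset("asi/wikitext_fr", "wikitext-35")
print(dataset["train"][0]["paragraph"])
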
wikitext_fr / wikitext_fr.py
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and Antoine SIMOULIN.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
15 """Wikitext-fr language modeling dataset consists of over 70 million tokens
16 extracted from the set of french Wikipedia articles that are classified as
17 "quality articles" or "good articles.". The aim is to replicate the English
18 benchmark."""
19
20
import os

import datasets


_CITATION = """\
@inproceedings{simoulin:hal-03265900,
  TITLE = {{Un mod{\`e}le Transformer G{\'e}n{\'e}ratif Pr{\'e}-entrain{\'e} pour le \_\_\_\_\_\_ fran{\c c}ais}},
  AUTHOR = {Simoulin, Antoine and Crabb{\'e}, Benoit},
  URL = {https://hal.archives-ouvertes.fr/hal-03265900},
  BOOKTITLE = {{Traitement Automatique des Langues Naturelles}},
  ADDRESS = {Lille, France},
  EDITOR = {Denis, Pascal and Grabar, Natalia and Fraisse, Amel and Cardon, R{\'e}mi and Jacquemin, Bernard and Kergosien, Eric and Balvet, Antonio},
  PUBLISHER = {{ATALA}},
  PAGES = {246-255},
  YEAR = {2021},
  KEYWORDS = {fran{\c c}ais. ; GPT ; G{\'e}n{\'e}ratif ; Transformer ; Pr{\'e}-entra{\^i}n{\'e}},
  PDF = {https://hal.archives-ouvertes.fr/hal-03265900/file/7.pdf},
  HAL_ID = {hal-03265900},
  HAL_VERSION = {v1},
}
"""

_DESCRIPTION = """\
The Wikitext-fr language modeling dataset consists of over 70 million tokens
extracted from the set of French Wikipedia articles that are classified as
"quality articles" or "good articles". The aim is to replicate the English
WikiText benchmark."""

_HOMEPAGE = "https://github.com/AntoineSimoulin/gpt-fr"

_LICENSE = "Creative Commons Attribution-ShareAlike License."

# The Hugging Face `datasets` library does not host the data; these URLs point to the
# original files. They can be an arbitrary nested dict/list of URLs
# (see the `_split_generators` method below).
_URLs = {
    "wikitext-35": "wikitext_35/wiki.zip",
    "wikitext-72": "wikitext_72/wiki.zip",
}


class WikitextFr(datasets.GeneratorBasedBuilder):
    """The Wikitext-fr language modeling dataset consists of over 70 million tokens
    extracted from the set of French Wikipedia articles that are classified as
    "quality articles" or "good articles". The aim is to replicate the English WikiText benchmark.
    """

    VERSION = datasets.Version("1.1.0")

    # This dataset has two configurations. Load one or the other with:
    # data = datasets.load_dataset('asi/wikitext_fr', 'wikitext-35')
    # data = datasets.load_dataset('asi/wikitext_fr', 'wikitext-72')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="wikitext-35", version=VERSION, description="This part covers quality articles only"),
        datasets.BuilderConfig(name="wikitext-72", version=VERSION, description="This part covers quality articles and good articles"),
    ]

    DEFAULT_CONFIG_NAME = "wikitext-35"  # A default configuration is not mandatory; define one only if it makes sense.

    def _info(self):
        features = datasets.Features({"paragraph": datasets.Value("string")})
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # There is no (input, target) pair to expose when as_supervised=True
            # is passed to builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method downloads/extracts the data and defines the splits depending on the
        # configuration selected by the user (available in self.config.name).
        # dl_manager is a datasets.download.DownloadManager that can download and extract URLs.
        # It accepts any nested list/dict and returns the same structure with each URL replaced
        # by a path to a local file; by default archives are extracted and the path to the
        # cached extraction folder is returned instead of the archive.
        my_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "wiki.train.tokens"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "wiki.test.tokens"),
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "wiki.valid.tokens"),
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(
        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    ):
        """Yields examples as (key, example) tuples."""
        # This method reads the file prepared in _split_generators and yields one example per line.
        # The `key` is here for legacy reasons (tfds) and is not important in itself.
        with open(filepath, "r", encoding="utf-8") as f:
            for id_, paragraph in enumerate(f):
                yield id_, {"paragraph": paragraph}
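
Each yielded example is one line of the raw tokens file, exposed as the "paragraph" field. A quick sanity check of the corpus size mentioned in the description (a sketch, assuming the asi/wikitext_fr identifier above; whitespace splitting only approximates the original tokenization):

from datasets import load_dataset

dataset = load_dataset("asi/wikitext_fr", "wikitext-72")
# Approximate the corpus size by splitting each paragraph on whitespace.
n_tokens = sum(len(example["paragraph"].split()) for example in dataset["train"])
print(f"~{n_tokens:,} whitespace-delimited tokens in the train split")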