Madjakul committed on
Commit
bc19630
1 Parent(s): 85c7533

Delete halvest_r.py

Files changed (1)
  1. halvest_r.py +0 -212
halvest_r.py DELETED
@@ -1,212 +0,0 @@
- # halvest-r.py
-
- import collections
- import gzip
- import json
- import os
-
- import datasets
-
- logger = datasets.logging.get_logger(__name__)
-
- _DESCRIPTION = "HALvest Raw"
- _URL = "https://huggingface.co/datasets/Madjakul/halvest"
- _LICENSE = """
- The license terms for HALvest-R strictly follow those of HAL.
- Please refer to the license below when using this dataset.
- - HAL license: https://doc.archives-ouvertes.fr/en/legal-aspects/
- The corpus is extracted from HAL's open archive, which distributes scientific \
- publications following open access principles. The corpus is made up of both \
- Creative Commons-licensed and copyrighted documents (distribution authorized on \
- HAL by the publisher). This must be considered prior to using this dataset for any \
- purpose other than training deep learning models, data mining, etc. We do not own \
- any of the text from which these data have been extracted.
- """
- _CITATION = """
- @software{almanach_halvest_2024,
-     author = {Kulumba, Francis and Antoun, Wissam and Vimont, Guillaume and Romary, Laurent},
-     title = {HALvest: Open Scientific Papers Harvested from HAL.},
-     month = {April},
-     year = {2024},
-     company = {Almanach},
-     url = {https://github.com/Madjakul/HALvesting}
- }
- """
- _BASE_DATA_PATH = "{language}/"
- _BASE_CHECKSUM_FILENAME = "checksum.sha256"
-
-
- def _languages():
-     """Return a sorted dictionary mapping ISO 639 language codes to language names."""
-     langs = {
-         "Albanian": "sq",
-         "Arabic": "ar",
-         "Armenian": "hy",
-         "Azerbaijani": "az",
-         "Basque": "eu",
-         "Bosnian": "bs",
-         "Breton": "br",
-         "Bulgarian": "bg",
-         "Catalan": "ca",
-         "Chinese": "zh",
-         "Corsican": "co",
-         "Croatian": "hr",
-         "Czech": "cs",
-         "Danish": "da",
-         "English": "en",
-         "Esperanto": "eo",
-         "Estonian": "et",
-         "Filipino": "tl",
-         "Finnish": "fi",
-         "French": "fr",
-         "Galician": "gl",
-         "German": "de",
-         "Greek": "el",
-         "Guarani": "gn",
-         "Hebrew": "he",
-         "Hindi": "hi",
-         "Hungarian": "hu",
-         "Indonesian": "id",
-         "Interlingue": "ie",
-         "Italian": "it",
-         "Japanese": "ja",
-         "Kazakh": "kk",
-         "Korean": "ko",
-         "Lithuanian": "lt",
-         "Macedonian": "mk",
-         "Marathi": "mr",
-         "Norwegian": "no",
-         "Occitan": "oc",
-         "Persian": "fa",
-         "Polish": "pl",
-         "Portuguese": "pt",
-         "Romanian": "ro",
-         "Russian": "ru",
-         "Serbian": "sr",
-         "Slovak": "sk",
-         "Slovenian": "sl",
-         "Spanish": "es",
-         "Swahili": "sw",
-         "Swedish": "sv",
-         "Tamil": "ta",
-         "Tetum": "tet",
-         "Thai": "th",
-         "Tibetan": "bo",
-         "Turkish": "tr",
-         "Turkmen": "tk",
-         "Ukrainian": "uk",
-         "Vietnamese": "vi",
-     }
-     langs = {v: k for k, v in langs.items()}
-     return collections.OrderedDict(sorted(langs.items()))
-
-
- class HALvest_RConfig(datasets.BuilderConfig):
-     """HALvest-R builder config.
-     Parameters
-     ----------
-     language: str
-         ISO 639 language code.
-     Attributes
-     ----------
-     base_data_path: str
-         f"{self.language}/".
-     """
-
-     def __init__(self, language: str, **kwargs):
-         if language not in _languages():
-             raise ValueError("Invalid language: %s" % language)
-
-         name = language
-         description = f"""
-         Raw {_languages()[language]} HALvest-R dataset from February 2024.
-         """
-         super(HALvest_RConfig, self).__init__(
-             name=name, description=description, **kwargs
-         )
-         self.language = language
-         self.base_data_path = _BASE_DATA_PATH.format(language=language)
-
-
131
- class HALvest_R(datasets.GeneratorBasedBuilder):
132
- """HALvest Raw: Open Scientific Papers Harvested from HAL (Unfiltered)."""
133
-
134
- BUILDER_CONFIGS = [
135
- HALvest_RConfig(language=language, version=datasets.Version("0.1.0"))
136
- for language in _languages()
137
- ]
138
- BUILDER_CONFIG_CLASS = HALvest_RConfig
139
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "halid": datasets.Value("string"),
-                     "lang": datasets.Value("string"),
-                     "domain": datasets.Sequence(datasets.Value("string")),
-                     "timestamp": datasets.Value("string"),
-                     "year": datasets.Value("string"),
-                     "url": datasets.Value("string"),
-                     "text": datasets.Value("string"),
-                     "token_count": datasets.Value("int32"),
-                     "rps_doc_frac_all_caps_words": datasets.Value("float64"),
-                     "rps_doc_frac_lines_end_with_ellipsis": datasets.Value("float64"),
-                     "rps_doc_frac_no_alph_words": datasets.Value("float64"),
-                     "rps_doc_lorem_ipsum": datasets.Value("float64"),
-                     "rps_doc_mean_word_length": datasets.Value("float64"),
-                     "rps_doc_stop_word_fraction": datasets.Value("float64"),
-                     "rps_doc_symbol_to_word_ratio": datasets.Value("float64"),
-                     "rps_doc_frac_unique_words": datasets.Value("float64"),
-                     "rps_doc_unigram_entropy": datasets.Value("float64"),
-                     "rps_doc_word_count": datasets.Value("int64"),
-                     "doc_frac_lines_ending_with_terminal_punctution_mark": datasets.Value("float64"),
-                     "rps_lines_frac_start_with_bulletpoint": datasets.Value("float64"),
-                     "rps_doc_num_sentences": datasets.Value("int64"),
-                     "rps_frac_chars_in_dupe_5grams": datasets.Value("float64"),
-                     "rps_frac_chars_in_dupe_6grams": datasets.Value("float64"),
-                     "rps_frac_chars_in_dupe_7grams": datasets.Value("float64"),
-                     "rps_frac_chars_in_dupe_8grams": datasets.Value("float64"),
-                     "rps_frac_chars_in_dupe_9grams": datasets.Value("float64"),
-                     "rps_frac_chars_in_dupe_10grams": datasets.Value("float64"),
-                     "kenlm_pp": datasets.Value("float64"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_URL,
-             citation=_CITATION,
-             license=_LICENSE,
-         )
-
-     def _split_generators(self, dl_manager):
-         checksum_path = os.path.join(
-             self.config.base_data_path, _BASE_CHECKSUM_FILENAME
-         )
-         checksum_file = dl_manager.download(checksum_path)
-
-         with open(checksum_file, encoding="utf-8") as f:
-             data_filenames = [line.split("\t")[1] for line in f if line]
-             data_urls = [
-                 os.path.join(self.config.base_data_path, data_filename.rstrip("\n"))
-                 for data_filename in data_filenames
-             ]
-
-         downloaded_files = dl_manager.download(
-             [url for url in data_urls if url.endswith(".gz")]
-         )
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files}
-             )
-         ]
-
-     def _generate_examples(self, filepaths):
-         id_ = 0
-         for filepath in filepaths:
-             logger.info("Generating examples from %s", filepath)
-             with gzip.open(filepath, "rt", encoding="utf-8") as f:
-                 for line in f:
-                     js_line = json.loads(line)
-                     yield id_, js_line
-                     id_ += 1
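
For reference, a minimal sketch of how a loader script like this is consumed (not part of the deleted file): the repository id comes from _URL above, each ISO 639 code returned by _languages() is a valid config name, and trust_remote_code=True is assumed here because recent versions of the datasets library require it for script-based datasets.

from datasets import load_dataset

# Load the French configuration built by HALvest_RConfig("fr").
halvest_fr = load_dataset(
    "Madjakul/halvest",      # repository referenced by _URL
    "fr",                    # one builder config per ISO 639 code
    split="train",           # the script only defines a train split
    trust_remote_code=True,  # required for script-based loaders (assumption)
)
print(halvest_fr[0]["halid"], halvest_fr[0]["token_count"])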
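Likewise, a hedged sketch of the on-disk layout the script expects, inferred from _split_generators and _generate_examples: per-language directories holding a tab-separated checksum.sha256 ("<digest>\t<shard>") plus gzipped JSON-Lines shards, one document per line. The paths below are illustrative, not taken from the repository.

import gzip
import json

# Parse checksum.sha256 the way _split_generators does: the filename is
# the second tab-separated field, and only .gz entries are downloaded.
with open("fr/checksum.sha256", encoding="utf-8") as f:
    filenames = [line.split("\t")[1].rstrip("\n") for line in f if line]
shards = [name for name in filenames if name.endswith(".gz")]

# Read the first shard the way _generate_examples does: each line is one
# JSON document whose keys match the features declared in _info.
with gzip.open(f"fr/{shards[0]}", "rt", encoding="utf-8") as f:
    first_doc = json.loads(next(f))
print(first_doc["halid"], first_doc["year"])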