abarbosa committed on
Commit 4845aec
1 Parent(s): c72ccfc

include dataset generator script

Files changed (2)
  1. .gitattributes +1 -0
  2. aes_enem_dataset.py +520 -0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ sourceA.tar.gz filter=lfs diff=lfs merge=lfs -text
aes_enem_dataset.py ADDED
@@ -0,0 +1,520 @@
1
+ # Copyright 2023 Andre Barbosa, Igor Caetano Silveira & The HuggingFace Datasets Authors
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # TODO: Address all TODOs and remove all explanatory comments
15
+ """Loading script for the AES ENEM dataset: ENEM-style essays with per-competency grades and grader comments, parsed from HTML pages."""
16
+
17
+
18
+ import csv
19
+ import math
20
+ import os
21
+ import re
22
+
23
+ import datasets
24
+ import numpy as np
25
+ import pandas as pd
26
+ from bs4 import BeautifulSoup
27
+ from tqdm.auto import tqdm
28
+
29
+ np.random.seed(42) # Fix the RNG seed so the prompt shuffling in _generate_splits is reproducible
30
+
31
+ # TODO: Add BibTeX citation
32
+ # Find for instance the citation on arxiv or on the dataset repo/website
33
+ _CITATION = """\
34
+ TODO
35
+ """
36
+
37
+ # TODO: Add description of the dataset here
38
+ # You can copy an official description
39
+ _DESCRIPTION = """\
40
+ Automated Essay Scoring (AES) dataset of ENEM-style essays, each annotated with per-competency grades and grader comments.
41
+ """
42
+
43
+ # TODO: Add a link to an official homepage for the dataset here
44
+ _HOMEPAGE = ""
45
+
46
+ # TODO: Add the licence for the dataset here if you can find it
47
+ _LICENSE = ""
48
+
49
+ _URLS = {
50
+ "sourceA": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceA.tar.gz?download=true",
51
+ }
52
+
53
+
54
+ PROMPTS_TO_IGNORE = [
55
+ "brasileiros-tem-pessima-educacao-argumentativa-segundo-cientista",
56
+ "carta-convite-discutir-discriminacao-na-escola",
57
+ "informacao-no-rotulo-de-produtos-transgenicos",
58
+ ]
59
+ CSV_HEADER = [
60
+ "id",
61
+ "id_prompt",
62
+ "title",
63
+ "essay",
64
+ "grades",
65
+ "general",
66
+ "specific",
67
+ "essay_year",
68
+ ]
69
+
70
+
71
+ class AesEnemDataset(datasets.GeneratorBasedBuilder):
72
+ """Automated Essay Scoring dataset built from ENEM-style essays and their per-competency grades."""
73
+
74
+ VERSION = datasets.Version("0.0.1")
75
+
76
+ # This is an example of a dataset with multiple configurations.
77
+ # If you don't want/need to define several sub-sets in your dataset,
78
+ # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
79
+
80
+ # If you need to make complex sub-parts in the datasets with configurable options
81
+ # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
82
+ # BUILDER_CONFIG_CLASS = MyBuilderConfig
83
+
84
+ # You will be able to load one or the other configurations in the following list with
85
+ # data = datasets.load_dataset('my_dataset', 'first_domain')
86
+ # data = datasets.load_dataset('my_dataset', 'second_domain')
87
+ BUILDER_CONFIGS = [
88
+ datasets.BuilderConfig(name="sourceA", version=VERSION, description="TODO"),
89
+ datasets.BuilderConfig(
90
+ name="sourceB",
91
+ version=VERSION,
92
+ description="TODO",
93
+ ),
94
+ ]
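+ # Typical usage (a sketch, assuming this script is hosted as kamel-usp/aes_enem_dataset):
+ #   data = datasets.load_dataset("kamel-usp/aes_enem_dataset", "sourceA")
+ # Note that only the "sourceA" configuration is fully wired up below; "sourceB" is
+ # declared but has no features, download URL, or parsing logic yet.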
95
+
96
+ DEFAULT_CONFIG_NAME = "sourceA" # It's not mandatory to have a default configuration. Just use one if it makes sense.
97
+
98
+ def _info(self):
99
+ # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
100
+ if (
101
+ self.config.name == "sourceA"
102
+ ): # This is the name of the configuration selected in BUILDER_CONFIGS above
103
+ features = datasets.Features(
104
+ {
105
+ "id": datasets.Value("string"),
106
+ "id_prompt": datasets.Value("string"),
107
+ "essay_title": datasets.Value("string"),
108
+ "essay_text": datasets.Value("string"),
109
+ "grades": datasets.Sequence(datasets.Value("int16")),
110
+ "essay_year": datasets.Value("int16"),
111
+ }
112
+ )
113
+ return datasets.DatasetInfo(
114
+ # This is the description that will appear on the datasets page.
115
+ description=_DESCRIPTION,
116
+ # This defines the different columns of the dataset and their types
117
+ features=features, # Here we define them above because they are different between the two configurations
118
+ # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
119
+ # specify them. They'll be used if as_supervised=True in builder.as_dataset.
120
+ # supervised_keys=("sentence", "label"),
121
+ # Homepage of the dataset for documentation
122
+ homepage=_HOMEPAGE,
123
+ # License for the dataset if available
124
+ license=_LICENSE,
125
+ # Citation for the dataset
126
+ citation=_CITATION,
127
+ )
128
+
129
+ def _split_generators(self, dl_manager):
130
+ # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
131
+ # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
132
+
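+ # Overall flow of this builder: download and extract the tarball, parse the raw
+ # HTML essays into a single sourceA.csv (via HTMLParser), then split that CSV by
+ # prompt into train.csv/validation.csv/test.csv, which feed _generate_examples.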
133
+ urls = _URLS[self.config.name]
134
+ extracted_files = dl_manager.download_and_extract({"sourceA": urls})
135
+ html_parser = self._process_html_files(extracted_files)
136
+ self._generate_splits(html_parser.sourceA)
137
+ return [
138
+ datasets.SplitGenerator(
139
+ name=datasets.Split.TRAIN,
140
+ # These kwargs will be passed to _generate_examples
141
+ gen_kwargs={
142
+ "filepath": os.path.join(
143
+ extracted_files["sourceA"], "sourceA", "train.csv"
144
+ ),
145
+ "split": "train",
146
+ },
147
+ ),
148
+ datasets.SplitGenerator(
149
+ name=datasets.Split.VALIDATION,
150
+ # These kwargs will be passed to _generate_examples
151
+ gen_kwargs={
152
+ "filepath": os.path.join(
153
+ extracted_files["sourceA"], "sourceA", "validation.csv"
154
+ ),
155
+ "split": "validation",
156
+ },
157
+ ),
158
+ datasets.SplitGenerator(
159
+ name=datasets.Split.TEST,
160
+ # These kwargs will be passed to _generate_examples
161
+ gen_kwargs={
162
+ "filepath": os.path.join(
163
+ extracted_files["sourceA"], "sourceA", "test.csv"
164
+ ),
165
+ "split": "test",
166
+ },
167
+ ),
168
+ ]
169
+
170
+ def _process_html_files(self, paths_dict):
171
+ html_parser = HTMLParser(paths_dict)
172
+ html_parser.parse()
173
+ return html_parser
174
+
175
+ def _generate_splits(self, filepath: str, train_size=0.7):
176
+ def map_year(year):
177
+ if year <= 2017:
178
+ return "<=2017"
179
+ return str(year)
180
+
181
+ def normalize_grades(grades):
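+ # Keep only essays graded on the standard ENEM scale (each competency scored in
+ # steps of 40 from 0 to 200): rows containing 50/100/150 or 0.5/1.0/1.5 are
+ # dropped, "0.0" and "20" are remapped, and the last element is replaced by the
+ # sum of the per-competency grades.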
182
+ grades = grades.strip("[]").split(", ")
183
+ grade_mapping = {"0.0": 0, "20": 40}
184
+
185
+ # We will remove the rows that match the criteria below
186
+ if any(
187
+ single_grade in grades
188
+ for single_grade in ["50", "100", "150", "0.5", "1.0", "1.5"]
189
+ ):
190
+ return None
191
+ # Use the mapping to transform grades, ignoring the last grade
192
+ mapped_grades = [
193
+ int(grade_mapping.get(grade_concept, grade_concept))
194
+ for grade_concept in grades[:-1]
195
+ ]
196
+
197
+ # Calculate and append the sum of the mapped grades as the last element
198
+ mapped_grades.append(sum(mapped_grades))
199
+ return mapped_grades
200
+
201
+ df = pd.read_csv(filepath)
202
+ df["general"] = df["general"].fillna("")
203
+ df["essay_year"] = df["essay_year"].astype("int")
204
+ df["mapped_year"] = df["essay_year"].apply(map_year)
205
+ df["grades"] = df["grades"].apply(normalize_grades)
206
+ df = df.dropna()
207
+ buckets = df.groupby("mapped_year")["id_prompt"].unique().to_dict()
208
+ df.drop('mapped_year', axis=1, inplace=True)
209
+ train_set = []
210
+ val_set = []
211
+ test_set = []
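+ # Split at the prompt level rather than the essay level, so every essay written
+ # for a given prompt ends up in exactly one of train/validation/test; prompts are
+ # shuffled inside each essay-year bucket to spread years across the splits.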
212
+ for year, prompts in buckets.items():
213
+ np.random.shuffle(prompts)
214
+ num_prompts = len(prompts)
215
+
216
+ # With three or fewer prompts in a year bucket, assign one prompt to each of train/validation/test (assumes at least three prompts)
217
+ if num_prompts <= 3:
218
+ train_set.append(df[df["id_prompt"].isin([prompts[0]])])
219
+ val_set.append(df[df["id_prompt"].isin([prompts[1]])])
220
+ test_set.append(df[df["id_prompt"].isin([prompts[2]])])
221
+ continue
222
+
223
+ # Determine the number of prompts for each set based on train_size and remaining prompts
224
+ num_train = math.floor(num_prompts * train_size)
225
+ num_val_test = num_prompts - num_train
226
+ num_val = num_val_test // 2
227
+ num_test = num_val_test - num_val
228
+
229
+ # Assign prompts to each set
230
+ train_set.append(df[df["id_prompt"].isin(prompts[:num_train])])
231
+ val_set.append(
232
+ df[df["id_prompt"].isin(prompts[num_train : (num_train + num_val)])]
233
+ )
234
+ test_set.append(
235
+ df[
236
+ df["id_prompt"].isin(
237
+ prompts[
238
+ (num_train + num_val) : (num_train + num_val + num_test)
239
+ ]
240
+ )
241
+ ]
242
+ )
243
+
244
+ # Convert lists of groups to DataFrames
245
+ train_df = pd.concat(train_set)
246
+ val_df = pd.concat(val_set)
247
+ test_df = pd.concat(test_set)
248
+
249
+ # Data Validation Assertions
250
+ assert (
251
+ len(set(train_df["id_prompt"]).intersection(set(val_df["id_prompt"]))) == 0
252
+ ), "Overlap between train and val id_prompt"
253
+ assert (
254
+ len(set(train_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
255
+ ), "Overlap between train and test id_prompt"
256
+ assert (
257
+ len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
258
+ ), "Overlap between val and test id_prompt"
259
+ dirname = os.path.dirname(filepath)
260
+ train_df.to_csv(f"{dirname}/train.csv", index=False)
261
+ val_df.to_csv(f"{dirname}/validation.csv", index=False)
262
+ test_df.to_csv(f"{dirname}/test.csv", index=False)
263
+
264
+ # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
265
+ def _generate_examples(self, filepath, split):
266
+ # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
267
+ # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
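+ # Each split CSV written by _generate_splits is read row by row: the header line
+ # is skipped explicitly, and the grades column (stored as a string such as
+ # "[120, 120, 120, 120, 120, 600]") is parsed back into a list of values.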
268
+ if self.config.name == "sourceA":
269
+ with open(filepath, encoding="utf-8") as csvfile:
270
+ next(csvfile)
271
+ csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER)
272
+ for i, row in enumerate(csv_reader):
273
+ grades = row["grades"].strip("[]").split(", ")
274
+ yield i, {
275
+ "id": row["id"],
276
+ "id_prompt": row["id_prompt"],
277
+ "essay_title": row["title"],
278
+ "essay_text": row["essay"],
279
+ "grades": grades,
280
+ "essay_year": row["essay_year"],
281
+ }
282
+
283
+
284
+ class HTMLParser:
285
+ def __init__(self, paths_dict):
286
+ self.paths_dict = paths_dict
287
+ self.sourceA = None
288
+ self.sourceB = None
289
+
290
+ def apply_soup(self, filepath, num):
291
+ # Reads the saved HTML file at os.path.join(filepath, num) and returns its BeautifulSoup object
292
+ with open(os.path.join(filepath, num), "r", encoding="utf8") as file:
293
+ conteudo = file.read()
294
+ soup = BeautifulSoup(conteudo, "html.parser")
295
+ return soup
296
+
297
+ @staticmethod
298
+ def _get_title(soup):
299
+ title = soup.find("div", class_="container-composition")
300
+ if title is None:
301
+ title = soup.find("h1", class_="pg-color10").get_text()
302
+ else:
303
+ title = title.h2.get_text()
304
+ title = title.replace("\xa0", "")
305
+ return title
306
+
307
+ @staticmethod
308
+ def _get_grades(soup):
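+ # Two page layouts are handled: a "results-table" section with six integer scores
+ # (five competencies plus their total), or a fallback table where five
+ # comma-decimal grades are read from cells and checked against the printed total.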
309
+ grades = soup.find("section", class_="results-table")
310
+ final_grades = []
311
+ if grades is not None:
312
+ grades = grades.find_all("span", class_="points")
313
+ assert len(grades) == 6, f"Missing grades: {len(grades)}"
314
+ for single_grade in grades:
315
+ grade = int(single_grade.get_text())
316
+ final_grades.append(grade)
317
+ assert final_grades[-1] == sum(
318
+ final_grades[:-1]
319
+ ), "Grading sum is not making sense"
320
+ else:
321
+ grades = soup.find("div", class_="redacoes-corrigidas pg-bordercolor7")
322
+ grades_sum = float(
323
+ soup.find("th", class_="noBorder-left").get_text().replace(",", ".")
324
+ )
325
+ grades = grades.find_all("td")[:10]
326
+ for idx in range(1, 10, 2):
327
+ grade = float(grades[idx].get_text().replace(",", "."))
328
+ final_grades.append(grade)
329
+ assert grades_sum == sum(final_grades), "Grading sum is not making sense"
330
+ final_grades.append(grades_sum)
331
+ return final_grades
332
+
333
+ @staticmethod
334
+ def _get_general_comment(soup):
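+ # Tries several page layouts in turn, falling back through differently styled
+ # <p> elements until a non-empty general comment is found; returns "" otherwise.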
335
+ def get_general_comment_aux(soup):
336
+ result = soup.find("article", class_="list-item c")
337
+ if result is not None:
338
+ result = result.find("div", class_="description")
339
+ return result.get_text()
340
+ else:
341
+ result = soup.find("p", style="margin: 0px 0px 11px;")
342
+ if result is not None:
343
+ return result.get_text()
344
+ else:
345
+ result = soup.find("p", style="margin: 0px;")
346
+ if result is not None:
347
+ return result.get_text()
348
+ else:
349
+ result = soup.find(
350
+ "p", style="margin: 0px; text-align: justify;"
351
+ )
352
+ if result is not None:
353
+ return result.get_text()
354
+ else:
355
+ return ""
356
+
357
+ text = soup.find("div", class_="text")
358
+ if text is not None:
359
+ text = text.find("p")
360
+ if (text is None) or (len(text.get_text()) < 2):
361
+ return get_general_comment_aux(soup)
362
+ return text.get_text()
363
+ else:
364
+ return get_general_comment_aux(soup)
365
+
366
+ @staticmethod
367
+ def _get_specific_comment(soup):
368
+ result = soup.find("div", class_="text")
369
+ if result is not None:
370
+ result = result.find_all("li")
371
+ cms = []
372
+ if result != []:
373
+ for item in result:
374
+ text = item.get_text()
375
+ if text != "\xa0":
376
+ cms.append(text)
377
+ return cms
378
+ else:
379
+ result = soup.find("div", class_="text").find_all("p")
380
+ for item in result:
381
+ text = item.get_text()
382
+ if text != "\xa0":
383
+ cms.append(text)
384
+ return cms
385
+ else:
386
+ result = soup.find_all("article", class_="list-item c")
387
+ if len(result) < 2:
388
+ return ["First if"]
389
+ result = result[1].find_all("p")
390
+ cms = []
391
+ for item in result:
392
+ text = item.get_text()
393
+ if text != "\xa0":
394
+ cms.append(text)
395
+ return cms
396
+
397
+ @staticmethod
398
+ def _get_essay(soup):
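+ # Corrector markup (spans colored #00b050 and spans with class "certo") is
+ # stripped before the paragraph texts are collected, so that, as far as possible,
+ # only the original essay paragraphs are returned.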
399
+ essay = soup.find("div", class_="text-composition")
400
+ if essay is not None:
401
+ essay = essay.find_all("p")
402
+ for f in essay:
403
+ while f.find("span", style="color:#00b050") is not None:
404
+ f.find("span", style="color:#00b050").decompose()
405
+ while f.find("span", class_="certo") is not None:
406
+ f.find("span", class_="certo").decompose()
407
+ result = []
408
+ for paragraph in essay:
409
+ result.append(paragraph.get_text())
410
+ return result
411
+ else:
412
+ essay = soup.find("div", {"id": "texto"})
413
+ essay.find("section", class_="list-items").decompose()
414
+ essay = essay.find_all("p")
415
+ for f in essay:
416
+ while f.find("span", class_="certo") is not None:
417
+ f.find("span", class_="certo").decompose()
418
+ result = []
419
+ for paragraph in essay:
420
+ result.append(paragraph.get_text())
421
+ return result
422
+
423
+ @staticmethod
424
+ def _get_essay_year(soup):
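+ # The essay year is recovered from the page text: the first occurrence of
+ # "redações corrigidas - <month>/<year>" is located and its 4-digit year returned.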
425
+ pattern = r"redações corrigidas - \w+/\d+"
426
+ first_occurrence = re.search(pattern, soup.get_text().lower())
427
+ matched_url = first_occurrence.group(0) if first_occurrence else None
428
+ year_pattern = r"\d{4}"
429
+ return re.search(year_pattern, matched_url).group(0)
430
+
431
+ def _clean_title(self, title):
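+ # Recursively strips "[...]" annotations from the title and collapses the double
+ # spaces left behind.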
432
+ smaller_index = title.find("[")
433
+ if smaller_index == -1:
434
+ return title
435
+ else:
436
+ bigger_index = title.find("]")
437
+ new_title = title[:smaller_index] + title[bigger_index + 1 :]
438
+ return self._clean_title(new_title.replace(" ", " "))
439
+
440
+ def _clean_list(self, list):
441
+ if list == []:
442
+ return []
443
+ else:
444
+ new_list = []
445
+ for phrase in list:
446
+ phrase = (
447
+ phrase.replace("\xa0", "").replace(" ,", ",").replace(" .", ".")
448
+ )
449
+ while phrase.find(" ") != -1:
450
+ phrase = phrase.replace(" ", " ")
451
+ if len(phrase) > 1:
452
+ new_list.append(phrase)
453
+ return new_list
454
+
455
+ def parse(self):
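+ # Expected layout of the extracted archive: one folder per prompt, each holding a
+ # Prompt.html page plus one HTML file per essay; every essay is parsed and written
+ # as a row of sourceA.csv following CSV_HEADER.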
456
+ for key, filepath in self.paths_dict.items():
457
+ full_path = os.path.join(filepath, key)
458
+ if key == "sourceA":
459
+ self.sourceA = f"{full_path}/sourceA.csv"
460
+ with open(
461
+ f"{full_path}/{key}.csv", "w", newline="", encoding="utf8"
462
+ ) as final_file:
463
+ writer = csv.writer(final_file)
464
+ writer.writerow(CSV_HEADER)
465
+ sub_folders = [
466
+ name for name in os.listdir(full_path) if not name.endswith(".csv")
467
+ ]
468
+ essay_id = 0
469
+ essay_title = None
470
+ essay_text = None
471
+ essay_grades = None
472
+ general_comment = None
473
+ specific_comment = None
474
+ essay_year = None
475
+ for prompt_folder in tqdm(
476
+ sub_folders,
477
+ desc=f"Parsing HTML files from: {key}",
478
+ total=len(sub_folders),
479
+ ):
480
+ if prompt_folder in PROMPTS_TO_IGNORE:
481
+ continue
482
+ prompt = os.path.join(full_path, prompt_folder)
483
+ prompt_essays = [name for name in os.listdir(prompt)]
484
+ prompt_essays = prompt_essays[:-1]
485
+ essay_year = HTMLParser._get_essay_year(
486
+ self.apply_soup(prompt, "Prompt.html")
487
+ )
488
+ for essay in prompt_essays:
489
+ soup_text = self.apply_soup(prompt, essay)
490
+ if essay == "Prompt.html":
491
+ continue
492
+ essay_title = self._clean_title(
493
+ HTMLParser._get_title(soup_text).replace(";", ",")
494
+ )
495
+ essay_grades = HTMLParser._get_grades(soup_text)
496
+ general_comment = HTMLParser._get_general_comment(
497
+ soup_text
498
+ ).strip()
499
+ specific_comment = HTMLParser._get_specific_comment(soup_text)
500
+ if general_comment in specific_comment:
501
+ specific_comment.remove(general_comment)
502
+ if (len(specific_comment) > 1) and (
503
+ len(specific_comment[0]) < 2
504
+ ):
505
+ specific_comment = specific_comment[1:]
506
+ essay_text = self._clean_list(HTMLParser._get_essay(soup_text))
507
+ specific_comment = self._clean_list(specific_comment)
508
+ writer.writerow(
509
+ [
510
+ essay,
511
+ prompt_folder,
512
+ essay_title,
513
+ essay_text,
514
+ essay_grades,
515
+ general_comment,
516
+ specific_comment,
517
+ essay_year,
518
+ ]
519
+ )
520
+ essay_id += 1