Tasks: Text Classification
Modalities: Text
Formats: parquet
Languages: Portuguese
Size: 1K - 10K
# Copyright 2023 Andre Barbosa, Igor Caetano Silveira & The HuggingFace Datasets Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""
import csv
import math
import os
import re

import datasets
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from tqdm.auto import tqdm

np.random.seed(42)  # Set the seed
# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
TODO
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
_URLS = {
    "sourceA": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceA.tar.gz?download=true",
}
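
# Prompts whose essays are skipped entirely when parsing the HTML dump (see HTMLParser.parse below).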
PROMPTS_TO_IGNORE = [
    "brasileiros-tem-pessima-educacao-argumentativa-segundo-cientista",
    "carta-convite-discutir-discriminacao-na-escola",
    "informacao-no-rotulo-de-produtos-transgenicos",
]
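
# Column order of the intermediate CSV files written by HTMLParser.parse() and read back
# by _generate_splits and _generate_examples.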
CSV_HEADER = [
    "id",
    "id_prompt",
    "title",
    "essay",
    "grades",
    "general",
    "specific",
    "essay_year",
]


class AesEnemDataset(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("0.0.1")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig.
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="sourceA", version=VERSION, description="TODO"),
        datasets.BuilderConfig(
            name="sourceB",
            version=VERSION,
            description="TODO",
        ),
    ]

    DEFAULT_CONFIG_NAME = "sourceA"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

    def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object, which contains the information and feature types for the dataset
        if (
            self.config.name == "sourceA"
        ):  # This is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "id_prompt": datasets.Value("string"),
                    "essay_title": datasets.Value("string"),
                    "essay_text": datasets.Value("string"),
                    "grades": datasets.Sequence(datasets.Value("int16")),
                    "essay_year": datasets.Value("int16"),
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
        urls = _URLS[self.config.name]
        extracted_files = dl_manager.download_and_extract({"sourceA": urls})
        html_parser = self._process_html_files(extracted_files)
        self._generate_splits(html_parser.sourceA)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(
                        extracted_files["sourceA"], "sourceA", "train.csv"
                    ),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(
                        extracted_files["sourceA"], "sourceA", "validation.csv"
                    ),
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": os.path.join(
                        extracted_files["sourceA"], "sourceA", "test.csv"
                    ),
                    "split": "test",
                },
            ),
        ]
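
    # Parse the downloaded HTML dump into per-source CSV files and return the parser,
    # whose attributes (e.g. `sourceA`) hold the paths of the generated CSVs.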
    def _process_html_files(self, paths_dict):
        html_parser = HTMLParser(paths_dict)
        html_parser.parse()
        return html_parser
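
    # Split the parsed essays into train/validation/test CSV files next to the source CSV.
    # Prompts are bucketed by (mapped) essay year and assigned to splits as whole prompts,
    # so no id_prompt appears in more than one split; the assertions at the end enforce this.
    # `train_size` is the fraction of prompts per bucket assigned to the train split.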
    def _generate_splits(self, filepath: str, train_size=0.7):
        def map_year(year):
            if year <= 2017:
                return "<=2017"
            return str(year)

        def normalize_grades(grades):
            grades = grades.strip("[]").split(", ")
            grade_mapping = {"0.0": 0, "20": 40}
            # We will remove the rows that match the criteria below
            if any(
                single_grade in grades
                for single_grade in ["50", "100", "150", "0.5", "1.0", "1.5"]
            ):
                return None
            # Use the mapping to transform grades, ignoring the last grade
            mapped_grades = [
                int(grade_mapping.get(grade_concept, grade_concept))
                for grade_concept in grades[:-1]
            ]
            # Calculate and append the sum of the mapped grades as the last element
            mapped_grades.append(sum(mapped_grades))
            return mapped_grades
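
        # normalize_grades returns None for essays whose grade list contains values outside
        # the expected scale ("50", "100", "150", "0.5", "1.0", "1.5"); dropna() removes
        # those rows below. "0.0" is mapped to 0 and "20" to 40, and the last element is
        # replaced by the sum of the remaining concept grades.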
        df = pd.read_csv(filepath)
        df["general"] = df["general"].fillna("")
        df["essay_year"] = df["essay_year"].astype("int")
        df["mapped_year"] = df["essay_year"].apply(map_year)
        df["grades"] = df["grades"].apply(normalize_grades)
        df = df.dropna()
        buckets = df.groupby("mapped_year")["id_prompt"].unique().to_dict()
        df.drop("mapped_year", axis=1, inplace=True)
        train_set = []
        val_set = []
        test_set = []
        for year, prompts in buckets.items():
            np.random.shuffle(prompts)
            num_prompts = len(prompts)
            # With three or fewer prompts, assign one prompt each to train, validation, and test
            if num_prompts <= 3:
                train_set.append(df[df["id_prompt"].isin([prompts[0]])])
                val_set.append(df[df["id_prompt"].isin([prompts[1]])])
                test_set.append(df[df["id_prompt"].isin([prompts[2]])])
                continue
            # Determine the number of prompts for each set based on train_size and the remaining prompts
            num_train = math.floor(num_prompts * train_size)
            num_val_test = num_prompts - num_train
            num_val = num_val_test // 2
            num_test = num_val_test - num_val
            # Assign prompts to each set
            train_set.append(df[df["id_prompt"].isin(prompts[:num_train])])
            val_set.append(
                df[df["id_prompt"].isin(prompts[num_train : (num_train + num_val)])]
            )
            test_set.append(
                df[
                    df["id_prompt"].isin(
                        prompts[
                            (num_train + num_val) : (num_train + num_val + num_test)
                        ]
                    )
                ]
            )
        # Convert the lists of per-prompt groups into DataFrames
        train_df = pd.concat(train_set)
        val_df = pd.concat(val_set)
        test_df = pd.concat(test_set)
        # Data validation assertions: no prompt may appear in more than one split
        assert (
            len(set(train_df["id_prompt"]).intersection(set(val_df["id_prompt"]))) == 0
        ), "Overlap between train and val id_prompt"
        assert (
            len(set(train_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
        ), "Overlap between train and test id_prompt"
        assert (
            len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
        ), "Overlap between val and test id_prompt"
        dirname = os.path.dirname(filepath)
        train_df.to_csv(f"{dirname}/train.csv", index=False)
        val_df.to_csv(f"{dirname}/validation.csv", index=False)
        test_df.to_csv(f"{dirname}/test.csv", index=False)

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, filepath, split):
        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
        if self.config.name == "sourceA":
            with open(filepath, encoding="utf-8") as csvfile:
                next(csvfile)
                csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER)
                for i, row in enumerate(csv_reader):
                    grades = row["grades"].strip("[]").split(", ")
                    yield i, {
                        "id": row["id"],
                        "id_prompt": row["id_prompt"],
                        "essay_title": row["title"],
                        "essay_text": row["essay"],
                        "grades": grades,
                        "essay_year": row["essay_year"],
                    }


class HTMLParser:
    def __init__(self, paths_dict):
        self.paths_dict = paths_dict
        self.sourceA = None
        self.sourceB = None

    def apply_soup(self, filepath, num):
        # Reads the HTML file at the given path and returns its BeautifulSoup object.
        with open(os.path.join(filepath, num), "r", encoding="utf8") as file:
            conteudo = file.read()
        soup = BeautifulSoup(conteudo, "html.parser")
        return soup

    def _get_title(soup):
        title = soup.find("div", class_="container-composition")
        if title is None:
            title = soup.find("h1", class_="pg-color10").get_text()
        else:
            title = title.h2.get_text()
        title = title.replace("\xa0", "")
        return title
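
    # Grades are extracted from one of two page layouts: either a "results-table" section
    # with six "points" spans (five concept grades plus their total), or an older table
    # where grades use a comma as the decimal separator and the total sits in a
    # "noBorder-left" header cell.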
    def _get_grades(soup):
        grades = soup.find("section", class_="results-table")
        final_grades = []
        if grades is not None:
            grades = grades.find_all("span", class_="points")
            assert len(grades) == 6, f"Missing grades: {len(grades)}"
            for single_grade in grades:
                grade = int(single_grade.get_text())
                final_grades.append(grade)
            assert final_grades[-1] == sum(
                final_grades[:-1]
            ), "Grade total does not match the sum of the concept grades"
        else:
            grades = soup.find("div", class_="redacoes-corrigidas pg-bordercolor7")
            grades_sum = float(
                soup.find("th", class_="noBorder-left").get_text().replace(",", ".")
            )
            grades = grades.find_all("td")[:10]
            for idx in range(1, 10, 2):
                grade = float(grades[idx].get_text().replace(",", "."))
                final_grades.append(grade)
            assert grades_sum == sum(
                final_grades
            ), "Grade total does not match the sum of the concept grades"
            final_grades.append(grades_sum)
        return final_grades

    def _get_general_comment(soup):
        def get_general_comment_aux(soup):
            result = soup.find("article", class_="list-item c")
            if result is not None:
                result = result.find("div", class_="description")
                return result.get_text()
            else:
                result = soup.find("p", style="margin: 0px 0px 11px;")
                if result is not None:
                    return result.get_text()
                else:
                    result = soup.find("p", style="margin: 0px;")
                    if result is not None:
                        return result.get_text()
                    else:
                        result = soup.find(
                            "p", style="margin: 0px; text-align: justify;"
                        )
                        if result is not None:
                            return result.get_text()
                        else:
                            return ""

        text = soup.find("div", class_="text")
        if text is not None:
            text = text.find("p")
            if (text is None) or (len(text.get_text()) < 2):
                return get_general_comment_aux(soup)
            return text.get_text()
        else:
            return get_general_comment_aux(soup)

    def _get_specific_comment(soup):
        result = soup.find("div", class_="text")
        if result is not None:
            result = result.find_all("li")
            cms = []
            if result != []:
                for item in result:
                    text = item.get_text()
                    if text != "\xa0":
                        cms.append(text)
                return cms
            else:
                result = soup.find("div", class_="text").find_all("p")
                for item in result:
                    text = item.get_text()
                    if text != "\xa0":
                        cms.append(text)
                return cms
        else:
            result = soup.find_all("article", class_="list-item c")
            if len(result) < 2:
                return ["First if"]
            result = result[1].find_all("p")
            cms = []
            for item in result:
                text = item.get_text()
                if text != "\xa0":
                    cms.append(text)
            return cms

    def _get_essay(soup):
        essay = soup.find("div", class_="text-composition")
        if essay is not None:
            essay = essay.find_all("p")
            for f in essay:
                while f.find("span", style="color:#00b050") is not None:
                    f.find("span", style="color:#00b050").decompose()
                while f.find("span", class_="certo") is not None:
                    f.find("span", class_="certo").decompose()
            result = []
            for paragraph in essay:
                result.append(paragraph.get_text())
            return result
        else:
            essay = soup.find("div", {"id": "texto"})
            essay.find("section", class_="list-items").decompose()
            essay = essay.find_all("p")
            for f in essay:
                while f.find("span", class_="certo") is not None:
                    f.find("span", class_="certo").decompose()
            result = []
            for paragraph in essay:
                result.append(paragraph.get_text())
            return result

    def _get_essay_year(soup):
        pattern = r"redações corrigidas - \w+/\d+"
        first_occurrence = re.search(pattern, soup.get_text().lower())
        matched_url = first_occurrence.group(0) if first_occurrence else None
        year_pattern = r"\d{4}"
        return re.search(year_pattern, matched_url).group(0)

    def _clean_title(self, title):
        smaller_index = title.find("[")
        if smaller_index == -1:
            return title
        else:
            bigger_index = title.find("]")
            new_title = title[:smaller_index] + title[bigger_index + 1 :]
            return self._clean_title(new_title.replace("  ", " "))

    def _clean_list(self, list):
        if list == []:
            return []
        else:
            new_list = []
            for phrase in list:
                phrase = (
                    phrase.replace("\xa0", "").replace(" ,", ",").replace(" .", ".")
                )
                # Collapse repeated spaces into a single space
                while phrase.find("  ") != -1:
                    phrase = phrase.replace("  ", " ")
                if len(phrase) > 1:
                    new_list.append(phrase)
            return new_list

    def parse(self):
        for key, filepath in self.paths_dict.items():
            full_path = os.path.join(filepath, key)
            if key == "sourceA":
                self.sourceA = f"{full_path}/sourceA.csv"
            with open(
                f"{full_path}/{key}.csv", "w", newline="", encoding="utf8"
            ) as final_file:
                writer = csv.writer(final_file)
                writer.writerow(CSV_HEADER)
                sub_folders = [
                    name for name in os.listdir(full_path) if not name.endswith(".csv")
                ]
                essay_id = 0
                essay_title = None
                essay_text = None
                essay_grades = None
                general_comment = None
                specific_comment = None
                essay_year = None
                for prompt_folder in tqdm(
                    sub_folders,
                    desc=f"Parsing HTML files from: {key}",
                    total=len(sub_folders),
                ):
                    if prompt_folder in PROMPTS_TO_IGNORE:
                        continue
                    prompt = os.path.join(full_path, prompt_folder)
                    prompt_essays = [name for name in os.listdir(prompt)]
                    prompt_essays = prompt_essays[:-1]
                    essay_year = HTMLParser._get_essay_year(
                        self.apply_soup(prompt, "Prompt.html")
                    )
                    for essay in prompt_essays:
                        soup_text = self.apply_soup(prompt, essay)
                        if essay == "Prompt.html":
                            continue
                        essay_title = self._clean_title(
                            HTMLParser._get_title(soup_text).replace(";", ",")
                        )
                        essay_grades = HTMLParser._get_grades(soup_text)
                        general_comment = HTMLParser._get_general_comment(
                            soup_text
                        ).strip()
                        specific_comment = HTMLParser._get_specific_comment(soup_text)
                        if general_comment in specific_comment:
                            specific_comment.remove(general_comment)
                        if (len(specific_comment) > 1) and (
                            len(specific_comment[0]) < 2
                        ):
                            specific_comment = specific_comment[1:]
                        essay_text = self._clean_list(HTMLParser._get_essay(soup_text))
                        specific_comment = self._clean_list(specific_comment)
                        writer.writerow(
                            [
                                essay,
                                prompt_folder,
                                essay_title,
                                essay_text,
                                essay_grades,
                                general_comment,
                                specific_comment,
                                essay_year,
                            ]
                        )
                        essay_id += 1
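

# Minimal usage sketch (not part of the loader). Assumption: this script is published on the
# Hugging Face Hub as "kamel-usp/aes_enem_dataset", as the download URL above suggests; newer
# versions of `datasets` may also require passing trust_remote_code=True.
if __name__ == "__main__":
    dataset = datasets.load_dataset("kamel-usp/aes_enem_dataset", name="sourceA")
    print(dataset)
    print(dataset["train"][0])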