"""HuggingFace `datasets` loading script for the AES ENEM dataset: automated essay
scoring on ENEM-style essays written in Brazilian Portuguese."""

import csv
import math
import os
import re

import datasets
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from tqdm.auto import tqdm

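# Fixed seed so the prompt shuffling in AesEnemDataset._generate_splits is
# reproducible across runs.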
np.random.seed(42) |


_CITATION = """\
TODO
"""

_DESCRIPTION = """\
Automated Essay Scoring (AES) dataset built from ENEM-style essays written in
Brazilian Portuguese. Each essay comes with its prompt identifier, title, full text,
per-competency grades, and the year in which it was written.
"""

_HOMEPAGE = "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset"

_LICENSE = ""

_URLS = {
    "sourceA": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceA.tar.gz?download=true",
}

# Prompt folders that are skipped entirely when parsing the raw HTML dump
# (see HTMLParser.parse below).
PROMPTS_TO_IGNORE = [
    "brasileiros-tem-pessima-educacao-argumentativa-segundo-cientista",
    "carta-convite-discutir-discriminacao-na-escola",
    "informacao-no-rotulo-de-produtos-transgenicos",
]

# Column order of the intermediate CSV written by HTMLParser.parse and read back in
# _generate_examples.
CSV_HEADER = [
    "id",
    "id_prompt",
    "title",
    "essay",
    "grades",
    "general",
    "specific",
    "essay_year",
]
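
# Expected layout of the extracted sourceA archive, as assumed by HTMLParser.parse
# below: one folder per prompt, each holding Prompt.html plus one HTML file per essay.
#
#   sourceA/
#       <prompt-folder>/
#           Prompt.html
#           <essay>.html
#           ...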


class AesEnemDataset(datasets.GeneratorBasedBuilder):
    """ENEM-style essays with per-competency grades, for Automated Essay Scoring."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="sourceA",
            version=VERSION,
            description="Essays, grades, and grader comments parsed from the sourceA HTML files.",
        ),
        datasets.BuilderConfig(
            name="sourceB",
            version=VERSION,
            description="TODO",
        ),
    ]

    DEFAULT_CONFIG_NAME = "sourceA"

    def _info(self):
        if self.config.name == "sourceA":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "id_prompt": datasets.Value("string"),
                    "essay_title": datasets.Value("string"),
                    "essay_text": datasets.Value("string"),
                    "grades": datasets.Sequence(datasets.Value("int16")),
                    "essay_year": datasets.Value("int16"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        extracted_files = dl_manager.download_and_extract({"sourceA": urls})
        html_parser = self._process_html_files(extracted_files)
        self._generate_splits(html_parser.sourceA)
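        # HTMLParser.parse writes sourceA/sourceA.csv inside the extracted archive, and
        # _generate_splits splits it into train.csv, validation.csv, and test.csv in the
        # same folder; the split generators below read those files.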
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(
                        extracted_files["sourceA"], "sourceA", "train.csv"
                    ),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(
                        extracted_files["sourceA"], "sourceA", "validation.csv"
                    ),
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(
                        extracted_files["sourceA"], "sourceA", "test.csv"
                    ),
                    "split": "test",
                },
            ),
        ]

    def _process_html_files(self, paths_dict):
        html_parser = HTMLParser(paths_dict)
        html_parser.parse()
        return html_parser

    def _generate_splits(self, filepath: str, train_size=0.7):
        def map_year(year):
            # Years up to 2017 are pooled into a single bucket for stratification.
            if year <= 2017:
                return "<=2017"
            return str(year)

        def normalize_grades(grades):
            grades = grades.strip("[]").split(", ")
            grade_mapping = {"0.0": 0, "20": 40}
            # Essays whose grades use a different scale (e.g. 50/100/150 or
            # 0.5/1.0/1.5) are dropped entirely.
            if any(
                single_grade in grades
                for single_grade in ["50", "100", "150", "0.5", "1.0", "1.5"]
            ):
                return None
            # Map the per-competency grades (all but the final total) and recompute
            # the total as their sum.
            mapped_grades = [
                int(grade_mapping.get(grade_concept, grade_concept))
                for grade_concept in grades[:-1]
            ]
            mapped_grades.append(sum(mapped_grades))
            return mapped_grades
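        # Hypothetical example of the normalization above: the raw string
        # "[120, 120, 40, 0.0, 120, 400]" yields [120, 120, 40, 0, 120] for the five
        # competencies, and the recomputed total (400) is appended.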
|
        df = pd.read_csv(filepath)
        df["general"] = df["general"].fillna("")
        df["essay_year"] = df["essay_year"].astype("int")
        df["mapped_year"] = df["essay_year"].apply(map_year)
        df["grades"] = df["grades"].apply(normalize_grades)
        df = df.dropna()
        buckets = df.groupby("mapped_year")["id_prompt"].unique().to_dict()
        df.drop("mapped_year", axis=1, inplace=True)
|
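        # Prompts are bucketed by (mapped) year and split at the prompt level, so a
        # given id_prompt never ends up in more than one of train/validation/test.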
        train_set = []
        val_set = []
        test_set = []
        for year, prompts in buckets.items():
            np.random.shuffle(prompts)
            num_prompts = len(prompts)
            # With three or fewer prompts in a bucket, assign one prompt per split
            # (this assumes the bucket holds at least three prompts).
            if num_prompts <= 3:
                train_set.append(df[df["id_prompt"].isin([prompts[0]])])
                val_set.append(df[df["id_prompt"].isin([prompts[1]])])
                test_set.append(df[df["id_prompt"].isin([prompts[2]])])
                continue
            # Otherwise give train_size of the prompts to train and share the rest
            # between validation and test.
            num_train = math.floor(num_prompts * train_size)
            num_val_test = num_prompts - num_train
            num_val = num_val_test // 2
            num_test = num_val_test - num_val
            train_set.append(df[df["id_prompt"].isin(prompts[:num_train])])
            val_set.append(
                df[df["id_prompt"].isin(prompts[num_train : (num_train + num_val)])]
            )
            test_set.append(
                df[
                    df["id_prompt"].isin(
                        prompts[
                            (num_train + num_val) : (num_train + num_val + num_test)
                        ]
                    )
                ]
            )

        train_df = pd.concat(train_set)
        val_df = pd.concat(val_set)
        test_df = pd.concat(test_set)

        assert (
            len(set(train_df["id_prompt"]).intersection(set(val_df["id_prompt"]))) == 0
        ), "Overlap between train and val id_prompt"
        assert (
            len(set(train_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
        ), "Overlap between train and test id_prompt"
        assert (
            len(set(val_df["id_prompt"]).intersection(set(test_df["id_prompt"]))) == 0
        ), "Overlap between val and test id_prompt"

        dirname = os.path.dirname(filepath)
        train_df.to_csv(f"{dirname}/train.csv", index=False)
        val_df.to_csv(f"{dirname}/validation.csv", index=False)
        test_df.to_csv(f"{dirname}/test.csv", index=False)

    def _generate_examples(self, filepath, split):
        if self.config.name == "sourceA":
            with open(filepath, encoding="utf-8") as csvfile:
                next(csvfile)  # skip the header row
                csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER)
                for i, row in enumerate(csv_reader):
                    grades = row["grades"].strip("[]").split(", ")
                    yield i, {
                        "id": row["id"],
                        "id_prompt": row["id_prompt"],
                        "essay_title": row["title"],
                        "essay_text": row["essay"],
                        "grades": grades,
                        "essay_year": row["essay_year"],
                    }


class HTMLParser:
    def __init__(self, paths_dict):
        self.paths_dict = paths_dict
        self.sourceA = None
        self.sourceB = None

    def apply_soup(self, filepath, num):
        # Read the HTML file and return its parsed BeautifulSoup tree.
        with open(os.path.join(filepath, num), "r", encoding="utf8") as file:
            conteudo = file.read()
        return BeautifulSoup(conteudo, "html.parser")

    @staticmethod
    def _get_title(soup):
        title = soup.find("div", class_="container-composition")
        if title is None:
            title = soup.find("h1", class_="pg-color10").get_text()
        else:
            title = title.h2.get_text()
        return title.replace("\xa0", "")

    @staticmethod
    def _get_grades(soup):
        grades = soup.find("section", class_="results-table")
        final_grades = []
        if grades is not None:
            grades = grades.find_all("span", class_="points")
            assert len(grades) == 6, f"Missing grades: {len(grades)}"
            for single_grade in grades:
                grade = int(single_grade.get_text())
                final_grades.append(grade)
            assert final_grades[-1] == sum(
                final_grades[:-1]
            ), "Grading sum is not making sense"
        else:
            grades = soup.find("div", class_="redacoes-corrigidas pg-bordercolor7")
            grades_sum = float(
                soup.find("th", class_="noBorder-left").get_text().replace(",", ".")
            )
            grades = grades.find_all("td")[:10]
            for idx in range(1, 10, 2):
                grade = float(grades[idx].get_text().replace(",", "."))
                final_grades.append(grade)
            assert grades_sum == sum(final_grades), "Grading sum is not making sense"
            final_grades.append(grades_sum)
        return final_grades

    @staticmethod
    def _get_general_comment(soup):
        def get_general_comment_aux(soup):
            result = soup.find("article", class_="list-item c")
            if result is not None:
                result = result.find("div", class_="description")
                return result.get_text()
            # Fall back to the known inline paragraph styles, in order.
            for style in (
                "margin: 0px 0px 11px;",
                "margin: 0px;",
                "margin: 0px; text-align: justify;",
            ):
                result = soup.find("p", style=style)
                if result is not None:
                    return result.get_text()
            return ""

        text = soup.find("div", class_="text")
        if text is not None:
            text = text.find("p")
            if (text is None) or (len(text.get_text()) < 2):
                return get_general_comment_aux(soup)
            return text.get_text()
        return get_general_comment_aux(soup)

    @staticmethod
    def _get_specific_comment(soup):
        result = soup.find("div", class_="text")
        if result is not None:
            result = result.find_all("li")
            cms = []
            if result != []:
                for item in result:
                    text = item.get_text()
                    if text != "\xa0":
                        cms.append(text)
                return cms
            else:
                result = soup.find("div", class_="text").find_all("p")
                for item in result:
                    text = item.get_text()
                    if text != "\xa0":
                        cms.append(text)
                return cms
        else:
            result = soup.find_all("article", class_="list-item c")
            if len(result) < 2:
                return ["First if"]
            result = result[1].find_all("p")
            cms = []
            for item in result:
                text = item.get_text()
                if text != "\xa0":
                    cms.append(text)
            return cms

    @staticmethod
    def _get_essay(soup):
        essay = soup.find("div", class_="text-composition")
        if essay is not None:
            essay = essay.find_all("p")
            for f in essay:
                while f.find("span", style="color:#00b050") is not None:
                    f.find("span", style="color:#00b050").decompose()
                while f.find("span", class_="certo") is not None:
                    f.find("span", class_="certo").decompose()
            result = []
            for paragraph in essay:
                result.append(paragraph.get_text())
            return result
        else:
            essay = soup.find("div", {"id": "texto"})
            essay.find("section", class_="list-items").decompose()
            essay = essay.find_all("p")
            for f in essay:
                while f.find("span", class_="certo") is not None:
                    f.find("span", class_="certo").decompose()
            result = []
            for paragraph in essay:
                result.append(paragraph.get_text())
            return result

    @staticmethod
    def _get_essay_year(soup):
        pattern = r"redações corrigidas - \w+/\d+"
        first_occurrence = re.search(pattern, soup.get_text().lower())
        matched_url = first_occurrence.group(0) if first_occurrence else None
        year_pattern = r"\d{4}"
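        # Hypothetical example: a page containing "Redações corrigidas - abril/2019"
        # matches the pattern after lowercasing, and "2019" is extracted below
        # (returned as a string).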
return re.search(year_pattern, matched_url).group(0) |

    def _clean_title(self, title):
        smaller_index = title.find("[")
        if smaller_index == -1:
            return title
        bigger_index = title.find("]")
        new_title = title[:smaller_index] + title[bigger_index + 1 :]
        return self._clean_title(new_title.replace("  ", " "))

    def _clean_list(self, items):
        if items == []:
            return []
        new_list = []
        for phrase in items:
            phrase = (
                phrase.replace("\xa0", "").replace(" ,", ",").replace(" .", ".")
            )
            # Collapse runs of multiple spaces into a single space.
            while phrase.find("  ") != -1:
                phrase = phrase.replace("  ", " ")
            if len(phrase) > 1:
                new_list.append(phrase)
        return new_list

    def parse(self):
        for key, filepath in self.paths_dict.items():
            full_path = os.path.join(filepath, key)
            if key == "sourceA":
                self.sourceA = f"{full_path}/sourceA.csv"
            with open(
                f"{full_path}/{key}.csv", "w", newline="", encoding="utf8"
            ) as final_file:
                writer = csv.writer(final_file)
                writer.writerow(CSV_HEADER)
                sub_folders = [
                    name for name in os.listdir(full_path) if not name.endswith(".csv")
                ]
                essay_id = 0
                essay_title = None
                essay_text = None
                essay_grades = None
                general_comment = None
                specific_comment = None
                essay_year = None
                for prompt_folder in tqdm(
                    sub_folders,
                    desc=f"Parsing HTML files from: {key}",
                    total=len(sub_folders),
                ):
                    if prompt_folder in PROMPTS_TO_IGNORE:
                        continue
                    prompt = os.path.join(full_path, prompt_folder)
                    prompt_essays = [name for name in os.listdir(prompt)]
                    prompt_essays = prompt_essays[:-1]
                    essay_year = HTMLParser._get_essay_year(
                        self.apply_soup(prompt, "Prompt.html")
                    )
                    for essay in prompt_essays:
                        soup_text = self.apply_soup(prompt, essay)
                        if essay == "Prompt.html":
                            # The prompt description page is not an essay.
                            continue
                        essay_title = self._clean_title(
                            HTMLParser._get_title(soup_text).replace(";", ",")
                        )
                        essay_grades = HTMLParser._get_grades(soup_text)
                        general_comment = HTMLParser._get_general_comment(
                            soup_text
                        ).strip()
                        specific_comment = HTMLParser._get_specific_comment(soup_text)
                        if general_comment in specific_comment:
                            specific_comment.remove(general_comment)
                        if (len(specific_comment) > 1) and (
                            len(specific_comment[0]) < 2
                        ):
                            specific_comment = specific_comment[1:]
                        essay_text = self._clean_list(HTMLParser._get_essay(soup_text))
                        specific_comment = self._clean_list(specific_comment)
                        writer.writerow(
                            [
                                essay,
                                prompt_folder,
                                essay_title,
                                essay_text,
                                essay_grades,
                                general_comment,
                                specific_comment,
                                essay_year,
                            ]
                        )
                        essay_id += 1
|
|
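if __name__ == "__main__":
    # Minimal local smoke test (illustrative sketch, not part of the loading script
    # itself). It assumes a `datasets` version that still supports script-based
    # builders; trust_remote_code is required on recent versions and can be dropped
    # on older ones.
    dataset = datasets.load_dataset(__file__, name="sourceA", trust_remote_code=True)
    print(dataset)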