BLUEX_without_images / BLUEX_without_images_disabled.py
"""OAB Exams dataset"""
import json
import os
import re

import datasets
_CITATION = """@misc{almeida2023bluex,
title={BLUEX: A benchmark based on Brazilian Leading Universities Entrance eXams},
author={Thales Sales Almeida and Thiago Laitz and Giovana K. Bonás and Rodrigo Nogueira},
year={2023},
eprint={2307.05410},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
Despite Portuguese being the fifth most widely spoken language, there is a lack of freely available resources for evaluating language models in Portuguese. This repository contains a multimodal dataset consisting of the two leading university entrance exams conducted in Brazil: Comvest (Unicamp) and Fuvest (USP), spanning from 2018 to 2024. The dataset comprises a total of 1260 questions, of which 724 do not have accompanying images.
"""
_HOMEPAGE = "https://github.com/Portuguese-Benchmark-Datasets/BLUEX"
_URL = "https://raw.githubusercontent.com/Portuguese-Benchmark-Datasets/BLUEX/main/data/bluex_dataset.zip"
class BLUEX_without_images(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.1.0")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"question_number": datasets.Value("int32"),
"exam_id": datasets.Value("string"),
"exam_year": datasets.Value("string"),
"university": datasets.Value("string"),
"question_type": datasets.Sequence(datasets.Value("string")),
"nullified": datasets.Value("bool"),
"question": datasets.Value("string"),
"choices": datasets.Sequence(feature={
"text": datasets.Value("string"),
"label": datasets.Value("string")
}),
"answerKey": datasets.Value("string"),
}),
supervised_keys=None,
homepage=_HOMEPAGE,
citation=_CITATION,
)
    def _split_generators(self, dl_manager):
        # Download and extract the zip; questions that require images are
        # filtered out later, in _generate_examples.
        filedir = dl_manager.download_and_extract(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"filedir": os.path.join(filedir, 'questions')
}
)
]
def _generate_examples(self, filedir):
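        # Walk the extracted tree: <filedir>/<university>/<year>[/<dayN>]/<question>.json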
for university in os.listdir(filedir):
years = sorted(os.listdir(os.path.join(filedir, university)))
for year in years:
                days = [
                    d for d in os.listdir(os.path.join(filedir, university, year))
                    if os.path.isdir(os.path.join(filedir, university, year, d))
                ]
if len(days) == 0:
days = ['']
days = sorted(days)
for day in days:
if day == '':
path = os.path.join(filedir, university, year)
else:
path = os.path.join(filedir, university, year, day)
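                    # Exam id: "<university>_<year>", plus a day suffix when the exam
                    # spans multiple days, e.g. "UNICAMP_2021" or "USP_2019_1"
                    # (illustrative values).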
exam_id = f"{university}_{year}" if day == '' else f"{university}_{year}_{day.replace('day', '')}"
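                    # Sort question files by the first integer in the filename,
                    # i.e. the question number.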
filenames = sorted(os.listdir(path), key=lambda x: int(re.findall(r'\d+', x)[0]))
for filename in filenames:
if filename.endswith('.json'):
                        with open(os.path.join(path, filename), 'r', encoding='utf-8') as f:
example = json.load(f)
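                                # Skip questions flagged as image-dependent ('IU'),
                                # with non-text alternatives, or with associated images.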
if example['IU'] or example['alternatives_type'] != 'string' or example['has_associated_images']:
continue
choices = {
"text": [],
"label": ["A", "B", "C", "D", "E"]
}
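                            # Alternatives arrive prefixed with their letter ("A) ..."),
                            # so drop the 3-character prefix and trim whitespace.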
for alternative in example['alternatives']:
choices['text'].append(alternative[3:].strip())
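                            # Trim the label list to the actual number of alternatives.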
choices['label'] = choices['label'][:len(choices['text'])]
doc_id = f"{exam_id}_{example['number']}"
yield doc_id, {
"id": doc_id,
"question_number": example['number'],
"exam_id": exam_id,
"exam_year": year,
"university": university,
"question_type": example['subject'],
"nullified": None,
"question": example['question'],
"choices": choices,
"answerKey": example['answer']
}
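
# Minimal usage sketch (assumes this script is saved locally under a loadable
# name such as "BLUEX_without_images.py"; the filename is an assumption, and
# recent datasets versions may additionally require trust_remote_code=True):
#
#     import datasets
#     ds = datasets.load_dataset("BLUEX_without_images.py", split="train")
#     print(ds[0]["question"], ds[0]["choices"]["label"])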