# oab_exams_no_train/oab_exams_no_train.py
"""OAB Exams dataset"""
from collections import defaultdict

import datasets

_CITATION = """@misc{delfino2017passing,
    title={Passing the Brazilian OAB Exam: data preparation and some experiments},
    author={Pedro Delfino and Bruno Cuconato and Edward Hermann Haeusler and Alexandre Rademaker},
    year={2017},
    eprint={1712.05128},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
This repository contains the bar exams administered by the Ordem dos Advogados do Brasil (OAB), the Brazilian Bar Association, from 2010 to 2018.
In Brazil, all legal professionals must demonstrate their knowledge of the law and its application by passing the OAB exams, the national bar exams. The OAB exams therefore provide an excellent benchmark for legal information systems, since passing them arguably signals that a system has acquired a capacity for legal reasoning comparable to that of a human lawyer.
"""
_HOMEPAGE = "https://github.com/legal-nlp/oab-exams"
_URL = "eduagarcia/oab_exams"
_EXAM_IDS_DEV = ["2010-01"]
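
# A quick way to inspect the source data this script filters (a sketch, not
# part of the loader; requires access to the Hugging Face Hub):
#
#     raw = datasets.load_dataset(_URL, split="train")
#     print(sum(raw["nullified"]), "nullified questions will be dropped")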


class OABExamsNoTrain(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "question_number": datasets.Value("int32"),
                    "exam_id": datasets.Value("string"),
                    "exam_year": datasets.Value("string"),
                    "question_type": datasets.Value("string"),
                    "nullified": datasets.Value("bool"),
                    "question": datasets.Value("string"),
                    "choices": datasets.Sequence(
                        feature={
                            "text": datasets.Value("string"),
                            "label": datasets.Value("string"),
                        }
                    ),
                    "answerKey": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
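
    # Illustrative record shape (values below are hypothetical, not taken from
    # the data). Note that datasets.Sequence over a dict yields a dict of
    # lists, so "choices" is a dict of lists rather than a list of dicts:
    # {
    #     "id": "...",
    #     "question_number": 1,
    #     "exam_id": "2010-01",
    #     "exam_year": "2010",
    #     "question_type": "...",
    #     "nullified": False,
    #     "question": "...",
    #     "choices": {"text": ["...", "...", "...", "..."],
    #                 "label": ["A", "B", "C", "D"]},
    #     "answerKey": "A",
    # }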

    def _split_generators(self, dl_manager):
        # Load the source dataset from the Hub and remove voided (nullified)
        # questions.
        dataset = datasets.load_dataset(_URL, split="train")
        dataset = dataset.filter(lambda example: not example["nullified"])

        # The exams listed in _EXAM_IDS_DEV become the validation split;
        # everything else goes to test.
        dataset_dev = dataset.filter(lambda example: example["exam_id"] in _EXAM_IDS_DEV)
        dataset_test = dataset.filter(lambda example: example["exam_id"] not in _EXAM_IDS_DEV)

        # Reorder the validation split round-robin over question types, so
        # consecutive examples cycle through the types (useful, e.g., when the
        # first k examples are used as few-shot demonstrations).
        dataset_ids_by_ex_type = defaultdict(list)
        for i, ex_type in enumerate(dataset_dev["question_type"]):
            dataset_ids_by_ex_type[ex_type].append(i)

        new_grouped_index = []
        ex_types = list(dataset_ids_by_ex_type.keys())
        while len(new_grouped_index) != len(dataset_dev):
            for ex_type in ex_types:
                if len(dataset_ids_by_ex_type[ex_type]) > 0:
                    new_grouped_index.append(dataset_ids_by_ex_type[ex_type].pop(0))
        dataset_dev_reorder = dataset_dev.select(new_grouped_index)
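        # For example, with (hypothetical) type groups
        #   {"A": [0, 1, 4], "B": [2], "C": [3]}
        # the reordered index is [0, 2, 3, 1, 4]: one index per type per pass,
        # skipping types that have been exhausted.
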
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "dataset": dataset_dev_reorder,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "dataset": dataset_test,
                },
            ),
        ]

    def _generate_examples(self, dataset):
        # The splits are already materialized Dataset objects; re-emit their
        # rows with sequential keys.
        for i, example in enumerate(dataset):
            yield i, example
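

if __name__ == "__main__":
    # Minimal smoke test (a sketch, assuming Hub access): build the splits
    # locally and inspect the validation split.
    builder = OABExamsNoTrain()
    builder.download_and_prepare()
    validation = builder.as_dataset(split=datasets.Split.VALIDATION)
    print(len(validation), validation[0]["exam_id"])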