eduagarcia committed on
Commit
c8ee2c3
1 Parent(s): f9fb911

Loading script

Files changed (1)
  1. BLUEX_without_images_disabled.py +115 -0
BLUEX_without_images_disabled.py ADDED
@@ -0,0 +1,115 @@
+ """BLUEX dataset"""
+
+ import datasets
+ import pandas as pd
+ import re
+ from collections import defaultdict
+ import os
+ import json
+
+ _CITATION = """@misc{almeida2023bluex,
+     title={BLUEX: A benchmark based on Brazilian Leading Universities Entrance eXams},
+     author={Thales Sales Almeida and Thiago Laitz and Giovana K. Bonás and Rodrigo Nogueira},
+     year={2023},
+     eprint={2307.05410},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """
+ Despite Portuguese being the fifth most widely spoken language, there is a lack of freely available resources for evaluating language models in Portuguese. This repository contains a multimodal dataset consisting of the two leading university entrance exams conducted in Brazil: Convest (Unicamp) and Fuvest (USP), spanning from 2018 to 2024. The dataset comprises a total of 1260 questions, of which 724 do not have accompanying images.
+ """
+
+ _HOMEPAGE = "https://github.com/Portuguese-Benchmark-Datasets/BLUEX"
+
+ # Hub id of the original BLUEX dataset; superseded by the raw zip URL below.
+ # _URL = "portuguese-benchmark-datasets/BLUEX"
+ _URL = "https://raw.githubusercontent.com/Portuguese-Benchmark-Datasets/BLUEX/main/data/bluex_dataset.zip"
+
+ class BLUEX_without_images(datasets.GeneratorBasedBuilder):
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "question_number": datasets.Value("int32"),
+                     "exam_id": datasets.Value("string"),
+                     "exam_year": datasets.Value("string"),
+                     "university": datasets.Value("string"),
+                     "question_type": datasets.Sequence(datasets.Value("string")),
+                     "nullified": datasets.Value("bool"),
+                     "question": datasets.Value("string"),
+                     "choices": datasets.Sequence(feature={
+                         "text": datasets.Value("string"),
+                         "label": datasets.Value("string")
+                     }),
+                     "answerKey": datasets.Value("string"),
+                 }),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # dataset = datasets.load_dataset(_URL, split="questions")
+         # remove questions that require images
+         # dataset = dataset.filter(lambda example: not example['IU'] and example['alternatives_type'] == 'string')
+         filedir = dl_manager.download_and_extract(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filedir": os.path.join(filedir, 'questions')
+                 }
+             )
+         ]
+
+     def _generate_examples(self, filedir):
+         # Directory layout after extraction: questions/<university>/<year>[/<day>]/<question>.json
+         for university in os.listdir(filedir):
+             years = sorted(os.listdir(os.path.join(filedir, university)))
+             for year in years:
+                 days = [d for d in os.listdir(os.path.join(filedir, university, year)) if os.path.isdir(os.path.join(filedir, university, year, d))]
+                 if len(days) == 0:
+                     days = ['']
+                 days = sorted(days)
+                 for day in days:
+                     if day == '':
+                         path = os.path.join(filedir, university, year)
+                     else:
+                         path = os.path.join(filedir, university, year, day)
+
+                     exam_id = f"{university}_{year}" if day == '' else f"{university}_{year}_{day.replace('day', '')}"
+                     # Sort question files by the number embedded in their filename.
+                     filenames = sorted(os.listdir(path), key=lambda x: int(re.findall(r'\d+', x)[0]))
+                     for filename in filenames:
+                         if filename.endswith('.json'):
+                             with open(os.path.join(path, filename), 'r') as f:
+                                 example = json.load(f)
+
+                             # Skip questions that rely on images or whose alternatives are not plain text.
+                             if example['IU'] or example['alternatives_type'] != 'string' or example['has_associated_images']:
+                                 continue
+
+                             choices = {
+                                 "text": [],
+                                 "label": ["A", "B", "C", "D", "E"]
+                             }
+                             for alternative in example['alternatives']:
+                                 # Alternatives come prefixed with their label (e.g. "A) "); keep only the text.
+                                 choices['text'].append(alternative[3:].strip())
+                             # Keep one label per alternative actually present.
+                             choices['label'] = choices['label'][:len(choices['text'])]
+
+                             doc_id = f"{exam_id}_{example['number']}"
+                             yield doc_id, {
+                                 "id": doc_id,
+                                 "question_number": example['number'],
+                                 "exam_id": exam_id,
+                                 "exam_year": year,
+                                 "university": university,
+                                 "question_type": example['subject'],
+                                 "nullified": None,
+                                 "question": example['question'],
+                                 "choices": choices,
+                                 "answerKey": example['answer']
+                             }
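
For reference, a minimal usage sketch for this loading script (a sketch only: it assumes a datasets version that still supports dataset scripts, and recent 2.x releases may additionally require trust_remote_code=True):

    import datasets

    # Build the dataset from the script added in this commit; the builder defines a single "train" split.
    bluex = datasets.load_dataset("BLUEX_without_images_disabled.py", split="train")

    # Records follow the schema declared in _info(): id, question, choices (text/label), answerKey, etc.
    print(bluex[0]["id"], bluex[0]["question"])
    print(bluex[0]["choices"], bluex[0]["answerKey"])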