abarbosa committed on
Commit
848cbb1
2 Parent(s): dd92d23 c740f03

Merge branch 'u/andrebarbosa/adapt-reproducibility-dataset'

Browse files
Files changed (1) hide show
  1. aes_enem_dataset.py +73 -6
aes_enem_dataset.py CHANGED
@@ -50,6 +50,7 @@ _URLS = {
50
  "sourceAOnly": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz?download=true",
51
  "sourceAWithGraders": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz?download=true",
52
  "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceB.tar.gz?download=true",
 
53
  }
54
 
55
  PROMPTS_TO_IGNORE = [
@@ -78,23 +79,58 @@ CSV_HEADER = [
78
  "essay_year",
79
  ]
80
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
 
82
  class AesEnemDataset(datasets.GeneratorBasedBuilder):
83
- """TODO: Short description of my dataset."""
 
84
 
85
- VERSION = datasets.Version("0.0.2")
 
 
 
 
86
 
87
  # You will be able to load one or the other configurations in the following list with
88
  BUILDER_CONFIGS = [
89
- datasets.BuilderConfig(name="sourceAOnly", version=VERSION, description="TODO"),
90
  datasets.BuilderConfig(
91
- name="sourceAWithGraders", version=VERSION, description="TODO"
92
  ),
93
  datasets.BuilderConfig(
94
  name="sourceB",
95
  version=VERSION,
96
- description="TODO",
97
  ),
 
98
  ]
99
 
100
  def _info(self):
@@ -165,6 +201,33 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
165
  def _split_generators(self, dl_manager):
166
  urls = _URLS[self.config.name]
167
  extracted_files = dl_manager.download_and_extract({self.config.name: urls})
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
168
  html_parser = self._process_html_files(extracted_files)
169
  if "sourceA" in self.config.name:
170
  self._post_process_dataframe(html_parser.sourceA)
@@ -340,7 +403,11 @@ class AesEnemDataset(datasets.GeneratorBasedBuilder):
340
  next(csvfile)
341
  csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER)
342
  for i, row in enumerate(csv_reader):
343
- grades = row["grades"].strip("[]").split(", ")
 
 
 
 
344
  yield i, {
345
  "id": row["id"],
346
  "id_prompt": row["id_prompt"],
 
50
  "sourceAOnly": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz?download=true",
51
  "sourceAWithGraders": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceAWithGraders.tar.gz?download=true",
52
  "sourceB": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/sourceB.tar.gz?download=true",
53
+ "PROPOR2024": "https://huggingface.co/datasets/kamel-usp/aes_enem_dataset/resolve/main/propor2024.tar.gz?download=true"
54
  }
55
 
56
  PROMPTS_TO_IGNORE = [
 
79
  "essay_year",
80
  ]
81
 
82
+ SOURCE_A_DESC = """
83
+ Source A have 860 essays available from August 2015 to March 2020.
84
+ For each month of that period, a new prompt together with supporting texts were given, and the graded essays from the previous month were made available.
85
+ Of the 56 prompts, 12 had no associated essays available (at the time of download).
86
+ Additionally, there were 3 prompts that asked for a text in the format of a letter. We removed those 15 prompts and associated texts from the corpus.
87
+ For an unknown reason, 414 of the essays were graded using a five-point scale of either {0, 50, 100, 150, 200} or its scaled-down version going from 0 to 2.
88
+ To avoid introducing bias, we also discarded such instances, resulting in a dataset of 386 annotated essays with prompts and supporting texts (with each component being clearly identified).
89
+ Some of the essays used a six-point scale with 20 points instead of 40 points as the second class. As we believe this introduces minimal bias, we kept such essays and relabeled class 20 as class 40.
90
+ The original data contains comments from the annotators explaining their per-competence scores. They are included in our dataset.
91
+ """
92
+
93
+ SOURCE_A_WITH_GRADERS = "Same as SourceA but augmented with reviwers contractors grade's. Each essay then have three grades: the downloaded one and each grader's feedback. "
94
+
95
+ SOURCE_B_DESC = """
96
+ Source B is very similar to Source A: a new prompt and supporting texts are made available every month along with the graded essays submitted in the previous month.
97
+ We downloaded HTML sources from 7,700 essays from May 2009 to May 2023. Essays released prior to June 2016 were graded on a five-point scale and consequently discarded.
98
+ This resulted in a corpus of approx. 3,200 graded essays on 83 different prompts.
99
+
100
+ Although in principle, Source B also provides supporting texts for students, none were available at the time the data was downloaded.
101
+ To mitigate this, we extracted supporting texts from the Essay-Br corpus, whenever possible, by manually matching prompts between the two corpora.
102
+ We ended up with approx. 1,000 essays containing both prompt and supporting texts, and approx. 2,200 essays containing only the respective prompt.
103
+ """
104
+
105
+ PROPOR2024 = """
106
+ Splits used for PROPOR paper. It is a variation of sourceAWithGraders dataset. Post publication we noticed that there was an issue in the reproducible setting.
107
+
108
+ We fix that and set this config to keep reproducibility w.r.t. numbers reported in the paper.
109
+ """
110
+
111
 
112
  class AesEnemDataset(datasets.GeneratorBasedBuilder):
113
+ """
114
+ AES Enem Dataset. For full explanation about generation process, please refer to: https://aclanthology.org/2024.propor-1.23/
115
 
116
+ We realized in our experiments that there was an issue in the determistic process regarding how the dataset is generated.
117
+ To reproduce results from PROPOR paper, please refer to "PROPOR2024" config. Other configs are reproducible now.
118
+ """
119
+
120
+ VERSION = datasets.Version("0.1.0")
121
 
122
  # You will be able to load one or the other configurations in the following list with
123
  BUILDER_CONFIGS = [
124
+ datasets.BuilderConfig(name="sourceAOnly", version=VERSION, description=SOURCE_A_DESC),
125
  datasets.BuilderConfig(
126
+ name="sourceAWithGraders", version=VERSION, description=SOURCE_A_WITH_GRADERS
127
  ),
128
  datasets.BuilderConfig(
129
  name="sourceB",
130
  version=VERSION,
131
+ description=SOURCE_B_DESC,
132
  ),
133
+ datasets.BuilderConfig(name="PROPOR2024", version=VERSION, description=PROPOR2024),
134
  ]
135
 
136
  def _info(self):
 
201
  def _split_generators(self, dl_manager):
202
  urls = _URLS[self.config.name]
203
  extracted_files = dl_manager.download_and_extract({self.config.name: urls})
204
+ if "PROPOR2024" == self.config.name:
205
+ base_path = extracted_files["PROPOR2024"]
206
+ return [
207
+ datasets.SplitGenerator(
208
+ name=datasets.Split.TRAIN,
209
+ # These kwargs will be passed to _generate_examples
210
+ gen_kwargs={
211
+ "filepath": os.path.join(base_path, "propor2024/train.csv"),
212
+ "split": "train",
213
+ },
214
+ ),
215
+ datasets.SplitGenerator(
216
+ name=datasets.Split.VALIDATION,
217
+ # These kwargs will be passed to _generate_examples
218
+ gen_kwargs={
219
+ "filepath": os.path.join(base_path, "propor2024/validation.csv"),
220
+ "split": "validation",
221
+ },
222
+ ),
223
+ datasets.SplitGenerator(
224
+ name=datasets.Split.TEST,
225
+ gen_kwargs={
226
+ "filepath": os.path.join(base_path, "propor2024/test.csv"),
227
+ "split": "test",
228
+ },
229
+ ),
230
+ ]
231
  html_parser = self._process_html_files(extracted_files)
232
  if "sourceA" in self.config.name:
233
  self._post_process_dataframe(html_parser.sourceA)
 
403
  next(csvfile)
404
  csv_reader = csv.DictReader(csvfile, fieldnames=CSV_HEADER)
405
  for i, row in enumerate(csv_reader):
406
+ grades = row["grades"].strip("[]")
407
+ if self.config.name == "PROPOR2024":
408
+ grades = grades.strip().split()
409
+ else:
410
+ grades = grades.split(", ")
411
  yield i, {
412
  "id": row["id"],
413
  "id_prompt": row["id_prompt"],