Update climate-evaluation.py
climate-evaluation.py  (+294 -3)  CHANGED
@@ -18,6 +18,10 @@ _CITATION = """
|
|
18 |
}
|
19 |
"""
|
20 |
|
|
|
|
|
|
|
|
|
21 |
_HOMEPAGE = "https://arxiv.org/abs/2401.09646"
|
22 |
|
23 |
_LICENSE = ""
|
@@ -168,7 +172,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 """\
                 CDP-QA is a dataset compiled from the questionnaires of the Carbon Disclosure Project, where cities, corporations, and states disclose their environmental information. The dataset presents pairs of questions and answers, and the objective is to predict whether a given answer is valid for the corresponding question. We benchmarked ClimateGPT on the questionnaires from the Combined split. """
             ),
-            data_dir="CDP
+            data_dir="CDP",
             text_features={"question": "question", "answer": "answer"},
             label_classes=["0", "1"],
             label_column="label",
@@ -213,10 +217,10 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             ),
         ),
         ClimateEvaluationConfig(
-            name="
+            name="translated_exams",
             description=textwrap.dedent(
                 """\
-                EXAMS is a multiple choice question answering collected from high school examinations. To evaluate ClimateGPT on the cascaded machine translation approach, we evaluate on the Arabic subset of this dataset. The Arabic subset covers questions from biology, physics, science, social science and Islamic studies.
+                EXAMS is a multiple-choice question answering dataset collected from high school examinations. To evaluate ClimateGPT with the cascaded machine translation approach, we evaluate on the English translation of the Arabic subset of this dataset. The Arabic subset covers questions from biology, physics, science, social science and Islamic studies.
                 """
             ),
             data_dir="exams/translated",
@@ -250,4 +254,291 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 }"""
             ),
         ),
+        ClimateEvaluationConfig(
+            name="exams",
+            description=textwrap.dedent(
+                """\
+                EXAMS is a multiple-choice question answering dataset collected from high school examinations. To evaluate ClimateGPT with the cascaded machine translation approach, we evaluate on the Arabic subset of this dataset. The Arabic subset covers questions from biology, physics, science, social science and Islamic studies. Note that this dataset is in Arabic.
+                """
+            ),
+            data_dir="exams/",
+            text_features={"subject": "subject", "question_stem": "question_stem", "choices": "choices"},
+            label_classes=["A", "B", "C", "D"],
+            label_column="answerKey",
+            url="https://arxiv.org/abs/2301.04253",
+            citation=textwrap.dedent(
+                """\
+                @inproceedings{hardalov-etal-2020-exams,
+                    title = "{EXAMS}: A Multi-subject High School Examinations Dataset for Cross-lingual and Multilingual Question Answering",
+                    author = "Hardalov, Momchil and
+                      Mihaylov, Todor and
+                      Zlatkova, Dimitrina and
+                      Dinkov, Yoan and
+                      Koychev, Ivan and
+                      Nakov, Preslav",
+                    editor = "Webber, Bonnie and
+                      Cohn, Trevor and
+                      He, Yulan and
+                      Liu, Yang",
+                    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
+                    month = nov,
+                    year = "2020",
+                    address = "Online",
+                    publisher = "Association for Computational Linguistics",
+                    url = "https://aclanthology.org/2020.emnlp-main.438",
+                    doi = "10.18653/v1/2020.emnlp-main.438",
+                    pages = "5427--5444",
+                }"""
+            ),
+        ),
     ]
+
+    def _info(self):
+        if self.config.name == "exams" or self.config.name == "translated_exams":
+            features = datasets.Features(
+                {
+                    "subject": datasets.Value("string"),
+                    "question_stem": datasets.Value("string"),
+                    "answerKey": datasets.ClassLabel(names=["A", "B", "C", "D"]),
+                    "choices": {
+                        "text": datasets.features.Sequence(datasets.Value("string")),
+                        "label": datasets.ClassLabel(names=["A", "B", "C", "D"]),
+                    },
+                }
+            )
+        else:
+            if self.config.name == "cdp_qa":
+                features = {
+                    text_feature: datasets.Value("string")
+                    for text_feature in self.config.text_features.keys()
+                }
+                features["category"] = datasets.Value("string")
+            else:
+                features = {
+                    text_feature: datasets.Value("string")
+                    for text_feature in self.config.text_features.keys()
+                }
+            if self.config.label_classes:
+                features["label"] = datasets.features.ClassLabel(
+                    names=self.config.label_classes
+                )
+            else:
+                features["label"] = datasets.Value("float32")
+            features["idx"] = datasets.Value("int32")
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(features),
+            homepage=self.config.url,
+            citation=self.config.citation + "\n" + _CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        data_dir = self.config.data_dir
+
+        if self.config.name == "exams" or self.config.name == "translated_exams":
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "data_file": os.path.join(data_dir or "", "test.csv"),
+                        "split": "test",
+                    },
+                ),
+            ]
+
+        if self.config.name == "exeter":
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "data_file": os.path.join(data_dir or "", "training.csv"),
+                        "split": "train",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "data_file": os.path.join(data_dir or "", "validation.csv"),
+                        "split": "valid",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "data_file": os.path.join(data_dir or "", "test.csv"),
+                        "split": "test",
+                    },
+                ),
+            ]
+
+        if self.config.name == "climate_fever":
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "data_file": os.path.join(
+                            data_dir or "", "climate-fever-dataset-r1.jsonl"
+                        ),
+                        "split": "test",
+                    },
+                ),
+            ]
+
+        if self.config.name == "climatext":
+            files = {
+                "train": [
+                    "train-data/AL-10Ks.tsv : 3000 (58 positives, 2942 negatives) (TSV, 127138 KB).tsv",
+                    "train-data/AL-Wiki (train).tsv",
+                ],
+                "valid": ["dev-data/Wikipedia (dev).tsv"],
+                "test": [
+                    "test-data/Claims (test).tsv",
+                    "test-data/Wikipedia (test).tsv",
+                    "test-data/10-Ks (2018, test).tsv",
+                ],
+            }
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "data_file": [
+                            os.path.join(data_dir or "", f) for f in files["train"]
+                        ],
+                        "split": "train",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "data_file": [
+                            os.path.join(data_dir or "", f) for f in files["valid"]
+                        ],
+                        "split": "valid",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "data_file": [
+                            os.path.join(data_dir or "", f) for f in files["test"]
+                        ],
+                        "split": "test",
+                    },
+                ),
+            ]
+
+        if self.config.name == "cdp_qa":
+            categories = {
+                "cities": "Cities/Cities Responses",
+                "states": "States",
+                "corporations": "Corporations/Corporations Responses/Climate Change",
+                "combined": "Combined",
+            }
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "data_file": [
+                            (k, os.path.join(data_dir or "", v, "train_qa.csv"))
+                            for k, v in categories.items()
+                        ],
+                        "split": "train",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "data_file": [
+                            (k, os.path.join(data_dir or "", v, "val_qa.csv"))
+                            for k, v in categories.items()
+                        ],
+                        "split": "valid",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "data_file": [
+                            (k, os.path.join(data_dir or "", v, "test_qa.csv"))
+                            for k, v in categories.items()
+                        ],
+                        "split": "test",
+                    },
+                ),
+            ]
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "data_file": os.path.join(data_dir or "", "train.csv"),
+                    "split": "train",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "data_file": os.path.join(data_dir or "", "val.csv"),
+                    "split": "valid",
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "data_file": os.path.join(data_dir or "", "test.csv"),
+                    "split": "test",
+                },
+            ),
+        ]
+
+    def _generate_examples(self, data_file, split):
+        if self.config.name == "climatext":
+            idx = iter(range(100000))
+            for file in data_file:
+                yield from self._process_file(file, delimiter="\t", idx=idx)
+        elif self.config.name == "cdp_qa":
+            idx = iter(range(10000000))
+            for category, file in data_file:
+                yield from self._process_file(file, idx=idx, category=category)
+        else:
+            yield from self._process_file(data_file)
+
+    def _process_file(self, data_file, delimiter=",", idx=None, category=None):
+        with open(data_file, encoding="utf8") as f:
+            process_label = self.config.process_label
+            label_classes = self.config.label_classes
+            reader = csv.DictReader(f, delimiter=delimiter, quoting=csv.QUOTE_ALL)
+            for n, row in enumerate(reader):
+                example = {
+                    feat: row[col] for feat, col in self.config.text_features.items()
+                }
+                if idx:
+                    example["idx"] = next(idx)
+                else:
+                    example["idx"] = n
+
+                if category:
+                    example["category"] = category
+
+                if self.config.label_column in row:
+                    label = row[self.config.label_column]
+                    # For some tasks, the label is represented as 0 and 1 in the tsv
+                    # files and needs to be cast to integer to work with the feature.
+                    if label_classes and label not in label_classes:
+                        label = int(label) if label else None
+                    example["label"] = process_label(label)
+                else:
+                    example["label"] = process_label(-1)
+
+                # Filter out corrupted rows.
+                for value in example.values():
+                    if value is None:
+                        break
+                else:
+                    yield example["idx"], example
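A note on the row filter that closes `_process_file`: it relies on Python's `for ... else`, where the `else` branch runs only when the loop finishes without hitting `break`, so an example is yielded only if none of its fields is None. A standalone sketch of the idiom (the function and data here are illustrative, not from the script):

```python
# Python's for/else: the else branch runs only when the loop was not
# interrupted by break, i.e. when every field passed the None check.
def keep_complete_rows(rows):
    for row in rows:
        for value in row.values():
            if value is None:
                break  # a missing field: skip this row entirely
        else:
            yield row  # no break occurred, so the row is complete

rows = [{"question": "Q1", "label": "1"}, {"question": None, "label": "0"}]
print(list(keep_complete_rows(rows)))  # only the first row survives
```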
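For context, a minimal sketch of how the new configs might be loaded once the data files are in place; the local script path and data layout are assumptions, not part of this commit. `_split_generators` resolves files relative to each config's `data_dir`, e.g. `exams/test.csv` for `exams` and `CDP/<category>/train_qa.csv` for `cdp_qa`:

```python
# Hypothetical usage of the loading script added above; paths are assumed.
from datasets import load_dataset

# The Arabic EXAMS config only defines a TEST split (see _split_generators).
exams = load_dataset("./climate-evaluation.py", "exams")
print(exams["test"].features["answerKey"])  # ClassLabel(names=["A", "B", "C", "D"])

# CDP-QA defines train/validation/test and tags each example with its category.
cdp_qa = load_dataset("./climate-evaluation.py", "cdp_qa")
print(cdp_qa["train"][0]["category"])
```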
|