Update climate-evaluation.py
climate-evaluation.py  (+70 -37)
@@ -24,6 +24,8 @@ Datasets for Climate Evaluation.
 
 _HOMEPAGE = "https://arxiv.org/abs/2401.09646"
 
+_URL = "https://huggingface.co/datasets/eci-io/tree/main/"
+
 _LICENSE = ""
 
 _ClimateEvaluation_BASE_KWARGS = dict(
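
Each download URL in this change is composed from _URL, the config's data_dir, and a split filename. A minimal sketch of that composition, using values copied from this diff (note that _URL points at the Hub's tree/main/ browser path, while raw files are normally served from resolve/main/ paths, so the exact base is an assumption carried over from the change):

import os

# Values as they appear in this diff.
_URL = "https://huggingface.co/datasets/eci-io/tree/main/"
data_dir = "CDP/Combined/"

# The same expression the updated _split_generators uses to build a URL.
url = _URL + os.path.join(data_dir or "", "test_qa.csv")
print(url)  # https://huggingface.co/datasets/eci-io/tree/main/CDP/Combined/test_qa.csv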
@@ -172,7 +174,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 """\
                 CDP-QA is a dataset compiled from the questionnaires of the Carbon Disclosure Project, where cities, corporations, and states disclose their environmental information. The dataset presents pairs of questions and answers, and the objective is to predict whether a given answer is valid for the corresponding question. We benchmarked ClimateGPT on the questionnaires from the Combined split. """
             ),
-            data_dir="CDP",
+            data_dir="CDP/Combined/",
             text_features={"question": "question", "answer": "answer"},
             label_classes=["0", "1"],
             label_column="label",
@@ -196,7 +198,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 """\
                 The Exeter Climate Claims dataset contains textual data from 33 influential climate contrarian blogs and the climate-change-related content from 20 conservative think tanks spanning the years 1998 to 2020. Annotation of the dataset was done manually using a thorough three-layer taxonomy of (climate-change related) contrarian claims, which was developed by the authors. We utilize this dataset specifically for the binary classification task of discerning whether a given text contains a contrarian claim pertaining to climate change or not. """
             ),
-            data_dir="exeter",
+            data_dir="exeter/",
             text_features={"text": "text"},
             label_classes=["0", "1"],
             label_column="label",
@@ -335,39 +337,50 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         data_dir = self.config.data_dir
-
+        # urls_to_download = _URL
+        # print(f"self.config.data_dir: {self.config.data_dir}")
 
         if self.config.name == "exams" or self.config.name == "translated_exams":
+            urls_to_download = {
+                "test": _URL + os.path.join(data_dir or "", "test.csv")
+            }
+            downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "data_file": os.path.join(data_dir or "", "test.csv"),
+                        "data_file": downloaded_files["test"],  # os.path.join(data_dir or "", "test.csv"),
                         "split": "test",
                     },
                 ),
             ]
 
         if self.config.name == "exeter":
+            urls_to_download = {
+                "train": _URL + data_dir + "training.csv",
+                "valid": _URL + data_dir + "validation.csv",
+                "test": _URL + data_dir + "test.csv",
+            }
+            downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "data_file":
+                        "data_file": downloaded_files["train"],
                         "split": "train",
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "data_file":
+                        "data_file": downloaded_files["valid"],
                         "split": "valid",
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "data_file":
+                        "data_file": downloaded_files["test"],
                         "split": "test",
                     },
                 ),
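
All of the new code paths funnel through dl_manager.download_and_extract, which mirrors the structure of its input: given a dict mapping split names to URLs, it returns a dict mapping the same keys to local cached paths (extracting archives where applicable). A standalone sketch of that behavior, with placeholder URLs rather than the real dataset locations:

from datasets import DownloadManager

# Placeholder URLs for illustration only.
urls_to_download = {
    "train": "https://example.com/train.csv",
    "test": "https://example.com/test.csv",
}
dl_manager = DownloadManager()
downloaded_files = dl_manager.download_and_extract(urls_to_download)
# Same shape as the input: split name -> local file path in the HF cache.
print(downloaded_files["train"])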
@@ -376,7 +389,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
         if self.config.name == "climatext":
             files = {
                 "train": [
-
+                    "train-data/AL-10Ks.tsv : 3000 (58 positives, 2942 negatives) (TSV, 127138 KB).tsv",
                     "train-data/AL-Wiki (train).tsv",
                 ],
                 "valid": ["dev-data/Wikipedia (dev).tsv"],
@@ -386,95 +399,115 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                     "test-data/10-Ks (2018, test).tsv",
                 ],
             }
+            urls_to_download = {
+                "train": [_URL + os.path.join(data_dir or "", f) for f in files["train"]],
+                "valid": [_URL + os.path.join(data_dir or "", f) for f in files["valid"]],
+                "test": [_URL + os.path.join(data_dir or "", f) for f in files["test"]],
+            }
+            downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "data_file": [
-                            os.path.join(data_dir or "", f) for f in files["train"]
-                        ]
+                        "data_file": downloaded_files["train"],
+                        # [
+                        #     os.path.join(data_dir or "", f) for f in files["train"]
+                        # ],
                         "split": "train",
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "data_file": [
-                            os.path.join(data_dir or "", f) for f in files["valid"]
-                        ],
+                        "data_file": downloaded_files["valid"],
                         "split": "valid",
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "data_file": [
-                            os.path.join(data_dir or "", f) for f in files["test"]
-                        ],
+                        "data_file": downloaded_files["test"],
                         "split": "test",
                     },
                 ),
             ]
 
         if self.config.name == "cdp_qa":
-            categories = {
-                "cities": "Cities/Cities Responses",
-                "states": "States",
-                "corporations": "Corporations/Corporations Responses/Climate Change",
-                "combined": "Combined",
+            # categories = {
+            #     "cities": "Cities/Cities Responses",
+            #     "states": "States",
+            #     "corporations": "Corporations/Corporations Responses/Climate Change",
+            #     "combined": "Combined",
+            # }
+            urls_to_download = {
+                "train": _URL + os.path.join(data_dir or "", "train_qa.csv"),
+                "valid": _URL + os.path.join(data_dir or "", "val_qa.csv"),
+                "test": _URL + os.path.join(data_dir or "", "test_qa.csv"),
             }
+            downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     gen_kwargs={
-                        "data_file": [
-                            (k, os.path.join(data_dir or "", v, "train_qa.csv"))
-                            for k, v in categories.items()
-                        ],
+                        "data_file": downloaded_files["train"],
+                        # "data_file": [
+                        #     (k, os.path.join(data_dir or "", v, "train_qa.csv"))
+                        #     for k, v in categories.items()
+                        # ],
                         "split": "train",
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     gen_kwargs={
-                        "data_file": [
-                            (k, os.path.join(data_dir or "", v, "val_qa.csv"))
-                            for k, v in categories.items()
-                        ],
+                        "data_file": downloaded_files["valid"],
+                        # [
+                        #     (k, os.path.join(data_dir or "", v, "val_qa.csv"))
+                        #     for k, v in categories.items()
+                        # ],
                         "split": "valid",
                     },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
                     gen_kwargs={
-                        "data_file": [
-                            (k, os.path.join(data_dir or "", v, "test_qa.csv"))
-                            for k, v in categories.items()
-                        ],
+                        "data_file": downloaded_files["test"],
+                        # [
+                        #     (k, os.path.join(data_dir or "", v, "test_qa.csv"))
+                        #     for k, v in categories.items()
+                        # ],
                         "split": "test",
                     },
                 ),
             ]
+
+        urls_to_download = {
+            "train": _URL + os.path.join(data_dir or "", "train.csv"),
+            "valid": _URL + os.path.join(data_dir or "", "val.csv"),
+            "test": _URL + os.path.join(data_dir or "", "test.csv"),
+        }
+        downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "data_file":
+                    "data_file": downloaded_files["train"],
                     "split": "train",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "data_file":
+                    "data_file": downloaded_files["valid"],
                     "split": "valid",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "data_file":
+                    "data_file": downloaded_files["test"],
                     "split": "test",
                 },
             ),
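
Once the script resolves its files through the download manager, a consumer loads any config in the usual way. A usage sketch, assuming the script is hosted as the eci-io/climate-evaluation dataset repo (the repo id is inferred from the eci-io namespace in _URL and is not confirmed by this diff):

from datasets import load_dataset

# Repo id assumed; "exeter" is one of the configs defined above.
# Recent datasets versions may also require trust_remote_code=True for script-based datasets.
dataset = load_dataset("eci-io/climate-evaluation", "exeter")
print(dataset["train"][0])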