Update climate-evaluation.py
climate-evaluation.py   +37 -37
CHANGED
@@ -25,13 +25,13 @@ Datasets for Climate Evaluation.
 
 _HOMEPAGE = "https://arxiv.org/abs/2401.09646"
 
-_URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main/"
+# _URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main/"
 
 _LICENSE = ""
 
 # _BASE_HF_URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main" #Path("./")
 
-
+_BASE_HF_URL = Path("./")
 
 # print(f"_BASE_HF_URL: {_BASE_HF_URL}")
 
@@ -371,16 +371,16 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
         ]
 
         if self.config.name == "exeter":
-            # urls_to_download={
-            #     "train": _BASE_HF_URL / data_dir / f"training.csv",
-            #     "valid": _BASE_HF_URL / data_dir / f"validation.csv",
-            #     "test": _BASE_HF_URL / data_dir / f"test.csv"
-            # }
             urls_to_download={
-                "train": _URL + os.path.join(data_dir or "", "training.csv"),
-                "valid": _URL + os.path.join(data_dir or "", "validation.csv"),
-                "test": _URL + os.path.join(data_dir or "", "test.csv"),
+                "train": _BASE_HF_URL / data_dir / f"training.csv",
+                "valid": _BASE_HF_URL / data_dir / f"validation.csv",
+                "test": _BASE_HF_URL / data_dir / f"test.csv"
             }
+            # urls_to_download={
+            #     "train": _URL + os.path.join(data_dir or "", "training.csv"),
+            #     "valid": _URL + os.path.join(data_dir or "", "validation.csv"),
+            #     "test": _URL + os.path.join(data_dir or "", "test.csv"),
+            # }
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
                 datasets.SplitGenerator(
@@ -420,18 +420,18 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 ],
             }
             # os.path.join(data_dir or "", f)
-            # urls_to_download={
-            #     "train": [_BASE_HF_URL / data_dir / f for f in files["train"]],
-            #     "valid": [_BASE_HF_URL / data_dir / f for f in files["valid"]],
-            #     "test": [_BASE_HF_URL / data_dir / f for f in files["test"]],
-            # }
-
             urls_to_download={
-                "train": [_URL + os.path.join(data_dir or "", f) for f in files["train"]],
-                "valid": [_URL + os.path.join(data_dir or "", f) for f in files["valid"]],
-                "test": [_URL + os.path.join(data_dir or "", f) for f in files["test"]],
+                "train": [_BASE_HF_URL / data_dir / f for f in files["train"]],
+                "valid": [_BASE_HF_URL / data_dir / f for f in files["valid"]],
+                "test": [_BASE_HF_URL / data_dir / f for f in files["test"]],
             }
 
+            # urls_to_download={
+            #     "train": [_URL + os.path.join(data_dir or "", f) for f in files["train"]],
+            #     "valid": [_URL + os.path.join(data_dir or "", f) for f in files["valid"]],
+            #     "test": [_URL + os.path.join(data_dir or "", f) for f in files["test"]],
+            # }
+
 
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
@@ -468,17 +468,17 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             # "corporations": "Corporations/Corporations Responses/Climate Change",
             # "combined": "Combined",
             # }
-            # urls_to_download={
-            #     "train": _BASE_HF_URL / data_dir / f"train_qa.csv",
-            #     "valid": _BASE_HF_URL / data_dir / f"val_qa.csv",
-            #     "test": _BASE_HF_URL / data_dir / f"test_qa.csv"
-            # }
-
             urls_to_download={
-                "train": _URL + os.path.join(data_dir or "", "train_qa.csv"),
-                "valid": _URL + os.path.join(data_dir or "", "val_qa.csv"),
-                "test": _URL + os.path.join(data_dir or "", "test_qa.csv"),
+                "train": _BASE_HF_URL / data_dir / f"train_qa.csv",
+                "valid": _BASE_HF_URL / data_dir / f"val_qa.csv",
+                "test": _BASE_HF_URL / data_dir / f"test_qa.csv"
             }
+
+            # urls_to_download={
+            #     "train": _URL + os.path.join(data_dir or "", "train_qa.csv"),
+            #     "valid": _URL + os.path.join(data_dir or "", "val_qa.csv"),
+            #     "test": _URL + os.path.join(data_dir or "", "test_qa.csv"),
+            # }
 
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
@@ -520,17 +520,17 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 ),
             ]
 
-            # urls_to_download={
-            #     "train": _BASE_HF_URL / data_dir / f"train.csv", #os.path.join(data_dir or "", "train.csv"),
-            #     "valid": _BASE_HF_URL / data_dir / f"val.csv", #+ os.path.join(data_dir or "", "val.csv"),
-            #     "test": _BASE_HF_URL / data_dir / f"test.csv", #+ os.path.join(data_dir or "", "test.csv")
-            # }
-
             urls_to_download={
-                "train": _URL + os.path.join(data_dir or "", "train.csv"),
-                "valid": _URL + os.path.join(data_dir or "", "val.csv"),
-                "test": _URL + os.path.join(data_dir or "", "test.csv")
+                "train": _BASE_HF_URL / data_dir / f"train.csv", #os.path.join(data_dir or "", "train.csv"),
+                "valid": _BASE_HF_URL / data_dir / f"val.csv", #+ os.path.join(data_dir or "", "val.csv"),
+                "test": _BASE_HF_URL / data_dir / f"test.csv", #+ os.path.join(data_dir or "", "test.csv")
             }
+
+            # urls_to_download={
+            #     "train": _URL + os.path.join(data_dir or "", "train.csv"),
+            #     "valid": _URL + os.path.join(data_dir or "", "val.csv"),
+            #     "test": _URL + os.path.join(data_dir or "", "test.csv")
+            # }
 
             # print(f"urls_to_download['train']: {urls_to_download['train']}")
             # print(f"urls_to_download['valid']: {urls_to_download['valid']}")
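Taken together, the hunks make one change, repeated per config branch: the remote download URLs built with _URL + os.path.join(...) are commented out, and urls_to_download now points at local relative paths built from _BASE_HF_URL = Path("./"). Below is a minimal sketch of the two styles for comparison; the data_dir value and the pathlib import are assumptions for illustration, not taken verbatim from this diff.

import os
from pathlib import Path  # assumed to be imported elsewhere in climate-evaluation.py

_URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main/"
_BASE_HF_URL = Path("./")

data_dir = "exeter"  # hypothetical directory name, for illustration only

# Old style (now commented out in the script): remote URL via string concatenation.
remote_train = _URL + os.path.join(data_dir or "", "training.csv")
# -> "https://huggingface.co/datasets/eci-io/climate-evaluation/resolve/main/exeter/training.csv"

# New style (now active): a relative local path via pathlib's / operator.
local_train = _BASE_HF_URL / data_dir / "training.csv"
# -> a relative path equal to Path("exeter/training.csv"); the script writes the
#    filename as f"training.csv", which is equivalent since the f-string has no placeholders.

print(remote_train)
print(local_train)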