Rricha committed
Commit 9365349
1 Parent(s): 29ef767

Update climate-evaluation.py

Files changed (1): climate-evaluation.py (+35 -19)
climate-evaluation.py CHANGED
@@ -5,6 +5,7 @@ import datasets
 import csv
 import textwrap
 import json
+from pathlib import Path
 
 
 _CITATION = """
@@ -28,6 +29,10 @@ _URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/blob/main/"
 
 _LICENSE = ""
 
+_BASE_HF_URL = Path("./")
+
+print(f"_BASE_HF_URL: {_BASE_HF_URL}")
+
 _ClimateEvaluation_BASE_KWARGS = dict(
     citation=_CITATION,
     url=_HOMEPAGE,
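The substantive change in this commit: split files are no longer addressed by absolute URLs built with string concatenation and os.path.join against the "blob/main" tree, but by repo-relative pathlib.Path objects that the datasets download manager can resolve against the dataset repository itself. A minimal sketch of the difference, reusing the _URL and data_dir values from this script; the comparison code is illustrative and not part of the commit:

import os
from pathlib import Path

_URL = "https://huggingface.co/datasets/eci-io/climate-evaluation/blob/main/"
data_dir = "CDP/Combined"

# Old style: an absolute URL into "blob/main", which serves the HTML file
# viewer page rather than the raw CSV.
old = _URL + os.path.join(data_dir, "test.csv")
print(old)  # https://huggingface.co/datasets/eci-io/climate-evaluation/blob/main/CDP/Combined/test.csv

# New style: a relative path; pathlib drops the leading "." when joining,
# leaving a path the download manager resolves inside the repo.
new = Path("./") / data_dir / "test.csv"
print(new)  # CDP/Combined/test.csv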
@@ -174,7 +179,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             """\
             CDP-QA is a dataset compiled from the questionnaires of the Carbon Disclosure Project, where cities, corporations, and states disclose their environmental information. The dataset presents pairs of questions and answers, and the objective is to predict whether a given answer is valid for the corresponding question. We benchmarked ClimateGPT on the questionnaires from the Combined split. """
         ),
-        data_dir="CDP/Combined/",
+        data_dir="CDP/Combined",
         text_features={"question": "question", "answer": "answer"},
         label_classes=["0", "1"],
         label_column="label",
@@ -315,7 +320,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 text_feature: datasets.Value("string")
                 for text_feature in self.config.text_features.keys()
             }
-            features["category"] = datasets.Value("string")
+            # features["category"] = datasets.Value("string")
         else:
             features = {
                 text_feature: datasets.Value("string")
@@ -342,7 +347,7 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
 
         if self.config.name == "exams" or self.config.name == "translated_exams":
             urls_to_download={
-                "test": _URL + os.path.join(data_dir or "", "test.csv")
+                "test": _BASE_HF_URL / data_dir / f"test.csv"
             }
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
@@ -357,9 +362,9 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
 
         if self.config.name == "exeter":
             urls_to_download={
-                "train": _URL + data_dir + "training.csv",
-                "valid": _URL + data_dir + "validation.csv",
-                "test": _URL + data_dir + "test.csv"
+                "train": _BASE_HF_URL / data_dir / f"training.csv",
+                "valid": _BASE_HF_URL / data_dir / f"validation.csv",
+                "test": _BASE_HF_URL / data_dir / f"test.csv"
             }
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
@@ -400,9 +405,9 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 ],
             }
             urls_to_download={
-                "train": [_URL+os.path.join(data_dir or "", f) for f in files["train"]],
-                "valid": [_URL+os.path.join(data_dir or "", f) for f in files["valid"]],
-                "test": [_URL+os.path.join(data_dir or "", f) for f in files["test"]],
+                "train": [_BASE_HF_URL / data_dir / f for f in files["train"]],
+                "valid": [_BASE_HF_URL / data_dir / f for f in files["valid"]],
+                "test": [_BASE_HF_URL / data_dir / f for f in files["test"]],
             }
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
             return [
@@ -440,11 +445,13 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             # "combined": "Combined",
             # }
             urls_to_download={
-                "train": _URL + os.path.join(data_dir or "", "train_qa.csv"),
-                "valid": _URL + os.path.join(data_dir or "", "val_qa.csv"),
-                "test": _URL + os.path.join(data_dir or "", "test_qa.csv")
+                "train": _BASE_HF_URL / data_dir / f"train_qa.csv",
+                "valid": _BASE_HF_URL / data_dir / f"val_qa.csv",
+                "test": _BASE_HF_URL / data_dir / f"test_qa.csv"
             }
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+            print(f"downloaded_files: {downloaded_files['train']}")
 
             return [
                 datasets.SplitGenerator(
@@ -483,10 +490,12 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
             ]
 
             urls_to_download={
-                "train": _URL + os.path.join(data_dir or "", "train.csv"),
-                "valid": _URL + os.path.join(data_dir or "", "val.csv"),
-                "test": _URL + os.path.join(data_dir or "", "test.csv")
+                "train": _BASE_HF_URL / data_dir / f"train.csv", #os.path.join(data_dir or "", "train.csv"),
+                "valid": _BASE_HF_URL / data_dir / f"val.csv", #+ os.path.join(data_dir or "", "val.csv"),
+                "test": _BASE_HF_URL / data_dir / f"test.csv", #+ os.path.join(data_dir or "", "test.csv")
             }
+            # print(f"urls_to_download['train']: {urls_to_download['train']}")
+            # print(f"urls_to_download['valid']: {urls_to_download['valid']}")
             downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
             return [
@@ -520,17 +529,24 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 yield from self._process_file(file, delimiter="\t", idx=idx)
         elif self.config.name == "cdp_qa":
             idx = iter(range(10000000))
-            for category, file in data_file:
-                yield from self._process_file(file, idx=idx, category=category)
+            print(f"!!!data_file: {data_file}")
+            # for file in data_file:
+            yield from self._process_file(data_file, idx=idx)
         else:
             yield from self._process_file(data_file)
 
     def _process_file(self, data_file, delimiter=",", idx=None, category=None):
+        print(f"data_file: {data_file}")
         with open(data_file, encoding="utf8") as f:
             process_label = self.config.process_label
             label_classes = self.config.label_classes
+            # print(f"self.config.text_features: {self.config.text_features}")
+            # data = f.read()
+            # print(f"data: {data}")
             reader = csv.DictReader(f, delimiter=delimiter, quoting=csv.QUOTE_ALL)
+            # print(f"reader: {reader}")
             for n, row in enumerate(reader):
+                # print(f"row: {row}")
                 example = {
                     feat: row[col] for feat, col in self.config.text_features.items()
                 }
@@ -539,8 +555,8 @@ class ClimateEvaluation(datasets.GeneratorBasedBuilder):
                 else:
                     example["idx"] = n
 
-                if category:
-                    example["category"] = category
+                # if category:
+                #     example["category"] = category
 
                 if self.config.label_column in row:
                     # print(f"self.config.label_column: {self.config.label_column}")
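For context on the last two hunks: _process_file reads each split with csv.DictReader under QUOTE_ALL quoting and renames the configured text_features columns to feature names. A self-contained sketch of that row-to-example mapping; the sample row is invented, and only the column names are borrowed from the cdp_qa config:

import csv
import io

# One hypothetical CDP-QA row, fully quoted as the script expects.
csv_text = '"question","answer","label"\n"Is the city exposed to flooding?","Yes, along the river.","1"\n'
text_features = {"question": "question", "answer": "answer"}  # feature -> column

reader = csv.DictReader(io.StringIO(csv_text), quoting=csv.QUOTE_ALL)
for n, row in enumerate(reader):
    example = {feat: row[col] for feat, col in text_features.items()}
    example["idx"] = n  # the fallback used when no idx iterator is passed
    print(example)  # {'question': 'Is the city exposed to flooding?', 'answer': 'Yes, along the river.', 'idx': 0}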
 
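Assuming the script loads cleanly after this commit, the changed cdp_qa path can be exercised as below. The config name comes from the script itself; trust_remote_code is an assumption about the datasets version in use, since newer releases require it for script-based datasets:

from datasets import load_dataset

ds = load_dataset("eci-io/climate-evaluation", "cdp_qa", trust_remote_code=True)
print(ds)  # inspect the available splits and row counts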