pminervini committed
Commit f8ae921
1 Parent(s): 8bb4b16
lm_eval/__init__.py ADDED
File without changes
lm_eval/tasks/__init__.py ADDED
File without changes
lm_eval/tasks/halueval/halueval_qa.yaml ADDED
@@ -0,0 +1,32 @@
+ task: halueval_qa
+ dataset_path: pminervini/HaluEval
+ dataset_name: qa_samples
+ output_type: generate_until
+ training_split: data
+ validation_split: data
+ doc_to_text: !function utils.doc_to_text_qa
+ doc_to_target: !function utils.doc_to_target_qa
+ process_results: !function utils.process_results_qa
+ fewshot_delimiter: "\n"
+ generation_kwargs:
+   until:
+     - "\n"
+     - "."
+     - ","
+   do_sample: false
+   temperature: 0.0
+ filter_list:
+   - name: remove_whitespace
+     filter:
+       - function: remove_whitespace
+       - function: take_first
+ target_delimiter: " "
+ metric_list:
+   - metric: em
+     aggregation: mean
+     higher_is_better: true
+   - metric: f1
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   - version: 0.0
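
For orientation, a minimal sketch of what the dataset_path / dataset_name / split settings above resolve to. It assumes the `datasets` library is installed and that the qa_samples config has already been pushed by the upload script later in this commit; the column names come from that script.

from datasets import load_dataset

# training_split and validation_split both point at the single "data" split.
qa = load_dataset("pminervini/HaluEval", "qa_samples", split="data")
print(qa.column_names)  # expected: ['knowledge', 'question', 'answer', 'hallucination']
print(qa[0]["question"], qa[0]["hallucination"])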
lm_eval/tasks/halueval/utils.py ADDED
@@ -0,0 +1,87 @@
+ import transformers.data.metrics.squad_metrics as squad_metrics
+
+ QA_INSTRUCTIONS = """I want you act as an answer judge. Given a question and an answer, your objective is to determine if the provided answer contains non-factual or hallucinated information. You SHOULD give your judgement based on the following hallucination types and the world knowledge.
+
+ You are trying to determine if the answer misunderstands the question context and intention.
+ #Question#: What is a rare breed of dog that was derived as a variant of Rat Terrier, Shiloh Shepherd dog or American Hairless Terrier?
+ #Answer#: American Hairless Terrier
+ #Your Judgement#: No
+
+ You are trying to determine if there is a factual contradiction between the answer and the world knowledge. Some information in the answer might be fabricated.
+ #Question#: Are the New Orleans Outfall Canals the same length as the Augusta Canal?
+ #Answer#: No, the New Orleans Outfall Canals and the Augusta Canal are not the same length. The Orleans Canal is approximately 3.6 miles (5.8 kilometers) long while the Augusta Canal is approximately 7 miles (11.3 kilometers) long.
+ #Your Judgement#: Yes
+ #Question#: What U.S Highway gives access to Zilpo Road, and is also known as Midland Trail?
+ #Answer#: U.S Highway 70
+ #Your Judgement#: Yes
+
+ You are trying to determine if the answer is too general or too specific to answer the question at an appropriate level of specificity.
+ #Question#: What genre do Superheaven and Oceansize belong to?
+ #Answer#: Superheaven and Oceansize belong to the rock genre.
+ #Your Judgement#: No
+ #Question#: What profession do Kōbō Abe and Agatha Christie share?
+ #Answer#: Playwright.
+ #Your Judgement#: No
+
+ You are trying to determine if the answer can be correctly inferred from the knowledge.
+ #Question#: Which band has more members, Muse or The Raconteurs?
+ #Answer#: Muse has more members than The Raconteurs.
+ #Your Judgement#: Yes
+ #Question#: Which is currently more valuable, Temagami-Lorrain Mine or Meadowbank Gold Mine?
+ #Answer#: Meadowbank Gold Mine, since Meadowbank Gold Mine is still producing gold and the TemagamiLorrain Mine has been inactive for years.
+ #Your Judgement#: No
+
+ You should try your best to determine if the answer contains non-factual or hallucinated information according to the above hallucination types. The answer you give MUST be \"Yes\" or \"No\""."""
+
+
+ def doc_to_text_qa(doc: dict[str, str]) -> str:
+     # Judge prompt: the instructions above followed by the question/answer pair to assess.
+     doc_text = QA_INSTRUCTIONS + "\n\n#Question#: " + doc["question"] + "\n#Answer#: " + doc["answer"] + "\n#Your Judgement#:"
+     return doc_text
+
+
+ def doc_to_target_qa(doc: dict[str, str]) -> str:
+     # Gold judgement: "yes" if the stored answer is hallucinated, "no" otherwise.
+     return doc["hallucination"]
+
+
+ def em(gold_list: list[str], prediction: str) -> float:
+     # Exact match on the normalised answer (squad_metrics.compute_exact).
+     em_sum = 0.0
+     if len(gold_list) > 1:
+         for i in range(len(gold_list)):
+             gold_answers = gold_list[0:i] + gold_list[i + 1 :]
+             # The prediction is compared against the remaining golds; take the maximum.
+             em_sum += max(squad_metrics.compute_exact(a, prediction) for a in gold_answers)
+     else:
+         em_sum += max(squad_metrics.compute_exact(a, prediction) for a in gold_list)
+     return em_sum / max(1, len(gold_list))
+
+
+ def compute_metrics(gold_list: list[str], prediction: str) -> dict[str, float]:
+     f1_sum = 0.0
+     em_sum = 0.0
+
+     if len(gold_list) > 1:
+         for i in range(len(gold_list)):
+             gold_answers = gold_list[0:i] + gold_list[i + 1 :]
+             # The prediction is compared against the remaining golds; take the maximum.
+             em_sum += max(squad_metrics.compute_exact(a, prediction) for a in gold_answers)
+             f1_sum += max(squad_metrics.compute_f1(a, prediction) for a in gold_answers)
+     else:
+         em_sum += max(squad_metrics.compute_exact(a, prediction) for a in gold_list)
+         f1_sum += max(squad_metrics.compute_f1(a, prediction) for a in gold_list)
+
+     return {
+         "em": em_sum / max(1, len(gold_list)),
+         "f1": f1_sum / max(1, len(gold_list)),
+     }
+
+
+ def process_results_qa(doc: dict[str, str], results: list[str]) -> dict[str, float]:
+     # doc_to_target_qa returns a single label string, so wrap it in a list for compute_metrics.
+     gold_list = [doc_to_target_qa(doc)]
+     # Keep only the first line of the first generation as the predicted judgement.
+     pred = results[0].strip().split("\n")[0]
+     scores = compute_metrics(gold_list, pred)
+     return scores
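
A quick way to sanity-check these helpers outside the harness. The toy doc below is invented for illustration, and the snippet assumes it is run from lm_eval/tasks/halueval (so `utils` is importable) with `transformers` installed.

import utils

doc = {
    "knowledge": "The Eiffel Tower is in Paris.",
    "question": "Where is the Eiffel Tower?",
    "answer": "The Eiffel Tower is in Rome.",
    "hallucination": "yes",
}

prompt = utils.doc_to_text_qa(doc)               # judge instructions + question/answer pair
scores = utils.process_results_qa(doc, ["yes"])  # a single model generation
print(scores)  # {'em': 1.0, 'f1': 1.0} when the judgement matches the gold label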
scripts/data/dialogue_data.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05caf10c3a95b8102a5c8eda093586daa15d7c633658520dfb1ea938172371cc
+ size 1861371
scripts/data/general_data.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b380d8c981662d5597eaa9bb5a4116971b915a35f1cbae29af9658fc8776f677
+ size 1051292
scripts/data/qa_data.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4b67b18c37f19e12b35b4856d983a8d4d9653aaf5e9940862fd27329b92c00a
+ size 1995662
scripts/data/summarization_data.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b44a4083b14dd647c0ff7f04de0391fd3860befd0e5ca84c8492b08732270eac
+ size 16445285
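
The four .json.gz files above are Git LFS pointers to the raw HaluEval dumps consumed by the upload script below. A hedged sketch for peeking at one locally, assuming the LFS objects have been fetched and the files are newline-delimited JSON (which is what the script's json loader expects); the path is illustrative.

import gzip
import json

with gzip.open("scripts/data/qa_data.json.gz", "rt", encoding="utf-8") as f:
    first = json.loads(f.readline())

print(sorted(first.keys()))  # e.g. hallucinated_answer / knowledge / question / right_answer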
scripts/halueval-upload-cli.py ADDED
@@ -0,0 +1,69 @@
+ #!/usr/bin/env python3
+
+ import random
+ import requests
+
+ from datasets import load_dataset, Dataset, DatasetDict
+
+
+ path = 'pminervini/HaluEval'
+
+ API_URL = f"https://datasets-server.huggingface.co/splits?dataset={path}"
+ response = requests.get(API_URL)
+ res_json = response.json()
+
+ gold_splits = {'dialogue', 'qa', 'summarization', 'general'}
+
+ available_splits = {split['config'] for split in res_json['splits']} if 'splits' in res_json else set()
+
+ name_to_ds = dict()
+
+ for name in gold_splits:
+     ds = load_dataset("json", data_files={'data': f"data/{name}_data.json"})
+     name_to_ds[name] = ds
+     # if name not in available_splits:
+     ds.push_to_hub(path, config_name=name)
+
+
+ def list_to_dict(lst: list) -> dict:
+     # Convert a list of row dicts into a dict of column lists, the layout Dataset.from_dict expects.
+     res = dict()
+     for entry in lst:
+         for k, v in entry.items():
+             if k not in res:
+                 res[k] = []
+             res[k] += [v]
+     return res
+
+
+ for name in (gold_splits - {'general'}):
+     random.seed(42)
+     ds = name_to_ds[name]
+     new_entry_lst = []
+
+     for entry in ds['data']:
+         # Flip a coin to decide whether to keep the right answer or the hallucinated one.
+         is_hallucinated = random.random() > 0.5
+         if name in {'qa'}:
+             new_entry = {
+                 'knowledge': entry['knowledge'],
+                 'question': entry['question'],
+                 'answer': entry[f'{"hallucinated" if is_hallucinated else "right"}_answer'],
+                 'hallucination': 'yes' if is_hallucinated else 'no'
+             }
+             new_entry_lst += [new_entry]
+         if name in {'dialogue'}:
+             new_entry = {
+                 'knowledge': entry['knowledge'],
+                 'dialogue_history': entry['dialogue_history'],
+                 'response': entry[f'{"hallucinated" if is_hallucinated else "right"}_response'],
+                 'hallucination': 'yes' if is_hallucinated else 'no'
+             }
+             new_entry_lst += [new_entry]
+         if name in {'summarization'}:
+             new_entry = {
+                 'document': entry['document'],
+                 'summary': entry[f'{"hallucinated" if is_hallucinated else "right"}_summary'],
+                 'hallucination': 'yes' if is_hallucinated else 'no'
+             }
+             new_entry_lst += [new_entry]
+
+     new_ds_map = list_to_dict(new_entry_lst)
+     new_ds = Dataset.from_dict(new_ds_map)
+     new_dsd = DatasetDict({'data': new_ds})
+
+     new_dsd.push_to_hub(path, config_name=f'{name}_samples')
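
As a quick local check of the row-to-column conversion the script relies on, something like the following could be appended to it (the example rows are made up; Dataset.from_dict is the same call used above).

rows = [
    {"question": "q1", "answer": "a1", "hallucination": "no"},
    {"question": "q2", "answer": "a2", "hallucination": "yes"},
]

columns = list_to_dict(rows)
# -> {'question': ['q1', 'q2'], 'answer': ['a1', 'a2'], 'hallucination': ['no', 'yes']}
print(Dataset.from_dict(columns))  # a Dataset with 2 rows and 3 columns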