BK-Lee committed
Commit 0c18e2a · 1 Parent(s): ebe1140
.gitattributes DELETED
@@ -1,35 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -99,7 +99,8 @@ def bot_streaming(message, history):
99
  time.sleep(0.02)
100
  yield buffer
101
 
102
- demo = gr.ChatInterface(fn=bot_streaming, title="☄️ Meteor",
 
103
  description="Meteor is efficient 7B size Large Language and Vision Model built on the help of traversal of rationale",
104
  stop_btn="Stop Generation", multimodal=True)
105
  demo.launch()
 
99
  time.sleep(0.02)
100
  yield buffer
101
 
102
+ demo = gr.ChatInterface(fn=bot_streaming, title="☄️ Meteor",
103
+ examples=[{"text": "Show the detailed recipe for this dish.", "files":["./lobster.jpg"]}],
104
  description="Meteor is efficient 7B size Large Language and Vision Model built on the help of traversal of rationale",
105
  stop_btn="Stop Generation", multimodal=True)
106
  demo.launch()
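For reference, the added lines above assemble into roughly the following tail of app.py (a sketch only: the gradio import and the stub body of bot_streaming are stand-ins added for completeness, not part of this commit):

import gradio as gr

def bot_streaming(message, history):
    # The real function streams the model's answer chunk by chunk; a stub stands in here.
    yield "streamed answer"

demo = gr.ChatInterface(fn=bot_streaming, title="☄️ Meteor",
                        examples=[{"text": "Show the detailed recipe for this dish.", "files": ["./lobster.jpg"]}],
                        description="Meteor is efficient 7B size Large Language and Vision Model built on the help of traversal of rationale",
                        stop_btn="Stop Generation", multimodal=True)
demo.launch()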
eval/create_evaluator.py DELETED
@@ -1,529 +0,0 @@
1
- import os
2
- import re
3
- import json
4
- import shortuuid
5
- import numpy as np
6
- import pandas as pd
7
- from config import *
8
- from collections import defaultdict
9
- from eval.utils import *
10
-
11
- class BaseEvaluator:
12
- def __init__(self):
13
- super(BaseEvaluator, self).__init__()
14
-
15
- # Create evaluation results folder
16
- self.save_dir = os.path.join(DATASET_ROOT, "eval_results")
17
- if not os.path.exists(self.save_dir):
18
- os.makedirs(self.save_dir)
19
-
20
- def reset(self):
21
- # Reset results for new dataset evaluation
22
- self.gen_answers = []
23
- self.inputs = []
24
-
25
- def process(self, inputs, outputs):
26
- # Merge results
27
- self.inputs.extend(inputs)
28
- self.gen_answers.extend(outputs)
29
-
30
- class Evaluator(BaseEvaluator):
31
- def __init__(self):
32
- """
33
- Eval Datasets
34
-
35
- - VQAv2
36
- - GQA
37
- - SQA-IMG
38
- - VizWiz
39
- - TextVQA
40
- - POPE
41
- - MME
42
- - MMBench
43
- - MMBench-CN
44
- - QBench
45
- - MM-Vet
46
- - MMMU
47
- - MathVista
48
- - AI2D
49
- - HallusionBench
50
- - ChartQA
51
- - SEED
52
- - LLaVA Wild
53
- - BLINK
54
- - MathVerse
55
-
56
- """
57
-
58
- super().__init__()
59
-
60
- def evaluate(self, model, dataset, accel):
61
-
62
- # gathering all gpu to one device
63
- self.inputs = accel.gather_for_metrics(self.inputs)
64
- self.gen_answers = accel.gather_for_metrics(self.gen_answers)
65
-
66
- if accel.is_main_process:
67
- # check for duplicates
68
- self.inputs, self.gen_answers = remove_duplicate(dataset, self.inputs, self.gen_answers)
69
-
70
- # Select evaluation for dataset
71
- if dataset == "vqav2":
72
- return self.evaluate_vqa(model, accel)
73
- elif dataset == "gqa":
74
- return self.evaluate_gqa(model, accel)
75
- elif dataset == "sqa":
76
- return self.evaluate_sqa(model, accel)
77
- elif dataset == "vizwiz":
78
- return self.evaluate_vizwiz(model, accel)
79
- elif dataset == "textvqa":
80
- return self.evaluate_textvqa(model, accel)
81
- elif dataset == "pope":
82
- return self.evaluate_pope(model, accel)
83
- elif dataset == "mme":
84
- return self.evaluate_mme(model, accel)
85
- elif dataset == "mmbench":
86
- return self.evaluate_mmbench(model, accel)
87
- elif dataset == "mmbench_dev":
88
- return self.evaluate_mmbench_dev(model, accel)
89
- elif dataset == "mmbench_cn":
90
- return self.evaluate_mmbench_cn(model, accel)
91
- elif dataset == "mmbench_cn_dev":
92
- return self.evaluate_mmbench_cn_dev(model, accel)
93
- elif dataset == "qbench":
94
- return self.evaluate_qbench(model, accel)
95
- elif dataset == "mm-vet":
96
- return self.evaluate_mmvet(model, accel)
97
- elif dataset == "mmmu":
98
- return self.evaluate_mmmu(model, accel)
99
- elif dataset == "mathvista":
100
- return self.evaluate_mathvista(model, accel)
101
- elif dataset == "ai2d":
102
- return self.evaluate_ai2d(model, accel)
103
- elif dataset == "hallusionbench":
104
- return self.evaluate_hallusionbench(model, accel)
105
- elif dataset == "chartqa":
106
- return self.evaluate_chartqa(model, accel)
107
- elif dataset == "seed":
108
- return self.evaluate_seed(model, accel)
109
- elif dataset == "llava":
110
- return self.evaluate_llava(model, accel)
111
- elif dataset == "blink":
112
- return self.evaluate_blink(model, accel)
113
- elif dataset == "mathverse":
114
- return self.evaluate_mathverse(model, accel)
115
- elif dataset == "mmstar":
116
- return self.evaluate_mmstar(model, accel)
117
- else:
118
- raise ValueError(
119
- f'{dataset} is not an available dataset.')
120
- else:
121
- return None
122
-
123
- def evaluate_vqa(self, model, accel):
124
- # VQAv2 Evaluation for EvalAI server
125
- pred_answers = [{'question_id': inputs['id'], 'answer': answer} for inputs, answer in zip(self.inputs, self.gen_answers)]
126
- pred_pth = os.path.join(self.save_dir, f"{model}_vqav2_results.json")
127
- json.dump(pred_answers, open(pred_pth, "w"))
128
- accel.print(f"Finished evaluating VQAv2. Evaluate the result file saved to {pred_pth} on EvalAI server.")
129
- return
130
-
131
- def evaluate_gqa(self, model, accel):
132
- # GQA Evaluation
133
- pred_answers = {inputs['id']: answer for inputs, answer in zip(self.inputs, self.gen_answers)}
134
- # pred_answers = [{'question_id': inputs['id'], 'answer': answer} for inputs, answer in zip(self.inputs, self.gen_answers)]
135
- pred_pth = os.path.join(self.save_dir, f"{model}_gqa_results.json")
136
- json.dump(pred_answers, open(pred_pth, "w"))
137
- accel.print("GQA Results:")
138
- results = eval_gqa(pred_answers, json.load(open(os.path.join(DATASET_ROOT, GQA))))
139
- return results['accuracy']
140
-
141
- def evaluate_sqa(self, model, accel):
142
- # SQA Evaluation
143
- pred_answers = [{'question_id': inputs['id'], 'answer': convert_to_choice(answer, inputs['candidates']), 'gt': inputs['gt']} for inputs, answer in zip(self.inputs, self.gen_answers)]
144
- pred_pth = os.path.join(self.save_dir, f"{model}_sqa_results.json")
145
- json.dump(pred_answers, open(pred_pth, "w"))
146
-
147
- # Compute accuracy
148
- results = [(answer['answer'] == answer['gt']) for answer in pred_answers]
149
- accel.print (f"SQA Accuracy: {np.mean(results)*100} %")
150
- return np.mean(results)*100
151
-
152
- def evaluate_vizwiz(self, model, accel):
153
- # VizWiz Evaluation
154
- evaluator = EvalAIAnswerProcessor()
155
- pred_answers = [{'image': inputs['id'], 'answer': evaluator(answer)} for inputs, answer in zip(self.inputs, self.gen_answers)]
156
- pred_pth = os.path.join(self.save_dir, f"{model}_vizwiz_results.json")
157
- json.dump(pred_answers, open(pred_pth, "w"))
158
- accel.print(f"Finished evaluating VizWiz. Evaluate the result file saved to {pred_pth} on EvalAI server.")
159
- return
160
-
161
- def evaluate_textvqa(self, model, accel):
162
- # TextVQA Evaluation
163
- pred_answers = [{'question_id': inputs['id'], 'pred_answer': answer, 'question': inputs['question'], 'gt_answers': inputs['gt']} for inputs, answer in zip(self.inputs, self.gen_answers)]
164
- pred_pth = os.path.join(self.save_dir, f"{model}_textvqa_results.json")
165
- json.dump(pred_answers, open(pred_pth, "w"))
166
-
167
- evaluator = TextVQAAccuracyEvaluator()
168
- results = evaluator.eval_pred_list(pred_answers)*100
169
- accel.print (f"TextVQA Accuracy: {results} %")
170
- return results
171
-
172
- def evaluate_pope(self, model, accel):
173
- # POPE Evaluation
174
- pred_answers = [{'question_id': inputs['id'], 'answer': answer, 'question': inputs['question'], 'category': inputs['category']} for inputs, answer in zip(self.inputs, self.gen_answers)]
175
- pred_pth = os.path.join(self.save_dir, f"{model}_pope_results.json")
176
- json.dump(pred_answers, open(pred_pth, "w"))
177
-
178
- pope_results = {}
179
- pope_results['adversarial'] = None
180
- pope_results['popular'] = None
181
- pope_results['random'] = None
182
-
183
- categories = ['adversarial', 'popular', 'random']
184
- files = [POPE_ADVERSARIAL, POPE_POPULAR, POPE_RANDOM]
185
-
186
- for category, file in zip(categories, files):
187
- cur_answers = [x for x in pred_answers if x['category'] == category]
188
- cur_answers = sorted(cur_answers, key=lambda x:x["question_id"])
189
- pope_results[category] = eval_pope(cur_answers, os.path.join(DATASET_ROOT, file))
190
- accel.print (f"POPE Adversarial Accuracy: {pope_results['adversarial']} %")
191
- accel.print (f"POPE Popular Accuracy: {pope_results['popular']} %")
192
- accel.print (f"POPE Random Accuracy: {pope_results['random']} %")
193
- return pope_results
194
-
195
- def evaluate_mme(self, model, accel):
196
- # MME Evaluation
197
- pred_answers = [{'question_id': inputs['id'], 'answer': answer, "question": inputs['question'], 'category': inputs['category']} for inputs, answer in zip(self.inputs, self.gen_answers)]
198
- pred_pth = os.path.join(self.save_dir, f"{model}_mme_results.json")
199
- json.dump(pred_answers, open(pred_pth, "w"))
200
-
201
- ground_truth = get_gt(data_path=os.path.join(DATASET_ROOT, MME_DIR))
202
- result_dir = os.path.join(self.save_dir, 'mme')
203
- os.makedirs(result_dir, exist_ok=True)
204
- results = defaultdict(list)
205
-
206
- for answer in pred_answers:
207
- file = answer['question_id'].split('/')[-1].split('.')[0] + '.txt'
208
- results[answer['category']].append((file, answer['question'], answer['answer']))
209
-
210
- for category, cate_tups in results.items():
211
- with open(os.path.join(result_dir, f'{category}.txt'), 'w') as fp:
212
- questions = set() # check for duplicates
213
- for file, prompt, answer in cate_tups:
214
- if 'Answer the question using a single word or phrase.' in prompt:
215
- prompt = prompt.replace('Answer the question using a single word or phrase.', '').strip()
216
- if 'Please answer yes or no.' not in prompt:
217
- prompt = prompt + ' Please answer yes or no.'
218
- if (category, file, prompt) not in ground_truth:
219
- prompt = prompt.replace(' Please answer yes or no.', ' Please answer yes or no.')
220
- gt_ans = ground_truth[category, file, prompt]
221
- dup = file, prompt, gt_ans
222
- tup = file, prompt, gt_ans, answer
223
- if dup in questions:
224
- continue
225
- questions.add(dup)
226
- fp.write('\t'.join(tup) + '\n')
227
-
228
- evaluator = MMEEvaluator()
229
- scores = evaluator.process_result(result_dir)
230
- accel.print("MME Scores:")
231
- accel.print(scores)
232
- for eval_type, eval_scores in scores.items():
233
- accel.print("===========", eval_type, "===========")
234
- accel.print("total score:", eval_scores['total'], "\n")
235
- for task_name, score in eval_scores.items():
236
- accel.print("\t", task_name, " score:", score)
237
- accel.print("\n")
238
- return scores
239
-
240
- def evaluate_mmbench(self, model, accel):
241
- # MMBench Evaluation
242
- df = pd.read_table(os.path.join(DATASET_ROOT, MMBENCH))
243
- cur_df = df.copy()
244
- cur_df = cur_df.drop(columns=['hint', 'category', 'source', 'image', 'comment', 'l2-category'])
245
- cur_df.insert(6, 'prediction', None)
246
- for inputs, answer in zip(self.inputs, self.gen_answers):
247
- cur_df.loc[df['index'] == inputs['id'], 'prediction'] = answer
248
- pred_pth = os.path.join(self.save_dir, f"{model}_mmbench_results.xlsx")
249
- cur_df.to_excel(pred_pth, index=False, engine='openpyxl')
250
- accel.print(f"Finished evaluating MMBench. Change {pred_pth} name to submission.xlsx and evaluate the result file saved to {pred_pth} on OpenCompass server.")
251
- return
252
-
253
- def evaluate_mmbench_dev(self, model, accel):
254
- # MMBench Dev Evaluation
255
- df = pd.read_table(os.path.join(DATASET_ROOT, MMBENCH_DEV))
256
- cur_df = df.copy()
257
- cur_df = cur_df.drop(columns=['hint', 'category', 'source', 'image', 'comment', 'l2-category'])
258
- cur_df.insert(6, 'prediction', None)
259
- for inputs, answer in zip(self.inputs, self.gen_answers):
260
- cur_df.loc[df['index'] == inputs['id'], 'prediction'] = answer[0]
261
- pred_pth = os.path.join(self.save_dir, f"{model}_mmbench_dev_results.xlsx")
262
- cur_df.to_excel(pred_pth, index=False, engine='openpyxl')
263
- accuracy = (cur_df['prediction'] == cur_df['answer']).mean()
264
- accel.print(f'MMBench_dev Accuracy: {accuracy:.2%}')
265
- return
266
-
267
- def evaluate_mmbench_cn(self, model, accel):
268
- # MMBench_CN Evaluation
269
- df = pd.read_table(os.path.join(DATASET_ROOT, MMBENCH_CN))
270
- cur_df = df.copy()
271
- cur_df = cur_df.drop(columns=['hint', 'category', 'source', 'image', 'comment', 'l2-category'])
272
- cur_df.insert(6, 'prediction', None)
273
- for inputs, answer in zip(self.inputs, self.gen_answers):
274
- cur_df.loc[df['index'] == inputs['id'], 'prediction'] = answer
275
- pred_pth = os.path.join(self.save_dir, f"{model}_mmbench_cn_results.xlsx")
276
- cur_df.to_excel(pred_pth, index=False, engine='openpyxl')
277
- accel.print(f"Finished evaluating MMBench_CN. Change {pred_pth} name to submission.xlsx and evaluate the result file saved to {pred_pth} on OpenCompass server.")
278
- return
279
-
280
- def evaluate_mmbench_cn_dev(self, model, accel):
281
- # MMBench_CN Dev Evaluation
282
- df = pd.read_table(os.path.join(DATASET_ROOT, MMBENCH_CN_DEV))
283
- cur_df = df.copy()
284
- cur_df = cur_df.drop(columns=['hint', 'category', 'source', 'image', 'comment', 'l2-category'])
285
- cur_df.insert(6, 'prediction', None)
286
- for inputs, answer in zip(self.inputs, self.gen_answers):
287
- cur_df.loc[df['index'] == inputs['id'], 'prediction'] = answer[0]
288
- pred_pth = os.path.join(self.save_dir, f"{model}_mmbench_cn_dev_results.xlsx")
289
- cur_df.to_excel(pred_pth, index=False, engine='openpyxl')
290
- accuracy = (cur_df['prediction'] == cur_df['answer']).mean()
291
- accel.print(f'MMBench_CN_dev Accuracy: {accuracy:.2%}')
292
- return
293
-
294
- def evaluate_qbench(self, model, accel):
295
- # QBench Evaluation
296
- pred_answers = [{'id': inputs['id'], 'answer': convert_to_choice(answer, inputs['candidates']), 'gt': inputs['gt'], 'candidates': inputs['candidates']} for inputs, answer in zip(self.inputs, self.gen_answers)]
297
- pred_pth = os.path.join(self.save_dir, f'{model}_qbench_results.jsonl')
298
- with open(pred_pth, "w") as pf:
299
- pf.write(json.dumps(pred_answers) + "\n")
300
-
301
- results = [(pred['candidates'][pred['answer']] == pred['gt']) for pred in pred_answers]
302
- accel.print (f"QBench Accuracy: {np.mean(results)*100} %")
303
- return np.mean(results)*100
304
-
305
- def evaluate_mmvet(self, model, accel):
306
- # MM-Vet Evaluation
307
- cur_result = {f"{inputs['id']}": answer for inputs, answer in zip(self.inputs, self.gen_answers)}
308
- pred_pth = os.path.join(self.save_dir, f'{model}_mmvet_results.json')
309
- with open(pred_pth, 'w') as f:
310
- json.dump(cur_result, f, indent=2)
311
-
312
- accel.print(f"Finished evaluating MM-Vet. Evaluate the result file saved to {pred_pth}.")
313
- return
314
-
315
- def evaluate_mmmu(self, model, accel):
316
- # MMMU Evaluation
317
- predictions = {inputs['id']: answer for inputs, answer in zip(self.inputs, self.gen_answers)}
318
- answers = {inputs['id']: {'ground_truth': inputs['gt'], 'question_type': inputs['question_type']} for inputs, answer in zip(self.inputs, self.gen_answers)}
319
- pred_pth = os.path.join(self.save_dir, f'{model}_mmmu_results.json')
320
- with open(pred_pth, "w") as f:
321
- json.dump(predictions, f, indent=2)
322
- ans_pth = os.path.join(self.save_dir, 'mmmu_answers.json')
323
- with open(ans_pth, "w") as pf:
324
- json.dump(answers, pf, indent=2)
325
-
326
- # group by category
327
- output_dict_w_cat = {}
328
- for data_id, parsed_pred in predictions.items():
329
- category = "_".join(data_id.split("_")[1:-1])
330
- if category not in output_dict_w_cat:
331
- output_dict_w_cat.update({category: {}})
332
- output_dict_w_cat[category].update({data_id: parsed_pred})
333
-
334
- # group by category
335
- answer_dict_w_cat = {}
336
- for data_id, parsed_pred in answers.items():
337
- category = "_".join(data_id.split("_")[1:-1])
338
- if category not in answer_dict_w_cat:
339
- answer_dict_w_cat.update({category: {}})
340
- answer_dict_w_cat[category].update({data_id: parsed_pred})
341
-
342
- evaluation_result = {}
343
-
344
- for category in CAT_SHORT2LONG.values():
345
- accel.print("Evaluating: {}".format(category))
346
- # get cat_outputs and cat_answers
347
- try:
348
- cat_outputs = output_dict_w_cat[category]
349
- cat_answers = answer_dict_w_cat[category]
350
- except KeyError:
351
- accel.print("Skipping {} for not found".format(category))
352
- continue
353
-
354
- exampels_to_eval = []
355
- for data_id, parsed_pred in cat_outputs.items():
356
- question_type = cat_answers[data_id]['question_type']
357
- if question_type != 'multiple-choice':
358
- parsed_pred = parse_open_response(parsed_pred) # mainly for type consistency (make it number, etc.)
359
- else:
360
- parsed_pred = parsed_pred
361
-
362
- exampels_to_eval.append({
363
- "id": data_id,
364
- "question_type": question_type,
365
- "answer": cat_answers[data_id]['ground_truth'],
366
- "parsed_pred": parsed_pred
367
- })
368
-
369
- judge_dict, metric_dict = evaluate(exampels_to_eval)
370
- metric_dict.update({"num_example": len(exampels_to_eval)})
371
-
372
- evaluation_result[category] = metric_dict
373
-
374
- printable_results = {}
375
- # add domain Subject
376
- for domain, in_domain_cats in DOMAIN_CAT2SUB_CAT.items():
377
- in_domain_cat_results = {}
378
- for cat_name in in_domain_cats: # use the order in DOMAIN_CAT2SUB_CAT
379
- if cat_name in evaluation_result.keys():
380
- in_domain_cat_results[cat_name] = evaluation_result[cat_name]
381
- else:
382
- pass
383
- in_domain_ins_acc = calculate_ins_level_acc(in_domain_cat_results)
384
- in_domain_data_num = sum([cat_results['num_example'] for cat_results in in_domain_cat_results.values()])
385
- printable_results['Overall-' + domain] = {"num": int(in_domain_data_num),
386
- "acc": round(in_domain_ins_acc, 3)
387
- }
388
- # add sub category
389
- for cat_name, cat_results in in_domain_cat_results.items():
390
- printable_results[cat_name] = {"num": int(cat_results['num_example']),
391
- "acc": round(cat_results['acc'], 3)
392
- }
393
-
394
- # table.append(["-----------------------------", "-----", "----"])
395
- all_ins_acc = calculate_ins_level_acc(evaluation_result)
396
- printable_results['Overall'] = {"num": sum([cat_results['num_example'] for cat_results in evaluation_result.values()]),
397
- "acc": round(all_ins_acc, 3)
398
- }
399
-
400
- accel.print(printable_results)
401
- return
402
-
403
- def evaluate_mathvista(self, model, accel):
404
- # MathVista Evaluation
405
- pred_answers = [{'pid': inputs['id'], 'image': inputs['id'], 'response': answer,
406
- 'question_type': inputs['question_type'], 'answer_type': inputs['answer_type'], 'metadata': inputs['metadata'],
407
- 'choices': inputs['choices'], 'query': inputs['question'], 'precision': inputs['precision'],} for inputs, answer in zip(self.inputs, self.gen_answers)]
408
- predictions = {pred['pid']: pred for pred in pred_answers}
409
- pred_pth = os.path.join(self.save_dir, f"{model}_mathvista_results.json")
410
- json.dump(predictions, open(pred_pth, "w"))
411
-
412
- accel.print(f"Finished evaluating MathVista. Evaluate the result file saved to {pred_pth}.")
413
- return
414
-
415
- def evaluate_ai2d(self, model, accel):
416
- # AI2D Evaluation
417
- pred_answers = [{'question_id': inputs['id'], 'answer': answer, 'gt': inputs['gt']} for inputs, answer in zip(self.inputs, self.gen_answers)]
418
- pred_pth = os.path.join(self.save_dir, f"{model}_ai2d_results.json")
419
- json.dump(pred_answers, open(pred_pth, "w"))
420
-
421
- # Compute accuracy
422
- pattern = re.compile(r'[A-Z]')
423
- results = [(char_to_int(pattern.findall(answer)[0]) == inputs['gt']) for inputs, answer in zip(self.inputs, self.gen_answers)]
424
-
425
- accel.print(f"AI2D Accuracy: {np.mean(results)*100} %")
426
- return np.mean(results)*100
427
-
428
- def evaluate_hallusionbench(self, model, accel):
429
- # HallusionBench Evaluation
430
- pred_answers = [{'answer': '1' if answer.lower().find('yes') != -1 else '0', 'question': inputs['question'], 'gt': inputs['gt']} for inputs, answer in zip(self.inputs, self.gen_answers)]
431
- pred_pth = os.path.join(self.save_dir, f"{model}_hallusionbench_results.json")
432
- json.dump(pred_answers, open(pred_pth, "w"))
433
-
434
- # Compute accuracy
435
- results = [(answer['answer'] == answer['gt']) for answer in pred_answers]
436
- accel.print(f"HallusionBench Accuracy: {np.mean(results)*100} %")
437
- return np.mean(results)*100
438
-
439
- def evaluate_chartqa(self, model, accel):
440
- # ChartQA Evaluation
441
- # post processing
442
- processed_answers = []
443
- for x in self.gen_answers:
444
- if any(i.isdigit() for i in x):
445
- processed_answers.append(x.split(" ")[0])
446
- else:
447
- processed_answers.append(x)
448
- pred_answers = [{'answer': answer, 'question': inputs['question'], 'annotation': inputs['gt']} for inputs, answer in zip(self.inputs, processed_answers)]
449
- pred_pth = os.path.join(self.save_dir, f"{model}_chartqa_results.json")
450
- json.dump(pred_answers, open(pred_pth, "w"))
451
-
452
- # Compute accuracy
453
- acc = evaluate_relaxed_accuracy(pred_answers)
454
- accel.print(f"ChartQA Accuracy: {acc*100}%")
455
- return acc
456
-
457
- def evaluate_seed(self, model, accel):
458
- # SEED Evaluation
459
- pred_answers = [{'answer': answer, 'question': inputs['question'], 'question_id': inputs['id'], 'gt': inputs['gt'], 'question_type': inputs['question_type']} for inputs, answer in zip(self.inputs, self.gen_answers)]
460
- pred_pth = os.path.join(self.save_dir, f"{model}_seed_results.json")
461
- json.dump(pred_answers, open(pred_pth, "w"))
462
-
463
- # Compute accuracy
464
- results = [(answer['answer'] == answer['gt']) for answer in pred_answers]
465
- accel.print (f"SEED Accuracy: {np.mean(results)*100} %")
466
-
467
- # Per question type accuracy
468
- for k, v in SEED_TYPES.items():
469
- sub_results = []
470
- for pred in pred_answers:
471
- if pred['question_type'] == k:
472
- sub_results.append(pred['answer'] == pred['gt'])
473
- accel.print (f"{v}: {np.mean(sub_results)*100} %")
474
-
475
- return np.mean(results)*100
476
-
477
- def evaluate_llava(self, model, accel):
478
- # LLaVA-in-the-Wild Evaluation
479
- pred_answers = [{'question_id': inputs['id'], 'prompt': inputs['question'], 'text': answer, "answer_id": shortuuid.uuid()} for inputs, answer in zip(self.inputs, self.gen_answers)]
480
- sorted_answers = sorted(pred_answers, key=lambda x: x['question_id'])
481
- pred_pth = os.path.join(self.save_dir, f'{model}_llava_results.jsonl')
482
- ans_file = open(pred_pth, "w")
483
- for pred in sorted_answers:
484
- ans_file.write(json.dumps(pred) + "\n")
485
- ans_file.flush()
486
- ans_file.close()
487
-
488
- accel.print(f"Finished evaluating LLaVA-in-the-wild. Evaluate the result file saved to {pred_pth}.")
489
- return
490
-
491
- def evaluate_blink(self, model, accel):
492
- # BLINK Evaluation
493
- # TODO
494
- return
495
-
496
- def evaluate_mathverse(self, model, accel):
497
- # Mathverse Evaluation
498
- pred_answers = [{'sample_index' : inputs['id'], 'problem_index' : inputs['problem_index'], 'problem_version' : inputs['problem_version'],
499
- 'question' : inputs['origin_question'], 'answer' : inputs['gt'],
500
- 'question_type': inputs['question_type'], 'question_type': inputs['question_type'],
501
- 'metadata': inputs['metadata'], 'query_wo': inputs['question'], 'query_cot' : inputs['query_cot'], 'model_answer' : answer} for inputs, answer in zip(self.inputs, self.gen_answers)]
502
-
503
- # answers = [item for item in pred_answers if item['problem_version'] != 'Text_Only']
504
- # text_only_answers = [item for item in pred_answers if item['problem_version'] == 'Text_Only']
505
-
506
- pred_pth = os.path.join(self.save_dir, f'{model}_mathverse_results.json')
507
- json.dump(pred_answers, open(pred_pth, "w"))
508
- pred_pth = os.path.join(self.save_dir, f'{model}_mathverse_scores.json')
509
- eval_mathverse(self.save_dir, pred_answers,f'{model}_mathverse_extracts.json', f'{model}_mathverse_scores.json')
510
- accel.print(f"Finished evaluating MathVerse. Evaluate the result file saved to {pred_pth}.")
511
- # TODO
512
- return
513
-
514
- def evaluate_mmstar(self, model, accel):
515
- pred_answers = [{'question': inputs['question'],
516
- 'answer': inputs['answer'],
517
- 'category': inputs['category'],
518
- 'l2_category': inputs['l2_category'],
519
- # 'bench': inputs['bench'],
520
- 'prediction' : answer} for inputs, answer in zip(self.inputs, self.gen_answers)]
521
-
522
- pred_pth = os.path.join(self.save_dir, f'{model}_mmstar_results.json')
523
- json.dump(pred_answers, open(pred_pth, "w"))
524
-
525
- df = pd.DataFrame(pred_answers)
526
-
527
- eval_mmstar(df, self.save_dir, f'{model}_mmstar_scores.json')
528
- pred_pth = os.path.join(self.save_dir, f'{model}_mmstar_scores.json')
529
- accel.print(f"Finished evaluating MMStar. Evaluate the result file saved to {pred_pth}.")
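Most of the closed-form benchmarks handled above (SQA, AI2D, SEED, HallusionBench) end with the same scoring step: dump the per-question predictions to JSON, then average exact matches against the ground truth. A minimal, self-contained illustration of that pattern with toy data (not taken from the repository):

import numpy as np

# Toy predictions in the shape the evaluator writes out: one dict per question.
pred_answers = [{"answer": "A", "gt": "A"}, {"answer": "B", "gt": "C"}, {"answer": "D", "gt": "D"}]
results = [(pred["answer"] == pred["gt"]) for pred in pred_answers]
print(f"Accuracy: {np.mean(results) * 100} %")   # prints: Accuracy: 66.66666666666666 %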
eval/llavabench/eval.sh DELETED
@@ -1,11 +0,0 @@
1
- python eval/llavabench/eval_gpt_review_bench.py \
2
- --question /mnt/ssd/lbk-cvpr/dataset/llava-bench-in-the-wild/questions.jsonl \
3
- --context /mnt/ssd/lbk-cvpr/dataset/llava-bench-in-the-wild/context.jsonl \
4
- --rule eval/llavabench/rule.json \
5
- --answer-list \
6
- /mnt/ssd/lbk-cvpr/dataset/llava-bench-in-the-wild/answers_gpt4.jsonl \
7
- /mnt/ssd/lbk-cvpr/dataset/eval_results/Meteor_llava_results.jsonl \
8
- --output \
9
- /mnt/ssd/lbk-cvpr/dataset/eval_results/reviews_meteor_llava_results_step3.jsonl
10
-
11
- python eval/llavabench/summarize_gpt_review.py -f /mnt/ssd/lbk-cvpr/dataset/eval_results/reviews_meteor_llava_results_step3.jsonl
eval/llavabench/eval_gpt_review_bench.py DELETED
@@ -1,122 +0,0 @@
1
- import argparse
2
- import json
3
- import os
4
-
5
- import openai
6
- import time
7
-
8
- NUM_SECONDS_TO_SLEEP = 0.5
9
-
10
- openai.api_key= ""
11
-
12
- def get_eval(content: str, max_tokens: int):
13
- while True:
14
- try:
15
- response = openai.ChatCompletion.create(
16
- model='gpt-4-0613',
17
- messages=[{
18
- 'role': 'system',
19
- 'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
20
- }, {
21
- 'role': 'user',
22
- 'content': content,
23
- }],
24
- temperature=0.2, # TODO: figure out which temperature is best for evaluation
25
- max_tokens=max_tokens,
26
- )
27
- break
28
- except openai.error.RateLimitError:
29
- pass
30
- except Exception as e:
31
- print(e)
32
- time.sleep(NUM_SECONDS_TO_SLEEP)
33
-
34
- return response['choices'][0]['message']['content']
35
-
36
-
37
- def parse_score(review):
38
- try:
39
- score_pair = review.split('\n')[0]
40
- score_pair = score_pair.replace(',', ' ')
41
- sp = score_pair.split(' ')
42
- if len(sp) == 2:
43
- return [float(sp[0]), float(sp[1])]
44
- else:
45
- print('error', review)
46
- return [-1, -1]
47
- except Exception as e:
48
- print(e)
49
- print('error', review)
50
- return [-1, -1]
51
-
52
-
53
- if __name__ == '__main__':
54
- parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
55
- parser.add_argument('-q', '--question')
56
- parser.add_argument('-c', '--context')
57
- parser.add_argument('-a', '--answer-list', nargs='+', default=[])
58
- parser.add_argument('-r', '--rule')
59
- parser.add_argument('-o', '--output')
60
- parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
61
- args = parser.parse_args()
62
-
63
- f_q = open(os.path.expanduser(args.question))
64
- f_ans1 = open(os.path.expanduser(args.answer_list[0]))
65
- f_ans2 = open(os.path.expanduser(args.answer_list[1]))
66
- rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
67
-
68
- if os.path.isfile(os.path.expanduser(args.output)):
69
- cur_reviews = [json.loads(line) for line in open(os.path.expanduser(args.output))]
70
- else:
71
- cur_reviews = []
72
-
73
- review_file = open(f'{args.output}', 'a')
74
-
75
- context_list = [json.loads(line) for line in open(os.path.expanduser(args.context))]
76
- image_to_context = {context['image']: context for context in context_list}
77
-
78
- handles = []
79
- idx = 0
80
- for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
81
- ques = json.loads(ques_js)
82
- ans1 = json.loads(ans1_js)
83
- ans2 = json.loads(ans2_js)
84
-
85
- inst = image_to_context[ques['image']]
86
-
87
- if isinstance(inst['caption'], list):
88
- cap_str = '\n'.join(inst['caption'])
89
- else:
90
- cap_str = inst['caption']
91
-
92
- category = 'llava_bench_' + json.loads(ques_js)['category']
93
- if category in rule_dict:
94
- rule = rule_dict[category]
95
- else:
96
- assert False, f"Visual QA category not found in rule file: {category}."
97
- prompt = rule['prompt']
98
- role = rule['role']
99
- content = (f'[Context]\n{cap_str}\n\n'
100
- f'[Question]\n{ques["text"]}\n\n'
101
- f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
102
- f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
103
- f'[System]\n{prompt}\n\n')
104
- cur_js = {
105
- 'id': idx+1,
106
- 'question_id': ques['question_id'],
107
- 'answer1_id': ans1.get('answer_id', ans1['question_id']),
108
- 'answer2_id': ans2.get('answer_id', ans2['answer_id']),
109
- 'category': category
110
- }
111
- if idx >= len(cur_reviews):
112
- review = get_eval(content, args.max_tokens)
113
- scores = parse_score(review)
114
- cur_js['content'] = review
115
- cur_js['tuple'] = scores
116
- review_file.write(json.dumps(cur_js) + '\n')
117
- review_file.flush()
118
- else:
119
- print(f'Skipping {idx} as we already have it.')
120
- idx += 1
121
- print(idx)
122
- review_file.close()
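The reviewing loop above relies on a simple output convention: the judge must put the two scores on the first line of its review, which parse_score then splits apart. A toy check of that convention (illustrative only, not repository code):

# The GPT judge is asked to start its review with "score1 score2" on a single line.
review = "8 6\nAssistant 1 answers the question in more detail, while Assistant 2 ..."
score_pair = review.split("\n")[0].replace(",", " ").split(" ")
print([float(score_pair[0]), float(score_pair[1])])   # prints: [8.0, 6.0]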
eval/llavabench/rule.json DELETED
@@ -1,11 +0,0 @@
1
- {
2
- "coding": {"role": "Assistant", "prompt": "Your task is to evaluate the coding abilities of the above two assistants. They have been asked to implement a program to solve a given problem. Please review their code submissions, paying close attention to their problem-solving approach, code structure, readability, and the inclusion of helpful comments.\n\nPlease ensure that the assistants' submissions:\n\n1. Correctly implement the given problem statement.\n2. Contain accurate and efficient code.\n3. Include clear and concise comments that explain the code's logic and functionality.\n4. Adhere to proper coding standards and best practices.\n\nOnce you have carefully reviewed both submissions, provide detailed feedback on their strengths and weaknesses, along with any suggestions for improvement. You should first output a single line containing two scores on the scale of 1-10 (1: no code/no sense; 10: perfect) for Assistant 1 and 2, respectively. Then give extra comments starting from the next line."},
3
- "math": {"role": "Assistant", "prompt": "We would like to request your feedback on the mathematical proficiency of two AI assistants regarding the given user question.\nFirstly, please solve the problem independently, without referring to the answers provided by Assistant 1 and Assistant 2.\nAfterward, please examine the problem-solving process of Assistant 1 and Assistant 2 step-by-step to ensure their correctness, identifying any incorrect steps if present. Your evaluation should take into account not only the answer but also the problem-solving steps.\nFinally, please output a Python tuple containing two numerical scores for Assistant 1 and Assistant 2, ranging from 1 to 10, respectively. If applicable, explain the reasons for any variations in their scores and determine which assistant performed better."},
4
- "default": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above.\nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
5
- "conv": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
6
- "detail": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
7
- "complex": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with five descriptive sentences describing the same image and the bounding box coordinates of each object in the scene. These coordinates are in the form of bounding boxes, represented as (x1, y1, x2, y2) with floating numbers ranging from 0 to 1. These values correspond to the top left x, top left y, bottom right x, and bottom right y. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
8
- "llava_bench_conv": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
9
- "llava_bench_detail": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."},
10
- "llava_bench_complex": {"role": "Assistant", "prompt": "We would like to request your feedback on the performance of two AI assistants in response to the user question displayed above. The user asks the question on observing an image. For your reference, the visual content in the image is represented with a few sentences describing the image. \nPlease rate the helpfulness, relevance, accuracy, level of details of their responses. Each assistant receives an overall score on a scale of 1 to 10, where a higher score indicates better overall performance.\nPlease first output a single line containing only two values indicating the scores for Assistant 1 and 2, respectively. The two scores are separated by a space.\nIn the subsequent line, please provide a comprehensive explanation of your evaluation, avoiding any potential bias and ensuring that the order in which the responses were presented does not affect your judgment."}
11
- }
eval/llavabench/summarize_gpt_review.py DELETED
@@ -1,60 +0,0 @@
1
- import json
2
- import os
3
- from collections import defaultdict
4
-
5
- import numpy as np
6
-
7
- import argparse
8
-
9
- def parse_args():
10
- parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
11
- parser.add_argument('-d', '--dir', default=None)
12
- parser.add_argument('-v', '--version', default=None)
13
- parser.add_argument('-s', '--select', nargs='*', default=None)
14
- parser.add_argument('-f', '--files', nargs='*', default=[])
15
- parser.add_argument('-i', '--ignore', nargs='*', default=[])
16
- return parser.parse_args()
17
-
18
-
19
- if __name__ == '__main__':
20
- args = parse_args()
21
-
22
- if args.ignore is not None:
23
- args.ignore = [int(x) for x in args.ignore]
24
-
25
- if len(args.files) > 0:
26
- review_files = args.files
27
- else:
28
- review_files = [x for x in os.listdir(args.dir) if x.endswith('.jsonl') and (x.startswith('gpt4_text') or x.startswith('reviews_') or x.startswith('review_') or 'review' in args.dir)]
29
-
30
- for review_file in sorted(review_files):
31
- config = os.path.basename(review_file).replace('gpt4_text_', '').replace('.jsonl', '')
32
- if args.select is not None and any(x not in config for x in args.select):
33
- continue
34
- if '0613' in config:
35
- version = '0613'
36
- else:
37
- version = '0314'
38
- if args.version is not None and args.version != version:
39
- continue
40
- scores = defaultdict(list)
41
- print(config)
42
- with open(os.path.join(args.dir, review_file) if args.dir is not None else review_file) as f:
43
- for review_str in f:
44
- review = json.loads(review_str)
45
- if review['question_id'] in args.ignore:
46
- continue
47
- if 'category' in review:
48
- scores[review['category']].append(review['tuple'])
49
- scores['all'].append(review['tuple'])
50
- else:
51
- if 'tuple' in review:
52
- scores['all'].append(review['tuple'])
53
- else:
54
- scores['all'].append(review['score'])
55
- for k, v in sorted(scores.items()):
56
- stats = np.asarray(v).mean(0).tolist()
57
- stats = [round(x, 3) for x in stats]
58
- # print(k, stats, round(stats[1]/stats[0]*100, 1))
59
- print(k, round(stats[1]/stats[0]*100, 1), round(stats[0] * 10, 1), round(stats[1] * 10, 1))
60
- print('=================================')
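The per-category line printed above is a relative score: the (reference, model) score tuples are averaged column-wise, the model mean is reported as a percentage of the reference mean, and both means are rescaled from the 1-10 judging scale to 0-100. A standalone sketch with toy numbers (not repository code):

import numpy as np

# Toy review tuples on the 1-10 judging scale: (reference answer score, model answer score).
tuples = [[8.0, 6.0], [9.0, 7.5], [7.0, 7.0]]
stats = np.asarray(tuples).mean(0).tolist()
print(round(stats[1] / stats[0] * 100, 1),   # model score relative to the reference, in %
      round(stats[0] * 10, 1),               # reference mean rescaled to 0-100
      round(stats[1] * 10, 1))               # model mean rescaled to 0-100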
eval/mathvista/calculate_score.py DELETED
@@ -1,259 +0,0 @@
1
- import os
2
- import re
3
- import argparse
4
- import pandas as pd
5
-
6
- # !pip install python-Levenshtein
7
- from Levenshtein import distance
8
-
9
- import sys
10
- sys.path.append('../')
11
- from utilities import *
12
-
13
-
14
- def get_most_similar(prediction, choices):
15
- """
16
- Use the Levenshtein distance (or edit distance) to determine which of the choices is most similar to the given prediction
17
- """
18
- distances = [distance(prediction, choice) for choice in choices]
19
- ind = distances.index(min(distances))
20
- return choices[ind]
21
- # return min(choices, key=lambda choice: distance(prediction, choice))
22
-
23
-
24
- def normalize_extracted_answer(extraction, choices, question_type, answer_type, precision):
25
- """
26
- Normalize the extracted answer to match the answer type
27
- """
28
- if question_type == 'multi_choice':
29
- # make sure the extraction is a string
30
- if isinstance(extraction, str):
31
- extraction = extraction.strip()
32
- else:
33
- try:
34
- extraction = str(extraction)
35
- except:
36
- extraction = ""
37
-
38
- # extract "A" from "(A) text"
39
- letter = re.findall(r'\(([a-zA-Z])\)', extraction)
40
- if len(letter) > 0:
41
- extraction = letter[0].upper()
42
-
43
- options = [chr(ord('A') + i) for i in range(len(choices))]
44
-
45
- if extraction in options:
46
- # convert option letter to text, e.g. "A" -> "text"
47
- ind = options.index(extraction)
48
- extraction = choices[ind]
49
- else:
50
- # select the most similar option
51
- extraction = get_most_similar(extraction, choices)
52
- assert extraction in choices
53
-
54
- elif answer_type == 'integer':
55
- try:
56
- extraction = str(int(float(extraction)))
57
- except:
58
- extraction = None
59
-
60
- elif answer_type == 'float':
61
- try:
62
- extraction = str(round(float(extraction), precision))
63
- except:
64
- extraction = None
65
-
66
- elif answer_type == 'list':
67
- try:
68
- extraction = str(extraction)
69
- except:
70
- extraction = None
71
-
72
- return extraction
73
-
74
-
75
- def safe_equal(prediction, answer):
76
- """
77
- Check if the prediction is equal to the answer, even if they are of different types
78
- """
79
- try:
80
- if prediction == answer:
81
- return True
82
- return False
83
- except Exception as e:
84
- print(e)
85
- return False
86
-
87
-
88
- def get_acc_with_contion(res_pd, key, value):
89
- if key == 'skills':
90
- # if value in res_pd[key]:
91
- total_pd = res_pd[res_pd[key].apply(lambda x: value in x)]
92
- else:
93
- total_pd = res_pd[res_pd[key] == value]
94
-
95
- correct_pd = total_pd[total_pd['true_false'] == True]
96
- acc = "{:.2f}".format(len(correct_pd) / len(total_pd) * 100)
97
- return len(correct_pd), len(total_pd), acc
98
-
99
- if __name__ == '__main__':
100
- parser = argparse.ArgumentParser()
101
- parser.add_argument('--output_dir', type=str, default='../results')
102
- parser.add_argument('--output_file', type=str, default='output.json')
103
- parser.add_argument('--score_file', type=str, default='scores.json')
104
- parser.add_argument('--gt_file', type=str, default='../data/testmini.json', help='ground truth file')
105
- parser.add_argument('--number', type=int, default=-1, help='number of problems to run')
106
- parser.add_argument('--rerun', action='store_true', help='rerun the evaluation')
107
- parser.add_argument('--caculate_gain', action='store_true', help='caculate the socre gains over random guess')
108
- parser.add_argument('--random_file', type=str, default='score_random_guess.json')
109
- args = parser.parse_args()
110
-
111
- # args
112
- output_file = os.path.join(args.output_dir, args.output_file)
113
-
114
- # # quick test
115
- # output_file = '../results/llava-llama-2-13b/output_llava_llama_2_13b.json'
116
-
117
- # read json
118
- print(f"Reading {output_file}...")
119
- results = read_json(output_file)
120
-
121
- # read ground truth
122
- print(f"Reading {args.gt_file}...")
123
- gts = read_json(args.gt_file)
124
-
125
- # full pids
126
- full_pids = list(results.keys())
127
- if args.number > 0:
128
- full_pids = full_pids[:min(args.number, len(full_pids))]
129
- print("Number of testing problems:", len(full_pids))
130
-
131
- ## [1] Evaluate if the prediction is true or false
132
- print("\nEvaluating the predictions...")
133
- update_json_flag = False
134
- for pid in full_pids:
135
- problem = results[pid]
136
- # print(problem)
137
-
138
- if args.rerun:
139
- if 'prediction' in problem:
140
- del problem['prediction']
141
- if 'true_false' in problem:
142
- del problem['true_false']
143
-
144
- choices = problem['choices']
145
- question_type = problem['question_type']
146
- answer_type = problem['answer_type']
147
- precision = problem['precision']
148
- extraction = problem['extraction']
149
-
150
- if 'answer' in problem:
151
- answer = problem['answer']
152
- else:
153
- answer = gts[pid]['answer']
154
- problem['answer'] = answer
155
-
156
- # normalize the extracted answer to match the answer type
157
- prediction = normalize_extracted_answer(extraction, choices, question_type, answer_type, precision)
158
-
159
- # verify the prediction is true or false
160
- true_false = safe_equal(prediction, answer)
161
-
162
- # update the problem
163
- if "true_false" not in problem:
164
- update_json_flag = True
165
-
166
- elif true_false != problem['true_false']:
167
- update_json_flag = True
168
-
169
- if "prediction" not in problem:
170
- update_json_flag = True
171
-
172
- elif prediction != problem['prediction']:
173
- update_json_flag = True
174
-
175
- problem['prediction'] = prediction
176
- problem['true_false'] = true_false
177
-
178
- # save the updated json
179
- if update_json_flag:
180
- print("\n!!!Some problems are updated.!!!")
181
- print(f"\nSaving {output_file}...")
182
- save_json(results, output_file)
183
-
184
- ## [2] Calculate the average accuracy
185
- total = len(full_pids)
186
- correct = 0
187
- for pid in full_pids:
188
- if results[pid]['true_false']:
189
- correct += 1
190
- accuracy = str(round(correct / total * 100, 2))
191
- print(f"\nCorrect: {correct}, Total: {total}, Accuracy: {accuracy}%")
192
-
193
- scores = {"average": {"accuracy": accuracy, "correct": correct, "total": total}}
194
-
195
- ## [3] Calculate the fine-grained accuracy scores
196
-
197
- # merge the 'metadata' attribute into the data
198
- for pid in results:
199
- results[pid].update(results[pid].pop('metadata'))
200
-
201
- # convert the data to a pandas DataFrame
202
- df = pd.DataFrame(results).T
203
-
204
- print(len(df))
205
- print("Number of test problems:", len(df))
206
- # assert len(df) == 1000 # Important!!!
207
-
208
- # asign the target keys for evaluation
209
- target_keys = ['question_type', 'answer_type', 'language', 'source', 'category', 'task', 'context', 'grade', 'skills']
210
-
211
- for key in target_keys:
212
- print(f"\nType: [{key}]")
213
- # get the unique values of the key
214
- if key == 'skills':
215
- # the value is a list
216
- values = []
217
- for i in range(len(df)):
218
- values += df[key][i]
219
- values = list(set(values))
220
- else:
221
- values = df[key].unique()
222
- #print(values)
223
-
224
- # calculate the accuracy for each value
225
- scores[key] = {}
226
- for value in values:
227
- correct, total, acc = get_acc_with_contion(df, key, value)
228
- if total > 0:
229
- print(f"[{value}]: {acc}% ({correct}/{total})")
230
- scores[key][value] = {"accuracy": acc, "correct": correct, "total": total}
231
-
232
- # sort the scores by accuracy
233
- scores[key] = dict(sorted(scores[key].items(), key=lambda item: float(item[1]['accuracy']), reverse=True))
234
-
235
- # save the scores
236
- scores_file = os.path.join(args.output_dir, args.score_file)
237
- print(f"\nSaving {scores_file}...")
238
- save_json(scores, scores_file)
239
- print("\nDone!")
240
-
241
- # [4] Calculate the score gains over random guess
242
- if args.caculate_gain:
243
- random_file = os.path.join(args.output_dir, args.random_file)
244
- random_scores = json.load(open(random_file))
245
-
246
- print("\nCalculating the score gains...")
247
- for key in scores:
248
- if key == 'average':
249
- gain = round(float(scores[key]['accuracy']) - float(random_scores[key]['accuracy']), 2)
250
- scores[key]['acc_gain'] = gain
251
- else:
252
- for sub_key in scores[key]:
253
- gain = round(float(scores[key][sub_key]['accuracy']) - float(random_scores[key][sub_key]['accuracy']), 2)
254
- scores[key][sub_key]['acc_gain'] = str(gain)
255
-
256
- # save the score gains
257
- print(f"\nSaving {scores_file}...")
258
- save_json(scores, scores_file)
259
- print("\nDone!")
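For multiple-choice questions, the scoring above hinges on normalize_extracted_answer: a letter such as "(B)" is pulled out of the extracted answer and mapped back to the corresponding choice text before the exact-match comparison (with a Levenshtein-based fallback for free-form extractions). A simplified, self-contained version of the letter-matching branch (toy inputs, not repository code):

import re

def normalize_multi_choice(extraction, choices):
    # Pull "B" out of strings like "(b) 14", then map the letter back to the choice text.
    letters = re.findall(r'\(([a-zA-Z])\)', extraction)
    if letters:
        extraction = letters[0].upper()
    options = [chr(ord('A') + i) for i in range(len(choices))]
    if extraction in options:
        return choices[options.index(extraction)]
    return extraction

print(normalize_multi_choice("(b) 14", ["12", "14", "16"]))   # prints: 14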
eval/mathvista/eval.sh DELETED
@@ -1,9 +0,0 @@
1
- # python eval/mathvista/extract_answer.py \
2
- # --output_dir /mnt/ssd/lbk-cvpr/dataset/eval_results \
3
- # --output_file Meteor_mathvista_results.json
4
-
5
- python eval/mathvista/calculate_score.py \
6
- --output_dir /mnt/ssd/lbk-cvpr/dataset/eval_results \
7
- --output_file Meteor_mathvista_results_refixed.json \
8
- --score_file Meteor_mathvista_scores.json \
9
- --gt_file /mnt/ssd/lbk-cvpr/dataset/MathVista/annot_testmini.json \
eval/mathvista/extract_answer.py DELETED
@@ -1,150 +0,0 @@
1
- import os
2
- import re
3
- import time
4
- import argparse
5
-
6
- from tqdm import tqdm
7
-
8
- import sys
9
- sys.path.append('../')
10
- from utilities import *
11
-
12
- # OpenAI
13
- import openai
14
- openai.api_key = ""
15
- # print(openai.api_key)
16
-
17
- # load demo prompt
18
- from prompts.ext_ans import demo_prompt
19
-
20
-
21
- def verify_extraction(extraction):
22
- extraction = extraction.strip()
23
- if extraction == "" or extraction == None:
24
- return False
25
- return True
26
-
27
-
28
- def create_test_prompt(demo_prompt, query, response):
29
- demo_prompt = demo_prompt.strip()
30
- test_prompt = f"{query}\n\n{response}"
31
- full_prompt = f"{demo_prompt}\n\n{test_prompt}\n\nExtracted answer: "
32
- return full_prompt
33
-
34
-
35
- def extract_answer(response, problem, quick_extract=False):
36
- question_type = problem['question_type']
37
- answer_type = problem['answer_type']
38
- choices = problem['choices']
39
- query = problem['query']
40
- pid = problem['pid']
41
-
42
- if response == "":
43
- return ""
44
-
45
- if question_type == 'multi_choice' and response in choices:
46
- return response
47
-
48
- if answer_type == "integer":
49
- try:
50
- extraction = int(response)
51
- return str(extraction)
52
- except:
53
- pass
54
-
55
- if answer_type == "float":
56
- try:
57
- extraction = str(float(response))
58
- return extraction
59
- except:
60
- pass
61
-
62
- # quick extraction
63
- if quick_extract:
64
- print("Quickly extracting answer...")
65
- # The answer is "text". -> "text"
66
- try:
67
- result = re.search(r'The answer is "(.*)"\.', response)
68
- if result:
69
- extraction = result.group(1)
70
- return extraction
71
- except:
72
- pass
73
-
74
- # general extraction
75
- try:
76
- full_prompt = create_test_prompt(demo_prompt, query, response)
77
- extraction = get_chat_response(full_prompt, openai.api_key)
78
- return extraction
79
- except Exception as e:
80
- print(e)
81
- print(f"Error in extracting answer for {pid}")
82
-
83
- return ""
84
-
85
-
86
- if __name__ == '__main__':
87
- parser = argparse.ArgumentParser()
88
- # input
89
- parser.add_argument('--output_dir', type=str, default='../results')
90
- parser.add_argument('--output_file', type=str, default='answer.json')
91
- parser.add_argument('--response_label', type=str, default='response', help='response label for the input file')
92
- # model
93
- parser.add_argument('--llm_engine', type=str, default='gpt-4-0613', help='llm engine',
94
- choices = ['gpt-3.5-turbo', 'gpt-3.5', 'gpt-4', 'gpt-4-0314', 'gpt-4-0613'])
95
- parser.add_argument('--number', type=int, default=-1, help='number of problems to run')
96
- parser.add_argument('--quick_extract', action='store_true', help='use rules to extract answer for some problems')
97
- parser.add_argument('--rerun', action='store_true', help='rerun the answer extraction')
98
- # output
99
- parser.add_argument('--save_every', type=int, default=10, help='save every n problems')
100
- parser.add_argument('--output_label', type=str, default='', help='label for the output file')
101
- args = parser.parse_args()
102
-
103
- # args
104
- label = args.response_label
105
- result_file = os.path.join(args.output_dir, args.output_file)
106
-
107
- if args.output_label != '':
108
- output_file = result_file.replace('.json', f'_{args.output_label}.json')
109
- else:
110
- output_file = result_file
111
-
112
- # read results
113
- print(f"Reading {result_file}...")
114
- results = read_json(result_file)
115
-
116
- # full pids
117
- full_pids = list(results.keys())
118
- if args.number > 0:
119
- full_pids = full_pids[:min(args.number, len(full_pids))]
120
- print("Number of testing problems:", len(full_pids))
121
-
122
- # test pids
123
- if args.rerun:
124
- test_pids = full_pids
125
- else:
126
- test_pids = []
127
- for pid in full_pids:
128
- # print(pid)
129
- if 'extraction' not in results[pid] or not verify_extraction(results[pid]['extraction']):
130
- test_pids.append(pid)
131
-
132
- test_num = len(test_pids)
133
- print("Number of problems to run:", test_num)
134
- # print(test_pids)
135
-
136
- # tqdm, enumerate results
137
- for i, pid in enumerate(tqdm(test_pids)):
138
- problem = results[pid]
139
-
140
- assert label in problem
141
- response = problem[label]
142
-
143
-
144
- extraction = extract_answer(response, problem, args.quick_extract)
145
- results[pid]['extraction'] = extraction
146
-
147
- if i % args.save_every == 0 or i == test_num - 1:
148
- print(f"Saving results to {output_file}...")
149
- save_json(results, output_file)
150
- print(f"Results saved.")
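A minimal, self-contained sketch of the rule-based fast path used above (the --quick_extract branch) before the script falls back to an LLM call; the sample responses below are invented for illustration:

import re

def quick_extract(response: str) -> str:
    # Mirrors the quick-extract rule above: pull the quoted text out of
    # a response of the form: The answer is "...".
    match = re.search(r'The answer is "(.*)"\.', response)
    return match.group(1) if match else ""

print(quick_extract('The answer is "14".'))        # -> 14
print(quick_extract('I believe the value is 14'))  # -> "" (would fall back to get_chat_response)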
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eval/mathvista/prompts/ext_ans.py DELETED
@@ -1,42 +0,0 @@
1
-
2
-
3
- # pids = 852, 104, 824, 506, 540
4
-
5
- demo_prompt = """
6
- Please read the following example. Then extract the answer from the model response and type it at the end of the prompt.
7
-
8
- Hint: Please answer the question requiring an integer answer and provide the final value, e.g., 1, 2, 3, at the end.
9
- Question: Which number is missing?
10
-
11
- Model response: The number missing in the sequence is 14.
12
-
13
- Extracted answer: 14
14
-
15
- Hint: Please answer the question requiring a floating-point number with one decimal place and provide the final value, e.g., 1.2, 1.3, 1.4, at the end.
16
- Question: What is the fraction of females facing the camera?
17
-
18
- Model response: The fraction of females facing the camera is 0.6, which means that six out of ten females in the group are facing the camera.
19
-
20
- Extracted answer: 0.6
21
-
22
- Hint: Please answer the question requiring a floating-point number with two decimal places and provide the final value, e.g., 1.23, 1.34, 1.45, at the end.
23
- Question: How much money does Luca need to buy a sour apple candy and a butterscotch candy? (Unit: $)
24
-
25
- Model response: Luca needs $1.45 to buy a sour apple candy and a butterscotch candy.
26
-
27
- Extracted answer: 1.45
28
-
29
- Hint: Please answer the question requiring a Python list as an answer and provide the final list, e.g., [1, 2, 3], [1.2, 1.3, 1.4], at the end.
30
- Question: Between which two years did the line graph see its maximum peak?
31
-
32
- Model response: The line graph saw its maximum peak between 2007 and 2008.
33
-
34
- Extracted answer: [2007, 2008]
35
-
36
- Hint: Please answer the question and provide the correct option letter, e.g., A, B, C, D, at the end.
37
- Question: What fraction of the shape is blue?\nChoices:\n(A) 3/11\n(B) 8/11\n(C) 6/11\n(D) 3/5
38
-
39
- Model response: The correct answer is (B) 8/11.
40
-
41
- Extracted answer: B
42
- """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eval/mathvista/utilities.py DELETED
@@ -1,199 +0,0 @@
1
- import os
2
- import cv2
3
- import json
4
- import time
5
- import pickle
6
- import openai
7
- import re
8
- from word2number import w2n
9
-
10
-
11
- def create_dir(output_dir):
12
- if not os.path.exists(output_dir):
13
- os.makedirs(output_dir)
14
-
15
-
16
- def read_csv(file):
17
- data = []
18
- with open(file, 'r') as f:
19
- for line in f:
20
- data.append(line.strip())
21
- return data
22
-
23
-
24
- def read_pandas_csv(csv_path):
25
- # read a pandas csv sheet
26
- import pandas as pd
27
- df = pd.read_csv(csv_path)
28
- return df
29
-
30
-
31
- def read_json(path):
32
- with open(path, 'r', encoding='utf-8') as f:
33
- return json.load(f)
34
-
35
-
36
- def read_jsonl(file):
37
- with open(file, 'r') as f:
38
- data = [json.loads(line) for line in f]
39
- return data
40
-
41
-
42
- def read_pickle(path):
43
- with open(path, 'rb') as f:
44
- return pickle.load(f)
45
-
46
-
47
- def save_json(data, path):
48
- with open(path, 'w') as f:
49
- json.dump(data, f, indent=4)
50
-
51
-
52
- def save_array_img(path, image):
53
- cv2.imwrite(path, image)
54
-
55
-
56
- def contains_digit(text):
57
- # check if text contains a digit
58
- if any(char.isdigit() for char in text):
59
- return True
60
- return False
61
-
62
- def contains_number_word(text):
63
- # check if text contains a number word
64
- ignore_words = ["a", "an", "point"]
65
- words = re.findall(r'\b\w+\b', text) # This regex pattern matches any word in the text
66
- for word in words:
67
- if word in ignore_words:
68
- continue
69
- try:
70
- w2n.word_to_num(word)
71
- return True # If the word can be converted to a number, return True
72
- except ValueError:
73
- continue # If the word can't be converted to a number, continue with the next word
74
-
75
- # check if text contains a digit
76
- if any(char.isdigit() for char in text):
77
- return True
78
-
79
- return False # If none of the words could be converted to a number, return False
80
-
81
-
82
- def contains_quantity_word(text, special_keep_words=[]):
83
- # check if text contains a quantity word
84
- quantity_words = ["most", "least", "fewest",
85
- "more", "less", "fewer",
86
- "largest", "smallest", "greatest",
87
- "larger", "smaller", "greater",
88
- "highest", "lowest", "higher", "lower",
89
- "increase", "decrease",
90
- "minimum", "maximum", "max", "min",
91
- "mean", "average", "median",
92
- "total", "sum", "add", "subtract",
93
- "difference", "quotient", "gap",
94
- "half", "double", "twice", "triple",
95
- "square", "cube", "root",
96
- "approximate", "approximation",
97
- "triangle", "rectangle", "circle", "square", "cube", "sphere", "cylinder", "cone", "pyramid",
98
- "multiply", "divide",
99
- "percentage", "percent", "ratio", "proportion", "fraction", "rate",
100
- ]
101
-
102
- quantity_words += special_keep_words # dataset specific words
103
-
104
- words = re.findall(r'\b\w+\b', text) # This regex pattern matches any word in the text
105
- if any(word in quantity_words for word in words):
106
- return True
107
-
108
- return False # If none of the words could be converted to a number, return False
109
-
110
-
111
- def is_bool_word(text):
112
- if text in ["Yes", "No", "True", "False",
113
- "yes", "no", "true", "false",
114
- "YES", "NO", "TRUE", "FALSE"]:
115
- return True
116
- return False
117
-
118
-
119
- def is_digit_string(text):
120
- # remove ".0000"
121
- text = text.strip()
122
- text = re.sub(r'\.0+$', '', text)
123
- try:
124
- int(text)
125
- return True
126
- except ValueError:
127
- return False
128
-
129
-
130
- def is_float_string(text):
131
- # text is a float string if it contains a "." and can be converted to a float
132
- if "." in text:
133
- try:
134
- float(text)
135
- return True
136
- except ValueError:
137
- return False
138
- return False
139
-
140
-
141
- def copy_image(image_path, output_image_path):
142
- from shutil import copyfile
143
- copyfile(image_path, output_image_path)
144
-
145
-
146
- def copy_dir(src_dir, dst_dir):
147
- from shutil import copytree
148
- # copy the source directory to the target directory
149
- copytree(src_dir, dst_dir)
150
-
151
-
152
- import PIL.Image as Image
153
- def get_image_size(img_path):
154
- img = Image.open(img_path)
155
- width, height = img.size
156
- return width, height
157
-
158
-
159
- def get_chat_response(promot, api_key, model="gpt-3.5-turbo", temperature=0, max_tokens=256, n=1, patience=10000000,
160
- sleep_time=0):
161
- messages = [
162
- {"role": "user", "content": promot},
163
- ]
164
- # print("I am here")
165
- while patience > 0:
166
- patience -= 1
167
- try:
168
- response = openai.ChatCompletion.create(model=model,
169
- messages=messages,
170
- api_key=api_key,
171
- temperature=temperature,
172
- max_tokens=max_tokens,
173
- n=n)
174
- if n == 1:
175
- prediction = response['choices'][0]['message']['content'].strip()
176
- if prediction != "" and prediction != None:
177
- return prediction
178
- else:
179
- prediction = [choice['message']['content'].strip() for choice in response['choices']]
180
- if prediction[0] != "" and prediction[0] != None:
181
- return prediction
182
-
183
- except Exception as e:
184
- if "Rate limit" not in str(e):
185
- print(e)
186
-
187
- if "Please reduce the length of the messages" in str(e):
188
- print("!!Reduce promot size")
189
- # reduce input prompt and keep the tail
190
- new_size = int(len(promot) * 0.9)
191
- new_start = len(promot) - new_size
192
- promot = promot[new_start:]
193
- messages = [
194
- {"role": "user", "content": promot},
195
- ]
196
-
197
- if sleep_time > 0:
198
- time.sleep(sleep_time)
199
- return ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eval/mm-vet/eval.sh DELETED
@@ -1 +0,0 @@
1
- python eval/mm-vet/evaluate_mmvet.py
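The script invoked above expects a {model}_mmvet_results.json file under DATASET_ROOT/eval_results mapping each mm-vet question id to the model's raw answer string. A minimal sketch of producing such a file (the ids and answers are placeholders):

import json, os

DATASET_ROOT = "/path/to/dataset"   # placeholder; should match the config used by evaluate_mmvet.py
results = {
    "v1_0": "The answer is 7.",     # question id -> raw model answer (ids here are illustrative)
    "v1_1": "This meme contrasts Iceland and Greenland.",
}
out = os.path.join(DATASET_ROOT, "eval_results", "Meteor_mmvet_results.json")
os.makedirs(os.path.dirname(out), exist_ok=True)
with open(out, "w") as f:
    json.dump(results, f, indent=4)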
 
 
eval/mm-vet/evaluate_mmvet.py DELETED
@@ -1,250 +0,0 @@
1
- import openai
2
- import json
3
-
4
- import os
5
- from tqdm import tqdm
6
- import pandas as pd
7
- import numpy as np
8
- from collections import Counter
9
- import time
10
-
11
- gpt_model = "gpt-4-0613"
12
- openai.api_key= ""
13
- DATASET_ROOT=""
14
- MMVET = "mm-vet/mm-vet.json"
15
-
16
- prompt = """Compare the ground truth and prediction from AI models, to give a correctness score for the prediction. <AND> in the ground truth means it is totally right only when all elements in the ground truth are present in the prediction, and <OR> means it is totally right when any one element in the ground truth is present in the prediction. The correctness score is 0.0 (totally wrong), 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, or 1.0 (totally right). Just complete the last space of the correctness score.
17
-
18
- Question | Ground truth | Prediction | Correctness
19
- --- | --- | --- | ---
20
- What is x in the equation? | -1 <AND> -5 | x = 3 | 0.0
21
- What is x in the equation? | -1 <AND> -5 | x = -1 | 0.5
22
- What is x in the equation? | -1 <AND> -5 | x = -5 | 0.5
23
- What is x in the equation? | -1 <AND> -5 | x = -5 or 5 | 0.5
24
- What is x in the equation? | -1 <AND> -5 | x = -1 or x = -5 | 1.0
25
- Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme talks about Iceland and Greenland. It's pointing out that despite their names, Iceland is not very icy and Greenland isn't very green. | 0.4
26
- Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme is using humor to point out the misleading nature of Iceland's and Greenland's names. Iceland, despite its name, has lush green landscapes while Greenland is mostly covered in ice and snow. The text 'This is why I have trust issues' is a playful way to suggest that these contradictions can lead to distrust or confusion. The humor in this meme is derived from the unexpected contrast between the names of the countries and their actual physical characteristics. | 1.0
27
- """
28
-
29
- # load metadata
30
- decimal_places = 1 # number of decimal places to round to
31
-
32
-
33
- sub_set = None
34
- sub_set_name = ''
35
-
36
- mmvet_metadata = os.path.join(DATASET_ROOT, MMVET)
37
- with open(mmvet_metadata, 'r') as f:
38
- data = json.load(f)
39
-
40
-
41
- counter = Counter()
42
- cap_set_list = []
43
- cap_set_counter = []
44
- len_data = 0
45
- for id, value in data.items():
46
- if sub_set is not None and id not in sub_set:
47
- continue
48
- question = value["question"]
49
- answer = value["answer"]
50
- cap = value["capability"]
51
- cap = set(cap)
52
- counter.update(cap)
53
- if cap not in cap_set_list:
54
- cap_set_list.append(cap)
55
- cap_set_counter.append(1)
56
- else:
57
- cap_set_counter[cap_set_list.index(cap)] += 1
58
-
59
- len_data += 1
60
-
61
- sorted_list = counter.most_common()
62
- columns = [k for k, v in sorted_list]
63
- columns.append("total")
64
- columns.append("std")
65
- columns.append('runs')
66
- df = pd.DataFrame(columns=columns)
67
-
68
-
69
- cap_set_sorted_indices = np.argsort(-np.array(cap_set_counter))
70
- new_cap_set_list = []
71
- new_cap_set_counter = []
72
- for index in cap_set_sorted_indices:
73
- new_cap_set_list.append(cap_set_list[index])
74
- new_cap_set_counter.append(cap_set_counter[index])
75
-
76
- cap_set_list = new_cap_set_list
77
- cap_set_counter = new_cap_set_counter
78
- cap_set_names = ["_".join(list(cap_set)) for cap_set in cap_set_list]
79
-
80
- columns2 = cap_set_names
81
- columns2.append("total")
82
- columns2.append("std")
83
- columns2.append('runs')
84
- df2 = pd.DataFrame(columns=columns2)
85
-
86
- ###### change your model name ######
87
- model = "Meteor"
88
- result_path = os.path.join(DATASET_ROOT, "eval_results")
89
- num_run = 1 # we set it as 5 in the paper
90
- model_results_file = os.path.join(result_path, f"{model}_mmvet_results.json")
91
-
92
- # grade results for each sample to save
93
- grade_file = f'{model}_{gpt_model}-grade-{num_run}runs.json'
94
- grade_file = os.path.join(result_path, grade_file)
95
-
96
- # score results regarding capabilities/capability integration to save
97
- cap_score_file = f'{model}_{sub_set_name}{gpt_model}-cap-score-{num_run}runs.csv'
98
- cap_score_file = os.path.join(result_path, cap_score_file)
99
- cap_int_score_file = f'{model}_{sub_set_name}{gpt_model}-cap-int-score-{num_run}runs.csv'
100
- cap_int_score_file = os.path.join(result_path, cap_int_score_file)
101
-
102
- with open(model_results_file) as f:
103
- results = json.load(f)
104
- if os.path.exists(grade_file):
105
- with open(grade_file, 'r') as f:
106
- grade_results = json.load(f)
107
- else:
108
- grade_results = {}
109
-
110
-
111
- def need_more_runs():
112
- need_more_runs = False
113
- if len(grade_results) > 0:
114
- for k, v in grade_results.items():
115
- if len(v['score']) < num_run:
116
- need_more_runs = True
117
- break
118
- return need_more_runs or len(grade_results) < len_data
119
-
120
-
121
- while need_more_runs():
122
- for j in range(num_run):
123
- print(f'eval run {j}')
124
- for id, line in tqdm(data.items()):
125
- if sub_set is not None and id not in sub_set:
126
- continue
127
- if id in grade_results and len(grade_results[id]['score']) >= (j + 1):
128
- continue
129
-
130
- model_pred = results[id]
131
-
132
- question = prompt + '\n' + ' | '.join([line['question'], line['answer'].replace("<AND>", " <AND> ").replace("<OR>", " <OR> "), model_pred, ""])
133
- messages = [
134
- {"role": "user", "content": question},
135
- ]
136
-
137
- if id not in grade_results:
138
- sample_grade = {'model': [], 'content': [], 'score': []}
139
- else:
140
- sample_grade = grade_results[id]
141
-
142
-
143
- grade_sample_run_complete = False
144
- temperature = 0.0
145
-
146
- while not grade_sample_run_complete:
147
- try:
148
- response = openai.ChatCompletion.create(
149
- model=gpt_model,
150
- max_tokens=3,
151
- temperature=temperature,
152
- messages=messages)
153
- content = response['choices'][0]['message']['content']
154
- flag = True
155
- try_time = 1
156
- while flag:
157
- try:
158
- content = content.split(' ')[0].strip()
159
- score = float(content)
160
- if score > 1.0 or score < 0.0:
161
- assert False
162
- flag = False
163
- except:
164
- question = prompt + '\n' + ' | '.join([line['question'], line['answer'].replace("<AND>", " <AND> ").replace("<OR>", " <OR> "), model_pred, ""]) + "\nPredict the correctness of the answer (digit): "
165
- messages = [
166
- {"role": "user", "content": question},
167
- ]
168
- response = openai.ChatCompletion.create(
169
- model=gpt_model,
170
- max_tokens=3,
171
- temperature=temperature,
172
- messages=messages)
173
- content = response['choices'][0]['message']['content']
174
- try_time += 1
175
- temperature += 0.5
176
- print(f"{id} try {try_time} times")
177
- print(content)
178
- if try_time > 5:
179
- score = 0.0
180
- flag = False
181
- grade_sample_run_complete = True
182
- except:
183
- # gpt4 may have token rate limit
184
- print("sleep 30s")
185
- time.sleep(30)
186
-
187
- if len(sample_grade['model']) >= j + 1:
188
- sample_grade['model'][j] = response['model']
189
- sample_grade['content'][j] = content
190
- sample_grade['score'][j] = score
191
- else:
192
- sample_grade['model'].append(response['model'])
193
- sample_grade['content'].append(content)
194
- sample_grade['score'].append(score)
195
- grade_results[id] = sample_grade
196
-
197
- with open(grade_file, 'w') as f:
198
- json.dump(grade_results, f, indent=4)
199
-
200
-
201
- assert not need_more_runs()
202
- cap_socres = {k: [0.0]*num_run for k in columns[:-2]}
203
- counter['total'] = len_data
204
-
205
- cap_socres2 = {k: [0.0]*num_run for k in columns2[:-2]}
206
- counter2 = {columns2[i]:cap_set_counter[i] for i in range(len(cap_set_counter))}
207
- counter2['total'] = len_data
208
-
209
- for k, v in grade_results.items():
210
- if sub_set is not None and k not in sub_set:
211
- continue
212
- for i in range(num_run):
213
- score = v['score'][i]
214
- caps = set(data[k]['capability'])
215
- for c in caps:
216
- cap_socres[c][i] += score
217
-
218
- cap_socres['total'][i] += score
219
-
220
- index = cap_set_list.index(caps)
221
- cap_socres2[cap_set_names[index]][i] += score
222
- cap_socres2['total'][i] += score
223
-
224
- for k, v in cap_socres.items():
225
- cap_socres[k] = np.array(v) / counter[k] *100
226
-
227
-
228
- std = round(cap_socres['total'].std(), decimal_places)
229
- total_copy = cap_socres['total'].copy()
230
- runs = str(list(np.round(total_copy, decimal_places)))
231
-
232
- for k, v in cap_socres.items():
233
- cap_socres[k] = round(v.mean(), decimal_places)
234
-
235
- cap_socres['std'] = std
236
- cap_socres['runs'] = runs
237
- df.loc[model] = cap_socres
238
-
239
-
240
- for k, v in cap_socres2.items():
241
- cap_socres2[k] = round(np.mean(np.array(v) / counter2[k] *100), decimal_places)
242
- cap_socres2['std'] = std
243
- cap_socres2['runs'] = runs
244
- df2.loc[model] = cap_socres2
245
-
246
- df.to_csv(cap_score_file)
247
- df2.to_csv(cap_int_score_file)
248
-
249
- print(df)
250
- print(df2)
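To make the per-capability aggregation at the end concrete, here is a toy example of the same computation (grades and capability tags invented for illustration):

from collections import Counter

# per-sample GPT grades in [0, 1] with their capability tags
graded = [
    (0.5, ["ocr", "math"]),
    (1.0, ["ocr"]),
    (0.0, ["rec"]),
]

counts, totals = Counter(), Counter()
for score, caps in graded:
    counts.update(caps)          # number of samples touching each capability
    for c in caps:
        totals[c] += score       # summed grade per capability

cap_scores = {c: totals[c] / counts[c] * 100 for c in counts}
print(cap_scores)                # ocr: 75.0, math: 50.0, rec: 0.0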
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
eval/utils.py DELETED
@@ -1,1351 +0,0 @@
1
- import os
2
- import re
3
- import json
4
- import openai
5
- from typing import Dict
6
- from tqdm import tqdm
7
- import random
8
- import numpy as np
9
- from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix
10
- from typing import Optional
11
- from collections import defaultdict
12
- from eval.mathvista.utilities import get_chat_response
13
- from config import *
14
- from copy import deepcopy
15
-
16
- random.seed(42)
17
-
18
- # SEED Question types
19
- SEED_TYPES = {1: 'Scene Understanding', 2: 'Instance Identity', 3: 'Instance Location', 4: 'Instance Attributes', 5: 'Instances Counting', 6: 'Spatial Relation', 7: 'Instance Interaction', 8: 'Visual Reasoning', 9: 'Text Understanding'}
20
-
21
- # Check for duplicated questions, items
22
- def remove_duplicate(dataset, inputs, gen_answers):
23
- if dataset == "mme":
24
- return inputs, gen_answers
25
- elif dataset == "pope":
26
- questions = set()
27
- new_inputs, new_answers = [], []
28
- for i, a in zip(inputs, gen_answers):
29
- dup = i['id'], i['category']
30
- if dup in questions:
31
- continue
32
- questions.add(dup)
33
- new_inputs.append(i)
34
- new_answers.append(a)
35
- else:
36
- questions = set()
37
- new_inputs, new_answers = [], []
38
- for i, a in zip(inputs, gen_answers):
39
- if i['id'] in questions:
40
- continue
41
- questions.add(i['id'])
42
- new_inputs.append(i)
43
- new_answers.append(a)
44
- return new_inputs, new_answers
45
-
46
- class EvalAIAnswerProcessor:
47
- """
48
- Processes an answer similar to Eval AI
49
- copied from
50
- https://github.com/facebookresearch/mmf/blob/c46b3b3391275b4181567db80943473a89ab98ab/pythia/tasks/processors.py#L897
51
- """
52
-
53
- CONTRACTIONS = {
54
- "aint": "ain't",
55
- "arent": "aren't",
56
- "cant": "can't",
57
- "couldve": "could've",
58
- "couldnt": "couldn't",
59
- "couldn'tve": "couldn't've",
60
- "couldnt've": "couldn't've",
61
- "didnt": "didn't",
62
- "doesnt": "doesn't",
63
- "dont": "don't",
64
- "hadnt": "hadn't",
65
- "hadnt've": "hadn't've",
66
- "hadn'tve": "hadn't've",
67
- "hasnt": "hasn't",
68
- "havent": "haven't",
69
- "hed": "he'd",
70
- "hed've": "he'd've",
71
- "he'dve": "he'd've",
72
- "hes": "he's",
73
- "howd": "how'd",
74
- "howll": "how'll",
75
- "hows": "how's",
76
- "Id've": "I'd've",
77
- "I'dve": "I'd've",
78
- "Im": "I'm",
79
- "Ive": "I've",
80
- "isnt": "isn't",
81
- "itd": "it'd",
82
- "itd've": "it'd've",
83
- "it'dve": "it'd've",
84
- "itll": "it'll",
85
- "let's": "let's",
86
- "maam": "ma'am",
87
- "mightnt": "mightn't",
88
- "mightnt've": "mightn't've",
89
- "mightn'tve": "mightn't've",
90
- "mightve": "might've",
91
- "mustnt": "mustn't",
92
- "mustve": "must've",
93
- "neednt": "needn't",
94
- "notve": "not've",
95
- "oclock": "o'clock",
96
- "oughtnt": "oughtn't",
97
- "ow's'at": "'ow's'at",
98
- "'ows'at": "'ow's'at",
99
- "'ow'sat": "'ow's'at",
100
- "shant": "shan't",
101
- "shed've": "she'd've",
102
- "she'dve": "she'd've",
103
- "she's": "she's",
104
- "shouldve": "should've",
105
- "shouldnt": "shouldn't",
106
- "shouldnt've": "shouldn't've",
107
- "shouldn'tve": "shouldn't've",
108
- "somebody'd": "somebodyd",
109
- "somebodyd've": "somebody'd've",
110
- "somebody'dve": "somebody'd've",
111
- "somebodyll": "somebody'll",
112
- "somebodys": "somebody's",
113
- "someoned": "someone'd",
114
- "someoned've": "someone'd've",
115
- "someone'dve": "someone'd've",
116
- "someonell": "someone'll",
117
- "someones": "someone's",
118
- "somethingd": "something'd",
119
- "somethingd've": "something'd've",
120
- "something'dve": "something'd've",
121
- "somethingll": "something'll",
122
- "thats": "that's",
123
- "thered": "there'd",
124
- "thered've": "there'd've",
125
- "there'dve": "there'd've",
126
- "therere": "there're",
127
- "theres": "there's",
128
- "theyd": "they'd",
129
- "theyd've": "they'd've",
130
- "they'dve": "they'd've",
131
- "theyll": "they'll",
132
- "theyre": "they're",
133
- "theyve": "they've",
134
- "twas": "'twas",
135
- "wasnt": "wasn't",
136
- "wed've": "we'd've",
137
- "we'dve": "we'd've",
138
- "weve": "we've",
139
- "werent": "weren't",
140
- "whatll": "what'll",
141
- "whatre": "what're",
142
- "whats": "what's",
143
- "whatve": "what've",
144
- "whens": "when's",
145
- "whered": "where'd",
146
- "wheres": "where's",
147
- "whereve": "where've",
148
- "whod": "who'd",
149
- "whod've": "who'd've",
150
- "who'dve": "who'd've",
151
- "wholl": "who'll",
152
- "whos": "who's",
153
- "whove": "who've",
154
- "whyll": "why'll",
155
- "whyre": "why're",
156
- "whys": "why's",
157
- "wont": "won't",
158
- "wouldve": "would've",
159
- "wouldnt": "wouldn't",
160
- "wouldnt've": "wouldn't've",
161
- "wouldn'tve": "wouldn't've",
162
- "yall": "y'all",
163
- "yall'll": "y'all'll",
164
- "y'allll": "y'all'll",
165
- "yall'd've": "y'all'd've",
166
- "y'alld've": "y'all'd've",
167
- "y'all'dve": "y'all'd've",
168
- "youd": "you'd",
169
- "youd've": "you'd've",
170
- "you'dve": "you'd've",
171
- "youll": "you'll",
172
- "youre": "you're",
173
- "youve": "you've",
174
- }
175
-
176
- NUMBER_MAP = {
177
- "none": "0",
178
- "zero": "0",
179
- "one": "1",
180
- "two": "2",
181
- "three": "3",
182
- "four": "4",
183
- "five": "5",
184
- "six": "6",
185
- "seven": "7",
186
- "eight": "8",
187
- "nine": "9",
188
- "ten": "10",
189
- }
190
- ARTICLES = ["a", "an", "the"]
191
- PERIOD_STRIP = re.compile(r"(?!<=\d)(\.)(?!\d)")
192
- COMMA_STRIP = re.compile(r"(?<=\d)(\,)+(?=\d)")
193
- PUNCTUATIONS = [
194
- ";",
195
- r"/",
196
- "[",
197
- "]",
198
- '"',
199
- "{",
200
- "}",
201
- "(",
202
- ")",
203
- "=",
204
- "+",
205
- "\\",
206
- "_",
207
- "-",
208
- ">",
209
- "<",
210
- "@",
211
- "`",
212
- ",",
213
- "?",
214
- "!",
215
- ]
216
-
217
- def __init__(self, *args, **kwargs):
218
- pass
219
-
220
- def word_tokenize(self, word):
221
- word = word.lower()
222
- word = word.replace(",", "").replace("?", "").replace("'s", " 's")
223
- return word.strip()
224
-
225
- def process_punctuation(self, in_text):
226
- out_text = in_text
227
- for p in self.PUNCTUATIONS:
228
- if (p + " " in in_text or " " + p in in_text) or (
229
- re.search(self.COMMA_STRIP, in_text) is not None
230
- ):
231
- out_text = out_text.replace(p, "")
232
- else:
233
- out_text = out_text.replace(p, " ")
234
- out_text = self.PERIOD_STRIP.sub("", out_text, re.UNICODE)
235
- return out_text
236
-
237
- def process_digit_article(self, in_text):
238
- out_text = []
239
- temp_text = in_text.lower().split()
240
- for word in temp_text:
241
- word = self.NUMBER_MAP.setdefault(word, word)
242
- if word not in self.ARTICLES:
243
- out_text.append(word)
244
- else:
245
- pass
246
- for word_id, word in enumerate(out_text):
247
- if word in self.CONTRACTIONS:
248
- out_text[word_id] = self.CONTRACTIONS[word]
249
- out_text = " ".join(out_text)
250
- return out_text
251
-
252
- def __call__(self, item):
253
- item = self.word_tokenize(item)
254
- item = item.replace("\n", " ").replace("\t", " ").strip()
255
- item = self.process_punctuation(item)
256
- item = self.process_digit_article(item)
257
- return item
258
-
259
-
260
- class TextVQAAccuracyEvaluator:
261
- def __init__(self):
262
- self.answer_processor = EvalAIAnswerProcessor()
263
-
264
- def _compute_answer_scores(self, raw_answers):
265
- """
266
- compute the accuracy (soft score) of human answers
267
- """
268
- answers = [self.answer_processor(a) for a in raw_answers]
269
- assert len(answers) == 10
270
- gt_answers = list(enumerate(answers))
271
- unique_answers = set(answers)
272
- unique_answer_scores = {}
273
-
274
- for unique_answer in unique_answers:
275
- accs = []
276
- for gt_answer in gt_answers:
277
- other_answers = [item for item in gt_answers if item != gt_answer]
278
- matching_answers = [
279
- item for item in other_answers if item[1] == unique_answer
280
- ]
281
- acc = min(1, float(len(matching_answers)) / 3)
282
- accs.append(acc)
283
- unique_answer_scores[unique_answer] = sum(accs) / len(accs)
284
-
285
- return unique_answer_scores
286
-
287
- def eval_pred_list(self, pred_list):
288
- pred_scores = []
289
- for entry in pred_list:
290
- pred_answer = self.answer_processor(entry["pred_answer"])
291
- unique_answer_scores = self._compute_answer_scores(entry["gt_answers"])
292
- score = unique_answer_scores.get(pred_answer, 0.0)
293
- pred_scores.append(score)
294
-
295
- accuracy = sum(pred_scores) / len(pred_scores)
296
- return accuracy
297
-
298
- # MME
299
- class MMEEvaluator:
300
- def divide_chunks(self, l, n=2):
301
- # looping till length l
302
- for i in range(0, len(l), n):
303
- yield l[i:i + n]
304
-
305
- return
306
-
307
- def parse_pred_ans(self, pred_ans):
308
- pred_label = None
309
- if pred_ans in ["yes", "no"]:
310
- pred_label = pred_ans
311
- else:
312
- prefix_pred_ans = pred_ans[:4]
313
-
314
- if "yes" in prefix_pred_ans:
315
- pred_label = "yes"
316
- elif "no" in prefix_pred_ans:
317
- pred_label = "no"
318
- else:
319
- pred_label = "other"
320
-
321
- return pred_label
322
-
323
-
324
- def compute_metric(self, gts, preds):
325
- assert len(gts) == len(preds)
326
-
327
- label_map = {
328
- "yes": 1,
329
- "no": 0,
330
- "other": -1,
331
- }
332
-
333
- gts = [label_map[x] for x in gts]
334
- preds = [label_map[x] for x in preds]
335
-
336
- acc = accuracy_score(gts, preds)
337
-
338
- clean_gts = []
339
- clean_preds = []
340
- other_num = 0
341
- for gt, pred in zip(gts, preds):
342
- if pred == -1:
343
- other_num += 1
344
- continue
345
- clean_gts.append(gt)
346
- clean_preds.append(pred)
347
-
348
-
349
- conf_mat = confusion_matrix(clean_gts, clean_preds, labels=[1,0])
350
- precision = precision_score(clean_gts, clean_preds, average='binary')
351
- recall = recall_score(clean_gts, clean_preds, average='binary')
352
- tp, fn = conf_mat[0]
353
- fp, tn = conf_mat[1]
354
-
355
- metric_dict = dict()
356
- metric_dict = {
357
- "TP": tp,
358
- "FN": fn,
359
- "TN": tn,
360
- "FP": fp,
361
- "precision": precision,
362
- "recall": recall,
363
- "other_num": other_num,
364
- "acc": acc,
365
- }
366
-
367
- return metric_dict
368
-
369
-
370
- def process_result(self, results_dir):
371
- eval_type_dict = {
372
- "Perception": ["existence", "count", "position", "color", "posters", "celebrity", "scene", "landmark", "artwork", "OCR"],
373
- "Cognition": ["commonsense_reasoning", "numerical_calculation", "text_translation", "code_reasoning"]
374
- }
375
-
376
- model_score_dict = dict()
377
- for eval_type, task_name_list in eval_type_dict.items():
378
-
379
- scores = 0
380
- task_score_dict = dict()
381
-
382
- for task_name in task_name_list:
383
- if not os.path.exists(results_dir):
384
- os.makedirs(results_dir)
385
- task_txt = os.path.join(results_dir, task_name + ".txt")
386
- lines = open(task_txt, 'r').readlines()
387
- chunk_lines = list(self.divide_chunks(lines)) # one image corresponds to two questions
388
-
389
- img_num = len(chunk_lines)
390
- task_other_ans_num = 0
391
- task_score = 0
392
- acc_plus_correct_num = 0
393
- gts = []
394
- preds = []
395
-
396
- for img_items in chunk_lines:
397
- assert len(img_items) == 2
398
- img_correct_num = 0
399
-
400
- for img_item in img_items:
401
- img_name, question, gt_ans, pred_ans = img_item.split("\t")
402
-
403
- gt_ans = gt_ans.lower()
404
- pred_ans = pred_ans.lower()
405
-
406
- assert gt_ans in ["yes", "no"] # gt can only be yes or no.
407
-
408
- pred_ans = self.parse_pred_ans(pred_ans)
409
- assert pred_ans in ["yes", "no", "other"]
410
-
411
- gts.append(gt_ans)
412
- preds.append(pred_ans)
413
-
414
- if gt_ans == pred_ans:
415
- img_correct_num += 1
416
-
417
- if pred_ans not in ["yes", "no"]:
418
- task_other_ans_num += 1
419
-
420
- if img_correct_num == 2:
421
- acc_plus_correct_num += 1
422
-
423
- # cal TP precision acc, etc.
424
- metric_dict = self.compute_metric(gts, preds)
425
- acc_plus = acc_plus_correct_num / img_num
426
- metric_dict["acc_plus"] = acc_plus
427
-
428
-
429
- for k, v in metric_dict.items():
430
- if k in ["acc", "acc_plus"]:
431
- task_score += v*100
432
-
433
- task_score_dict[task_name] = task_score
434
-
435
- scores += task_score
436
- task_score_dict['total'] = scores
437
- model_score_dict[eval_type] = task_score_dict
438
- return model_score_dict
439
-
440
- # For MMMU, convert all <image #> tokens to <image>
441
- def replace_image_tokens(question):
442
- replaced = set()
443
- def replace_token(match):
444
- token = match.group(0)
445
- if token not in replaced:
446
- replaced.add(token)
447
- return '<image>'
448
- return token
449
-
450
- pattern = re.compile(r'<image\s\d+>')
451
- return pattern.sub(replace_token, question)
452
-
453
- # For MMMU, count all <image #> tokens
454
- def count_unique_image_tokens(string):
455
- pattern = r'<image\s\d+>'
456
- matches = re.findall(pattern, string)
457
- return len(set(matches))
458
-
459
- # TextVQA
460
- def prompt_processor(self, prompt):
461
- if prompt.startswith('OCR tokens: '):
462
- pattern = r"Question: (.*?) Short answer:"
463
- match = re.search(pattern, prompt, re.DOTALL)
464
- question = match.group(1)
465
- elif 'Reference OCR token: ' in prompt and len(prompt.split('\n')) == 3:
466
- if prompt.startswith('Reference OCR token:'):
467
- question = prompt.split('\n')[1]
468
- else:
469
- question = prompt.split('\n')[0]
470
- elif len(prompt.split('\n')) == 2:
471
- question = prompt.split('\n')[0]
472
- else:
473
- assert False
474
-
475
- return question.lower()
476
-
477
- # Convert answer to integer
478
- def char_to_int(char):
479
- return ord(char.upper()) - ord('A')
480
-
481
- # In case model does not output a single letter, find the choice in answer
482
- def convert_to_choice(answer, candidates):
483
- options = ["A", "B", "C", "D", "E"]
484
- if answer in options:
485
- extracted_answer = answer
486
- elif len(answer) >= 2 and answer[0] in options and "." in answer:
487
- extracted_answer= answer[0]
488
- else:
489
- pattern = re.compile(r'The answer is ([A-Z]).')
490
- res = pattern.findall(answer)
491
- if len(res) == 1:
492
- extracted_answer = res[0] # 'A', 'B', ...
493
- else:
494
- extracted_answer = "FAILED"
495
-
496
- if extracted_answer in options[:len(candidates)]:
497
- return options.index(extracted_answer)
498
- else:
499
- return -1
500
-
501
- def get_pred_idx(prediction, choices, options):
502
- """
503
- Get the index (e.g. 2) from the prediction (e.g. 'C')
504
- """
505
- if prediction in options[:len(choices)]:
506
- return options.index(prediction)
507
- else:
508
- return -1
509
-
510
- # Chart QA
511
- def relaxed_correctness(target: str,
512
- prediction: str,
513
- max_relative_change: float = 0.05) -> bool:
514
- """Calculates relaxed correctness.
515
-
516
- The correctness tolerates certain error ratio defined by max_relative_change.
517
- See https://arxiv.org/pdf/2203.10244.pdf, end of section 5.1:
518
- “Following Methani et al. (2020), we use a relaxed accuracy measure for the
519
- numeric answers to allow a minor inaccuracy that may result from the automatic
520
- data extraction process. We consider an answer to be correct if it is within
521
- 5% of the gold answer. For non-numeric answers, we still need an exact match
522
- to consider an answer to be correct.”
523
-
524
- Args:
525
- target: Target string.
526
- prediction: Predicted string.
527
- max_relative_change: Maximum relative change.
528
-
529
- Returns:
530
- Whether the prediction was correct given the specified tolerance.
531
- """
532
-
533
- def _to_float(text: str) -> Optional[float]:
534
- try:
535
- if text.endswith('%'):
536
- # Convert percentages to floats.
537
- return float(text.rstrip('%')) / 100.0
538
- else:
539
- return float(text)
540
- except ValueError:
541
- return None
542
-
543
- prediction_float = _to_float(prediction)
544
- target_float = _to_float(target)
545
- if prediction_float is not None and target_float:
546
- relative_change = abs(prediction_float -
547
- target_float) / abs(target_float)
548
- return relative_change <= max_relative_change
549
- else:
550
- return prediction.lower() == target.lower()
551
-
552
- # MME
553
- def get_gt(data_path):
554
- ground_truth = {}
555
- for category in os.listdir(data_path):
556
- category_dir = os.path.join(data_path, category)
557
- if not os.path.isdir(category_dir):
558
- continue
559
- if os.path.exists(os.path.join(category_dir, 'images')):
560
- image_path = os.path.join(category_dir, 'images')
561
- qa_path = os.path.join(category_dir, 'questions_answers_YN')
562
- else:
563
- image_path = qa_path = category_dir
564
- assert os.path.isdir(image_path), image_path
565
- assert os.path.isdir(qa_path), qa_path
566
- for file in os.listdir(qa_path):
567
- if not file.endswith('.txt'):
568
- continue
569
- for line in open(os.path.join(qa_path, file)):
570
- question, answer = line.strip().split('\t')
571
- ground_truth[(category, file, question)] = answer
572
- return ground_truth
573
-
574
- # Chart QA
575
- def relaxed_correctness(target: str,
576
- prediction: str,
577
- max_relative_change: float = 0.05) -> bool:
578
- """Calculates relaxed correctness.
579
-
580
- The correctness tolerates certain error ratio defined by max_relative_change.
581
- See https://arxiv.org/pdf/2203.10244.pdf, end of section 5.1:
582
- “Following Methani et al. (2020), we use a relaxed accuracy measure for the
583
- numeric answers to allow a minor inaccuracy that may result from the automatic
584
- data extraction process. We consider an answer to be correct if it is within
585
- 5% of the gold answer. For non-numeric answers, we still need an exact match
586
- to consider an answer to be correct.”
587
-
588
- Args:
589
- target: Target string.
590
- prediction: Predicted string.
591
- max_relative_change: Maximum relative change.
592
-
593
- Returns:
594
- Whether the prediction was correct given the specified tolerance.
595
- """
596
-
597
- def _to_float(text: str) -> Optional[float]:
598
- try:
599
- if text.endswith('%'):
600
- # Convert percentages to floats.
601
- return float(text.rstrip('%')) / 100.0
602
- else:
603
- return float(text)
604
- except ValueError:
605
- return None
606
-
607
- prediction_float = _to_float(prediction)
608
- target_float = _to_float(target)
609
- if prediction_float is not None and target_float:
610
- relative_change = abs(prediction_float -
611
- target_float) / abs(target_float)
612
- return relative_change <= max_relative_change
613
- else:
614
- return prediction.lower() == target.lower()
615
-
616
- def evaluate_relaxed_accuracy(entries):
617
- scores = []
618
- for elem in entries:
619
- if isinstance(elem['annotation'], str):
620
- elem['annotation'] = [elem['annotation']]
621
- score = max([
622
- relaxed_correctness(elem['answer'].strip(), ann)
623
- for ann in elem['annotation']
624
- ])
625
- scores.append(score)
626
- return sum(scores) / len(scores)
627
-
628
- # POPE
629
- def eval_pope(answers, label_file):
630
- label_list = [json.loads(q)['label'] for q in open(label_file, 'r')]
631
-
632
- for answer in answers:
633
- text = answer['answer'].lower()
634
-
635
- # Only keep the first sentence
636
- if text.find('.') != -1:
637
- text = text.split('.')[0]
638
-
639
- text = text.replace(',', '')
640
- words = text.split(' ')
641
- if 'No' in words or 'not' in words or 'no' in words:
642
- answer['answer'] = 'no'
643
- else:
644
- answer['answer'] = 'yes'
645
-
646
- for i in range(len(label_list)):
647
- if label_list[i] == 'no':
648
- label_list[i] = 0
649
- else:
650
- label_list[i] = 1
651
-
652
- pred_list = []
653
- for answer in answers:
654
- if answer['answer'] == 'no':
655
- pred_list.append(0)
656
- else:
657
- pred_list.append(1)
658
-
659
- pos = 1
660
- neg = 0
661
-
662
- TP, TN, FP, FN = 0, 0, 0, 0
663
- for pred, label in zip(pred_list, label_list):
664
- if pred == pos and label == pos:
665
- TP += 1
666
- elif pred == pos and label == neg:
667
- FP += 1
668
- elif pred == neg and label == neg:
669
- TN += 1
670
- elif pred == neg and label == pos:
671
- FN += 1
672
-
673
- acc = (TP + TN) / (TP + TN + FP + FN)
674
- return acc
675
-
676
- # Eval GQA
677
- # bool to float
678
- def toScore(b):
679
- return float(1 if b else 0)
680
-
681
- # Compute average of a list
682
- def avg(l):
683
- if len(l) == 0:
684
- return 0
685
- return float(sum(l)) / len(l)
686
-
687
- def eval_gqa(predictions, questions):
688
- # Initialize data structure to track all metrics: e.g. accuracy, validity and plausibility, as well as
689
- # accuracy per question type, length and number of reasoning steps.
690
- scores = {
691
- "accuracy": [], # list of accuracies per question (1 if correct else 0). Will be averaged ultimately.
692
- "binary": [], # list of accuracies per a binary question (1 if correct else 0). Will be averaged ultimately.
693
- "open": [], # list of accuracies per an open question (1 if correct else 0). Will be averaged ultimately.
694
- "validity": [], # list of validity per question (1 if valid else 0).
695
- "plausibility": [], # list of plausibility per question (1 if plausible else 0).
696
- "consistency": [], # list of consistency scores for entailed questions.
697
- "accuracyPerStructuralType": defaultdict(list), # list of question accuracies for each structural type (e.g. compare, logic questions).
698
- "accuracyPerSemanticType": defaultdict(list), # list of question accuracies for each semantic type (e.g. questions about an object, an attribute, a relation).
699
- "accuracyPerLength": defaultdict(list), # list of question accuracies per question's word number.
700
- "accuracyPerSteps": defaultdict(list), # list of question accuracies per question's reasoning length (steps number).
701
- "grounding": [] # list of grounding scores for each question.
702
- }
703
-
704
- # Initialize golden and predicted histograms per each question group. Used to compute the distribution metric.
705
- dist = {
706
- "gold": defaultdict(lambda: defaultdict(int)),
707
- "predicted": defaultdict(lambda: defaultdict(int))
708
- }
709
-
710
- ##### Question lengths - words numbers and reasoning steps number
711
- ##########################################################################################
712
-
713
- # Compute question length (words number)
714
- def getWordsNum(question):
715
- return len(question["question"].split())
716
-
717
- # Compute number of reasoning steps (excluding the final "querying" step which doesn't increase effective reasoning length)
718
- def getStepsNum(question):
719
- return len([c for c in question["semantic"] if not (any([o in "{}: {}".format(c["operation"], c["argument"])
720
- for o in ["exist", "query: name", "choose name"]]))])
721
-
722
- ##### Main score computation
723
- ##########################################################################################
724
-
725
- # Loop over the questions and compute metrics
726
- for qid, question in questions.items():
727
- gold = question["answer"]
728
- if qid not in predictions:
729
- continue
730
- predicted = predictions[qid].lower()
731
-
732
- correct = (predicted == gold)
733
- score = toScore(correct)
734
-
735
- wordsNum = getWordsNum(question)
736
- stepsNum = getStepsNum(question)
737
-
738
- # Compute scores over the balanced dataset (more robust against cheating by making educated guesses)
739
- if question["isBalanced"]:
740
- # Update accuracy
741
- scores["accuracy"].append(score)
742
- scores["accuracyPerLength"][wordsNum].append(score)
743
- scores["accuracyPerSteps"][stepsNum].append(score)
744
- scores["accuracyPerStructuralType"][question["types"]["structural"]].append(score)
745
- scores["accuracyPerSemanticType"][question["types"]["semantic"]].append(score)
746
- answerType = "open" if question["types"]["structural"] == "query" else "binary"
747
- scores[answerType].append(score)
748
-
749
- # Update histograms for gold and predicted answers
750
- globalGroup = question["groups"]["global"]
751
- if globalGroup is not None:
752
- dist["gold"][globalGroup][gold] += 1
753
- dist["predicted"][globalGroup][predicted] += 1
754
-
755
- # Average scores over all questions (in the balanced dataset) and print scores
756
- metrics = [
757
- "binary",
758
- "open",
759
- "accuracy",
760
- "consistency",
761
- "validity",
762
- "plausibility",
763
- "grounding",
764
- ]
765
-
766
- detailedMetrics = [
767
- ("accuracyPerStructuralType", "Accuracy / structural type"),
768
- ("accuracyPerSemanticType", "Accuracy / semantic type"),
769
- ("accuracyPerSteps", "Accuracy / steps number"),
770
- ("accuracyPerLength", "Accuracy / words number")
771
- ]
772
-
773
- subMetrics = {
774
- "attr": "attribute",
775
- "cat": "category",
776
- "global": "scene",
777
- "obj": "object",
778
- "rel": "relation"
779
- }
780
- # average
781
- for k in metrics:
782
- if isinstance(scores[k], list):
783
- scores[k] = avg(scores[k]) * 100
784
-
785
- for k, _ in detailedMetrics:
786
- for t in scores[k]:
787
- scores[k][t] = avg(scores[k][t]) * 100, len(scores[k][t])
788
-
789
- # print
790
- print("")
791
- for m in metrics:
792
- # skip grounding and consistency scores if not requested
793
- if m == "grounding":
794
- continue
795
- if m == "consistency":
796
- continue
797
-
798
- # print score
799
- print("{title}: {score:.2f}{suffix}".format(title = m.capitalize(), score = scores[m],
800
- suffix = " (lower is better)" if m == "distribution" else "%"))
801
-
802
- for m, mPrintName in detailedMetrics:
803
- print("")
804
- # print metric title
805
- print("{}:".format(mPrintName))
806
-
807
- for t in sorted(list(scores[m].keys())):
808
- # set sub-metric title
809
- tName = t
810
- if isinstance(scores[k], list):
811
- tName = subMetrics.get(t, t).capitalize()
812
-
813
- # print score
814
- print(" {title}: {score:.2f}{suffix} ({amount} questions)".format(title = tName,
815
- score = scores[m][t][0], suffix = "%", amount = scores[m][t][1]))
816
-
817
- return scores
818
-
819
- # MMMU
820
- DOMAIN_CAT2SUB_CAT = {
821
- 'Art and Design': ['Art', 'Art_Theory', 'Design', 'Music'],
822
- 'Business': ['Accounting', 'Economics', 'Finance', 'Manage','Marketing'],
823
- 'Science': ['Biology', 'Chemistry', 'Geography', 'Math', 'Physics',],
824
- 'Health and Medicine': ['Basic_Medical_Science', 'Clinical_Medicine', 'Diagnostics_and_Laboratory_Medicine', 'Pharmacy', 'Public_Health'],
825
- 'Humanities and Social Science': ['History', 'Literature', 'Sociology', 'Psychology'],
826
- 'Tech and Engineering': ['Agriculture', 'Architecture_and_Engineering', 'Computer_Science', 'Electronics', 'Energy_and_Power', 'Materials', 'Mechanical_Engineering'],
827
- }
828
-
829
-
830
- CAT_SHORT2LONG = {
831
- 'acc': 'Accounting',
832
- 'agri': 'Agriculture',
833
- 'arch': 'Architecture_and_Engineering',
834
- 'art': 'Art',
835
- 'art_theory': 'Art_Theory',
836
- 'bas_med': 'Basic_Medical_Science',
837
- 'bio': 'Biology',
838
- 'chem': 'Chemistry',
839
- 'cli_med': 'Clinical_Medicine',
840
- 'cs': 'Computer_Science',
841
- 'design': 'Design',
842
- 'diag_med': 'Diagnostics_and_Laboratory_Medicine',
843
- 'econ': 'Economics',
844
- 'elec': 'Electronics',
845
- 'ep': 'Energy_and_Power',
846
- 'fin': 'Finance',
847
- 'geo': 'Geography',
848
- 'his': 'History',
849
- 'liter': 'Literature',
850
- 'manage': 'Manage',
851
- 'mark': 'Marketing',
852
- 'mate': 'Materials',
853
- 'math': 'Math',
854
- 'mech': 'Mechanical_Engineering',
855
- 'music': 'Music',
856
- 'phar': 'Pharmacy',
857
- 'phys': 'Physics',
858
- 'psy': 'Psychology',
859
- 'pub_health': 'Public_Health',
860
- 'socio': 'Sociology'
861
- }
862
-
863
- """Response Parsing and Evaluation for various models"""
864
-
865
- # ----------- Process Multi-choice -------------
866
- def parse_multi_choice_response(response, all_choices, index2ans):
867
- """
868
- Parse the prediction from the generated response.
869
- Return the predicted index e.g., A, B, C, D.
870
- """
871
- for char in [',', '.', '!', '?', ';', ':', "'"]:
872
- response = response.strip(char)
873
- response = " " + response + " " # add space to avoid partial match
874
-
875
- index_ans = True
876
- ans_with_brack = False
877
- candidates = []
878
- for choice in all_choices: # e.g., (A) (B) (C) (D)
879
- if f'({choice})' in response:
880
- candidates.append(choice)
881
- ans_with_brack = True
882
-
883
- if len(candidates) == 0:
884
- for choice in all_choices: # e.g., A B C D
885
- if f' {choice} ' in response:
886
- candidates.append(choice)
887
-
888
- # if all above doesn't get candidates, check if the content is larger than 5 tokens and try to parse the example
889
- if len(candidates) == 0 and len(response.split()) > 5:
890
- for index, ans in index2ans.items():
891
- if ans.lower() in response.lower():
892
- candidates.append(index)
893
- index_ans = False # it's content ans.
894
-
895
- if len(candidates) == 0: # still not get answer, randomly choose one.
896
- pred_index = random.choice(all_choices)
897
- elif len(candidates) > 1:
898
- start_indexes = []
899
- if index_ans:
900
- if ans_with_brack:
901
- for can in candidates:
902
- index = response.rfind(f'({can})')
903
- start_indexes.append(index) # -1 will be ignored anyway
904
- # start_indexes = [generated_response.index(f'({can})') for can in candidates]
905
- else:
906
- for can in candidates:
907
- index = response.rfind(f" {can} ")
908
- start_indexes.append(index)
909
- else:
910
- for can in candidates:
911
- index = response.lower().rfind(index2ans[can].lower())
912
- start_indexes.append(index)
913
- # get the last one
914
- pred_index = candidates[np.argmax(start_indexes)]
915
- else: # if only one candidate, use it.
916
- pred_index = candidates[0]
917
-
918
- return pred_index
919
-
920
- # ----------- Process Open -------------
921
- def check_is_number(string):
922
- """
923
- Check if the given string is a number.
924
- """
925
- try:
926
- float(string.replace(',', ''))
927
- return True
928
- except ValueError:
929
- # check if there's comma inside
930
- return False
931
-
932
- def normalize_str(string):
933
- """
934
- Normalize the str to lower case and make them float numbers if possible.
935
- """
936
- # check if characters in the string
937
-
938
- # if number, numerize it.
939
- string = string.strip()
940
-
941
- is_number = check_is_number(string)
942
-
943
- if is_number:
944
- string = string.replace(',', '')
945
- string = float(string)
946
- # leave 2 decimal
947
- string = round(string, 2)
948
- return [string]
949
- else: # it's likely to be a string
950
- # lower it
951
- string = string.lower()
952
- if len(string) == 1:
953
- return [" " + string, string + " "] # avoid trivial matches
954
- return [string]
955
-
956
- def extract_numbers(string):
957
- """
958
- Extract all forms of numbers from a string with regex.
959
- """
960
- # Pattern for numbers with commas
961
- pattern_commas = r'-?\b\d{1,3}(?:,\d{3})+\b'
962
- # Pattern for scientific notation
963
- pattern_scientific = r'-?\d+(?:\.\d+)?[eE][+-]?\d+'
964
- # Pattern for simple numbers without commas
965
- pattern_simple = r'-?(?:\d+\.\d+|\.\d+|\d+\b)(?![eE][+-]?\d+)(?![,\d])'
966
-
967
- # Extract numbers with commas
968
- numbers_with_commas = re.findall(pattern_commas, string)
969
- # Extract numbers in scientific notation
970
- numbers_scientific = re.findall(pattern_scientific, string)
971
- # Extract simple numbers without commas
972
- numbers_simple = re.findall(pattern_simple, string)
973
-
974
- # Combine all extracted numbers
975
- all_numbers = numbers_with_commas + numbers_scientific + numbers_simple
976
- return all_numbers
977
-
978
- def parse_open_response(response):
979
- """
980
- Parse the prediction from the generated response.
981
- Return a list of predicted strings or numbers.
982
- """
983
- # content = content.strip("\n").strip(".").strip(" ")
984
- def get_key_subresponses(response):
985
- key_responses = []
986
- response = response.strip().strip(".").lower()
987
- sub_responses = re.split(r'\.\s(?=[A-Z])|\n', response)
988
- indicators_of_keys = ['could be ', 'so ', 'is ',
989
- 'thus ', 'therefore ', 'final ', 'answer ', 'result ']
990
- key_responses = []
991
- for index, resp in enumerate(sub_responses):
992
- # if last one, accept it's an equation (the entire response can be just one sentence with equation)
993
- if index == len(sub_responses) - 1:
994
- indicators_of_keys.extend(['='])
995
- shortest_key_response = None # the shortest response that may contain the answer (tail part of the response)
996
- for indicator in indicators_of_keys:
997
- if indicator in resp:
998
- if not shortest_key_response:
999
- shortest_key_response = resp.split(indicator)[-1].strip()
1000
- else:
1001
- if len(resp.split(indicator)[-1].strip()) < len(shortest_key_response):
1002
- shortest_key_response = resp.split(indicator)[-1].strip()
1003
- # key_responses.append(resp.split(indicator)[1].strip())
1004
-
1005
- if shortest_key_response:
1006
- # and it's not trivial
1007
- if shortest_key_response.strip() not in [":", ",", ".", "!", "?", ";", ":", "'"]:
1008
- key_responses.append(shortest_key_response)
1009
- if len(key_responses) == 0: # did not find any
1010
- return [response]
1011
- return key_responses
1012
- # pdb.set_trace()
1013
- key_responses = get_key_subresponses(response)
1014
-
1015
- pred_list = key_responses.copy() # keep the original string response
1016
- for resp in key_responses:
1017
- pred_list.extend(extract_numbers(resp))
1018
-
1019
- tmp_pred_list = []
1020
- for i in range(len(pred_list)):
1021
- tmp_pred_list.extend(normalize_str(pred_list[i]))
1022
- pred_list = tmp_pred_list
1023
-
1024
- # remove duplicates
1025
- pred_list = list(set(pred_list))
1026
-
1027
- return pred_list
1028
-
1029
- # ----------- Evaluation -------------
1030
-
1031
- def eval_multi_choice(gold_i, pred_i):
1032
- """
1033
- Evaluate a multiple choice instance.
1034
- """
1035
- correct = False
1036
- # only they are exactly the same, we consider it as correct
1037
- if isinstance(gold_i, list):
1038
- for answer in gold_i:
1039
- if answer == pred_i:
1040
- correct = True
1041
- break
1042
- else: # gold_i is a string
1043
- if gold_i == pred_i:
1044
- correct = True
1045
- return correct
1046
-
1047
- def eval_open(gold_i, pred_i):
1048
- """
1049
- Evaluate an open question instance
1050
- """
1051
- correct = False
1052
- if isinstance(gold_i, list):
1053
- # use float to avoid trivial matches
1054
- norm_answers = []
1055
- for answer in gold_i:
1056
- norm_answers.extend(normalize_str(answer))
1057
- else:
1058
- norm_answers = normalize_str(gold_i)
1059
- for pred in pred_i: # pred is already normalized in parse response phase
1060
- if isinstance(pred, str): # if it's a string, then find if ans in the pred_i
1061
- for norm_ans in norm_answers:
1062
- # only see if the string answer in the string pred
1063
- if isinstance(norm_ans, str) and norm_ans in pred:
1064
- if not correct:
1065
- correct = True
1066
- break
1067
- else: # it's a float number
1068
- if pred in norm_answers:
1069
- if not correct:
1070
- correct = True
1071
- break
1072
- return correct
1073
-
1074
- # ----------- Batch Evaluation -------------
1075
- def evaluate(samples):
1076
- """
1077
- Batch evaluation for multiple choice and open questions.
1078
- """
1079
- pred_correct = 0
1080
- judge_dict = dict()
1081
- for sample in samples:
1082
- gold_i = sample['answer']
1083
- pred_i = sample['parsed_pred']
1084
- if sample['question_type'] == 'multiple-choice':
1085
- correct = eval_multi_choice(gold_i, pred_i)
1086
- else: # open question
1087
- correct = eval_open(gold_i, pred_i)
1088
-
1089
- if correct:
1090
- judge_dict[sample['id']] = 'Correct'
1091
- pred_correct += 1
1092
- else:
1093
- judge_dict[sample['id']] = 'Wrong'
1094
-
1095
- if len(samples) == 0:
1096
- return {'acc': 0}
1097
- return judge_dict, {'acc': pred_correct / len(samples)}
1098
-
1099
-
1100
-
1101
- # ----------- Calculate Accuracy -------------
- def calculate_ins_level_acc(results: Dict):
-     """Calculate the instruction level accuracy for given Subject results"""
-     acc = 0
-     ins_num = 0
-     for cat_results in results.values():
-         acc += cat_results['acc'] * cat_results['num_example']
-         ins_num += cat_results['num_example']
-     if ins_num == 0:
-         return 0
-     return acc / ins_num
-
- def eval_mathverse(output_dir, results, extract_file, score_file):
-     openai.api_key = OPENAI_KEY
-
-     demo_prompt_extract = """
- I am providing you a response from a model to a math problem, termed 'Model Response'. You should extract the answer from the response as 'Extracted Answer'. Directly output the extracted answer with no explanation.
-
- 1.
- Model response: 'Rounded to two decimal places, the perimeter of the sector is approximately:\n\n(-2, 1)'
- Extracted Answer: (-2, 1)
-
- 2.
- Model response: 'at those points.\n\nTherefore, the correct option that represents the meaning of the intersection points of the graphs is:\n\nD. They give the solutions to the equation $f(t)=g(t)$.",'
- Extracted Answer: D
-
- 3.
- Model response: ' at 1 (there's a closed circle at y = 1), the range in interval notation is \\((-4, 1]\\).\n\nFinal values:\nDomain: \\((-3, 3]\\)\nRange: \\((-4, 1]\\)'
- Extracted Answer: Domain: \\((-3, 3]\\)\nRange: \\((-4, 1]\\)
-
- 4.
- Model response: 'As it stands, I cannot provide the correct option letter because there isn't enough information to solve for 'y'.'
- Extracted Answer: null
-
- 5.
- Model response: 'Given that AB = 17.6 meters, we can now substitute into the equation:\n\nd = 17.6 / cos(38\u00b0)\n\nTherefore, to one decimal place, the distance d between Ned and Bart is approximately 22.3 meters.'
- Extracted answer: 22.3
-
- 6.
- Model response: have all the coefficients for the quadratic function:\n\\( f(x) = ax^2 + bx + c \\)\n\\( f(x) = -1x^2 - 2x + 1 \\)\n\nTherefore, the equation for the graphed function \\( f \\) is:\n\\( f(x) = -x^2 - 2x + 1 \\)"'
- Extracted answer: f(x) = -x^2 - 2x + 1
-
- 7.
- """
-     demo_prompt_score = """
- Below are two answers to a math question. Question is [Question], [Standard Answer] is the standard answer to the question, and [Model_answer] is the answer extracted from a model's output to this question. Determine whether these two answers are consistent.
- Please note that only when the [Model_answer] completely matches the [Standard Answer] means they are consistent. For non-multiple-choice questions, if the meaning is expressed in the same way, it is also considered consistent, for example, 0.5m and 50cm.
- If they are consistent, Judement is 1; if they are different, Judement is 0.
-
- [Question]: Write the set of numbers represented on the number line in interval notation.
- [Standard Answer]: (-2,1]
- [Model_answer] : Extracted Answer: \\((-2, 1)\\)
- Judgement: 0
-
- [Question]: As shown in the figure, circle O has a radius 1.0, if angle BAC = 60.0, then the length of BC is ()\nChoices:\nA:2\nB:2\u221a{{3}}\nC:\u221a{{3}}\nD:2\u221a{{2}}
- [Standard Answer]: C
- [Model_answer] : B:2\u221a{{3}}
- Judgement: 0
-
- [Question]: Find the domain and range of the function f using interval notation.
- [Standard Answer]: domain: [-4, 0) and range: (-3, 1]
- [Model_answer] : Range: \\((-4, 1]\\)
- Judgement: 0
-
- [Question]: As shown in the figure, circle O has a radius 1.0, if angle BAC = 60.0, then the length of BC is ()\nChoices:\nA:2\nB:2\u221a{{3}}\nC:\u221a{{3}}\nD:2\u221a{{2}}
- [Standard Answer]: C
- [Model_answer] : null
- Judgement: 0
-
- [Question]: Given the graph of the ellipse that intersects with x-axis at 9 and -9 and with y-axis at 3 and -3, determine its equation.A. \\frac{{x^2}}{{81}} + \\frac{{y^2}}{{9}} = 1 B. Can not determine.\n
- [Standard Answer]: A
- [Model_answer] : \\frac{{x^2}}{{81}} + \\frac{{y^2}}{{9}} = 1
- Judgement: 1
-
- [Question]: {question}
- [Standard Answer]: {gt}
- [Model_answer] : {extraction}
- Judgement: """
-
-     def create_extract_prompt(demo_prompt, response, inst):
-         demo_prompt = demo_prompt.strip()
-         test_prompt = f"Model response: '{response}'\nExtracted Answer: "
-         full_prompt = f"{demo_prompt}\n\n{test_prompt}"
-         return full_prompt
-
-     def create_scoring_prompt(demo_prompt, inst):
-         demo_prompt = demo_prompt.strip()
-         full_prompt = demo_prompt.format(question=inst['question'], gt=inst['answer'], extraction=inst['extraction'])
-         return full_prompt
-
-     def extract_answer(response, inst, api_key):
-         # general extraction
-         try:
-             full_prompt = create_extract_prompt(demo_prompt_extract, response, inst)
-             extraction = get_chat_response(full_prompt, api_key)
-             return extraction
-         except Exception as e:
-             print(e)
-             print(f"Error in extracting answer for {response}")
-         return ""
-
-     def match_answer(inst, api_key):
-         try:
-             full_prompt = create_scoring_prompt(demo_prompt_score, inst)
-             extraction = get_chat_response(full_prompt, api_key)
-             return extraction.replace("Judgement:", "").strip()
-         except Exception as e:
-             print(e)
-             print("Error in matching answer")
-         return ""
-
-     save_results = []
-     score_dict = defaultdict(lambda: defaultdict(list))
-     score_version_dict = defaultdict(list)
-
-     for i, inst in enumerate(tqdm(results)):
-         response = inst['model_answer']
-         extraction = extract_answer(response, inst, OPENAI_KEY)
-         inst['extraction'] = extraction.replace('Extracted Answer: ', '').strip()
-
-         judgement = match_answer(inst, OPENAI_KEY)
-         while True:
-             if judgement.strip() not in ['0', '1']:
-                 print('Wrong return format: ', judgement)
-                 judgement = match_answer(inst, OPENAI_KEY)
-             else:
-                 inst['judgement'] = int(judgement)
-                 break
-
-         save_results.append(inst)
-
-         score_dict[inst['metadata']['subject']][inst['metadata']['subfield']].append(inst['judgement'])
-         score_version_dict[inst['problem_version']].append(inst['judgement'])
-
-     results_file = os.path.join(output_dir, extract_file)
-     with open(results_file, 'w') as f:
-         json.dump(save_results, f, indent=4)
-
-     print(f"Save MathVerse Results at {results_file}")
-
-     save_json = {}
-     # version level acc
-     total_cnt, right_cnt = 0, 0
-     for version in score_version_dict:
-         version_total_cnt = len(score_version_dict[version])
-         version_right_cnt = len([inst for inst in score_version_dict[version] if inst == 1])
-         total_cnt += version_total_cnt
-         right_cnt += version_right_cnt
-         print(f"{version} Acc: {(version_right_cnt/version_total_cnt):.3f}")
-         save_json[version] = f"{(version_right_cnt/version_total_cnt):.3f}"
-
-     print(f"Acc: {(right_cnt/total_cnt):.3f}")
-
-     save_json["Total Acc"] = f"{(right_cnt/total_cnt):.3f}"
-
-     scores_file = os.path.join(output_dir, score_file)
-     with open(scores_file, 'w') as f:
-         json.dump(save_json, f, indent=4)
-
-
- def eval_mmstar(eval_file, output_dir, score_file):
-     MMStar_score_l2 = {
-         'coarse perception': {
-             'image scene and topic': 0,
-             'image style & quality': 0,
-             'image emotion': 0
-         },
-         'fine-grained perception': {
-             'object counting': 0,
-             'recognition': 0,
-             'localization': 0
-         },
-         'instance reasoning': {
-             'single-instance reasoning': 0,
-             'cross-instance attribute reasoning': 0,
-             'cross-instance relation reasoning': 0
-         },
-         'logical reasoning': {
-             'code & sequence reasoning': 0,
-             'diagram reasoning': 0,
-             'common reasoning': 0
-         },
-         'science & technology': {
-             'biology & chemistry & physics': 0,
-             'electronics & energy & mechanical eng.': 0,
-             'geography & earth science & agriculture': 0
-         },
-         'math': {
-             'geometry': 0,
-             'numeric commonsense and calculation': 0,
-             'statistical reasoning': 0
-         },
-     }
-     MMStar_counter = deepcopy(MMStar_score_l2)
-
-     data = eval_file
-     lt = len(data)
-     lines = [data.iloc[i] for i in range(lt)]
-     for i in tqdm(range(len(lines))):
-         line = lines[i]
-         predict = str(line['prediction'])
-         answers = str(line['answer'])
-         # ori_bench = str(line['bench'])
-         category = str(line['category'])
-         l2_category = str(line['l2_category'])
-         MMStar_counter[category][l2_category] += 1
-
-         answer = answers.lower().strip().replace('\n', ' ')
-         predict = predict.lower().strip().replace('\n', ' ')
-         # if ori_bench == 'MathVista' and answer not in ['a', 'b', 'c', 'd']:
-         #     if answer in predict:
-         #         MMStar_score_l2[category][l2_category] += 1
-         # else:
-         try:
-             if answer == predict[0]:
-                 MMStar_score_l2[category][l2_category] += 1
-             elif predict[0] == '(' and answer == predict[1]:
-                 MMStar_score_l2[category][l2_category] += 1
-             elif predict[0:7] == 'option ' and answer == predict[7]:
-                 MMStar_score_l2[category][l2_category] += 1
-             elif predict[0:14] == 'the answer is ' and answer == predict[14]:
-                 MMStar_score_l2[category][l2_category] += 1
-         except Exception as e:
-             pass
-
-     MMStar_score = {}
-     MMStar_score['final score'] = 0
-     for k, v in MMStar_score_l2.items():
-         MMStar_score[k] = 0
-         for l2_k, l2_v in v.items():
-             if float(MMStar_counter[k][l2_k]) == 0:
-                 MMStar_score[f'{k}({l2_k})'] = 0
-             else:
-                 MMStar_score[f'{k}({l2_k})'] = float(l2_v) / float(MMStar_counter[k][l2_k])
-             MMStar_score[k] += l2_v
-         MMStar_score['final score'] += MMStar_score[k]
-         MMStar_score[k] = float(MMStar_score[k]) / 250.0
-     MMStar_score['final score'] = float(MMStar_score['final score']) / 1500.0
-
-     score_pth = os.path.join(output_dir, score_file)
-     with open(score_pth, 'w') as f:
-         json.dump(MMStar_score, f, indent=4)
-
-     print(f'MMStar_eval successfully finished evaluating {eval_file}, results saved in {score_pth}')
-     print('Score: ')
-     for key, value in MMStar_score.items():
-         print('{}:{}'.format(key, value))
 
lobster.jpg ADDED