ImAnonymousUser committed
Commit
099d40e
1 Parent(s): 1841b56

Anonymous commit

Files changed (4)
  1. app.py +330 -0
  2. mm-vet/bard_set.json +1 -0
  3. mm-vet/mm-vet.json +0 -0
  4. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,330 @@
+ import gradio as gr
+ import json
+ import os
+ from tqdm import tqdm
+ import pandas as pd
+ import numpy as np
+ from collections import Counter
+ import time
+ from zipfile import ZipFile
+ from openai import AzureOpenAI, BadRequestError, RateLimitError
+
+ # The Azure OpenAI client is configured entirely from environment variables:
+ # AZURE_OPENAI_API_KEY, AZURE_OPENAI_API_VERSION, AZURE_OPENAI_API_ENDPOINT,
+ # and AZURE_OPENAI_DEP_ID (the deployment used as the grading model).
+ client = AzureOpenAI(
+     api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
+     api_version=os.environ.get("AZURE_OPENAI_API_VERSION"),
+     azure_endpoint=os.getenv("AZURE_OPENAI_API_ENDPOINT"),
+ )
+ deployment_id = os.environ.get("AZURE_OPENAI_DEP_ID")
+ gpt_model = deployment_id
+
+
+ prompt = """Compare the ground truth and prediction from AI models, to give a correctness score for the prediction. <AND> in the ground truth means it is totally right only when all elements in the ground truth are present in the prediction, and <OR> means it is totally right when any one element in the ground truth is present in the prediction. The correctness score is 0.0 (totally wrong), 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, or 1.0 (totally right). Just complete the last space of the correctness score.
+
+ Question | Ground truth | Prediction | Correctness
+ --- | --- | --- | ---
+ What is x in the equation? | -1 <AND> -5 | x = 3 | 0.0
+ What is x in the equation? | -1 <AND> -5 | x = -1 | 0.5
+ What is x in the equation? | -1 <AND> -5 | x = -5 | 0.5
+ What is x in the equation? | -1 <AND> -5 | x = -5 or 5 | 0.5
+ What is x in the equation? | -1 <AND> -5 | x = -1 or x = -5 | 1.0
+ Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme talks about Iceland and Greenland. It's pointing out that despite their names, Iceland is not very icy and Greenland isn't very green. | 0.4
+ Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme is using humor to point out the misleading nature of Iceland's and Greenland's names. Iceland, despite its name, has lush green landscapes while Greenland is mostly covered in ice and snow. The text 'This is why I have trust issues' is a playful way to suggest that these contradictions can lead to distrust or confusion. The humor in this meme is derived from the unexpected contrast between the names of the countries and their actual physical characteristics. | 1.0
+ """
+
+
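+ # grade() runs the whole evaluation: it loads the MM-Vet metadata, asks
+ # the Azure deployment to score every model prediction with the few-shot
+ # prompt above, then aggregates the scores per capability and per
+ # capability combination into downloadable CSV files.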
+ def grade(file_obj, progress=gr.Progress()):
+     # load metadata
+     # Download mm-vet.zip and `unzip mm-vet.zip` and change the path below
+     mmvet_path = "mm-vet"
+     use_sub_set = False
+     decimal_places = 1  # number of decimal places to round to
+
+     if use_sub_set:
+         bard_set_file = os.path.join(mmvet_path, "bard_set.json")
+         with open(bard_set_file, 'r') as f:
+             sub_set = json.load(f)
+         sub_set_name = 'bardset_'
+     else:
+         sub_set = None
+         sub_set_name = ''
+
+     mmvet_metadata = os.path.join(mmvet_path, "mm-vet.json")
+     with open(mmvet_metadata, 'r') as f:
+         data = json.load(f)
+
+     # Count how often each capability and each capability set occurs.
+     counter = Counter()
+     cap_set_list = []
+     cap_set_counter = []
+     len_data = 0
+     for id, value in data.items():
+         if sub_set is not None and id not in sub_set:
+             continue
+         question = value["question"]
+         answer = value["answer"]
+         cap = set(value["capability"])
+         counter.update(cap)
+         if cap not in cap_set_list:
+             cap_set_list.append(cap)
+             cap_set_counter.append(1)
+         else:
+             cap_set_counter[cap_set_list.index(cap)] += 1
+         len_data += 1
+
+     sorted_list = counter.most_common()
+     columns = [k for k, v in sorted_list]
+     columns.append("total")
+     columns.append("std")
+     columns.append("runs")
+     df = pd.DataFrame(columns=columns)
+
+     # Sort capability sets by frequency, most common first.
+     cap_set_sorted_indices = np.argsort(-np.array(cap_set_counter))
+     new_cap_set_list = []
+     new_cap_set_counter = []
+     for index in cap_set_sorted_indices:
+         new_cap_set_list.append(cap_set_list[index])
+         new_cap_set_counter.append(cap_set_counter[index])
+
+     cap_set_list = new_cap_set_list
+     cap_set_counter = new_cap_set_counter
+     cap_set_names = ["_".join(list(cap_set)) for cap_set in cap_set_list]
+
+     columns2 = cap_set_names.copy()  # copy so the appends below don't mutate cap_set_names
+     columns2.append("total")
+     columns2.append("std")
+     columns2.append("runs")
+     df2 = pd.DataFrame(columns=columns2)
+
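+     # df will hold one row of per-capability scores for this model;
+     # df2 holds scores for each capability combination (e.g. "rec_ocr"),
+     # both with total/std/runs columns.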
+     ###### model name (derived from the uploaded file name) ######
+     model = file_obj.name.split("/")[-1][:-5]  # strip the ".json" suffix
+     # result_path = "results"
+     num_run = 1  # we set 5 in the paper
+     # model_results_file = os.path.join(result_path, f"{model}.json")
+     model_results_file = file_obj.name
+
+     # grade results for each sample to save
+     grade_file = f'{model}_{gpt_model}-grade-{num_run}runs.json'
+     # grade_file = os.path.join(result_path, grade_file)
+
+     # score results regarding capabilities/capability integration to save
+     cap_score_file = f'{model}_{sub_set_name}{gpt_model}-cap-score-{num_run}runs.csv'
+     # cap_score_file = os.path.join(result_path, cap_score_file)
+     cap_int_score_file = f'{model}_{sub_set_name}{gpt_model}-cap-int-score-{num_run}runs.csv'
+     # cap_int_score_file = os.path.join(result_path, cap_int_score_file)
+
+     with open(model_results_file) as f:
+         results = json.load(f)
+     if os.path.exists(grade_file):
+         with open(grade_file, 'r') as f:
+             grade_results = json.load(f)
+     else:
+         grade_results = {}
+
+     def need_more_runs():
+         # True if any sample has fewer than num_run scores, or if some
+         # samples have not been graded at all yet.
+         need_more_runs = False
+         if len(grade_results) > 0:
+             for k, v in grade_results.items():
+                 if len(v['score']) < num_run:
+                     need_more_runs = True
+                     break
+         return need_more_runs or len(grade_results) < len_data
+
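+     # Query the grader until every sample has num_run scores. If a reply
+     # does not parse as a score in [0, 1], the request is retried with an
+     # explicit "(digit)" suffix and a higher temperature, up to 5 times;
+     # rate-limited calls back off for 5 s, up to 12 times per sample.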
+     while need_more_runs():
+         for j in range(num_run):
+             print(f'eval run {j}')
+             for id, line in progress.tqdm(data.items(), desc="Grading"):
+                 if sub_set is not None and id not in sub_set:
+                     continue
+                 if id in grade_results and len(grade_results[id]['score']) >= (j + 1):
+                     continue
+
+                 model_pred = results[id]
+
+                 question = prompt + '\n' + ' | '.join([line['question'], line['answer'].replace("<AND>", " <AND> ").replace("<OR>", " <OR> "), model_pred, ""])
+                 messages = [
+                     {"role": "user", "content": question},
+                 ]
+
+                 if id not in grade_results:
+                     sample_grade = {'model': [], 'content': [], 'score': []}
+                 else:
+                     sample_grade = grade_results[id]
+
+                 grade_sample_run_complete = False
+                 temperature = 0.0
+
+                 num_sleep = 0
+                 while not grade_sample_run_complete:
+                     try:
+                         response = client.chat.completions.create(
+                             model=gpt_model,
+                             max_tokens=3,
+                             temperature=temperature,
+                             messages=messages)
+                         content = response.choices[0].message.content
+                         flag = True
+                         try_time = 1
+                         while flag:
+                             try:
+                                 content = content.split(' ')[0].strip()
+                                 score = float(content)
+                                 if score > 1.0 or score < 0.0:
+                                     raise ValueError(f"score {score} out of range")
+                                 flag = False
+                             except Exception:
+                                 question = prompt + '\n' + ' | '.join([line['question'], line['answer'].replace("<AND>", " <AND> ").replace("<OR>", " <OR> "), model_pred, ""]) + "\nPredict the correctness of the answer (digit): "
+                                 messages = [
+                                     {"role": "user", "content": question},
+                                 ]
+                                 response = client.chat.completions.create(
+                                     model=gpt_model,
+                                     max_tokens=3,
+                                     temperature=temperature,
+                                     messages=messages)
+                                 content = response.choices[0].message.content
+                                 try_time += 1
+                                 temperature += 0.5
+                                 print(f"{id} try {try_time} times")
+                                 print(content)
+                                 if try_time > 5:
+                                     score = 0.0
+                                     flag = False
+                         grade_sample_run_complete = True
+                         response_model = response.model
+                     except BadRequestError:
+                         content = "BadRequestError"
+                         score = 0.0
+                         flag = False
+                         print(id, "BadRequestError")
+                         response_model = gpt_model
+                         break
+                     # except RateLimitError as e:
+                     except Exception:
+                         # gpt-4 may hit the token rate limit
+                         num_sleep += 1
+                         if num_sleep > 12:
+                             score = 0.0
+                             grade_sample_run_complete = True
+                             content = "RateLimitError"
+                             num_sleep = 0
+                             continue
+                         print("sleep 5s")
+                         time.sleep(5)
+                         response_model = gpt_model
+
+                 if len(sample_grade['model']) >= j + 1:
+                     sample_grade['model'][j] = response_model
+                     sample_grade['content'][j] = content
+                     sample_grade['score'][j] = score
+                 else:
+                     sample_grade['model'].append(response_model)
+                     sample_grade['content'].append(content)
+                     sample_grade['score'].append(score)
+                 grade_results[id] = sample_grade
+
+                 with open(grade_file, 'w') as f:
+                     json.dump(grade_results, f, indent=4)
+
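+     # Aggregation: sum the scores for every capability / capability set a
+     # sample is tagged with, then normalize by the tag's sample count and
+     # scale to 0-100.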
+     assert not need_more_runs()
+     cap_scores = {k: [0.0] * num_run for k in columns[:-2]}
+     counter['total'] = len_data
+
+     cap_scores2 = {k: [0.0] * num_run for k in columns2[:-2]}
+     counter2 = {columns2[i]: cap_set_counter[i] for i in range(len(cap_set_counter))}
+     counter2['total'] = len_data
+
+     for k, v in grade_results.items():
+         if sub_set is not None and k not in sub_set:
+             continue
+         for i in range(num_run):
+             score = v['score'][i]
+             caps = set(data[k]['capability'])
+             for c in caps:
+                 cap_scores[c][i] += score
+
+             cap_scores['total'][i] += score
+
+             index = cap_set_list.index(caps)
+             cap_scores2[cap_set_names[index]][i] += score
+             cap_scores2['total'][i] += score
+
+     for k, v in cap_scores.items():
+         cap_scores[k] = np.array(v) / counter[k] * 100
+
+     std = round(cap_scores['total'].std(), decimal_places)
+     total_copy = cap_scores['total'].copy()
+     runs = str(list(np.round(total_copy, decimal_places)))
+
+     for k, v in cap_scores.items():
+         cap_scores[k] = round(v.mean(), decimal_places)
+
+     cap_scores['std'] = std
+     cap_scores['runs'] = runs
+     df.loc[model] = cap_scores
+
+     for k, v in cap_scores2.items():
+         cap_scores2[k] = round(np.mean(np.array(v) / counter2[k] * 100), decimal_places)
+     cap_scores2['std'] = std
+     cap_scores2['runs'] = runs
+     df2.loc[model] = cap_scores2
+
+     df.to_csv(cap_score_file)
+     df2.to_csv(cap_int_score_file)
+
+     # Bundle the three output files into one zip and clean up.
+     files = [cap_score_file, cap_int_score_file, grade_file]
+     zip_file = "results.zip"
+     with ZipFile(zip_file, "w") as zipObj:
+         for file in files:
+             zipObj.write(file, file)
+     for file in files:
+         os.remove(file)
+     return zip_file
+
+
+ # demo = gr.Interface(
+ #     fn=grade,
+ #     inputs=gr.File(file_types=[".json"]),
+ #     outputs="file")
+
+
+ model_result_example = "https://raw.githubusercontent.com/ImAnonymousUser/MM-Vet/main/results/llava_llama2_13b_chat.json"
+
+ markdown = f"""
+ # MM-Vet: Evaluating Large Multimodal Models for Integrated Capabilities
+
+ We offer the MM-Vet LLM-based (GPT-4) evaluator to grade open-ended outputs from your models.
+
+ Please upload a JSON file of your model results containing `{{v1_0: ..., v1_1: ..., }}` like [this json file]({model_result_example}).
+
+ The grading results will be downloaded as a zip file.
+ """
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown(markdown)
+     with gr.Row():
+         inp = gr.File(file_types=[".json"])
+         out = gr.File(file_types=[".zip"])
+     inp.change(grade, inp, out)
+
+
+ if __name__ == "__main__":
+     demo.queue().launch()
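
For reference, the results file the app expects is a flat JSON object mapping each MM-Vet question id to the model's answer string. A minimal sketch (the answer texts here are hypothetical; the linked llava_llama2_13b_chat.json is a complete real example):

```json
{
  "v1_0": "The onions weigh about 5 lbs in total.",
  "v1_1": "x = -1 or x = -5",
  "v1_2": "..."
}
```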
mm-vet/bard_set.json ADDED
@@ -0,0 +1 @@
+ ["v1_0", "v1_1", "v1_2", "v1_3", "v1_4", "v1_5", "v1_6", "v1_7", "v1_9", "v1_10", "v1_11", "v1_12", "v1_13", "v1_14", "v1_15", "v1_16", "v1_18", "v1_19", "v1_20", "v1_21", "v1_22", "v1_23", "v1_24", "v1_27", "v1_28", "v1_29", "v1_30", "v1_31", "v1_32", "v1_33", "v1_34", "v1_35", "v1_36", "v1_37", "v1_38", "v1_39", "v1_40", "v1_41", "v1_42", "v1_43", "v1_44", "v1_45", "v1_46", "v1_47", "v1_48", "v1_49", "v1_50", "v1_51", "v1_52", "v1_53", "v1_54", "v1_55", "v1_56", "v1_57", "v1_58", "v1_59", "v1_60", "v1_61", "v1_62", "v1_63", "v1_67", "v1_68", "v1_69", "v1_70", "v1_71", "v1_72", "v1_73", "v1_74", "v1_75", "v1_76", "v1_77", "v1_78", "v1_80", "v1_81", "v1_82", "v1_83", "v1_84", "v1_85", "v1_86", "v1_87", "v1_88", "v1_89", "v1_90", "v1_91", "v1_93", "v1_94", "v1_95", "v1_96", "v1_97", "v1_98", "v1_99", "v1_100", "v1_101", "v1_102", "v1_106", "v1_107", "v1_109", "v1_110", "v1_111", "v1_113", "v1_114", "v1_116", "v1_117", "v1_118", "v1_119", "v1_121", "v1_122", "v1_123", "v1_124", "v1_125", "v1_126", "v1_128", "v1_129", "v1_132", "v1_133", "v1_134", "v1_137", "v1_138", "v1_139", "v1_140", "v1_142", "v1_148", "v1_150", "v1_151", "v1_152", "v1_153", "v1_156", "v1_157", "v1_158", "v1_159", "v1_160", "v1_161", "v1_162", "v1_163", "v1_164", "v1_165", "v1_166", "v1_167", "v1_177", "v1_179", "v1_180", "v1_181", "v1_182", "v1_183", "v1_184", "v1_185", "v1_186", "v1_187", "v1_188", "v1_189", "v1_190", "v1_192", "v1_193", "v1_194", "v1_195", "v1_196", "v1_197", "v1_201", "v1_208", "v1_209", "v1_210", "v1_211", "v1_212", "v1_213", "v1_214", "v1_215", "v1_216", "v1_217"]
mm-vet/mm-vet.json ADDED
The diff for this file is too large to render. See raw diff
requirements.txt ADDED
@@ -0,0 +1 @@
+ openai==1.14.2