update font

- app.py +14 -14
- generate_plot.py +5 -5

app.py CHANGED
@@ -20,7 +20,7 @@ i2t_models = [ # Average time spent running the following example
     "gpt-4o-2024-05-13",
     "llava-hf/llava-v1.6-vicuna-7b-hf"
 ]
-perspectives = ["
+perspectives = ["Safety", "Fairness", "Hallucination", "Privacy", "Adv", "OOD"]
 main_scores_t2i = {}
 main_scores_i2t = {}
 
@@ -32,22 +32,22 @@ for model in t2i_models:
     for perspective in perspectives:
         if perspective not in sub_scores_t2i.keys():
             sub_scores_t2i[perspective] = {}
-        if perspective == "
+        if perspective == "Hallucination":
             main_scores_t2i[model][perspective] = hallucination_t2i_agg(model, "./data/results")["score"]
             sub_scores_t2i[perspective][model] = hallucination_t2i_agg(model, "./data/results")["subscenarios"]
-        elif perspective == "
+        elif perspective == "Safety":
             main_scores_t2i[model][perspective] = safety_t2i_agg(model, "./data/results")["score"]
             sub_scores_t2i[perspective][model] = safety_t2i_agg(model, "./data/results")["subscenarios"]
-        elif perspective == "
+        elif perspective == "Adv":
             main_scores_t2i[model][perspective] = adversarial_robustness_t2i_agg(model, "./data/results")["score"]
             sub_scores_t2i[perspective][model] = adversarial_robustness_t2i_agg(model, "./data/results")["subscenarios"]
-        elif perspective == "
+        elif perspective == "Fairness":
             main_scores_t2i[model][perspective] = fairness_t2i_agg(model, "./data/results")["score"]
             sub_scores_t2i[perspective][model] = fairness_t2i_agg(model, "./data/results")["subscenarios"]
-        elif perspective == "
+        elif perspective == "Privacy":
             main_scores_t2i[model][perspective] = privacy_t2i_agg(model, "./data/results")["score"]
             sub_scores_t2i[perspective][model] = privacy_t2i_agg(model, "./data/results")["subscenarios"]
-        elif perspective == "
+        elif perspective == "OOD":
             main_scores_t2i[model][perspective] = ood_t2i_agg(model, "./data/results")["score"]
             sub_scores_t2i[perspective][model] = ood_t2i_agg(model, "./data/results")["subscenarios"]
         else:
@@ -60,24 +60,24 @@ for model in i2t_models:
     for perspective in perspectives:
         if perspective not in sub_scores_i2t.keys():
             sub_scores_i2t[perspective] = {}
-        if perspective == "
+        if perspective == "Hallucination":
             main_scores_i2t[model][perspective] = hallucination_i2t_agg(model, "./data/results")["score"]
             sub_scores_i2t[perspective][model] = hallucination_i2t_agg(model, "./data/results")["subscenarios"]
-        elif perspective == "
+        elif perspective == "Safety":
             main_scores_i2t[model][perspective] = safety_i2t_agg(model, "./data/results")["score"]
             sub_scores_i2t[perspective][model] = safety_i2t_agg(model, "./data/results")["subscenarios"]
-        elif perspective == "
+        elif perspective == "Adv":
             main_scores_i2t[model][perspective] = adversarial_robustness_i2t_agg(model, "./data/results")["score"]
             sub_scores_i2t[perspective][model] = adversarial_robustness_i2t_agg(model, "./data/results")["subscenarios"]
-        elif perspective == "
+        elif perspective == "Fairness":
             main_scores_i2t[model][perspective] = fairness_i2t_agg(model, "./data/results")["score"]
             sub_scores_i2t[perspective][model] = fairness_i2t_agg(model, "./data/results")["subscenarios"]
-        elif perspective == "
+        elif perspective == "Privacy":
             main_scores_i2t[model][perspective] = privacy_i2t_agg(model, "./data/results")["score"]
             sub_scores_i2t[perspective][model] = privacy_i2t_agg(model, "./data/results")["subscenarios"]
-        elif perspective == "
+        elif perspective == "OOD":
             main_scores_i2t[model][perspective] = ood_i2t_agg(model, "./data/results")["score"]
-            sub_scores_i2t[perspective][model] = ood_i2t_agg
+            sub_scores_i2t[perspective][model] = ood_i2t_agg(model, "./data/results")["subscenarios"]
         else:
             raise ValueError("Invalid perspective")
 
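A side note on the loops above: each perspective label maps one-to-one to an aggregator function, so the if/elif chain could also be expressed as a lookup table. The sketch below is not part of this commit; it is a minimal, self-contained illustration in which _stub_agg, T2I_AGGREGATORS, and the example model id are hypothetical stand-ins, while the *_t2i_agg names and perspective labels are taken from the diff.

# Sketch only (not part of this commit): a lookup table replacing the if/elif chain.
def _stub_agg(model, results_dir):
    # Placeholder: the project's real aggregators return {"score": ..., "subscenarios": ...}.
    return {"score": 0.0, "subscenarios": {}}

# In app.py these names are the imported aggregator functions; stubs keep the sketch runnable.
hallucination_t2i_agg = safety_t2i_agg = adversarial_robustness_t2i_agg = _stub_agg
fairness_t2i_agg = privacy_t2i_agg = ood_t2i_agg = _stub_agg

T2I_AGGREGATORS = {
    "Hallucination": hallucination_t2i_agg,
    "Safety": safety_t2i_agg,
    "Adv": adversarial_robustness_t2i_agg,
    "Fairness": fairness_t2i_agg,
    "Privacy": privacy_t2i_agg,
    "OOD": ood_t2i_agg,
}

perspectives = ["Safety", "Fairness", "Hallucination", "Privacy", "Adv", "OOD"]
t2i_models = ["example/t2i-model"]  # hypothetical model id for illustration
main_scores_t2i, sub_scores_t2i = {}, {}

for model in t2i_models:
    main_scores_t2i[model] = {}
    for perspective in perspectives:
        sub_scores_t2i.setdefault(perspective, {})
        agg = T2I_AGGREGATORS.get(perspective)
        if agg is None:
            raise ValueError("Invalid perspective")
        result = agg(model, "./data/results")  # call once, reuse both fields
        main_scores_t2i[model][perspective] = result["score"]
        sub_scores_t2i[perspective][model] = result["subscenarios"]

Looking each aggregator up once and reusing the returned dict would also avoid calling it twice per perspective, which the current code does (once for "score" and once for "subscenarios").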
generate_plot.py CHANGED
@@ -52,11 +52,11 @@ def radar_plot(results, thetas, selected_models):
 
     fig.add_trace(
         go.Table(
-            header=dict(values=header_texts, font=dict(size=
+            header=dict(values=header_texts, font=dict(size=14.5), align="left"),
             cells=dict(
                 values=rows,
                 align="left",
-                font=dict(size=
+                font=dict(size=14.5),
                 height=30
             ),
             # columnwidth=column_widths
@@ -120,11 +120,11 @@ def main_radar_plot(main_scores, selected_models):
 
     fig.add_trace(
         go.Table(
-            header=dict(values=header_texts, font=dict(size=
+            header=dict(values=header_texts, font=dict(size=14.5), align="left"),
             cells=dict(
                 values=rows,
                 align="left",
-                font=dict(size=
+                font=dict(size=14.5),
                 height=30,
             ),
             columnwidth=column_widths,
@@ -206,7 +206,7 @@ if __name__ == "__main__":
         "gpt-4o-2024-05-13",
         "llava-hf/llava-v1.6-vicuna-7b-hf"
     ]
-    perspectives = ["
+    perspectives = ["Safety", "Fairness", "Hallucination", "Privacy", "Adv", "OOD"]
    main_scores_t2i = {}
    main_scores_i2t = {}
    sub_scores_t2i = {}
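For reference, the font change above sets both the go.Table header font and the cell font to 14.5 in the two table traces. Below is a minimal, self-contained Plotly sketch of that pattern; header_texts and rows here hold dummy placeholder values, not project data.

# Minimal sketch of the go.Table font settings used above (dummy values only).
import plotly.graph_objects as go

header_texts = ["Model", "Score"]
rows = [["model-a", "model-b"], ["0.0", "0.0"]]  # one list per column

fig = go.Figure()
fig.add_trace(
    go.Table(
        header=dict(values=header_texts, font=dict(size=14.5), align="left"),
        cells=dict(
            values=rows,
            align="left",
            font=dict(size=14.5),
            height=30,
        ),
    )
)
fig.show()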