Update app.py
app.py CHANGED
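The diff below replaces the single static benchmark-type dropdown (`TASK_INFO + ['flexible']`) with a set of per-benchmark selectors (x/y metric, aspect type, dataset type, dataset, and single metric) whose visibility is toggled at runtime from the selected benchmark type.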
@@ -95,13 +95,13 @@ with block:
         inputs=[leaderboard_method_selector, leaderboard_metric_selector],
         outputs=data_component
     )
-
-    # Dropdown for benchmark type
-    benchmark_types = TASK_INFO + ['flexible']
-    benchmark_type_selector = gr.Dropdown(choices=benchmark_types, label="Select Benchmark Type for Visualization", value="flexible")
 
+    # Dynamic selectors
     x_metric_selector = gr.Dropdown(choices=[], label="Select X-axis Metric", visible=False)
     y_metric_selector = gr.Dropdown(choices=[], label="Select Y-axis Metric", visible=False)
+    aspect_type_selector = gr.Dropdown(choices=[], label="Select Aspect Type", visible=False)
+    dataset_type_selector = gr.Dropdown(choices=[], label="Select Dataset Type", visible=False)
+    dataset_selector = gr.Dropdown(choices=[], label="Select Dataset", visible=False)
     single_metric_selector = gr.Dropdown(choices=[], label="Select Metric", visible=False)
 
     # CheckboxGroup for methods
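The hunk above relies on a common Gradio pattern: create every selector up front with visible=False, then reveal the relevant ones via gr.update from a change handler. A minimal, self-contained sketch of that pattern follows; the component names and choice lists here are illustrative, not taken from app.py.

import gradio as gr

with gr.Blocks() as demo:
    kind = gr.Dropdown(choices=["similarity", "affinity"], value="similarity", label="Benchmark Type")
    x_sel = gr.Dropdown(choices=[], label="X metric", visible=False)
    single_sel = gr.Dropdown(choices=[], label="Metric", visible=False)

    def toggle(kind_value):
        # The returned tuple must line up, position by position,
        # with the `outputs` list registered below.
        if kind_value == "similarity":
            return (gr.update(choices=["mse", "r2"], value="mse", visible=True),
                    gr.update(visible=False))
        return (gr.update(visible=False),
                gr.update(choices=["auc"], value="auc", visible=True))

    kind.change(toggle, inputs=[kind], outputs=[x_sel, single_sel])

demo.launch()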
@@ -111,25 +111,46 @@ with block:
     plot_button = gr.Button("Plot")
     plot_output = gr.Image(label="Plot")
 
-    # Update metric selectors
+    # Update metric selectors based on benchmark type
     def update_metric_choices(benchmark_type):
-        if benchmark_type == '
-        # Show x and y metric selectors for similarity
+        if benchmark_type == 'similarity':
+            # Show x and y metric selectors for similarity
             metric_names = benchmark_specific_metrics.get(benchmark_type, [])
             return (
                 gr.update(choices=metric_names, value=metric_names[0], visible=True),
                 gr.update(choices=metric_names, value=metric_names[1], visible=True),
-                gr.update(visible=False)
+                gr.update(visible=False), gr.update(visible=False),
+                gr.update(visible=False), gr.update(visible=False)
             )
-        elif benchmark_type
-        # Show
-
+        elif benchmark_type == 'function':
+            # Show aspect and dataset type selectors for function
+            aspect_types = benchmark_specific_metrics[benchmark_type]['aspect_types']
+            dataset_types = benchmark_specific_metrics[benchmark_type]['dataset_types']
+            return (
+                gr.update(visible=False), gr.update(visible=False),
+                gr.update(choices=aspect_types, value=aspect_types[0], visible=True),
+                gr.update(choices=dataset_types, value=dataset_types[0], visible=True),
+                gr.update(visible=False), gr.update(visible=False)
+            )
+        elif benchmark_type == 'family':
+            # Show dataset and metric selectors for family
+            datasets = benchmark_specific_metrics[benchmark_type]['datasets']
+            metrics = benchmark_specific_metrics[benchmark_type]['metrics']
             return (
-                gr.update(visible=False),
-                gr.update(visible=False),
+                gr.update(visible=False), gr.update(visible=False),
+                gr.update(visible=False), gr.update(visible=False),
+                gr.update(choices=datasets, value=datasets[0], visible=True),
                 gr.update(choices=metrics, value=metrics[0], visible=True)
             )
-
+        elif benchmark_type == 'affinity':
+            # Show single metric selector for affinity
+            metrics = benchmark_specific_metrics[benchmark_type]
+            return (
+                gr.update(visible=False), gr.update(visible=False),
+                gr.update(visible=False), gr.update(visible=False),
+                gr.update(visible=False), gr.update(choices=metrics, value=metrics[0], visible=True)
+            )
+        return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
 
     # Dropdown for benchmark type
     benchmark_type_selector = gr.Dropdown(choices=list(benchmark_specific_metrics.keys()), label="Select Benchmark Type")
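Each branch of update_metric_choices must return exactly six gr.update objects in the same order as the outputs list wired below, which is why the hidden slots are padded with repeated gr.update(visible=False). A hypothetical table-driven helper (not part of this commit) could shrink each branch to naming only the selectors it reveals:

import gradio as gr

# Order must mirror the `outputs` list wired to benchmark_type_selector.change.
ORDER = ("x", "y", "aspect", "dataset_type", "dataset", "single")

def make_updates(shown):
    """`shown` maps a selector name from ORDER to (choices, default value)."""
    return tuple(
        gr.update(choices=shown[name][0], value=shown[name][1], visible=True)
        if name in shown else gr.update(visible=False)
        for name in ORDER
    )

# The 'function' branch above would then reduce to:
# return make_updates({"aspect": (aspect_types, aspect_types[0]),
#                      "dataset_type": (dataset_types, dataset_types[0])})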
@@ -138,19 +159,14 @@ with block:
     benchmark_type_selector.change(
         update_metric_choices,
         inputs=[benchmark_type_selector],
-        outputs=[x_metric_selector, y_metric_selector, single_metric_selector]
+        outputs=[x_metric_selector, y_metric_selector, aspect_type_selector, dataset_type_selector, dataset_selector, single_metric_selector]
     )
 
-    # Generate the plot based on user input
-    def benchmark_plot(benchmark_type, method_names, x_metric, y_metric, single_metric):
-        # Implement plot generation logic based on selected benchmark type and metrics
-        pass
-
     plot_button.click(
         benchmark_plot,
-        inputs=[benchmark_type_selector, method_selector, x_metric_selector, y_metric_selector, single_metric_selector],
+        inputs=[benchmark_type_selector, method_selector, x_metric_selector, y_metric_selector, aspect_type_selector, dataset_type_selector, dataset_selector, single_metric_selector],
         outputs=plot_output
-    )
+    )
 
     with gr.TabItem("📝 About", elem_id="probe-benchmark-tab-table", id=2):
         with gr.Row():
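Note that the last hunk deletes the stub benchmark_plot while plot_button.click still references it, so a real implementation must exist elsewhere in app.py. A minimal sketch of a signature-compatible dispatcher is below; only the argument order comes from the diff, while the plotting calls and the PIL round-trip are assumptions (gr.Image accepts a PIL image), not the Space's actual code.

import io
import matplotlib
matplotlib.use("Agg")  # headless backend for server-side rendering
import matplotlib.pyplot as plt
from PIL import Image

def benchmark_plot(benchmark_type, method_names, x_metric, y_metric,
                   aspect_type, dataset_type, dataset, single_metric):
    # method_names comes from the methods CheckboxGroup; actual plotting
    # of the selected methods is elided in this sketch.
    fig, ax = plt.subplots()
    if benchmark_type == "similarity":
        ax.set_xlabel(x_metric)   # e.g. one point per method
        ax.set_ylabel(y_metric)
    elif benchmark_type == "function":
        ax.set_title(f"{aspect_type} / {dataset_type}")
    elif benchmark_type == "family":
        ax.set_title(f"{dataset}: {single_metric}")
    else:  # "affinity"
        ax.set_title(single_metric)
    # Rasterize the figure into a PIL image for the gr.Image output.
    buf = io.BytesIO()
    fig.savefig(buf, format="png")
    plt.close(fig)
    buf.seek(0)
    return Image.open(buf)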