mgyigit committed (verified)
Commit c822a7e · 1 Parent(s): b694a77

Update src/vis_utils.py

Files changed (1)
  1. src/vis_utils.py +42 -1
src/vis_utils.py CHANGED
@@ -244,4 +244,45 @@ def plot_affinity_results(file_path, method_names, metric, save_path="./plot_ima
     ax.get_figure().savefig(filename, dpi=400, bbox_inches='tight')
     plt.close()  # Close the plot to free memory
 
-    return filename
+    return filename
+
+def update_metric_choices(benchmark_type):
+    if benchmark_type == 'similarity':
+        # Show x and y metric selectors for similarity
+        metric_names = benchmark_specific_metrics.get(benchmark_type, [])
+        return (
+            gr.update(choices=metric_names, value=metric_names[0], visible=True),
+            gr.update(choices=metric_names, value=metric_names[1], visible=True),
+            gr.update(visible=False), gr.update(visible=False),
+            gr.update(visible=False), gr.update(visible=False)
+        )
+    elif benchmark_type == 'function':
+        # Show aspect and dataset type selectors for function
+        aspect_types = benchmark_specific_metrics[benchmark_type]['aspect_types']
+        dataset_types = benchmark_specific_metrics[benchmark_type]['dataset_types']
+        return (
+            gr.update(visible=False), gr.update(visible=False),
+            gr.update(choices=aspect_types, value=aspect_types[0], visible=True),
+            gr.update(choices=dataset_types, value=dataset_types[0], visible=True),
+            gr.update(visible=False), gr.update(visible=False)
+        )
+    elif benchmark_type == 'family':
+        # Show dataset and metric selectors for family
+        datasets = benchmark_specific_metrics[benchmark_type]['datasets']
+        metrics = benchmark_specific_metrics[benchmark_type]['metrics']
+        return (
+            gr.update(visible=False), gr.update(visible=False),
+            gr.update(visible=False), gr.update(visible=False),
+            gr.update(choices=datasets, value=datasets[0], visible=True),
+            gr.update(choices=metrics, value=metrics[0], visible=True)
+        )
+    elif benchmark_type == 'affinity':
+        # Show single metric selector for affinity
+        metrics = benchmark_specific_metrics[benchmark_type]
+        return (
+            gr.update(visible=False), gr.update(visible=False),
+            gr.update(visible=False), gr.update(visible=False),
+            gr.update(visible=False), gr.update(choices=metrics, value=metrics[0], visible=True)
+        )
+
+    return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
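
The added helper always returns a 6-tuple of gr.update(...) objects, one per selector component, so a single change event on the benchmark-type dropdown can reconfigure all six selectors at once. A minimal wiring sketch of that usage follows; the component variables, labels, and app layout below are assumptions for illustration, not taken from this commit:

# Minimal wiring sketch; all component names below are assumed for
# illustration, not taken from this commit.
import gradio as gr

# update_metric_choices (added in this commit) reads the module-level
# benchmark_specific_metrics dict defined alongside it in src/vis_utils.py.
# Import path assumes the repo root is on sys.path.
from src.vis_utils import update_metric_choices

with gr.Blocks() as demo:
    benchmark_type = gr.Dropdown(
        choices=['similarity', 'function', 'family', 'affinity'],
        label="Benchmark type",
    )
    # One component per slot in the returned 6-tuple, in order:
    x_metric = gr.Dropdown(label="X metric", visible=False)
    y_metric = gr.Dropdown(label="Y metric", visible=False)
    aspect_type = gr.Dropdown(label="Aspect type", visible=False)
    dataset_type = gr.Dropdown(label="Dataset type", visible=False)
    family_dataset = gr.Dropdown(label="Dataset", visible=False)
    single_metric = gr.Dropdown(label="Metric", visible=False)

    # Each gr.update(...) in the tuple is applied to the matching
    # output component, so only the selectors relevant to the chosen
    # benchmark type become visible.
    benchmark_type.change(
        update_metric_choices,
        inputs=benchmark_type,
        outputs=[x_metric, y_metric, aspect_type,
                 dataset_type, family_dataset, single_metric],
    )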