Update app.py
app.py
CHANGED
@@ -31,6 +31,7 @@ def add_new_eval(
 ):
     representation_name = model_name_textbox if revision_name_textbox == '' else revision_name_textbox
     results = run_probe(benchmark_type, representation_name, human_file, skempi_file, similarity_tasks, function_prediction_aspect, function_prediction_dataset, family_prediction_dataset)
+    print(results)
     return results
 
     for benchmark_type in results:
@@ -101,8 +102,7 @@ with block:
     gr.Markdown(
         """
         ## **Below, you can visualize the results displayed in the Leaderboard.**
-        ### Once you choose a benchmark type, the related options for metrics, datasets, and other parameters will become visible.
-        ### Select the methods and metrics of interest from the options above to generate visualizations.
+        ### Once you choose a benchmark type, the related options for metrics, datasets, and other parameters will become visible. Select the methods and metrics of interest from the options to generate visualizations.
         """
     )
 
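For context, a minimal sketch of how the function touched by the first hunk reads after this commit. Only the call site appears in the diff, so the full parameter list of add_new_eval and the stubbed body of run_probe below are assumptions, not the code from this Space:

# Hypothetical sketch; the real run_probe and the Gradio widgets live elsewhere in app.py.
def run_probe(benchmark_type, representation_name, human_file, skempi_file,
              similarity_tasks, function_prediction_aspect,
              function_prediction_dataset, family_prediction_dataset):
    # placeholder stand-in for the real probing routine
    return {"similarity": 0.0}

def add_new_eval(benchmark_type, model_name_textbox, revision_name_textbox,
                 human_file, skempi_file, similarity_tasks,
                 function_prediction_aspect, function_prediction_dataset,
                 family_prediction_dataset):
    # prefer the revision name when one was entered, otherwise use the model name
    representation_name = model_name_textbox if revision_name_textbox == '' else revision_name_textbox
    results = run_probe(benchmark_type, representation_name, human_file, skempi_file,
                        similarity_tasks, function_prediction_aspect,
                        function_prediction_dataset, family_prediction_dataset)
    print(results)  # debug output added by this commit
    return results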