import gradio as gr
import pandas as pd
import constants
from utils import display, eval_requests
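
# `display` and `eval_requests` are local helper modules whose source is not
# shown here. The sketches below are hypothetical reference implementations
# (assumptions about their behavior, not the actual code in utils/):

def _sketch_make_clickable_model(model_id: str) -> str:
    """Hypothetical: render a model ID as an HTML link to its Hub page."""
    return f'<a href="https://huggingface.co/{model_id}" target="_blank">{model_id}</a>'

def _sketch_round_numbers(value):
    """Hypothetical: round numeric cells for display, pass others through."""
    return round(value, 3) if isinstance(value, (int, float)) else value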
#
##
###
##
#
# Load leaderboard data
leaderboard_df = pd.read_csv("leaderboard.csv")
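# The CSV is assumed to contain a "model" column plus numeric metric columns
# (a header like `model,Average WER,RTF (1e-3)`, inferred from the renames
# below rather than from the actual file).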
# Get already evaluated or requested models
evaluated_models = leaderboard_df["model"].tolist()
requested_models = eval_requests.get_requested_models()
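
# Hypothetical sketch of the duplicate check that eval_requests.request_model
# is assumed to perform before queueing an evaluation (the real helper may differ):

def _sketch_request_model(model_id, evaluated_models, requested_models):
    """Hypothetical: refuse duplicates, otherwise acknowledge the request."""
    if model_id in evaluated_models:
        return f"❌ {model_id} is already on the leaderboard."
    if model_id in requested_models:
        return f"❌ {model_id} has already been requested."
    # Presumably the real helper also persists the request somewhere durable.
    return f"βœ… Evaluation request for {model_id} submitted."
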
# Format the dataframe
for col in leaderboard_df.columns:
    if col == "model":
        leaderboard_df[col] = leaderboard_df[col].apply(display.make_clickable_model)
    else:
        leaderboard_df[col] = leaderboard_df[col].apply(display.round_numbers)
leaderboard_df.rename(
columns={"Average WER": "Average WER ⬇️", "RTF (1e-3)": "RTF (1e-3) ⬇️"},
inplace=True,
)
leaderboard_df.sort_values(by="Average WER ⬇️", inplace=True)
with gr.Blocks() as leaderboard_app:
    gr.HTML(constants.BANNER, elem_id="banner")
    gr.Markdown(constants.INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:

        with gr.TabItem("πŸ… Leaderboard", elem_id="od-benchmark-tab-table", id=0):
            leaderboard_table = gr.components.Dataframe(
                value=leaderboard_df,
                datatype=constants.COLUMN_DTYPES_LIST,
                elem_id="leaderboard-table",
                interactive=False,
                visible=True,
            )
with gr.TabItem("πŸ“ˆ Metrics", elem_id="od-benchmark-tab-table", id=1):
gr.Markdown(constants.METRICS_TAB_TEXT, elem_classes="markdown-text")
with gr.TabItem("βœ‰οΈ Request a model here!", elem_id="od-benchmark-tab-table", id=2):
with gr.Column():
gr.Markdown("# βœ‰οΈ Request results for a new model here!", elem_classes="markdown-text")
gr.Markdown("In case of multiple requests, wait the restart of the Space after each of them to ensure a correct submission.", elem_classes="markdown-text")
with gr.Column():
with gr.Column():
model_id = gr.Textbox(label="Model ID (user_name/model_name)")
with gr.Column():
md_submission_result = gr.Markdown()
btn_submitt = gr.Button(value="πŸš€ Request")
btn_submitt.click(
fn=lambda model_id: eval_requests.request_model(model_id, evaluated_models, requested_models),
inputs=[model_id], # inputs: List of gradio.components
outputs=md_submission_result,
)
with gr.TabItem("☒️ Evaluate", elem_id="od-benchmark-tab-table", id=3):
with gr.Column():
gr.Markdown("For admins only.", elem_classes="markdown-text")
with gr.Column():
md_submission_result = gr.Markdown()
btn_submitt = gr.Button(value="RUN EVALUATION")
# btn_submitt.click(eval_requests.request_model, [model_id], md_submission_result)
    with gr.Row():
        with gr.Accordion("πŸ“™ Citation", open=False):
            gr.Textbox(
                value=constants.CITATION_TEXT,
                lines=7,
                label="Copy the BibTeX snippet to cite this source",
                elem_id="citation-button",
                show_label=True,
                show_copy_button=True,
            )
leaderboard_app.launch(allowed_paths=["banner.png"])