|
import gradio as gr |
|
import pandas as pd |
|
|
|
import constants |
|
from utils import display, eval_requests |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
leaderboard_df = pd.read_csv("leaderboard.csv") |
|
|
|
|
|
evaluated_models = leaderboard_df["model"].tolist() |
|
requested_models = eval_requests.get_requested_models() |
|
|
|
|
|
for col in leaderboard_df.columns: |
|
if col == "model": |
|
leaderboard_df[col] = leaderboard_df[col].apply(lambda x: x.replace(x, display.make_clickable_model(x))) |
|
else: |
|
leaderboard_df[col] = leaderboard_df[col].apply(display.round_numbers) |
|
leaderboard_df.rename( |
|
columns={"Average WER": "Average WER β¬οΈ", "RTF (1e-3)": "RTF (1e-3) β¬οΈ"}, |
|
inplace=True, |
|
) |
|
leaderboard_df.sort_values(by='Average WER β¬οΈ', inplace=True) |
|
|
|
with gr.Blocks() as leaderboard_app: |
|
|
|
gr.HTML(constants.BANNER, elem_id="constants.banner") |
|
gr.Markdown(constants.INTRODUCTION_TEXT, elem_classes="markdown-text") |
|
|
|
with gr.Tabs(elem_classes="tab-buttons") as tabs: |
|
|
|
with gr.TabItem("π
Leaderboard", elem_id="od-benchmark-tab-table", id=0): |
|
leaderboard_table = gr.components.Dataframe( |
|
value=leaderboard_df, |
|
datatype=constants.COLUMN_DTYPES_LIST, |
|
elem_id="leaderboard-table", |
|
interactive=False, |
|
visible=True, |
|
) |
|
|
|
with gr.TabItem("π Metrics", elem_id="od-benchmark-tab-table", id=1): |
|
gr.Markdown(constants.METRICS_TAB_TEXT, elem_classes="markdown-text") |
|
|
|
with gr.TabItem("βοΈ Request a model here!", elem_id="od-benchmark-tab-table", id=2): |
|
with gr.Column(): |
|
gr.Markdown("# βοΈ Request results for a new model here!", elem_classes="markdown-text") |
|
gr.Markdown("In case of multiple requests, wait the restart of the Space after each of them to ensure a correct submission.", elem_classes="markdown-text") |
|
with gr.Column(): |
|
with gr.Column(): |
|
model_id = gr.Textbox(label="Model ID (user_name/model_name)") |
|
with gr.Column(): |
|
md_submission_result = gr.Markdown() |
|
btn_submitt = gr.Button(value="π Request") |
|
btn_submitt.click( |
|
fn=lambda model_id: eval_requests.request_model(model_id, evaluated_models, requested_models), |
|
inputs=[model_id], |
|
outputs=md_submission_result, |
|
) |
|
|
|
with gr.TabItem("β’οΈ Evaluate", elem_id="od-benchmark-tab-table", id=3): |
|
with gr.Column(): |
|
gr.Markdown("For admins only.", elem_classes="markdown-text") |
|
with gr.Column(): |
|
md_submission_result = gr.Markdown() |
|
btn_submitt = gr.Button(value="RUN EVALUATION") |
|
|
|
|
|
with gr.Row(): |
|
with gr.Accordion("π Citation", open=False): |
|
gr.Textbox( |
|
value=constants.CITATION_TEXT, lines=7, |
|
label="Copy the BibTeX snippet to cite this source", |
|
elem_id="citation-button", |
|
show_label=True, |
|
show_copy_button=True, |
|
) |
|
|
|
leaderboard_app.launch(allowed_paths=["banner.png"]) |
|
|