# long-code-arena / app.py
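"""Gradio app for the Long Code Arena leaderboard.

Builds one leaderboard tab per task; the Project Code Completion tab can additionally
be filtered by dataset. The results-submission form further below is currently disabled.
"""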
import logging
import os
import gradio as gr # type: ignore[import]
from src.content import (
INTRODUCTION_TEXT,
INTRODUCTION_TITLE,
LEADERBOARD_TEXT,
LEADERBOARD_TITLE,
SUBMISSION_TEXT_FILES,
SUBMISSION_TEXT_INTRO,
SUBMISSION_TEXT_METADATA,
SUBMISSION_TEXT_SUBMIT,
SUBMISSION_TEXT_TASK,
SUBMISSION_TITLE,
)
from src.get_results_for_task import get_results_for_task
from src.leaderboard_formatting import get_types_per_task
from src.submission_uploader import SubmissionUploader
from src.tasks_content import (
TASKS_DESCRIPTIONS,
TASKS_PRETTY,
TASKS_PRETTY_REVERSE,
get_submission_text_files_for_task,
)
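# Log INFO and above to stdout.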
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[logging.StreamHandler()],
)
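# Handles new result submissions; the target dataset repos are read from environment variables.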
submission_uploader = SubmissionUploader(
dataset_id=os.environ["DATASET_ID"], private_dataset_id=os.environ["PRIVATE_DATASET_ID"]
)
def get_leaderboard_for_task(task_pretty: str) -> gr.components.Dataframe:
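    """Build a leaderboard dataframe for the given task (identified by its pretty name)."""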
return gr.components.Dataframe(
value=get_results_for_task(task_pretty),
interactive=False,
datatype=get_types_per_task(TASKS_PRETTY_REVERSE[task_pretty]),
)
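# Dataset names available for the Project Code Completion task (used to populate the dropdown filter).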
code_completion_dataset_names = (
    get_results_for_task(TASKS_PRETTY["project_code_completion"])["Dataset Name"].unique().tolist()
)
def get_leaderboard_for_completion_task(dataset_name: str | None) -> gr.components.Dataframe:
    """Build a leaderboard dataframe for Project Code Completion, filtered to a single dataset."""
    df = get_results_for_task(TASKS_PRETTY["project_code_completion"])
    if dataset_name is None:
        dataset_name = code_completion_dataset_names[0]
    # Filter and drop on a copy to avoid mutating a slice of the original dataframe.
    filtered_df = df[df["Dataset Name"] == dataset_name].drop(columns=["Dataset"])
    return gr.components.Dataframe(
        value=filtered_df,
        interactive=False,
        datatype=get_types_per_task("project_code_completion"),
    )
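# Page layout: intro, per-task leaderboards, and a (currently disabled) submission form.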
with gr.Blocks() as demo:
# intro
gr.HTML(INTRODUCTION_TITLE)
gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
# leaderboard
gr.HTML(LEADERBOARD_TITLE)
gr.Markdown(LEADERBOARD_TEXT, elem_classes="markdown-text")
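    # One tab per task; the Project Code Completion tab additionally gets a dataset filter.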
with gr.Tabs():
for task_pretty in TASKS_PRETTY_REVERSE:
with gr.TabItem(task_pretty):
with gr.Row():
gr.Markdown(TASKS_DESCRIPTIONS[TASKS_PRETTY_REVERSE[task_pretty]])
if task_pretty == TASKS_PRETTY['project_code_completion']:
leaderboard_table = get_leaderboard_for_completion_task(dataset_name=None)
else:
leaderboard_table = get_leaderboard_for_task(task_pretty)
task_input = gr.Text(value=task_pretty, visible=False)
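                # The code completion task gets a dataset dropdown; changing it re-renders the table.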
if task_pretty == TASKS_PRETTY['project_code_completion']:
dataset_dropdown = gr.Dropdown(choices=code_completion_dataset_names, label="Select the Dataset")
dataset_dropdown.change(
fn=get_leaderboard_for_completion_task,
inputs=dataset_dropdown,
                        outputs=leaderboard_table,
)
                    refresh_button = gr.Button("🔄 Refresh", variant="secondary")
refresh_button.click(
fn=get_leaderboard_for_completion_task,
inputs=dataset_dropdown,
outputs=leaderboard_table,
)
else:
                    refresh_button = gr.Button("🔄 Refresh", variant="secondary")
refresh_button.click(
fn=get_leaderboard_for_task,
inputs=task_input,
outputs=leaderboard_table,
)
    # submission (currently disabled: the form code below is wrapped in a string literal so it is not rendered)
    '''
gr.HTML(SUBMISSION_TITLE)
gr.Markdown(SUBMISSION_TEXT_INTRO, elem_classes="markdown-text")
    with gr.Accordion("🚀 Submit new results", open=False):
gr.Markdown(SUBMISSION_TEXT_TASK, elem_classes="markdown-text")
task_selection = gr.Radio(TASKS_PRETTY_REVERSE.keys(), label="Task")
gr.Markdown(SUBMISSION_TEXT_METADATA, elem_classes="markdown-text")
with gr.Row():
with gr.Column():
model_folder_textbox = gr.Textbox(
label="Model Folder",
placeholder="How to call a folder related to this submission in our results dataset (should be unique).",
)
model_name_textbox = gr.Textbox(
label="Model Name",
placeholder="How to display model's name on the leaderboard.",
)
model_url_textbox = gr.Textbox(
label="Model URL",
placeholder="Link to a model's page - will be clickable on a leaderboard (optional).",
)
with gr.Column():
url_textbox = gr.Textbox(
label="Relevant URLs",
placeholder='URLs to relevant resources with additional details about your submission (optional). Use the following format: "[text1](link1), [text2](link2)".',
)
model_availability_textbox = gr.Textbox(
label="Availability",
placeholder="Information about the model's availability and licensing.",
)
context_size_textbox = gr.Textbox(
label="Context Size",
placeholder="Context size in tokens used for the submission (should be an integer).",
)
with gr.Column():
submitted_by_textbox = gr.Textbox(
label="Submitted By",
placeholder="How to display on the leaderboard who submitted the model.",
)
contact_textbox = gr.Textbox(
label="Contact Information",
placeholder="How Long Code Arena team can contact you (won't go to public dataset).",
)
comment_textbox = gr.Textbox(
label="Comment",
placeholder="Any comments you have for Long Code Arena team (optional, won't go to public dataset).",
)
gr.Markdown(SUBMISSION_TEXT_FILES, elem_classes="markdown-text")
with gr.Row():
with gr.Column(variant="panel"):
task_specific_instructions = gr.Markdown(get_submission_text_files_for_task(None))
task_selection.select(get_submission_text_files_for_task, [task_selection], task_specific_instructions)
with gr.Column():
file_output = gr.File(file_count="multiple")
gr.Markdown(SUBMISSION_TEXT_SUBMIT, elem_classes="markdown-text")
submit_button = gr.Button("Submit")
submission_result = gr.Markdown()
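        # On submit, pass the metadata fields and uploaded files to SubmissionUploader.upload_files.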
submit_button.click(
submission_uploader.upload_files,
[
task_selection,
model_folder_textbox,
model_name_textbox,
model_availability_textbox,
model_url_textbox,
url_textbox,
context_size_textbox,
submitted_by_textbox,
contact_textbox,
comment_textbox,
file_output,
],
submission_result,
)
'''
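# Entry point: enable request queuing and launch the app.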
if __name__ == "__main__":
demo.queue()
demo.launch()