|
import html

import gradio as gr
from huggingface_hub import HfApi
from loguru import logger

from display.formatting import styled_error
from submission.submit import submit_hf_pipeline_agent, validate_model_name
|
|
|
|
|
def error_html(msg: str, model_id: str) -> str:
    """Render an error message as a styled red HTML box.

    Args:
        msg: Message template containing a ``{model_id}`` placeholder.
        model_id: Model repository ID interpolated into the template. It is
            HTML-escaped first, since it is user input rendered into markup.

    Returns:
        An HTML snippet suitable for a Gradio HTML component.
    """
    # Escape the user-supplied ID so characters like & < > can't break the
    # markup or inject HTML into the page.
    safe_model_id = html.escape(str(model_id))
    error_msg = msg.format(
        model_id=f'<code style="color:#222;background:#f2f2f2;padding:2px 4px;border-radius:4px;">{safe_model_id}</code>'
    )
    return (
        f"<div style='background-color:#ffeaea; color:#b30000; border:1px solid #ffcccc; "
        f"padding:10px; border-radius:6px; font-size:16px; text-align:left;'>"
        f"{error_msg}"
        f"</div>"
    )
|
|
|
|
|
def verify_pipeline(model_id: str) -> tuple[bool, str]:
    """
    Check if model_id is a public, non-gated model repo on Hugging Face Hub.
    Returns (success: bool, error_message: str)
    """
    try:
        hub_info = HfApi().model_info(model_id)

        # Guard clauses: a qualifying repo is public (private == False)
        # and not gated; fail fast on the first violated condition.
        if hub_info.private is not False:
            return False, error_html("Model {model_id} is private. Please make your model public.", model_id)
        if getattr(hub_info, "gated", False):
            return False, error_html("Model {model_id} is gated. Please use a non-gated model.", model_id)

        return True, ""
    except Exception as e:
        # Covers missing repos, network failures, and auth errors alike.
        logger.exception(e)
        return False, error_html(
            "Could not verify model {model_id}. Please check if the model is public and not gated.", model_id
        )
|
|
|
|
|
def attempt_submission(model_id: str, description: str, competition_type: str, profile: gr.OAuthProfile | None):
    """Validate a model submission end-to-end and hand it off for evaluation.

    Returns an HTML string: a styled error on any failed check, otherwise the
    result of ``submit_hf_pipeline_agent``.
    """
    # Submissions require an authenticated Gradio OAuth session.
    if profile is None:
        return styled_error("Authentication required. Please log in first to submit your model.")

    owner, slash, remainder = model_id.partition("/")
    if not slash:
        # Bare repo name: namespace it under the logged-in user.
        repo_name = model_id
        full_model_id = f"{profile.username}/{model_id}"
    elif owner != profile.username:
        # Users may only submit repos from their own namespace.
        return error_html(f"Model {{model_id}} is not owned by you (username: {profile.username}).", model_id)
    else:
        full_model_id = model_id
        repo_name = remainder

    valid, msg = validate_model_name(repo_name)
    if not valid:
        return error_html(msg, repo_name)

    ok, verify_msg = verify_pipeline(full_model_id)
    if not ok:
        return verify_msg

    try:
        return submit_hf_pipeline_agent(repo_name, description, competition_type, profile)
    except Exception as e:
        return styled_error(f"Error: Could not submit model '{repo_name}': {e}")
|
|
|
|
|
def create_model_submission_panel(app: gr.Blocks, competition_type: str):
    """Lay out the submission widgets for one competition type.

    Returns the (model-ID textbox, description textbox, submit button,
    status HTML) components so callers can attach further behavior.
    """
    with gr.Column():
        model_name_input = gr.Textbox(
            label="Hugging Face Model ID",
            placeholder="<yourname/my-qbt-model> OR <my-qbt-model>",
        )
        description_input = gr.Textbox(
            label="Pipeline Description",
            placeholder="e.g. My QBT model is a simple model that uses a pipeline to predict the answer to a question.",
        )

        # Disabled until we know the user is logged in.
        submit_btn = gr.Button("π€ Submit", interactive=False)

        submit_status = gr.HTML(label="Submission Status", visible=False)

        def _sync_login_state(profile: gr.OAuthProfile | None):
            # Gradio injects the OAuth profile via the type annotation;
            # None means the visitor is not authenticated.
            if profile is None:
                return gr.update(interactive=False, value="π Login to submit")
            return gr.update(interactive=True, value="π€ Submit")

        # Refresh the button state every time the app loads.
        gr.on(triggers=app.load, fn=_sync_login_state, inputs=[], outputs=[submit_btn])

        submit_btn.click(
            attempt_submission,
            inputs=[model_name_input, description_input, gr.State(competition_type)],
            outputs=[submit_status],
            concurrency_limit=1,
        )
    return model_name_input, description_input, submit_btn, submit_status
|
|
|
|
|
def create_hf_pipeline_submission_interface(demo: gr.Blocks):
    """Render the full HF-pipeline submission page inside *demo*.

    Builds the page-level intro markdown, then a two-column row with one
    documented submission panel for the "tossup" task and one for the
    "bonus" task (both via create_model_submission_panel).
    """
    # Page-level introduction and general submission requirements.
    gr.Markdown(
        """
        # Submit Your Hugging Face Pipeline Model

        Welcome to the Hugging Face pipeline submission interface for the QANTA 2025 competition!
        This page allows you to submit your models for both Tossup and Bonus tasks.

        **General Requirements:**
        - Your model must be a public, non-gated repository on the Hugging Face Hub.
        - Ensure your model can be loaded using the `pipeline()` function from the `transformers` library.
        - Adhere to the specified input/output formats for each task.

        For help getting started, check out our [Starter Code](https://github.com/qanta-challenge/qanta25-starter).

        You can also refer to Hugging Face's [custom pipeline creation guide](https://huggingface.co/docs/transformers/en/add_new_pipeline) for more information on how to create a custom pipeline.

        Select the appropriate tab below based on the type of question your model is designed for.
        """
    )
    with gr.Row():
        # Left column: tossup-task instructions + submission panel.
        with gr.Column():
            gr.Markdown(
                """
                ## ποΈ QuizBowl Tossup β Submit your model

                Tossup questions are individual questions progressively revealed where you need to provide an answer, a confidence score, and decide whether to buzz.

                **Pipeline Loading:**
                Your model repository **must** be loadable with:
                ```python
                from transformers import pipeline
                model = pipeline(task="quizbowl-tossup", model="<your-username/your-repo-name>")
                ```

                **Input:**
                The pipeline will receive a dictionary with the key `question_text` (string) which contains the progressively revealed question so far.
                ```python
                {
                    "question_text": "In 1900, this city hosted a world's fair that introduced the public to the first escalator. Its famous tower, designed by Gustave Eiffel, was initially criticized by artists but is now a global icon. Name this European capital."
                }
                ```

                **Output:**
                Similar to our agents, the pipeline **must** return a dictionary with the following keys:
                ```python
                {
                    "answer": <str: concise answer>,
                    "confidence": <float: confidence score between 0.0 and 1.0>,
                    "buzz": <bool: True if your model decides to buzz in with the answer, False otherwise>
                }
                ```
                Enter your Hugging Face model repository ID (`<your-username/your-repo-name>`) and a brief description below, then click "Submit".
                """
            )
            create_model_submission_panel(demo, "tossup")

        # Right column: bonus-task instructions + submission panel.
        with gr.Column():
            gr.Markdown(
                """
                ## π§ QuizBowl Bonus β Submit your model

                Bonus questions consist of a lead-in paragraph followed by multiple parts. Your model will be called for each part.

                **Pipeline Loading:**
                Your model repository **must** be loadable with:
                ```python
                from transformers import pipeline
                model = pipeline(task="quizbowl-bonus", model="<your-username/your-repo-name>")
                ```

                **Input:**
                The pipeline will receive a dictionary with two keys:
                - `leadin` (str): The introductory paragraph for the bonus question.
                - `part` (str): The specific part of the bonus question to answer.
                ```python
                {
                    "leadin": "This author wrote about a young wizard attending a magical school.",
                    "part": "For 10 points, name this author."
                }
                ```

                **Output:**
                Similar to our agents, the pipeline **must** return a dictionary with the following keys for each part:
                ```python
                {
                    "answer": <str: concise answer to the part>,
                    "confidence": <float: confidence score between 0.0 and 1.0>,
                    "explanation": <str: brief explanation (<=30 words) for your answer (useful for human collaboration)>
                }
                ```
                Enter your Hugging Face model repository ID (`<your-username/your-repo-name>`) and a brief description below, then click "Submit".
                """
            )
            create_model_submission_panel(demo, "bonus")
|