import gradio as gr from gradio_leaderboard import Leaderboard, ColumnFilter import json import os import time import subprocess import requests from huggingface_hub import HfApi, hf_hub_download from huggingface_hub.errors import HfHubHTTPError import backoff from dotenv import load_dotenv import pandas as pd import random import plotly.graph_objects as go from plotly.subplots import make_subplots from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.triggers.cron import CronTrigger from datetime import datetime, timezone # Load environment variables load_dotenv() # ============================================================================= # CONFIGURATION # ============================================================================= AGENTS_REPO = "SWE-Arena/bot_metadata" # HuggingFace dataset for assistant metadata AGENTS_REPO_LOCAL_PATH = os.path.expanduser("~/bot_metadata") # Local git clone path LEADERBOARD_FILENAME = f"{os.getenv('COMPOSE_PROJECT_NAME')}.json" LEADERBOARD_REPO = "SWE-Arena/leaderboard_data" # HuggingFace dataset for leaderboard data LONGSTANDING_GAP_DAYS = 30 # Minimum days for an issue to be considered long-standing GIT_SYNC_TIMEOUT = 300 # 5 minutes timeout for git pull MAX_RETRIES = 5 LEADERBOARD_COLUMNS = [ ("Assistant", "string"), ("Website", "string"), ("Total Issues", "number"), ("Total Discussions", "number"), ("Issue Resolved Rate (%)", "number"), ("Discussion Resolved Rate (%)", "number"), ("Resolved Wanted Issues", "number"), ("Resolved Issues", "number"), ("Resolved Discussions", "number"), ] # ============================================================================= # HUGGINGFACE API WRAPPERS WITH BACKOFF # ============================================================================= def is_rate_limit_error(e): """Check if exception is a HuggingFace rate limit error (429).""" if isinstance(e, HfHubHTTPError): return e.response.status_code == 429 return False @backoff.on_exception( backoff.expo, HfHubHTTPError, max_tries=MAX_RETRIES, base=300, max_value=3600, giveup=lambda e: not is_rate_limit_error(e), on_backoff=lambda details: print( f"Rate limited. Retrying in {details['wait']/60:.1f} minutes ({details['wait']:.0f}s) - attempt {details['tries']}/5..." ) ) def list_repo_files_with_backoff(api, **kwargs): """Wrapper for api.list_repo_files() with exponential backoff for rate limits.""" return api.list_repo_files(**kwargs) @backoff.on_exception( backoff.expo, HfHubHTTPError, max_tries=MAX_RETRIES, base=300, max_value=3600, giveup=lambda e: not is_rate_limit_error(e), on_backoff=lambda details: print( f"Rate limited. Retrying in {details['wait']/60:.1f} minutes ({details['wait']:.0f}s) - attempt {details['tries']}/5..." ) ) def hf_hub_download_with_backoff(**kwargs): """Wrapper for hf_hub_download() with exponential backoff for rate limits.""" return hf_hub_download(**kwargs) # ============================================================================= # GITHUB USERNAME VALIDATION # ============================================================================= def validate_github_username(identifier): """Verify that a GitHub identifier exists.""" try: response = requests.get(f'https://api.github.com/users/{identifier}', timeout=10) return (True, "Username is valid") if response.status_code == 200 else (False, "GitHub identifier not found" if response.status_code == 404 else f"Validation error: HTTP {response.status_code}") except Exception as e: return False, f"Validation error: {str(e)}" # ============================================================================= # HUGGINGFACE DATASET OPERATIONS # ============================================================================= def sync_agents_repo(): """ Sync local bot_metadata repository with remote using git pull. This is MANDATORY to ensure we have the latest bot data. Raises exception if sync fails. """ if not os.path.exists(AGENTS_REPO_LOCAL_PATH): error_msg = f"Local repository not found at {AGENTS_REPO_LOCAL_PATH}" print(f" Error {error_msg}") print(f" Please clone it first: git clone https://huggingface.co/datasets/{AGENTS_REPO}") raise FileNotFoundError(error_msg) if not os.path.exists(os.path.join(AGENTS_REPO_LOCAL_PATH, '.git')): error_msg = f"{AGENTS_REPO_LOCAL_PATH} exists but is not a git repository" print(f" Error {error_msg}") raise ValueError(error_msg) try: # Run git pull with extended timeout due to large repository result = subprocess.run( ['git', 'pull'], cwd=AGENTS_REPO_LOCAL_PATH, capture_output=True, text=True, timeout=GIT_SYNC_TIMEOUT ) if result.returncode == 0: output = result.stdout.strip() if "Already up to date" in output or "Already up-to-date" in output: print(f" Success Repository is up to date") else: print(f" Success Repository synced successfully") if output: # Print first few lines of output lines = output.split('\n')[:5] for line in lines: print(f" {line}") return True else: error_msg = f"Git pull failed: {result.stderr.strip()}" print(f" Error {error_msg}") raise RuntimeError(error_msg) except subprocess.TimeoutExpired: error_msg = f"Git pull timed out after {GIT_SYNC_TIMEOUT} seconds" print(f" Error {error_msg}") raise TimeoutError(error_msg) except (FileNotFoundError, ValueError, RuntimeError, TimeoutError): raise # Re-raise expected exceptions except Exception as e: error_msg = f"Error syncing repository: {str(e)}" print(f" Error {error_msg}") raise RuntimeError(error_msg) from e def load_agents_from_hf(): """ Load all assistant metadata JSON files from local git repository. ALWAYS syncs with remote first to ensure we have the latest bot data. """ # MANDATORY: Sync with remote first to get latest bot data print(f" Syncing bot_metadata repository to get latest assistants...") sync_agents_repo() # Will raise exception if sync fails assistants = [] # Scan local directory for JSON files if not os.path.exists(AGENTS_REPO_LOCAL_PATH): raise FileNotFoundError(f"Local repository not found at {AGENTS_REPO_LOCAL_PATH}") # Walk through the directory to find all JSON files files_processed = 0 print(f" Loading assistant metadata from {AGENTS_REPO_LOCAL_PATH}...") for root, dirs, files in os.walk(AGENTS_REPO_LOCAL_PATH): # Skip .git directory if '.git' in root: continue for filename in files: if not filename.endswith('.json'): continue files_processed += 1 file_path = os.path.join(root, filename) try: with open(file_path, 'r', encoding='utf-8') as f: agent_data = json.load(f) # Only include active assistants if agent_data.get('status') != 'active': continue # Extract github_identifier from filename github_identifier = filename.replace('.json', '') agent_data['github_identifier'] = github_identifier assistants.append(agent_data) except Exception as e: print(f" Warning Error loading {filename}: {str(e)}") continue print(f" Success Loaded {len(assistants)} active assistants (from {files_processed} total files)") return assistants def get_hf_token(): """Get HuggingFace token from environment variables.""" token = os.getenv('HF_TOKEN') if not token: print("Warning: HF_TOKEN not found in environment variables") return token def upload_with_retry(api, path_or_fileobj, path_in_repo, repo_id, repo_type, token, max_retries=5): """ Upload file to HuggingFace with exponential backoff retry logic. Args: api: HfApi instance path_or_fileobj: Local file path to upload path_in_repo: Target path in the repository repo_id: Repository ID repo_type: Type of repository (e.g., "dataset") token: HuggingFace token max_retries: Maximum number of retry attempts Returns: True if upload succeeded, raises exception if all retries failed """ delay = 2.0 # Initial delay in seconds for attempt in range(max_retries): try: api.upload_file( path_or_fileobj=path_or_fileobj, path_in_repo=path_in_repo, repo_id=repo_id, repo_type=repo_type, token=token ) if attempt > 0: print(f" Upload succeeded on attempt {attempt + 1}/{max_retries}") return True except Exception as e: if attempt < max_retries - 1: wait_time = delay + random.uniform(0, 1.0) print(f" Upload failed (attempt {attempt + 1}/{max_retries}): {str(e)}") print(f" Retrying in {wait_time:.1f} seconds...") time.sleep(wait_time) delay = min(delay * 2, 60.0) # Exponential backoff, max 60s else: print(f" Upload failed after {max_retries} attempts: {str(e)}") raise def save_agent_to_hf(data): """Save a new assistant to HuggingFace dataset as {identifier}.json in root.""" try: api = HfApi() token = get_hf_token() if not token: raise Exception("No HuggingFace token found. Please set HF_TOKEN in your Space settings.") identifier = data['github_identifier'] filename = f"{identifier}.json" # Save locally first with open(filename, 'w') as f: json.dump(data, f, indent=2) try: # Upload to HuggingFace (root directory) upload_with_retry( api=api, path_or_fileobj=filename, path_in_repo=filename, repo_id=AGENTS_REPO, repo_type="dataset", token=token ) print(f"Saved assistant to HuggingFace: {filename}") return True finally: # Always clean up local file, even if upload fails if os.path.exists(filename): os.remove(filename) except Exception as e: print(f"Error saving assistant: {str(e)}") return False def load_leaderboard_data_from_hf(): """ Load leaderboard data and monthly metrics from HuggingFace dataset. Returns: dict: Dictionary with 'leaderboard', 'monthly_metrics', and 'metadata' keys Returns None if file doesn't exist or error occurs """ try: token = get_hf_token() # Download file file_path = hf_hub_download_with_backoff( repo_id=LEADERBOARD_REPO, filename=LEADERBOARD_FILENAME, repo_type="dataset", token=token ) # Load JSON data with open(file_path, 'r') as f: data = json.load(f) last_updated = data.get('metadata', {}).get('last_updated', 'Unknown') print(f"Loaded leaderboard data from HuggingFace (last updated: {last_updated})") return data except Exception as e: print(f"Could not load leaderboard data from HuggingFace: {str(e)}") return None # ============================================================================= # UI FUNCTIONS # ============================================================================= def create_monthly_metrics_plot(type="issue", top_n=5): """ Create a Plotly figure with dual y-axes showing monthly metrics: - Left y-axis: Resolved Rate (%) as line curves - Right y-axis: Total count (Issues or Discussions) as bar charts Each assistant gets a unique color for both their line and bars. Args: type: Type of metrics to display - "issue" or "discussion" (default: "issue") top_n: Number of top assistants to show (default: 5) """ # Determine metrics key and field names based on type if type == "discussion": metrics_key = 'discussion_monthly_metrics' total_field = 'total_discussions' no_data_msg = "No discussion data available for visualization" total_label = "Total Discussions" print_msg = "discussion" else: # default to "issue" metrics_key = 'issue_monthly_metrics' total_field = 'total_issues' no_data_msg = "No data available for visualization" total_label = "Total Issues" print_msg = "issue" # Load from saved dataset saved_data = load_leaderboard_data_from_hf() if not saved_data or metrics_key not in saved_data: # Return an empty figure with a message fig = go.Figure() fig.add_annotation( text=no_data_msg, xref="paper", yref="paper", x=0.5, y=0.5, showarrow=False, font=dict(size=16) ) fig.update_layout( title=None, xaxis_title=None, height=500 ) return fig metrics = saved_data[metrics_key] print(f"Loaded {print_msg} monthly metrics from saved dataset") # Apply top_n filter if specified if top_n is not None and top_n > 0 and metrics.get('assistants'): # Calculate total count for each assistant agent_totals = [] for agent_name in metrics['assistants']: agent_data = metrics['data'].get(agent_name, {}) if type == "discussion": total_count = agent_data.get(total_field, 0) else: total_count = sum(agent_data.get(total_field, [])) agent_totals.append((agent_name, total_count)) # Sort by total count and take top N agent_totals.sort(key=lambda x: x[1], reverse=True) top_agents = [agent_name for agent_name, _ in agent_totals[:top_n]] # Filter metrics to only include top assistants metrics = { 'assistants': top_agents, 'months': metrics['months'], 'data': {assistant: metrics['data'][assistant] for assistant in top_agents if assistant in metrics['data']} } if not metrics['assistants'] or not metrics['months']: # Return an empty figure with a message fig = go.Figure() fig.add_annotation( text=no_data_msg, xref="paper", yref="paper", x=0.5, y=0.5, showarrow=False, font=dict(size=16) ) fig.update_layout( title=None, xaxis_title=None, height=500 ) return fig # Create figure with secondary y-axis fig = make_subplots(specs=[[{"secondary_y": True}]]) # Generate unique colors for many assistants using HSL color space def generate_color(index, total): """Generate distinct colors using HSL color space for better distribution""" hue = (index * 360 / total) % 360 saturation = 70 + (index % 3) * 10 # Vary saturation slightly lightness = 45 + (index % 2) * 10 # Vary lightness slightly return f'hsl({hue}, {saturation}%, {lightness}%)' assistants = metrics['assistants'] months = metrics['months'] data = metrics['data'] # Generate colors for all assistants agent_colors = {assistant: generate_color(idx, len(assistants)) for idx, assistant in enumerate(assistants)} # Add traces for each assistant for idx, agent_name in enumerate(assistants): color = agent_colors[agent_name] agent_data = data[agent_name] # Add line trace for resolved rate (left y-axis) resolved_rates = agent_data['resolved_rates'] # Filter out None values for plotting x_resolved = [month for month, rate in zip(months, resolved_rates) if rate is not None] y_resolved = [rate for rate in resolved_rates if rate is not None] if x_resolved and y_resolved: # Only add trace if there's data fig.add_trace( go.Scatter( x=x_resolved, y=y_resolved, name=agent_name, mode='lines+markers', line=dict(color=color, width=2), marker=dict(size=8), legendgroup=agent_name, showlegend=(top_n is not None and top_n <= 10), # Show legend for top N assistants hovertemplate='Assistant: %{fullData.name}
' + 'Month: %{x}
' + 'Resolved Rate: %{y:.2f}%
' + '' ), secondary_y=False ) # Add bar trace for total count (right y-axis) # Only show bars for months where assistant has data x_bars = [] y_bars = [] for month, count in zip(months, agent_data[total_field]): if count > 0: # Only include months with data x_bars.append(month) y_bars.append(count) if x_bars and y_bars: # Only add trace if there's data fig.add_trace( go.Bar( x=x_bars, y=y_bars, name=agent_name, marker=dict(color=color, opacity=0.6), legendgroup=agent_name, showlegend=False, # Hide duplicate legend entry (already shown in Scatter) hovertemplate=f'Assistant: %{{fullData.name}}
' + f'Month: %{{x}}
' + f'{total_label}: %{{y}}
' + '', offsetgroup=agent_name # Group bars by assistant for proper spacing ), secondary_y=True ) # Update axes labels fig.update_xaxes(title_text=None) fig.update_yaxes( title_text="Resolved Rate (%)", range=[0, 100], secondary_y=False, showticklabels=True, tickmode='linear', dtick=10, showgrid=True ) fig.update_yaxes(title_text=f"{total_label}", secondary_y=True) # Update layout show_legend = (top_n is not None and top_n <= 10) fig.update_layout( title=None, hovermode='closest', # Show individual assistant info on hover barmode='group', height=600, showlegend=show_legend, margin=dict(l=50, r=150 if show_legend else 50, t=50, b=50) # More right margin when legend is shown ) return fig def get_leaderboard_dataframe(): """ Load leaderboard from saved dataset and convert to pandas DataFrame for display. Returns formatted DataFrame sorted by total issues. """ # Load from saved dataset saved_data = load_leaderboard_data_from_hf() if not saved_data or 'leaderboard' not in saved_data: print(f"No leaderboard data available") # Return empty DataFrame with correct columns if no data column_names = [col[0] for col in LEADERBOARD_COLUMNS] return pd.DataFrame(columns=column_names) cache_dict = saved_data['leaderboard'] last_updated = saved_data.get('metadata', {}).get('last_updated', 'Unknown') print(f"Loaded leaderboard from saved dataset (last updated: {last_updated})") print(f"Cache dict size: {len(cache_dict)}") if not cache_dict: print("WARNING: cache_dict is empty!") # Return empty DataFrame with correct columns if no data column_names = [col[0] for col in LEADERBOARD_COLUMNS] return pd.DataFrame(columns=column_names) rows = [] filtered_count = 0 for identifier, data in cache_dict.items(): total_issues = data.get('total_issues', 0) print(f" Assistant '{identifier}': {total_issues} issues") # Filter out assistants with zero total issues if total_issues == 0: filtered_count += 1 continue # Only include display-relevant fields (new column order) rows.append([ data.get('name', 'Unknown'), data.get('website', 'N/A'), total_issues, # Total Issues data.get('total_discussions', 0), # Total Discussions data.get('resolved_rate', 0.0), # Issue Resolved Rate (%) data.get('discussion_resolved_rate', 0.0), # Discussion Resolved Rate (%) data.get('resolved_wanted_issues', 0), # Resolved Wanted Issues data.get('resolved_issues', 0), # Resolved Issues data.get('resolved_discussions', 0), # Resolved Discussions ]) print(f"Filtered out {filtered_count} assistants with 0 issues") print(f"Leaderboard will show {len(rows)} assistants") # Create DataFrame column_names = [col[0] for col in LEADERBOARD_COLUMNS] df = pd.DataFrame(rows, columns=column_names) # Ensure numeric types numeric_cols = [ "Total Issues", "Total Discussions", "Issue Resolved Rate (%)", "Discussion Resolved Rate (%)", "Resolved Issues", "Resolved Discussions", "Resolved Wanted Issues" ] for col in numeric_cols: if col in df.columns: df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0) # Sort by Total Issues descending if "Total Issues" in df.columns and not df.empty: df = df.sort_values(by="Total Issues", ascending=False).reset_index(drop=True) print(f"Final DataFrame shape: {df.shape}") print("="*60 + "\n") return df def get_wanted_issues_dataframe(): """Load wanted issues and convert to pandas DataFrame.""" saved_data = load_leaderboard_data_from_hf() if not saved_data or 'wanted_issues' not in saved_data: print(f"No wanted issues data available") return pd.DataFrame(columns=["Title", "URL", "Age (days)", "Labels"]) wanted_issues = saved_data['wanted_issues'] print(f"Loaded {len(wanted_issues)} wanted issues") if not wanted_issues: return pd.DataFrame(columns=["Title", "URL", "Age (days)", "Labels"]) rows = [] for issue in wanted_issues: # Calculate age created_at = issue.get('created_at') age_days = 0 if created_at and created_at != 'N/A': try: created = datetime.fromisoformat(created_at.replace('Z', '+00:00')) age_days = (datetime.now(timezone.utc) - created).days except: pass # Create clickable link url = issue.get('url', '') repo = issue.get('repo', '') issue_number = issue.get('number', '') url_link = f'{repo}#{issue_number}' rows.append([ issue.get('title', ''), url_link, age_days, ', '.join(issue.get('labels', [])) ]) df = pd.DataFrame(rows, columns=["Title", "URL", "Age (days)", "Labels"]) # Sort by age descending if "Age (days)" in df.columns and not df.empty: df = df.sort_values(by="Age (days)", ascending=False).reset_index(drop=True) return df def submit_agent(identifier, agent_name, organization, website): """ Submit a new assistant to the leaderboard. Validates input and saves submission. """ # Validate required fields if not identifier or not identifier.strip(): return "ERROR: GitHub identifier is required", gr.update() if not agent_name or not agent_name.strip(): return "ERROR: Assistant name is required", gr.update() if not organization or not organization.strip(): return "ERROR: Organization name is required", gr.update() if not website or not website.strip(): return "ERROR: Website URL is required", gr.update() # Clean inputs identifier = identifier.strip() agent_name = agent_name.strip() organization = organization.strip() website = website.strip() # Validate GitHub identifier is_valid, message = validate_github_username(identifier) if not is_valid: return f"ERROR: {message}", gr.update() # Check for duplicates by loading assistants from HuggingFace assistants = load_agents_from_hf() if assistants: existing_names = {assistant['github_identifier'] for assistant in assistants} if identifier in existing_names: return f"WARNING: Assistant with identifier '{identifier}' already exists", gr.update() # Create submission submission = { 'name': agent_name, 'organization': organization, 'github_identifier': identifier, 'website': website, 'status': 'active' } # Save to HuggingFace if not save_agent_to_hf(submission): return "ERROR: Failed to save submission", gr.update() # Return success message - data will be populated by backend updates return f"SUCCESS: Successfully submitted {agent_name}! Issue data will be automatically populated by the backend system via the maintainers.", gr.update() # ============================================================================= # DATA RELOAD FUNCTION # ============================================================================= def reload_leaderboard_data(): """ Reload leaderboard data from HuggingFace. This function is called by the scheduler on a daily basis. """ print(f"\n{'='*80}") print(f"Reloading leaderboard data from HuggingFace...") print(f"{'='*80}\n") try: data = load_leaderboard_data_from_hf() if data: print(f"Successfully reloaded leaderboard data") print(f" Last updated: {data.get('metadata', {}).get('last_updated', 'Unknown')}") print(f" Assistants: {len(data.get('leaderboard', {}))}") else: print(f"No data available") except Exception as e: print(f"Error reloading leaderboard data: {str(e)}") print(f"{'='*80}\n") # ============================================================================= # GRADIO APPLICATION # ============================================================================= print(f"\nStarting SWE Assistant Issue Leaderboard") print(f" Data source: {LEADERBOARD_REPO}") print(f" Reload frequency: Daily at 12:00 AM UTC\n") # Start APScheduler for daily data reload at 12:00 AM UTC scheduler = BackgroundScheduler(timezone="UTC") scheduler.add_job( reload_leaderboard_data, trigger=CronTrigger(hour=0, minute=0), # 12:00 AM UTC daily id='daily_data_reload', name='Daily Data Reload', replace_existing=True ) scheduler.start() print(f"\n{'='*80}") print(f"Scheduler initialized successfully") print(f"Reload schedule: Daily at 12:00 AM UTC") print(f"On startup: Loads cached data from HuggingFace on demand") print(f"{'='*80}\n") # Create Gradio interface with gr.Blocks(title="SWE Assistant Issue & Discussion Leaderboard", theme=gr.themes.Soft()) as app: gr.Markdown("# SWE Assistant Issue & Discussion Leaderboard") gr.Markdown(f"Track and compare GitHub issue and discussion resolution statistics for SWE assistants") with gr.Tabs(): # Leaderboard Tab with gr.Tab("Leaderboard"): gr.Markdown("*Statistics are based on assistant issue resolution activity tracked by the system*") leaderboard_table = Leaderboard( value=pd.DataFrame(columns=[col[0] for col in LEADERBOARD_COLUMNS]), # Empty initially datatype=LEADERBOARD_COLUMNS, search_columns=["Assistant", "Website"], filter_columns=[ ColumnFilter( "Issue Resolved Rate (%)", min=0, max=100, default=[0, 100], type="slider", label="Issue Resolved Rate (%)" ) ] ) # Load leaderboard data when app starts app.load( fn=get_leaderboard_dataframe, inputs=[], outputs=[leaderboard_table] ) # Monthly Metrics Section gr.Markdown("---") # Divider gr.Markdown("## Monthly Performance Metrics - Top 5 Assistants") with gr.Row(): with gr.Column(): gr.Markdown("*Issue volume and resolved rate over time*") monthly_metrics_plot = gr.Plot() with gr.Column(): gr.Markdown("*Discussion volume and resolved rate over time*") discussion_metrics_plot = gr.Plot() # Load monthly metrics when app starts app.load( fn=lambda: create_monthly_metrics_plot(), inputs=[], outputs=[monthly_metrics_plot] ) # Load discussion monthly metrics when app starts app.load( fn=lambda: create_monthly_metrics_plot(type="discussion"), inputs=[], outputs=[discussion_metrics_plot] ) # Issues Wanted Tab with gr.Tab("Issues Wanted"): gr.Markdown("### Long-Standing Patch-Wanted Issues") gr.Markdown(f"*Issues open for {LONGSTANDING_GAP_DAYS}+ days with patch-wanted labels from tracked organizations*") wanted_table = gr.Dataframe( value=pd.DataFrame(columns=["Title", "URL", "Age (days)", "Labels"]), datatype=["str", "html", "number", "str"], interactive=False, wrap=True ) app.load( fn=get_wanted_issues_dataframe, inputs=[], outputs=[wanted_table] ) # Submit Assistant Tab with gr.Tab("Submit Your Assistant"): gr.Markdown("Fill in the details below to add your assistant to the leaderboard.") with gr.Row(): with gr.Column(): github_input = gr.Textbox( label="GitHub Identifier*", placeholder="Your assistant username (e.g., my-assistant[bot])" ) name_input = gr.Textbox( label="Assistant Name*", placeholder="Your assistant's display name" ) with gr.Column(): organization_input = gr.Textbox( label="Organization*", placeholder="Your organization or team name" ) website_input = gr.Textbox( label="Website*", placeholder="https://your-assistant-website.com" ) submit_button = gr.Button( "Submit Assistant", variant="primary" ) submission_status = gr.Textbox( label="Submission Status", interactive=False ) # Event handler submit_button.click( fn=submit_agent, inputs=[github_input, name_input, organization_input, website_input], outputs=[submission_status, leaderboard_table] ) # Launch application if __name__ == "__main__": app.launch()