import json

import gradio as gr
import pandas as pd
import requests
from gradio_leaderboard import ColumnFilter, Leaderboard, SelectColumns

# Pull the LiteLLM model pricing / context-window catalogue.
data = requests.get(
    "https://raw.githubusercontent.com/BerriAI/litellm/refs/heads/main/model_prices_and_context_window.json",
    timeout=30,
).json()

# The catalogue includes a placeholder entry describing its schema; drop it.
data.pop("sample_spec", None)

df = pd.DataFrame.from_dict(data, orient="index").reset_index()

# Keep only models that are not deprecated (no deprecation date, or one in the future).
df["deprecation_date"] = pd.to_datetime(df["deprecation_date"], errors="coerce")
df = df[df["deprecation_date"].isna() | (df["deprecation_date"] > pd.Timestamp.now())]

filters = []
for col in df.columns:
    # Expose every "supports_*" capability flag as a boolean leaderboard filter.
    if "supports_" in col:
        filters.append(ColumnFilter(col, type="boolean", default=False))
    # The metadata column holds nested dicts; serialise it so the table can render it.
    if col == "metadata":
        try:
            df[col] = df[col].apply(json.dumps)
        except (TypeError, ValueError):
            pass
    # Convert per-token costs to cost per million tokens and rename the columns accordingly.
    if "cost_per_token" in col:
        df[col] = df[col] * 1_000_000
        df = df.rename(columns={col: col.replace("cost_per_token", "cost_per_M_tokens")})

df = df.rename(columns={"index": "model_name"})

with gr.Blocks() as demo:
    gr.Markdown("""
    # 🥇 LLM Comparison (LiteLLM)
    """)
    Leaderboard(
        value=df,
        select_columns=SelectColumns(
            default_selection=[
                "model_name",
                "max_input_tokens",
                "max_output_tokens",
                "input_cost_per_M_tokens",
                "output_cost_per_M_tokens",
                "tpm",
                "rpm",
                "rpd",
            ],
            label="Select Columns to Display:",
        ),
        search_columns=["model_name"],
        filter_columns=["mode"] + filters,
        height=1920,
    )

if __name__ == "__main__":
    demo.launch()