import gradio as gr
import gemini_gradio
import openai_gradio
import anthropic_gradio
import sambanova_gradio
import xai_gradio
import hyperbolic_gradio
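
# Each tab below wraps a provider-specific chat UI via gr.load().
# accept_token=True adds a textbox so users can supply their own API key
# for that provider at runtime instead of storing keys in the Space.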
with gr.Blocks(fill_height=True) as demo:
    with gr.Tab("Gemini"):
        gr.load(
            name='gemini-1.5-pro-002',
            src=gemini_gradio.registry,
            accept_token=True
        )
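    # ChatGPT tab: a dropdown lets the user pick an OpenAI model; its change
    # event rebuilds the chat interface with the newly selected model.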
with gr.Tab("ChatGPT"):
with gr.Row():
model_choice = gr.Dropdown(
choices=[
'gpt-4o', # Most advanced model
'gpt-4o-2024-08-06', # Latest snapshot
'gpt-4o-2024-05-13', # Original snapshot
'chatgpt-4o-latest', # Latest ChatGPT version
'gpt-4o-mini', # Small model
'gpt-4o-mini-2024-07-18', # Latest mini version
'o1-preview', # Reasoning model
'o1-preview-2024-09-12', # Latest o1 model snapshot
'o1-mini', # Faster reasoning model
'o1-mini-2024-09-12', # Latest o1-mini model snapshot
'gpt-4-turbo', # Latest GPT-4 Turbo model
'gpt-4-turbo-2024-04-09', # Latest GPT-4 Turbo snapshot
'gpt-4-turbo-preview', # GPT-4 Turbo preview model
'gpt-4-0125-preview', # GPT-4 Turbo preview model for laziness
'gpt-4-1106-preview', # Improved instruction following model
'gpt-4', # Standard GPT-4 model
'gpt-4-0613' # Snapshot of GPT-4 from June 2023
],
value='gpt-4o', # Default to the most advanced model
label="Select Model",
interactive=True
)
chatgpt_interface = gr.load(
name=model_choice.value,
src=openai_gradio.registry,
accept_token=True
)
def update_model(new_model):
return gr.load(
name=new_model,
src=openai_gradio.registry,
accept_token=True
)
model_choice.change(
fn=update_model,
inputs=[model_choice],
outputs=[chatgpt_interface]
)
with gr.Tab("Claude"):
with gr.Row():
claude_model = gr.Dropdown(
choices=[
'claude-3-5-sonnet-20241022', # Latest Sonnet
'claude-3-5-haiku-20241022', # Latest Haiku
'claude-3-opus-20240229', # Opus
'claude-3-sonnet-20240229', # Previous Sonnet
'claude-3-haiku-20240307' # Previous Haiku
],
value='claude-3-5-sonnet-20241022', # Default to latest Sonnet
label="Select Model",
interactive=True
)
claude_interface = gr.load(
name=claude_model.value,
src=anthropic_gradio.registry,
accept_token=True
)
def update_claude_model(new_model):
return gr.load(
name=new_model,
src=anthropic_gradio.registry,
accept_token=True
)
claude_model.change(
fn=update_claude_model,
inputs=[claude_model],
outputs=[claude_interface]
)
with gr.Tab("Meta Llama-3.2-90B-Vision-Instruct"):
gr.load(
name='Llama-3.2-90B-Vision-Instruct',
src=sambanova_gradio.registry,
accept_token=True,
multimodal=True,
description="Requires SambaNova API key"
)
with gr.Tab("Grok"):
gr.load(
name='grok-beta',
src=xai_gradio.registry,
accept_token=True
)
with gr.Tab("Qwen2.5 72B"):
gr.load(
name='Qwen/Qwen2.5-72B-Instruct',
src=hyperbolic_gradio.registry,
accept_token=True
)
demo.launch()