Spaces: Running on CPU Upgrade
add gemini dropdown
app.py CHANGED
@@ -9,10 +9,36 @@ import hyperbolic_gradio
 
 with gr.Blocks(fill_height=True) as demo:
     with gr.Tab("Gemini"):
-        gr.load(
-
+        with gr.Row():
+            gemini_model = gr.Dropdown(
+                choices=[
+                    'gemini-1.5-flash',      # Fast and versatile performance
+                    'gemini-1.5-flash-8b',   # High volume, lower intelligence tasks
+                    'gemini-1.5-pro',        # Complex reasoning tasks
+                    'gemini-exp-1114'        # Quality improvements
+                ],
+                value='gemini-1.5-pro',      # Default to the most advanced model
+                label="Select Gemini Model",
+                interactive=True
+            )
+
+        gemini_interface = gr.load(
+            name=gemini_model.value,
             src=gemini_gradio.registry,
-
+            accept_token=True
+        )
+
+        def update_gemini_model(new_model):
+            return gr.load(
+                name=new_model,
+                src=gemini_gradio.registry,
+                accept_token=True
+            )
+
+        gemini_model.change(
+            fn=update_gemini_model,
+            inputs=[gemini_model],
+            outputs=[gemini_interface]
         )
     with gr.Tab("ChatGPT"):
         with gr.Row():
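
For reference, each dropdown choice is passed straight to gr.load as the model name. Below is a minimal standalone sketch of that call, assuming the gemini-gradio package is installed and exposes the gemini_gradio.registry used above; accept_token=True lets the app prompt for a Gemini API key in the UI.

import gradio as gr
import gemini_gradio  # assumed installed; provides the registry passed to gr.load above

# Load a single Gemini chat interface for one model name.
demo = gr.load(
    name='gemini-1.5-pro',       # any of the dropdown choices above
    src=gemini_gradio.registry,
    accept_token=True,           # prompt for the API key in the UI
)

if __name__ == "__main__":
    demo.launch()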