new gemini model
- app.py +2 -2
- app_gemini.py +2 -1
app.py CHANGED

@@ -29,9 +29,9 @@ from utils import get_app
 
 # Create mapping of providers to their demos
 PROVIDERS = {
+    "Gemini": demo_gemini,
     "OpenAI Voice": demo_openai_voice,
     "Gemini Voice": demo_gemini_voice,
-    "Gemini": demo_gemini,
     "LumaAI": demo_lumaai,
     "ChatGPT": demo_openai,
     "Grok": demo_grok,
@@ -58,7 +58,7 @@ PROVIDERS = {
     "NVIDIA": demo_nvidia,
 }
 
-demo = get_app(models=list(PROVIDERS.keys()), default_model="
+demo = get_app(models=list(PROVIDERS.keys()), default_model="Gemini", src=PROVIDERS, dropdown_label="Select Provider")
 
 if __name__ == "__main__":
     demo.queue(api_open=False).launch(show_api=False)
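Note: utils.get_app itself is not part of this diff, so the sketch below is only a rough, self-contained approximation of what app.py wires together: a name-to-demo mapping with "Gemini" now listed first and used as the default. gr.TabbedInterface stands in for get_app's "Select Provider" dropdown, and the _placeholder helper plus the shortened PROVIDERS dict are hypothetical; the real file imports demo_gemini, demo_openai_voice, and the other demos directly.

# Illustrative sketch only -- not the actual utils.get_app implementation.
import gradio as gr

def _placeholder(name: str) -> gr.Blocks:
    # Stand-in for the real imported demos (demo_gemini, demo_openai_voice, ...).
    with gr.Blocks() as b:
        gr.Markdown(f"{name} demo placeholder")
    return b

PROVIDERS = {
    "Gemini": _placeholder("Gemini"),              # now first, and the new default
    "OpenAI Voice": _placeholder("OpenAI Voice"),
    "Gemini Voice": _placeholder("Gemini Voice"),
}

# TabbedInterface is used here instead of get_app's provider dropdown.
demo = gr.TabbedInterface(list(PROVIDERS.values()), tab_names=list(PROVIDERS.keys()))

if __name__ == "__main__":
    demo.queue(api_open=False).launch(show_api=False)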
app_gemini.py CHANGED

@@ -13,8 +13,9 @@ demo = get_app(
         "gemini-exp-1121",
         "gemini-exp-1206",
         "gemini-2.0-flash-exp",
+        "gemini-2.0-flash-thinking-exp-1219",
     ],
-    default_model="gemini-2.0-flash-exp",
+    default_model="gemini-2.0-flash-thinking-exp-1219",
     src=gemini_gradio.registry,
     accept_token=not os.getenv("GEMINI_API_KEY"),
 )
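Only the changed lines of app_gemini.py appear above. As a rough standalone check of the newly added model, the sketch below loads it directly through the gemini_gradio registry (the same src that app_gemini.py hands to get_app), assuming the gemini-gradio package is installed and GEMINI_API_KEY is set in the environment.

# Standalone sketch: load the new model via gemini_gradio's registry.
# Assumes GEMINI_API_KEY is set; otherwise the demo will prompt for a key.
import gradio as gr
import gemini_gradio

demo = gr.load(
    name="gemini-2.0-flash-thinking-exp-1219",
    src=gemini_gradio.registry,
)

if __name__ == "__main__":
    demo.launch()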