Spaces:
Running on CPU Upgrade

akhaliq HF staff committed on
Commit
2eb382e
·
1 Parent(s): 2cc415b

updates for gemini

Browse files
Files changed (5) hide show
  1. app.py +3 -1
  2. app_gemini.py +19 -16
  3. app_gemini_coder.py +28 -0
  4. app_gemini_voice.py +19 -9
  5. requirements.txt +1 -1
app.py CHANGED
@@ -27,10 +27,12 @@ from app_crew import demo as demo_crew
27
  from app_compare import demo as demo_compare
28
  from app_hyperbolic import demo as demo_hyperbolic
29
  from app_openai import demo as demo_openai
 
30
  from utils import get_app
31
 
32
  # Create mapping of providers to their demos
33
  PROVIDERS = {
 
34
  "DeepSeek": demo_deepseek,
35
  "OpenAI": demo_openai,
36
  "Compare": demo_compare,
@@ -62,7 +64,7 @@ PROVIDERS = {
62
  "NVIDIA": demo_nvidia,
63
  }
64
 
65
- demo = get_app(models=list(PROVIDERS.keys()), default_model="DeepSeek", src=PROVIDERS, dropdown_label="Select Provider")
66
 
67
  if __name__ == "__main__":
68
  demo.queue(api_open=False).launch(show_api=False)
 
27
  from app_compare import demo as demo_compare
28
  from app_hyperbolic import demo as demo_hyperbolic
29
  from app_openai import demo as demo_openai
30
+ from app_gemini_coder import demo as demo_gemini_coder
31
  from utils import get_app
32
 
33
  # Create mapping of providers to their demos
34
  PROVIDERS = {
35
+ "Gemini Coder": demo_gemini_coder,
36
  "DeepSeek": demo_deepseek,
37
  "OpenAI": demo_openai,
38
  "Compare": demo_compare,
 
64
  "NVIDIA": demo_nvidia,
65
  }
66
 
67
+ demo = get_app(models=list(PROVIDERS.keys()), default_model="Gemini Coder", src=PROVIDERS, dropdown_label="Select Provider")
68
 
69
  if __name__ == "__main__":
70
  demo.queue(api_open=False).launch(show_api=False)
app_gemini.py CHANGED
@@ -1,23 +1,26 @@
1
- import os
 
2
 
3
- import gemini_gradio
 
 
 
 
4
 
5
- from utils import get_app
 
 
 
 
6
 
 
7
  demo = get_app(
8
- models=[
9
- "gemini-1.5-flash",
10
- "gemini-1.5-flash-8b",
11
- "gemini-1.5-pro",
12
- "gemini-exp-1114",
13
- "gemini-exp-1121",
14
- "gemini-exp-1206",
15
- "gemini-2.0-flash-exp",
16
- "gemini-2.0-flash-thinking-exp-1219",
17
- ],
18
- default_model="gemini-2.0-flash-thinking-exp-1219",
19
- src=gemini_gradio.registry,
20
- accept_token=not os.getenv("GEMINI_API_KEY"),
21
  )
22
 
23
  if __name__ == "__main__":
 
1
+ import ai_gradio
2
+ from utils_ai_gradio import get_app
3
 
4
+ # Get the Gemini models but keep their full names for loading
5
+ GEMINI_MODELS_FULL = [
6
+ k for k in ai_gradio.registry.keys()
7
+ if k.startswith('gemini:')
8
+ ]
9
 
10
+ # Create display names without the prefix
11
+ GEMINI_MODELS_DISPLAY = [
12
+ k.replace('gemini:', '')
13
+ for k in GEMINI_MODELS_FULL
14
+ ]
15
 
16
+ # Create and launch the interface using get_app utility
17
  demo = get_app(
18
+ models=GEMINI_MODELS_FULL, # Use the full names with prefix
19
+ default_model=GEMINI_MODELS_FULL[-1],
20
+ dropdown_label="Select Gemini Model",
21
+ choices=GEMINI_MODELS_DISPLAY, # Display names without prefix
22
+ src=ai_gradio.registry,
23
+ fill_height=True,
 
 
 
 
 
 
 
24
  )
25
 
26
  if __name__ == "__main__":
app_gemini_coder.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ai_gradio
2
+ from utils_ai_gradio import get_app
3
+
4
+ # Get the Gemini models but keep their full names for loading
5
+ GEMINI_MODELS_FULL = [
6
+ k for k in ai_gradio.registry.keys()
7
+ if k.startswith('gemini:')
8
+ ]
9
+
10
+ # Create display names without the prefix
11
+ GEMINI_MODELS_DISPLAY = [
12
+ k.replace('gemini:', '')
13
+ for k in GEMINI_MODELS_FULL
14
+ ]
15
+
16
+ # Create and launch the interface using get_app utility
17
+ demo = get_app(
18
+ models=GEMINI_MODELS_FULL, # Use the full names with prefix
19
+ default_model=GEMINI_MODELS_FULL[-1],
20
+ dropdown_label="Select Gemini Model",
21
+ choices=GEMINI_MODELS_DISPLAY, # Display names without prefix
22
+ src=ai_gradio.registry,
23
+ fill_height=True,
24
+ coder=True,
25
+ )
26
+
27
+ if __name__ == "__main__":
28
+ demo.launch()
app_gemini_voice.py CHANGED
@@ -1,17 +1,27 @@
1
- import os
 
2
 
3
- import gemini_gradio
 
 
 
 
4
 
5
- from utils import get_app
 
 
 
 
6
 
 
7
  demo = get_app(
8
- models=[
9
- "gemini-2.0-flash-exp",
10
- ],
11
- default_model="gemini-2.0-flash-exp",
12
- src=gemini_gradio.registry,
13
- accept_token=not os.getenv("GEMINI_API_KEY"),
14
  enable_voice=True,
 
15
  )
16
 
17
  if __name__ == "__main__":
 
1
+ import ai_gradio
2
+ from utils_ai_gradio import get_app
3
 
4
+ # Get the Gemini models but keep their full names for loading
5
+ GEMINI_MODELS_FULL = [
6
+ k for k in ai_gradio.registry.keys()
7
+ if k.startswith('gemini:')
8
+ ]
9
 
10
+ # Create display names without the prefix
11
+ GEMINI_MODELS_DISPLAY = [
12
+ k.replace('gemini:', '')
13
+ for k in GEMINI_MODELS_FULL
14
+ ]
15
 
16
+ # Create and launch the interface using get_app utility
17
  demo = get_app(
18
+ models=GEMINI_MODELS_FULL, # Use the full names with prefix
19
+ default_model=GEMINI_MODELS_FULL[-2],
20
+ dropdown_label="Select Gemini Model",
21
+ choices=GEMINI_MODELS_DISPLAY, # Display names without prefix
22
+ src=ai_gradio.registry,
 
23
  enable_voice=True,
24
+ fill_height=True,
25
  )
26
 
27
  if __name__ == "__main__":
requirements.txt CHANGED
@@ -513,7 +513,7 @@ xai-gradio==0.0.2
513
  # via anychat (pyproject.toml)
514
  yarl==1.18.3
515
  # via aiohttp
516
- ai-gradio[deepseek,crewai,hyperbolic,openai]==0.1.8
517
  crewai==0.86.0
518
  instructor==1.3.3
519
  crewai-tools>=0.17.0
 
513
  # via anychat (pyproject.toml)
514
  yarl==1.18.3
515
  # via aiohttp
516
+ ai-gradio[deepseek,crewai,hyperbolic,openai,gemini]==0.1.9
517
  crewai==0.86.0
518
  instructor==1.3.3
519
  crewai-tools>=0.17.0