
Commit 2766701 · Parent(s): ed79ac1
akhaliq committed: update qwen

Files changed (3):
  1. app.py +2 -2
  2. app_qwen.py +14 -13
  3. requirements.txt +1 -0
app.py CHANGED
@@ -29,6 +29,7 @@ from utils import get_app
 
 # Create mapping of providers to their demos
 PROVIDERS = {
+    "Qwen": demo_qwen,
     "Gemini": demo_gemini,
     "OpenAI Voice": demo_openai_voice,
     "Gemini Voice": demo_gemini_voice,
@@ -44,7 +45,6 @@ PROVIDERS = {
     "Groq": demo_groq,
     "Meta Llama": demo_meta,
     "Paligemma": demo_paligemma,
-    "Qwen": demo_qwen,
     "Replicate": demo_replicate,
     "Huggingface": demo_huggingface,
     "Fal": demo_fal,
@@ -58,7 +58,7 @@ PROVIDERS = {
     "NVIDIA": demo_nvidia,
 }
 
-demo = get_app(models=list(PROVIDERS.keys()), default_model="Gemini", src=PROVIDERS, dropdown_label="Select Provider")
+demo = get_app(models=list(PROVIDERS.keys()), default_model="Qwen", src=PROVIDERS, dropdown_label="Select Provider")
 
 if __name__ == "__main__":
     demo.queue(api_open=False).launch(show_api=False)
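
The call above delegates to a local utils.get_app helper that does not appear in this diff. Purely as illustration, here is a hypothetical sketch of such a provider switcher for the case where src is a dict of pre-built Gradio demos; every name and behavior below is an assumption, not the repo's actual implementation.

import gradio as gr

# Hypothetical sketch only; the real utils.get_app lives elsewhere in this repo.
# Assumes `src` maps provider names to pre-built gr.Blocks demos and that a
# dropdown toggles which provider's demo is visible.
def get_app(models, default_model, src, dropdown_label="Select Model", **kwargs):
    with gr.Blocks() as app:
        selector = gr.Dropdown(choices=models, value=default_model, label=dropdown_label)
        columns = []
        for name in models:
            # Render each provider's demo inside its own column, hidden unless selected.
            with gr.Column(visible=(name == default_model)) as col:
                src[name].render()
            columns.append(col)

        def switch(selected):
            return [gr.update(visible=(name == selected)) for name in models]

        selector.change(switch, inputs=selector, outputs=columns)
    return app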
app_qwen.py CHANGED
@@ -1,24 +1,25 @@
 import os
 
-import dashscope_gradio
+import ai_gradio
 
 from utils import get_app
 
 demo = get_app(
     models=[
-        "qwen-turbo-latest",
-        "qwen-turbo",
-        "qwen-plus",
-        "qwen-max",
-        "qwen1.5-110b-chat",
-        "qwen1.5-72b-chat",
-        "qwen1.5-32b-chat",
-        "qwen1.5-14b-chat",
-        "qwen1.5-7b-chat",
-        "qwq-32b-preview",
+        "qwen:qwen-turbo-latest",
+        "qwen:qwen-turbo",
+        "qwen:qwen-plus",
+        "qwen:qwen-max",
+        "qwen:qwen1.5-110b-chat",
+        "qwen:qwen1.5-72b-chat",
+        "qwen:qwen1.5-32b-chat",
+        "qwen:qwen1.5-14b-chat",
+        "qwen:qwen1.5-7b-chat",
+        "qwen:qwq-32b-preview",
+        "qwen:qvq-72b-preview",
     ],
-    default_model="qwq-32b-preview",
-    src=dashscope_gradio.registry,
+    default_model="qwen:qvq-72b-preview",
+    src=ai_gradio.registry,
     accept_token=not os.getenv("DASHSCOPE_API_KEY"),
 )
 
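For reference, ai_gradio's registry is normally consumed through gr.load with provider-prefixed model names, which is what the get_app wrapper above presumably builds on. A minimal standalone sketch, assuming ai-gradio ~= 0.1.5 and a DASHSCOPE_API_KEY in the environment (mirroring the accept_token check in the diff):

import gradio as gr
import ai_gradio  # importing registers "provider:model" names such as "qwen:qvq-72b-preview"

# Loads a chat UI for one Qwen model via ai_gradio's registry; the API key is
# read from DASHSCOPE_API_KEY, as the accept_token check above implies.
demo = gr.load(
    name="qwen:qvq-72b-preview",
    src=ai_gradio.registry,
)

if __name__ == "__main__":
    demo.launch()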
requirements.txt CHANGED
@@ -513,3 +513,4 @@ xai-gradio==0.0.2
     # via anychat (pyproject.toml)
 yarl==1.18.3
     # via aiohttp
+ai-gradio==0.1.5