.gitattributes CHANGED
@@ -1 +1,35 @@
- Animated_Logo_Video_Ready.gif filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore CHANGED
@@ -1,3 +1,5 @@
+ .gradio/
+
  # Byte-compiled / optimized / DLL files
  __pycache__/
  *.py[cod]
@@ -19,18 +21,16 @@ lib64/
  parts/
  sdist/
  var/
+ wheels/
+ share/python-wheels/
  *.egg-info/
  .installed.cfg
  *.egg
  MANIFEST

- # Virtual environments
- venv/
- env/
- ENV/
- .venv/
-
  # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
  *.manifest
  *.spec
@@ -48,34 +48,115 @@ htmlcov/
  nosetests.xml
  coverage.xml
  *.cover
+ *.py,cover
  .hypothesis/
  .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/

  # Jupyter Notebook
  .ipynb_checkpoints

+ # IPython
+ profile_default/
+ ipython_config.py
+
  # pyenv
- .python-version
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site

  # mypy
  .mypy_cache/
  .dmypy.json
+ dmypy.json

  # Pyre type checker
  .pyre/

- # Gradio cache
- log/
- logs/
-
- # System files
- .DS_Store
- Thumbs.db
-
- # Lock files
- uv.lock
- poetry.lock
- Pipfile.lock
-
- # VSCode
- .vscode/
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,32 @@
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v5.0.0
+     hooks:
+       - id: check-executables-have-shebangs
+       - id: check-json
+       - id: check-merge-conflict
+       - id: check-shebang-scripts-are-executable
+       - id: check-toml
+       - id: check-yaml
+       - id: end-of-file-fixer
+       - id: mixed-line-ending
+         args: ["--fix=lf"]
+       - id: requirements-txt-fixer
+       - id: trailing-whitespace
+   - repo: https://github.com/astral-sh/ruff-pre-commit
+     rev: v0.8.6
+     hooks:
+       - id: ruff
+         args: ["--fix"]
+   - repo: https://github.com/pre-commit/mirrors-mypy
+     rev: v1.14.1
+     hooks:
+       - id: mypy
+         args: ["--ignore-missing-imports"]
+         additional_dependencies:
+           [
+             "types-python-slugify",
+             "types-requests",
+             "types-PyYAML",
+             "types-pytz",
+           ]
.python-version ADDED
@@ -0,0 +1 @@
+ 3.10
.vscode/extensions.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "recommendations": [
+     "ms-python.python",
+     "charliermarsh.ruff",
+     "streetsidesoftware.code-spell-checker",
+     "tamasfe.even-better-toml"
+   ]
+ }
.vscode/settings.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "editor.formatOnSave": true,
+   "files.insertFinalNewline": false,
+   "[python]": {
+     "editor.defaultFormatter": "charliermarsh.ruff",
+     "editor.formatOnType": true,
+     "editor.codeActionsOnSave": {
+       "source.fixAll.ruff": "explicit"
+     }
+   },
+   "[jupyter]": {
+     "files.insertFinalNewline": false
+   },
+   "notebook.output.scrolling": true,
+   "notebook.formatOnSave.enabled": true
+ }
Animated_Logo_Video_Ready.gif DELETED

Git LFS Details
  • SHA256: e4cbd816d3d2d488b7854dee5685b9074e77b3ee87b22a4b71b5f6385cc08f64
  • Pointer size: 132 Bytes
  • Size of remote file: 4.06 MB
README.md CHANGED
@@ -8,149 +8,6 @@ sdk_version: 5.23.3
  app_file: app.py
  pinned: false
  disable_embedding: true
- hf_oauth: true
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
- # Anycoder - AI Code Generation with Hugging Face Inference
-
- An ultra-clean AI-powered code generation application using Hugging Face inference providers. Minimal files for maximum simplicity.
-
- ## Features
-
- - **Hugging Face Models**: Uses DeepSeek-V3-0324 via Novita provider
- - **Modern UI**: Built with Gradio and ModelScope Studio components
- - **Code Generation**: Generates working code based on user requirements
- - **Live Preview**: Renders generated HTML code in real-time
- - **History Management**: Keeps track of conversation history
- - **Streaming**: Real-time code generation with streaming responses
- - **OAuth Login Required**: Users must sign in with their Hugging Face account to use code generation features
-
- ## Project Structure
-
- ```
- anycoder/
- ├── app.py           # Main application (everything included)
- ├── app.css          # Basic styling
- ├── pyproject.toml   # Dependencies
- └── README.md        # This file
- ```
-
- ## Setup
-
- 1. Set your Hugging Face API token:
- ```bash
- export HF_TOKEN="your_huggingface_token_here"
- ```
-
- 2. Install dependencies:
- ```bash
- uv sync
- ```
-
- 3. Run the application:
- ```bash
- uv run python app.py
- ```
-
- ## Usage
-
- 1. **Sign in with your Hugging Face account** using the login button at the top left.
- 2. Enter your application requirements in the text area
- 3. Click "send" to generate code
- 4. View the generated code in the code drawer
- 5. See the live preview in the sandbox area
- 6. Use example cards for quick prompts
-
- ## Code Example
-
- ```python
- import os
- from huggingface_hub import InferenceClient
-
- client = InferenceClient(
-     provider="novita",
-     api_key=os.environ["HF_TOKEN"],
-     bill_to="huggingface"
- )
-
- completion = client.chat.completions.create(
-     model="deepseek-ai/DeepSeek-V3-0324",
-     messages=[
-         {
-             "role": "user",
-             "content": "Create a simple todo app"
-         }
-     ],
- )
- ```
-
- ## Architecture
-
- The application uses:
- - **Gradio**: For the web interface
- - **Hugging Face Hub**: For model inference
- - **ModelScope Studio**: For UI components
- - **OAuth Login**: Requires users to sign in with Hugging Face for code generation
- - **Streaming**: For real-time code generation
-
- # Hugging Face Coder
-
- A Gradio-based application that uses Hugging Face models to generate code based on user requirements. The app supports both text-only and multimodal (text + image) code generation.
-
- ## Features
-
- - **Multiple Model Support**: DeepSeek V3, DeepSeek R1, and ERNIE-4.5-VL
- - **Multimodal Input**: Upload images to help describe your requirements
- - **Real-time Code Generation**: Stream responses from the models
- - **Live Preview**: See your generated code in action with the built-in sandbox
- - **History Management**: Keep track of your previous generations
- - **Example Templates**: Quick-start with predefined application templates
-
- ## Setup
-
- 1. Install dependencies:
- ```bash
- pip install -r requirements.txt
- ```
-
- 2. Set your Hugging Face API token as an environment variable:
- ```bash
- export HF_TOKEN="your_huggingface_token_here"
- ```
-
- 3. Run the application:
- ```bash
- python app.py
- ```
-
- ## Usage
-
- 1. **Text-only Generation**: Simply type your requirements in the text area
- 2. **Multimodal Generation**: Upload an image and describe what you want to create
- 3. **Model Selection**: Switch between different models using the model selector
- 4. **Examples**: Use the provided example templates to get started quickly
-
- ## Supported Models
-
- - **DeepSeek V3**: General code generation
- - **DeepSeek R1**: Advanced code generation
- - **ERNIE-4.5-VL**: Multimodal code generation with image understanding
-
- ## Environment Variables
-
- - `HF_TOKEN`: Your Hugging Face API token (required)
-
- ## Examples
-
- - Todo App
- - Calculator
- - Weather Dashboard
- - Chat Interface
- - E-commerce Product Card
- - Login Form
- - Dashboard Layout
- - Data Table
- - Image Gallery
- - UI from Image (multimodal)
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.css DELETED
@@ -1,73 +0,0 @@
- /* Basic styling for the coder application */
-
- :root {
-   --text-color: #333;
-   --bg-color: #f5f5f5;
-   --html-bg: white;
- }
-
- @media (prefers-color-scheme: dark) {
-   :root {
-     --text-color: #e0e0e0;
-     --bg-color: #2d2d2d;
-     --html-bg: #1e1e1e;
-   }
- }
-
- .left_header {
-   text-align: center;
-   margin-bottom: 20px;
- }
-
- .left_header h1 {
-   margin-top: 10px;
-   color: var(--text-color);
- }
-
- .right_panel {
-   background: var(--bg-color);
-   border-radius: 8px;
-   padding: 20px;
-   height: 100%;
- }
-
- .render_header {
-   display: flex;
-   gap: 8px;
-   margin-bottom: 15px;
- }
-
- .header_btn {
-   width: 12px;
-   height: 12px;
-   border-radius: 50%;
-   background: #ff5f56;
- }
-
- .header_btn:nth-child(2) {
-   background: #ffbd2e;
- }
-
- .header_btn:nth-child(3) {
-   background: #27ca3f;
- }
-
- .right_content {
-   display: flex;
-   align-items: center;
-   justify-content: center;
-   height: 800px;
- }
-
- .html_content {
-   width: 100%;
-   height: 920px;
-   border: none;
-   border-radius: 8px;
-   background: var(--html-bg);
- }
-
- .history_chatbot {
-   max-height: 960px;
-   overflow-y: auto;
- }
app.py CHANGED
@@ -1,474 +1,54 @@
- import os
- import re
- from http import HTTPStatus
- from typing import Dict, List, Optional, Tuple
- import base64
-
+ from app_huggingface import demo as demo_huggingface
+ from app_gemini_coder import demo as demo_gemini
+ from utils import get_app
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- import modelscope_studio.components.base as ms
- import modelscope_studio.components.legacy as legacy
- import modelscope_studio.components.antd as antd
-
- # Configuration
- SystemPrompt = """You are a helpful coding assistant. You help users create applications by generating code based on their requirements.
- When asked to create an application, you should:
- 1. Understand the user's requirements
- 2. Generate clean, working code
- 3. Provide HTML output when appropriate for web applications
- 4. Include necessary comments and documentation
- 5. Ensure the code is functional and follows best practices
-
- If an image is provided, analyze it and use the visual information to better understand the user's requirements.
-
- Always respond with code that can be executed or rendered directly.
-
- Always output only the HTML code inside a ```html ... ``` code block, and do not include any explanations or extra text."""
-
- # Available models
- AVAILABLE_MODELS = [
-     {
-         "name": "DeepSeek V3",
-         "id": "deepseek-ai/DeepSeek-V3-0324",
-         "description": "DeepSeek V3 model for code generation"
-     },
-     {
-         "name": "DeepSeek R1",
-         "id": "deepseek-ai/DeepSeek-R1-0528",
-         "description": "DeepSeek R1 model for code generation"
-     },
-     {
-         "name": "ERNIE-4.5-VL",
-         "id": "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT",
-         "description": "ERNIE-4.5-VL model for multimodal code generation with image support"
-     }
- ]
-
- DEMO_LIST = [
-     {
-         "title": "Todo App",
-         "description": "Create a simple todo application with add, delete, and mark as complete functionality"
-     },
-     {
-         "title": "Calculator",
-         "description": "Build a basic calculator with addition, subtraction, multiplication, and division"
-     },
-     {
-         "title": "Weather Dashboard",
-         "description": "Create a weather dashboard that displays current weather information"
-     },
-     {
-         "title": "Chat Interface",
-         "description": "Build a chat interface with message history and user input"
-     },
-     {
-         "title": "E-commerce Product Card",
-         "description": "Create a product card component for an e-commerce website"
-     },
-     {
-         "title": "Login Form",
-         "description": "Build a responsive login form with validation"
-     },
-     {
-         "title": "Dashboard Layout",
-         "description": "Create a dashboard layout with sidebar navigation and main content area"
-     },
-     {
-         "title": "Data Table",
-         "description": "Build a data table with sorting and filtering capabilities"
-     },
-     {
-         "title": "Image Gallery",
-         "description": "Create an image gallery with lightbox functionality and responsive grid layout"
-     },
-     {
-         "title": "UI from Image",
-         "description": "Upload an image of a UI design and I'll generate the HTML/CSS code for it"
-     }
- ]
-
- # HF Inference Client
- YOUR_API_TOKEN = os.getenv('HF_TOKEN')
- client = InferenceClient(
-     provider="auto",
-     api_key=YOUR_API_TOKEN,
-     bill_to="huggingface"
- )
-
- History = List[Tuple[str, str]]
- Messages = List[Dict[str, str]]
-
- def history_to_messages(history: History, system: str) -> Messages:
-     messages = [{'role': 'system', 'content': system}]
-     for h in history:
-         # Handle multimodal content in history
-         user_content = h[0]
-         if isinstance(user_content, list):
-             # Extract text from multimodal content
-             text_content = ""
-             for item in user_content:
-                 if isinstance(item, dict) and item.get("type") == "text":
-                     text_content += item.get("text", "")
-             user_content = text_content if text_content else str(user_content)
-
-         messages.append({'role': 'user', 'content': user_content})
-         messages.append({'role': 'assistant', 'content': h[1]})
-     return messages
-
- def messages_to_history(messages: Messages) -> Tuple[str, History]:
-     assert messages[0]['role'] == 'system'
-     history = []
-     for q, r in zip(messages[1::2], messages[2::2]):
-         # Extract text content from multimodal messages for history
-         user_content = q['content']
-         if isinstance(user_content, list):
-             text_content = ""
-             for item in user_content:
-                 if isinstance(item, dict) and item.get("type") == "text":
-                     text_content += item.get("text", "")
-             user_content = text_content if text_content else str(user_content)
-
-         history.append([user_content, r['content']])
-     return history
-
- def remove_code_block(text):
-     # Try to match code blocks with language markers
-     patterns = [
-         r'```(?:html|HTML)\n([\s\S]+?)\n```',  # Match ```html or ```HTML
-         r'```\n([\s\S]+?)\n```',  # Match code blocks without language markers
-         r'```([\s\S]+?)```'  # Match code blocks without line breaks
-     ]
-     for pattern in patterns:
-         match = re.search(pattern, text, re.DOTALL)
-         if match:
-             extracted = match.group(1).strip()
-             return extracted
-     # If no code block is found, check if the entire text is HTML
-     if text.strip().startswith('<!DOCTYPE html>') or text.strip().startswith('<html'):
-         return text.strip()
-     return text.strip()
-
- def history_render(history: History):
-     return gr.update(open=True), history
-
- def clear_history():
-     return []
-
- def update_image_input_visibility(model):
-     """Update image input visibility based on selected model"""
-     is_ernie_vl = model.get("id") == "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT"
-     return gr.update(visible=is_ernie_vl)
-
- def process_image_for_model(image):
-     """Convert image to base64 for model input"""
-     if image is None:
-         return None
-
-     # Convert numpy array to PIL Image if needed
-     import io
-     import base64
-     import numpy as np
-     from PIL import Image
-
-     # Handle numpy array from Gradio
-     if isinstance(image, np.ndarray):
-         image = Image.fromarray(image)
-
-     buffer = io.BytesIO()
-     image.save(buffer, format='PNG')
-     img_str = base64.b64encode(buffer.getvalue()).decode()
-     return f"data:image/png;base64,{img_str}"
-
- def create_multimodal_message(text, image=None):
-     """Create a multimodal message with text and optional image"""
-     if image is None:
-         return {"role": "user", "content": text}
-
-     content = [
-         {
-             "type": "text",
-             "text": text
-         },
-         {
-             "type": "image_url",
-             "image_url": {
-                 "url": process_image_for_model(image)
-             }
-         }
-     ]
-
-     return {"role": "user", "content": content}
-
- def send_to_sandbox(code):
-     # Add a wrapper to inject necessary permissions and ensure full HTML
-     wrapped_code = f"""
-     <!DOCTYPE html>
-     <html>
-     <head>
-         <meta charset=\"UTF-8\">
-         <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">
-         <script>
-             // Safe localStorage polyfill
-             const safeStorage = {{
-                 _data: {{}},
-                 getItem: function(key) {{ return this._data[key] || null; }},
-                 setItem: function(key, value) {{ this._data[key] = value; }},
-                 removeItem: function(key) {{ delete this._data[key]; }},
-                 clear: function() {{ this._data = {{}}; }}
-             }};
-             Object.defineProperty(window, 'localStorage', {{
-                 value: safeStorage,
-                 writable: false
-             }});
-             window.onerror = function(message, source, lineno, colno, error) {{
-                 console.error('Error:', message);
-             }};
-         </script>
-     </head>
-     <body>
-         {code}
-     </body>
-     </html>
-     """
-     encoded_html = base64.b64encode(wrapped_code.encode('utf-8')).decode('utf-8')
-     data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
-     iframe = f'<iframe src="{data_uri}" width="100%" height="920px" sandbox="allow-scripts allow-same-origin allow-forms allow-popups allow-modals allow-presentation" allow="display-capture"></iframe>'
-     return iframe
-
- def demo_card_click(e: gr.EventData):
-     try:
-         # Get the index from the event data
-         if hasattr(e, '_data') and e._data:
-             # Try different ways to get the index
-             if 'index' in e._data:
-                 index = e._data['index']
-             elif 'component' in e._data and 'index' in e._data['component']:
-                 index = e._data['component']['index']
-             elif 'target' in e._data and 'index' in e._data['target']:
-                 index = e._data['target']['index']
-             else:
-                 # If we can't get the index, try to extract it from the card data
-                 index = 0
-         else:
-             index = 0
-
-         # Ensure index is within bounds
-         if index >= len(DEMO_LIST):
-             index = 0
-
-         return DEMO_LIST[index]['description']
-     except (KeyError, IndexError, AttributeError) as e:
-         # Return the first demo description as fallback
-         return DEMO_LIST[0]['description']

- # Main application
- with gr.Blocks(css_paths="app.css") as demo:
-     history = gr.State([])
-     setting = gr.State({
-         "system": SystemPrompt,
-     })
-     current_model = gr.State(AVAILABLE_MODELS[0])  # Default to first model
-
-     with ms.Application() as app:
-         with antd.ConfigProvider():
-             with antd.Row(gutter=[32, 12]) as layout:
-                 with antd.Col(span=24, md=8):
-                     with antd.Flex(vertical=True, gap="middle", wrap=True):
-                         gr.LoginButton()
-                         login_message = gr.Markdown("", visible=False)
-                         header = gr.HTML("""
-                             <div class="left_header">
-                                 <img src="https://huggingface.co/spaces/akhaliq/anycoder/resolve/main/Animated_Logo_Video_Ready.gif" width="200px" />
-                                 <h1>Hugging Face Coder</h1>
-                             </div>
-                         """)
-                         current_model_display = gr.Markdown("**Current Model:** DeepSeek V3", visible=False)
-                         input = antd.InputTextarea(
-                             size="large", allow_clear=True, placeholder="Please enter what kind of application you want", visible=False)
-                         image_input = gr.Image(label="Upload an image (only for ERNIE-4.5-VL model)", visible=False)
-                         btn = antd.Button("send", type="primary", size="large", visible=False)
-                         clear_btn = antd.Button("clear history", type="default", size="large", visible=False)
-
-                         antd.Divider("examples", visible=False)
-                         with antd.Flex(gap="small", wrap=True, visible=False) as examples_flex:
-                             for i, demo_item in enumerate(DEMO_LIST):
-                                 with antd.Card(hoverable=True, title=demo_item["title"]) as demoCard:
-                                     antd.CardMeta(description=demo_item["description"])
-                                 demoCard.click(lambda e, idx=i: (DEMO_LIST[idx]['description'], None), outputs=[input, image_input])
-
-                         antd.Divider("setting", visible=False)
-                         with antd.Flex(gap="small", wrap=True, visible=False) as setting_flex:
-                             settingPromptBtn = antd.Button(
-                                 "⚙️ set system Prompt", type="default", visible=False)
-                             modelBtn = antd.Button("🤖 switch model", type="default", visible=False)
-                             codeBtn = antd.Button("🧑‍💻 view code", type="default", visible=False)
-                             historyBtn = antd.Button("📜 history", type="default", visible=False)
-
-                     with antd.Modal(open=False, title="set system Prompt", width="800px") as system_prompt_modal:
-                         systemPromptInput = antd.InputTextarea(
-                             SystemPrompt, auto_size=True)
-
-                     settingPromptBtn.click(lambda: gr.update(
-                         open=True), inputs=[], outputs=[system_prompt_modal])
-                     system_prompt_modal.ok(lambda input: ({"system": input}, gr.update(
-                         open=False)), inputs=[systemPromptInput], outputs=[setting, system_prompt_modal])
-                     system_prompt_modal.cancel(lambda: gr.update(
-                         open=False), outputs=[system_prompt_modal])
-
-                     with antd.Modal(open=False, title="Select Model", width="600px") as model_modal:
-                         with antd.Flex(vertical=True, gap="middle"):
-                             for i, model in enumerate(AVAILABLE_MODELS):
-                                 with antd.Card(hoverable=True, title=model["name"]) as modelCard:
-                                     antd.CardMeta(description=model["description"])
-                                 modelCard.click(lambda m=model: (m, gr.update(open=False), f"**Current Model:** {m['name']}", update_image_input_visibility(m)), outputs=[current_model, model_modal, current_model_display, image_input])
-
-                     modelBtn.click(lambda: gr.update(open=True), inputs=[], outputs=[model_modal])
-
-                     with antd.Drawer(open=False, title="code", placement="left", width="750px") as code_drawer:
-                         code_output = legacy.Markdown()
-
-                     codeBtn.click(lambda: gr.update(open=True),
-                                   inputs=[], outputs=[code_drawer])
-                     code_drawer.close(lambda: gr.update(
-                         open=False), inputs=[], outputs=[code_drawer])
-
-                     with antd.Drawer(open=False, title="history", placement="left", width="900px") as history_drawer:
-                         history_output = legacy.Chatbot(show_label=False, flushing=False, height=960, elem_classes="history_chatbot")
-
-                     historyBtn.click(history_render, inputs=[history], outputs=[history_drawer, history_output])
-                     history_drawer.close(lambda: gr.update(
-                         open=False), inputs=[], outputs=[history_drawer])
-
-                 with antd.Col(span=24, md=16):
-                     with ms.Div(elem_classes="right_panel"):
-                         gr.HTML('<div class="render_header"><span class="header_btn"></span><span class="header_btn"></span><span class="header_btn"></span></div>')
-                         # Move sandbox outside of tabs for always-on visibility
-                         sandbox = gr.HTML(elem_classes="html_content")
-                         with antd.Tabs(active_key="empty", render_tab_bar="() => null") as state_tab:
-                             with antd.Tabs.Item(key="empty"):
-                                 empty = antd.Empty(description="empty input", elem_classes="right_content")
-                             with antd.Tabs.Item(key="loading"):
-                                 loading = antd.Spin(True, tip="coding...", size="large", elem_classes="right_content")
-
-     def update_login_ui(profile: gr.OAuthProfile | None):
-         if profile is None:
-             return (
-                 gr.update(value="**You must sign in with Hugging Face to use this app.**", visible=True),
-                 gr.update(visible=False),
-                 gr.update(visible=False),
-                 gr.update(visible=False),
-                 gr.update(visible=False),
-                 gr.update(visible=False),
-                 gr.update(visible=False),
-                 gr.update(visible=False),
-                 gr.update(visible=False),
-                 gr.update(visible=False),
-                 gr.update(visible=False),
-                 gr.update(visible=False),
-             )
-         else:
-             return (
-                 gr.update(visible=False),
-                 gr.update(visible=True),
-                 gr.update(visible=False),  # Image input hidden by default (DeepSeek V3)
-                 gr.update(visible=True),
-                 gr.update(visible=True),
-                 gr.update(visible=True),
-                 gr.update(visible=True),
-                 gr.update(visible=True),
-                 gr.update(visible=True),
-                 gr.update(visible=True),
-                 gr.update(visible=True),
-                 gr.update(visible=True),
-             )
-
-     def generation_code(query: Optional[str], image: Optional[gr.Image], _setting: Dict[str, str], _history: Optional[History], profile: gr.OAuthProfile | None, _current_model: Dict):
-         if profile is None:
-             return (
-                 "Please sign in with Hugging Face to use this feature.",
-                 _history,
-                 None,
-                 gr.update(active_key="empty"),
-                 gr.update(open=True),
-             )
-         if query is None:
-             query = ''
-         if _history is None:
-             _history = []
-         messages = history_to_messages(_history, _setting['system'])
-
-         # Create multimodal message if image is provided
-         if image is not None:
-             messages.append(create_multimodal_message(query, image))
-         else:
-             messages.append({'role': 'user', 'content': query})
-
-         try:
-             completion = client.chat.completions.create(
-                 model=_current_model["id"],
-                 messages=messages,
-                 stream=True
-             )
-
-             content = ""
-             for chunk in completion:
-                 if chunk.choices[0].delta.content:
-                     content += chunk.choices[0].delta.content
-                     yield {
-                         code_output: content,
-                         state_tab: gr.update(active_key="loading"),
-                         code_drawer: gr.update(open=True),
-                     }
-
-             # Final response
-             _history = messages_to_history(messages + [{
-                 'role': 'assistant',
-                 'content': content
-             }])
-
-             yield {
-                 code_output: content,
-                 history: _history,
-                 sandbox: send_to_sandbox(remove_code_block(content)),
-                 state_tab: gr.update(active_key="render"),
-                 code_drawer: gr.update(open=False),
-             }
-
-         except Exception as e:
-             error_message = f"Error: {str(e)}"
-             yield {
-                 code_output: error_message,
-                 state_tab: gr.update(active_key="empty"),
-                 code_drawer: gr.update(open=True),
-             }
-
-     btn.click(
-         generation_code,
-         inputs=[input, image_input, setting, history, current_model],
-         outputs=[code_output, history, sandbox, state_tab, code_drawer]
-     )
-
-     clear_btn.click(clear_history, inputs=[], outputs=[history])
-
-     demo.load(
-         update_login_ui,
-         inputs=None,
-         outputs=[
-             login_message,
-             input,
-             image_input,
-             current_model_display,
-             btn,
-             clear_btn,
-             examples_flex,
-             setting_flex,
-             settingPromptBtn,
-             modelBtn,
-             codeBtn,
-             historyBtn,
-         ]
-     )
+ # Create mapping of providers to their code snippets
+ PROVIDER_SNIPPETS = {
+     "Hugging Face": """
+ import gradio as gr
+ import ai_gradio
+ gr.load(
+     name='huggingface:deepseek-ai/DeepSeek-R1',
+     src=ai_gradio.registry,
+     coder=True,
+     provider="together"
+ ).launch()""",
+     "Gemini Coder": """
+ import gradio as gr
+ import ai_gradio
+ gr.load(
+     name='gemini:gemini-2.5-pro-exp-03-25',
+     src=ai_gradio.registry,
+     coder=True,
+     provider="together"
+ ).launch()
+ """,
+ }
+ # Create mapping of providers to their demos
+ PROVIDERS = {
+     "Hugging Face": demo_huggingface,
+     "Gemini Coder": demo_gemini,
+ }
+
+ # Modified get_app implementation
+ demo = gr.Blocks()
+ with demo:
+
+     provider_dropdown = gr.Dropdown(choices=list(PROVIDERS.keys()), value="Hugging Face", label="Select code snippet")
+     code_display = gr.Code(label="Provider Code Snippet", language="python", value=PROVIDER_SNIPPETS["Hugging Face"])
+
+     def update_code(provider):
+         return PROVIDER_SNIPPETS.get(provider, "Code snippet not available")
+
+     provider_dropdown.change(fn=update_code, inputs=[provider_dropdown], outputs=[code_display])
+
+     selected_demo = get_app(
+         models=list(PROVIDERS.keys()),
+         default_model="Hugging Face",
+         src=PROVIDERS,
+         dropdown_label="Select Provider",
+     )

  if __name__ == "__main__":
-     demo.queue(default_concurrency_limit=20).launch(ssr_mode=False)
+     demo.queue(api_open=False).launch(show_api=False)
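Note: `utils.get_app` is imported above but `utils.py` itself is not part of this diff. The sketch below is a hypothetical reconstruction inferred purely from the call sites in this diff (`app.py`, `app_cerebras.py`, `app_claude.py`, `app_cohere.py`); the real implementation may differ.

```python
# Hypothetical sketch of utils.get_app, inferred from its call sites in this
# diff. Not the actual implementation: utils.py is not included in the diff.
import gradio as gr


def get_app(models, default_model, src, dropdown_label="Select Model", **kwargs):
    """Wrap one demo per model behind a dropdown selector."""
    with gr.Blocks() as demo:
        dropdown = gr.Dropdown(choices=models, value=default_model, label=dropdown_label)
        # app.py passes src as a dict of prebuilt Blocks demos; render each
        # one in its own column and toggle visibility on selection.
        if isinstance(src, dict):
            columns = {}
            for name, sub_demo in src.items():
                with gr.Column(visible=(name == default_model)) as col:
                    sub_demo.render()
                columns[name] = col

            def switch(choice):
                return [gr.update(visible=(name == choice)) for name in columns]

            dropdown.change(switch, dropdown, list(columns.values()))
        # Other callers pass a provider registry (e.g. anthropic_gradio.registry),
        # in which case the helper would presumably build the demo via gr.load.
    return demo
```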
app_allenai.py ADDED
@@ -0,0 +1,67 @@
+ import gradio as gr
+ from gradio_client import Client
+
+ MODELS = {"OLMo-2-1124-13B-Instruct": "akhaliq/olmo-anychat", "Llama-3.1-Tulu-3-8B": "akhaliq/allen-test"}
+
+
+ def create_chat_fn(client):
+     def chat(message, history):
+         response = client.predict(
+             message=message,
+             system_prompt="You are a helpful AI assistant.",
+             temperature=0.7,
+             max_new_tokens=1024,
+             top_k=40,
+             repetition_penalty=1.1,
+             top_p=0.95,
+             api_name="/chat",
+         )
+         return response
+
+     return chat
+
+
+ def set_client_for_session(model_name, request: gr.Request):
+     headers = {}
+     if request and hasattr(request, "request") and hasattr(request.request, "headers"):
+         x_ip_token = request.request.headers.get("x-ip-token")
+         if x_ip_token:
+             headers["X-IP-Token"] = x_ip_token
+
+     return Client(MODELS[model_name], headers=headers)
+
+
+ def safe_chat_fn(message, history, client):
+     if client is None:
+         return "Error: Client not initialized. Please refresh the page."
+     return create_chat_fn(client)(message, history)
+
+
+ with gr.Blocks() as demo:
+     client = gr.State()
+
+     model_dropdown = gr.Dropdown(
+         choices=list(MODELS.keys()), value="OLMo-2-1124-13B-Instruct", label="Select Model", interactive=True
+     )
+
+     chat_interface = gr.ChatInterface(fn=safe_chat_fn, additional_inputs=[client])
+
+     # Update client when model changes (request must be annotated so Gradio injects it)
+     def update_model(model_name, request: gr.Request):
+         return set_client_for_session(model_name, request)
+
+     model_dropdown.change(
+         fn=update_model,
+         inputs=[model_dropdown],
+         outputs=[client],
+     )
+
+     # Initialize client on page load
+     demo.load(
+         fn=set_client_for_session,
+         inputs=gr.State("OLMo-2-1124-13B-Instruct"),
+         outputs=client,
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
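For reference, the `client.predict` call wrapped by `create_chat_fn` above can be exercised directly with `gradio_client`; a minimal sketch, assuming the `akhaliq/olmo-anychat` Space is reachable and its `/chat` API is unchanged:

```python
# Minimal direct use of the /chat endpoint wrapped by create_chat_fn above.
# Assumes the akhaliq/olmo-anychat Space is up and its API signature unchanged.
from gradio_client import Client

client = Client("akhaliq/olmo-anychat")
reply = client.predict(
    message="Write a haiku about autumn.",
    system_prompt="You are a helpful AI assistant.",
    temperature=0.7,
    max_new_tokens=1024,
    top_k=40,
    repetition_penalty=1.1,
    top_p=0.95,
    api_name="/chat",
)
print(reply)
```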
app_cerebras.py ADDED
@@ -0,0 +1,19 @@
+ import os
+
+ import cerebras_gradio
+
+ from utils import get_app
+
+ demo = get_app(
+     models=[
+         "llama3.1-8b",
+         "llama3.1-70b",
+         "llama3.1-405b",
+     ],
+     default_model="llama3.1-70b",
+     src=cerebras_gradio.registry,
+     accept_token=not os.getenv("CEREBRAS_API_KEY"),
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
app_claude.py ADDED
@@ -0,0 +1,21 @@
+ import os
+
+ import anthropic_gradio
+
+ from utils import get_app
+
+ demo = get_app(
+     models=[
+         "claude-3-5-sonnet-20241022",
+         "claude-3-5-haiku-20241022",
+         "claude-3-opus-20240229",
+         "claude-3-sonnet-20240229",
+         "claude-3-haiku-20240307",
+     ],
+     default_model="claude-3-5-sonnet-20241022",
+     src=anthropic_gradio.registry,
+     accept_token=not os.getenv("ANTHROPIC_API_KEY"),
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
app_cohere.py ADDED
@@ -0,0 +1,21 @@
+ import os
+
+ import cohere_gradio
+
+ from utils import get_app
+
+ demo = get_app(
+     models=[
+         "command-r",
+         "command-r-08-2024",
+         "command-r-plus",
+         "command-r-plus-08-2024",
+         "command-r7b-12-2024",
+     ],
+     default_model="command-r7b-12-2024",
+     src=cohere_gradio.registry,
+     accept_token=not os.getenv("COHERE_API_KEY"),
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
app_compare.py ADDED
@@ -0,0 +1,210 @@
+ import os
+
+ import google.generativeai as genai
+ import gradio as gr
+ import openai
+ from anthropic import Anthropic
+ from openai import OpenAI  # Add explicit OpenAI import
+
+
+ def get_all_models():
+     """Get all available models from the registries."""
+     return [
+         "SambaNova: Meta-Llama-3.2-1B-Instruct",
+         "SambaNova: Meta-Llama-3.2-3B-Instruct",
+         "SambaNova: Llama-3.2-11B-Vision-Instruct",
+         "SambaNova: Llama-3.2-90B-Vision-Instruct",
+         "SambaNova: Meta-Llama-3.1-8B-Instruct",
+         "SambaNova: Meta-Llama-3.1-70B-Instruct",
+         "SambaNova: Meta-Llama-3.1-405B-Instruct",
+         "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
+         "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
+         "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
+         "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
+         "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
+         "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
+         "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
+         "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
+         "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
+     ]
+
+
+ def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
+     """Generate a prompt for models to discuss and build upon previous
+     responses.
+     """
+     prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
+
+ Previous responses from other AI models:
+ {chr(10).join(f"- {response}" for response in previous_responses)}
+
+ Please provide your perspective while:
+ 1. Acknowledging key insights from previous responses
+ 2. Adding any missing important points
+ 3. Respectfully noting if you disagree with anything and explaining why
+ 4. Building towards a complete answer
+
+ Keep your response focused and concise (max 3-4 paragraphs)."""
+     return prompt
+
+
+ def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
+     """Generate a prompt for final consensus building."""
+     return f"""Review this multi-AI discussion about: "{original_question}"
+
+ Discussion history:
+ {chr(10).join(discussion_history)}
+
+ As a final synthesizer, please:
+ 1. Identify the key points where all models agreed
+ 2. Explain how any disagreements were resolved
+ 3. Present a clear, unified answer that represents our collective best understanding
+ 4. Note any remaining uncertainties or caveats
+
+ Keep the final consensus concise but complete."""
+
+
+ def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
+     import openai
+
+     client = openai.OpenAI(api_key=api_key)
+     response = client.chat.completions.create(model=model, messages=messages)
+     return response.choices[0].message.content
+
+
+ def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
+     """Chat with Anthropic's Claude model."""
+     client = Anthropic(api_key=api_key)
+     response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
+     return response.content[0].text
+
+
+ def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
+     """Chat with Gemini Pro model."""
+     genai.configure(api_key=api_key)
+     model = genai.GenerativeModel("gemini-pro")
+
+     # Convert messages to Gemini format
+     gemini_messages = []
+     for msg in messages:
+         role = "user" if msg["role"] == "user" else "model"
+         gemini_messages.append({"role": role, "parts": [msg["content"]]})
+
+     response = model.generate_content([m["parts"][0] for m in gemini_messages])
+     return response.text
+
+
+ def chat_with_sambanova(
+     messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
+ ) -> str:
+     """Chat with SambaNova's models using their OpenAI-compatible API."""
+     client = openai.OpenAI(
+         api_key=api_key,
+         base_url="https://api.sambanova.ai/v1",
+     )
+
+     response = client.chat.completions.create(
+         model=model_name,  # Use the specific model name passed in
+         messages=messages,
+         temperature=0.1,
+         top_p=0.1,
+     )
+     return response.choices[0].message.content
+
+
+ def chat_with_hyperbolic(
+     messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
+ ) -> str:
+     """Chat with Hyperbolic's models using their OpenAI-compatible API."""
+     client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
+
+     # Add system message to the start of the messages list
+     full_messages = [
+         {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
+         *messages,
+     ]
+
+     response = client.chat.completions.create(
+         model=model_name,  # Use the specific model name passed in
+         messages=full_messages,
+         temperature=0.7,
+         max_tokens=1024,
+     )
+     return response.choices[0].message.content
+
+
+ def multi_model_consensus(
+     question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
+ ) -> list[tuple[str, str]]:
+     if not selected_models:
+         raise gr.Error("Please select at least one model to chat with.")
+
+     chat_history = []
+     progress(0, desc="Getting responses from all models...")
+
+     # Get a response from each selected model in turn
+     for i, model in enumerate(selected_models):
+         provider, model_name = model.split(": ", 1)
+         progress((i + 1) / len(selected_models), desc=f"Getting response from {model}...")
+
+         try:
+             if provider == "Anthropic":
+                 api_key = os.getenv("ANTHROPIC_API_KEY")
+                 response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
+             elif provider == "SambaNova":
+                 api_key = os.getenv("SAMBANOVA_API_KEY")
+                 response = chat_with_sambanova(
+                     messages=[
+                         {"role": "system", "content": "You are a helpful assistant"},
+                         {"role": "user", "content": question},
+                     ],
+                     api_key=api_key,
+                     model_name=model_name,
+                 )
+             elif provider == "Hyperbolic":
+                 api_key = os.getenv("HYPERBOLIC_API_KEY")
+                 response = chat_with_hyperbolic(
+                     messages=[{"role": "user", "content": question}],
+                     api_key=api_key,
+                     model_name=model_name,
+                 )
+             else:  # Gemini
+                 api_key = os.getenv("GEMINI_API_KEY")
+                 response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
+
+             chat_history.append((model, response))
+         except Exception as e:
+             chat_history.append((model, f"Error: {e!s}"))
+
+     progress(1.0, desc="Done!")
+     return chat_history
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# Model Response Comparison")
+     gr.Markdown("""Select multiple models to compare their responses""")
+
+     with gr.Row():
+         with gr.Column():
+             model_selector = gr.Dropdown(
+                 choices=get_all_models(),
+                 multiselect=True,
+                 label="Select Models",
+                 info="Choose models to compare",
+                 value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
+             )
+
+     chatbot = gr.Chatbot(height=600, label="Model Responses")
+     msg = gr.Textbox(label="Prompt", placeholder="Ask a question to compare model responses...")
+
+     def respond(message, selected_models):
+         chat_history = multi_model_consensus(message, selected_models, rounds=1)
+         return chat_history
+
+     msg.submit(respond, [msg, model_selector], [chatbot])
+
+     for fn in demo.fns.values():
+         fn.api_name = False
+
+ if __name__ == "__main__":
+     demo.launch()
app_crew.py ADDED
@@ -0,0 +1,8 @@
+ import ai_gradio
+ import gradio as gr
+
+ demo = gr.load(
+     name="crewai:gpt-4-turbo",
+     crew_type="article",  # or 'support'
+     src=ai_gradio.registry,
+ )
app_deepseek.py ADDED
@@ -0,0 +1,23 @@
+ import ai_gradio
+
+ from utils_ai_gradio import get_app
+
+ # Get the deepseek models but keep their full names for loading
+ DEEPSEEK_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("deepseek:")]
+
+ # Create display names without the prefix
+ DEEPSEEK_MODELS_DISPLAY = [k.replace("deepseek:", "") for k in DEEPSEEK_MODELS_FULL]
+
+
+ # Create and launch the interface using get_app utility
+ demo = get_app(
+     models=DEEPSEEK_MODELS_FULL,  # Use the full names with prefix
+     default_model=DEEPSEEK_MODELS_FULL[-1],
+     dropdown_label="Select DeepSeek Model",
+     choices=DEEPSEEK_MODELS_DISPLAY,  # Display names without prefix
+     fill_height=True,
+     coder=True,
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
app_experimental.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import random
3
+
4
+ import google.generativeai as genai
5
+ import gradio as gr
6
+ import openai
7
+ from anthropic import Anthropic
8
+ from openai import OpenAI # Add explicit OpenAI import
9
+
10
+
11
+ def get_all_models():
12
+ """Get all available models from the registries."""
13
+ return [
14
+ "SambaNova: Meta-Llama-3.2-1B-Instruct",
15
+ "SambaNova: Meta-Llama-3.2-3B-Instruct",
16
+ "SambaNova: Llama-3.2-11B-Vision-Instruct",
17
+ "SambaNova: Llama-3.2-90B-Vision-Instruct",
18
+ "SambaNova: Meta-Llama-3.1-8B-Instruct",
19
+ "SambaNova: Meta-Llama-3.1-70B-Instruct",
20
+ "SambaNova: Meta-Llama-3.1-405B-Instruct",
21
+ "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
22
+ "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
23
+ "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
24
+ "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
25
+ "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
26
+ "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
27
+ "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
28
+ "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
29
+ "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
30
+ ]
31
+
32
+
33
+ def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
34
+ """Generate a prompt for models to discuss and build upon previous
35
+ responses.
36
+ """
37
+ prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
38
+
39
+ Previous responses from other AI models:
40
+ {chr(10).join(f"- {response}" for response in previous_responses)}
41
+
42
+ Please provide your perspective while:
43
+ 1. Acknowledging key insights from previous responses
44
+ 2. Adding any missing important points
45
+ 3. Respectfully noting if you disagree with anything and explaining why
46
+ 4. Building towards a complete answer
47
+
48
+ Keep your response focused and concise (max 3-4 paragraphs)."""
49
+ return prompt
50
+
51
+
52
+ def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
53
+ """Generate a prompt for final consensus building."""
54
+ return f"""Review this multi-AI discussion about: "{original_question}"
55
+
56
+ Discussion history:
57
+ {chr(10).join(discussion_history)}
58
+
59
+ As a final synthesizer, please:
60
+ 1. Identify the key points where all models agreed
61
+ 2. Explain how any disagreements were resolved
62
+ 3. Present a clear, unified answer that represents our collective best understanding
63
+ 4. Note any remaining uncertainties or caveats
64
+
65
+ Keep the final consensus concise but complete."""
66
+
67
+
68
+ def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
69
+ import openai
70
+
71
+ client = openai.OpenAI(api_key=api_key)
72
+ response = client.chat.completions.create(model=model, messages=messages)
73
+ return response.choices[0].message.content
74
+
75
+
76
+ def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
77
+ """Chat with Anthropic's Claude model."""
78
+ client = Anthropic(api_key=api_key)
79
+ response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
80
+ return response.content[0].text
81
+
82
+
83
+ def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
84
+ """Chat with Gemini Pro model."""
85
+ genai.configure(api_key=api_key)
86
+ model = genai.GenerativeModel("gemini-pro")
87
+
88
+ # Convert messages to Gemini format
89
+ gemini_messages = []
90
+ for msg in messages:
91
+ role = "user" if msg["role"] == "user" else "model"
92
+ gemini_messages.append({"role": role, "parts": [msg["content"]]})
93
+
94
+ response = model.generate_content([m["parts"][0] for m in gemini_messages])
95
+ return response.text
96
+
97
+
98
+ def chat_with_sambanova(
99
+ messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
100
+ ) -> str:
101
+ """Chat with SambaNova's models using their OpenAI-compatible API."""
102
+ client = openai.OpenAI(
103
+ api_key=api_key,
104
+ base_url="https://api.sambanova.ai/v1",
105
+ )
106
+
107
+ response = client.chat.completions.create(
108
+ model=model_name,
109
+ messages=messages,
110
+ temperature=0.1,
111
+ top_p=0.1, # Use the specific model name passed in
112
+ )
113
+ return response.choices[0].message.content
114
+
115
+
116
+ def chat_with_hyperbolic(
117
+    messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
+) -> str:
+    """Chat with Hyperbolic's models using their OpenAI-compatible API."""
+    client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
+
+    # Add system message to the start of the messages list
+    full_messages = [
+        {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
+        *messages,
+    ]
+
+    response = client.chat.completions.create(
+        model=model_name,  # Use the specific model name passed in
+        messages=full_messages,
+        temperature=0.7,
+        max_tokens=1024,
+    )
+    return response.choices[0].message.content
+
+
+def multi_model_consensus(
+    question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
+) -> list[tuple[str, str]]:
+    if not selected_models:
+        raise gr.Error("Please select at least one model to chat with.")
+
+    chat_history = []
+    discussion_history = []
+
+    # Initial responses
+    progress(0, desc="Getting initial responses...")
+    initial_responses = []
+    for i, model in enumerate(selected_models):
+        provider, model_name = model.split(": ", 1)
+
+        try:
+            if provider == "Anthropic":
+                api_key = os.getenv("ANTHROPIC_API_KEY")
+                response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
+            elif provider == "SambaNova":
+                api_key = os.getenv("SAMBANOVA_API_KEY")
+                response = chat_with_sambanova(
+                    messages=[
+                        {"role": "system", "content": "You are a helpful assistant"},
+                        {"role": "user", "content": question},
+                    ],
+                    api_key=api_key,
+                )
+            elif provider == "Hyperbolic":  # Add Hyperbolic case
+                api_key = os.getenv("HYPERBOLIC_API_KEY")
+                response = chat_with_hyperbolic(messages=[{"role": "user", "content": question}], api_key=api_key)
+            else:  # Gemini
+                api_key = os.getenv("GEMINI_API_KEY")
+                response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
+
+            initial_responses.append(f"{model}: {response}")
+            discussion_history.append(f"Initial response from {model}:\n{response}")
+            chat_history.append((f"Initial response from {model}", response))
+        except Exception as e:
+            chat_history.append((f"Error from {model}", str(e)))
+
+    # Discussion rounds
+    for round_num in range(rounds):
+        progress((round_num + 1) / (rounds + 2), desc=f"Discussion round {round_num + 1}...")
+        round_responses = []
+
+        random.shuffle(selected_models)  # Randomize order each round
+        for model in selected_models:
+            provider, model_name = model.split(": ", 1)
+
+            try:
+                discussion_prompt = generate_discussion_prompt(question, discussion_history)
+                if provider == "Anthropic":
+                    api_key = os.getenv("ANTHROPIC_API_KEY")
+                    response = chat_with_anthropic(
+                        messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
+                    )
+                elif provider == "SambaNova":
+                    api_key = os.getenv("SAMBANOVA_API_KEY")
+                    response = chat_with_sambanova(
+                        messages=[
+                            {"role": "system", "content": "You are a helpful assistant"},
+                            {"role": "user", "content": discussion_prompt},
+                        ],
+                        api_key=api_key,
+                    )
+                elif provider == "Hyperbolic":  # Add Hyperbolic case
+                    api_key = os.getenv("HYPERBOLIC_API_KEY")
+                    response = chat_with_hyperbolic(
+                        messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
+                    )
+                else:  # Gemini
+                    api_key = os.getenv("GEMINI_API_KEY")
+                    response = chat_with_gemini(
+                        messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
+                    )
+
+                round_responses.append(f"{model}: {response}")
+                discussion_history.append(f"Round {round_num + 1} - {model}:\n{response}")
+                chat_history.append((f"Round {round_num + 1} - {model}", response))
+            except Exception as e:
+                chat_history.append((f"Error from {model} in round {round_num + 1}", str(e)))
+
+    # Final consensus
+    progress(0.9, desc="Building final consensus...")
+    model = selected_models[0]
+    provider, model_name = model.split(": ", 1)
+
+    try:
+        consensus_prompt = generate_consensus_prompt(question, discussion_history)
+        if provider == "Anthropic":
+            api_key = os.getenv("ANTHROPIC_API_KEY")
+            final_consensus = chat_with_anthropic(
+                messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
+            )
+        elif provider == "SambaNova":
+            api_key = os.getenv("SAMBANOVA_API_KEY")
+            final_consensus = chat_with_sambanova(
+                messages=[
+                    {"role": "system", "content": "You are a helpful assistant"},
+                    {"role": "user", "content": consensus_prompt},
+                ],
+                api_key=api_key,
+            )
+        elif provider == "Hyperbolic":  # Add Hyperbolic case
+            api_key = os.getenv("HYPERBOLIC_API_KEY")
+            final_consensus = chat_with_hyperbolic(
+                messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
+            )
+        else:  # Gemini
+            api_key = os.getenv("GEMINI_API_KEY")
+            final_consensus = chat_with_gemini(
+                messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
+            )
+    except Exception as e:
+        final_consensus = f"Error getting consensus from {model}: {e!s}"
+
+    chat_history.append(("Final Consensus", final_consensus))
+
+    progress(1.0, desc="Done!")
+    return chat_history
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Experimental Multi-Model Consensus Chat")
+    gr.Markdown(
+        """Select multiple models to collaborate on answering your question.
+        The models will discuss with each other and attempt to reach a consensus.
+        Maximum 3 models can be selected at once."""
+    )
+
+    with gr.Row():
+        with gr.Column():
+            model_selector = gr.Dropdown(
+                choices=get_all_models(),
+                multiselect=True,
+                label="Select Models (max 3)",
+                info="Choose up to 3 models to participate in the discussion",
+                value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
+                max_choices=3,
+            )
+            rounds_slider = gr.Slider(
+                minimum=1,
+                maximum=2,
+                value=1,
+                step=1,
+                label="Discussion Rounds",
+                info="Number of rounds of discussion between models",
+            )
+
+    chatbot = gr.Chatbot(height=600, label="Multi-Model Discussion")
+    msg = gr.Textbox(label="Your Question", placeholder="Ask a question for the models to discuss...")
+
+    def respond(message, selected_models, rounds):
+        chat_history = multi_model_consensus(message, selected_models, rounds)
+        return chat_history
+
+    msg.submit(respond, [msg, model_selector, rounds_slider], [chatbot], api_name="consensus_chat")
+
+for fn in demo.fns.values():
+    fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
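Note on the block above: the Anthropic/SambaNova/Hyperbolic/Gemini dispatch is written out three times (initial responses, discussion rounds, final consensus). A table-driven helper could express it once. The sketch below is not part of the diff; PROVIDER_DISPATCH and call_provider are hypothetical names, and the chat_with_* functions are the ones defined earlier in app_experimental.py.

import os

# Hypothetical refactor: one lookup table instead of three if/elif chains.
PROVIDER_DISPATCH = {
    "Anthropic": ("ANTHROPIC_API_KEY", chat_with_anthropic),
    "SambaNova": ("SAMBANOVA_API_KEY", chat_with_sambanova),
    "Hyperbolic": ("HYPERBOLIC_API_KEY", chat_with_hyperbolic),
    "Gemini": ("GEMINI_API_KEY", chat_with_gemini),
}


def call_provider(provider: str, prompt: str) -> str:
    # Unknown providers fall back to Gemini, mirroring the `else` branch above.
    env_var, chat_fn = PROVIDER_DISPATCH.get(provider, PROVIDER_DISPATCH["Gemini"])
    messages = [{"role": "user", "content": prompt}]
    if provider == "SambaNova":
        # The SambaNova calls in the diff prepend an explicit system message.
        messages = [{"role": "system", "content": "You are a helpful assistant"}, *messages]
    return chat_fn(messages=messages, api_key=os.getenv(env_var))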
app_fal.py ADDED
@@ -0,0 +1,16 @@
+import fal_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "fal-ai/ltx-video",
+        "fal-ai/ltx-video/image-to-video",
+        "fal-ai/luma-photon",
+    ],
+    default_model="fal-ai/luma-photon",
+    src=fal_gradio.registry,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_fireworks.py ADDED
@@ -0,0 +1,19 @@
+import os
+
+import fireworks_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "f1-preview",
+        "f1-mini-preview",
+        "llama-v3p3-70b-instruct",
+    ],
+    default_model="llama-v3p3-70b-instruct",
+    src=fireworks_gradio.registry,
+    accept_token=not os.getenv("FIREWORKS_API_KEY"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
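accept_token=not os.getenv("FIREWORKS_API_KEY") is a gate that recurs below (app_openai_voice.py, app_perplexity.py): the interface prompts the visitor for a token only when the Space itself has no key configured. Spelled out, under the assumption that get_app forwards the flag to the underlying loader:

import os

# True only when the env var is unset or empty, so a deployed Space with a
# configured key never shows the token input box.
accept_token = not os.getenv("FIREWORKS_API_KEY")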
app_gemini.py ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Gemini models but keep their full names for loading
+GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
+
+# Create display names without the prefix
+GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
+    default_model=GEMINI_MODELS_FULL[-1],
+    dropdown_label="Select Gemini Model",
+    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
+    src=ai_gradio.registry,
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
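The filter-then-strip-prefix pattern in app_gemini.py recurs in nearly every registry-backed file below (Groq, Hyperbolic, LangChain, MiniMax, Mistral, NVIDIA, OpenAI, OpenRouter, Qwen). A small helper could centralize it; a sketch, assuming ai_gradio.registry is a mapping keyed by "provider:model" strings — registry_models is a hypothetical name, not in the repo:

import ai_gradio


def registry_models(provider: str) -> tuple[list[str], list[str]]:
    # Returns (full keys for loading, display names without the provider prefix).
    prefix = f"{provider}:"
    full = [k for k in ai_gradio.registry if k.startswith(prefix)]
    return full, [k.removeprefix(prefix) for k in full]


# Equivalent to the two module-level lists above:
GEMINI_MODELS_FULL, GEMINI_MODELS_DISPLAY = registry_models("gemini")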
app_gemini_camera.py ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Gemini models but keep their full names for loading
+GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
+
+# Create display names without the prefix
+GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
+    default_model=GEMINI_MODELS_FULL[-2],
+    dropdown_label="Select Gemini Model",
+    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
+    src=ai_gradio.registry,
+    camera=True,
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_gemini_coder.py ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Gemini models but keep their full names for loading
+GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
+
+# Create display names without the prefix
+GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
+    default_model=GEMINI_MODELS_FULL[0],
+    dropdown_label="Select Gemini Model",
+    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
+    src=ai_gradio.registry,
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_gemini_voice.py ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Gemini models but keep their full names for loading
+GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
+
+# Create display names without the prefix
+GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GEMINI_MODELS_FULL,  # Use the full names with prefix
+    default_model=GEMINI_MODELS_FULL[-2],
+    dropdown_label="Select Gemini Model",
+    choices=GEMINI_MODELS_DISPLAY,  # Display names without prefix
+    src=ai_gradio.registry,
+    enable_voice=True,
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_groq.py ADDED
@@ -0,0 +1,21 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Groq models from the registry
+GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
+
+# Create display names without the prefix
+GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
+
+demo = get_app(
+    models=GROQ_MODELS_FULL,
+    default_model=GROQ_MODELS_FULL[-2],
+    src=ai_gradio.registry,
+    dropdown_label="Select Groq Model",
+    choices=GROQ_MODELS_DISPLAY,
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_groq_coder.py ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Groq models but keep their full names for loading
+GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
+
+# Create display names without the prefix
+GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=GROQ_MODELS_FULL,  # Use the full names with prefix
+    default_model=GROQ_MODELS_FULL[-1],
+    dropdown_label="Select Groq Model",
+    choices=GROQ_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+
+if __name__ == "__main__":
+    demo.launch()
app_hf.py ADDED
@@ -0,0 +1,17 @@
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "microsoft/Phi-3.5-mini-instruct",
+        "HuggingFaceTB/SmolLM2-1.7B-Instruct",
+        "google/gemma-2-2b-it",
+        "openai-community/gpt2",
+        "microsoft/phi-2",
+        "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+    ],
+    default_model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
+    src="models",
+)
+
+if __name__ == "__main__":
+    demo.launch()
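Unlike the registry-backed files, app_hf.py passes src="models", which routes each entry through Gradio's hosted-model loader. For a single model this is roughly what happens under the hood — a sketch, not the actual utils.get_app implementation:

import gradio as gr

# gr.load resolves "models/<repo_id>" to a ready-made interface backed by
# Hugging Face inference, the same call app_meta.py below makes directly.
demo = gr.load("models/HuggingFaceTB/SmolLM2-1.7B-Instruct")

if __name__ == "__main__":
    demo.launch()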
app_huggingface.py ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the Hugging Face models but keep their full names for loading
+HUGGINGFACE_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("huggingface:")]
+
+# Create display names without the prefix
+HUGGINGFACE_MODELS_DISPLAY = [k.replace("huggingface:", "") for k in HUGGINGFACE_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=HUGGINGFACE_MODELS_FULL,  # Use the full names with prefix
+    default_model=HUGGINGFACE_MODELS_FULL[0],
+    dropdown_label="Select Huggingface Model",
+    choices=HUGGINGFACE_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+    provider="fireworks-ai",
+    bill_to="huggingface",
+)
app_hyperbolic.py ADDED
@@ -0,0 +1,19 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the hyperbolic models but keep their full names for loading
+HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
+
+# Create display names without the prefix
+HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
+    default_model=HYPERBOLIC_MODELS_FULL[-2],
+    dropdown_label="Select Hyperbolic Model",
+    choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
app_hyperbolic_coder.py ADDED
@@ -0,0 +1,20 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the hyperbolic models but keep their full names for loading
+HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
+
+# Create display names without the prefix
+HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=HYPERBOLIC_MODELS_FULL,  # Use the full names with prefix
+    default_model=HYPERBOLIC_MODELS_FULL[-2],
+    dropdown_label="Select Hyperbolic Model",
+    choices=HYPERBOLIC_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
app_langchain.py ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the LangChain models but keep their full names for loading
+LANGCHAIN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("langchain:")]
+
+# Create display names without the prefix
+LANGCHAIN_MODELS_DISPLAY = [k.replace("langchain:", "") for k in LANGCHAIN_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=LANGCHAIN_MODELS_FULL,  # Use the full names with prefix
+    default_model=LANGCHAIN_MODELS_FULL[0],
+    dropdown_label="Select Langchain Model",
+    choices=LANGCHAIN_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
+
app_lumaai.py ADDED
@@ -0,0 +1,7 @@
+import gradio as gr
+import lumaai_gradio
+
+demo = gr.load(
+    name="dream-machine",
+    src=lumaai_gradio.registry,
+)
app_marco_o1.py ADDED
@@ -0,0 +1,12 @@
+import gradio as gr
+import spaces
+import transformers_gradio
+
+demo = gr.load(name="AIDC-AI/Marco-o1", src=transformers_gradio.registry)
+demo.fn = spaces.GPU()(demo.fn)
+
+for fn in demo.fns.values():
+    fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
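The line demo.fn = spaces.GPU()(demo.fn) re-wraps the loaded inference function so ZeroGPU attaches a GPU for the duration of each call. The same decorator is more often applied directly at definition time; a minimal sketch (generate is a placeholder, not a function in this repo):

import spaces


@spaces.GPU  # request a ZeroGPU slice while this function runs
def generate(prompt: str) -> str:
    # Model inference would run on CUDA here.
    return prompt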
app_meta.py ADDED
@@ -0,0 +1,6 @@
+import gradio as gr
+
+demo = gr.load("models/meta-llama/Llama-3.3-70B-Instruct")
+
+if __name__ == "__main__":
+    demo.launch()
app_mindsearch.py ADDED
@@ -0,0 +1,12 @@
+import gradio as gr
+
+# Load the Gradio space
+demo = gr.load(name="internlm/MindSearch", src="spaces")
+
+# Disable API access for all functions
+if hasattr(demo, "fns"):
+    for fn in demo.fns.values():
+        fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
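This load-a-Space-then-hide-its-API shape reappears verbatim in app_moondream.py and app_omini.py below; a shared helper could express it once. A sketch — load_space_without_api is a hypothetical name, not in the repo:

import gradio as gr


def load_space_without_api(space_id: str) -> gr.Blocks:
    # Proxy a remote Space locally and hide every endpoint from the
    # auto-generated API page.
    demo = gr.load(name=space_id, src="spaces")
    if hasattr(demo, "fns"):
        for fn in demo.fns.values():
            fn.api_name = False
    return demo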
app_minimax.py ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the MiniMax models but keep their full names for loading
+MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
+
+# Create display names without the prefix
+MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
+    default_model=MINIMAX_MODELS_FULL[0],
+    dropdown_label="Select Minimax Model",
+    choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_minimax_coder.py ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the MiniMax models but keep their full names for loading
+MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
+
+# Create display names without the prefix
+MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=MINIMAX_MODELS_FULL,  # Use the full names with prefix
+    default_model=MINIMAX_MODELS_FULL[0],
+    dropdown_label="Select Minimax Model",
+    choices=MINIMAX_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_mistral.py ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the mistral models but keep their full names for loading
+MISTRAL_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("mistral:")]
+
+# Create display names without the prefix
+MISTRAL_MODELS_DISPLAY = [k.replace("mistral:", "") for k in MISTRAL_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=MISTRAL_MODELS_FULL,  # Use the full names with prefix
+    default_model=MISTRAL_MODELS_FULL[5],
+    dropdown_label="Select Mistral Model",
+    choices=MISTRAL_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_moondream.py ADDED
@@ -0,0 +1,13 @@
+import gradio as gr
+
+# Load the Gradio space
+demo = gr.load(name="akhaliq/moondream", src="spaces")
+
+
+# Disable API access for all functions
+if hasattr(demo, "fns"):
+    for fn in demo.fns.values():
+        fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
app_nvidia.py ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the nvidia models but keep their full names for loading
+NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]
+
+# Create display names without the prefix
+NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=NVIDIA_MODELS_FULL,  # Use the full names with prefix
+    default_model=NVIDIA_MODELS_FULL[0],
+    dropdown_label="Select Nvidia Model",
+    choices=NVIDIA_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_nvidia_coder.py ADDED
@@ -0,0 +1,23 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the nvidia models but keep their full names for loading
+NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]
+
+# Create display names without the prefix
+NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=NVIDIA_MODELS_FULL,  # Use the full names with prefix
+    default_model=NVIDIA_MODELS_FULL[-1],
+    dropdown_label="Select Nvidia Model",
+    choices=NVIDIA_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_omini.py ADDED
@@ -0,0 +1,10 @@
+import gradio as gr
+
+# Load the Gradio space
+demo = gr.load(name="Yuanshi/OminiControl", src="spaces")
+
+
+# Disable API access for all functions
+if hasattr(demo, "fns"):
+    for fn in demo.fns.values():
+        fn.api_name = False
app_openai.py ADDED
@@ -0,0 +1,21 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the OpenAI models but keep their full names for loading
+OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
+
+# Create display names without the prefix
+OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=OPENAI_MODELS_FULL,  # Use the full names with prefix
+    default_model=OPENAI_MODELS_FULL[-1],
+    dropdown_label="Select OpenAI Model",
+    choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_openai_coder.py ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the OpenAI models but keep their full names for loading
+OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
+
+# Create display names without the prefix
+OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=OPENAI_MODELS_FULL,  # Use the full names with prefix
+    default_model=OPENAI_MODELS_FULL[-1],
+    dropdown_label="Select OpenAI Model",
+    choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_openai_voice.py ADDED
@@ -0,0 +1,23 @@
+import os
+
+import openai_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "gpt-4o-realtime-preview",
+        "gpt-4o-realtime-preview-2024-12-17",
+        "gpt-4o-realtime-preview-2024-10-01",
+        "gpt-4o-mini-realtime-preview",
+        "gpt-4o-mini-realtime-preview-2024-12-17",
+    ],
+    default_model="gpt-4o-mini-realtime-preview-2024-12-17",
+    src=openai_gradio.registry,
+    accept_token=not os.getenv("OPENAI_API_KEY"),
+    twilio_sid=os.getenv("TWILIO_SID_OPENAI"),
+    twilio_token=os.getenv("TWILIO_AUTH_OPENAI"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_openrouter.py ADDED
@@ -0,0 +1,22 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the OpenRouter models but keep their full names for loading
+OPENROUTER_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openrouter:")]
+
+# Create display names without the prefix
+OPENROUTER_MODELS_DISPLAY = [k.replace("openrouter:", "") for k in OPENROUTER_MODELS_FULL]
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=OPENROUTER_MODELS_FULL,  # Use the full names with prefix
+    default_model=OPENROUTER_MODELS_FULL[-1],
+    dropdown_label="Select OpenRouter Model",
+    choices=OPENROUTER_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+    coder=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_paligemma.py ADDED
@@ -0,0 +1,78 @@
+import gradio as gr
+from gradio_client import Client, handle_file
+
+MODELS = {"Paligemma-10B": "akhaliq/paligemma2-10b-ft-docci-448"}
+
+
+def create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
+    def chat(message, history):
+        text = message.get("text", "")
+        files = message.get("files", [])
+        processed_files = [handle_file(f) for f in files]
+
+        response = client.predict(
+            message={"text": text, "files": processed_files},
+            system_prompt=system_prompt,
+            temperature=temperature,
+            max_new_tokens=max_tokens,
+            top_k=top_k,
+            repetition_penalty=rep_penalty,
+            top_p=top_p,
+            api_name="/chat",
+        )
+        return response
+
+    return chat
+
+
+def set_client_for_session(model_name, request: gr.Request):
+    headers = {}
+    if request and hasattr(request, "headers"):
+        x_ip_token = request.headers.get("x-ip-token")
+        if x_ip_token:
+            headers["X-IP-Token"] = x_ip_token
+
+    return Client(MODELS[model_name], headers=headers)
+
+
+def safe_chat_fn(message, history, client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
+    if client is None:
+        return "Error: Client not initialized. Please refresh the page."
+    try:
+        return create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p)(
+            message, history
+        )
+    except Exception as e:
+        print(f"Error during chat: {e!s}")
+        return f"Error during chat: {e!s}"
+
+
+with gr.Blocks() as demo:
+    client = gr.State()
+
+    with gr.Accordion("Advanced Settings", open=False):
+        system_prompt = gr.Textbox(value="You are a helpful AI assistant.", label="System Prompt")
+        with gr.Row():
+            temperature = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, label="Temperature")
+            top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, label="Top P")
+        with gr.Row():
+            top_k = gr.Slider(minimum=1, maximum=100, value=40, step=1, label="Top K")
+            rep_penalty = gr.Slider(minimum=1.0, maximum=2.0, value=1.1, label="Repetition Penalty")
+        max_tokens = gr.Slider(minimum=64, maximum=4096, value=1024, step=64, label="Max Tokens")
+
+    chat_interface = gr.ChatInterface(
+        fn=safe_chat_fn,
+        additional_inputs=[client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p],
+        multimodal=True,
+    )
+
+    # Initialize client on page load with default model
+    demo.load(fn=set_client_for_session, inputs=[gr.State("Paligemma-10B")], outputs=[client])  # Using default model
+
+# Move the API access check here, after demo is defined
+if hasattr(demo, "fns"):
+    for fn in demo.fns.values():
+        fn.api_name = False
+
+if __name__ == "__main__":
+    demo.launch()
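app_paligemma.py is the one file here that talks to another Space through gradio_client rather than gr.load, and it forwards the visitor's x-ip-token header so the upstream ZeroGPU quota is charged to the visitor rather than to this Space. A standalone sketch of the same /chat call, with placeholder text and image path (the keyword arguments mirror the predict call in the diff):

from gradio_client import Client, handle_file

client = Client("akhaliq/paligemma2-10b-ft-docci-448")
result = client.predict(
    message={"text": "Describe this image.", "files": [handle_file("example.jpg")]},
    system_prompt="You are a helpful AI assistant.",
    temperature=0.7,
    max_new_tokens=1024,
    top_k=40,
    repetition_penalty=1.1,
    top_p=0.95,
    api_name="/chat",
)
print(result)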
app_perplexity.py ADDED
@@ -0,0 +1,23 @@
+import os
+
+import perplexity_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "llama-3.1-sonar-large-128k-online",
+        "llama-3.1-sonar-small-128k-online",
+        "llama-3.1-sonar-huge-128k-online",
+        "llama-3.1-sonar-small-128k-chat",
+        "llama-3.1-sonar-large-128k-chat",
+        "llama-3.1-8b-instruct",
+        "llama-3.1-70b-instruct",
+    ],
+    default_model="llama-3.1-sonar-huge-128k-online",
+    src=perplexity_gradio.registry,
+    accept_token=not os.getenv("PERPLEXITY_API_KEY"),
+)
+
+if __name__ == "__main__":
+    demo.launch()
app_playai.py ADDED
@@ -0,0 +1,10 @@
+import gradio as gr
+import playai_gradio
+
+demo = gr.load(
+    name="PlayDialog",
+    src=playai_gradio.registry,
+)
+
+for fn in demo.fns.values():
+    fn.api_name = False
app_qwen.py ADDED
@@ -0,0 +1,19 @@
+import ai_gradio
+
+from utils_ai_gradio import get_app
+
+# Get the qwen models but keep their full names for loading
+QWEN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("qwen:")]
+
+# Create display names without the prefix
+QWEN_MODELS_DISPLAY = [k.replace("qwen:", "") for k in QWEN_MODELS_FULL]
+
+
+# Create and launch the interface using get_app utility
+demo = get_app(
+    models=QWEN_MODELS_FULL,  # Use the full names with prefix
+    default_model=QWEN_MODELS_FULL[-1],
+    dropdown_label="Select Qwen Model",
+    choices=QWEN_MODELS_DISPLAY,  # Display names without prefix
+    fill_height=True,
+)