update
Files changed:
- backend_docs_manager.py  +198 -5
- backend_prompts.py  +17 -70
backend_docs_manager.py
CHANGED

@@ -20,10 +20,18 @@ GRADIO_LLMS_TXT_URL = "https://www.gradio.app/llms.txt"
 GRADIO_DOCS_CACHE_FILE = ".backend_gradio_docs_cache.txt"
 GRADIO_DOCS_LAST_UPDATE_FILE = ".backend_gradio_docs_last_update.txt"
 
+TRANSFORMERSJS_DOCS_URL = "https://huggingface.co/docs/transformers.js/llms.txt"
+TRANSFORMERSJS_DOCS_CACHE_FILE = ".backend_transformersjs_docs_cache.txt"
+TRANSFORMERSJS_DOCS_LAST_UPDATE_FILE = ".backend_transformersjs_docs_last_update.txt"
+
 # Global variable to store the current Gradio documentation
 _gradio_docs_content: Optional[str] = None
 _gradio_docs_last_fetched: Optional[datetime] = None
 
+# Global variable to store the current transformers.js documentation
+_transformersjs_docs_content: Optional[str] = None
+_transformersjs_docs_last_fetched: Optional[datetime] = None
+
 def fetch_gradio_docs() -> Optional[str]:
     """Fetch the latest Gradio documentation from llms.txt"""
     if not HAS_REQUESTS:

@@ -37,6 +45,19 @@ def fetch_gradio_docs() -> Optional[str]:
         print(f"Warning: Failed to fetch Gradio docs from {GRADIO_LLMS_TXT_URL}: {e}")
         return None
 
+def fetch_transformersjs_docs() -> Optional[str]:
+    """Fetch the latest transformers.js documentation from llms.txt"""
+    if not HAS_REQUESTS:
+        return None
+
+    try:
+        response = requests.get(TRANSFORMERSJS_DOCS_URL, timeout=10)
+        response.raise_for_status()
+        return response.text
+    except Exception as e:
+        print(f"Warning: Failed to fetch transformers.js docs from {TRANSFORMERSJS_DOCS_URL}: {e}")
+        return None
+
 def filter_problematic_instructions(content: str) -> str:
     """Filter out problematic instructions that cause LLM to stop generation prematurely"""
     if not content:

@@ -94,6 +115,31 @@ def should_update_gradio_docs() -> bool:
     # Only update if we don't have cached content (first run or cache deleted)
     return not os.path.exists(GRADIO_DOCS_CACHE_FILE)
 
+def load_cached_transformersjs_docs() -> Optional[str]:
+    """Load cached transformers.js documentation from file"""
+    try:
+        if os.path.exists(TRANSFORMERSJS_DOCS_CACHE_FILE):
+            with open(TRANSFORMERSJS_DOCS_CACHE_FILE, 'r', encoding='utf-8') as f:
+                return f.read()
+    except Exception as e:
+        print(f"Warning: Failed to load cached transformers.js docs: {e}")
+    return None
+
+def save_transformersjs_docs_cache(content: str):
+    """Save transformers.js documentation to cache file"""
+    try:
+        with open(TRANSFORMERSJS_DOCS_CACHE_FILE, 'w', encoding='utf-8') as f:
+            f.write(content)
+        with open(TRANSFORMERSJS_DOCS_LAST_UPDATE_FILE, 'w', encoding='utf-8') as f:
+            f.write(datetime.now().isoformat())
+    except Exception as e:
+        print(f"Warning: Failed to save transformers.js docs cache: {e}")
+
+def should_update_transformersjs_docs() -> bool:
+    """Check if transformers.js documentation should be updated"""
+    # Only update if we don't have cached content (first run or cache deleted)
+    return not os.path.exists(TRANSFORMERSJS_DOCS_CACHE_FILE)
+
 def get_gradio_docs_content() -> str:
     """Get the current Gradio documentation content, updating if necessary"""
     global _gradio_docs_content, _gradio_docs_last_fetched
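The cache helpers added above mirror the existing Gradio pattern: fetch over HTTP, filter, write the result to a cache file plus a last-update timestamp, and read the cache back when the network is unavailable. A minimal sketch of how they are expected to compose, assuming `backend_docs_manager` is importable as in this repo (the `refresh_transformersjs_docs` wrapper is hypothetical and not part of this commit):

```python
from backend_docs_manager import (
    fetch_transformersjs_docs,
    filter_problematic_instructions,
    load_cached_transformersjs_docs,
    save_transformersjs_docs_cache,
)

def refresh_transformersjs_docs() -> str:
    """Fetch fresh docs when possible; otherwise fall back to the on-disk cache."""
    latest = fetch_transformersjs_docs()  # returns None when offline or requests is unavailable
    if latest:
        filtered = filter_problematic_instructions(latest)
        save_transformersjs_docs_cache(filtered)  # writes the cache file and a last-update timestamp
        return filtered
    return load_cached_transformersjs_docs() or ""
```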
@@ -139,6 +185,63 @@ For the latest documentation, visit: https://www.gradio.app/llms.txt
 
     return _gradio_docs_content or ""
 
+def get_transformersjs_docs_content() -> str:
+    """Get the current transformers.js documentation content, updating if necessary"""
+    global _transformersjs_docs_content, _transformersjs_docs_last_fetched
+
+    # Check if we need to update
+    if (_transformersjs_docs_content is None or
+        _transformersjs_docs_last_fetched is None or
+        should_update_transformersjs_docs()):
+
+        print("📚 Loading transformers.js documentation...")
+
+        # Try to fetch latest content
+        latest_content = fetch_transformersjs_docs()
+
+        if latest_content:
+            # Filter out problematic instructions that cause early termination
+            filtered_content = filter_problematic_instructions(latest_content)
+            _transformersjs_docs_content = filtered_content
+            _transformersjs_docs_last_fetched = datetime.now()
+            save_transformersjs_docs_cache(filtered_content)
+            print(f"✅ transformers.js documentation loaded successfully ({len(filtered_content)} chars)")
+        else:
+            # Fallback to cached content
+            cached_content = load_cached_transformersjs_docs()
+            if cached_content:
+                _transformersjs_docs_content = cached_content
+                _transformersjs_docs_last_fetched = datetime.now()
+                print(f"⚠️ Using cached transformers.js documentation (network fetch failed) ({len(cached_content)} chars)")
+            else:
+                # Fallback to minimal content
+                _transformersjs_docs_content = """
+# Transformers.js API Reference (Offline Fallback)
+
+This is a minimal fallback when documentation cannot be fetched.
+Please check your internet connection for the latest API reference.
+
+Transformers.js allows you to run 🤗 Transformers models directly in the browser using ONNX Runtime.
+
+Key features:
+- pipeline() API for common tasks (sentiment-analysis, text-generation, etc.)
+- Support for custom models via model ID or path
+- WebGPU support for GPU acceleration
+- Quantization support (fp32, fp16, q8, q4)
+
+Basic usage:
+```javascript
+import { pipeline } from '@huggingface/transformers';
+const pipe = await pipeline('sentiment-analysis');
+const out = await pipe('I love transformers!');
+```
+
+For the latest documentation, visit: https://huggingface.co/docs/transformers.js
+"""
+                print("❌ Using minimal fallback transformers.js documentation")
+
+    return _transformersjs_docs_content or ""
+
 def build_gradio_system_prompt() -> str:
     """Build the complete Gradio system prompt with full documentation"""
 

@@ -209,6 +312,88 @@ Below is the complete, official Gradio 6 documentation automatically synced from
 - Generate production-ready code that follows all best practices
 - Always include the "Built with anycoder" attribution in the header
 
+"""
+
+    return full_prompt + final_instructions
+
+def build_transformersjs_system_prompt() -> str:
+    """Build the complete transformers.js system prompt with full documentation"""
+
+    # Get the full transformers.js documentation
+    docs_content = get_transformersjs_docs_content()
+
+    # Base system prompt with anycoder-specific instructions
+    base_prompt = """You are an expert transformers.js developer. Create a complete, working browser-based ML application using transformers.js based on the user's request. Generate all necessary code to make the application functional and runnable in the browser.
+
+## Multi-File Application Structure
+
+When creating transformers.js applications, organize your code into multiple files for proper deployment:
+
+**File Organization:**
+- `index.html` - Main HTML entry point (REQUIRED)
+- `app.js` - Main JavaScript application logic (REQUIRED)
+- `styles.css` - Styling (optional)
+- `worker.js` - Web Worker for model loading (recommended for better performance)
+- `package.json` - Node.js dependencies if using bundler (optional)
+
+**Output Format:**
+You MUST use this exact format with file separators:
+
+=== index.html ===
+[complete HTML content]
+
+=== app.js ===
+[complete JavaScript content]
+
+=== worker.js ===
+[web worker content - if needed]
+
+**🚨 CRITICAL: Best Practices**
+- Use CDN for transformers.js: https://cdn.jsdelivr.net/npm/@huggingface/transformers
+- Implement loading states and progress indicators
+- Use Web Workers for model loading to avoid blocking UI
+- Handle errors gracefully with user-friendly messages
+- Show model download progress when applicable
+- Use quantized models (q8, q4) for faster loading in browser
+
+Requirements:
+1. Create a modern, responsive web application
+2. Use appropriate transformers.js pipelines and models
+3. Include proper error handling and loading states
+4. Implement progress indicators for model loading
+5. Add helpful descriptions and examples
+6. Follow browser best practices (async/await, Web Workers, etc.)
+7. Make the UI user-friendly with clear labels
+8. Include proper comments in code
+
+IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder
+
+---
+
+## Complete transformers.js Documentation
+
+Below is the complete, official transformers.js documentation automatically synced from https://huggingface.co/docs/transformers.js/llms.txt:
+
+"""
+
+    # Combine base prompt with full documentation
+    full_prompt = base_prompt + docs_content
+
+    # Add final instructions
+    final_instructions = """
+
+---
+
+## Final Instructions
+
+- Always use the exact function signatures and patterns from the transformers.js documentation above
+- Use the pipeline() API for common tasks
+- Implement WebGPU support when appropriate for better performance
+- Use quantized models by default (q8 or q4) for faster browser loading
+- Generate production-ready code that follows all best practices
+- Always include the "Built with anycoder" attribution in the header
+- Consider using Web Workers for heavy computation to keep UI responsive
+
 """
 
     return full_prompt + final_instructions

@@ -216,12 +401,20 @@ Below is the complete, official Gradio 6 documentation automatically synced from
 def initialize_backend_docs():
     """Initialize backend documentation system on startup"""
     try:
-        # Pre-load the documentation
-
-        if
-            print(f"📚
+        # Pre-load the Gradio documentation
+        gradio_docs = get_gradio_docs_content()
+        if gradio_docs:
+            print(f"📚 Gradio documentation initialized ({len(gradio_docs)} chars loaded)")
+        else:
+            print("⚠️ Gradio documentation initialized with fallback content")
+
+        # Pre-load the transformers.js documentation
+        transformersjs_docs = get_transformersjs_docs_content()
+        if transformersjs_docs:
+            print(f"📚 transformers.js documentation initialized ({len(transformersjs_docs)} chars loaded)")
         else:
-            print("⚠️
+            print("⚠️ transformers.js documentation initialized with fallback content")
+
     except Exception as e:
         print(f"Warning: Failed to initialize backend documentation: {e}")
 
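After this change, startup pre-loads both documentation sets and the prompt builders can be called on demand. A rough usage sketch, assuming `backend_docs_manager` is importable as in this repo (the variable names are illustrative):

```python
from backend_docs_manager import (
    build_gradio_system_prompt,
    build_transformersjs_system_prompt,
    initialize_backend_docs,
)

initialize_backend_docs()  # pre-loads Gradio and transformers.js docs, or falls back to cached/minimal content

# Each builder returns its base instructions plus the full synced documentation text.
gradio_prompt = build_gradio_system_prompt()
transformersjs_prompt = build_transformersjs_system_prompt()
print(len(gradio_prompt), len(transformersjs_prompt))
```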
backend_prompts.py
CHANGED

@@ -3,13 +3,13 @@ Standalone system prompts for AnyCoder backend.
 No dependencies on Gradio or other heavy libraries.
 """
 
-# Import the backend documentation manager for Gradio 6 docs
+# Import the backend documentation manager for Gradio 6 docs and transformers.js docs
 try:
-    from backend_docs_manager import build_gradio_system_prompt
+    from backend_docs_manager import build_gradio_system_prompt, build_transformersjs_system_prompt
     HAS_BACKEND_DOCS = True
 except ImportError:
     HAS_BACKEND_DOCS = False
-    print("Warning: backend_docs_manager not available, using fallback
+    print("Warning: backend_docs_manager not available, using fallback prompts")
 
 HTML_SYSTEM_PROMPT = """ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICON make sure to import the library first. Try to create the best UI possible by using only HTML, CSS and JAVASCRIPT. MAKE IT RESPONSIVE USING MODERN CSS. Use as much as you can modern CSS for the styling, if you can't do something with modern CSS, then use custom CSS. Also, try to elaborate as much as you can, to create something unique. ALWAYS GIVE THE RESPONSE INTO A SINGLE HTML FILE
 

@@ -27,7 +27,14 @@ Generate complete, working HTML code that can be run immediately.
 IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder"""
 
 
-
+# Transformers.js system prompt - dynamically loaded with full transformers.js documentation
+def get_transformersjs_system_prompt() -> str:
+    """Get the complete transformers.js system prompt with full documentation"""
+    if HAS_BACKEND_DOCS:
+        return build_transformersjs_system_prompt()
+    else:
+        # Fallback prompt if documentation manager is not available
+        return """You are an expert web developer creating a transformers.js application. You will generate THREE separate files: index.html, index.js, and style.css.
 
 **🚨 CRITICAL: DO NOT Generate README.md Files**
 - NEVER generate README.md files under any circumstances

@@ -76,37 +83,6 @@ import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers
 7. Each file must be complete and ready to deploy - no placeholders or "// TODO" comments
 8. **AVOID EMOJIS in the generated code** (HTML/JS/CSS files) - use text or unicode symbols instead for deployment compatibility
 
-**WRONG FORMAT (DO NOT DO THIS):**
-<!DOCTYPE html>
-<html>...
-
-=== index.js ===
-...
-
-**CORRECT FORMAT (DO THIS):**
-=== index.html ===
-<!DOCTYPE html>
-<html>...
-
-=== index.js ===
-...
-
-**Example of CORRECT format:**
-=== index.html ===
-<!DOCTYPE html>
-<html>
-<head>...</head>
-<body>...</body>
-</html>
-
-=== index.js ===
-import { pipeline } from '...';
-// Complete working code
-
-=== style.css ===
-body { margin: 0; }
-/* Complete styling */
-
 Requirements:
 1. Create a modern, responsive web application using transformers.js
 2. Use the transformers.js library for AI/ML functionality

@@ -118,31 +94,19 @@ Requirements:
 
 **Transformers.js Library Usage:**
 
-Import via CDN
+Import via CDN:
 ```javascript
-
-import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.0';
-</script>
+import { pipeline } from 'https://cdn.jsdelivr.net/npm/@huggingface/transformers@3.8.0';
 ```
 
 **Pipeline API - Quick Tour:**
-Pipelines group together a pretrained model with preprocessing and postprocessing. Example:
-
 ```javascript
-import { pipeline } from '@huggingface/transformers';
-
 // Allocate a pipeline for sentiment-analysis
 const pipe = await pipeline('sentiment-analysis');
-
 const out = await pipe('I love transformers!');
-// [{'label': 'POSITIVE', 'score': 0.999817686}]
-
-// Use a different model by specifying model id
-const pipe = await pipeline('sentiment-analysis', 'Xenova/bert-base-multilingual-uncased-sentiment');
 ```
 
 **Device Options:**
-By default, models run on CPU (via WASM). For better performance, use WebGPU:
 ```javascript
 // Run on WebGPU (GPU)
 const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncased-finetuned-sst-2-english', {

@@ -151,12 +115,6 @@ const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncase
 ```
 
 **Quantization Options:**
-In resource-constrained environments (browsers), use quantized models:
-- "fp32" (default for WebGPU)
-- "fp16"
-- "q8" (default for WASM)
-- "q4" (4-bit quantization for smaller size)
-
 ```javascript
 // Run at 4-bit quantization for better performance
 const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncased-finetuned-sst-2-english', {

@@ -164,22 +122,11 @@ const pipe = await pipeline('sentiment-analysis', 'Xenova/distilbert-base-uncase
 });
 ```
 
-
-
-**File Content Requirements:**
-- **index.html**: Complete HTML structure with proper DOCTYPE, meta tags, links to CSS/JS files, and full body content
-- **index.js**: Complete JavaScript logic with transformers.js imports and ALL functionality implemented
-- **style.css**: Complete styling for the entire application - NO empty or placeholder styles
-
-**🚨 FINAL REMINDERS:**
-1. Use the === filename === markers EXACTLY as shown in the examples
-2. DO NOT use markdown code blocks (```html, ```js, ```css)
-3. ALL THREE files must be complete and functional - no placeholders or "TODO" comments
-4. Start each file's content immediately on the line after the === marker
-5. Ensure each file has actual content - empty files will cause deployment failure
-6. AVOID using emojis in the generated code files - use text or HTML entities instead
+IMPORTANT: Always include "Built with anycoder" as clickable text in the header/top section of your application that links to https://huggingface.co/spaces/akhaliq/anycoder
+"""
 
-
+# Legacy variable for backward compatibility - now dynamically generated
+TRANSFORMERS_JS_SYSTEM_PROMPT = get_transformersjs_system_prompt()
 
 
 STREAMLIT_SYSTEM_PROMPT = """You are an expert Streamlit developer. Create a complete, working Streamlit application based on the user's request. Generate all necessary code to make the application functional and runnable.
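With `TRANSFORMERS_JS_SYSTEM_PROMPT` now evaluated once at import time, callers that want the freshest docs-backed prompt can call the getter directly. A small, hypothetical sketch of how a consumer of `backend_prompts` might choose a prompt (the `pick_system_prompt` helper and the language keys are assumptions, not part of this commit):

```python
import backend_prompts

def pick_system_prompt(language: str) -> str:
    """Hypothetical helper: map a target stack to one of the backend system prompts."""
    if language == "transformers.js":
        # Re-evaluates the prompt, so it reflects the docs-backed version when
        # backend_docs_manager is available and the static fallback otherwise.
        return backend_prompts.get_transformersjs_system_prompt()
    if language == "html":
        return backend_prompts.HTML_SYSTEM_PROMPT
    return backend_prompts.STREAMLIT_SYSTEM_PROMPT

prompt = pick_system_prompt("transformers.js")
```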