Spaces:
Running on Zero
Running on Zero
Initial commit
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitignore +16 -0
- .vscode/settings.json +6 -0
- ARCHITECTURE_HUGGINGFACE.md +245 -0
- Adult_repo/.gitattributes +35 -0
- Adult_repo/README.md +13 -0
- Adult_repo/app.py +207 -0
- Adult_repo/requirements.txt +9 -0
- FIXED_README.md +87 -0
- START_IMAGEFORGE.bat +43 -0
- ZIMAGETURBO_QUICKSTART.md +171 -0
- ZIMAGETURBO_SETUP.md +96 -0
- add_exit.txt +1 -0
- download_model.py +66 -0
- download_sd15.py +30 -0
- download_tiny_sd.py +37 -0
- imageforge/.github/workflows/ci.yml +31 -0
- imageforge/.gitignore +18 -0
- imageforge/.npmrc +2 -0
- imageforge/README.md +199 -0
- imageforge/backend/__init__.py +1 -0
- imageforge/backend/app/__init__.py +1 -0
- imageforge/backend/app/api/__init__.py +1 -0
- imageforge/backend/app/api/schemas.py +163 -0
- imageforge/backend/app/core/__init__.py +1 -0
- imageforge/backend/app/core/config.py +53 -0
- imageforge/backend/app/core/logging.py +26 -0
- imageforge/backend/app/core/observability.py +38 -0
- imageforge/backend/app/core/policy.py +117 -0
- imageforge/backend/app/core/prompting.py +70 -0
- imageforge/backend/app/core/security.py +56 -0
- imageforge/backend/app/jobs/__init__.py +1 -0
- imageforge/backend/app/jobs/manager.py +455 -0
- imageforge/backend/app/local_ai/__init__.py +1 -0
- imageforge/backend/app/local_ai/engine.py +129 -0
- imageforge/backend/app/main.py +437 -0
- imageforge/backend/app/providers/__init__.py +1 -0
- imageforge/backend/app/providers/a1111_provider.py +166 -0
- imageforge/backend/app/providers/diffusion_provider.py +165 -0
- imageforge/backend/app/providers/dummy_provider.py +68 -0
- imageforge/backend/app/providers/factory.py +28 -0
- imageforge/backend/app/providers/huggingface_provider.py +328 -0
- imageforge/backend/app/providers/interface.py +51 -0
- imageforge/backend/app/providers/localai_provider.py +105 -0
- imageforge/backend/app/providers/zimageturbo_provider.py +227 -0
- imageforge/backend/app/storage/__init__.py +1 -0
- imageforge/backend/app/storage/history.py +44 -0
- imageforge/backend/app/storage/maintenance.py +39 -0
- imageforge/backend/app/storage/presets.py +80 -0
- imageforge/backend/app/storage/settings.py +67 -0
- imageforge/backend/tests/test_a1111_provider.py +51 -0
.gitignore
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.cache/
|
| 2 |
+
.venv/
|
| 3 |
+
logs/
|
| 4 |
+
output/
|
| 5 |
+
|
| 6 |
+
*.log
|
| 7 |
+
git_add_*.txt
|
| 8 |
+
git_add_output.log
|
| 9 |
+
|
| 10 |
+
imageforge/.cache/
|
| 11 |
+
imageforge/logs/
|
| 12 |
+
imageforge/output/
|
| 13 |
+
|
| 14 |
+
stable-diffusion-webui/models/
|
| 15 |
+
stable-diffusion-webui/repositories/
|
| 16 |
+
stable-diffusion-webui/tmp/
|
.vscode/settings.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"chat.tools.terminal.autoApprove": {
|
| 3 |
+
"&": true,
|
| 4 |
+
"npm run dev:web": true
|
| 5 |
+
}
|
| 6 |
+
}
|
ARCHITECTURE_HUGGINGFACE.md
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🏗️ PixelForge Architektur mit HuggingFace Spaces
|
| 2 |
+
|
| 3 |
+
## Übersicht
|
| 4 |
+
|
| 5 |
+
PixelForge wird als **Orchestrator** fungieren, der externe AI-Services via REST API orchestriert. Die Heavy-Lifting (GPU-intensive Image Generation) läuft auf HuggingFace Spaces.
|
| 6 |
+
|
| 7 |
+
```
|
| 8 |
+
┌─────────────────────────────────────────────────────────┐
|
| 9 |
+
│ PixelForge Frontend (React/Vite) │
|
| 10 |
+
│ http://127.0.0.1:5173 │
|
| 11 |
+
└────────────────────┬────────────────────────────────────┘
|
| 12 |
+
│
|
| 13 |
+
│ HTTP Requests
|
| 14 |
+
↓
|
| 15 |
+
┌─────────────────────────────────────────────────────────┐
|
| 16 |
+
│ PixelForge Backend (FastAPI) │
|
| 17 |
+
│ http://127.0.0.1:8008 │
|
| 18 |
+
│ ┌──────────────────────────────────────────────────┐ │
|
| 19 |
+
│ │ Provider Factory & Job Manager │ │
|
| 20 |
+
│ └────┬─────────────────────────────────────────────┘ │
|
| 21 |
+
└────┼──────────────────────────────────────────────────────┘
|
| 22 |
+
│
|
| 23 |
+
├─ Lokale Provider (CPU):
|
| 24 |
+
│ ├─ dummy: Dummy bilder
|
| 25 |
+
│ ├─ localai: LocalAI Engine
|
| 26 |
+
│ └─ diffusion: Stable Diffusion (lokal)
|
| 27 |
+
│
|
| 28 |
+
└─ Remote Provider (API/GPU):
|
| 29 |
+
├─ zimageturbo: Cloud API (Colab/RunPod) [ÜBERGANGSLÖSUNG]
|
| 30 |
+
├─ huggingface: HuggingFace Space API [ZUKÜNFTIG]
|
| 31 |
+
│ ├─ Heartsync/Adult Space (erwachsene Inhalte)
|
| 32 |
+
│ ├─ weitere HF Spaces...
|
| 33 |
+
│ └─ Authentifizierung via HF API Token
|
| 34 |
+
│
|
| 35 |
+
└─ a1111: AUTOMATIC1111 API (lokal falls vorhanden)
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
---
|
| 39 |
+
|
| 40 |
+
## Phase 1: Aktuelle State (Übergangslösung)
|
| 41 |
+
|
| 42 |
+
**Colab + Z-Image Turbo** als schneller Remote-Provider:
|
| 43 |
+
- ✅ Funktioniert sofort
|
| 44 |
+
- ⚠️ Session-basiert (max 12 Stunden)
|
| 45 |
+
- ⚠️ Colab kann jeden Moment beenden
|
| 46 |
+
- ✅ Kostenlos
|
| 47 |
+
|
| 48 |
+
**Implementation**: `zimageturbo_provider.py` (HTTP API Wrapper)
|
| 49 |
+
|
| 50 |
+
### Zu tun jetzt:
|
| 51 |
+
1. Frontend testen (Modellauswahl wurde gefixt)
|
| 52 |
+
2. Colab Notebook hochladen & API URL setzen
|
| 53 |
+
3. Z-Image Turbo Provider testen
|
| 54 |
+
|
| 55 |
+
---
|
| 56 |
+
|
| 57 |
+
## Phase 2: HuggingFace Space Integration (Zukünftig)
|
| 58 |
+
|
| 59 |
+
**Adult_repo** bleibt als eigenständige Einheit auf HuggingFace Spaces:
|
| 60 |
+
|
| 61 |
+
```bash
|
| 62 |
+
# Adult_repo läuft auf:
|
| 63 |
+
https://huggingface.co/spaces/Heartsync/Adult
|
| 64 |
+
# → Hat eigene GPU/Inference Engine
|
| 65 |
+
# → Verfügbar 24/7 (solange Space online)
|
| 66 |
+
```
|
| 67 |
+
|
| 68 |
+
### Architektur:
|
| 69 |
+
|
| 70 |
+
1. **Adult_repo auf HF Space** (remote)
|
| 71 |
+
- Gradio Interface (aber nicht für PixelForge relevant)
|
| 72 |
+
- Inference Endpoint für API-Calls
|
| 73 |
+
- Nutzt Z-Image Turbo Model intern
|
| 74 |
+
|
| 75 |
+
2. **HuggingFace Space API Provider** (neu in PixelForge)
|
| 76 |
+
```python
|
| 77 |
+
# Backend Provider
|
| 78 |
+
class HuggingFaceSpaceProvider(IImageProvider):
|
| 79 |
+
def __init__(self, space_name: str, token: str):
|
| 80 |
+
self.space_name = "Heartsync/Adult" # oder andere Spaces
|
| 81 |
+
self.api_token = os.getenv("HF_API_TOKEN")
|
| 82 |
+
|
| 83 |
+
def generate(self, request: ProviderRequest) -> ProviderResult:
|
| 84 |
+
# POST zu HF Inference API
|
| 85 |
+
# https://api-inference.huggingface.co/models/{space}
|
| 86 |
+
...
|
| 87 |
+
```
|
| 88 |
+
|
| 89 |
+
3. **PixelForge kennt mehrere Spaces**:
|
| 90 |
+
- `heartsync/adult` → Erwachsene Inhalte
|
| 91 |
+
- `heartsync/anime` → Anime-Style
|
| 92 |
+
- `custom/space` → Beliebige andere Spaces
|
| 93 |
+
|
| 94 |
+
---
|
| 95 |
+
|
| 96 |
+
## Vergleich: Colab vs HuggingFace Space
|
| 97 |
+
|
| 98 |
+
| Aspekt | Colab | HF Space | Lokal |
|
| 99 |
+
|--------|-------|----------|-------|
|
| 100 |
+
| **Setup** | Jupyter Notebook | Einmal deployen | Python Virtual Env |
|
| 101 |
+
| **Kosten** | Kostenlos | Kostenlos (mit Limits) | Nur Hardware |
|
| 102 |
+
| **Persistenz** | 12h max | 24/7 (solange online) | 24/7 |
|
| 103 |
+
| **GPU** | T4/A100 (kostenlos) | Space-spezifisch | Deine Hardware |
|
| 104 |
+
| **API-Zugriff** | über Ngrok-Tunnel | HF Inference API | localhost:PORT |
|
| 105 |
+
| **Startzeit** | ~2 min (Kernel) | instant | instant |
|
| 106 |
+
| **Geeignet für** | Schnelle Tests | Production | Development |
|
| 107 |
+
|
| 108 |
+
---
|
| 109 |
+
|
| 110 |
+
## Implementierungs-Roadmap
|
| 111 |
+
|
| 112 |
+
### ✅ Heute (Phase 1)
|
| 113 |
+
- [x] Frontend Model-Picker repariert
|
| 114 |
+
- [x] Colab Notebook erstellt
|
| 115 |
+
- [x] zimageturbo_provider.py ✓
|
| 116 |
+
- [ ] Testen mit Colab
|
| 117 |
+
|
| 118 |
+
### 📅 Später (Phase 2)
|
| 119 |
+
- [ ] HuggingFaceSpaceProvider erstellen
|
| 120 |
+
- [ ] HF API Token Authentication
|
| 121 |
+
- [ ] Adult_repo auf HF Space deployen
|
| 122 |
+
- [ ] Provider registrieren & testen
|
| 123 |
+
- [ ] Colab durch HF Space ersetzen
|
| 124 |
+
|
| 125 |
+
### 🎯 Langfristig (Phase 3)
|
| 126 |
+
- [ ] Multi-Space Support (verschiedene Spaces wählen)
|
| 127 |
+
- [ ] Space Health Monitoring
|
| 128 |
+
- [ ] Fallback-Strategie (wenn Space down ist)
|
| 129 |
+
- [ ] Queue Management für lange Jobs
|
| 130 |
+
|
| 131 |
+
---
|
| 132 |
+
|
| 133 |
+
## HuggingFace API Details (für später)
|
| 134 |
+
|
| 135 |
+
### Space Deployment (Adult_repo)
|
| 136 |
+
```bash
|
| 137 |
+
# HF CLI installieren
|
| 138 |
+
pip install huggingface-hub
|
| 139 |
+
|
| 140 |
+
# Private Space erstellen (Adult content)
|
| 141 |
+
huggingface-cli repo create --type space Adult_repo --private
|
| 142 |
+
|
| 143 |
+
# Adult_repo hochladen
|
| 144 |
+
git push huggingface main
|
| 145 |
+
```
|
| 146 |
+
|
| 147 |
+
### PixelForge Provider (HTTP API Call)
|
| 148 |
+
```python
|
| 149 |
+
import requests
|
| 150 |
+
|
| 151 |
+
# HF Inference API Endpoint
|
| 152 |
+
url = "https://api-inference.huggingface.co/models/Heartsync/Adult"
|
| 153 |
+
|
| 154 |
+
payload = {
|
| 155 |
+
"inputs": "A beautiful sunset over mountains",
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
headers = {
|
| 159 |
+
"Authorization": f"Bearer {HF_API_TOKEN}"
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
response = requests.post(url, json=payload, headers=headers)
|
| 163 |
+
image_data = response.content # PIL Image bytes
|
| 164 |
+
```
|
| 165 |
+
|
| 166 |
+
### Alternative: Space API (wenn Gradio genutzt)
|
| 167 |
+
```python
|
| 168 |
+
# Falls Adult_repo Gradio API exponiert
|
| 169 |
+
from gradio_client import Client
|
| 170 |
+
|
| 171 |
+
client = Client("https://huggingface.co/spaces/Heartsync/Adult")
|
| 172 |
+
result = client.predict(
|
| 173 |
+
prompt="A girl in a school uniform",
|
| 174 |
+
seed=42,
|
| 175 |
+
api_name="/predict"
|
| 176 |
+
)
|
| 177 |
+
```
|
| 178 |
+
|
| 179 |
+
---
|
| 180 |
+
|
| 181 |
+
## Umgebungsvariablen (Phase 2)
|
| 182 |
+
|
| 183 |
+
```powershell
|
| 184 |
+
# HuggingFace Integration
|
| 185 |
+
$env:HF_API_TOKEN = "hf_xxxxxxxxxxxxxxxxxxxxx"
|
| 186 |
+
$env:HF_SPACE_ADULT = "Heartsync/Adult"
|
| 187 |
+
$env:HF_SPACE_DEFAULT = "Heartsync/Adult"
|
| 188 |
+
$env:HF_REQUEST_TIMEOUT = "300" # Sekunden
|
| 189 |
+
|
| 190 |
+
# Colab (Phase 1 - temporär)
|
| 191 |
+
$env:ZIMAGETURBO_API_URL = "https://abc123.ngrok.io"
|
| 192 |
+
$env:ZIMAGETURBO_TIMEOUT = "300"
|
| 193 |
+
```
|
| 194 |
+
|
| 195 |
+
---
|
| 196 |
+
|
| 197 |
+
## Adult_repo Struktur (vor Ort)
|
| 198 |
+
|
| 199 |
+
```
|
| 200 |
+
d:/VSC Codes/Bild/Adult_repo/
|
| 201 |
+
├── app.py # Gradio Interface (wird auf HF Space laufen)
|
| 202 |
+
├── requirements.txt # Dependencies
|
| 203 |
+
├── README.md
|
| 204 |
+
└── .git # Git Repo
|
| 205 |
+
```
|
| 206 |
+
|
| 207 |
+
**Wichtig**: Adult_repo ist momentan nicht in PixelForge integriert!
|
| 208 |
+
- Es läuft später als eigenständiger Service auf HF Space
|
| 209 |
+
- PixelForge ruft es via HF API auf
|
| 210 |
+
- Keine lokale Abhängigkeit → Clean Architecture ✨
|
| 211 |
+
|
| 212 |
+
---
|
| 213 |
+
|
| 214 |
+
## Nächste Schritte
|
| 215 |
+
|
| 216 |
+
### Sofort (Phase 1):
|
| 217 |
+
```powershell
|
| 218 |
+
# 1. Frontend aktualisieren (Done ✓)
|
| 219 |
+
# 2. Browser aktualisieren (F5)
|
| 220 |
+
# 3. Modellauswahl testen
|
| 221 |
+
# 4. Colab konfigurieren & testen
|
| 222 |
+
```
|
| 223 |
+
|
| 224 |
+
### Später (Phase 2):
|
| 225 |
+
```powershell
|
| 226 |
+
# 1. Adult_repo zu HF Space pushen
|
| 227 |
+
# 2. HF API Token besorgen
|
| 228 |
+
# 3. HuggingFaceSpaceProvider schreiben
|
| 229 |
+
# 4. In Factory registrieren
|
| 230 |
+
# 5. Testen & Colab deaktivieren
|
| 231 |
+
```
|
| 232 |
+
|
| 233 |
+
---
|
| 234 |
+
|
| 235 |
+
## Fragen für Phase 2?
|
| 236 |
+
|
| 237 |
+
- Welche anderen Spaces möchtest du anbinden?
|
| 238 |
+
- Benötigst du Content Moderation (Safety Filter)?
|
| 239 |
+
- Wie sollen lange Jobs (>5min) gehandhabt werden?
|
| 240 |
+
- Fallback-Strategie wenn Space down ist?
|
| 241 |
+
|
| 242 |
+
---
|
| 243 |
+
|
| 244 |
+
**Status**: 🟢 Phase 1 bereitet sich vor
|
| 245 |
+
**Phase 2 Start**: Nach Colab-Test & HF Space Deployment
|
Adult_repo/.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
Adult_repo/README.md
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: NSFW Uncensored Adult Image
|
| 3 |
+
emoji: 📈
|
| 4 |
+
colorFrom: green
|
| 5 |
+
colorTo: blue
|
| 6 |
+
sdk: gradio
|
| 7 |
+
sdk_version: 6.0.2
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
short_description: Based 'Z-IMAGE TURBO'
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
Adult_repo/app.py
ADDED
|
@@ -0,0 +1,207 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import spaces
|
| 3 |
+
import gradio as gr
|
| 4 |
+
from diffusers import DiffusionPipeline
|
| 5 |
+
import diffusers
|
| 6 |
+
import numpy as np
|
| 7 |
+
import random
|
| 8 |
+
|
| 9 |
+
# =========================================================
|
| 10 |
+
# MODEL CONFIGURATION
|
| 11 |
+
# =========================================================
|
| 12 |
+
MAX_SEED = np.iinfo(np.int32).max
|
| 13 |
+
|
| 14 |
+
# =========================================================
|
| 15 |
+
# PROMPT EXAMPLES
|
| 16 |
+
# =========================================================
|
| 17 |
+
prompt_examples = [
|
| 18 |
+
"The shy college girl, with glasses and a tight plaid skirt, nervously approaches her professor",
|
| 19 |
+
"Her skirt rose a little higher with each gentle push, a soft blush of blush spreading across her cheeks as she felt the satisfying warmth of his breath on her cheek.",
|
| 20 |
+
"a girl in a school uniform having her skirt pulled up by a boy, and then being fucked",
|
| 21 |
+
"Moody mature anime scene of two lovers fuck under neon rain, sensual atmosphere",
|
| 22 |
+
"Moody mature anime scene of two lovers kissing under neon rain, sensual atmosphere",
|
| 23 |
+
"The girl sits on the boy's lap by the window, his hands resting on her waist. She is unbuttoning his shirt, her expression focused and intense.",
|
| 24 |
+
"A girl with long, black hair is sleeping on her desk in the classroom. Her skirt has ridden up, revealing her thighs, and a trail of drool escapes her slightly parted lips.",
|
| 25 |
+
"The waves rolled gently, a slow, sweet kiss of the lip, a slow, slow build of anticipation as their toes bumped gently – a slow, sweet kiss of the lip, a promise of more to come.",
|
| 26 |
+
"Her elegant silk gown swayed gracefully as she approached him, the delicate fabric brushing against her legs. A warm blush spread across her cheeks as she felt his breath on her face.",
|
| 27 |
+
"Her white blouse and light cotton skirt rose a little higher with each gentle push, a soft blush spreading across her cheeks as she felt the satisfying warmth of his breath on her cheek.",
|
| 28 |
+
"A woman in a business suit having her skirt lifted by a man, and then being sexually assaulted.",
|
| 29 |
+
"The older woman sits on the man's lap by the fireplace, his hands resting on her hips. She is unbuttoning his vest, her expression focused and intense. He takes control of the situation as she finishes unbuttoning his shirt, pushing her onto her back and begins to have sex with her.",
|
| 30 |
+
"There is a woman with long black hair. Her face features alluring eyes and full lips, with a slender figure adorned in black lace lingerie. She lies on the bed, loosening her lingerie strap with one hand while seductively glancing downward.",
|
| 31 |
+
"In a dimly lit room, the same woman teases with her dark, flowing hair, now covering her voluptuous breasts, while a black garter belt accentuates her thighs. She sits on the sofa, leaning back, lifting one leg to expose her most private areas through the sheer lingerie.",
|
| 32 |
+
"A woman with glasses, lying on the bed in just her bra, spreads her legs wide, revealing all! She wears a sultry expression, gazing directly at the viewer with her brown eyes, her short black hair cascading over the pillow. Her slim figure, accentuated by the lacy lingerie, exudes a seductive aura.",
|
| 33 |
+
"A soft focus on the girl's face, eyes closed, biting her lip, as her roommate performs oral pleasure, the experienced woman's hair cascading between her thighs.",
|
| 34 |
+
"A woman in a blue hanbok sits on a wooden floor, her legs folded beneath her, gazing out of a window, the sunlight highlighting the graceful lines of her clothing.",
|
| 35 |
+
"The couple, immersed in a wooden outdoor bath, share an intimate moment, her wet kimono clinging to her curves, his hands exploring her body beneath the water's surface.",
|
| 36 |
+
"A steamy shower scene, the twins embrace under the warm water, their soapy hands gliding over each other's curves, their passion intensifying as they explore uncharted territories.",
|
| 37 |
+
"The teacher, with a firm grip, pins the student against the blackboard, her skirt hiked up, exposing her delicate lace panties. Their heavy breathing echoes in the quiet room as they share an intense, intimate moment.",
|
| 38 |
+
"After hours, the girl sits on top of the teacher's lap, riding him on the classroom floor, her hair cascading over her face as she moves with increasing intensity, their bodies glistening with sweat.",
|
| 39 |
+
"In the dimly lit dorm room, the roommates lay entangled in a passionate embrace, their naked bodies glistening with sweat, as the experienced woman teaches her lover the art of kissing and touching.",
|
| 40 |
+
"The once-innocent student, now confident, takes charge, straddling her lover on the couch, their bare skin illuminated by the warm glow of the sunset through the window.",
|
| 41 |
+
"A close-up of the secretary's hand unzipping her boss's dress shirt, her fingers gently caressing his chest, their eyes locked in a heated embrace in the supply closet.",
|
| 42 |
+
"The secretary, in a tight pencil skirt and silk blouse, leans back on the boss's desk, her legs wrapped around his waist, her blouse unbuttoned, revealing her lace bra, as he passionately kisses her, his hands exploring her body.",
|
| 43 |
+
"On the living room couch, one twin sits astride her sister's lap, their lips locked in a passionate kiss, their hands tangled in each other's hair, unraveling a new level of intimacy.",
|
| 44 |
+
"In a dimly lit chamber, the dominant woman, dressed in a leather corset and thigh-high boots, stands tall, her hand gripping her submissive partner's hair, his eyes closed in submission as she instructs him to please her.",
|
| 45 |
+
"The dominant, in a sheer lace bodysuit, sits on a throne-like chair, her legs spread, as the submissive, on his knees, worships her with his tongue, his hands bound behind his back.",
|
| 46 |
+
"A traditional Japanese onsen, with steam rising, a young woman in a colorful kimono kneels on a tatami mat, her back to the viewer, as her male partner, also in a kimono, gently unties her obi, revealing her bare back.",
|
| 47 |
+
"In a serene outdoor setting, the woman, in a vibrant summer kimono, sits on a bench, her legs slightly spread, her partner kneeling before her, his hands gently caressing her exposed thigh.",
|
| 48 |
+
]
|
| 49 |
+
|
| 50 |
+
# =========================================================
|
| 51 |
+
# LOAD PIPELINE
|
| 52 |
+
# =========================================================
|
| 53 |
+
print("Loading Z-Image-Turbo pipeline...")
|
| 54 |
+
diffusers.utils.logging.set_verbosity_info()
|
| 55 |
+
|
| 56 |
+
pipe = DiffusionPipeline.from_pretrained(
|
| 57 |
+
"Tongyi-MAI/Z-Image-Turbo",
|
| 58 |
+
torch_dtype=torch.bfloat16,
|
| 59 |
+
low_cpu_mem_usage=False,
|
| 60 |
+
attn_implementation="kernels-community/vllm-flash-attn3",
|
| 61 |
+
)
|
| 62 |
+
pipe.to("cuda")
|
| 63 |
+
|
| 64 |
+
# =========================================================
|
| 65 |
+
# RANDOM PROMPT FUNCTION
|
| 66 |
+
# =========================================================
|
| 67 |
+
def get_random_prompt():
|
| 68 |
+
return random.choice(prompt_examples)
|
| 69 |
+
|
| 70 |
+
# =========================================================
|
| 71 |
+
# IMAGE GENERATOR
|
| 72 |
+
# =========================================================
|
| 73 |
+
@spaces.GPU
|
| 74 |
+
def generate_image(prompt, height, width, num_inference_steps, seed, randomize_seed, num_images):
|
| 75 |
+
if not prompt:
|
| 76 |
+
raise gr.Error("Please enter a prompt.")
|
| 77 |
+
|
| 78 |
+
if randomize_seed:
|
| 79 |
+
seed = torch.randint(0, 2**32 - 1, (1,)).item()
|
| 80 |
+
|
| 81 |
+
num_images = min(max(1, int(num_images)), 4)
|
| 82 |
+
|
| 83 |
+
generator = torch.Generator("cuda").manual_seed(int(seed))
|
| 84 |
+
|
| 85 |
+
result = pipe(
|
| 86 |
+
prompt=prompt,
|
| 87 |
+
height=int(height),
|
| 88 |
+
width=int(width),
|
| 89 |
+
num_inference_steps=int(num_inference_steps),
|
| 90 |
+
guidance_scale=0.0,
|
| 91 |
+
generator=generator,
|
| 92 |
+
max_sequence_length=1024,
|
| 93 |
+
num_images_per_prompt=num_images,
|
| 94 |
+
output_type="pil",
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
return result.images, seed
|
| 98 |
+
|
| 99 |
+
# =========================================================
|
| 100 |
+
# GRADIO UI
|
| 101 |
+
# =========================================================
|
| 102 |
+
with gr.Blocks() as demo:
|
| 103 |
+
|
| 104 |
+
gr.HTML("""
|
| 105 |
+
<style>
|
| 106 |
+
.gradio-container {
|
| 107 |
+
background: linear-gradient(135deg, #fef9f3 0%, #f0e6fa 50%, #e6f0fa 100%) !important;
|
| 108 |
+
}
|
| 109 |
+
footer {display: none !important;}
|
| 110 |
+
</style>
|
| 111 |
+
|
| 112 |
+
<div style="text-align: center; margin-bottom: 20px;">
|
| 113 |
+
<h1 style="color: #6b5b7a; font-size: 2.2rem; font-weight: 700; margin-bottom: 0.3rem;">
|
| 114 |
+
🖼️ NSFW Uncensored Adult "Text to Image"
|
| 115 |
+
</h1>
|
| 116 |
+
|
| 117 |
+
<p style="color: #8b7b9b; font-size: 1rem;">
|
| 118 |
+
Powered by Z-Image-Turbo Model
|
| 119 |
+
</p>
|
| 120 |
+
|
| 121 |
+
<div style="margin-top: 12px; display: flex; justify-content: center; gap: 12px; flex-wrap: wrap;">
|
| 122 |
+
<a href="https://huggingface.co/spaces/Heartsync/FREE-NSFW-HUB" target="_blank">
|
| 123 |
+
<img src="https://img.shields.io/static/v1?label=FREE&message=NSFW%20HUB&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=white&style=for-the-badge" alt="badge">
|
| 124 |
+
</a>
|
| 125 |
+
|
| 126 |
+
<a href="https://www.humangen.ai" target="_blank">
|
| 127 |
+
<img src="https://img.shields.io/static/v1?label=100%25%20FREE&message=AI%20Playground&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
|
| 128 |
+
</a>
|
| 129 |
+
|
| 130 |
+
<a href="https://huggingface.co/spaces/Heartsync/NSFW-Uncensored-photo" target="_blank">
|
| 131 |
+
<img src="https://img.shields.io/static/v1?label=Text%20to%20Image%28Photo%29&message=NSFW%20Uncensored&color=%230000ff&labelColor=%23800080&logo=Huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
|
| 132 |
+
</a>
|
| 133 |
+
|
| 134 |
+
<a href="https://huggingface.co/spaces/Heartsync/NSFW-Uncensored-video2" target="_blank">
|
| 135 |
+
<img src="https://img.shields.io/static/v1?label=Image%20to%20Video%282%29&message=NSFW%20Uncensored&color=%230000ff&labelColor=%23800080&logo=Huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
|
| 136 |
+
</a>
|
| 137 |
+
|
| 138 |
+
<a href="https://huggingface.co/spaces/Heartsync/adult" target="_blank">
|
| 139 |
+
<img src="https://img.shields.io/static/v1?label=Text%20to%20Image%20to%20Video&message=ADULT&color=%23ff00ff&labelColor=%23000080&logo=Huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
|
| 140 |
+
</a>
|
| 141 |
+
</div>
|
| 142 |
+
</div>
|
| 143 |
+
""")
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
with gr.Row():
|
| 147 |
+
with gr.Column(scale=1):
|
| 148 |
+
prompt_input = gr.Textbox(
|
| 149 |
+
label="✏️ Prompt",
|
| 150 |
+
placeholder="Describe the image you want to create...",
|
| 151 |
+
lines=3
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
random_button = gr.Button("🎲 Random Prompt", variant="secondary")
|
| 155 |
+
|
| 156 |
+
with gr.Row():
|
| 157 |
+
height_input = gr.Slider(512, 2048, 1024, step=64, label="Height")
|
| 158 |
+
width_input = gr.Slider(512, 2048, 1024, step=64, label="Width")
|
| 159 |
+
|
| 160 |
+
num_images_input = gr.Slider(1, 4, 2, step=1, label="🖼️ Number of Images")
|
| 161 |
+
|
| 162 |
+
with gr.Accordion("⚙️ Options", open=False):
|
| 163 |
+
steps_slider = gr.Slider(
|
| 164 |
+
minimum=1,
|
| 165 |
+
maximum=30,
|
| 166 |
+
step=1,
|
| 167 |
+
value=18,
|
| 168 |
+
label="Inference Steps"
|
| 169 |
+
)
|
| 170 |
+
seed_input = gr.Slider(
|
| 171 |
+
label="Seed",
|
| 172 |
+
minimum=0,
|
| 173 |
+
maximum=MAX_SEED,
|
| 174 |
+
step=1,
|
| 175 |
+
value=42
|
| 176 |
+
)
|
| 177 |
+
randomize_seed_checkbox = gr.Checkbox(
|
| 178 |
+
label="Randomize Seed",
|
| 179 |
+
value=True
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
generate_button = gr.Button(
|
| 183 |
+
"✨ Generate Image",
|
| 184 |
+
variant="primary"
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
with gr.Column(scale=1):
|
| 188 |
+
output_gallery = gr.Gallery(
|
| 189 |
+
label="🎨 Generated Images",
|
| 190 |
+
height=450,
|
| 191 |
+
columns=2
|
| 192 |
+
)
|
| 193 |
+
used_seed_output = gr.Number(label="Seed Used", interactive=False)
|
| 194 |
+
|
| 195 |
+
random_button.click(
|
| 196 |
+
fn=get_random_prompt,
|
| 197 |
+
outputs=[prompt_input]
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
generate_button.click(
|
| 201 |
+
fn=generate_image,
|
| 202 |
+
inputs=[prompt_input, height_input, width_input, steps_slider, seed_input, randomize_seed_checkbox, num_images_input],
|
| 203 |
+
outputs=[output_gallery, used_seed_output],
|
| 204 |
+
)
|
| 205 |
+
|
| 206 |
+
if __name__ == "__main__":
|
| 207 |
+
demo.queue().launch()
|
Adult_repo/requirements.txt
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
gradio
|
| 2 |
+
git+https://github.com/huggingface/diffusers
|
| 3 |
+
transformers
|
| 4 |
+
kernels
|
| 5 |
+
|
| 6 |
+
torch
|
| 7 |
+
transformers
|
| 8 |
+
accelerate
|
| 9 |
+
spaces
|
FIXED_README.md
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ImageForge - Fixed & Working!
|
| 2 |
+
|
| 3 |
+
## ✓ Problem gelöst!
|
| 4 |
+
|
| 5 |
+
Die Bildgenerierung funktioniert jetzt mit echten KI-Modellen!
|
| 6 |
+
|
| 7 |
+
### Was wurde behoben:
|
| 8 |
+
|
| 9 |
+
1. **GPU-Aktivierung**: PyTorch mit CUDA 12.1 installiert - GPU wird jetzt verwendet!
|
| 10 |
+
2. **Threading-Fehler**: Race Condition bei parallelen Jobs behoben
|
| 11 |
+
3. **Modell-Download**: `segmind/tiny-sd` vollständig heruntergeladen (~1 GB)
|
| 12 |
+
4. **urllib3-Problem**: Downgrade auf 1.26.20 (behob Import-Fehler bei diffusers)
|
| 13 |
+
5. **Default Model**: API verwendet jetzt `localai` statt `dummy` als Standard
|
| 14 |
+
|
| 15 |
+
### Wie starten:
|
| 16 |
+
|
| 17 |
+
Doppelklick auf `START_IMAGEFORGE.bat` in `d:\VSC Codes\Bild\`
|
| 18 |
+
|
| 19 |
+
Das startet:
|
| 20 |
+
- Backend auf http://127.0.0.1:8008
|
| 21 |
+
- Frontend auf http://127.0.0.1:5173
|
| 22 |
+
|
| 23 |
+
Nach ~15-20 Sekunden ist das Programm bereit!
|
| 24 |
+
|
| 25 |
+
### Erste Bildgenerierung:
|
| 26 |
+
|
| 27 |
+
1. Browser öffnet sich automatisch bei http://127.0.0.1:5173
|
| 28 |
+
2. Prompt eingeben (z.B. "a beautiful sunset over mountains")
|
| 29 |
+
3. Auf "Generate" klicken
|
| 30 |
+
4. Warten (~45-60 Sekunden für das erste Bild)
|
| 31 |
+
5. **Ergebnis**: Echtes KI-generiertes Bild (>400 KB), KEIN weißes Dummy-Bild mehr!
|
| 32 |
+
|
| 33 |
+
### Verwendetes Modell:
|
| 34 |
+
|
| 35 |
+
- **segmind/tiny-sd** (Standard)
|
| 36 |
+
- Schnell: ~45-60 Sekunden pro Bild (512x512)
|
| 37 |
+
- Speicher: ~2 GB VRAM
|
| 38 |
+
- Qualität: Gut für schnelle Tests
|
| 39 |
+
|
| 40 |
+
### Optional: Besseres Modell (in Zukunft):
|
| 41 |
+
|
| 42 |
+
Der Download von `runwayml/stable-diffusion-v1-5` wurde gestartet (~4 GB).
|
| 43 |
+
Wenn vollständig heruntergeladen, kann das Modell in `backend/app/local_ai/engine.py`
|
| 44 |
+
umgestellt werden (Zeile 49):
|
| 45 |
+
|
| 46 |
+
```python
|
| 47 |
+
self.model_id = os.getenv("IMAGEFORGE_LOCALAI_MODEL", "runwayml/stable-diffusion-v1-5")
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
### Systemanforderungen (erfüllt):
|
| 51 |
+
|
| 52 |
+
- ✓ Python 3.12.10 mit venv
|
| 53 |
+
- ✓ PyTorch 2.5.1+cu121 (CUDA 12.1)
|
| 54 |
+
- ✓ NVIDIA GeForce GTX 1050 (CUDA verfügbar)
|
| 55 |
+
- ✓ diffusers 0.36.0
|
| 56 |
+
- ✓ urllib3 1.26.20 (wichtig!)
|
| 57 |
+
|
| 58 |
+
### Bekannte Einschränkungen:
|
| 59 |
+
|
| 60 |
+
- Erste Generation pro Sitzung dauert länger (Modell laden: ~30 Sekunden)
|
| 61 |
+
- tiny-sd erzeugt kleinere Bilder mit weniger Details als größere Modelle
|
| 62 |
+
- GTX 1050 hat nur 2 GB VRAM - größere Modelle nicht möglich
|
| 63 |
+
|
| 64 |
+
### Bei Problemen:
|
| 65 |
+
|
| 66 |
+
Wenn wieder weiße Bilder erscheinen:
|
| 67 |
+
|
| 68 |
+
1. Backend-Fenster prüfen auf Fehler
|
| 69 |
+
2. Sicherstellen dass urllib3==1.26.20 installiert ist:
|
| 70 |
+
```
|
| 71 |
+
d:\VSC Codes\Bild\.venv\Scripts\python.exe -m pip list | findstr urllib3
|
| 72 |
+
```
|
| 73 |
+
3. Falls 2.x: Downgrade durchführen:
|
| 74 |
+
```
|
| 75 |
+
d:\VSC Codes\Bild\.venv\Scripts\python.exe -m pip install "urllib3<2.0" --force-reinstall
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
### Nächste Schritte (optional):
|
| 79 |
+
|
| 80 |
+
1. **Qualität verbessern**: SD 1.5 Modell verwenden (sobald Download fertig)
|
| 81 |
+
2. **Frontend anpassen**: UI-Elemente nach Wunsch ändern
|
| 82 |
+
3. **Weitere Modelle**: Andere HuggingFace Modelle testen
|
| 83 |
+
|
| 84 |
+
---
|
| 85 |
+
|
| 86 |
+
**Status**: ✓ Funktioniert! Echte Bilder werden generiert!
|
| 87 |
+
**Letzte Änderung**: 2026-02-19 22:20
|
START_IMAGEFORGE.bat
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@echo off
REM Launches the full ImageForge stack: FastAPI backend + Vite frontend,
REM then opens the UI in the default browser. Press any key to stop both.
echo ======================================
echo ImageForge Stack Startup
echo ======================================
echo.

REM Set HuggingFace cache and environment variables
set HF_HOME=d:/VSC Codes/Bild/.cache/hf
set TRANSFORMERS_CACHE=d:/VSC Codes/Bild/.cache/hf

REM Work from the imageforge folder next to this script (%~dp0 = script dir).
cd /d "%~dp0imageforge"

echo [1/2] Starting Backend...
REM NOTE(review): the env vars are repeated inside the child cmd; children of
REM `start` normally inherit the parent environment, so this looks redundant
REM but is harmless -- confirm before removing.
start "ImageForge Backend" /MIN cmd /k "set HF_HOME=d:/VSC Codes/Bild/.cache/hf && set TRANSFORMERS_CACHE=d:/VSC Codes/Bild/.cache/hf && set IMAGEFORGE_HOST=127.0.0.1 && set IMAGEFORGE_PORT=8008 && D:\VSC Codes\Bild\.venv\Scripts\python.exe -m backend.app.main"

REM Give the backend a head start before launching the frontend.
timeout /t 3 /nobreak >nul

echo [2/2] Starting Frontend...
cd frontend
start "ImageForge Frontend" /MIN cmd /k "npm run dev"

timeout /t 5 /nobreak >nul

echo.
echo ======================================
echo Services Started!
echo ======================================
echo Backend: http://127.0.0.1:8008
echo Frontend: http://127.0.0.1:5173
echo ======================================
echo.
echo Opening UI in browser...
timeout /t 2 /nobreak >nul
start http://127.0.0.1:5173

echo.
echo Stack is running! Press any key to stop all services...
pause >nul

echo.
echo Stopping services...
REM Kill both minimized child windows by their title prefix.
taskkill /FI "WINDOWTITLE eq ImageForge*" /F >nul 2>&1
echo Done!
|
ZIMAGETURBO_QUICKSTART.md
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 🚀 Z-Image Turbo - Schnellstart für PixelForge
|
| 2 |
+
|
| 3 |
+
## ⏱️ 5 Minuten Installation & Test
|
| 4 |
+
|
| 5 |
+
### 1️⃣ Google Colab Vorbereitung (2 Min)
|
| 6 |
+
|
| 7 |
+
```bash
|
| 8 |
+
# 1. Gehe zu https://colab.research.google.com
|
| 9 |
+
# 2. Wähle: File → Open notebook → Upload
|
| 10 |
+
# 3. Wähle: pixelforge_colab_test.ipynb
|
| 11 |
+
# 4. Runtime → Change runtime type → GPU (T4)
|
| 12 |
+
# 5. Registriere bei https://ngrok.com (kostenlos)
|
| 13 |
+
# 6. Kopiere Auth Token aus Account Settings
|
| 14 |
+
```
|
| 15 |
+
|
| 16 |
+
### 2️⃣ Colab Notebook Ausführen (2 Min)
|
| 17 |
+
|
| 18 |
+
```python
|
| 19 |
+
# Cell 1: Abhängigkeiten installieren
|
| 20 |
+
# → Drücke Shift+Enter zum Ausführen
|
| 21 |
+
|
| 22 |
+
# Cell 2: GPU-Check
|
| 23 |
+
# → Sollte zeigen: "GPU T4 erkannt ✓"
|
| 24 |
+
|
| 25 |
+
# Cell 3: Modell laden
|
| 26 |
+
# → Lädt Stable Diffusion (ca. 2 GB)
|
| 27 |
+
|
| 28 |
+
# Cell 4: Test-Generierung
|
| 29 |
+
# → Generiert ein Testbild (~30 Sekunden)
|
| 30 |
+
|
| 31 |
+
# Cell 5: API-Server starten
|
| 32 |
+
# → Ersetze "YOUR_NGROK_TOKEN" mit deinem Token
|
| 33 |
+
# → Drücke Ausführen & kopiere Public URL
|
| 34 |
+
# → Z.B.: https://abc123def456.ngrok.io
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
### 3️⃣ PixelForge Starten (1 Min)
|
| 38 |
+
|
| 39 |
+
**Option A: PowerShell (Empfohlen)**
|
| 40 |
+
```powershell
|
| 41 |
+
cd "d:\VSC Codes\Bild"
|
| 42 |
+
.\start_zimageturbo.ps1
|
| 43 |
+
# → Gebe Colab Ngrok-URL ein
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
**Option B: Batch-Datei**
|
| 47 |
+
```cmd
|
| 48 |
+
cd d:\VSC Codes\Bild
|
| 49 |
+
start_zimageturbo.bat
|
| 50 |
+
# → Gebe Colab Ngrok-URL ein
|
| 51 |
+
```
|
| 52 |
+
|
| 53 |
+
### 4️⃣ Terminal-Befehle (Manuell)
|
| 54 |
+
|
| 55 |
+
```powershell
|
| 56 |
+
# Backend
|
| 57 |
+
cd "d:\VSC Codes\Bild\imageforge"
|
| 58 |
+
$env:ZIMAGETURBO_API_URL = "https://abc123def456.ngrok.io"
|
| 59 |
+
$env:ZIMAGETURBO_TIMEOUT = "300"
|
| 60 |
+
& "D:/VSC Codes/Bild/.venv/Scripts/python.exe" -m backend.app.main
|
| 61 |
+
|
| 62 |
+
# Separate PowerShell Tab:
|
| 63 |
+
cd "d:\VSC Codes\Bild\imageforge\frontend"
|
| 64 |
+
npm run dev:web
|
| 65 |
+
```
|
| 66 |
+
|
| 67 |
+
### 5️⃣ Im Browser Testen
|
| 68 |
+
|
| 69 |
+
```
|
| 70 |
+
1. Öffne: http://127.0.0.1:5173
|
| 71 |
+
2. Wähle "Z-Image Turbo" im Modell-Dropdown
|
| 72 |
+
3. Gib Prompt ein: "A beautiful sunset over mountains"
|
| 73 |
+
4. Klicke "Generate"
|
| 74 |
+
5. Warte 30-60 Sekunden
|
| 75 |
+
6. Bild sollte in Browser erscheinen ✓
|
| 76 |
+
```
|
| 77 |
+
|
| 78 |
+
---
|
| 79 |
+
|
| 80 |
+
## ✅ Checkliste
|
| 81 |
+
|
| 82 |
+
- [ ] Colab-Konto aktiviert & GPU eingeschaltet
|
| 83 |
+
- [ ] ngrok-Account registriert & Auth Token kopiert
|
| 84 |
+
- [ ] pixelforge_colab_test.ipynb in Colab hochgeladen
|
| 85 |
+
- [ ] Alle 5 Cells im Notebook ausgeführt
|
| 86 |
+
- [ ] Ngrok Public URL kopiert (z.B. https://abc123.ngrok.io)
|
| 87 |
+
- [ ] start_zimageturbo.ps1 oder .bat ausgeführt
|
| 88 |
+
- [ ] ZIMAGETURBO_API_URL korrekt gesetzt
|
| 89 |
+
- [ ] Backend läuft: http://127.0.0.1:8008/health (sollte OK sein)
|
| 90 |
+
- [ ] Frontend läuft: http://127.0.0.1:5173 (sollte React-UI zeigen)
|
| 91 |
+
- [ ] Test-Generierung abgeschlossen (echtes Bild!)
|
| 92 |
+
|
| 93 |
+
---
|
| 94 |
+
|
| 95 |
+
## 🆘 Häufige Probleme
|
| 96 |
+
|
| 97 |
+
| Problem | Lösung |
|
| 98 |
+
|---------|--------|
|
| 99 |
+
| "API nicht erreichbar" | Colab noch am Laden? Oder Ngrok Token ungültig? |
|
| 100 |
+
| "Timeout nach 300s" | Colab-Model zu lange zum Laden? Timeout erhöhen: `$env:ZIMAGETURBO_TIMEOUT = "600"` |
|
| 101 |
+
| "Colab-Sitzung verloren" | Google Colab beendet nach 4h Leerlauf. Notebook neu starten! |
|
| 102 |
+
| "Port 5173 bereits in Verwendung" | Alte Vite-Instanz läuft noch: `Get-Process -Name "node" \| Stop-Process` |
|
| 103 |
+
| "Fehler: Unknown Provider" | Backend neu starten nach Env-Variablen-Änderung! |
|
| 104 |
+
|
| 105 |
+
---
|
| 106 |
+
|
| 107 |
+
## 📊 Performance erwartet
|
| 108 |
+
|
| 109 |
+
| Hardware | Zeit | Qualität |
|
| 110 |
+
|----------|------|----------|
|
| 111 |
+
| Colab T4 GPU | 30-60s | High ⭐⭐⭐ |
|
| 112 |
+
| Lokal tiny-sd | 5-10s | Medium ⭐⭐ |
|
| 113 |
+
| i9 + RTX 4070 | 5-10s | High ⭐⭐⭐ |
|
| 114 |
+
| RunPod A100 | 2-3s | Ultra ⭐⭐⭐⭐ |
|
| 115 |
+
|
| 116 |
+
---
|
| 117 |
+
|
| 118 |
+
## 🔧 Umgebungsvariablen
|
| 119 |
+
|
| 120 |
+
```powershell
|
| 121 |
+
# Manuell (falls nicht über Skript):
|
| 122 |
+
$env:ZIMAGETURBO_API_URL = "https://deine-ngrok-url.ngrok.io"
|
| 123 |
+
$env:ZIMAGETURBO_TIMEOUT = "300" # Sekunden
|
| 124 |
+
$env:ZIMAGETURBO_PROXY = "" # Optional für Proxy
|
| 125 |
+
|
| 126 |
+
# Oder permanent in Windows:
|
| 127 |
+
# Systemsteuerung → Umgebungsvariablen → Neue Var hinzufügen
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
---
|
| 131 |
+
|
| 132 |
+
## 🌐 Alternative: Lokale Installation (Ohne Colab)
|
| 133 |
+
|
| 134 |
+
```bash
|
| 135 |
+
# Wenn du Z-Image Turbo lokal installieren möchtest:
|
| 136 |
+
# 1. Lade Modell von HuggingFace herunter
|
| 137 |
+
# 2. Starte lokalen Flask-Server (ähnlich Colab-Notebook)
|
| 138 |
+
# 3. Setze ZIMAGETURBO_API_URL auf http://localhost:5000
|
| 139 |
+
```
|
| 140 |
+
|
| 141 |
+
---
|
| 142 |
+
|
| 143 |
+
## 📝 Mehrere Modelle testen
|
| 144 |
+
|
| 145 |
+
Im ZIMAGETURBO_SETUP.md gibt es weitere Optionen:
|
| 146 |
+
- **Stable Diffusion v1.5** (schnell, gut)
|
| 147 |
+
- **Stable Diffusion XL** (langsam, sehr gut)
|
| 148 |
+
- **Custom Models** von HuggingFace
|
| 149 |
+
|
| 150 |
+
Ändere einfach im Colab Cell 3 das Modell!
|
| 151 |
+
|
| 152 |
+
---
|
| 153 |
+
|
| 154 |
+
## 💡 Tipps
|
| 155 |
+
|
| 156 |
+
1. **Colab bleibt aktiv**: Öffne DevTools (F12) → Keep console open
|
| 157 |
+
2. **Bessere Prompts**: Nutze Details wie "4K, professional, highly detailed"
|
| 158 |
+
3. **Batch-Generierung**: Mehrere Prompts nacheinander → Job-Queue
|
| 159 |
+
4. **Speichern**: Generierte Bilder landen in output/YYYY-MM-DD/job_*/
|
| 160 |
+
|
| 161 |
+
---
|
| 162 |
+
|
| 163 |
+
## ❓ Mehr Hilfe
|
| 164 |
+
|
| 165 |
+
- **Technisches**: Siehe [ZIMAGETURBO_SETUP.md](ZIMAGETURBO_SETUP.md)
|
| 166 |
+
- **Provider Code**: [zimageturbo_provider.py](imageforge/backend/app/providers/zimageturbo_provider.py)
|
| 167 |
+
- **Colab-Notebook**: [pixelforge_colab_test.ipynb](pixelforge_colab_test.ipynb)
|
| 168 |
+
|
| 169 |
+
---
|
| 170 |
+
|
| 171 |
+
**Viel Spaß beim Testen! 🎨✨**
|
ZIMAGETURBO_SETUP.md
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Z-Image Turbo Integration für PixelForge
|
| 2 |
+
|
| 3 |
+
## Schritt 1: Google Colab Setup
|
| 4 |
+
|
| 5 |
+
1. Öffne `pixelforge_colab_test.ipynb` in Google Colab
|
| 6 |
+
2. Führe alle Cells aus:
|
| 7 |
+
- Dependencies installieren
|
| 8 |
+
- GPU-Check
|
| 9 |
+
- Modell laden (Colab T4 = ~2-3 Min)
|
| 10 |
+
- API-Server starten
|
| 11 |
+
|
| 12 |
+
3. **Ngrok einrichten (für Public URL):**
|
| 13 |
+
```bash
|
| 14 |
+
# Registriere dich auf ngrok.com (kostenlos)
|
| 15 |
+
# Kopiere Auth-Token in Colab Cell 5
|
| 16 |
+
```
|
| 17 |
+
|
| 18 |
+
## Schritt 2: PixelForge konfigurieren
|
| 19 |
+
|
| 20 |
+
Setze diese **Umgebungsvariablen** vor dem Backend-Start:
|
| 21 |
+
|
| 22 |
+
```powershell
|
| 23 |
+
$env:ZIMAGETURBO_API_URL = "https://dein-ngrok-url.ngrok.io" # Colab Public URL
|
| 24 |
+
$env:ZIMAGETURBO_API_KEY = "" # Leer, wenn keine Auth benötigt
|
| 25 |
+
$env:ZIMAGETURBO_TIMEOUT = "300" # 5 Minuten
|
| 26 |
+
```
|
| 27 |
+
|
| 28 |
+
**Beispiel (Windows PowerShell):**
|
| 29 |
+
```powershell
|
| 30 |
+
cd "d:/VSC Codes/Bild/imageforge"
|
| 31 |
+
$env:ZIMAGETURBO_API_URL = "https://abc123.ngrok.io"
|
| 32 |
+
& "D:/VSC Codes/Bild/.venv/Scripts/python.exe" -m backend.app.main
|
| 33 |
+
```
|
| 34 |
+
|
| 35 |
+
## Schritt 3: Backend-Start
|
| 36 |
+
|
| 37 |
+
Backend wird neu gestartet und Z-Image Turbo sollte verfügbar sein:
|
| 38 |
+
|
| 39 |
+
```
|
| 40 |
+
[2026-02-20 12:34:56] ✓ Z-Image Turbo API verfügbar: https://abc123.ngrok.io
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
## Schritt 4: Im UI verwenden
|
| 44 |
+
|
| 45 |
+
1. Öffne http://127.0.0.1:5173/
|
| 46 |
+
2. Wechsle das Modell zu **"Z-Image Turbo"**
|
| 47 |
+
3. Gib einen Prompt ein und klicke **"Generate"**
|
| 48 |
+
4. Das Bild wird über Colab generiert ☁️
|
| 49 |
+
|
| 50 |
+
## Lokale Installation (statt Colab)
|
| 51 |
+
|
| 52 |
+
Wenn du Z-Image Turbo lokal installieren möchtest:
|
| 53 |
+
|
| 54 |
+
```python
|
| 55 |
+
# 1. Download & Install
|
| 56 |
+
pip install z-image-turbo torch diffusers
|
| 57 |
+
|
| 58 |
+
# 2. Lokaler API-Server (optional)
|
| 59 |
+
from flask import Flask
|
| 60 |
+
from z_image_turbo import Pipeline
|
| 61 |
+
|
| 62 |
+
app = Flask(__name__)
|
| 63 |
+
pipe = Pipeline("zimageturbo-fast") # Oder dein Modell
|
| 64 |
+
|
| 65 |
+
@app.route('/health', methods=['GET'])
|
| 66 |
+
def health():
|
| 67 |
+
return {"status": "ok"}
|
| 68 |
+
|
| 69 |
+
@app.route('/generate', methods=['POST'])
|
| 70 |
+
def generate():
|
| 71 |
+
# ... generiere Bild ...
|
| 72 |
+
pass
|
| 73 |
+
```
|
| 74 |
+
|
| 75 |
+
Dann: `ZIMAGETURBO_API_URL=http://localhost:5000`
|
| 76 |
+
|
| 77 |
+
## Tipps & Troubleshooting
|
| 78 |
+
|
| 79 |
+
| Problem | Lösung |
|
| 80 |
+
|---------|--------|
|
| 81 |
+
| **API antwortet nicht** | Ngrok-URL in Colab und Env-Var prüfen |
|
| 82 |
+
| **Timeout (300s zu kurz)** | `ZIMAGETURBO_TIMEOUT=600` erhöhen |
|
| 83 |
+
| **Colab-Session beendet** | Notebook neu starten (Ngrok-Token erforderlich) |
|
| 84 |
+
| **"Unknown provider"** | Backend-Prozess beenden: `Stop-Process -Name python` + Neustart |
|
| 85 |
+
|
| 86 |
+
## Performance
|
| 87 |
+
|
| 88 |
+
| Setup | Speed | Kosten |
|
| 89 |
+
|-------|-------|--------|
|
| 90 |
+
| **Colab + T4** | 30-60s/Bild | Kostenlos |
|
| 91 |
+
| **Lokal (RTX 4070)** | 5-10s/Bild | ~€500 |
|
| 92 |
+
| **RunPod (A100)** | 2-3s/Bild | €0.50-1.00/h |
|
| 93 |
+
|
| 94 |
+
---
|
| 95 |
+
|
| 96 |
+
**Viel Spaß mit Z-Image Turbo! 🚀**
|
add_exit.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
EXIT:128
|
download_model.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""
Download and cache a real Stable Diffusion model for offline use.

Tries ``stabilityai/stable-diffusion-2-1`` first and falls back to the
much smaller ``segmind/tiny-sd`` on failure. This runs once, then the
model is cached forever.

Exit codes: 0 on any successful download, 1 when both attempts fail.
"""

import os
import sys
from pathlib import Path

# Setup cache: keep all HuggingFace / torch artifacts in a project-local
# directory so the backend can run fully offline afterwards.
cache_root = Path("d:/VSC Codes/Bild/.cache/hf")
cache_root.mkdir(parents=True, exist_ok=True)

os.environ['HF_HOME'] = str(cache_root)
os.environ['TORCH_HOME'] = str(cache_root / 'torch')

print(f"Cache directory: {cache_root}")
print()


def _snapshot(model_id: str) -> str:
    """Download *model_id* into the local cache and return the snapshot path.

    The import is function-local on purpose: a missing ``huggingface_hub``
    then surfaces as a normal, catchable exception at call time (matching
    the original try/except flow) instead of crashing at module import.
    """
    from huggingface_hub import snapshot_download

    return snapshot_download(
        repo_id=model_id,
        cache_dir=str(cache_root),
        # Plain file copies avoid symlink-permission issues on Windows.
        local_dir_use_symlinks=False,
    )


# Use direct API instead of CLI.
print("Downloading Stable Diffusion 2.1...")
print("(This is one-time setup, then cached offline)")
print()

try:
    model_id = "stabilityai/stable-diffusion-2-1"
    print(f"Model: {model_id}")
    print()
    print("Downloading (this may take 5-10 minutes on first run)...")

    model_path = _snapshot(model_id)

    print()
    print(f"✓ SUCCESS! Model cached at:")
    print(f"  {model_path}")
    print()
    print("Backend can now generate images offline!")
    sys.exit(0)

except Exception as e:
    print(f"✗ FAILED: {type(e).__name__}: {e}")
    print()
    print("Trying tiny-sd as fallback (smaller, faster)...")
    print()

    try:
        model_path = _snapshot("segmind/tiny-sd")
        print(f"✓ Fallback successful! Cached at: {model_path}")
        sys.exit(0)
    except Exception as e2:
        print(f"✗ Fallback failed: {e2}")
        sys.exit(1)
|
download_sd15.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""One-time download of the full Stable Diffusion 1.5 model (~4 GB).

Caches the snapshot into the project-local HuggingFace cache directory
so ImageForge can load it offline. Exits 0 on success, 1 on failure.
"""
import os
import sys
from pathlib import Path
from huggingface_hub import snapshot_download

# Force plain file copies instead of symlinks (avoids Windows
# symlink-permission problems in the HF cache).
os.environ['HF_HUB_DISABLE_SYMLINKS'] = '1'

# Project-local cache shared with the backend; create it up front.
cache = Path("d:/VSC Codes/Bild/.cache/hf")
cache.mkdir(parents=True, exist_ok=True)

print("Downloading runwayml/stable-diffusion-v1-5...")
print("(~4GB, this is one-time setup)")
print()

try:
    # snapshot_download pulls every file of the repo into the cache and
    # returns the local snapshot directory.
    path = snapshot_download(
        "runwayml/stable-diffusion-v1-5",
        cache_dir=str(cache),
        local_dir_use_symlinks=False
    )
    print()
    print(f"✓ SUCCESS!")
    print(f"Model cached to: {path}")
    print()
    print("ImageForge can now generate real images!")
    sys.exit(0)
except Exception as e:
    # Network errors, disk-full, missing huggingface_hub, etc. all land here.
    print(f"✗ Error: {e}")
    sys.exit(1)
|
download_tiny_sd.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Download tiny-sd model - FAST download, REAL images (not dummy).

One-time download of ``segmind/tiny-sd`` (~600 MB) into the
project-local HuggingFace cache. Exits 0 on success, 1 on failure.
"""
import os
import sys
from pathlib import Path
from huggingface_hub import snapshot_download

# Force plain file copies instead of symlinks (avoids Windows
# symlink-permission problems in the HF cache).
os.environ['HF_HUB_DISABLE_SYMLINKS'] = '1'
cache = Path("d:/VSC Codes/Bild/.cache/hf")
# Fix: ensure the cache directory exists, consistent with the sibling
# download_model.py / download_sd15.py scripts.
cache.mkdir(parents=True, exist_ok=True)

print("=" * 60)
print(" QUICK MODEL DOWNLOAD: tiny-sd")
print("=" * 60)
print()
print("This is a small model (~600MB) that generates REAL images")
print("(Not as good as SD 1.5, but better than white dummy images)")
print()

try:
    print("Downloading segmind/tiny-sd...")
    # snapshot_download returns the local snapshot directory in the cache.
    path = snapshot_download(
        "segmind/tiny-sd",
        cache_dir=str(cache),
        local_dir_use_symlinks=False
    )
    print()
    print("=" * 60)
    print("✓ SUCCESS!")
    print("=" * 60)
    print(f"Model cached at: {path}")
    print()
    print("Backend can now generate REAL images!")
    print("Just restart the backend to use it.")
    sys.exit(0)
except Exception as e:
    # Network errors, disk-full, missing huggingface_hub, etc. all land here.
    print(f"✗ Error: {e}")
    sys.exit(1)
|
imageforge/.github/workflows/ci.yml
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Continuous integration: Python backend tests, then frontend typecheck,
# renderer build, and a Playwright end-to-end smoke test.
name: CI

# Run on every push and on pull requests.
on:
  push:
  pull_request:

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - name: Install Python deps
        run: pip install -r requirements.txt
      - name: Run backend tests
        run: pytest
      # Frontend toolchain (Node 20) for typecheck, build, and E2E.
      - uses: actions/setup-node@v4
        with:
          node-version: "20"
      - name: Install frontend deps
        # `npm ci` gives a clean, lockfile-exact install.
        run: npm --prefix frontend ci
      - name: Typecheck frontend
        run: npm --prefix frontend run typecheck
      - name: Build renderer
        run: npm --prefix frontend run build:renderer
      - name: Install Playwright browser
        run: npx --prefix frontend playwright install --with-deps chromium
      - name: Run E2E smoke
        run: npm --prefix frontend run test:e2e
|
imageforge/.gitignore
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
.venv/
|
| 4 |
+
venv/
|
| 5 |
+
node_modules/
|
| 6 |
+
frontend/node_modules/
|
| 7 |
+
frontend/dist/
|
| 8 |
+
frontend/dist-electron/
|
| 9 |
+
output/
|
| 10 |
+
app.log
|
| 11 |
+
prompt_history.json
|
| 12 |
+
.pytest_cache/
|
| 13 |
+
frontend/test-results/
|
| 14 |
+
frontend/playwright-report/
|
| 15 |
+
backups/
|
| 16 |
+
jobs_state.json
|
| 17 |
+
settings.json
|
| 18 |
+
admin_audit.log
|
imageforge/.npmrc
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
cache=D:/AI/npm-cache
|
| 2 |
+
logs-dir=D:/AI/npm-cache/_logs
|
imageforge/README.md
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ImageForge
|
| 2 |
+
|
| 3 |
+
ImageForge ist eine lokal laufende Desktop-App zur Bild-Erstellung per Prompt. Die App nutzt ein Python-Backend (FastAPI + Job-Queue) und ein Electron+React-Frontend.
|
| 4 |
+
|
| 5 |
+
## Features
|
| 6 |
+
|
| 7 |
+
- Prompt + Negative Prompt
|
| 8 |
+
- Modellauswahl (`dummy`, `localai`, `diffusion`)
|
| 9 |
+
- Bildtyp-Presets und Stil-Presets
|
| 10 |
+
- Image-to-Image (Startbild + Strength)
|
| 11 |
+
- Dashboard (Queue, Status, Retry/Cancel, Compare)
|
| 12 |
+
- Preset-System (CRUD)
|
| 13 |
+
- Export (PNG/JPG/WEBP)
|
| 14 |
+
- Prompt-Versionierung (`config_hash` in `meta.json`)
|
| 15 |
+
- API-Key + Rollenmodell (`viewer`, `operator`, `admin`)
|
| 16 |
+
- Rate-Limit pro Client
|
| 17 |
+
- Health + Readiness + Metrics
|
| 18 |
+
- Recovery von Job-Status nach Neustart
|
| 19 |
+
- Storage-Governance (Retention-Cleanup)
|
| 20 |
+
|
| 21 |
+
## Setup
|
| 22 |
+
|
| 23 |
+
```powershell
|
| 24 |
+
cd imageforge
|
| 25 |
+
python -m venv .venv
|
| 26 |
+
.\.venv\Scripts\Activate.ps1
|
| 27 |
+
pip install -r requirements.txt
|
| 28 |
+
npm --prefix frontend install
|
| 29 |
+
```
|
| 30 |
+
|
| 31 |
+
## Start
|
| 32 |
+
|
| 33 |
+
```powershell
|
| 34 |
+
npm run dev
|
| 35 |
+
```
|
| 36 |
+
|
| 37 |
+
Für den kompletten lokalen Stack (Backend + Frontend, inkl. Healthcheck):
|
| 38 |
+
|
| 39 |
+
```powershell
|
| 40 |
+
npm run dev:stack
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
## Serverbetrieb
|
| 44 |
+
|
| 45 |
+
```powershell
|
| 46 |
+
$env:IMAGEFORGE_HOST="0.0.0.0"
|
| 47 |
+
$env:IMAGEFORGE_PORT="8008"
|
| 48 |
+
$env:IMAGEFORGE_CORS_ORIGINS="http://localhost:5173"
|
| 49 |
+
$env:IMAGEFORGE_API_KEYS="viewerKey:viewer,opsKey:operator,adminKey:admin"
|
| 50 |
+
$env:IMAGEFORGE_RATE_LIMIT_PER_MIN="120"
|
| 51 |
+
$env:IMAGEFORGE_CONTENT_PROFILE="internal-relaxed"
|
| 52 |
+
$env:IMAGEFORGE_ADMIN_TOKEN="change-me"
|
| 53 |
+
python -m backend.app.main
|
| 54 |
+
```
|
| 55 |
+
|
| 56 |
+
## Wichtige Endpunkte
|
| 57 |
+
|
| 58 |
+
- `GET /health`
|
| 59 |
+
- `GET /ready`
|
| 60 |
+
- `GET /metrics`
|
| 61 |
+
- `GET /metrics/prometheus`
|
| 62 |
+
- `POST /generate`
|
| 63 |
+
- `GET /jobs`
|
| 64 |
+
- `POST /jobs/{id}/retry`
|
| 65 |
+
- `POST /jobs/{id}/cancel`
|
| 66 |
+
- `GET /dashboard/stats`
|
| 67 |
+
- `GET/POST/DELETE /presets`
|
| 68 |
+
- `POST /export`
|
| 69 |
+
- `GET/PUT /admin/settings`
|
| 70 |
+
- `POST /admin/cleanup`
|
| 71 |
+
|
| 72 |
+
## Security und Policy
|
| 73 |
+
|
| 74 |
+
- API-Key Header: `X-ImageForge-Api-Key`
|
| 75 |
+
- Admin Override Header: `X-ImageForge-Admin-Token`
|
| 76 |
+
- Policy Profile:
|
| 77 |
+
- `strict`
|
| 78 |
+
- `internal-relaxed`
|
| 79 |
+
- Audit-Logs:
|
| 80 |
+
- `policy_audit.log`
|
| 81 |
+
- `admin_audit.log`
|
| 82 |
+
|
| 83 |
+
## Backup / Restore
|
| 84 |
+
|
| 85 |
+
```powershell
|
| 86 |
+
npm run backup
|
| 87 |
+
npm run restore -- -Source backups\imageforge_backup_YYYYMMDD_HHMMSS
|
| 88 |
+
```
|
| 89 |
+
|
| 90 |
+
## Tests
|
| 91 |
+
|
| 92 |
+
```powershell
|
| 93 |
+
npm run test
|
| 94 |
+
npm run test:e2e
|
| 95 |
+
```
|
| 96 |
+
|
| 97 |
+
## CI
|
| 98 |
+
|
| 99 |
+
GitHub Actions Workflow liegt in `.github/workflows/ci.yml`.
|
| 100 |
+
|
| 101 |
+
## Optional LocalAI / Diffusion
|
| 102 |
+
|
| 103 |
+
```powershell
|
| 104 |
+
pip install diffusers torch transformers accelerate
|
| 105 |
+
$env:IMAGEFORGE_LOCALAI_MODEL="stabilityai/sd-turbo"
|
| 106 |
+
$env:IMAGEFORGE_LOCALAI_IMAGE_TIMEOUT_SECONDS="180"
|
| 107 |
+
$env:IMAGEFORGE_LOCALAI_LOCAL_ONLY="1"
|
| 108 |
+
$env:IMAGEFORGE_ENABLE_ATTENTION_SLICING="1"
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
Wenn `torch.cuda.is_available()` auf `False` bleibt, ist oft eine CPU-only Torch-Build installiert.
|
| 112 |
+
Für NVIDIA-GPU unter Windows kann eine CUDA-Build so installiert werden:
|
| 113 |
+
|
| 114 |
+
```powershell
|
| 115 |
+
python -m pip install --upgrade --index-url https://download.pytorch.org/whl/cu121 torch torchvision torchaudio
|
| 116 |
+
```
|
| 117 |
+
|
| 118 |
+
## Optional AUTOMATIC1111 Integration
|
| 119 |
+
|
| 120 |
+
ImageForge kann AUTOMATIC1111 als Provider nutzen (`model = a1111`).
|
| 121 |
+
|
| 122 |
+
### Empfohlener Produktionspfad (Stability Matrix)
|
| 123 |
+
|
| 124 |
+
Für stabilen Betrieb auf Windows wird eine **saubere, manager-gesteuerte Installation** empfohlen (statt manuell gepatchter `stable-diffusion-webui`-Klone):
|
| 125 |
+
|
| 126 |
+
1. Stability Matrix installieren und dort eine frische WebUI-Instanz mit aktivierter API starten.
|
| 127 |
+
2. API-Endpunkt prüfen:
|
| 128 |
+
|
| 129 |
+
```powershell
|
| 130 |
+
Invoke-RestMethod http://127.0.0.1:7860/sdapi/v1/sd-models
|
| 131 |
+
```
|
| 132 |
+
|
| 133 |
+
3. ImageForge auf diese Instanz zeigen (Default ist bereits `127.0.0.1:7860`):
|
| 134 |
+
|
| 135 |
+
```powershell
|
| 136 |
+
$env:IMAGEFORGE_A1111_BASE_URL="http://127.0.0.1:7860"
|
| 137 |
+
```
|
| 138 |
+
|
| 139 |
+
1. AUTOMATIC1111 lokal starten (mit API), z. B.:
|
| 140 |
+
|
| 141 |
+
```powershell
|
| 142 |
+
webui-user.bat --api
|
| 143 |
+
```
|
| 144 |
+
|
| 145 |
+
2. Optional URL/Timeout konfigurieren:
|
| 146 |
+
|
| 147 |
+
```powershell
|
| 148 |
+
$env:IMAGEFORGE_A1111_BASE_URL="http://127.0.0.1:7860"
|
| 149 |
+
$env:IMAGEFORGE_A1111_TIMEOUT_SECONDS="180"
|
| 150 |
+
$env:IMAGEFORGE_A1111_HEALTH_ENDPOINT="/sdapi/v1/sd-models"
|
| 151 |
+
$env:IMAGEFORGE_A1111_RETRY_COUNT="2"
|
| 152 |
+
$env:IMAGEFORGE_A1111_RETRY_BACKOFF_SECONDS="1.0"
|
| 153 |
+
```
|
| 154 |
+
|
| 155 |
+
Falls A1111 mit `--api-auth user:password` läuft:
|
| 156 |
+
|
| 157 |
+
```powershell
|
| 158 |
+
$env:IMAGEFORGE_A1111_API_USER="user"
|
| 159 |
+
$env:IMAGEFORGE_A1111_API_PASSWORD="password"
|
| 160 |
+
```
|
| 161 |
+
|
| 162 |
+
Alternativ als ein String:
|
| 163 |
+
|
| 164 |
+
```powershell
|
| 165 |
+
$env:IMAGEFORGE_A1111_API_AUTH="user:password"
|
| 166 |
+
```
|
| 167 |
+
|
| 168 |
+
Danach erscheint `AUTOMATIC1111` in `/models` als verfügbar, sobald der A1111-Server erreichbar ist.
|
| 169 |
+
|
| 170 |
+
### Robuster Betrieb bei A1111-Ausfällen
|
| 171 |
+
|
| 172 |
+
Wenn A1111 nicht erreichbar ist oder Fehler liefert, kann ImageForge automatisch auf andere Provider wechseln (z. B. `localai`, `diffusion`, `dummy`) statt den Job direkt abzubrechen.
|
| 173 |
+
|
| 174 |
+
```powershell
|
| 175 |
+
$env:IMAGEFORGE_ENABLE_AUTO_FALLBACK="1"
|
| 176 |
+
$env:IMAGEFORGE_FALLBACK_MODELS="a1111,localai,diffusion,dummy"
|
| 177 |
+
$env:IMAGEFORGE_FALLBACK_TIMEOUT_SECONDS="90"
|
| 178 |
+
$env:IMAGEFORGE_FALLBACK_MAX_STEPS="24"
|
| 179 |
+
```
|
| 180 |
+
|
| 181 |
+
- `IMAGEFORGE_ENABLE_AUTO_FALLBACK`: `1` aktiviert automatische Umschaltung.
|
| 182 |
+
- `IMAGEFORGE_FALLBACK_MODELS`: Priorisierte Reihenfolge der Fallback-Provider.
|
| 183 |
+
- `IMAGEFORGE_FALLBACK_TIMEOUT_SECONDS`: Kürzerer Timeout pro Fallback-Versuch.
|
| 184 |
+
- `IMAGEFORGE_FALLBACK_MAX_STEPS`: Deckel für Steps bei Fallback, um Laufzeit zu reduzieren.
|
| 185 |
+
|
| 186 |
+
### Stack-Readiness prüfen
|
| 187 |
+
|
| 188 |
+
Nach dem Start von Backend und A1111:
|
| 189 |
+
|
| 190 |
+
```powershell
|
| 191 |
+
./scripts/healthcheck-stack.ps1 -RequireA1111
|
| 192 |
+
```
|
| 193 |
+
|
| 194 |
+
Das Skript validiert `/health`, `/ready`, `/models` und optional die A1111-API selbst.
|
| 195 |
+
|
| 196 |
+
## Troubleshooting
|
| 197 |
+
|
| 198 |
+
- Falls Desktop-Build auf Windows an Symlink-Rechten scheitert: Entwickler-Modus oder Admin-Rechte aktivieren.
|
| 199 |
+
- Logs: `app.log`, `policy_audit.log`, `admin_audit.log`.
|
imageforge/backend/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
imageforge/backend/app/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
imageforge/backend/app/api/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
imageforge/backend/app/api/schemas.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from enum import Enum
|
| 5 |
+
from typing import Literal
|
| 6 |
+
|
| 7 |
+
from pydantic import BaseModel, Field, field_validator
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class JobStatus(str, Enum):
    """Lifecycle states of a generation job.

    Inherits ``str`` so the values serialize transparently in JSON payloads.
    """

    QUEUED = "queued"
    RUNNING = "running"
    DONE = "done"
    ERROR = "error"
    CANCELLED = "cancelled"
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class GenerateRequest(BaseModel):
    """Payload for a generation request: prompt text, provider and sampler knobs."""

    prompt: str = Field(min_length=1, max_length=2000)
    negative_prompt: str = Field(default="", max_length=2000)
    # Provider id; defaults to the local AI backend.
    model: str = Field(default="localai")
    size: Literal["512x512", "768x768", "1024x1024", "1024x1536", "1536x1024"] = "512x512"
    # Number of images to generate for this request.
    count: int = Field(default=1, ge=1, le=4)
    seed: int | None = Field(default=None, ge=0)
    # When True the worker ignores `seed` and draws a fresh random one.
    random_seed: bool = True
    steps: int = Field(default=30, ge=1, le=100)
    guidance: float = Field(default=6.5, ge=1.0, le=20.0)
    image_type: Literal[
        "general",
        "photo",
        "portrait",
        "landscape",
        "architecture",
        "product",
        "logo",
        "icon",
        "poster",
        "illustration",
        "anime",
        "pixel_art",
        "sketch",
        "painting",
        "3d",
    ] = "general"
    style_preset: Literal[
        "auto",
        "photorealistic",
        "cinematic",
        "minimal",
        "vibrant",
        "monochrome",
        "watercolor",
        "oil",
        "noir",
        "fantasy",
    ] = "auto"
    # 0-100 weight mixed into the compiled prompt's style hint.
    style_strength: int = Field(default=60, ge=0, le=100)
    # Request to relax content-policy restrictions (honored per ContentPolicy).
    admin_override: bool = False
    # Optional img2img source image; existence is validated by the job worker.
    init_image_path: str | None = None
    img2img_strength: float = Field(default=0.45, ge=0.0, le=1.0)
    # Provider-specific model/checkpoint name — presumably passed through
    # to the selected provider; confirm against provider implementations.
    model_variant: str | None = Field(default=None, max_length=200)

    @field_validator("prompt")
    @classmethod
    def validate_prompt(cls, value: str) -> str:
        """Trim surrounding whitespace and reject whitespace-only prompts."""
        value = value.strip()
        if not value:
            raise ValueError("Prompt must not be empty")
        return value
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class GenerateResponse(BaseModel):
    """Returned after submission: the id of the queued job."""

    job_id: str


class RetryResponse(BaseModel):
    """Returned when a job is resubmitted as a fresh job."""

    old_job_id: str
    new_job_id: str


class JobInfoResponse(BaseModel):
    """Status snapshot of a single job for polling clients."""

    job_id: str
    status: JobStatus
    progress: int
    message: str
    created_at: datetime
    updated_at: datetime
    image_paths: list[str] = Field(default_factory=list)
    output_dir: str | None = None
    error: str | None = None


class CancelResponse(BaseModel):
    """Result of a cancel request."""

    success: bool
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class ModelInfo(BaseModel):
    """One selectable provider/model entry in the models listing."""

    id: str
    name: str
    available: bool
    description: str


class HistoryItem(BaseModel):
    """A single saved prompt / negative-prompt pair."""

    prompt: str
    negative_prompt: str
    timestamp: datetime


class HealthResponse(BaseModel):
    """Minimal liveness payload."""

    status: str
    timestamp: datetime
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class PresetPayload(BaseModel):
    """A named, reusable bundle of generation settings."""

    name: str = Field(min_length=1, max_length=80)
    prompt: str = ""
    negative_prompt: str = ""
    # NOTE(review): default differs from GenerateRequest.model ("localai") —
    # confirm whether presets should also default to "localai".
    model: str = "dummy"
    size: Literal["512x512", "768x768", "1024x1024", "1024x1536", "1536x1024"] = "512x512"
    count: int = Field(default=1, ge=1, le=4)
    steps: int = Field(default=30, ge=1, le=100)
    guidance: float = Field(default=6.5, ge=1.0, le=20.0)
    # Free-form here (plain str), unlike the Literal-constrained request fields.
    image_type: str = "general"
    style_preset: str = "auto"
    style_strength: int = Field(default=60, ge=0, le=100)


class PresetResponse(PresetPayload):
    """Preset as stored, including its last-modified timestamp."""

    updated_at: datetime
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
class DashboardStats(BaseModel):
    """Aggregated job counts shown on the dashboard."""

    queued: int
    running: int
    done: int
    error: int
    cancelled: int
    total: int
    last_24h: int


class ExportRequest(BaseModel):
    """Convert/resize an existing output image to another format."""

    source_path: str
    format: Literal["png", "jpg", "webp"] = "png"
    quality: int = Field(default=92, ge=1, le=100)
    max_width: int | None = Field(default=None, ge=64, le=8192)
    max_height: int | None = Field(default=None, ge=64, le=8192)


class ExportResponse(BaseModel):
    """Path of the exported file."""

    output_path: str


class AdminSettings(BaseModel):
    """Runtime-adjustable server settings exposed to admins."""

    content_profile: str
    rate_limit_per_minute: int
    output_retention_days: int
    adult_enabled: bool = False


class MetricsResponse(BaseModel):
    """Flat name -> value metrics snapshot."""

    metrics: dict[str, float | int]
|
imageforge/backend/app/core/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
imageforge/backend/app/core/config.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations

import os
from pathlib import Path

# Project root: parents[3] of backend/app/core/config.py is the imageforge dir.
BASE_DIR = Path(__file__).resolve().parents[3]
AI_CACHE_ROOT = Path(os.getenv("IMAGEFORGE_CACHE_ROOT", str(BASE_DIR / ".cache"))).resolve()
AI_TMP_ROOT = Path(os.getenv("IMAGEFORGE_TMP_ROOT", str(AI_CACHE_ROOT / "tmp"))).resolve()

# Pre-create cache dirs so the env redirection below points at real paths.
for directory in [
    AI_CACHE_ROOT,
    AI_CACHE_ROOT / "hf",
    AI_CACHE_ROOT / "hf" / "transformers",
    AI_CACHE_ROOT / "torch",
    AI_CACHE_ROOT / "pip",
    AI_TMP_ROOT,
]:
    directory.mkdir(parents=True, exist_ok=True)

# Redirect tool caches into the project cache unless set externally.
os.environ.setdefault("HF_HOME", str(AI_CACHE_ROOT / "hf"))
# NOTE(review): TRANSFORMERS_CACHE is deprecated in newer transformers
# releases in favour of HF_HOME — harmless to keep; confirm on upgrade.
os.environ.setdefault("TRANSFORMERS_CACHE", str(AI_CACHE_ROOT / "hf" / "transformers"))
os.environ.setdefault("TORCH_HOME", str(AI_CACHE_ROOT / "torch"))
os.environ.setdefault("PIP_CACHE_DIR", str(AI_CACHE_ROOT / "pip"))
os.environ.setdefault("TMP", str(AI_TMP_ROOT))
os.environ.setdefault("TEMP", str(AI_TMP_ROOT))

# Well-known data/log file locations next to the project root.
OUTPUT_DIR = BASE_DIR / "output"
LOG_FILE = BASE_DIR / "app.log"
POLICY_AUDIT_FILE = BASE_DIR / "policy_audit.log"
HISTORY_FILE = BASE_DIR / "prompt_history.json"
PRESETS_FILE = BASE_DIR / "presets.json"
JOBS_STATE_FILE = BASE_DIR / "jobs_state.json"
SETTINGS_FILE = BASE_DIR / "settings.json"
ADMIN_AUDIT_FILE = BASE_DIR / "admin_audit.log"

# Server / API behaviour, all overridable via environment variables.
DEFAULT_BACKEND_HOST = os.getenv("IMAGEFORGE_HOST", "127.0.0.1")
DEFAULT_BACKEND_PORT = int(os.getenv("IMAGEFORGE_PORT", "8008"))
CORS_ORIGINS = os.getenv("IMAGEFORGE_CORS_ORIGINS", "*")
CONTENT_PROFILE = os.getenv("IMAGEFORGE_CONTENT_PROFILE", "strict")
ADMIN_TOKEN = os.getenv("IMAGEFORGE_ADMIN_TOKEN", "")
API_KEYS = os.getenv("IMAGEFORGE_API_KEYS", "")
RATE_LIMIT_PER_MINUTE = int(os.getenv("IMAGEFORGE_RATE_LIMIT_PER_MIN", "120"))
REQUEST_MAX_BYTES = int(os.getenv("IMAGEFORGE_REQUEST_MAX_BYTES", str(2 * 1024 * 1024)))
OUTPUT_RETENTION_DAYS = int(os.getenv("IMAGEFORGE_OUTPUT_RETENTION_DAYS", "30"))
DASHBOARD_REFRESH_SECONDS = int(os.getenv("IMAGEFORGE_DASHBOARD_REFRESH_SECONDS", "2"))
JOB_TIMEOUT_SECONDS = int(os.getenv("IMAGEFORGE_JOB_TIMEOUT_SECONDS", "180"))

# AUTOMATIC1111 (A1111) integration.
A1111_BASE_URL = os.getenv("IMAGEFORGE_A1111_BASE_URL", "http://127.0.0.1:7860")
A1111_TIMEOUT_SECONDS = int(os.getenv("IMAGEFORGE_A1111_TIMEOUT_SECONDS", "180"))
A1111_HEALTH_ENDPOINT = os.getenv("IMAGEFORGE_A1111_HEALTH_ENDPOINT", "/sdapi/v1/sd-models")
A1111_RETRY_COUNT = int(os.getenv("IMAGEFORGE_A1111_RETRY_COUNT", "2"))
A1111_RETRY_BACKOFF_SECONDS = float(os.getenv("IMAGEFORGE_A1111_RETRY_BACKOFF_SECONDS", "1.0"))
# Either a combined "user:password" string or the separate fields below.
A1111_API_AUTH = os.getenv("IMAGEFORGE_A1111_API_AUTH", "")
A1111_API_USER = os.getenv("IMAGEFORGE_A1111_API_USER", "")
A1111_API_PASSWORD = os.getenv("IMAGEFORGE_A1111_API_PASSWORD", "")
|
imageforge/backend/app/core/logging.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
from logging.handlers import RotatingFileHandler
|
| 5 |
+
|
| 6 |
+
from .config import LOG_FILE
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def setup_logging() -> None:
    """Configure the root logger once with rotating-file plus console output.

    Idempotent: if the root logger already has handlers, a repeat call is a
    no-op so handlers are never stacked twice.
    """
    LOG_FILE.parent.mkdir(parents=True, exist_ok=True)
    root = logging.getLogger()
    if root.handlers:
        return

    root.setLevel(logging.INFO)
    fmt = logging.Formatter(
        "%(asctime)s | %(levelname)s | %(name)s | %(message)s"
    )

    # One rotating file handler (3 x 1 MB backups) and one console handler,
    # both sharing the same format.
    for handler in (
        RotatingFileHandler(LOG_FILE, maxBytes=1_000_000, backupCount=3),
        logging.StreamHandler(),
    ):
        handler.setFormatter(fmt)
        root.addHandler(handler)
|
imageforge/backend/app/core/observability.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import threading
|
| 4 |
+
from collections import defaultdict
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class MetricsStore:
|
| 8 |
+
def __init__(self) -> None:
|
| 9 |
+
self._lock = threading.Lock()
|
| 10 |
+
self._counters: dict[str, int] = defaultdict(int)
|
| 11 |
+
self._timings_ms_sum: dict[str, float] = defaultdict(float)
|
| 12 |
+
self._timings_count: dict[str, int] = defaultdict(int)
|
| 13 |
+
|
| 14 |
+
def incr(self, key: str, delta: int = 1) -> None:
|
| 15 |
+
with self._lock:
|
| 16 |
+
self._counters[key] += delta
|
| 17 |
+
|
| 18 |
+
def observe_ms(self, key: str, value_ms: float) -> None:
|
| 19 |
+
with self._lock:
|
| 20 |
+
self._timings_ms_sum[key] += value_ms
|
| 21 |
+
self._timings_count[key] += 1
|
| 22 |
+
|
| 23 |
+
def snapshot(self) -> dict[str, float | int]:
|
| 24 |
+
with self._lock:
|
| 25 |
+
out: dict[str, float | int] = dict(self._counters)
|
| 26 |
+
for key, total in self._timings_ms_sum.items():
|
| 27 |
+
count = max(1, self._timings_count[key])
|
| 28 |
+
out[f"{key}_avg_ms"] = total / count
|
| 29 |
+
out[f"{key}_count"] = self._timings_count[key]
|
| 30 |
+
return out
|
| 31 |
+
|
| 32 |
+
def to_prometheus(self) -> str:
|
| 33 |
+
data = self.snapshot()
|
| 34 |
+
lines = ["# TYPE imageforge_metric gauge"]
|
| 35 |
+
for key, value in sorted(data.items()):
|
| 36 |
+
prom_name = "imageforge_" + key.replace("-", "_").replace("/", "_").replace(".", "_")
|
| 37 |
+
lines.append(f"{prom_name} {value}")
|
| 38 |
+
return "\n".join(lines) + "\n"
|
imageforge/backend/app/core/policy.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import re
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
from datetime import datetime, timezone
|
| 7 |
+
|
| 8 |
+
from .config import POLICY_AUDIT_FILE
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Patterns that block a prompt unconditionally — ContentPolicy returns a
# denial before any profile or admin-override check when one of these matches.
# NOTE(review): broad word matches such as \bchild\b or \bminor\b also hit
# innocuous prompts ("child playing", "minor detail") — confirm intended.
HARD_BLOCK_PATTERNS = [
    r"\bminor\b",
    r"\bunderage\b",
    r"\bchild\b",
    r"\bteen\s*sex\b",
    r"\brape\b",
    r"\bsexual\s*assault\b",
    r"\bnon\s*-?consensual\b",
    r"\bforced\s*sex\b",
    r"\bincest\b",
    r"\bbestiality\b",
]

# Adult-but-non-explicit content: blocked under the "strict" profile unless
# an admin override is applied; allowed under "internal-relaxed".
ADULT_SEXUAL_PATTERNS = [
    r"\berotic\b",
    r"\bsensual\b",
    r"\bnude\b",
    r"\bnudity\b",
    r"\bsexy\b",
    r"\blingerie\b",
    r"\badult\b",
]

# Explicit content: even under "internal-relaxed" it requires an admin override.
EXPLICIT_PATTERNS = [
    r"\bporn\b",
    r"\bhardcore\b",
    r"\bexplicit\s*sex\b",
    r"\bpenetration\b",
    r"\bblowjob\b",
    r"\bsex\s*act\b",
]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@dataclass(slots=True)
class PolicyDecision:
    """Outcome of a content-policy evaluation."""

    allowed: bool       # whether the request may proceed
    reason: str         # machine-readable reason code (also written to the audit log)
    matched: list[str]  # regex pattern source strings that matched the text
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class ContentPolicy:
    """Profile-driven prompt screening.

    Hard-blocked (illegal) content is always rejected. Adult/explicit content
    is then gated by the active profile ("strict" or "internal-relaxed") and
    the caller's admin-override flag.
    """

    def __init__(self, profile: str) -> None:
        # An empty/whitespace profile name falls back to the strict default.
        self.profile = profile.strip().lower() or "strict"

    def evaluate(self, text: str, admin_override: bool) -> PolicyDecision:
        """Classify *text*, returning an allow/deny decision with a reason code."""
        lowered = text.lower()

        illegal = _find_matches(lowered, HARD_BLOCK_PATTERNS)
        if illegal:
            return PolicyDecision(False, "blocked_illegal_content", illegal)

        sexual = _find_matches(lowered, ADULT_SEXUAL_PATTERNS)
        explicit = _find_matches(lowered, EXPLICIT_PATTERNS)
        adult_hits = sexual + explicit

        if self.profile == "strict" and adult_hits:
            if admin_override:
                return PolicyDecision(True, "allowed_admin_override", adult_hits)
            return PolicyDecision(False, "blocked_sexual_in_strict_profile", adult_hits)

        if self.profile == "internal-relaxed":
            if explicit:
                reason = (
                    "allowed_explicit_admin_override"
                    if admin_override
                    else "explicit_requires_admin_override"
                )
                return PolicyDecision(admin_override, reason, explicit)
            if sexual:
                return PolicyDecision(True, "allowed_adult_sexual_relaxed", sexual)

        # NOTE(review): an unrecognized profile name falls through to a blanket
        # allow (fail-open) — same as the original behavior; confirm intended.
        return PolicyDecision(True, "allowed", [])
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class PolicyAuditStore:
    """Append-only JSONL audit trail of content-policy decisions."""

    def write(
        self,
        *,
        prompt: str,
        negative_prompt: str,
        profile: str,
        decision: PolicyDecision,
        client_ip: str,
        model: str,
        admin_override_requested: bool,
        admin_override_applied: bool,
    ) -> None:
        """Append one audit entry to POLICY_AUDIT_FILE.

        Prompts are truncated to 180-char previews to keep the log compact.
        """
        POLICY_AUDIT_FILE.parent.mkdir(parents=True, exist_ok=True)
        entry = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "profile": profile,
            "allowed": decision.allowed,
            "reason": decision.reason,
            "matched": decision.matched,
            "model": model,
            "client_ip": client_ip,
            "admin_override_requested": admin_override_requested,
            "admin_override_applied": admin_override_applied,
            # Previews only — full prompts are not persisted here.
            "prompt_preview": prompt[:180],
            "negative_prompt_preview": negative_prompt[:180],
        }
        with POLICY_AUDIT_FILE.open("a", encoding="utf-8") as fp:
            fp.write(json.dumps(entry, ensure_ascii=True) + "\n")
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def _find_matches(text: str, patterns: list[str]) -> list[str]:
|
| 113 |
+
matches: list[str] = []
|
| 114 |
+
for pattern in patterns:
|
| 115 |
+
if re.search(pattern, text, flags=re.IGNORECASE):
|
| 116 |
+
matches.append(pattern)
|
| 117 |
+
return matches
|
imageforge/backend/app/core/prompting.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
# Per image-type prompt fragments appended by compile_prompts(); the "general"
# entry doubles as the fallback for unknown types.
_IMAGE_TYPE_HINTS: dict[str, str] = {
    "general": "high quality image",
    "photo": "real-world photography, natural lighting, realistic textures",
    "portrait": "portrait photography, detailed face, sharp eyes, skin texture",
    "landscape": "wide landscape composition, depth, atmospheric perspective",
    "architecture": "architectural photography, perspective lines, material details",
    "product": "studio product shot, clean background, commercial lighting",
    "logo": "minimal vector logo, clean geometry, high contrast",
    "icon": "flat app icon design, simple silhouette, readable at small size",
    "poster": "poster design, bold composition, impactful typography space",
    "illustration": "digital illustration, clean linework, cohesive color palette",
    "anime": "anime style illustration, expressive line art, cel shading",
    "pixel_art": "pixel art style, crisp pixels, limited palette",
    "sketch": "pencil sketch, rough strokes, paper texture",
    "painting": "painterly artwork, brush strokes, rich color blending",
    "3d": "3d render style, global illumination, physically based materials",
}

# Style-preset fragments; "auto" maps to the empty string so no style text
# is appended for it.
_STYLE_HINTS: dict[str, str] = {
    "auto": "",
    "photorealistic": "photorealistic, ultra detailed, natural color grading",
    "cinematic": "cinematic lighting, dramatic framing, filmic color",
    "minimal": "minimalist design, clean shapes, negative space",
    "vibrant": "vivid colors, high saturation, energetic contrast",
    "monochrome": "monochrome palette, tonal depth, strong value structure",
    "watercolor": "watercolor texture, soft pigment edges",
    "oil": "oil painting texture, impasto details",
    "noir": "film noir, moody shadows, high contrast",
    "fantasy": "fantasy art, epic atmosphere, imaginative details",
}

# Baseline negative prompt always merged into every request.
_BASE_NEGATIVE = (
    "low quality, blurry, noisy, jpeg artifacts, deformed anatomy, bad composition, "
    "watermark, signature, text clutter"
)
| 41 |
+
|
| 42 |
+
|
| 43 |
+
@dataclass(slots=True)
class PromptCompileResult:
    """Final prompt pair produced by compile_prompts()."""

    prompt: str           # user prompt plus type/style hints
    negative_prompt: str  # user negative prompt merged with the base negative
|
| 48 |
+
|
| 49 |
+
def compile_prompts(
    prompt: str,
    negative_prompt: str,
    image_type: str,
    style_preset: str,
    style_strength: int,
) -> PromptCompileResult:
    """Combine the user's prompt with type/style hints and the base negative.

    Unknown image types fall back to the "general" hint; unknown or "auto"
    style presets contribute no style text.
    """
    type_hint = _IMAGE_TYPE_HINTS.get(image_type, _IMAGE_TYPE_HINTS["general"])
    style_hint = _STYLE_HINTS.get(style_preset, "")

    # Clamp the strength defensively even though the schema already bounds it.
    clamped = min(100, max(0, style_strength))
    style_suffix = (
        f" style influence {clamped}%: {style_hint}." if style_hint else ""
    )

    final_prompt = f"{prompt.strip()}. Target: {type_hint}.{style_suffix}".strip()

    user_negative = negative_prompt.strip()
    final_negative = (
        f"{user_negative}, {_BASE_NEGATIVE}" if user_negative else _BASE_NEGATIVE
    )

    return PromptCompileResult(prompt=final_prompt, negative_prompt=final_negative)
|
imageforge/backend/app/core/security.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import time
|
| 4 |
+
from collections import defaultdict, deque
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
|
| 7 |
+
from fastapi import HTTPException
|
| 8 |
+
|
| 9 |
+
from .config import API_KEYS, RATE_LIMIT_PER_MINUTE
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@dataclass(slots=True)
class Principal:
    """Authenticated caller identity produced by ApiSecurity.authenticate()."""

    client_id: str        # rate-limiting key (typically the client IP)
    role: str             # "viewer" | "operator" | "admin"
    api_key: str | None   # the presented key, or None when no keys are configured
| 18 |
+
|
| 19 |
+
class ApiSecurity:
    """API-key authentication, role checks and a per-client rate limiter.

    Keys come from the IMAGEFORGE_API_KEYS env var as a comma-separated list
    of ``key`` or ``key:role`` entries. When no keys are configured, every
    caller is treated as an anonymous "operator".
    """

    def __init__(self) -> None:
        # key -> role; role defaults to "operator" when omitted or blank.
        self.required_keys: dict[str, str] = {}
        for raw in [item.strip() for item in API_KEYS.split(",") if item.strip()]:
            if ":" in raw:
                key, role = raw.split(":", maxsplit=1)
                self.required_keys[key.strip()] = role.strip() or "operator"
            else:
                self.required_keys[raw] = "operator"
        self.limit = RATE_LIMIT_PER_MINUTE
        # Sliding-window request timestamps per client id.
        self._hits: dict[str, deque[float]] = defaultdict(deque)

    def authenticate(self, api_key: str | None, client_id: str) -> Principal:
        """Validate *api_key* (when keys are configured) and apply rate limiting.

        Raises HTTPException 401 for a missing/unknown key and 429 when the
        client exceeds the per-minute limit.
        """
        role = "operator"
        if self.required_keys:
            import hmac  # stdlib; local import keeps this change self-contained

            # Compare against every configured key with hmac.compare_digest so
            # the check is constant-time per key and does not leak key prefixes
            # via timing (a plain dict lookup short-circuits on mismatch).
            matched_role: str | None = None
            if api_key:
                for known_key, known_role in self.required_keys.items():
                    if hmac.compare_digest(known_key, api_key):
                        matched_role = known_role
            if matched_role is None:
                raise HTTPException(status_code=401, detail="Missing or invalid API key")
            role = matched_role
        self._check_rate_limit(client_id)
        return Principal(client_id=client_id, role=role, api_key=api_key)

    @staticmethod
    def require_role(principal: Principal, minimum_role: str) -> None:
        """Raise 403 unless *principal*'s role ranks at least *minimum_role*."""
        rank = {"viewer": 1, "operator": 2, "admin": 3}
        if rank.get(principal.role, 0) < rank.get(minimum_role, 0):
            raise HTTPException(status_code=403, detail=f"Role '{minimum_role}' required")

    def _check_rate_limit(self, client_id: str) -> None:
        """Enforce a 60-second sliding-window limit; localhost is exempt."""
        if client_id in {"127.0.0.1", "::1", "localhost"}:
            return
        now = time.time()
        q = self._hits[client_id]
        window_start = now - 60.0
        # Drop timestamps that fell out of the window before counting.
        while q and q[0] < window_start:
            q.popleft()
        if len(q) >= self.limit:
            raise HTTPException(status_code=429, detail="Rate limit exceeded")
        q.append(now)
|
imageforge/backend/app/jobs/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
imageforge/backend/app/jobs/manager.py
ADDED
|
@@ -0,0 +1,455 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import hashlib
|
| 4 |
+
import json
|
| 5 |
+
import logging
|
| 6 |
+
import queue
|
| 7 |
+
import threading
|
| 8 |
+
import time
|
| 9 |
+
import uuid
|
| 10 |
+
import os
|
| 11 |
+
from dataclasses import dataclass, field
|
| 12 |
+
from datetime import datetime, timezone
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
import random
|
| 15 |
+
|
| 16 |
+
from ..api.schemas import GenerateRequest, JobStatus
|
| 17 |
+
from ..core.config import JOBS_STATE_FILE, JOB_TIMEOUT_SECONDS, OUTPUT_DIR
|
| 18 |
+
from ..core.prompting import compile_prompts
|
| 19 |
+
from ..providers.factory import ProviderRegistry
|
| 20 |
+
from ..providers.interface import ProviderRequest, ProviderResult, ProviderUnavailableError
|
| 21 |
+
from ..storage.history import PromptHistoryStore
|
| 22 |
+
|
| 23 |
+
LOGGER = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@dataclass
class JobState:
    """Mutable in-memory record of one generation job's lifecycle."""

    job_id: str
    request: GenerateRequest
    status: JobStatus = JobStatus.QUEUED
    # Coarse progress indicator; set to 1 when the worker starts the job.
    progress: int = 0
    message: str = "Queued"
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    updated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    image_paths: list[str] = field(default_factory=list)
    output_dir: str | None = None
    error: str | None = None
    # Set by JobManager.cancel(); cooperative cancellation flag for the worker.
    cancel_requested: bool = False
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class JobManager:
|
| 42 |
+
def __init__(
|
| 43 |
+
self,
|
| 44 |
+
provider_registry: ProviderRegistry,
|
| 45 |
+
history_store: PromptHistoryStore,
|
| 46 |
+
state_file: Path | None = None,
|
| 47 |
+
) -> None:
|
| 48 |
+
self.provider_registry = provider_registry
|
| 49 |
+
self.history_store = history_store
|
| 50 |
+
self._state_file = state_file or JOBS_STATE_FILE
|
| 51 |
+
self._jobs: dict[str, JobState] = {}
|
| 52 |
+
self._queue: queue.Queue[str] = queue.Queue()
|
| 53 |
+
self._lock = threading.RLock()
|
| 54 |
+
self._state_write_lock = threading.Lock()
|
| 55 |
+
self._worker = threading.Thread(target=self._worker_loop, daemon=True)
|
| 56 |
+
self._load_state()
|
| 57 |
+
self._worker.start()
|
| 58 |
+
|
| 59 |
+
def submit(self, request: GenerateRequest) -> str:
|
| 60 |
+
job_id = uuid.uuid4().hex[:12]
|
| 61 |
+
state = JobState(job_id=job_id, request=request)
|
| 62 |
+
with self._lock:
|
| 63 |
+
self._jobs[job_id] = state
|
| 64 |
+
self._queue.put(job_id)
|
| 65 |
+
self.history_store.add(request.prompt, request.negative_prompt)
|
| 66 |
+
self._save_state()
|
| 67 |
+
LOGGER.info("Job %s queued with model %s", job_id, request.model)
|
| 68 |
+
return job_id
|
| 69 |
+
|
| 70 |
+
def get(self, job_id: str) -> JobState | None:
|
| 71 |
+
with self._lock:
|
| 72 |
+
return self._jobs.get(job_id)
|
| 73 |
+
|
| 74 |
+
def list(self) -> list[JobState]:
|
| 75 |
+
with self._lock:
|
| 76 |
+
return sorted(self._jobs.values(), key=lambda item: item.created_at, reverse=True)
|
| 77 |
+
|
| 78 |
+
def retry(self, job_id: str) -> str | None:
|
| 79 |
+
with self._lock:
|
| 80 |
+
state = self._jobs.get(job_id)
|
| 81 |
+
if state is None:
|
| 82 |
+
return None
|
| 83 |
+
return self.submit(state.request)
|
| 84 |
+
|
| 85 |
+
def stats(self) -> dict[str, int]:
|
| 86 |
+
with self._lock:
|
| 87 |
+
items = list(self._jobs.values())
|
| 88 |
+
now = datetime.now(timezone.utc)
|
| 89 |
+
counts = {status.value: 0 for status in JobStatus}
|
| 90 |
+
last_24h = 0
|
| 91 |
+
for item in items:
|
| 92 |
+
counts[item.status.value] += 1
|
| 93 |
+
if (now - item.created_at).total_seconds() <= 86400:
|
| 94 |
+
last_24h += 1
|
| 95 |
+
return {
|
| 96 |
+
"queued": counts["queued"],
|
| 97 |
+
"running": counts["running"],
|
| 98 |
+
"done": counts["done"],
|
| 99 |
+
"error": counts["error"],
|
| 100 |
+
"cancelled": counts["cancelled"],
|
| 101 |
+
"total": len(items),
|
| 102 |
+
"last_24h": last_24h,
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
def cancel(self, job_id: str) -> bool:
|
| 106 |
+
with self._lock:
|
| 107 |
+
state = self._jobs.get(job_id)
|
| 108 |
+
if state is None:
|
| 109 |
+
return False
|
| 110 |
+
state.cancel_requested = True
|
| 111 |
+
if state.status == JobStatus.QUEUED:
|
| 112 |
+
state.status = JobStatus.CANCELLED
|
| 113 |
+
state.message = "Cancelled"
|
| 114 |
+
state.updated_at = datetime.now(timezone.utc)
|
| 115 |
+
self._save_state()
|
| 116 |
+
return True
|
| 117 |
+
|
| 118 |
+
def _update(self, job_id: str, **updates) -> None: # noqa: ANN003
|
| 119 |
+
with self._lock:
|
| 120 |
+
state = self._jobs.get(job_id)
|
| 121 |
+
if state is None:
|
| 122 |
+
return
|
| 123 |
+
for key, value in updates.items():
|
| 124 |
+
setattr(state, key, value)
|
| 125 |
+
state.updated_at = datetime.now(timezone.utc)
|
| 126 |
+
self._save_state()
|
| 127 |
+
|
| 128 |
+
def _worker_loop(self) -> None:
|
| 129 |
+
while True:
|
| 130 |
+
job_id = self._queue.get()
|
| 131 |
+
state = self.get(job_id)
|
| 132 |
+
if state is None:
|
| 133 |
+
continue
|
| 134 |
+
if state.status == JobStatus.CANCELLED:
|
| 135 |
+
continue
|
| 136 |
+
self._run_job(job_id, state)
|
| 137 |
+
|
| 138 |
+
def _run_job(self, job_id: str, state: JobState) -> None:
|
| 139 |
+
self._update(job_id, status=JobStatus.RUNNING, progress=1, message="Starting generation")
|
| 140 |
+
date_dir = datetime.now(timezone.utc).strftime("%Y-%m-%d")
|
| 141 |
+
output_dir = OUTPUT_DIR / date_dir / f"job_{job_id}"
|
| 142 |
+
output_dir.mkdir(parents=True, exist_ok=True)
|
| 143 |
+
|
| 144 |
+
try:
|
| 145 |
+
width, height = self._parse_size(state.request.size)
|
| 146 |
+
seed = state.request.seed
|
| 147 |
+
if state.request.random_seed or seed is None:
|
| 148 |
+
seed = random.randint(0, 2**31 - 1)
|
| 149 |
+
if state.request.init_image_path:
|
| 150 |
+
init_path = Path(state.request.init_image_path)
|
| 151 |
+
if not init_path.exists() or not init_path.is_file():
|
| 152 |
+
raise ValueError("init_image_path does not exist or is not a file")
|
| 153 |
+
compiled = compile_prompts(
|
| 154 |
+
prompt=state.request.prompt,
|
| 155 |
+
negative_prompt=state.request.negative_prompt,
|
| 156 |
+
image_type=state.request.image_type,
|
| 157 |
+
style_preset=state.request.style_preset,
|
| 158 |
+
style_strength=state.request.style_strength,
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
provider_request = ProviderRequest(
|
| 162 |
+
prompt=compiled.prompt,
|
| 163 |
+
negative_prompt=compiled.negative_prompt,
|
| 164 |
+
count=state.request.count,
|
| 165 |
+
width=width,
|
| 166 |
+
height=height,
|
| 167 |
+
seed=seed,
|
| 168 |
+
steps=state.request.steps,
|
| 169 |
+
guidance=state.request.guidance,
|
| 170 |
+
init_image_path=state.request.init_image_path,
|
| 171 |
+
img2img_strength=state.request.img2img_strength,
|
| 172 |
+
model_variant=state.request.model_variant,
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
def progress_cb(progress: int, message: str) -> None:
|
| 176 |
+
self._update(job_id, progress=max(1, min(99, progress)), message=message)
|
| 177 |
+
|
| 178 |
+
def is_cancelled() -> bool:
|
| 179 |
+
current = self.get(job_id)
|
| 180 |
+
return bool(current and current.cancel_requested)
|
| 181 |
+
selected_provider_id, result = self._generate_with_fallback(
|
| 182 |
+
requested_model=state.request.model,
|
| 183 |
+
request=provider_request,
|
| 184 |
+
output_dir=output_dir,
|
| 185 |
+
progress_cb=progress_cb,
|
| 186 |
+
is_cancelled=is_cancelled,
|
| 187 |
+
)
|
| 188 |
+
if is_cancelled():
|
| 189 |
+
self._update(job_id, status=JobStatus.CANCELLED, progress=0, message="Cancelled")
|
| 190 |
+
return
|
| 191 |
+
image_paths = [str(path.resolve()) for path in result.image_paths]
|
| 192 |
+
meta = {
|
| 193 |
+
"prompt": state.request.prompt,
|
| 194 |
+
"compiled_prompt": compiled.prompt,
|
| 195 |
+
"neg_prompt": state.request.negative_prompt,
|
| 196 |
+
"compiled_neg_prompt": compiled.negative_prompt,
|
| 197 |
+
"seed": seed,
|
| 198 |
+
"model": state.request.model,
|
| 199 |
+
"size": state.request.size,
|
| 200 |
+
"count": state.request.count,
|
| 201 |
+
"steps": state.request.steps,
|
| 202 |
+
"guidance": state.request.guidance,
|
| 203 |
+
"image_type": state.request.image_type,
|
| 204 |
+
"style_preset": state.request.style_preset,
|
| 205 |
+
"style_strength": state.request.style_strength,
|
| 206 |
+
"init_image_path": state.request.init_image_path,
|
| 207 |
+
"img2img_strength": state.request.img2img_strength,
|
| 208 |
+
"model_variant": state.request.model_variant,
|
| 209 |
+
"requested_model": state.request.model,
|
| 210 |
+
"used_provider": selected_provider_id,
|
| 211 |
+
"timestamp": datetime.now(timezone.utc).isoformat(),
|
| 212 |
+
"images": image_paths,
|
| 213 |
+
}
|
| 214 |
+
stable_blob = json.dumps(
|
| 215 |
+
{
|
| 216 |
+
"compiled_prompt": compiled.prompt,
|
| 217 |
+
"compiled_neg_prompt": compiled.negative_prompt,
|
| 218 |
+
"model": selected_provider_id,
|
| 219 |
+
"size": state.request.size,
|
| 220 |
+
"steps": state.request.steps,
|
| 221 |
+
"guidance": state.request.guidance,
|
| 222 |
+
"seed": seed,
|
| 223 |
+
},
|
| 224 |
+
sort_keys=True,
|
| 225 |
+
).encode("utf-8")
|
| 226 |
+
meta["config_hash"] = hashlib.sha256(stable_blob).hexdigest()
|
| 227 |
+
meta_path = output_dir / "meta.json"
|
| 228 |
+
meta_path.write_text(json.dumps(meta, indent=2), encoding="utf-8")
|
| 229 |
+
self._update(
|
| 230 |
+
job_id,
|
| 231 |
+
status=JobStatus.DONE,
|
| 232 |
+
progress=100,
|
| 233 |
+
message="Generation complete",
|
| 234 |
+
image_paths=image_paths,
|
| 235 |
+
output_dir=str(output_dir.resolve()),
|
| 236 |
+
)
|
| 237 |
+
LOGGER.info("Job %s completed with %s image(s)", job_id, len(image_paths))
|
| 238 |
+
self._save_state()
|
| 239 |
+
except Exception as exc: # noqa: BLE001
|
| 240 |
+
LOGGER.exception("Job %s failed", job_id)
|
| 241 |
+
self._update(
|
| 242 |
+
job_id,
|
| 243 |
+
status=JobStatus.ERROR,
|
| 244 |
+
progress=0,
|
| 245 |
+
message="Generation failed",
|
| 246 |
+
error=str(exc),
|
| 247 |
+
output_dir=str(output_dir.resolve()),
|
| 248 |
+
)
|
| 249 |
+
self._save_state()
|
| 250 |
+
|
| 251 |
+
def _generate_with_fallback(
|
| 252 |
+
self,
|
| 253 |
+
requested_model: str,
|
| 254 |
+
request: ProviderRequest,
|
| 255 |
+
output_dir: Path,
|
| 256 |
+
progress_cb,
|
| 257 |
+
is_cancelled,
|
| 258 |
+
) -> tuple[str, ProviderResult]:
|
| 259 |
+
candidates = self._candidate_models(requested_model)
|
| 260 |
+
failures: list[str] = []
|
| 261 |
+
|
| 262 |
+
for idx, model_id in enumerate(candidates):
|
| 263 |
+
provider = self.provider_registry.get(model_id)
|
| 264 |
+
if not provider.is_available():
|
| 265 |
+
failures.append(f"{model_id}: unavailable")
|
| 266 |
+
continue
|
| 267 |
+
|
| 268 |
+
adjusted_request = request
|
| 269 |
+
if idx > 0:
|
| 270 |
+
adjusted_request = self._adjust_request_for_fallback(model_id, request)
|
| 271 |
+
progress_cb(2, f"Fallback to '{model_id}'")
|
| 272 |
+
else:
|
| 273 |
+
progress_cb(2, f"Using provider '{model_id}'")
|
| 274 |
+
|
| 275 |
+
try:
|
| 276 |
+
result = self._generate_with_timeout(
|
| 277 |
+
provider=provider,
|
| 278 |
+
request=adjusted_request,
|
| 279 |
+
output_dir=output_dir,
|
| 280 |
+
progress_cb=progress_cb,
|
| 281 |
+
is_cancelled=is_cancelled,
|
| 282 |
+
timeout_seconds=self._timeout_for_attempt(idx),
|
| 283 |
+
)
|
| 284 |
+
if getattr(result, "image_paths", None):
|
| 285 |
+
return model_id, result
|
| 286 |
+
failures.append(f"{model_id}: produced no images")
|
| 287 |
+
except Exception as exc: # noqa: BLE001
|
| 288 |
+
failures.append(f"{model_id}: {exc}")
|
| 289 |
+
|
| 290 |
+
raise ProviderUnavailableError(
|
| 291 |
+
"No provider could generate images. Attempts: " + " | ".join(failures)
|
| 292 |
+
)
|
| 293 |
+
|
| 294 |
+
def _candidate_models(self, requested_model: str) -> list[str]:
|
| 295 |
+
known = {provider.id for provider in self.provider_registry.list()}
|
| 296 |
+
if requested_model not in known:
|
| 297 |
+
raise KeyError(f"Unknown provider: {requested_model}")
|
| 298 |
+
|
| 299 |
+
auto_fallback = os.getenv("IMAGEFORGE_ENABLE_AUTO_FALLBACK", "1") == "1"
|
| 300 |
+
if not auto_fallback:
|
| 301 |
+
return [requested_model]
|
| 302 |
+
|
| 303 |
+
# Try localai and diffusion first (real models), fallback to dummy if needed
|
| 304 |
+
fallback_raw = os.getenv("IMAGEFORGE_FALLBACK_MODELS", "localai,diffusion,a1111,dummy")
|
| 305 |
+
fallback_order = [part.strip() for part in fallback_raw.split(",") if part.strip()]
|
| 306 |
+
|
| 307 |
+
models: list[str] = [requested_model]
|
| 308 |
+
for model_id in fallback_order:
|
| 309 |
+
if model_id in known and model_id not in models:
|
| 310 |
+
models.append(model_id)
|
| 311 |
+
return models
|
| 312 |
+
|
| 313 |
+
@staticmethod
|
| 314 |
+
def _timeout_for_attempt(attempt_index: int) -> int:
|
| 315 |
+
if attempt_index == 0:
|
| 316 |
+
return JOB_TIMEOUT_SECONDS
|
| 317 |
+
fallback_timeout = int(os.getenv("IMAGEFORGE_FALLBACK_TIMEOUT_SECONDS", "90"))
|
| 318 |
+
return max(10, fallback_timeout)
|
| 319 |
+
|
| 320 |
+
@staticmethod
|
| 321 |
+
def _adjust_request_for_fallback(model_id: str, request: ProviderRequest) -> ProviderRequest:
|
| 322 |
+
max_steps = int(os.getenv("IMAGEFORGE_FALLBACK_MAX_STEPS", "24"))
|
| 323 |
+
capped_steps = max(1, min(request.steps, max_steps))
|
| 324 |
+
capped_guidance = min(request.guidance, 8.0)
|
| 325 |
+
max_side = int(os.getenv("IMAGEFORGE_FALLBACK_MAX_SIDE", "768"))
|
| 326 |
+
width = min(request.width, max_side)
|
| 327 |
+
height = min(request.height, max_side)
|
| 328 |
+
width = max(64, (width // 8) * 8)
|
| 329 |
+
height = max(64, (height // 8) * 8)
|
| 330 |
+
return ProviderRequest(
|
| 331 |
+
prompt=request.prompt,
|
| 332 |
+
negative_prompt=request.negative_prompt,
|
| 333 |
+
count=request.count,
|
| 334 |
+
width=width,
|
| 335 |
+
height=height,
|
| 336 |
+
seed=request.seed,
|
| 337 |
+
steps=capped_steps,
|
| 338 |
+
guidance=capped_guidance,
|
| 339 |
+
init_image_path=request.init_image_path,
|
| 340 |
+
img2img_strength=request.img2img_strength,
|
| 341 |
+
model_variant=request.model_variant if model_id in {"a1111", "localai", "diffusion"} else None,
|
| 342 |
+
)
|
| 343 |
+
|
| 344 |
+
@staticmethod
|
| 345 |
+
def _parse_size(size: str) -> tuple[int, int]:
|
| 346 |
+
try:
|
| 347 |
+
width_raw, height_raw = size.split("x", maxsplit=1)
|
| 348 |
+
width = int(width_raw)
|
| 349 |
+
height = int(height_raw)
|
| 350 |
+
except Exception as exc: # noqa: BLE001
|
| 351 |
+
raise ValueError(f"Invalid size format: {size}") from exc
|
| 352 |
+
if (width, height) not in {(512, 512), (768, 768), (1024, 1024), (1024, 1536), (1536, 1024)}:
|
| 353 |
+
raise ValueError(f"Unsupported size: {size}")
|
| 354 |
+
return width, height
|
| 355 |
+
|
| 356 |
+
@staticmethod
|
| 357 |
+
def _generate_with_timeout(provider, request, output_dir, progress_cb, is_cancelled, timeout_seconds): # noqa: ANN001
|
| 358 |
+
result_holder: dict[str, object] = {}
|
| 359 |
+
error_holder: dict[str, Exception] = {}
|
| 360 |
+
|
| 361 |
+
def _run() -> None:
|
| 362 |
+
try:
|
| 363 |
+
result_holder["value"] = provider.generate(request, output_dir, progress_cb, is_cancelled)
|
| 364 |
+
except Exception as exc: # noqa: BLE001
|
| 365 |
+
error_holder["error"] = exc
|
| 366 |
+
|
| 367 |
+
worker = threading.Thread(target=_run, daemon=True)
|
| 368 |
+
worker.start()
|
| 369 |
+
|
| 370 |
+
started = time.monotonic()
|
| 371 |
+
while worker.is_alive():
|
| 372 |
+
if is_cancelled():
|
| 373 |
+
break
|
| 374 |
+
if timeout_seconds > 0 and (time.monotonic() - started) > timeout_seconds:
|
| 375 |
+
raise TimeoutError(
|
| 376 |
+
f"Generation timed out after {timeout_seconds}s. "
|
| 377 |
+
"Try model 'dummy' or reduce steps/size."
|
| 378 |
+
)
|
| 379 |
+
worker.join(timeout=0.5)
|
| 380 |
+
|
| 381 |
+
if "error" in error_holder:
|
| 382 |
+
raise error_holder["error"]
|
| 383 |
+
if "value" not in result_holder:
|
| 384 |
+
raise RuntimeError("Generation was interrupted before producing a result")
|
| 385 |
+
return result_holder["value"]
|
| 386 |
+
|
| 387 |
+
def _save_state(self) -> None:
|
| 388 |
+
rows = []
|
| 389 |
+
with self._lock:
|
| 390 |
+
for state in self._jobs.values():
|
| 391 |
+
rows.append(
|
| 392 |
+
{
|
| 393 |
+
"job_id": state.job_id,
|
| 394 |
+
"request": state.request.model_dump(),
|
| 395 |
+
"status": state.status.value,
|
| 396 |
+
"progress": state.progress,
|
| 397 |
+
"message": state.message,
|
| 398 |
+
"created_at": state.created_at.isoformat(),
|
| 399 |
+
"updated_at": state.updated_at.isoformat(),
|
| 400 |
+
"image_paths": state.image_paths,
|
| 401 |
+
"output_dir": state.output_dir,
|
| 402 |
+
"error": state.error,
|
| 403 |
+
"cancel_requested": state.cancel_requested,
|
| 404 |
+
}
|
| 405 |
+
)
|
| 406 |
+
self._state_file.parent.mkdir(parents=True, exist_ok=True)
|
| 407 |
+
payload = json.dumps(rows, indent=2)
|
| 408 |
+
|
| 409 |
+
with self._state_write_lock:
|
| 410 |
+
tmp_path = self._state_file.with_suffix(f"{self._state_file.suffix}.{uuid.uuid4().hex}.tmp")
|
| 411 |
+
try:
|
| 412 |
+
tmp_path.write_text(payload, encoding="utf-8")
|
| 413 |
+
tmp_path.replace(self._state_file)
|
| 414 |
+
except OSError as exc:
|
| 415 |
+
LOGGER.warning("State persistence failed (%s). Continuing without crash.", exc)
|
| 416 |
+
try:
|
| 417 |
+
self._state_file.write_text(payload, encoding="utf-8")
|
| 418 |
+
except OSError as fallback_exc:
|
| 419 |
+
LOGGER.warning("Direct state write failed (%s).", fallback_exc)
|
| 420 |
+
finally:
|
| 421 |
+
tmp_path.unlink(missing_ok=True)
|
| 422 |
+
|
| 423 |
+
def _load_state(self) -> None:
|
| 424 |
+
if not self._state_file.exists():
|
| 425 |
+
return
|
| 426 |
+
try:
|
| 427 |
+
rows = json.loads(self._state_file.read_text(encoding="utf-8"))
|
| 428 |
+
except json.JSONDecodeError:
|
| 429 |
+
return
|
| 430 |
+
if not isinstance(rows, list):
|
| 431 |
+
return
|
| 432 |
+
for row in rows:
|
| 433 |
+
try:
|
| 434 |
+
req = GenerateRequest(**row["request"])
|
| 435 |
+
status = JobStatus(row.get("status", "error"))
|
| 436 |
+
# Recover queued/running jobs as error on restart to avoid silent loss.
|
| 437 |
+
if status in {JobStatus.QUEUED, JobStatus.RUNNING}:
|
| 438 |
+
status = JobStatus.ERROR
|
| 439 |
+
row["error"] = "Recovered after restart during unfinished execution"
|
| 440 |
+
state = JobState(
|
| 441 |
+
job_id=row["job_id"],
|
| 442 |
+
request=req,
|
| 443 |
+
status=status,
|
| 444 |
+
progress=int(row.get("progress", 0)),
|
| 445 |
+
message=row.get("message", ""),
|
| 446 |
+
created_at=datetime.fromisoformat(row["created_at"]),
|
| 447 |
+
updated_at=datetime.fromisoformat(row["updated_at"]),
|
| 448 |
+
image_paths=list(row.get("image_paths", [])),
|
| 449 |
+
output_dir=row.get("output_dir"),
|
| 450 |
+
error=row.get("error"),
|
| 451 |
+
cancel_requested=bool(row.get("cancel_requested", False)),
|
| 452 |
+
)
|
| 453 |
+
self._jobs[state.job_id] = state
|
| 454 |
+
except Exception: # noqa: BLE001
|
| 455 |
+
continue
|
imageforge/backend/app/local_ai/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
imageforge/backend/app/local_ai/engine.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
from dataclasses import dataclass
|
| 6 |
+
|
| 7 |
+
LOGGER = logging.getLogger(__name__)
|
| 8 |
+
|
| 9 |
+
# Lazy imports to avoid torch loading issues on Windows
|
| 10 |
+
torch = None
|
| 11 |
+
StableDiffusionImg2ImgPipeline = None
|
| 12 |
+
StableDiffusionPipeline = None
|
| 13 |
+
|
| 14 |
+
def _ensure_imports():
|
| 15 |
+
global torch, StableDiffusionImg2ImgPipeline, StableDiffusionPipeline
|
| 16 |
+
if torch is not None:
|
| 17 |
+
return
|
| 18 |
+
try:
|
| 19 |
+
import torch as _torch
|
| 20 |
+
from diffusers import StableDiffusionImg2ImgPipeline as _Img2Img
|
| 21 |
+
from diffusers import StableDiffusionPipeline as _Pipeline
|
| 22 |
+
torch = _torch
|
| 23 |
+
StableDiffusionImg2ImgPipeline = _Img2Img
|
| 24 |
+
StableDiffusionPipeline = _Pipeline
|
| 25 |
+
LOGGER.info("✓ torch and diffusers imported successfully")
|
| 26 |
+
except Exception as exc: # pragma: no cover - optional dependency
|
| 27 |
+
LOGGER.error("✗ Failed to import torch/diffusers: %s", exc, exc_info=True)
|
| 28 |
+
pass
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@dataclass(slots=True)
|
| 32 |
+
class LocalAIRequest:
|
| 33 |
+
prompt: str
|
| 34 |
+
negative_prompt: str
|
| 35 |
+
width: int
|
| 36 |
+
height: int
|
| 37 |
+
steps: int
|
| 38 |
+
guidance: float
|
| 39 |
+
seed: int
|
| 40 |
+
init_image_path: str | None = None
|
| 41 |
+
strength: float = 0.45
|
| 42 |
+
model_variant: str | None = None
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class LocalAIEngine:
    """Self-hosted local generation engine; no external API calls required."""

    def __init__(self) -> None:
        # Model id is env-configurable; defaults to a small SD checkpoint.
        self.model_id = os.getenv("IMAGEFORGE_LOCALAI_MODEL", "segmind/tiny-sd")
        self._pipe_t2i = None
        self._pipe_i2i = None

    def is_available(self) -> bool:
        """True when torch + diffusers could be imported."""
        _ensure_imports()
        return StableDiffusionPipeline is not None and torch is not None

    def _ensure(self):
        """Lazily load the txt2img (and, when possible, img2img) pipelines."""
        _ensure_imports()
        if not self.is_available():
            raise RuntimeError(
                "LocalAI dependencies missing. Install diffusers, torch, transformers, accelerate."
            )
        if self._pipe_t2i is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            dtype = torch.float16 if device == "cuda" else torch.float32
            local_only = os.getenv("IMAGEFORGE_LOCALAI_LOCAL_ONLY", "0") == "1"
            LOGGER.info("Loading LocalAI model '%s' on %s", self.model_id, device)
            try:
                # FORCE local_files_only=False to allow download if needed
                # NOTE(review): the i2i pipeline below honours `local_only`
                # while this t2i load forces False — confirm the asymmetry
                # is intentional.
                pipe = StableDiffusionPipeline.from_pretrained(
                    self.model_id,
                    torch_dtype=dtype,
                    local_files_only=False,  # Always allow download
                    use_safetensors=True if "safetensors" in self.model_id else None,
                )
            except Exception as exc:  # noqa: BLE001
                LOGGER.error("Failed to load model '%s': %s", self.model_id, exc)
                raise RuntimeError(
                    f"LocalAI model '{self.model_id}' could not be loaded. Error: {exc}"
                ) from exc
            if device == "cuda":
                pipe = pipe.to(device)
            if os.getenv("IMAGEFORGE_ENABLE_ATTENTION_SLICING", "1") == "1":
                pipe.enable_attention_slicing()
            self._pipe_t2i = pipe
            if StableDiffusionImg2ImgPipeline is not None:
                i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
                    self.model_id,
                    torch_dtype=dtype,
                    local_files_only=local_only,
                )
                if device == "cuda":
                    i2i = i2i.to(device)
                if os.getenv("IMAGEFORGE_ENABLE_ATTENTION_SLICING", "1") == "1":
                    i2i.enable_attention_slicing()
                self._pipe_i2i = i2i
        return self._pipe_t2i

    def generate(self, req: LocalAIRequest):
        """Run one generation and return the first PIL image produced."""
        from PIL import Image

        # A different model_variant invalidates the cached pipelines.
        if getattr(req, "model_variant", None) and req.model_variant != self.model_id:
            self.model_id = req.model_variant
            self._pipe_t2i = None
            self._pipe_i2i = None
        pipe = self._ensure()
        generator = torch.Generator(device=pipe.device).manual_seed(req.seed)
        if req.init_image_path and self._pipe_i2i is not None:
            # img2img path: the init image is resized to the requested size.
            init_img = Image.open(req.init_image_path).convert("RGB").resize((req.width, req.height))
            out = self._pipe_i2i(
                prompt=req.prompt,
                negative_prompt=req.negative_prompt or None,
                image=init_img,
                guidance_scale=req.guidance,
                num_inference_steps=req.steps,
                strength=max(0.0, min(1.0, req.strength)),
                generator=generator,
            )
        else:
            out = pipe(
                prompt=req.prompt,
                negative_prompt=req.negative_prompt or None,
                width=req.width,
                height=req.height,
                guidance_scale=req.guidance,
                num_inference_steps=req.steps,
                generator=generator,
            )
        return out.images[0]
|
imageforge/backend/app/main.py
ADDED
|
@@ -0,0 +1,437 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import time
|
| 5 |
+
from dataclasses import asdict
|
| 6 |
+
from datetime import datetime, timezone
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
|
| 9 |
+
from fastapi import FastAPI, Header, HTTPException, Query, Request
|
| 10 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 11 |
+
from fastapi.responses import FileResponse, PlainTextResponse
|
| 12 |
+
from PIL import Image
|
| 13 |
+
import uvicorn
|
| 14 |
+
|
| 15 |
+
from .api.schemas import (
|
| 16 |
+
AdminSettings,
|
| 17 |
+
CancelResponse,
|
| 18 |
+
DashboardStats,
|
| 19 |
+
ExportRequest,
|
| 20 |
+
ExportResponse,
|
| 21 |
+
GenerateRequest,
|
| 22 |
+
GenerateResponse,
|
| 23 |
+
HealthResponse,
|
| 24 |
+
HistoryItem,
|
| 25 |
+
JobInfoResponse,
|
| 26 |
+
MetricsResponse,
|
| 27 |
+
ModelInfo,
|
| 28 |
+
PresetPayload,
|
| 29 |
+
PresetResponse,
|
| 30 |
+
RetryResponse,
|
| 31 |
+
)
|
| 32 |
+
from .core.config import (
|
| 33 |
+
ADMIN_TOKEN,
|
| 34 |
+
CONTENT_PROFILE,
|
| 35 |
+
CORS_ORIGINS,
|
| 36 |
+
DEFAULT_BACKEND_HOST,
|
| 37 |
+
DEFAULT_BACKEND_PORT,
|
| 38 |
+
OUTPUT_DIR,
|
| 39 |
+
OUTPUT_RETENTION_DAYS,
|
| 40 |
+
REQUEST_MAX_BYTES,
|
| 41 |
+
)
|
| 42 |
+
from .core.logging import setup_logging
|
| 43 |
+
from .core.observability import MetricsStore
|
| 44 |
+
from .core.policy import ContentPolicy, PolicyAuditStore
|
| 45 |
+
from .core.security import ApiSecurity, Principal
|
| 46 |
+
from .jobs.manager import JobManager
|
| 47 |
+
from .providers.factory import ProviderRegistry
|
| 48 |
+
from .storage.history import PromptHistoryStore
|
| 49 |
+
from .storage.maintenance import cleanup_outputs
|
| 50 |
+
from .storage.presets import PresetStore
|
| 51 |
+
from .storage.settings import SettingsStore
|
| 52 |
+
|
| 53 |
+
setup_logging()
LOGGER = logging.getLogger(__name__)

# Application-wide singletons shared across request handlers.
provider_registry = ProviderRegistry()
history_store = PromptHistoryStore()
job_manager = JobManager(provider_registry, history_store)
policy_audit = PolicyAuditStore()
api_security = ApiSecurity()
preset_store = PresetStore()
settings_store = SettingsStore()
metrics = MetricsStore()

# Prune expired output folders once at startup.
cleanup_removed = cleanup_outputs(OUTPUT_RETENTION_DAYS)
if cleanup_removed:
    LOGGER.info("Startup cleanup removed %s old output day folder(s)", cleanup_removed)

app = FastAPI(title="ImageForge Backend", version="0.3.0")
allow_origins = ["*"] if CORS_ORIGINS == "*" else [item.strip() for item in CORS_ORIGINS.split(",")]
app.add_middleware(
    CORSMiddleware,
    allow_origins=allow_origins,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
@app.middleware("http")
async def request_limits_middleware(request: Request, call_next):
    """Reject oversized requests, record request metrics, and attach
    security response headers to every response."""
    length = request.headers.get("content-length")
    if length:
        try:
            declared = int(length)
        except ValueError as exc:
            # Fix: a malformed Content-Length used to raise an unhandled
            # ValueError (HTTP 500); reject it cleanly instead.
            raise HTTPException(status_code=400, detail="Invalid Content-Length header") from exc
        if declared > REQUEST_MAX_BYTES:
            raise HTTPException(status_code=413, detail="Request too large")
    start = time.perf_counter()
    response = await call_next(request)
    elapsed_ms = (time.perf_counter() - start) * 1000.0
    metrics.incr("http_requests_total")
    metrics.observe_ms("http_request", elapsed_ms)
    response.headers["X-Content-Type-Options"] = "nosniff"
    response.headers["X-Frame-Options"] = "DENY"
    response.headers["Referrer-Policy"] = "no-referrer"
    response.headers["Content-Security-Policy"] = "default-src 'self'"
    return response
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def _principal(
    http_request: Request,
    x_imageforge_api_key: str | None,
    minimum_role: str,
) -> Principal:
    """Authenticate the caller and enforce *minimum_role* for the route."""
    # Re-read the rate limit each request so admin changes apply immediately.
    api_security.limit = int(settings_store.get().get("rate_limit_per_minute", api_security.limit))
    client_id = http_request.client.host if http_request.client else "unknown"
    principal = api_security.authenticate(api_key=x_imageforge_api_key, client_id=client_id)
    api_security.require_role(principal, minimum_role)
    return principal
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
@app.get("/health", response_model=HealthResponse)
def health() -> HealthResponse:
    """Liveness probe: the process is up."""
    return HealthResponse(status="ok", timestamp=datetime.now(timezone.utc))
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@app.get("/ready", response_model=HealthResponse)
def ready() -> HealthResponse:
    """Readiness probe: verifies the output directory exists and is writable."""
    try:
        OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
        probe_file = OUTPUT_DIR / ".ready_probe"
        probe_file.write_text("ok", encoding="utf-8")
        probe_file.unlink(missing_ok=True)
    except Exception as exc:  # noqa: BLE001
        raise HTTPException(status_code=503, detail=f"Output directory not writable: {exc}") from exc
    return HealthResponse(status="ready", timestamp=datetime.now(timezone.utc))
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
@app.get("/metrics", response_model=MetricsResponse)
def metrics_json(
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> MetricsResponse:
    """Return the current metrics snapshot as JSON (viewer role required)."""
    _principal(http_request, x_imageforge_api_key, "viewer")
    snapshot = metrics.snapshot()
    return MetricsResponse(metrics=snapshot)
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
@app.get("/metrics/prometheus")
def metrics_prom(
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> PlainTextResponse:
    """Return metrics in Prometheus text format (viewer role required)."""
    _principal(http_request, x_imageforge_api_key, "viewer")
    body = metrics.to_prometheus()
    return PlainTextResponse(body)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
@app.get("/models", response_model=list[ModelInfo])
def models(
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> list[ModelInfo]:
    """List registered providers; zimageturbo reports unavailable unless the
    admin "adult_enabled" setting is on."""
    _principal(http_request, x_imageforge_api_key, "viewer")
    adult_enabled = bool(settings_store.get().get("adult_enabled", False))
    infos: list[ModelInfo] = []
    for provider in provider_registry.list():
        if provider.id == "zimageturbo":
            # Short-circuits: is_available() is not called when disabled.
            available = adult_enabled and provider.is_available()
        else:
            available = provider.is_available()
        infos.append(
            ModelInfo(
                id=provider.id,
                name=provider.name,
                description=provider.description,
                available=available,
            )
        )
    return infos
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
@app.post("/generate", response_model=GenerateResponse)
def generate(
    request: GenerateRequest,
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
    x_imageforge_admin_token: str | None = Header(default=None, alias="X-ImageForge-Admin-Token"),
) -> GenerateResponse:
    """Validate the request, run the content policy, and enqueue a job.

    Requires operator role. Every policy decision is written to the audit
    store, allowed or not.
    """
    _principal(http_request, x_imageforge_api_key, "operator")
    try:
        provider_registry.get(request.model)
    except KeyError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc

    runtime = settings_store.get()
    if request.model == "zimageturbo" and not bool(runtime.get("adult_enabled", False)):
        raise HTTPException(status_code=400, detail="Adult provider is disabled in admin settings")

    active_profile = runtime.get("content_profile", CONTENT_PROFILE)
    policy = ContentPolicy(active_profile)
    # The override only takes effect when requested AND the admin token matches.
    admin_override_applied = bool(
        request.admin_override and ADMIN_TOKEN and x_imageforge_admin_token == ADMIN_TOKEN
    )
    decision = policy.evaluate(
        f"{request.prompt}\n{request.negative_prompt}",
        admin_override=admin_override_applied,
    )
    policy_audit.write(
        prompt=request.prompt,
        negative_prompt=request.negative_prompt,
        profile=active_profile,
        decision=decision,
        client_ip=http_request.client.host if http_request.client else "unknown",
        model=request.model,
        admin_override_requested=request.admin_override,
        admin_override_applied=admin_override_applied,
    )
    if not decision.allowed:
        raise HTTPException(status_code=400, detail=f"Blocked by policy: {decision.reason}")

    metrics.incr("jobs_submitted_total")
    return GenerateResponse(job_id=job_manager.submit(request))
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
@app.get("/jobs/{job_id}", response_model=JobInfoResponse)
def job_status(
    job_id: str,
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> JobInfoResponse:
    """Return the current state of a single job, or 404 if it is unknown."""
    _principal(http_request, x_imageforge_api_key, "viewer")
    if (state := job_manager.get(job_id)) is None:
        raise HTTPException(status_code=404, detail="Job not found")
    return _to_job_response(state)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
@app.get("/jobs", response_model=list[JobInfoResponse])
def list_jobs(
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> list[JobInfoResponse]:
    """List every tracked job in its API representation."""
    _principal(http_request, x_imageforge_api_key, "viewer")
    states = job_manager.list()
    return [_to_job_response(s) for s in states]
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
@app.get("/image")
def get_image(path: str = Query(...)) -> FileResponse:
    """Serve an image file by absolute path, restricted to OUTPUT_DIR.

    NOTE(review): unlike /files, this endpoint performs no _principal auth
    check -- presumably so plain <img> tags can load images without custom
    headers; confirm this is intentional.
    """
    try:
        # strict=True raises FileNotFoundError for missing paths, doubling
        # as the existence check before the containment test below.
        source = Path(path).resolve(strict=True)
    except FileNotFoundError as exc:
        raise HTTPException(status_code=404, detail="Image not found") from exc

    # Path-traversal guard: the fully resolved path must live under
    # OUTPUT_DIR (or be OUTPUT_DIR itself, rejected below as a non-file).
    output_root = OUTPUT_DIR.resolve()
    if source != output_root and output_root not in source.parents:
        raise HTTPException(status_code=403, detail="Forbidden source path")
    if not source.is_file():
        raise HTTPException(status_code=404, detail="Image not found")
    return FileResponse(source)
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
@app.post("/jobs/{job_id}/retry", response_model=RetryResponse)
def retry_job(
    job_id: str,
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> RetryResponse:
    """Clone an existing job into a fresh submission and return both ids."""
    _principal(http_request, x_imageforge_api_key, "operator")
    new_id = job_manager.retry(job_id)
    if new_id:
        metrics.incr("jobs_retry_total")
        return RetryResponse(old_job_id=job_id, new_job_id=new_id)
    raise HTTPException(status_code=404, detail="Job not found")
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
def _to_job_response(state) -> JobInfoResponse:  # noqa: ANN001
    """Translate an internal job state object into the API response schema."""
    fields = {
        "job_id": state.job_id,
        "status": state.status,
        "progress": state.progress,
        "message": state.message,
        "created_at": state.created_at,
        "updated_at": state.updated_at,
        "image_paths": state.image_paths,
        "output_dir": state.output_dir,
        "error": state.error,
    }
    return JobInfoResponse(**fields)
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
@app.post("/jobs/{job_id}/cancel", response_model=CancelResponse)
def cancel(
    job_id: str,
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> CancelResponse:
    """Request cancellation of a running job; 404 when the id is unknown."""
    _principal(http_request, x_imageforge_api_key, "operator")
    if not job_manager.cancel(job_id):
        raise HTTPException(status_code=404, detail="Job not found")
    metrics.incr("jobs_cancel_total")
    return CancelResponse(success=True)
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
@app.get("/history", response_model=list[HistoryItem])
def history(
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> list[HistoryItem]:
    """Return stored prompt history, silently skipping malformed records."""
    _principal(http_request, x_imageforge_api_key, "viewer")
    results: list[HistoryItem] = []
    for entry in history_store.list():
        try:
            item = HistoryItem(
                prompt=entry.get("prompt", ""),
                negative_prompt=entry.get("negative_prompt", ""),
                timestamp=datetime.fromisoformat(entry.get("timestamp", "")),
            )
        except Exception:  # noqa: BLE001
            # Best-effort: drop unparseable history entries rather than
            # failing the whole listing.
            continue
        results.append(item)
    return results
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
@app.get("/dashboard/stats", response_model=DashboardStats)
def dashboard_stats(
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> DashboardStats:
    """Expose aggregate job-manager counters for the dashboard view."""
    _principal(http_request, x_imageforge_api_key, "viewer")
    stats = job_manager.stats()
    return DashboardStats(**stats)
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
@app.get("/presets", response_model=list[PresetResponse])
def list_presets(
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> list[PresetResponse]:
    """List all stored generation presets."""
    _principal(http_request, x_imageforge_api_key, "viewer")
    presets = preset_store.list()
    return [PresetResponse(**asdict(p)) for p in presets]
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
@app.post("/presets", response_model=PresetResponse)
def upsert_preset(
    payload: PresetPayload,
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> PresetResponse:
    """Create or replace a preset and echo the stored version back."""
    _principal(http_request, x_imageforge_api_key, "operator")
    stored = preset_store.upsert(payload.model_dump())
    return PresetResponse(**asdict(stored))
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
@app.delete("/presets/{name}", response_model=CancelResponse)
def delete_preset(
    name: str,
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> CancelResponse:
    """Delete a preset by name; admin role required."""
    _principal(http_request, x_imageforge_api_key, "admin")
    if preset_store.delete(name):
        return CancelResponse(success=True)
    raise HTTPException(status_code=404, detail="Preset not found")
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
@app.post("/export", response_model=ExportResponse)
def export_image(
    payload: ExportRequest,
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> ExportResponse:
    """Re-encode an image under OUTPUT_DIR to the requested format/size.

    Requires the operator role. The source path must resolve inside
    OUTPUT_DIR (path-traversal guard); the exported file is written next to
    the source as ``<stem>_export.<format>``.

    Raises:
        HTTPException: 403 when the path escapes OUTPUT_DIR, 404 when the
            source file does not exist.
    """
    _principal(http_request, x_imageforge_api_key, "operator")
    source = Path(payload.source_path).resolve()
    output_root = OUTPUT_DIR.resolve()
    if output_root not in source.parents:
        raise HTTPException(status_code=403, detail="Forbidden source path")
    if not source.exists() or not source.is_file():
        raise HTTPException(status_code=404, detail="Source image not found")
    img = Image.open(source).convert("RGB")
    if payload.max_width or payload.max_height:
        # thumbnail() preserves aspect ratio and only ever shrinks.
        max_w = payload.max_width or img.width
        max_h = payload.max_height or img.height
        img.thumbnail((max_w, max_h))
    out_path = source.with_name(f"{source.stem}_export.{payload.format}")
    save_kwargs: dict[str, int] = {}
    if payload.format in {"jpg", "webp"}:
        save_kwargs["quality"] = payload.quality
    # BUG FIX: Pillow's format registry has no "JPG" entry, so the previous
    # payload.format.upper() call made every jpg export fail. Map the "jpg"
    # extension to Pillow's canonical "JPEG" format name.
    pil_format = "JPEG" if payload.format == "jpg" else payload.format.upper()
    img.save(out_path, format=pil_format, **save_kwargs)
    return ExportResponse(output_path=str(out_path.resolve()))
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
@app.get("/admin/settings", response_model=AdminSettings)
def get_admin_settings(
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> AdminSettings:
    """Return the current runtime settings; admin role required."""
    principal = _principal(http_request, x_imageforge_api_key, "admin")
    metrics.incr(f"admin_settings_read_by_{principal.role}")
    return AdminSettings(**settings_store.get())
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
@app.put("/admin/settings", response_model=AdminSettings)
def put_admin_settings(
    payload: AdminSettings,
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> AdminSettings:
    """Replace runtime settings, recording the acting client as the actor."""
    principal = _principal(http_request, x_imageforge_api_key, "admin")
    updated = settings_store.update(payload.model_dump(), actor=principal.client_id)
    return AdminSettings(**updated)
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
@app.post("/admin/cleanup", response_model=DashboardStats)
def cleanup_endpoint(
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
) -> DashboardStats:
    """Purge outputs older than the configured retention; returns fresh stats."""
    _principal(http_request, x_imageforge_api_key, "admin")
    runtime = settings_store.get()
    retention_days = int(runtime.get("output_retention_days", OUTPUT_RETENTION_DAYS))
    cleanup_outputs(retention_days)
    return DashboardStats(**job_manager.stats())
|
| 407 |
+
|
| 408 |
+
|
| 409 |
+
@app.get("/files/{relative_path:path}")
def output_file(
    relative_path: str,
    http_request: Request,
    x_imageforge_api_key: str | None = Header(default=None, alias="X-ImageForge-Api-Key"),
):
    """Serve a file relative to the working directory, confined to OUTPUT_DIR."""
    _principal(http_request, x_imageforge_api_key, "viewer")
    target = (Path.cwd() / relative_path).resolve()
    output_root = OUTPUT_DIR.resolve()
    # Containment check: the resolved path must be OUTPUT_DIR or below it.
    inside = target == output_root or output_root in target.parents
    if not inside:
        raise HTTPException(status_code=403, detail="Forbidden")
    if not (target.exists() and target.is_file()):
        raise HTTPException(status_code=404, detail="File not found")
    return FileResponse(target)
|
| 423 |
+
|
| 424 |
+
|
| 425 |
+
def run() -> None:
    """Launch the FastAPI application under uvicorn with the configured host/port."""
    server_options = {
        "host": DEFAULT_BACKEND_HOST,
        "port": DEFAULT_BACKEND_PORT,
        "reload": False,
        "log_level": "info",
    }
    uvicorn.run("backend.app.main:app", **server_options)
|
| 433 |
+
|
| 434 |
+
|
| 435 |
+
# Script entry point: log startup and run the embedded uvicorn server.
if __name__ == "__main__":
    LOGGER.info("Starting ImageForge backend")
    run()
|
imageforge/backend/app/providers/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
imageforge/backend/app/providers/a1111_provider.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import base64
|
| 4 |
+
import json
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
import time
|
| 7 |
+
from urllib import error, request
|
| 8 |
+
|
| 9 |
+
from ..core.config import (
|
| 10 |
+
A1111_API_AUTH,
|
| 11 |
+
A1111_API_PASSWORD,
|
| 12 |
+
A1111_API_USER,
|
| 13 |
+
A1111_BASE_URL,
|
| 14 |
+
A1111_HEALTH_ENDPOINT,
|
| 15 |
+
A1111_RETRY_BACKOFF_SECONDS,
|
| 16 |
+
A1111_RETRY_COUNT,
|
| 17 |
+
A1111_TIMEOUT_SECONDS,
|
| 18 |
+
)
|
| 19 |
+
from .interface import ProviderRequest, ProviderResult, ProviderUnavailableError
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class A1111Provider:
    """Image provider backed by the AUTOMATIC1111 Stable Diffusion WebUI HTTP API.

    Talks to the WebUI's /sdapi/v1 endpoints over urllib with optional basic
    auth and a simple linear-backoff retry policy for 5xx/429 responses.
    """

    id = "a1111"
    name = "AUTOMATIC1111"
    description = "Uses AUTOMATIC1111 Stable Diffusion WebUI API"

    def __init__(self) -> None:
        # All connection parameters come from module-level config constants.
        self.base_url = A1111_BASE_URL.rstrip("/")
        self.timeout = A1111_TIMEOUT_SECONDS
        self.health_endpoint = self._normalize_endpoint(A1111_HEALTH_ENDPOINT)
        # Clamp to non-negative so misconfigured values cannot break retries.
        self.retry_count = max(0, A1111_RETRY_COUNT)
        self.retry_backoff_seconds = max(0.0, A1111_RETRY_BACKOFF_SECONDS)
        self.auth_header = self._resolve_auth_header()

    def is_available(self) -> bool:
        """Probe the health endpoint once (no retries); True when reachable."""
        try:
            self._request_json("GET", self.health_endpoint, retries=0)
            return True
        except Exception:
            return False

    def generate(self, request_data: ProviderRequest, output_dir: Path, progress, is_cancelled) -> ProviderResult:
        """Run txt2img (or img2img when an init image is given) via the WebUI API.

        Decoded PNG bytes are written to output_dir as image_NN.png. The
        is_cancelled callable is honored before the request and between
        image writes; progress is reported via the progress callback.

        Raises:
            ProviderUnavailableError: when the WebUI API is unreachable.
            ValueError: when init_image_path is set but invalid.
            RuntimeError: when the API returns no/undecodable images.
        """
        if not self.is_available():
            raise ProviderUnavailableError(
                "AUTOMATIC1111 API not reachable. Start webui with --api and verify IMAGEFORGE_A1111_BASE_URL."
            )
        if is_cancelled():
            return ProviderResult(image_paths=[])

        output_dir.mkdir(parents=True, exist_ok=True)
        progress(5, "AUTOMATIC1111 request started")

        payload = {
            "prompt": request_data.prompt,
            "negative_prompt": request_data.negative_prompt or "",
            "steps": request_data.steps,
            "cfg_scale": request_data.guidance,
            "seed": request_data.seed,
            "width": request_data.width,
            "height": request_data.height,
            "batch_size": request_data.count,
            "n_iter": 1,
            "sampler_name": "Euler a",
        }

        if request_data.model_variant:
            # Switch the active checkpoint before generating.
            self._request_json("POST", "/sdapi/v1/options", {"sd_model_checkpoint": request_data.model_variant})

        endpoint = "/sdapi/v1/txt2img"
        if request_data.init_image_path:
            init_path = Path(request_data.init_image_path)
            if not init_path.exists() or not init_path.is_file():
                raise ValueError("init_image_path does not exist or is not a file")
            # img2img uses a separate payload shape: it adds init_images and
            # denoising_strength (clamped to [0, 1]).
            payload = {
                "prompt": request_data.prompt,
                "negative_prompt": request_data.negative_prompt or "",
                "steps": request_data.steps,
                "cfg_scale": request_data.guidance,
                "seed": request_data.seed,
                "width": request_data.width,
                "height": request_data.height,
                "batch_size": request_data.count,
                "n_iter": 1,
                "denoising_strength": max(0.0, min(1.0, request_data.img2img_strength)),
                "init_images": [base64.b64encode(init_path.read_bytes()).decode("ascii")],
                "sampler_name": "Euler a",
            }
            endpoint = "/sdapi/v1/img2img"

        response = self._request_json("POST", endpoint, payload)
        images = response.get("images") if isinstance(response, dict) else None
        if not isinstance(images, list) or not images:
            raise RuntimeError("AUTOMATIC1111 returned no images")

        image_paths: list[Path] = []
        total = len(images)
        for idx, encoded in enumerate(images, start=1):
            if is_cancelled():
                break
            try:
                raw = base64.b64decode(encoded)
            except Exception as exc:
                raise RuntimeError("Failed to decode image from AUTOMATIC1111") from exc
            out_path = output_dir / f"image_{idx:02d}.png"
            out_path.write_bytes(raw)
            image_paths.append(out_path)
            pct = int((idx / max(1, total)) * 100)
            progress(pct, f"AUTOMATIC1111 image {idx}/{total} complete")

        return ProviderResult(image_paths=image_paths)

    def _request_json(self, method: str, endpoint: str, payload: dict | None = None, retries: int | None = None):
        """Issue a JSON request and decode the JSON response, with retries.

        retries overrides the instance default when given; 5xx and 429
        responses are retried with linear backoff, any other error exhausts
        the budget and is re-raised wrapped in RuntimeError.
        """
        endpoint = self._normalize_endpoint(endpoint)
        retry_budget = self.retry_count if retries is None else max(0, retries)
        data = json.dumps(payload).encode("utf-8") if payload is not None else None
        req = request.Request(
            f"{self.base_url}{endpoint}",
            data=data,
            method=method,
            headers=self._headers(),
        )

        attempts = retry_budget + 1
        for attempt in range(1, attempts + 1):
            try:
                with request.urlopen(req, timeout=self.timeout) as resp:
                    return json.loads(resp.read().decode("utf-8"))
            except error.HTTPError as exc:
                detail = exc.read().decode("utf-8", errors="ignore")
                # Only server errors and rate limiting are worth retrying.
                retryable = exc.code >= 500 or exc.code == 429
                if retryable and attempt < attempts:
                    self._sleep_before_retry(attempt)
                    continue
                raise RuntimeError(f"AUTOMATIC1111 {method} failed ({exc.code}): {detail}") from exc
            except Exception as exc:
                # Network-level failures (connection refused, timeout, ...)
                # are always retried while budget remains.
                if attempt < attempts:
                    self._sleep_before_retry(attempt)
                    continue
                raise RuntimeError(f"AUTOMATIC1111 {method} failed: {exc}") from exc

    def _headers(self) -> dict[str, str]:
        """Build request headers, attaching basic auth when configured."""
        headers = {"Content-Type": "application/json"}
        if self.auth_header:
            headers["Authorization"] = self.auth_header
        return headers

    @staticmethod
    def _normalize_endpoint(endpoint: str) -> str:
        """Ensure the endpoint begins with a single leading slash."""
        if endpoint.startswith("/"):
            return endpoint
        return f"/{endpoint}"

    def _resolve_auth_header(self) -> str:
        """Derive a Basic auth header from config; empty string when unset.

        A pre-joined "user:pass" in A1111_API_AUTH wins over the separate
        user/password settings.
        """
        if A1111_API_AUTH.strip():
            token = base64.b64encode(A1111_API_AUTH.encode("utf-8")).decode("ascii")
            return f"Basic {token}"
        if A1111_API_USER.strip() or A1111_API_PASSWORD.strip():
            pair = f"{A1111_API_USER}:{A1111_API_PASSWORD}"
            token = base64.b64encode(pair.encode("utf-8")).decode("ascii")
            return f"Basic {token}"
        return ""

    def _sleep_before_retry(self, attempt: int) -> None:
        """Linear backoff: sleep attempt * backoff seconds (no-op when 0)."""
        if self.retry_backoff_seconds <= 0:
            return
        time.sleep(self.retry_backoff_seconds * attempt)
|
imageforge/backend/app/providers/diffusion_provider.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
from .interface import ProviderRequest, ProviderResult, ProviderUnavailableError
|
| 8 |
+
|
| 9 |
+
LOGGER = logging.getLogger(__name__)
|
| 10 |
+
|
| 11 |
+
# Lazy imports to avoid torch loading issues on Windows
torch = None
StableDiffusionImg2ImgPipeline = None
StableDiffusionPipeline = None

def _ensure_imports():
    """Populate the module-level torch/diffusers globals on first use.

    Leaves all three globals as None when the optional dependencies are
    missing; callers test them for None to decide provider availability.
    """
    global torch, StableDiffusionImg2ImgPipeline, StableDiffusionPipeline
    if torch is not None:
        # Imports already succeeded on a previous call.
        return
    try:
        import torch as _torch
        from diffusers import StableDiffusionImg2ImgPipeline as _Img2Img
        from diffusers import StableDiffusionPipeline as _Pipeline
        torch = _torch
        StableDiffusionImg2ImgPipeline = _Img2Img
        StableDiffusionPipeline = _Pipeline
    except Exception: # pragma: no cover - optional dependency
        # Deliberate swallow: a missing torch/diffusers install simply means
        # DiffusionProvider reports itself unavailable.
        pass
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class DiffusionProvider:
    """Local Stable Diffusion provider built on the diffusers library.

    Pipelines are loaded lazily and cached per model id; txt2img and
    img2img use separate cached pipeline instances.
    """

    id = "diffusion"
    name = "Stable Diffusion (local)"
    description = "Uses diffusers for local Stable Diffusion generation"

    def __init__(self, model_id: str = "segmind/tiny-sd") -> None:
        # IMAGEFORGE_DIFFUSION_MODEL overrides the default model id.
        self.model_id = os.getenv("IMAGEFORGE_DIFFUSION_MODEL", model_id)
        self._pipe: StableDiffusionPipeline | None = None
        self._img2img_pipe: StableDiffusionImg2ImgPipeline | None = None

    def is_available(self) -> bool:
        """True when torch and diffusers imported successfully."""
        _ensure_imports()
        return StableDiffusionPipeline is not None and torch is not None

    def _ensure_pipeline(self) -> StableDiffusionPipeline:
        """Load (once) and return the cached txt2img pipeline.

        Raises ProviderUnavailableError when dependencies are missing or the
        model cannot be loaded under the current local-only setting.
        """
        _ensure_imports()
        if StableDiffusionPipeline is None or torch is None:
            raise ProviderUnavailableError(
                "Diffusion dependencies missing. Install diffusers, torch, and transformers."
            )
        if self._pipe is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            # fp16 only on GPU; fp32 keeps CPU inference numerically safe.
            dtype = torch.float16 if device == "cuda" else torch.float32
            local_only = os.getenv("IMAGEFORGE_DIFFUSION_LOCAL_ONLY", "0") == "1"
            LOGGER.info("Loading diffusion model '%s' on %s", self.model_id, device)
            try:
                pipe = StableDiffusionPipeline.from_pretrained(
                    self.model_id,
                    torch_dtype=dtype,
                    local_files_only=local_only,
                )
            except Exception as exc: # noqa: BLE001
                mode_hint = "local cache only" if local_only else "online download"
                raise ProviderUnavailableError(
                    f"Diffusion model '{self.model_id}' could not be loaded ({mode_hint}). "
                    "Set IMAGEFORGE_DIFFUSION_LOCAL_ONLY=0 to allow downloading models."
                ) from exc
            if device == "cuda":
                pipe = pipe.to(device)
            if os.getenv("IMAGEFORGE_ENABLE_ATTENTION_SLICING", "1") == "1":
                # Trades speed for lower VRAM usage; on by default.
                pipe.enable_attention_slicing()
            self._pipe = pipe
        return self._pipe

    def _ensure_img2img_pipeline(self) -> StableDiffusionImg2ImgPipeline | None:
        """Load (once) and return the cached img2img pipeline, or None.

        Unlike _ensure_pipeline, missing dependencies yield None instead of
        raising; loading failures still raise ProviderUnavailableError.
        """
        _ensure_imports()
        if StableDiffusionImg2ImgPipeline is None or torch is None:
            return None
        if self._img2img_pipe is None:
            device = "cuda" if torch.cuda.is_available() else "cpu"
            dtype = torch.float16 if device == "cuda" else torch.float32
            local_only = os.getenv("IMAGEFORGE_DIFFUSION_LOCAL_ONLY", "0") == "1"
            LOGGER.info("Loading diffusion img2img model '%s' on %s", self.model_id, device)
            try:
                img2img = StableDiffusionImg2ImgPipeline.from_pretrained(
                    self.model_id,
                    torch_dtype=dtype,
                    local_files_only=local_only,
                )
            except Exception as exc: # noqa: BLE001
                mode_hint = "local cache only" if local_only else "online download"
                raise ProviderUnavailableError(
                    f"Diffusion img2img model '{self.model_id}' could not be loaded ({mode_hint}). "
                    "Set IMAGEFORGE_DIFFUSION_LOCAL_ONLY=0 to allow downloading models."
                ) from exc
            if device == "cuda":
                img2img = img2img.to(device)
            if os.getenv("IMAGEFORGE_ENABLE_ATTENTION_SLICING", "1") == "1":
                img2img.enable_attention_slicing()
            self._img2img_pipe = img2img
        return self._img2img_pipe

    def generate(self, request: ProviderRequest, output_dir: Path, progress, is_cancelled) -> ProviderResult:
        """Generate request.count images, one pipeline call per image.

        Each image uses seed + index for reproducible variation. The
        per-step callback raises RuntimeError to abort mid-generation when
        is_cancelled() becomes true.
        """
        if is_cancelled():
            return ProviderResult(image_paths=[])
        if request.model_variant and request.model_variant != self.model_id:
            # Model switch: drop both cached pipelines so they reload lazily.
            self.model_id = request.model_variant
            self._pipe = None
            self._img2img_pipe = None
        progress(1, "Loading diffusion model")
        pipe = self._ensure_pipeline()
        output_dir.mkdir(parents=True, exist_ok=True)
        image_paths: list[Path] = []

        for idx in range(request.count):
            if is_cancelled():
                break

            seed = request.seed + idx
            generator = torch.Generator(device=pipe.device).manual_seed(seed)

            def _callback(step: int, timestep: int, latents): # noqa: ANN001
                # Cancellation is surfaced by raising out of the pipeline.
                if is_cancelled():
                    raise RuntimeError("Generation cancelled")
                local_progress = int(((step + 1) / max(1, request.steps)) * 100)
                progress(local_progress, f"Diffusion step {step + 1}/{request.steps} (image {idx + 1})")

            if request.init_image_path:
                from PIL import Image

                img2img_pipe = self._ensure_img2img_pipeline()
                if img2img_pipe is None:
                    raise ProviderUnavailableError("Img2Img requires diffusers img2img pipeline support")
                init_image = Image.open(request.init_image_path).convert("RGB").resize((request.width, request.height))
                result = img2img_pipe(
                    prompt=request.prompt,
                    negative_prompt=request.negative_prompt or None,
                    image=init_image,
                    num_inference_steps=request.steps,
                    guidance_scale=request.guidance,
                    strength=max(0.0, min(1.0, request.img2img_strength)),
                    generator=generator,
                    callback=_callback,
                    callback_steps=1,
                )
            else:
                result = pipe(
                    prompt=request.prompt,
                    negative_prompt=request.negative_prompt or None,
                    width=request.width,
                    height=request.height,
                    num_inference_steps=request.steps,
                    guidance_scale=request.guidance,
                    generator=generator,
                    callback=_callback,
                    callback_steps=1,
                )
            image = result.images[0]
            image_path = output_dir / f"image_{idx + 1:02d}.png"
            image.save(image_path, format="PNG")
            image_paths.append(image_path)
            pct = int(((idx + 1) / request.count) * 100)
            progress(pct, f"Diffusion image {idx + 1}/{request.count} complete")

        return ProviderResult(image_paths=image_paths)
|
imageforge/backend/app/providers/dummy_provider.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
|
| 6 |
+
from PIL import Image, ImageDraw, ImageFont
|
| 7 |
+
|
| 8 |
+
from .interface import IImageProvider, ProviderRequest, ProviderResult
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class DummyProvider(IImageProvider):
    """Dependency-free provider that renders annotated gradient placeholders."""

    id = "dummy"
    name = "Dummy Placeholder"
    description = "Generates local placeholder images without AI dependencies"

    def is_available(self) -> bool:
        """Always available: only Pillow is required."""
        return True

    def generate(
        self,
        request: ProviderRequest,
        output_dir: Path,
        progress,
        is_cancelled,
    ) -> ProviderResult:
        """Render request.count placeholder images into output_dir."""
        output_dir.mkdir(parents=True, exist_ok=True)
        produced: list[Path] = []

        for index in range(request.count):
            if is_cancelled():
                break

            canvas = self._render_gradient(request)
            if request.init_image_path:
                # Blend the init image in, weighted by img2img_strength.
                base = Image.open(request.init_image_path).convert("RGB")
                base = base.resize((request.width, request.height))
                blend_alpha = max(0.0, min(1.0, request.img2img_strength))
                canvas = Image.blend(base, canvas, 1.0 - blend_alpha)

            self._annotate(canvas, request, index)

            target = output_dir / f"image_{index + 1:02d}.png"
            canvas.save(target, format="PNG")
            produced.append(target)
            percent = int(((index + 1) / request.count) * 100)
            progress(percent, f"Dummy image {index + 1}/{request.count} complete")

        return ProviderResult(image_paths=produced)

    @staticmethod
    def _render_gradient(request: ProviderRequest) -> Image.Image:
        """Build a diagonal gradient with a sinusoidal blue channel."""
        canvas = Image.new("RGB", (request.width, request.height))
        pixels = canvas.load()
        for row in range(request.height):
            for col in range(request.width):
                red = int(40 + 215 * (col / max(1, request.width - 1)))
                green = int(40 + 215 * (row / max(1, request.height - 1)))
                blue = int(120 + 80 * math.sin((col + row) / 120))
                pixels[col, row] = (red, green, max(0, min(255, blue)))
        return canvas

    @staticmethod
    def _annotate(canvas, request: ProviderRequest, index: int) -> None:
        """Overlay request metadata on black strips for easy identification."""
        draw = ImageDraw.Draw(canvas)
        font = ImageFont.load_default()
        captions = [
            f"DummyProvider #{index + 1}",
            f"Prompt: {request.prompt[:80]}",
            f"Negative: {request.negative_prompt[:80] or '-'}",
            f"Seed: {request.seed}",
            f"Size: {request.width}x{request.height}",
        ]
        y_pos = 20
        for line in captions:
            draw.rectangle((16, y_pos - 2, request.width - 16, y_pos + 14), fill=(0, 0, 0))
            draw.text((20, y_pos), line, fill=(255, 255, 255), font=font)
            y_pos += 20
|
imageforge/backend/app/providers/factory.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from .a1111_provider import A1111Provider
|
| 4 |
+
from .diffusion_provider import DiffusionProvider
|
| 5 |
+
from .dummy_provider import DummyProvider
|
| 6 |
+
from .interface import IImageProvider
|
| 7 |
+
from .localai_provider import LocalAIProvider
|
| 8 |
+
from .zimageturbo_provider import ZImageTurboProvider
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class ProviderRegistry:
    """Holds the fixed set of image providers, keyed by provider id."""

    def __init__(self) -> None:
        self._providers: dict[str, IImageProvider] = {
            "dummy": DummyProvider(),
            "localai": LocalAIProvider(),
            "diffusion": DiffusionProvider(),
            "a1111": A1111Provider(),
            "zimageturbo": ZImageTurboProvider(),
        }

    def get(self, provider_id: str) -> IImageProvider:
        """Return the provider for the given id; KeyError when unknown."""
        if provider_id not in self._providers:
            raise KeyError(f"Unknown provider: {provider_id}")
        return self._providers[provider_id]

    def list(self) -> list[IImageProvider]:
        """Return all registered providers in registration order."""
        return [*self._providers.values()]
|
imageforge/backend/app/providers/huggingface_provider.py
ADDED
|
@@ -0,0 +1,328 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
HuggingFace Spaces Provider for PixelForge
|
| 3 |
+
|
| 4 |
+
This provider allows PixelForge to call any HuggingFace Space as an image generation backend.
|
| 5 |
+
Supports both Spaces with Gradio API and custom inference endpoints.
|
| 6 |
+
|
| 7 |
+
Phase 2 Implementation (Future)
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import os
|
| 11 |
+
import json
|
| 12 |
+
import requests
|
| 13 |
+
import base64
|
| 14 |
+
import logging
|
| 15 |
+
from io import BytesIO
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
from PIL import Image
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class HuggingFaceSpaceProvider:
    """
    Provider for HuggingFace Spaces integration.

    Calls a Space's Gradio API (``<space-url>/call/predict``) over REST and
    decodes base64 images from the response.

    Examples:
        - Heartsync/Adult: adult image generation (Z-Image Turbo)
        - any other custom Space exposing a compatible predict endpoint

    Environment variables:
        - HF_API_TOKEN: HuggingFace API token (https://huggingface.co/settings/tokens)
        - HF_SPACE_URL: full Space URL or just "username/space-name"
        - HF_REQUEST_TIMEOUT: timeout in seconds (default: 300)

    NOTE(review): ``generate`` returns a plain dict and takes no
    output_dir/progress callbacks, so it does not match IImageProvider yet;
    this is a Phase 2 file that is not registered in the factory.
    """

    id = "huggingface"
    name = "HuggingFace Spaces"
    description = "Nutzt HuggingFace Spaces für Image Generation (erwachsene Inhalte, Anime, Custom)"

    def __init__(
        self,
        space_id: str | None = None,
        api_token: str | None = None,
        timeout: int | None = None,
    ):
        """
        Args:
            space_id: Space id (e.g. "Heartsync/Adult") or a full https URL.
            api_token: HuggingFace API token; defaults to env HF_API_TOKEN.
            timeout: request timeout in seconds; defaults to env
                HF_REQUEST_TIMEOUT, then 300.
        """
        self.space_id = space_id or os.getenv("HF_SPACE_URL", "Heartsync/Adult")
        self.api_token = api_token or os.getenv("HF_API_TOKEN", "")
        # BUGFIX: previously `timeout or int(os.getenv(...))` with a default of
        # 300 meant HF_REQUEST_TIMEOUT could never take effect (300 is truthy).
        self.timeout = timeout if timeout is not None else int(os.getenv("HF_REQUEST_TIMEOUT", "300"))

        # Normalize a bare "user/space" id into the canonical Space URL.
        if not self.space_id.startswith("https://"):
            self.space_id = f"https://huggingface.co/spaces/{self.space_id}"

        self.api_url = f"{self.space_id}/call/predict"
        self.logger = logging.getLogger(__name__)

    def is_available(self) -> bool:
        """Return True when a token is configured and the Space responds (<500)."""
        if not self.api_token:
            self.logger.warning("HF_API_TOKEN not set - HuggingFace Space Provider unavailable")
            return False

        try:
            # Lightweight health probe: HEAD request against the Space page.
            response = requests.head(
                self.space_id,
                timeout=5,
                headers={"Authorization": f"Bearer {self.api_token}"}
            )
            is_available = response.status_code < 500
            if is_available:
                self.logger.info(f"✓ HuggingFace Space verfügbar: {self.space_id}")
            else:
                self.logger.warning(f"HuggingFace Space Status: {response.status_code}")
            return is_available
        except Exception as e:
            self.logger.error(f"HuggingFace Space Health Check failed: {e}")
            return False

    def generate(self, request: "ProviderRequest") -> "ProviderResult":
        """
        Generate image(s) using the configured HuggingFace Space.

        Args:
            request: request object providing prompt, negative_prompt, seed,
                steps and guidance_scale attributes.

        Returns:
            dict with "image_paths" (list of saved PNG paths) and "message".

        Raises:
            TimeoutError: when the Space does not answer within the timeout.
            PermissionError: on HTTP 401 (invalid token).
            ValueError: on HTTP 404 or when no image is found in the response.
            RuntimeError: on any other HTTP error.
        """
        try:
            # Positional payload for the Gradio Space API; the order is assumed
            # to match the Space's predict signature — TODO confirm.
            payload = {
                "data": [
                    request.prompt,  # prompt
                    request.negative_prompt,  # negative_prompt
                    int(request.seed % (2**31)),  # seed (clamped to int32 range)
                    request.steps,  # num_inference_steps
                    request.guidance_scale,  # guidance_scale
                ]
            }

            headers = {
                "Authorization": f"Bearer {self.api_token}",
                "Content-Type": "application/json",
            }

            self.logger.info(f"Generating image on HF Space: {self.space_id}")
            self.logger.debug(f"Payload: {json.dumps(payload, indent=2)}")

            # Call the HuggingFace Space.
            response = requests.post(
                self.api_url,
                json=payload,
                headers=headers,
                timeout=self.timeout,
            )

            response.raise_for_status()

            result = response.json()
            self.logger.debug(f"HF Response: {json.dumps(result, indent=2)}")

            # Expected structure: {"data": [{"name": "image.png", "data": "base64..."}]}
            if "data" not in result or not result["data"]:
                raise ValueError(f"Unexpected response format: {result}")

            image_data_list = result["data"]
            if not isinstance(image_data_list, list):
                image_data_list = [image_data_list]

            image_paths = []

            for idx, img_data in enumerate(image_data_list):
                # Handle both Gradio dict entries and raw base64 strings.
                if isinstance(img_data, dict) and "data" in img_data:
                    # Gradio format: {"name": "...", "data": "base64..."}
                    base64_str = img_data["data"]
                elif isinstance(img_data, str):
                    # Direct base64 string.
                    base64_str = img_data
                else:
                    self.logger.warning(f"Unknown image format: {type(img_data)}")
                    continue

                # Strip a data-URL prefix ("data:image/png;base64,...") if present.
                if base64_str.startswith("data:image"):
                    base64_str = base64_str.split(",", 1)[1]

                try:
                    image_bytes = base64.b64decode(base64_str)
                    image = Image.open(BytesIO(image_bytes))

                    # NOTE(review): fixed filenames mean a later run overwrites
                    # earlier outputs; acceptable for this Phase 2 prototype.
                    output_path = Path(__file__).parent.parent.parent / "output" / f"image_{idx:02d}.png"
                    output_path.parent.mkdir(parents=True, exist_ok=True)
                    image.save(output_path)

                    image_paths.append(str(output_path))
                    self.logger.info(f"✓ Image saved: {output_path}")

                except Exception as e:
                    self.logger.error(f"Failed to decode/save image {idx}: {e}")
                    raise

            if not image_paths:
                raise ValueError("No images in response")

            return {
                "image_paths": image_paths,
                "message": f"Generated {len(image_paths)} image(s) via {self.space_id}",
            }

        except requests.Timeout:
            raise TimeoutError(
                f"HuggingFace Space request timed out after {self.timeout}s. "
                "Space might be busy or overloaded."
            )
        except requests.HTTPError as e:
            if e.response.status_code == 401:
                # Fixed: was an f-string with no placeholder.
                raise PermissionError("Invalid HF API Token")
            elif e.response.status_code == 404:
                raise ValueError(f"HuggingFace Space not found: {self.space_id}")
            else:
                raise RuntimeError(f"HF Space API error: {e}")
        except Exception as e:
            self.logger.error(f"HuggingFace Space generation failed: {e}")
            raise
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
# ============================================================================
|
| 200 |
+
# Alternative: Verwendung von gradio_client statt REST
|
| 201 |
+
# ============================================================================
|
| 202 |
+
|
| 203 |
+
class HuggingFaceSpaceProviderGradio:
    """Alternative HuggingFace Space provider built on ``gradio_client``.

    Advantage: automatic payload/format conversion by the client library.
    Drawback: extra dependency (gradio-client).

    Usage:
        pip install gradio_client
    """

    id = "huggingface_gradio"
    name = "HuggingFace Spaces (Gradio)"
    description = "HuggingFace Spaces via Gradio Client"

    def __init__(self, space_id: str = None):
        try:
            from gradio_client import Client
        except ImportError:
            raise ImportError("gradio-client not installed. Install with: pip install gradio_client")
        self.Client = Client

        self.space_id = space_id or os.getenv("HF_SPACE_URL", "Heartsync/Adult")
        self.logger = logging.getLogger(__name__)

    def is_available(self) -> bool:
        """Probe the Space by attempting to open a Gradio client connection."""
        try:
            self.Client(f"https://huggingface.co/spaces/{self.space_id}")
        except Exception as exc:
            self.logger.error(f"Gradio Client connection failed: {exc}")
            return False
        self.logger.info(f"✓ Gradio Client verbunden: {self.space_id}")
        return True

    def generate(self, request: "ProviderRequest") -> "ProviderResult":
        """Run one prediction on the Space and return the resulting file path."""
        from gradio_client import Client

        try:
            space_client = Client(f"https://huggingface.co/spaces/{self.space_id}")

            prediction = space_client.predict(
                prompt=request.prompt,
                negative_prompt=request.negative_prompt,
                seed=int(request.seed % (2**31)),
                num_inference_steps=request.steps,
                guidance_scale=request.guidance_scale,
                api_name="/predict",
            )
            # The Gradio client hands back a local file path for image outputs.
            image_location = str(prediction)
        except Exception as exc:
            self.logger.error(f"Gradio generation failed: {exc}")
            raise

        return {
            "image_paths": [image_location],
            "message": f"Generated via {self.space_id}",
        }
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
# ============================================================================
|
| 266 |
+
# Integration Instructions for later
|
| 267 |
+
# ============================================================================
|
| 268 |
+
|
| 269 |
+
"""
|
| 270 |
+
INTEGRATION CHECKLIST (Phase 2):
|
| 271 |
+
|
| 272 |
+
1. ✅ Dieses File erstellt
|
| 273 |
+
2. [ ] HuggingFace API Token besorgen:
|
| 274 |
+
- https://huggingface.co/settings/tokens
|
| 275 |
+
- New Token → read role
|
| 276 |
+
- Setze env: HF_API_TOKEN=hf_xxxxx
|
| 277 |
+
|
| 278 |
+
3. [ ] Adult_repo zu HF Space deployen:
|
| 279 |
+
- cd d:/VSC Codes/Bild/Adult_repo
|
| 280 |
+
- pip install huggingface-hub
|
| 281 |
+
- huggingface-cli login
|
| 282 |
+
- git push huggingface main
|
| 283 |
+
|
| 284 |
+
4. [ ] Provider in Factory registrieren:
|
| 285 |
+
# in imageforge/backend/app/providers/factory.py
|
| 286 |
+
from .huggingface_provider import HuggingFaceSpaceProvider
|
| 287 |
+
|
| 288 |
+
_providers = {
|
| 289 |
+
...
|
| 290 |
+
"huggingface": HuggingFaceSpaceProvider(),
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
5. [ ] Testen:
|
| 294 |
+
# Im Backend
|
| 295 |
+
POST http://127.0.0.1:8008/generate
|
| 296 |
+
{
|
| 297 |
+
"model": "huggingface",
|
| 298 |
+
"prompt": "A beautiful sunset",
|
| 299 |
+
"negative_prompt": "",
|
| 300 |
+
"steps": 20,
|
| 301 |
+
"guidance_scale": 7.5
|
| 302 |
+
}
|
| 303 |
+
|
| 304 |
+
6. [ ] Colab deaktivieren oder als Fallback nutzen
|
| 305 |
+
|
| 306 |
+
TROUBLESHOOTING:
|
| 307 |
+
|
| 308 |
+
Problem: "HF_API_TOKEN not set"
|
| 309 |
+
Fix: $env:HF_API_TOKEN = "hf_xxxxx"; Restart Backend
|
| 310 |
+
|
| 311 |
+
Problem: "Space not found (404)"
|
| 312 |
+
Fix: Space-ID prüfen: "Heartsync/Adult" oder komplette URL?
|
| 313 |
+
|
| 314 |
+
Problem: "Request timeout"
|
| 315 |
+
Fix: Space ist überlastet. Später erneut versuchen.
|
| 316 |
+
Oder: $env:HF_REQUEST_TIMEOUT = "600" (10 Min)
|
| 317 |
+
|
| 318 |
+
Problem: "Invalid API Token"
|
| 319 |
+
Fix: Token regenerieren auf https://huggingface.co/settings/tokens
|
| 320 |
+
Ensure read permissions
|
| 321 |
+
|
| 322 |
+
TESTEN MIT CURL:
|
| 323 |
+
|
| 324 |
+
curl -X POST https://api-inference.huggingface.co/models/Heartsync/Adult \\
|
| 325 |
+
-H "Authorization: Bearer hf_xxxxx" \\
|
| 326 |
+
-H "Content-Type: application/json" \\
|
| 327 |
+
-d '{"inputs": "a girl in school uniform"}'
|
| 328 |
+
"""
|
imageforge/backend/app/providers/interface.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
from typing import Callable, Protocol
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ProviderUnavailableError(RuntimeError):
    """Raised when a provider's backend or its dependencies are unavailable."""
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
@dataclass(slots=True)
class ProviderRequest:
    """Normalized generation request handed to every image provider."""

    prompt: str
    negative_prompt: str
    count: int  # number of images to generate
    width: int  # output width in pixels
    height: int  # output height in pixels
    seed: int  # base seed; providers offset it per image (see LocalAIProvider)
    steps: int  # inference/diffusion step count
    guidance: float  # guidance (CFG) scale
    # Optional img2img inputs: source image path plus denoising strength.
    init_image_path: str | None = None
    img2img_strength: float = 0.45
    # Provider-specific model/checkpoint selector; None means the default.
    model_variant: str | None = None
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@dataclass(slots=True)
class ProviderResult:
    """Result returned by a provider: paths of all generated image files."""

    image_paths: list[Path]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# Progress reporter: called with (percent_complete, human-readable status).
ProgressCallback = Callable[[int, str], None]
# Cancellation probe: returns True once the running job should stop.
CancelCallback = Callable[[], bool]
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class IImageProvider(Protocol):
    """Structural (duck-typed) interface every image provider must satisfy."""

    id: str  # stable registry key, e.g. "dummy", "localai"
    name: str  # human-readable display name
    description: str  # short description of the provider

    def is_available(self) -> bool:
        """Return True when the provider's backend/dependencies are usable."""
        ...

    def generate(
        self,
        request: ProviderRequest,
        output_dir: Path,
        progress: ProgressCallback,
        is_cancelled: CancelCallback,
    ) -> ProviderResult:
        """Generate images for *request* into *output_dir*.

        Implementations report status via *progress* and should poll
        *is_cancelled* between images to honour cancellation.
        """
        ...
|
imageforge/backend/app/providers/localai_provider.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
import threading
|
| 6 |
+
import time
|
| 7 |
+
|
| 8 |
+
from ..local_ai.engine import LocalAIEngine, LocalAIRequest
|
| 9 |
+
from .interface import ProviderRequest, ProviderResult, ProviderUnavailableError
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class LocalAIProvider:
    """Provider that renders images with the in-process LocalAI engine.

    Wraps LocalAIEngine and adds per-image progress heartbeats, cancellation
    checks, and a hard per-image timeout.
    """

    id = "localai"
    name = "LocalAI (self-hosted)"
    description = "Own local AI engine, no paid cloud API required"

    def __init__(self) -> None:
        self.engine = LocalAIEngine()
        # Hard per-image timeout in seconds; env-overridable, floored at 1.
        self.image_timeout_seconds = max(1, int(os.getenv("IMAGEFORGE_LOCALAI_IMAGE_TIMEOUT_SECONDS", "180")))

    def is_available(self) -> bool:
        """Delegate to the engine (True when its dependencies are importable)."""
        return self.engine.is_available()

    def generate(self, request: ProviderRequest, output_dir: Path, progress, is_cancelled) -> ProviderResult:
        """Generate ``request.count`` images sequentially into *output_dir*.

        Args:
            request: normalized generation parameters.
            output_dir: destination for image_NN.png files (created if missing).
            progress: callback(percent, message) for status updates.
            is_cancelled: returns True when the job should stop.

        Returns:
            ProviderResult listing saved image paths (may be fewer than
            requested when cancelled mid-run).

        Raises:
            ProviderUnavailableError: when engine dependencies are missing.
        """
        if not self.is_available():
            raise ProviderUnavailableError(
                "LocalAI dependencies missing. Install diffusers/torch packages."
            )
        output_dir.mkdir(parents=True, exist_ok=True)
        image_paths: list[Path] = []

        for idx in range(request.count):
            # Cancellation between images; in-flight renders are also watched
            # via is_cancelled() inside the heartbeat loop below.
            if is_cancelled():
                break
            progress(5, f"LocalAI preparing image {idx + 1}/{request.count}")
            image = self._generate_with_heartbeat(
                LocalAIRequest(
                    prompt=request.prompt,
                    negative_prompt=request.negative_prompt,
                    width=request.width,
                    height=request.height,
                    steps=request.steps,
                    guidance=request.guidance,
                    # Offset the seed per image so a batch yields distinct results.
                    seed=request.seed + idx,
                    init_image_path=request.init_image_path,
                    strength=request.img2img_strength,
                    model_variant=request.model_variant,
                ),
                progress=progress,
                is_cancelled=is_cancelled,
                image_index=idx + 1,
                total=request.count,
            )
            image_path = output_dir / f"image_{idx + 1:02d}.png"
            image.save(image_path, format="PNG")
            image_paths.append(image_path)
            pct = int(((idx + 1) / request.count) * 100)
            progress(pct, f"LocalAI image {idx + 1}/{request.count} complete")

        return ProviderResult(image_paths=image_paths)

    def _generate_with_heartbeat(
        self,
        request: LocalAIRequest,
        progress,
        is_cancelled,
        image_index: int,
        total: int,
    ):
        """Run one blocking engine.generate() call on a worker thread.

        Polls roughly once per second to emit heartbeat progress, honour
        cancellation, and enforce ``image_timeout_seconds``.

        Raises:
            RuntimeError: when cancelled, or when the worker ends without a result.
            TimeoutError: when the per-image timeout elapses.
        """
        # Single-slot holders let the worker thread hand back result/error.
        result_holder: dict[str, object] = {}
        error_holder: dict[str, Exception] = {}

        def _run() -> None:
            try:
                result_holder["image"] = self.engine.generate(request)
            except Exception as exc:  # noqa: BLE001
                error_holder["error"] = exc

        # Daemon thread: if we bail out (cancel/timeout) the render keeps
        # running in the background but will not block process exit.
        worker = threading.Thread(target=_run, daemon=True)
        worker.start()

        started = time.monotonic()
        heartbeat = 8
        while worker.is_alive():
            if is_cancelled():
                raise RuntimeError("Generation cancelled")
            elapsed = time.monotonic() - started
            if elapsed > self.image_timeout_seconds:
                raise TimeoutError(
                    "LocalAI generation timed out. "
                    "Try fewer steps/smaller size or use fallback model 'dummy'."
                )
            # Synthetic progress: creeps up 2% per tick, capped at 95% since
            # the true completion point is unknown until the thread finishes.
            pct = min(95, heartbeat)
            progress(
                pct,
                f"LocalAI generating image {image_index}/{total} ({pct}%, {int(elapsed)}s)",
            )
            heartbeat = min(95, heartbeat + 2)
            worker.join(timeout=1.0)

        if "error" in error_holder:
            raise error_holder["error"]
        if "image" not in result_holder:
            raise RuntimeError("LocalAI generation ended without image result")
        return result_holder["image"]
|
imageforge/backend/app/providers/zimageturbo_provider.py
ADDED
|
@@ -0,0 +1,227 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import base64
|
| 4 |
+
import json
|
| 5 |
+
import logging
|
| 6 |
+
import os
|
| 7 |
+
import time
|
| 8 |
+
from io import BytesIO
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Any
|
| 11 |
+
from urllib.parse import urljoin
|
| 12 |
+
|
| 13 |
+
import requests
|
| 14 |
+
from PIL import Image
|
| 15 |
+
|
| 16 |
+
from .interface import (
|
| 17 |
+
CancelCallback,
|
| 18 |
+
IImageProvider,
|
| 19 |
+
ProgressCallback,
|
| 20 |
+
ProviderRequest,
|
| 21 |
+
ProviderResult,
|
| 22 |
+
ProviderUnavailableError,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
LOGGER = logging.getLogger(__name__)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class ZImageTurboProvider(IImageProvider):
    """Provider calling the Heartsync/Adult HuggingFace Space (Z-Image Turbo).

    Two modes, selected via ZIMAGETURBO_MODE:
      - "hf_space" (default): the public HF Space's Gradio REST API.
      - "legacy": a self-hosted HTTP API configured via ZIMAGETURBO_API_URL.
    """

    id = "zimageturbo"
    name = "Adult + Z-Image Turbo (HF Space API)"
    description = "Nutzen von Heartsync/Adult per HuggingFace Space API"

    def __init__(self) -> None:
        # "legacy" selects the self-hosted API; any other value falls through
        # to the HF Space path in the methods below.
        self.mode = os.getenv("ZIMAGETURBO_MODE", "hf_space").strip().lower()
        self.api_key = os.getenv("ZIMAGETURBO_API_KEY", "").strip()
        self.timeout = int(os.getenv("ZIMAGETURBO_TIMEOUT", "300"))

        self.hf_space = os.getenv("ZIMAGETURBO_HF_SPACE", "Heartsync/Adult").strip()
        self.hf_api_name = os.getenv("ZIMAGETURBO_HF_API_NAME", "predict").strip().lstrip("/")

        legacy_base = os.getenv("ZIMAGETURBO_API_URL", "").strip()
        if legacy_base:
            self.legacy_api_url = legacy_base.rstrip("/")
        else:
            self.legacy_api_url = ""

        self.hf_base_url = f"https://huggingface.co/spaces/{self.hf_space}"
        # Availability is cached for the provider's lifetime (never refreshed).
        self._available: bool | None = None

    def is_available(self) -> bool:
        """Return (and cache) whether the selected backend answers a health probe."""
        if self._available is not None:
            return self._available
        try:
            if self.mode == "legacy":
                self._available = self._is_legacy_available()
            else:
                self._available = self._is_hf_space_available()
        except Exception:  # noqa: BLE001
            self._available = False
        return self._available

    def generate(
        self,
        request: ProviderRequest,
        output_dir: Path,
        progress: ProgressCallback,
        is_cancelled: CancelCallback,
    ) -> ProviderResult:
        """Generate ``request.count`` images via the configured backend.

        Progress is mapped into the 10–90% band. Raises
        ProviderUnavailableError when the backend is unreachable and
        RuntimeError when no image could be produced.
        """
        if not self.is_available():
            raise ProviderUnavailableError("Adult + Z-Image Turbo API nicht verfügbar")

        output_dir.mkdir(parents=True, exist_ok=True)
        image_paths: list[Path] = []

        for index in range(request.count):
            # Cancellation is only honoured between images.
            if is_cancelled():
                break

            progress(int(10 + 80 * (index / max(1, request.count))), f"Generiere Bild {index + 1}/{request.count}...")
            # Negative seed means "random": let the backend pick one.
            current_seed = request.seed + index if request.seed >= 0 else None

            if self.mode == "legacy":
                image = self._generate_legacy(request, current_seed)
            else:
                image = self._generate_hf_space(request, current_seed)

            out = output_dir / f"image_{index + 1:02d}.png"
            image.save(out)
            image_paths.append(out)
            progress(int(10 + 80 * ((index + 1) / max(1, request.count))), f"Bild {index + 1}/{request.count} gespeichert")

        if not image_paths:
            raise RuntimeError("Keine Bilder erzeugt")

        return ProviderResult(image_paths=image_paths)

    def _headers(self) -> dict[str, str]:
        """JSON headers plus a bearer token when an API key is configured."""
        headers = {"Content-Type": "application/json"}
        if self.api_key:
            headers["Authorization"] = f"Bearer {self.api_key}"
        return headers

    def _is_legacy_available(self) -> bool:
        """Health-check the self-hosted API; requires ZIMAGETURBO_API_URL."""
        if not self.legacy_api_url:
            return False
        try:
            response = requests.get(f"{self.legacy_api_url}/health", timeout=5, headers=self._headers())
            return response.status_code == 200
        except Exception as exc:  # noqa: BLE001
            LOGGER.warning("Legacy ZImageTurbo health failed: %s", exc)
            return False

    def _is_hf_space_available(self) -> bool:
        """Treat any non-5xx response from the Space page as 'available'."""
        try:
            response = requests.get(self.hf_base_url, timeout=8)
            if response.status_code < 500:
                return True
            LOGGER.warning("HF Space returned status %s", response.status_code)
            return False
        except Exception as exc:  # noqa: BLE001
            LOGGER.warning("HF Space health failed: %s", exc)
            return False

    def _generate_legacy(self, request: ProviderRequest, seed: int | None) -> Image.Image:
        """POST to the self-hosted /generate endpoint; returns a decoded RGB image.

        Expects a JSON body {"success": true, "image": "<base64>"}.
        """
        payload: dict[str, Any] = {
            "prompt": request.prompt,
            "negative_prompt": request.negative_prompt,
            "width": request.width,
            "height": request.height,
            "num_inference_steps": request.steps,
            "guidance_scale": request.guidance,
        }
        if seed is not None:
            payload["seed"] = seed

        response = requests.post(
            f"{self.legacy_api_url}/generate",
            json=payload,
            headers=self._headers(),
            timeout=self.timeout,
        )
        response.raise_for_status()
        body = response.json()
        if not body.get("success") or not body.get("image"):
            raise RuntimeError(body.get("error", "Ungültige Legacy API Antwort"))
        raw = base64.b64decode(body["image"])
        return Image.open(BytesIO(raw)).convert("RGB")

    def _generate_hf_space(self, request: ProviderRequest, seed: int | None) -> Image.Image:
        """Call the Space's Gradio queue API and download the resulting image."""
        api_call_url = f"{self.hf_base_url}/gradio_api/call/{self.hf_api_name}"
        # Positional args: (prompt, height, width, steps, seed, randomize_seed,
        # batch) — assumed to match the Space's predict signature; TODO confirm.
        payload = {
            "data": [
                request.prompt,
                request.height,
                request.width,
                request.steps,
                seed if seed is not None else 42,
                seed is None,
                1,
            ]
        }

        call_response = requests.post(
            api_call_url,
            json=payload,
            headers=self._headers(),
            timeout=self.timeout,
        )
        call_response.raise_for_status()
        call_payload = call_response.json()
        event_id = call_payload.get("event_id")
        if not event_id:
            raise RuntimeError(f"HF Space call failed: {call_payload}")

        result = self._poll_hf_event(event_id)
        image_url = self._extract_first_image_url(result)
        if not image_url:
            raise RuntimeError(f"Kein Bild in HF-Antwort: {result}")

        # Relative /gradio_api/file= paths are resolved against the Space URL.
        full_url = image_url if image_url.startswith("http") else urljoin(self.hf_base_url, image_url)
        image_response = requests.get(full_url, timeout=self.timeout)
        image_response.raise_for_status()
        return Image.open(BytesIO(image_response.content)).convert("RGB")

    def _poll_hf_event(self, event_id: str) -> Any:
        """Poll the Gradio SSE result endpoint until a JSON payload arrives.

        Parses "data:" lines from the event stream, newest first, skipping
        "[DONE]"/"null" markers. Raises TimeoutError past ``self.timeout``.
        """
        result_url = f"{self.hf_base_url}/gradio_api/call/{self.hf_api_name}/{event_id}"
        deadline = time.time() + self.timeout

        while time.time() < deadline:
            response = requests.get(result_url, timeout=30)
            response.raise_for_status()
            lines = [line.strip() for line in response.text.splitlines() if line.strip()]
            # SSE payload lines look like "data: {...}"; strip the prefix.
            data_lines = [line[5:].strip() for line in lines if line.startswith("data:")]
            if not data_lines:
                time.sleep(1)
                continue

            for raw in reversed(data_lines):
                if raw in {"[DONE]", "null"}:
                    continue
                try:
                    return json.loads(raw)
                except json.JSONDecodeError:
                    continue
            time.sleep(1)

        raise TimeoutError(f"HF Space Timeout nach {self.timeout}s")

    def _extract_first_image_url(self, payload: Any) -> str | None:
        """Depth-first search the response for the first image URL/file path.

        Accepts absolute http(s) URLs that look like images as well as
        relative "/gradio_api/file=..." paths, and the "url"/"path" keys of
        Gradio file dicts. Returns None when nothing matches.
        """
        stack = [payload]
        while stack:
            item = stack.pop()
            if isinstance(item, str):
                if item.startswith("http") and ("/gradio_api/file=" in item or item.endswith((".png", ".jpg", ".jpeg", ".webp"))):
                    return item
                if item.startswith("/gradio_api/file="):
                    return item
            elif isinstance(item, dict):
                for key in ("url", "path"):
                    value = item.get(key)
                    if isinstance(value, str):
                        if value.startswith("http") or value.startswith("/gradio_api/file="):
                            return value
                stack.extend(item.values())
            elif isinstance(item, list):
                stack.extend(item)
        return None
|
imageforge/backend/app/storage/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
imageforge/backend/app/storage/history.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
from datetime import datetime, timezone
|
| 5 |
+
from threading import Lock
|
| 6 |
+
|
| 7 |
+
from ..core.config import HISTORY_FILE
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class PromptHistoryStore:
    """JSON-file-backed store of recently used prompt/negative-prompt pairs."""

    def __init__(self, max_entries: int = 50) -> None:
        # Entries are kept newest-first; anything beyond max_entries is dropped.
        self.max_entries = max_entries
        self._lock = Lock()

    def add(self, prompt: str, negative_prompt: str) -> None:
        """Prepend a new history entry and persist the trimmed list."""
        entry = {
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "timestamp": datetime.now(timezone.utc).isoformat(),
        }
        with self._lock:
            entries = [entry, *self._read()]
            HISTORY_FILE.parent.mkdir(parents=True, exist_ok=True)
            HISTORY_FILE.write_text(
                json.dumps(entries[: self.max_entries], indent=2), encoding="utf-8"
            )

    def list(self) -> list[dict[str, str]]:
        """Return all stored entries, newest first."""
        with self._lock:
            return self._read()

    def _read(self) -> list[dict[str, str]]:
        # A missing or corrupt history file degrades to an empty history.
        if not HISTORY_FILE.exists():
            return []
        try:
            raw = json.loads(HISTORY_FILE.read_text(encoding="utf-8"))
        except json.JSONDecodeError:
            return []
        if not isinstance(raw, list):
            return []
        return [item for item in raw if isinstance(item, dict)]
|
imageforge/backend/app/storage/maintenance.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations

import shutil
from datetime import datetime, timedelta, timezone
from pathlib import Path

from ..core.config import OUTPUT_DIR
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def cleanup_outputs(retention_days: int) -> int:
    """Delete per-day output folders older than *retention_days*.

    Only direct subfolders of OUTPUT_DIR whose names parse as YYYY-MM-DD
    dates are considered; everything else is left untouched. Returns the
    number of folders removed. A non-positive retention disables cleanup.
    """
    if retention_days <= 0:
        return 0
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
    cutoff = datetime.now(timezone.utc) - timedelta(days=retention_days)
    removed = 0
    for entry in OUTPUT_DIR.iterdir():
        if not entry.is_dir():
            continue
        stamped = _parse_date(entry.name)
        if stamped is not None and stamped < cutoff:
            _remove_tree(entry)
            removed += 1
    return removed
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _parse_date(name: str) -> datetime | None:
|
| 26 |
+
try:
|
| 27 |
+
return datetime.strptime(name, "%Y-%m-%d").replace(tzinfo=timezone.utc)
|
| 28 |
+
except ValueError:
|
| 29 |
+
return None
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _remove_tree(path: Path) -> None:
|
| 33 |
+
for child in path.glob("**/*"):
|
| 34 |
+
if child.is_file():
|
| 35 |
+
child.unlink(missing_ok=True)
|
| 36 |
+
for child in sorted(path.glob("**/*"), reverse=True):
|
| 37 |
+
if child.is_dir():
|
| 38 |
+
child.rmdir()
|
| 39 |
+
path.rmdir()
|
imageforge/backend/app/storage/presets.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
from dataclasses import asdict, dataclass
|
| 5 |
+
from datetime import datetime, timezone
|
| 6 |
+
from threading import Lock
|
| 7 |
+
|
| 8 |
+
from ..core.config import PRESETS_FILE
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@dataclass(slots=True)
class Preset:
    """A saved generation configuration, stored as one JSON row in PRESETS_FILE.

    ``name`` is the unique key: PresetStore.upsert replaces any existing
    preset carrying the same name and moves it to the front of the list.
    """

    name: str
    prompt: str
    negative_prompt: str
    model: str            # provider/model id (PresetStore defaults to "dummy")
    size: str             # "WIDTHxHEIGHT" string, e.g. "1024x1024"
    count: int            # number of images to generate (default 1)
    steps: int            # sampling steps (default 30)
    guidance: float       # guidance scale (default 7.5)
    image_type: str       # e.g. "general" (the upsert default)
    style_preset: str     # style name, "auto" by default
    style_strength: int   # style blend weight, default 60 — presumably 0-100, confirm
    updated_at: str       # ISO-8601 UTC timestamp of the last upsert
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class PresetStore:
    """Thread-safe JSON-file store for named generation presets.

    Presets are kept newest-first in PRESETS_FILE; ``name`` is the unique
    key. The persisted list is capped at 200 rows on every write.
    """

    def __init__(self) -> None:
        self._lock = Lock()

    def list(self) -> list[Preset]:
        """Return all stored presets, newest first.

        Rows whose keys do not exactly match the current Preset schema
        (e.g. written by an older or newer version of the app) are skipped
        instead of letting ``Preset(**row)`` raise TypeError and break the
        whole listing.
        """
        expected = set(Preset.__dataclass_fields__)
        with self._lock:
            return [Preset(**row) for row in self._read() if set(row) == expected]

    def upsert(self, payload: dict) -> Preset:
        """Insert or replace the preset named ``payload["name"]``.

        Missing optional fields fall back to defaults; the preset is moved
        to the front of the list. Raises KeyError when "name" is absent and
        ValueError/TypeError when numeric fields are not coercible.
        """
        with self._lock:
            rows = self._read()
            now = datetime.now(timezone.utc).isoformat()
            preset = Preset(
                name=payload["name"],
                prompt=payload.get("prompt", ""),
                negative_prompt=payload.get("negative_prompt", ""),
                model=payload.get("model", "dummy"),
                size=payload.get("size", "1024x1024"),
                count=int(payload.get("count", 1)),
                steps=int(payload.get("steps", 30)),
                guidance=float(payload.get("guidance", 7.5)),
                image_type=payload.get("image_type", "general"),
                style_preset=payload.get("style_preset", "auto"),
                style_strength=int(payload.get("style_strength", 60)),
                updated_at=now,
            )
            # Drop any previous row with the same name, then prepend.
            rows = [row for row in rows if row.get("name") != preset.name]
            rows.insert(0, asdict(preset))
            self._write(rows)
            return preset

    def delete(self, name: str) -> bool:
        """Remove the preset called *name*; return True if one was removed."""
        with self._lock:
            rows = self._read()
            remaining = [row for row in rows if row.get("name") != name]
            if len(remaining) == len(rows):
                return False
            self._write(remaining)
            return True

    def _read(self) -> list[dict]:
        # A missing or corrupt presets file degrades to an empty list.
        if not PRESETS_FILE.exists():
            return []
        try:
            content = json.loads(PRESETS_FILE.read_text(encoding="utf-8"))
        except json.JSONDecodeError:
            return []
        if not isinstance(content, list):
            return []
        return [row for row in content if isinstance(row, dict)]

    def _write(self, rows: list[dict]) -> None:
        # Cap the persisted list at 200 presets to bound file growth.
        PRESETS_FILE.parent.mkdir(parents=True, exist_ok=True)
        PRESETS_FILE.write_text(json.dumps(rows[:200], indent=2), encoding="utf-8")
|
imageforge/backend/app/storage/settings.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import annotations
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
from datetime import datetime, timezone
|
| 5 |
+
from threading import Lock
|
| 6 |
+
|
| 7 |
+
from ..core.config import (
|
| 8 |
+
ADMIN_AUDIT_FILE,
|
| 9 |
+
CONTENT_PROFILE,
|
| 10 |
+
OUTPUT_RETENTION_DAYS,
|
| 11 |
+
RATE_LIMIT_PER_MINUTE,
|
| 12 |
+
SETTINGS_FILE,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Baseline runtime settings. Values imported from core.config act as the
# defaults; SettingsStore overlays the persisted settings file on top and
# lets admins patch them at runtime.
DEFAULT_SETTINGS = {
    "content_profile": CONTENT_PROFILE,
    "rate_limit_per_minute": RATE_LIMIT_PER_MINUTE,
    "output_retention_days": OUTPUT_RETENTION_DAYS,
    # Adult content is opt-in: disabled unless explicitly enabled via update().
    "adult_enabled": False,
}
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class SettingsStore:
    """Thread-safe runtime settings persistence layered over DEFAULT_SETTINGS.

    Reads merge the settings file on top of the defaults; every update is
    also appended to an admin audit log (JSON lines).
    """

    def __init__(self) -> None:
        self._lock = Lock()

    def get(self) -> dict:
        """Return the effective settings (defaults overlaid with the file)."""
        with self._lock:
            return self._merged()

    def update(self, patch: dict, actor: str) -> dict:
        """Apply the non-None entries of *patch*, persist, audit, and return the result."""
        effective = {key: value for key, value in patch.items() if value is not None}
        with self._lock:
            current = self._merged()
            current.update(effective)
            self._write(current)
            # The raw patch (including ignored None entries) is audited as sent.
            self._audit(actor=actor, patch=patch, current=current)
            return current

    def _merged(self) -> dict:
        merged = dict(DEFAULT_SETTINGS)
        merged.update(self._read())
        return merged

    def _read(self) -> dict:
        # A missing or corrupt settings file contributes nothing.
        if not SETTINGS_FILE.exists():
            return {}
        try:
            parsed = json.loads(SETTINGS_FILE.read_text(encoding="utf-8"))
        except json.JSONDecodeError:
            return {}
        return parsed if isinstance(parsed, dict) else {}

    def _write(self, data: dict) -> None:
        SETTINGS_FILE.parent.mkdir(parents=True, exist_ok=True)
        SETTINGS_FILE.write_text(json.dumps(data, indent=2), encoding="utf-8")

    def _audit(self, actor: str, patch: dict, current: dict) -> None:
        # Append-only JSONL trail of admin setting changes.
        record = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "actor": actor,
            "patch": patch,
            "current": current,
        }
        ADMIN_AUDIT_FILE.parent.mkdir(parents=True, exist_ok=True)
        with ADMIN_AUDIT_FILE.open("a", encoding="utf-8") as fp:
            fp.write(json.dumps(record, ensure_ascii=True) + "\n")
|
imageforge/backend/tests/test_a1111_provider.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
|
| 2 |
+
|
| 3 |
+
from backend.app.providers.a1111_provider import A1111Provider
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class _FakeResponse:
|
| 7 |
+
def __init__(self, payload: bytes) -> None:
|
| 8 |
+
self._payload = payload
|
| 9 |
+
|
| 10 |
+
def read(self) -> bytes:
|
| 11 |
+
return self._payload
|
| 12 |
+
|
| 13 |
+
def __enter__(self):
|
| 14 |
+
return self
|
| 15 |
+
|
| 16 |
+
def __exit__(self, exc_type, exc, tb):
|
| 17 |
+
return False
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def test_a1111_provider_builds_auth_header_from_compact_auth(monkeypatch) -> None:
    """A compact "user:pass" A1111_API_AUTH turns into a Basic auth header."""
    module = "backend.app.providers.a1111_provider"
    monkeypatch.setattr(f"{module}.A1111_API_AUTH", "user:secret")
    monkeypatch.setattr(f"{module}.A1111_API_USER", "")
    monkeypatch.setattr(f"{module}.A1111_API_PASSWORD", "")

    provider = A1111Provider()

    scheme, _, token = provider.auth_header.partition(" ")
    assert scheme == "Basic"
    assert base64.b64decode(token.encode("ascii")).decode("utf-8") == "user:secret"
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def test_a1111_provider_retries_transient_failure(monkeypatch) -> None:
    """The first OSError is retried; the second attempt's JSON is returned."""
    monkeypatch.setattr("backend.app.providers.a1111_provider.A1111_RETRY_COUNT", 2)
    monkeypatch.setattr(
        "backend.app.providers.a1111_provider.A1111_RETRY_BACKOFF_SECONDS", 0.0
    )

    calls: list[int] = []

    def fake_urlopen(req, timeout):  # noqa: ANN001
        calls.append(1)
        if len(calls) == 1:
            raise OSError("temporary network issue")
        return _FakeResponse(b'{"ok": true}')

    monkeypatch.setattr(
        "backend.app.providers.a1111_provider.request.urlopen", fake_urlopen
    )

    provider = A1111Provider()
    result = provider._request_json("GET", "/sdapi/v1/sd-models")

    assert result == {"ok": True}
    assert len(calls) == 2
|