# selamgpt/app.py
from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse, JSONResponse
from pydantic import BaseModel
from typing import List, Optional, Literal
import base64
import json

import g4f
from g4f.Provider import Blackbox, RetryProvider

app = FastAPI()

# Configure the Blackbox provider
g4f.Provider.Blackbox.url = "https://www.blackbox.ai/api/chat"
g4f.Provider.Blackbox.working = True

# All available text models from the Blackbox provider
TEXT_MODELS = [
# Blackbox models
"blackbox", "blackbox-pro", "blackbox-70b", "blackbox-180b",
# OpenAI compatible
"gpt-4", "gpt-4-turbo", "gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo",
# Anthropic
"claude-3-opus", "claude-3-sonnet", "claude-3-haiku", "claude-3.5", "claude-3.7-sonnet",
# Meta
"llama-3-70b", "llama-3-8b", "llama-3.3-70b", "llama-2-70b",
# DeepSeek
"deepseek-chat", "deepseek-v3", "deepseek-r1", "deepseek-coder",
# Other
"o1", "o3-mini", "mixtral-8x7b", "mixtral-small-24b", "qwq-32b",
"command-r-plus", "code-llama-70b", "gemini-pro", "gemini-1.5-flash"
]

IMAGE_MODELS = [
"flux", "flux-pro", "dall-e-3", "stable-diffusion-xl", "playground-v2.5",
"kandinsky-3", "deepfloyd-if", "sdxl-turbo"
]


class Message(BaseModel):
role: Literal["system", "user", "assistant"]
content: str


class ChatRequest(BaseModel):
model: str
messages: List[Message]
temperature: Optional[float] = 0.7
max_tokens: Optional[int] = None
stream: Optional[bool] = False


class ImageRequest(BaseModel):
model: str
prompt: str
size: Optional[str] = "1024x1024"
quality: Optional[Literal["standard", "hd"]] = "standard"
@app.get("/v1/models")
async def get_models():
"""Return all available models"""
return {
"text_models": TEXT_MODELS,
"image_models": IMAGE_MODELS
}
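
# A client can discover the available models with a plain GET request, for
# example `curl http://localhost:7860/v1/models` when the server runs locally;
# the response returns the two lists defined above.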
@app.post("/v1/chat/completions")
async def chat_completion(request: ChatRequest):
"""Handle text generation with Blackbox and other models"""
if request.model not in TEXT_MODELS:
raise HTTPException(
status_code=400,
detail=f"Invalid model. Available: {TEXT_MODELS}"
)
messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]
try:
if request.stream:
async def stream_generator():
response = await g4f.ChatCompletion.create_async(
model=request.model,
messages=messages,
provider=RetryProvider([Blackbox]),
temperature=request.temperature,
max_tokens=request.max_tokens,
stream=True
)
                async for chunk in response:
                    # g4f may yield plain text chunks or OpenAI-style chunk objects
                    if isinstance(chunk, str):
                        yield f"data: {json.dumps({'content': chunk})}\n\n"
                    elif hasattr(chunk, 'choices'):
                        delta = chunk.choices[0].delta
                        # The delta may be a dict or an object depending on the provider
                        if isinstance(delta, dict):
                            content = delta.get('content') or ''
                        else:
                            content = getattr(delta, 'content', None) or ''
                        yield f"data: {json.dumps({'content': content})}\n\n"
                yield "data: [DONE]\n\n"
return StreamingResponse(stream_generator(), media_type="text/event-stream")
else:
response = await g4f.ChatCompletion.create_async(
model=request.model,
messages=messages,
provider=RetryProvider([Blackbox]),
temperature=request.temperature,
max_tokens=request.max_tokens
)
return {"content": str(response)}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
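
# Example client call (illustrative sketch; assumes the server is reachable on
# http://localhost:7860 and that the chosen model is served by Blackbox):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:7860/v1/chat/completions",
#       json={
#           "model": "gpt-4o-mini",
#           "messages": [{"role": "user", "content": "Hello!"}],
#       },
#   )
#   print(resp.json()["content"])
#
# With "stream": true the endpoint instead returns Server-Sent Events, one
# `data: {"content": ...}` line per chunk, terminated by `data: [DONE]`.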
@app.post("/v1/images/generations")
async def generate_image(request: ImageRequest):
"""Handle image generation with Flux and other models"""
if request.model not in IMAGE_MODELS:
raise HTTPException(
status_code=400,
detail=f"Invalid model. Available: {IMAGE_MODELS}"
)
try:
if request.model in ["flux", "flux-pro"]:
image_data = g4f.ImageGeneration.create(
prompt=request.prompt,
model=request.model,
provider=Blackbox,
size=request.size
)
return JSONResponse({
"url": f"data:image/png;base64,{image_data.decode('utf-8')}",
"model": request.model
})
else:
# Implementation for other image providers
raise HTTPException(
status_code=501,
detail=f"{request.model} implementation pending"
)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
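
# Example client call (illustrative sketch; "flux" is one of IMAGE_MODELS and
# the prompt is a placeholder):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:7860/v1/images/generations",
#       json={"model": "flux", "prompt": "a lighthouse at sunset", "size": "512x512"},
#   )
#   data_url = resp.json()["url"]  # "data:image/png;base64,..."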
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=7860)
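
# Equivalent CLI invocation: `uvicorn app:app --host 0.0.0.0 --port 7860`;
# 7860 is the port Hugging Face Spaces expects the app to listen on.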