# vision/app.py
import os
import base64

from fastapi import FastAPI, HTTPException, Query, File, UploadFile, Form
from fastapi.responses import StreamingResponse, JSONResponse
from openai import AsyncOpenAI

app = FastAPI()

# Available models (model id -> display name)
AVAILABLE_MODELS = {
    "openai/gpt-4.1": "OpenAI GPT-4.1",
    "openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
    "openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
    "openai/gpt-4o": "OpenAI GPT-4o",
    "openai/gpt-4o-mini": "OpenAI GPT-4o mini",
    "openai/o4-mini": "OpenAI o4-mini",
    "microsoft/MAI-DS-R1": "MAI-DS-R1",
    "microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
    "microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
    "microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
    "microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
    "microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
    "microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
    "microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
    "microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
    "microsoft/Phi-4": "Phi-4",
    "microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
    "microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
    "ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
    "ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
    "mistral-ai/Codestral-2501": "Codestral 25.01",
    "cohere/Cohere-command-r": "Cohere Command R",
    "cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
    "cohere/Cohere-command-r-plus": "Cohere Command R+",
    "cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
    "deepseek/DeepSeek-R1": "DeepSeek-R1",
    "deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
    "meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
    "meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
    "meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
    "meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
    "meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
    "meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
    "meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
    "meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
    "meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
    "meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
    "mistral-ai/Ministral-3B": "Ministral 3B",
    "mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
    "mistral-ai/Mistral-Nemo": "Mistral Nemo",
    "mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
    "mistral-ai/Mistral-small": "Mistral Small",
    "cohere/cohere-command-a": "Cohere Command A",
    "core42/jais-30b-chat": "JAIS 30b Chat",
    "mistral-ai/mistral-small-2503": "Mistral Small 3.1"
}

# Vision-capable models (subset of AVAILABLE_MODELS)
VISION_MODELS = [
    "openai/gpt-4o",
    "openai/gpt-4o-mini",
    "microsoft/Phi-3.5-vision-instruct",
    "meta/Llama-3.2-11B-Vision-Instruct",
    "meta/Llama-3.2-90B-Vision-Instruct",
    "microsoft/Phi-4-multimodal-instruct"
]
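
# Optional sanity check (a sketch, not in the original file): every vision model should
# also appear in AVAILABLE_MODELS, so a typo in either list fails at import time rather
# than on the first request.
assert all(m in AVAILABLE_MODELS for m in VISION_MODELS), "VISION_MODELS must be a subset of AVAILABLE_MODELS"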

async def generate_ai_response(prompt: str, model: str):
    """Stream completion tokens for the given prompt from the selected model."""
    token = os.getenv("GITHUB_TOKEN")
    if not token:
        raise HTTPException(status_code=500, detail="GitHub token not configured")

    endpoint = "https://models.github.ai/inference"
    if model not in AVAILABLE_MODELS:
        raise HTTPException(status_code=400, detail=f"Model not available. Choose from: {', '.join(AVAILABLE_MODELS.keys())}")

    client = AsyncOpenAI(base_url=endpoint, api_key=token)
    try:
        stream = await client.chat.completions.create(
            messages=[
                {"role": "user", "content": prompt}
            ],
            model=model,
            temperature=1.0,
            top_p=1.0,
            stream=True
        )
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
    except Exception as err:
        # The response is already streaming at this point, so an HTTPException could not
        # reach the client as a proper error status; report the failure in-band instead.
        yield f"Error: {str(err)}"

async def process_image_with_vision(image: bytes, question: str, model: str, content_type: str = "image/jpeg"):
    """Answer a question about an image using a vision-capable model."""
    token = os.getenv("GITHUB_TOKEN")
    if not token:
        raise HTTPException(status_code=500, detail="GitHub token not configured")

    endpoint = "https://models.github.ai/inference"
    if model not in VISION_MODELS:
        raise HTTPException(status_code=400, detail=f"Model not vision-capable. Choose from: {', '.join(VISION_MODELS)}")

    client = AsyncOpenAI(base_url=endpoint, api_key=token)
    # Encode the image as a base64 data URL so it can be embedded in the request
    base64_image = base64.b64encode(image).decode("utf-8")
    try:
        # Non-streaming request for the vision task
        response = await client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": question},
                        {
                            "type": "image_url",
                            "image_url": {"url": f"data:{content_type};base64,{base64_image}"}
                        }
                    ]
                }
            ],
            model=model,
            temperature=1.0,
            top_p=1.0,
            stream=False  # Vision answers are returned in a single response
        )
        return response.choices[0].message.content
    except Exception as err:
        raise HTTPException(status_code=500, detail=f"Vision processing failed: {str(err)}")
@app.post("/generate")
async def generate_response(
prompt: str = Query(..., description="The prompt for the AI"),
model: str = Query("openai/gpt-4.1-mini", description="The model to use for generation")
):
if not prompt:
raise HTTPException(status_code=400, detail="Prompt cannot be empty")
return StreamingResponse(
generate_ai_response(prompt, model),
media_type="text/event-stream"
)
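
# Example request (a sketch; assumes the app is served locally on port 8000):
#   curl -N -X POST "http://localhost:8000/generate?prompt=Hello&model=openai/gpt-4.1-mini"
# The -N flag disables curl's output buffering so the raw text chunks appear as they stream.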
@app.post("/process-image")
async def process_image(
image: UploadFile = File(..., description="Image file (PNG, JPEG, GIF)"),
question: str = Form(..., description="Question about the image"),
model: str = Form("openai/gpt-4o", description="Vision-capable model")
):
# Validate image format
if not image.filename.lower().endswith((".png", ".jpg", ".jpeg", ".gif")):
raise HTTPException(status_code=400, detail="Unsupported image format. Use PNG, JPEG, or GIF.")
# Read image content
image_data = await image.read()
# Process image with vision model
response = await process_image_with_vision(image_data, question, model)
return response
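
# Example request (a sketch; the host, port, and photo.jpg filename are placeholders):
#   curl -X POST "http://localhost:8000/process-image" \
#        -F "image=@photo.jpg" -F "question=What is in this image?" -F "model=openai/gpt-4o"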

def get_app():
    return app
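
# Optional local entry point: a minimal sketch assuming uvicorn is installed (it is not
# imported above, and port 7860 is only the usual Hugging Face Spaces default).
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)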