# icon-exporter / app.py
from fastapi import FastAPI, File, UploadFile, Form
from fastapi.responses import StreamingResponse
from transformers import AutoModel, AutoTokenizer
import uvicorn
from PIL import Image
import io
import torch
import os
from huggingface_hub import login

app = FastAPI(title="Image-Text API")

# Global variables for the model and tokenizer
model = None
tokenizer = None

# Log in with the HuggingFace token if one is provided
if "HUGGINGFACE_TOKEN" in os.environ:
    login(token=os.environ["HUGGINGFACE_TOKEN"])

async def init_model():
    global model, tokenizer
    # Load the model and tokenizer
    model = AutoModel.from_pretrained(
        'openbmb/MiniCPM-V-2_6',
        trust_remote_code=True,
        attn_implementation='eager',
        torch_dtype=torch.bfloat16
    )
    model = model.eval()
    if torch.cuda.is_available():
        model = model.cuda()
    tokenizer = AutoTokenizer.from_pretrained(
        'openbmb/MiniCPM-V-2_6',
        trust_remote_code=True
    )

@app.on_event("startup")
async def startup_event():
    await init_model()

@app.post("/process")
async def process_image_text(
    image: UploadFile = File(...),
    prompt: str = Form(...),
    stream: bool = Form(False)
):
    try:
        # Read the upload and convert it to a PIL Image
        image_content = await image.read()
        pil_image = Image.open(io.BytesIO(image_content)).convert('RGB')
        # Prepare the message format expected by model.chat
        msgs = [{'role': 'user', 'content': [pil_image, prompt]}]
        if stream:
            # Generator for the streaming response; with stream=True,
            # model.chat yields text chunks. Note that errors raised
            # during generation occur after the endpoint returns, so
            # they are not caught by the try/except below.
            def generate():
                result = model.chat(
                    image=None,
                    msgs=msgs,
                    tokenizer=tokenizer,
                    sampling=True,
                    stream=True
                )
                for text in result:
                    yield text
            return StreamingResponse(generate(), media_type="text/plain")
        else:
            # Regular (non-streaming) response
            result = model.chat(
                image=None,
                msgs=msgs,
                tokenizer=tokenizer
            )
            return {
                "status": "success",
                "result": result
            }
    except Exception as e:
        return {
            "status": "error",
            "message": str(e)
        }
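
# A minimal client-side sketch for consuming the streaming response above,
# assuming the server runs locally on port 8000 and a file named "icon.png"
# exists (both illustrative assumptions, not part of this app):
#
#   import requests
#
#   with open("icon.png", "rb") as f:
#       resp = requests.post(
#           "http://localhost:8000/process",
#           files={"image": f},
#           data={"prompt": "Describe this icon", "stream": "true"},
#           stream=True,
#       )
#   for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
#       print(chunk, end="", flush=True)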

@app.get("/")
async def root():
    return {
        "message": "Welcome to the Image-Text API",
        "usage": "Use the POST /process endpoint"
    }

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
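
# Example of a regular (non-streaming) call, under the same assumed host
# and hypothetical file name as the streaming sketch above:
#
#   import requests
#
#   with open("icon.png", "rb") as f:
#       resp = requests.post(
#           "http://localhost:8000/process",
#           files={"image": f},
#           data={"prompt": "Describe this icon"},
#       )
#   print(resp.json())  # e.g. {"status": "success", "result": "..."}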