# app.py — AI Shield detection API (hbatali2020, commit 8f6bbc3)
# ─── flash_attn mock ──────────────────────────────────────────────────
# Florence-2's remote code imports flash_attn; on CPU-only hosts the
# package is absent, so register stub modules before transformers loads.
import sys
import types
import importlib.util

_flash_stub = types.ModuleType("flash_attn")
_flash_stub.__version__ = "2.0.0"
_flash_stub.__spec__ = importlib.util.spec_from_loader("flash_attn", loader=None)
sys.modules["flash_attn"] = _flash_stub
for _sub_name in ("flash_attn.flash_attn_interface", "flash_attn.bert_padding"):
    sys.modules[_sub_name] = types.ModuleType(_sub_name)
# ─────────────────────────────────────────────────────────────────────
import io
import time
import httpx
import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM
from fastapi import FastAPI, HTTPException, UploadFile, File
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from contextlib import asynccontextmanager
from typing import Optional
# Hugging Face Hub id of the fine-tuned Florence-2 model used for VQA.
MODEL_ID = "microsoft/Florence-2-large-ft"
# Fixed Yes/No question posed to the model for every analyzed image.
VQA_QUESTION = (
"Is there a human being or part of a human body in the picture, such as a hand or fingers, etc.? Answer Yes or No."
)
# Populated by lifespan() at startup: {"processor": ..., "model": ...}.
MODEL_DATA = {}
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Load the Florence-2 processor and model once at startup.

    Both objects are stored in MODEL_DATA; the dict is cleared again
    when the application shuts down.
    """
    print(f"๐Ÿ“ฅ Loading {MODEL_ID}...")
    t0 = time.time()
    MODEL_DATA["processor"] = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
    florence = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float32,        # CPU-friendly dtype
        trust_remote_code=True,
        attn_implementation="eager",      # skip the flash-attn code path (stubbed out above)
    )
    MODEL_DATA["model"] = florence.eval()
    print(f"โœ… Model ready in {time.time()-t0:.1f}s")
    yield
    MODEL_DATA.clear()
app = FastAPI(
    title="AI Shield - Female Detection API",
    description="Florence-2-large-ft | VQA | Compatible with AI Shield Chrome Extension",
    version="5.0.0",
    lifespan=lifespan,
)

# CORS: required so the Chrome extension can call the API cross-origin.
app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,
    allow_origins=["*"],
    allow_headers=["*"],
    allow_methods=["*"],
)
# ─── Schemas ─────────────────────────────────────────────────────────
class ImageUrlRequest(BaseModel):
    """Request body for POST /analyze, sent by the Chrome extension."""
    # Direct URL of the image to download and analyze.
    image_url: str
# ─── Shared analysis helper ──────────────────────────────────────────
def _decide(answer: str) -> tuple:
    """Map the model's free-text VQA answer to a (decision, reason) pair.

    Fail-safe policy: anything that is not a clear "No" is blocked.
    """
    # Tokenize so we match the *word* "no" exactly. The previous check used
    # startswith("no"), which also matched answers like "not sure" or
    # "none", wrongly ALLOWing content the model did not clear.
    tokens = answer.lower().replace(".", " ").replace(",", " ").split()
    if tokens and tokens[0] == "no":
        return "ALLOW", "model_answered_no"
    if "yes" in tokens:
        return "BLOCK", "model_answered_yes"
    return "BLOCK", "unexpected_answer_blocked_for_safety"


def analyze_image(image: Image.Image) -> dict:
    """Run the Florence-2 VQA task on *image* and return the verdict dict.

    Requires lifespan() to have populated MODEL_DATA first.

    Returns a dict with keys: decision ("ALLOW"|"BLOCK"), reason,
    vqa_answer, execution_time (seconds), status.
    """
    processor = MODEL_DATA["processor"]
    model = MODEL_DATA["model"]
    task = "<VQA>"
    prompt = f"{task}{VQA_QUESTION}"
    inputs = processor(text=prompt, images=image, return_tensors="pt")
    start_time = time.time()
    with torch.no_grad():
        generated_ids = model.generate(
            input_ids=inputs["input_ids"],
            pixel_values=inputs["pixel_values"],
            max_new_tokens=10,  # a Yes/No answer needs only a few tokens
            num_beams=3,
            do_sample=False,    # deterministic decoding
        )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed = processor.post_process_generation(
        generated_text,
        task=task,
        image_size=(image.width, image.height),
    )
    elapsed = round(time.time() - start_time, 2)
    answer = parsed.get(task, "").strip()
    decision, reason = _decide(answer)
    return {
        "decision": decision,  # ALLOW | BLOCK (uppercase, matches the extension)
        "reason": reason,
        "vqa_answer": answer,
        "execution_time": elapsed,
        "status": "success",
    }
# ─── Health check ────────────────────────────────────────────────────
@app.get("/health")
def health():
    """Liveness probe: reports whether the model has finished loading."""
    model_loaded = "model" in MODEL_DATA
    return {"status": "ok", "model_loaded": model_loaded}
# ─── Endpoint 1: from the Chrome extension (image_url) ───────────────
# background.js sends: POST /analyze {"image_url": "https://..."}
@app.post("/analyze")
async def analyze_from_url(request: ImageUrlRequest):
    """Download the image at request.image_url and analyze it.

    Raises HTTP 400 when the download fails or the bytes are not a
    decodable image.
    """
    try:
        async with httpx.AsyncClient(timeout=30) as client:
            resp = await client.get(request.image_url)
            resp.raise_for_status()
            raw_bytes = resp.content
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"ูุดู„ ุชุญู…ูŠู„ ุงู„ุตูˆุฑุฉ ู…ู† URL: {str(e)}")
    try:
        image = Image.open(io.BytesIO(raw_bytes)).convert("RGB")
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"ุฎุทุฃ ููŠ ู‚ุฑุงุกุฉ ุงู„ุตูˆุฑุฉ: {str(e)}")
    return analyze_image(image)
# ─── Endpoint 2: manual testing (file upload) ────────────────────────
@app.post("/analyze-file")
async def analyze_from_file(file: UploadFile = File(...)):
    """Manual-test endpoint: analyze an uploaded image file.

    Raises HTTP 400 when the upload is not an image or cannot be decoded.
    """
    # content_type can be None on a malformed upload; the old check called
    # .startswith on it directly and crashed with AttributeError (HTTP 500)
    # instead of returning the intended 400.
    if not file.content_type or not file.content_type.startswith("image/"):
        raise HTTPException(status_code=400, detail="ุงู„ู…ู„ู ู„ูŠุณ ุตูˆุฑุฉ")
    try:
        image = Image.open(io.BytesIO(await file.read())).convert("RGB")
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"ุฎุทุฃ ููŠ ู‚ุฑุงุกุฉ ุงู„ุตูˆุฑุฉ: {str(e)}")
    return analyze_image(image)
if __name__ == "__main__":
    # Local dev entry point; port 7860 — presumably the Hugging Face
    # Spaces convention, confirm against the deployment config.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)