#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Qwen-VL FastAPI server with metrics & controls:
- Endpoints: /image-qa, /image-caption, /healthz
- Returns: {"answer", "usage": {"input_tokens","output_tokens"}, "timing": {"ttft","e2el"}}
- Concurrency guard: asyncio.Semaphore(QWEN_MAX_INFLIGHT), default 1 (set via QWEN_MAX_INFLIGHT env)
"""
import base64, io, os, threading, time, asyncio

import torch
from fastapi import FastAPI
from pydantic import BaseModel
from PIL import Image
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer
from qwen_vl_utils import process_vision_info

# Model selection and runtime knobs, all overridable via environment variables.
MODEL_ID = os.environ.get("QWEN_VL_MODEL", "models/Qwen/Qwen2.5-VL-3B-Instruct")  # local path or HF hub id
DTYPE = torch.float16
DEVICE_MAP = "auto"  # let accelerate/transformers place the weights
MAX_INFLIGHT = int(os.environ.get("QWEN_MAX_INFLIGHT", "1"))  # limit per instance

# Load model + processor once at import time; record startup cost for /healthz.
t_load0 = time.time()
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(MODEL_ID, device_map=DEVICE_MAP, torch_dtype=DTYPE)
processor = AutoProcessor.from_pretrained(MODEL_ID)
load_time_sec = time.time() - t_load0

app = FastAPI(title="Qwen-VL Metrics Server")
# Gates concurrent generations per process; sized by QWEN_MAX_INFLIGHT (default 1).
SEM = asyncio.Semaphore(MAX_INFLIGHT)


def decode_image(img_str: str) -> Image.Image:
    """Decode an image given as a data URL, raw base64, or a filesystem path.

    Dispatch heuristic: a ``data:image`` prefix means a data URL; otherwise a
    string longer than 1024 chars or containing a newline is assumed to be a
    bare base64 payload; anything else is treated as a local file path.
    """
    if img_str.startswith("data:image"):
        _, payload = img_str.split(",", 1)
        return Image.open(io.BytesIO(base64.b64decode(payload))).convert("RGB")

    looks_like_base64 = len(img_str) > 1024 or "\n" in img_str
    if looks_like_base64:
        return Image.open(io.BytesIO(base64.b64decode(img_str))).convert("RGB")

    # Short, newline-free string: interpret as a path on the server filesystem.
    return Image.open(img_str).convert("RGB")


def token_len(text: str) -> int:
    """Return the number of model tokens in *text* (no special tokens added).

    Empty or None-ish input counts as zero without touching the tokenizer.
    """
    if not text:
        return 0
    encoded = processor.tokenizer(text, return_tensors="pt", add_special_tokens=False)
    input_ids = encoded["input_ids"]
    return int(input_ids.shape[-1])


class QAReq(BaseModel):
    """Request body for /image-qa."""
    image: str  # data URL, raw base64, or server-local file path (see decode_image)
    question: str  # passed through verbatim if it contains option/format markers, else wrapped
    max_new_tokens: int = 8
    top_p: float = 1.0  # only effective when do_sample is True
    do_sample: bool = False  # greedy decoding by default


class ModelResp(BaseModel):
    """Response envelope: generated text plus token usage and latency metrics."""
    answer: str
    usage: dict  # {"input_tokens": int, "output_tokens": int}
    timing: dict  # latency metrics produced by generate_with_metrics


def generate_with_metrics(messages, max_new_tokens=8, top_p=1.0, do_sample=False) -> dict:
    """Run one chat generation and collect token counts and latency metrics.

    Args:
        messages: Qwen-VL chat messages (role/content dicts, may embed images).
        max_new_tokens: cap on generated tokens.
        top_p: nucleus-sampling parameter (only used when do_sample=True).
        do_sample: greedy decoding when False.

    Returns:
        dict with "answer" (generated text), "usage" ({"input_tokens",
        "output_tokens"}), and "timing" ({"ttft": seconds from generation start
        to first streamed token, or None if nothing was produced;
        "e2el": end-to-end generation latency in seconds}).
    """
    prompt_text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(text=[prompt_text], images=image_inputs, videos=video_inputs, return_tensors="pt").to(
        model.device)

    # NOTE(review): counts tokens of the text prompt only; vision tokens are
    # not included in input_tokens.
    input_tokens = token_len(prompt_text)
    streamer = TextIteratorStreamer(processor.tokenizer, skip_prompt=True, skip_special_tokens=True)
    gen_kwargs = dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        do_sample=do_sample,
    )
    ttft = None
    chunks = []

    def _worker():
        model.generate(**gen_kwargs)

    # Measure from just before generation starts so ttft/e2el are durations.
    t0 = time.time()
    th = threading.Thread(target=_worker)
    th.start()

    for piece in streamer:
        chunks.append(piece)
        if ttft is None and piece:
            # BUG FIX: previously stored the absolute time.time() timestamp;
            # report the time-to-first-token as a delta instead.
            ttft = time.time() - t0

    th.join()
    # BUG FIX: the advertised "e2el" (end-to-end latency) was never returned.
    e2el = time.time() - t0
    answer_text = "".join(chunks).strip()
    output_tokens = token_len(answer_text)

    return {
        "answer": answer_text,
        "usage": {"input_tokens": input_tokens, "output_tokens": output_tokens},
        "timing": {"ttft": ttft, "e2el": e2el},
    }


@app.get("/healthz")
def healthz():
    """Liveness probe: reports model id, startup load time, and concurrency cap."""
    payload = {"status": "ok", "model": MODEL_ID}
    payload["load_time_sec"] = round(load_time_sec, 3)
    payload["max_inflight"] = MAX_INFLIGHT
    return payload


@app.post("/image-qa", response_model=ModelResp)
async def image_qa(req: QAReq):
    """Answer a question about an image.

    Questions that already carry explicit output instructions (a "\\n选项"
    options block or a "请只输出" directive) are passed through verbatim;
    otherwise the question is wrapped in a concise-answer prompt.
    """
    img = decode_image(req.image)
    if "\n选项" in req.question or "请只输出" in req.question:
        user_content = [{"type": "image", "image": img}, {"type": "text", "text": req.question}]
    else:
        user_content = [
            {"type": "image", "image": img},
            {"type": "text", "text": f"请根据图片回答：{req.question}。只输出答案要点。"}
        ]
    messages = [{"role": "user", "content": user_content}]
    async with SEM:
        # BUG FIX: generate_with_metrics is fully synchronous (model.generate +
        # blocking streamer drain); calling it inline froze the event loop for
        # the whole generation, starving /healthz and every other request.
        # Offload to a worker thread while holding the semaphore.
        return await asyncio.to_thread(
            generate_with_metrics,
            messages,
            max_new_tokens=req.max_new_tokens,
            top_p=req.top_p,
            do_sample=req.do_sample,
        )

