from fastapi import APIRouter, UploadFile, File, Form, HTTPException
from fastapi.responses import JSONResponse
import base64
import io
from PIL import Image, ImageDraw, ImageFont
from openai import OpenAI
import os
import cv2
import numpy as np
import random
router = APIRouter()


# Font for potential text overlays; fall back to Pillow's built-in bitmap
# font when "arial.ttf" is not available (e.g. on Linux servers).
try:
    FONT = ImageFont.truetype("arial.ttf", 32)
except IOError:
    FONT = ImageFont.load_default()

# ModelScope client configuration (OpenAI-compatible inference endpoint).
# Read the conventionally-named MODELSCOPE_API_KEY first, falling back to the
# historical mixed-case MODELScope_API_KEY for backward compatibility.
# FIXME(security): a real API key is hard-coded below as a last-resort
# default. It is committed to source control and must be treated as
# compromised — revoke it and rely solely on the environment variable.
MODELSCOPE_API_KEY = (
    os.getenv("MODELSCOPE_API_KEY")
    or os.getenv("MODELScope_API_KEY")
    or "ms-c1a1eb97-45cd-4f3b-b301-17ff67134349"
)
CLIENT = OpenAI(
    base_url='https://api-inference.modelscope.cn/v1',
    api_key=MODELSCOPE_API_KEY,
)
MODEL_ID = 'Qwen/Qwen3-VL-8B-Instruct'


def draw_cross_on_image(image: Image.Image, x: float, y: float, radius: float = 10) -> Image.Image:
    """Paint a filled red marker at the gaze point (x, y).

    NOTE: despite the name, this draws a filled circle (ellipse), not a
    cross — the downstream LLM prompt refers to a red dot, so this looks
    intentional. Mutates *image* in place and returns it for convenience.
    """
    pen = ImageDraw.Draw(image)
    # Bounding box of the marker: (left, top, right, bottom).
    marker_box = (x - radius, y - radius, x + radius, y + radius)
    pen.ellipse(marker_box, fill="red", outline="red")
    return image


def image_to_base64(img: Image.Image, format="JPEG") -> str:
    """Serialize *img* via Pillow and return it as a data-URL string.

    Result shape: "data:image/<fmt-lowercase>;base64,<payload>".
    """
    buffer = io.BytesIO()
    img.save(buffer, format=format)
    payload = base64.b64encode(buffer.getvalue()).decode('utf-8')
    return f"data:image/{format.lower()};base64,{payload}"

@router.post("/vision/gaze_describe")
async def describe_gaze_content(
    image: UploadFile = File(..., description="用户上传的图片"),
    x: float = Form(..., description="注视点x坐标"),
    y: float = Form(..., description="注视点y坐标")
):
    """Annotate the gaze point on an uploaded image and ask the vision LLM
    to describe what the user is looking at.

    Returns JSON with: "description" (model output), "annotated_image"
    (base64 JPEG, no data-URL prefix), "gaze_point", "image_size".
    Raises HTTPException 400 for bad uploads/coordinates, 500 for
    encoding or model-call failures.
    """
    # Reject non-image uploads early.
    if not image.content_type or not image.content_type.startswith("image/"):
        raise HTTPException(status_code=400, detail="上传文件必须是图片")

    try:
        img_data = await image.read()
        img = Image.open(io.BytesIO(img_data)).convert("RGB")
        width, height = img.size
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"无法解析图片: {str(e)}")

    # The gaze point must fall inside the image bounds.
    if not (0 <= x <= width and 0 <= y <= height):
        raise HTTPException(
            status_code=400,
            detail=f"注视点坐标超出范围 (0-{width}, 0-{height})"
        )

    # Draw the red marker on a copy so the decoded original stays untouched.
    annotated_img = draw_cross_on_image(img.copy(), x, y)

    # Encode the annotated image to JPEG exactly once and reuse the bytes for
    # both the model's data URL and the response payload (the previous
    # version encoded the same image twice).
    try:
        buffered = io.BytesIO()
        annotated_img.save(buffered, format="JPEG")
        annotated_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
        img_url = f"data:image/jpeg;base64,{annotated_base64}"
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"图像编码失败: {str(e)}")

    # Ask the vision model to describe the region around the red dot.
    try:
        response = CLIENT.chat.completions.create(
            model=MODEL_ID,
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "请描述用户正在注视的区域内容,用户的注视点在图片上是一个红点。用列表:1);2),3)表达,每列只用一个名词或短语"},
                        {"type": "image_url", "image_url": {"url": img_url}}
                    ]
                }
            ],
            stream=False
        )
        # message.content may be None (e.g. refusals); guard before strip().
        description = (response.choices[0].message.content or "").strip()
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"调用大模型失败: {str(e)}")

    return JSONResponse({
        "description": description,
        "annotated_image": annotated_base64,
        "gaze_point": {"x": x, "y": y},
        "image_size": {"width": width, "height": height}
    })


def create_visualization_image(record, canvas_width=640, canvas_height=480):
    """Render one sample (2D hand keypoints, gaze point, label) onto a
    white canvas and return it as an OpenCV BGR uint8 image.

    record: mapping with 'keypoints_2d' (flattens to (N, 2), may contain
    NaN), 'gaze_point' ([gx, gy]) and 'label' (1 = positive) — schema
    assumed from usage; confirm against the data producer.
    """
    keypoints = np.array(record['keypoints_2d']).reshape(-1, 2)
    gaze_x, gaze_y = int(record['gaze_point'][0]), int(record['gaze_point'][1])
    positive = record['label'] == 1

    # Start from a plain white background.
    canvas = np.full((canvas_height, canvas_width, 3), 255, dtype=np.uint8)

    # Each valid on-canvas keypoint becomes a small blue (BGR) dot;
    # keypoint 8 — presumably the index fingertip — is drawn larger.
    for idx, (kx, ky) in enumerate(keypoints):
        if np.isnan(kx) or np.isnan(ky):
            continue
        kx, ky = int(kx), int(ky)
        if not (0 <= kx < canvas_width and 0 <= ky < canvas_height):
            continue
        cv2.circle(canvas, (kx, ky), radius=1, color=(255, 0, 0), thickness=-1)
        if idx == 8:
            cv2.circle(canvas, (kx, ky), radius=2, color=(255, 0, 0), thickness=-1)

    # Gaze point (if on canvas): green when positive, red otherwise.
    if 0 <= gaze_x < canvas_width and 0 <= gaze_y < canvas_height:
        gaze_color = (0, 255, 0) if positive else (0, 0, 255)
        cv2.circle(canvas, (gaze_x, gaze_y), radius=3, color=gaze_color, thickness=2)
        cv2.putText(canvas, "Gaze", (gaze_x + 10, gaze_y), cv2.FONT_HERSHEY_SIMPLEX, 0.6, gaze_color, 2)

    # Label banner in the top-left corner, same color scheme.
    label_text = "Positive" if positive else "Negative"
    text_color = (0, 255, 0) if positive else (0, 0, 255)
    cv2.putText(canvas, f"Label: {label_text}", (15, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, text_color, 2)

    return canvas

@router.get("/vision/gaze_hand")
def get_visualizations():
    """Pick a random record file from files/inter, render every record with
    create_visualization_image, and return the images as base64 JPEGs.

    Response: JSON list of {"id", "label", "image"} objects.
    Raises HTTPException 404 when the data folder is missing or empty
    (previously these cases crashed with an unhandled 500: FileNotFoundError
    from os.listdir, IndexError from random.choice on an empty list).
    """
    folder_path = os.path.abspath("files/inter")
    if not os.path.isdir(folder_path):
        raise HTTPException(status_code=404, detail="数据目录不存在")
    files = [f for f in os.listdir(folder_path)
             if os.path.isfile(os.path.join(folder_path, f))]
    if not files:
        raise HTTPException(status_code=404, detail="数据目录为空")

    chosen_file = random.choice(files)
    data_file = os.path.join(folder_path, chosen_file)
    # NOTE(security): allow_pickle=True can execute arbitrary code on load;
    # acceptable only because these files are generated locally, never
    # uploaded by users.
    data_list = np.load(data_file, allow_pickle=True)

    images_base64 = []
    for i, record in enumerate(data_list):
        img = create_visualization_image(record)
        _, buffer = cv2.imencode('.jpg', img)
        img_str = base64.b64encode(buffer).decode('utf-8')
        images_base64.append({
            "id": i,
            "label": int(record['label']),
            "image": img_str
        })

    return JSONResponse(images_base64)