# main.py
# Object detection project
import base64
import logging
import time
from contextlib import asynccontextmanager

import torch
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import cv2
import numpy as np
from pydantic import BaseModel
from ultralytics import YOLO
from llama_cpp import Llama

# Configuration parameters

# Locally deployed large language model; must be in GGUF format — replace the path with your own
MODEL_PATH = r"D:\LM Studio models\lmstudio-community\DeepSeek-R1-Distill-Qwen-1.5B-GGUF\DeepSeek-R1-Distill-Qwen-1.5B-Q4_K_M.gguf"
# YOLO model
YOLO_MODEL_PATH= r"yolo11n.pt"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
MAX_LENGTH = 1024
TEMPERATURE = 0.7

# Initialize the FastAPI application
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan hook: load the GGUF LLM once at startup.

    Raises:
        RuntimeError: propagated from ``model_loader.load_model()`` so the
            ASGI server aborts startup cleanly instead of serving requests
            with no model.
    """
    try:
        model_loader.load_model()
        logging.info("Model loaded successfully")
    except Exception:
        # Re-raise instead of exit(1): calling exit() raises SystemExit from
        # inside the event loop, while re-raising lets the server report a
        # proper startup failure. logging.exception keeps the traceback.
        logging.exception("Failed to initialize model")
        raise
    yield

# Create the application; model loading happens in the lifespan hook above.
app = FastAPI(lifespan=lifespan)

# Add CORS middleware so a browser front-end can call the API.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# maximally permissive — tighten the origin list for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the YOLO object-detection model at import time (path configured above).
yolo_model = YOLO(YOLO_MODEL_PATH)

# ===================== Object detection service =====================
@app.post("/detect/image")
async def detect_image(file: UploadFile = File(...)):
    """Run YOLO detection on an uploaded image.

    Returns a JSON payload containing the annotated image as a base64 data
    URL plus, for each detected box, its class id, confidence and pixel
    coordinates.
    """
    try:
        contents = await file.read()
        nparr = np.frombuffer(contents, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        # cv2.imdecode returns None (no exception) for undecodable data;
        # fail fast with a 400 instead of crashing later inside YOLO.
        if img is None:
            return JSONResponse(
                status_code=400,
                content={"message": "无法解码上传的图片"}
            )

        # Run detection; verbose=False keeps per-request inference logs
        # out of stdout (was verbose=True — noisy in production).
        results = yolo_model(img, verbose=False)
        annotated_img = results[0].plot()

        # Flatten box tensors into plain JSON-serializable values.
        result_data = []
        for result in results:
            for box in result.boxes:
                x1, y1, x2, y2 = (v.item() for v in box.xyxy[0])
                result_data.append({
                    "class_id": int(box.cls[0].item()),
                    "confidence": round(box.conf[0].item(), 3),
                    "position": {
                        "left": x1,
                        "right": x2,
                        "top": y1,
                        "bottom": y2
                    }
                })

        # Encode the annotated frame as JPEG and return it inline as base64.
        _, encoded_img = cv2.imencode('.jpg', annotated_img)
        return JSONResponse({
            "result": "data:image/jpeg;base64," + base64.b64encode(encoded_img).decode(),
            "detection_info": result_data
        })

    except Exception as e:
        # Log the traceback server-side; the client only gets the message.
        logging.exception("Image detection failed")
        return JSONResponse(
            status_code=500,
            content={"message": f"处理图片时发生错误: {str(e)}"}
        )

# ===================== AI Q&A service =====================
class QuestionRequest(BaseModel):
    """Request body for POST /api/ask."""
    question: str  # user prompt forwarded to the LLM as a chat message
    max_length: int = MAX_LENGTH  # max tokens to generate
    temperature: float = TEMPERATURE  # sampling temperature

class ResponseData(BaseModel):
    """Response body for POST /api/ask."""
    answer: str  # generated text from the chat completion
    latency: float  # wall-clock generation time in seconds
    # Label the model actually configured in MODEL_PATH. The previous
    # default ("Tifa-DeepsexV2-7b") was a leftover from a different model
    # and misreported what answered the question.
    model: str = "DeepSeek-R1-Distill-Qwen-1.5B"

class ModelLoader:
    """Holder for the llama.cpp model; ``llm`` stays None until load_model()."""

    def __init__(self):
        self.llm = None  # Llama instance once loaded; None means "not ready"

    def load_model(self):
        """Load the GGUF model from MODEL_PATH into ``self.llm``.

        Raises:
            RuntimeError: if the model cannot be initialized (original
                exception attached as the cause).
        """
        try:
            self.llm = Llama(
                model_path=MODEL_PATH,
                n_ctx=2048,  # context window in tokens
                n_threads=8,  # CPU threads used for inference
                # Offload layers to the GPU only when CUDA is available.
                n_gpu_layers=35 if DEVICE == "cuda" else 0
            )
            logging.info(f"GGUF model loaded on {DEVICE.upper()}")
        except Exception as e:
            # logging.exception preserves the traceback in the log, and
            # "from e" chains the cause so callers see the real failure.
            logging.exception("Model loading failed")
            raise RuntimeError("Model initialization failed") from e

# Module-level singleton; the model itself is loaded in the lifespan hook.
model_loader = ModelLoader()

@app.post("/api/ask", response_model=ResponseData)
async def ask_question(request: QuestionRequest):
    """Answer a question with the locally loaded GGUF chat model.

    Raises:
        HTTPException: 503 when the model is not loaded yet,
            500 when generation fails.
    """
    if not model_loader.llm:
        raise HTTPException(503, detail="Model not ready")

    # perf_counter is monotonic, so the reported latency cannot jump or go
    # negative if the system clock is adjusted mid-request (time.time can).
    start_time = time.perf_counter()
    try:
        response = model_loader.llm.create_chat_completion(
            messages=[{"role": "user", "content": request.question}],
            max_tokens=request.max_length,
            temperature=request.temperature
        )
        # "model" is omitted on purpose: response_model fills the default.
        return {
            "answer": response["choices"][0]["message"]["content"],
            "latency": time.perf_counter() - start_time,
        }
    except Exception as e:
        logging.error(f"Generation error: {str(e)}")
        # Chain the cause so server logs/debuggers can see the real error.
        raise HTTPException(500, detail="Generation failed") from e

# ===================== Main service =====================
@app.get("/", response_class=HTMLResponse)
async def main():
    """Serve the front-end page from html2.html in the working directory.

    Raises:
        HTTPException: 404 when the page file is missing.
    """
    try:
        with open(r"html2.html", "r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError as e:
        # A missing page file previously surfaced as an unhandled 500 with
        # a server traceback; return a clean 404 instead.
        raise HTTPException(404, detail="html2.html not found") from e

if __name__ == "__main__":
    # Run the ASGI app directly when executed as a script (dev convenience);
    # uvicorn is imported lazily so importing this module never requires it.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)