# main.py
from fastapi import FastAPI, UploadFile, File, Form, Request, HTTPException
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
import shutil
import os
import cv2
from openai import OpenAI
import time
from pathlib import Path
import asyncio
import json
import base64
import logging
import uuid
from datetime import datetime
from config import config
from model_clients import ModelClientFactory

# Logging configuration — level and format come from the project config module.
logging.basicConfig(
    level=getattr(logging, config.LOG_LEVEL),
    format=config.LOG_FORMAT
)
logger = logging.getLogger(__name__)

app = FastAPI(
    title=config.APP_NAME,
    description=config.APP_DESCRIPTION,
    version=config.APP_VERSION,
    debug=config.DEBUG
)

# CORS middleware: origins come from config; methods/headers are unrestricted.
app.add_middleware(
    CORSMiddleware,
    allow_origins=config.CORS_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create required directories (upload/frames dirs) before serving any request.
config.create_directories()

# Snapshot the configuration sections used throughout this module.
model_config = config.get_model_config()
video_config = config.get_video_config()
image_config = config.get_image_config()

# Initialize the model client. On failure the app still starts, but
# model_client stays None and endpoints report the service as unavailable.
try:
    model_client = ModelClientFactory.create_client(
        model_config["client_type"],
        model_config
    )
    logger.info(f"模型客户端初始化成功: {model_config['client_type']} - {model_config['base_url']}")
except Exception as e:
    logger.error(f"模型客户端初始化失败: {str(e)}")
    model_client = None

# Jinja2 templates plus static mounts so uploaded videos and extracted
# frames are directly downloadable under /uploads and /frames.
templates = Jinja2Templates(directory="templates")
app.mount("/uploads", StaticFiles(directory=str(config.UPLOAD_DIR)), name="uploads")
app.mount("/frames", StaticFiles(directory=str(config.FRAMES_DIR)), name="frames")

def encode_image(image_path):
    """Return the contents of the file at *image_path* as a base64 UTF-8 string."""
    raw_bytes = Path(image_path).read_bytes()
    return base64.b64encode(raw_bytes).decode('utf-8')

async def analyze_image(image_path: str, object_str: str):
    """Delegate frame analysis to the configured model client.

    Returns a ``(is_match, description, confidence)`` tuple; when the model
    client is missing or the call fails, returns ``(False, <message>, 0)``
    instead of raising.
    """
    # Guard clause: no client means the model service never came up.
    if model_client is None:
        logger.error("模型客户端未初始化")
        return False, "模型服务不可用", 0

    try:
        return await model_client.analyze_image(image_path, object_str)
    except Exception as e:
        logger.error(f"图像分析失败: {str(e)}")
        return False, f"分析错误: {str(e)}", 0

def preprocess_image(image_path):
    """Enhance local contrast of the image in place via CLAHE on the L channel.

    The image is converted to LAB, the lightness channel is equalized with
    CLAHE (clipLimit=2.0, 8x8 tiles), and the result overwrites the original
    file at maximum JPEG quality. Returns False if the file cannot be decoded,
    True otherwise.
    """
    image = cv2.imread(image_path)
    if image is None:
        return False

    lightness, chan_a, chan_b = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2LAB))
    equalized = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)).apply(lightness)
    enhanced_bgr = cv2.cvtColor(cv2.merge((equalized, chan_a, chan_b)), cv2.COLOR_LAB2BGR)
    cv2.imwrite(image_path, enhanced_bgr, [cv2.IMWRITE_JPEG_QUALITY, 100])
    return True

@app.get("/")
async def home(request: Request):
    """Serve the application's main HTML page."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)

@app.get("/health")
async def health_check():
    """Health probe: 503 when the model client is down, otherwise a summary
    of the configured model service with a timestamp."""
    try:
        if model_client is None:
            error_body = {"status": "error", "message": "模型服务未初始化"}
            return JSONResponse(status_code=503, content=error_body)

        healthy_body = {
            "status": "healthy",
            "model_service": model_config["base_url"],
            "model_type": model_config["client_type"],
            "model_name": model_config["model_name"],
            "timestamp": datetime.now().isoformat(),
        }
        return JSONResponse(content=healthy_body)
    except Exception as e:
        logger.error(f"健康检查失败: {str(e)}")
        return JSONResponse(
            status_code=503,
            content={"status": "error", "message": str(e)},
        )

@app.post("/analyze")
async def analyze_video(
        video: UploadFile = File(...),
        object_str: str = Form(...)
):
    """视频分析接口 — stream per-second frame analysis as newline-delimited JSON.

    Saves the uploaded video, samples one frame per second, runs the model
    client on each frame, and stops early once the target is detected in two
    consecutive sampled frames.

    Args:
        video: uploaded video file (multipart form field "video").
        object_str: natural-language description of the target to detect.

    Returns:
        StreamingResponse of JSON lines ({"status": "success", ...} per frame,
        then {"status": "complete", ...}); JSONResponse with an error status
        on validation or server failure.
    """
    task_id = str(uuid.uuid4())
    logger.info(f"开始分析任务 {task_id}: 文件={video.filename}, 目标={object_str}")

    # BUGFIX: the original referenced undefined module-level names
    # (SUPPORTED_VIDEO_FORMATS, MAX_FILE_SIZE, UPLOAD_DIR, FRAMES_DIR), which
    # raised NameError on every request. Resolve them from the config module
    # (the rest of the file uses config.UPLOAD_DIR / config.FRAMES_DIR).
    # NOTE(review): the fallback values below are assumptions — confirm the
    # real constants exist in config and drop the fallbacks.
    supported_formats = set(getattr(
        config, "SUPPORTED_VIDEO_FORMATS",
        (".mp4", ".avi", ".mov", ".mkv", ".webm")
    ))
    max_file_size = getattr(config, "MAX_FILE_SIZE", 500 * 1024 * 1024)

    try:
        # Validate inputs.
        if not video.filename:
            raise HTTPException(status_code=400, detail="未提供视频文件")

        if not object_str.strip():
            raise HTTPException(status_code=400, detail="未提供目标描述")

        # Check the file extension against the supported container formats.
        file_ext = Path(video.filename).suffix.lower()
        if file_ext not in supported_formats:
            raise HTTPException(
                status_code=400,
                detail=f"不支持的视频格式: {file_ext}. 支持的格式: {', '.join(sorted(supported_formats))}"
            )

        # Check the declared upload size (may be absent, hence the truthiness test).
        if video.size and video.size > max_file_size:
            raise HTTPException(
                status_code=400,
                detail=f"文件过大: {video.size / 1024 / 1024:.1f}MB. 最大支持: {max_file_size / 1024 / 1024:.0f}MB"
            )

        # Refuse work when the model service never initialized.
        if model_client is None:
            raise HTTPException(status_code=503, detail="模型服务不可用")

        # Unique filename so concurrent uploads cannot overwrite each other.
        unique_filename = f"{task_id}_{video.filename}"
        video_path = config.UPLOAD_DIR / unique_filename

        # Persist the uploaded video to disk.
        logger.info(f"保存视频文件: {video_path}")
        with open(video_path, "wb") as buffer:
            shutil.copyfileobj(video.file, buffer)

        # Per-task frame directory. BUGFIX: keyed by task_id rather than the raw
        # filename, so two uploads with the same name no longer clobber each
        # other's frames; the frame_path URLs below use the same key.
        frames_subdir = task_id
        task_frames_dir = config.FRAMES_DIR / frames_subdir
        task_frames_dir.mkdir(parents=True, exist_ok=True)

        async def generate_results():
            """Yield one JSON line per analyzed frame, then a completion line."""
            cap = cv2.VideoCapture(str(video_path))
            # BUGFIX: guard against fps == 0 (corrupt/missing metadata), which
            # previously made `frame_count % fps` raise ZeroDivisionError.
            fps = int(cap.get(cv2.CAP_PROP_FPS)) or 25
            frame_count = 0
            consecutive_detections = 0     # consecutive-match counter
            first_detection_second = None  # second of the first match in the streak

            try:
                while True:
                    success, frame = cap.read()
                    if not success:
                        break

                    if frame_count % fps == 0:  # sample one frame per second
                        current_second = frame_count // fps
                        frame_path = str(task_frames_dir / f"frame_{current_second}.jpg")
                        cv2.imwrite(frame_path, frame)

                        if preprocess_image(frame_path):
                            is_match, description, confidence = await analyze_image(frame_path, object_str)

                            if is_match:
                                consecutive_detections += 1
                                if consecutive_detections == 1:
                                    first_detection_second = current_second
                            else:
                                consecutive_detections = 0
                                first_detection_second = None

                            result = {
                                "status": "success",
                                "frame": {
                                    "second": current_second,
                                    "is_match": is_match,
                                    "description": description,
                                    "confidence": confidence,
                                    "frame_path": f"/frames/{frames_subdir}/frame_{current_second}.jpg"
                                }
                            }
                            yield json.dumps(result) + "\n"

                            # Stop once the target is seen in two consecutive samples.
                            if consecutive_detections >= 2:
                                final_result = {
                                    "status": "complete",
                                    "message": f"目标已连续检测到两次，首次检测时间为第 {first_detection_second} 秒",
                                    "first_detection_time": first_detection_second
                                }
                                yield json.dumps(final_result) + "\n"
                                break

                    frame_count += 1

            finally:
                cap.release()

        return StreamingResponse(generate_results(), media_type="application/json")

    except HTTPException:
        # BUGFIX: re-raise deliberate HTTP errors (400/503) instead of letting
        # the generic handler below flatten them into opaque 500 responses.
        raise
    except Exception as e:
        logger.error(f"分析任务 {task_id} 失败: {str(e)}")
        return JSONResponse(
            status_code=500,
            content={"status": "error", "message": str(e)}
        )

if __name__ == "__main__":
    import uvicorn

    # Development entry point: listen on all interfaces, port 8000.
    uvicorn.run(app, host="0.0.0.0", port=8000)
