import os
import tempfile
from collections import Counter
from datetime import datetime, timedelta, timezone

import cv2
from deepface import DeepFace
from fastapi import FastAPI, UploadFile, File, Depends, HTTPException
from sqlalchemy.orm import Session
from ultralytics import YOLO

from api.face_recognition import router2
from api.face_registration import router1
from database import get_db
from services.face_db import FaceDBService

# Application entry point: FastAPI instance plus the two face-service routers.
# Registration order of routers is preserved deliberately.
app = FastAPI()
app.include_router(router1)  # face registration endpoints
app.include_router(router2)  # face recognition endpoints

@app.post("/api/recognize_video")
async def recognize_face_in_video(
    file: UploadFile = File(...),
    threshold: float = 0.7,
    db: Session = Depends(get_db)
):
    """Recognize faces and their dominant emotion in an uploaded video.

    The upload is written to a temporary ``.mp4`` file, decoded frame by
    frame with OpenCV, and every sufficiently confident face detection is
    embedded with DeepFace (Facenet) and matched against the face database.
    Frame timestamps are offsets from the processing start time.

    Args:
        file: Uploaded video file (anything OpenCV can decode).
        threshold: Similarity threshold forwarded to the face DB search.
        db: SQLAlchemy session injected by FastAPI.

    Returns:
        dict mapping recognized person name to
        ``{"emotion", "created_at", "updated_at"}``, where ``emotion`` is the
        most frequent emotion seen for that person across all frames.

    Raises:
        HTTPException: 500 if the upload cannot be read,
            400 if the video file cannot be opened.
    """
    # Wall-clock baseline; per-frame timestamps are offsets from this.
    processing_start_time = datetime.now(timezone.utc)

    # Persist the upload to a temp file so OpenCV can open it by path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
        # Capture the path up-front so cleanup works even if read() fails.
        temp_file_path = tmp.name
        try:
            content = await file.read()
            tmp.write(content)
        except Exception as e:
            os.unlink(temp_file_path)  # don't leak the temp file on failure
            raise HTTPException(status_code=500, detail="文件读取失败") from e

    cap = cv2.VideoCapture(temp_file_path)
    if not cap.isOpened():
        os.unlink(temp_file_path)
        raise HTTPException(status_code=400, detail="无法打开视频文件")

    result_by_name = {}

    # Cache the YOLO model on the function object so the expensive weight
    # load happens once per process instead of once per request.
    face_emotion_model = getattr(recognize_face_in_video, "_emotion_model", None)
    if face_emotion_model is None:
        face_emotion_model = YOLO('face-emotion.pt')
        recognize_face_in_video._emotion_model = face_emotion_model

    # One DB service per request suffices; it is invariant across boxes.
    service = FaceDBService(db)

    try:
        fps = cap.get(cv2.CAP_PROP_FPS)
        if fps <= 0:
            fps = 30  # fall back when the container reports no frame rate

        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Absolute timestamp for this frame = start time + frame offset.
            current_frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
            seconds_offset = current_frame / fps
            current_time = processing_start_time + timedelta(seconds=seconds_offset)
            formatted_time = current_time.strftime("%Y-%m-%d %H:%M:%S")

            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results = face_emotion_model(rgb_frame)

            for result in results:
                for box in result.boxes:
                    # Filter low-confidence detections *before* the expensive
                    # embedding extraction and database search.
                    confidence = round(box.conf.item(), 2)
                    if confidence <= 0.5:
                        continue

                    x1, y1, x2, y2 = map(int, box.xyxy.cpu().numpy()[0])
                    img = rgb_frame[y1:y2, x1:x2]
                    if img.size == 0:
                        # Degenerate (zero-area) box; nothing to embed.
                        continue

                    # Feature extraction for the cropped face.
                    embedding_obj = DeepFace.represent(
                        img, model_name='Facenet', enforce_detection=False
                    )

                    # Database search for the closest known face.
                    search_results = service.search_face(
                        embedding_obj[0]['embedding'], threshold=threshold
                    )
                    if not search_results:
                        continue

                    name = search_results[0]['name'] or ""
                    class_id = box.cls.item()
                    emotion = result.names[class_id]

                    entry = result_by_name.get(name)
                    if entry is None:
                        # First sighting: record creation and update time.
                        result_by_name[name] = {
                            "created_at": formatted_time,
                            "updated_at": formatted_time,
                            "emotions": [emotion]
                        }
                    else:
                        # Repeat sighting: bump last-seen time, log emotion.
                        entry["updated_at"] = formatted_time
                        entry["emotions"].append(emotion)

    finally:
        cap.release()
        os.unlink(temp_file_path)

    # Post-processing: collapse per-frame emotions into the most frequent one.
    final_result = {}
    for name, data in result_by_name.items():
        most_common_emotion = Counter(data["emotions"]).most_common(1)[0][0]
        final_result[name] = {
            "emotion": most_common_emotion,
            "created_at": data["created_at"],  # first recognition time
            "updated_at": data["updated_at"]   # last recognition time
        }

    return final_result