# -*- coding: utf-8 -*-
# @Time : 2024/11/30
# @File : api.py

import cv2
import numpy as np
import os
import json
import time
import subprocess
from typing import Dict, List, Union, Optional, Any

from ultralytics import YOLO
from utils.angle_utils import estimate_3d_from_2d, calculate_angle_3d, get_joint_angles_config, get_skeleton_connections
from utils.visualization import draw_skeleton
from utils.image_utils import cv2_add_chinese_text


class PoseAnalysisAPI:
    """姿态分析API接口类"""
    
    def __init__(self, model_path: str = "./weights/yolo11x-pose.pt"):
        """
        初始化姿态分析API
        
        参数:
        - model_path: YOLO模型路径
        """
        # 加载YOLO模型
        self.model = YOLO(model_path)
        
        # 获取关节角度配置
        self.joint_angles = get_joint_angles_config()
        
        # 获取骨架连接关系
        self.connections = get_skeleton_connections()

    def _convert_to_h264_aac(self, input_path: str, output_path: str) -> bool:
        """
        使用FFmpeg将视频转换为H.264+AAC编码
        
        参数:
        - input_path: 输入视频路径
        - output_path: 输出视频路径
        
        返回:
        - 转换是否成功
        """
        try:
            # FFmpeg命令：转换为H.264视频编码 + AAC音频编码
            cmd = [
                'ffmpeg',
                '-i', input_path,           # 输入文件
                '-c:v', 'libx264',          # 视频编码器：H.264
                '-preset', 'medium',        # 编码速度预设
                '-crf', '23',               # 质量因子（18-28，越小质量越好）
                '-c:a', 'aac',              # 音频编码器：AAC
                '-b:a', '128k',             # 音频比特率
                '-movflags', '+faststart',   # 优化网络播放
                '-y',                       # 覆盖输出文件
                output_path                 # 输出文件
            ]
            
            # 执行FFmpeg命令
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=300)
            
            if result.returncode == 0:
                print(f"FFmpeg转换成功: {output_path}")
                # 删除原始文件
                if os.path.exists(input_path) and input_path != output_path:
                    os.remove(input_path)
                return True
            else:
                print(f"FFmpeg转换失败: {result.stderr}")
                return False
                
        except subprocess.TimeoutExpired:
            print("FFmpeg转换超时")
            return False
        except FileNotFoundError:
            print("FFmpeg未找到，请确保已安装FFmpeg")
            return False
        except Exception as e:
            print(f"FFmpeg转换出错: {str(e)}")
            return False

    def analyze_frame(self, frame: np.ndarray) -> Dict[str, Any]:
        """
        分析单帧图像
        
        参数:
        - frame: 输入图像帧
        
        返回:
        - 包含分析结果的字典
        """
        try:
            # 使用YOLO模型检测姿态
            results = self.model(frame, verbose=False)
            
            # 检查是否检测到人
            if not results or len(results) == 0 or len(results[0]) == 0:
                return {
                    "code": 204,  # 成功但无内容
                    "msg": "未检测到人体",
                    "data": None
                }
            
            # 提取检测结果
            all_persons_data = []
            
            for i, result in enumerate(results[0]):
                try:
                    # 获取关键点
                    keypoints = result.keypoints
                    if keypoints is None or not hasattr(keypoints, 'xy') or keypoints.xy is None or len(keypoints.xy) == 0:
                        continue
                    
                    # 获取2D关键点
                    keypoints_2d = keypoints.xy.cpu().numpy()
                    
                    # 估计3D关键点
                    estimated_3d = estimate_3d_from_2d(keypoints_2d)
                    
                    # 计算关节角度
                    angles = {}
                    for angle_info in self.joint_angles:
                        p1_idx, p2_idx, p3_idx = angle_info["points"]
                        name = angle_info["name"]
                        
                        # 确保关键点索引有效
                        if (p1_idx < estimated_3d.shape[0] and 
                            p2_idx < estimated_3d.shape[0] and 
                            p3_idx < estimated_3d.shape[0]):
                            
                            p1 = estimated_3d[p1_idx]
                            p2 = estimated_3d[p2_idx]
                            p3 = estimated_3d[p3_idx]
                            
                            angle = calculate_angle_3d(p1, p2, p3)
                            angles[name] = float(angle)  # 转为Python原生float，确保可序列化
                    
                    # 构建单人数据 - 只包含人员ID和关节角度
                    personData = {
                        "personId": i,
                        "jointAngles": angles
                    }
                    
                    all_persons_data.append(personData)
                    
                except Exception as e:
                    print(f"处理第{i}个人时出错: {str(e)}")
                    continue
            
            # 返回结果
            return {
                "code": 200,
                "msg": "成功",
                "data": all_persons_data
            }
            
        except Exception as e:
            return {
                "code": 500,
                "msg": f"处理帧时出现错误: {str(e)}",
                "data": None
            }

    def analyze_video(self, video_path: str, save_result_video: bool = True) -> Dict[str, Any]:
        """
        分析视频文件
        
        参数:
        - video_path: 视频文件路径
        
        返回:
        - 包含分析结果的字典
        """
        try:
            # 打开视频文件
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                return {
                    "code": 400,
                    "message": f"Cannot open video file: {video_path}",
                    "data": None
                }
            
            # 获取视频信息
            fps = int(cap.get(cv2.CAP_PROP_FPS))
            frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            
            # 准备输出视频写入器
            resultVideoPath = None
            videoWriter = None
            if save_result_video:
                # 生成结果视频路径
                videoName = os.path.splitext(os.path.basename(video_path))[0]
                resultVideoPath = os.path.join('./results', f"{videoName}_result.mp4")
                os.makedirs(os.path.dirname(resultVideoPath), exist_ok=True)
                
                # 初始化视频写入器 - 使用H.264编码
                # 尝试不同的H.264编码器
                fourcc_options = [
                    cv2.VideoWriter_fourcc(*'avc1'),  # H.264 (推荐)
                    cv2.VideoWriter_fourcc(*'H264'),  # H.264 备选
                    cv2.VideoWriter_fourcc(*'XVID'),  # XVID 备选
                    cv2.VideoWriter_fourcc(*'mp4v')   # MP4V 最后备选
                ]
                
                videoWriter = None
                for fourcc in fourcc_options:
                    videoWriter = cv2.VideoWriter(resultVideoPath, fourcc, fps, (width, height))
                    if videoWriter.isOpened():
                        print(f"使用编码器: {fourcc}")
                        break
                    else:
                        videoWriter.release()
                        videoWriter = None
                
                if videoWriter is None:
                    print("警告: 无法初始化视频写入器，将跳过结果视频保存")
                    save_result_video = False
            
            # 分析每一帧
            allFramesData = []
            currentFrame = 0
            
            while cap.isOpened():
                success, frame = cap.read()
                if not success:
                    break
                
                # 分析当前帧
                frameResult = self.analyze_frame(frame)
                
                # 绘制骨架线条到原视频帧
                if save_result_video and frameResult.get("data"):
                    frameWithSkeleton = frame.copy()
                    
                    # 使用YOLO模型重新检测以获取关键点坐标（用于绘制）
                    results = self.model(frame, verbose=False)
                    if results and len(results) > 0 and len(results[0]) > 0:
                        for result in results[0]:
                            if result.keypoints is not None and hasattr(result.keypoints, 'xy'):
                                keypoints_2d = result.keypoints.xy.cpu().numpy()
                                if keypoints_2d.shape[0] > 0:
                                    keypoints_for_draw = keypoints_2d[0]
                                    
                                    # 计算角度用于显示
                                    estimated_3d = estimate_3d_from_2d(keypoints_2d)
                                    angles = {}
                                    for angle_info in self.joint_angles:
                                        p1_idx, p2_idx, p3_idx = angle_info["points"]
                                        name = angle_info["name"]
                                        if (p1_idx < estimated_3d.shape[0] and 
                                            p2_idx < estimated_3d.shape[0] and 
                                            p3_idx < estimated_3d.shape[0]):
                                            p1 = estimated_3d[p1_idx]
                                            p2 = estimated_3d[p2_idx]
                                            p3 = estimated_3d[p3_idx]
                                            angle = calculate_angle_3d(p1, p2, p3)
                                            angles[name] = float(angle)
                                    
                                    # 绘制骨架
                                    frameWithSkeleton = draw_skeleton(
                                        frameWithSkeleton, 
                                        keypoints_for_draw, 
                                        self.connections, 
                                        self.joint_angles, 
                                        angles, 
                                        thickness=2, 
                                        show_keypoint_idx=False
                                    )
                    
                    # 写入结果视频
                    if videoWriter is not None:
                        videoWriter.write(frameWithSkeleton)
                
                # 只提取data部分，添加帧信息
                frameData = {
                    "frameId": currentFrame,
                    "timestamp": currentFrame / fps if fps > 0 else 0,
                    "persons": frameResult.get("data", [])
                }
                
                allFramesData.append(frameData)
                currentFrame += 1
                
                # 打印进度
                if currentFrame % 30 == 0:
                    print(f"处理进度: {currentFrame}/{frameCount} 帧 ({currentFrame/frameCount*100:.1f}%)")
            
            # 释放资源
            cap.release()
            if videoWriter is not None:
                videoWriter.release()
            
            # 如果保存了结果视频，使用FFmpeg进行H.264+AAC编码转换
            finalVideoPath = resultVideoPath
            if save_result_video and resultVideoPath and os.path.exists(resultVideoPath):
                # 创建最终的H.264编码视频路径
                videoName = os.path.splitext(os.path.basename(video_path))[0]
                h264VideoPath = os.path.join('./results', f"{videoName}_result_h264.mp4")
                
                # 使用FFmpeg转换
                if self._convert_to_h264_aac(resultVideoPath, h264VideoPath):
                    finalVideoPath = h264VideoPath
                    print(f"视频已转换为H.264+AAC编码: {h264VideoPath}")
                else:
                    print(f"FFmpeg转换失败，使用原始视频: {resultVideoPath}")
            
            # 构建返回数据
            responseData = {
                "videoInfo": {
                    "fps": fps,
                    "totalFrames": frameCount,
                    "processedFrames": currentFrame
                },
                "frames": allFramesData
            }
            
            # 如果保存了结果视频，添加视频路径信息
            if save_result_video and finalVideoPath:
                responseData["resultVideoPath"] = finalVideoPath
            
            return {
                "code": 200,
                "message": "Success",
                "data": responseData
            }
            
        except Exception as e:
            return {
                "code": 500,
                "message": f"Error processing video: {str(e)}",
                "data": None
            }

    def analyze_video_batch(self, video_paths: List[str]) -> Dict[str, Any]:
        """
        批量分析多个视频
        
        参数:
        - video_paths: 视频文件路径列表
        
        返回:
        - 包含分析结果的字典
        """
        results = {}
        for video_path in video_paths:
            video_name = os.path.basename(video_path)
            results[video_name] = self.analyze_video(video_path)
        
        return {
            "code": 200,
            "msg": "批处理完成",
            "data": results
        }

    def save_result_to_json(self, result: Dict[str, Any], output_path: str) -> Dict[str, Any]:
        """
        将分析结果保存为JSON文件
        
        参数:
        - result: 分析结果
        - output_path: 输出文件路径
        
        返回:
        - 操作状态
        """
        try:
            # 确保输出目录存在
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            
            # 写入JSON文件
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(result, f, ensure_ascii=False, indent=2)
            
            return {
                "code": 200,
                "msg": f"结果已保存到: {output_path}",
                "data": {"file_path": output_path}
            }
            
        except Exception as e:
            return {
                "code": 500,
                "msg": f"保存结果时出现错误: {str(e)}",
                "data": None
            }


# Usage example
if __name__ == "__main__":
    # Build the API with the default pose-model weights
    api = PoseAnalysisAPI(model_path="./weights/yolo11x-pose.pt")

    # Run the full video analysis and time it
    video_path = "./video/跑步.mp4"
    print(f"开始分析视频: {video_path}")

    start_time = time.time()
    result = api.analyze_video(video_path)
    end_time = time.time()

    print(f"分析完成，耗时: {end_time - start_time:.2f} 秒")

    # Persist the analysis result as <video-name>_result.json under ./results
    video_stem = os.path.splitext(os.path.basename(video_path))[0]
    output_path = f"./results/{video_stem}_result.json"
    save_result = api.save_result_to_json(result, output_path)

    print(save_result["msg"])