import io
import os
import json
import torch
import random
import base64
import numpy as np

from typing import Optional
from tqdm import tqdm
from torch.utils.data import DataLoader
from PIL import Image, ImageDraw
from fastapi import APIRouter, HTTPException, Query
from fastapi.responses import JSONResponse

# 导入你的数据集和模型
from hand.track.umetrack.datasets.video_pose_data import load_hand_model_from_dict
from hand.track.umetrack.hand.hand import batch_hand, hand_model_to_device
from hand.track.umetrack.datasets.h5_dataset_yvr import HandPoseDataset
from hand.track.umetrack.hand.hand_pose_utils import (
    landmarks_from_batch_hand_pose,
)
from hand.track.umetrack.model_v3.dyd_model import DYDModelV2
from hand.track.umetrack.model_v3.model_opts import ModelOpts
from .utils.adb import ADBManager

# All tracking endpoints are grouped under the /track prefix.
router = APIRouter(prefix="/track", tags=["tracking"])

# Module-level state, populated by load_model() when this module is imported.
model = None                 # trained DYDModelV2 network (None until loaded)
device = None                # torch.device used for inference
generic_hand_model = None    # user-agnostic hand model used to compute landmarks
adb_manager = ADBManager()   # ADB helper used by the /delay endpoint (test_delay)


def convert_numpy_types(obj):
    """Recursively convert numpy scalars/arrays into JSON-serializable Python types.

    Handles dicts (keys and values), lists and tuples (both become lists),
    numpy integer/float/bool scalars, and ndarrays. Anything else is returned
    unchanged.

    Args:
        obj: arbitrary object, possibly containing numpy types.

    Returns:
        An equivalent structure built only from native Python types.
    """
    if isinstance(obj, dict):
        return {convert_numpy_types(k): convert_numpy_types(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        # Tuples are normalized to lists, matching JSON array semantics.
        return [convert_numpy_types(item) for item in obj]
    # np.integer / np.floating are the abstract bases of all sized variants
    # (np.uint8, np.float32, ...), so listing each size is redundant.
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if isinstance(obj, np.bool_):
        return bool(obj)
    if isinstance(obj, np.ndarray):
        # tolist() already yields nested native Python scalars; no recursion needed.
        return obj.tolist()
    return obj


def compute_mpjpe(pred_landmarks, gt_landmarks):
    """Compute MPJPE (Mean Per Joint Position Error) over a batch.

    Args:
        pred_landmarks: [B, N, 3] predicted landmark positions.
        gt_landmarks: [B, N, 3] ground-truth landmark positions.

    Returns:
        (mpjpe, per_sample_errors): the scalar mean error across the batch,
        and a [B] tensor of per-sample mean errors.
    """
    deltas = pred_landmarks - gt_landmarks
    per_joint = deltas.norm(dim=-1)            # [B, N] Euclidean distance per joint
    per_sample_errors = per_joint.mean(dim=-1)  # [B] mean over joints
    return per_sample_errors.mean().item(), per_sample_errors


def compute_pa_mpjpe(pred_landmarks, gt_landmarks):
    """Compute PA-MPJPE (Procrustes Aligned MPJPE) for batched landmarks.

    Each sample's prediction is rigidly aligned to the ground truth
    (translation + rotation, no scaling) using the Kabsch algorithm before
    measuring the mean per-joint error.

    Args:
        pred_landmarks: [B, N, 3] predicted landmarks (torch tensor).
        gt_landmarks: [B, N, 3] ground-truth landmarks (torch tensor).

    Returns:
        (pa_mpjpe, per_sample_pa_errors): overall mean aligned error and the
        list of per-sample mean errors.
    """
    pred_landmarks_np = pred_landmarks.detach().cpu().numpy()
    gt_landmarks_np = gt_landmarks.detach().cpu().numpy()

    per_sample_pa_errors = []

    for i in range(pred_landmarks_np.shape[0]):
        pred = pred_landmarks_np[i]
        gt = gt_landmarks_np[i]

        # Remove translation.
        pred_centered = pred - pred.mean(axis=0)
        gt_centered = gt - gt.mean(axis=0)

        # Cross-covariance matrix.
        H = pred_centered.T @ gt_centered

        # Optimal rotation via SVD (Kabsch algorithm).
        U, S, Vt = np.linalg.svd(H)
        R = Vt.T @ U.T

        # Guard against an improper rotation (reflection).
        if np.linalg.det(R) < 0:
            Vt[-1, :] *= -1
            R = Vt.T @ U.T

        # BUGFIX: R maps COLUMN vectors pred -> gt, so row-vector points must
        # be rotated with R.T. The previous `pred_centered @ R` applied the
        # inverse rotation, inflating the aligned error.
        pred_aligned = pred_centered @ R.T

        # Mean per-joint distance after alignment.
        error = np.linalg.norm(pred_aligned - gt_centered, axis=1).mean()
        per_sample_pa_errors.append(error)

    pa_mpjpe = np.mean(per_sample_pa_errors)
    return pa_mpjpe, per_sample_pa_errors


def load_model() -> bool:
    """Load the trained tracking model and the generic hand model into module globals.

    Populates the module-level ``model``, ``device`` and ``generic_hand_model``.
    Missing weight or hand-model files are tolerated: a warning is printed and
    the model stays randomly initialized / the hand model stays ``None``.

    Returns:
        True on success, False if any step raised an exception.
    """
    global model, device, generic_hand_model

    try:
        # Prefer GPU when available, otherwise fall back to CPU.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"Using device: {device}")

        # Default model configuration.
        model_opts = ModelOpts()

        # Build the network; input crops are 96x96.
        model = DYDModelV2(
            input_size=(96, 96),
            model_opts=model_opts
        )
        # NOTE(review): presumably disables the classification branch for
        # pure pose inference — confirm against DYDModelV2.
        model.classification_head = None

        # Path to the trained checkpoint (relative to the working directory).
        model_path = "files/hand/models/track/ckpt_epoch300.pt"

        if os.path.exists(model_path):
            checkpoint = torch.load(model_path, map_location=device)
            # Support both full training checkpoints and bare state dicts.
            if 'model_state_dict' in checkpoint:
                model.load_state_dict(checkpoint['model_state_dict'])
            else:
                model.load_state_dict(checkpoint)
            print(f"模型权重加载成功: {model_path}")
        else:
            print(f"警告: 未找到模型权重文件 {model_path}，使用随机初始化模型")

        model = model.to(device)
        model.eval()  # inference mode: freezes dropout / batch-norm behavior

        # Load the generic (user-agnostic) hand model used for landmark computation.
        hand_model_path = "files/hand/models/track/generic_hand_model.json"
        if os.path.exists(hand_model_path):
            with open(hand_model_path) as f:
                generic_hand_model = load_hand_model_from_dict(json.load(f))
                generic_hand_model = hand_model_to_device(generic_hand_model, device)
            print(f"手部模型加载成功: {hand_model_path}")
        else:
            print(f"警告: 未找到手部模型文件 {hand_model_path}")

        # Report parameter counts as a quick sanity check.
        total_params = sum(p.numel() for p in model.parameters())
        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        print(f"模型参数 - 总计: {total_params:,}, 可训练: {trainable_params:,}")

        return True
    except Exception as e:
        print(f"模型加载失败: {e}")
        return False


@router.get("/infer")
async def inference_tracking_sample(
        split: str = Query("training", description="数据集分割: training 或 testing")
):
    """Run hand-tracking inference on one randomly selected dataset sample.

    Args:
        split: dataset split, "training" or "testing".

    Returns JSON containing:
        - multi-view original images (base64 PNG)
        - per-view projections of predicted landmarks (red dots)
        - per-view projections of ground-truth landmarks (green dots)
        - predicted 3D landmark coordinates
        - ground-truth 3D landmark coordinates

    Raises:
        HTTPException: 500 if the model is not initialized or inference fails,
        400 for an invalid split.
    """
    global model, device, generic_hand_model

    if model is None or generic_hand_model is None:
        raise HTTPException(status_code=500, detail="模型或手部模型未初始化")

    if split not in ["training", "testing"]:
        raise HTTPException(status_code=400, detail="split参数必须是'training'或'testing'")

    try:
        # Dataset location (relative to the working directory).
        data_root = "files/hand/datasets"

        # Build the dataset for the requested split.
        dataset = HandPoseDataset(
            root_dir=data_root,
            split=split,
            num_views=4
        )

        # Pick one sample uniformly at random.
        random_sample_id = random.randint(0, len(dataset) - 1)
        sample = dataset[random_sample_id]

        # Move sample tensors to the inference device.
        images = sample["images"].unsqueeze(0).to(device)  # add batch dimension
        view_mask = sample['view_mask'].unsqueeze(0).to(device)
        total_xfs = sample['total_xfs'].unsqueeze(0).to(device)
        world_to_ref_camera = sample["world_to_ref_camera"].unsqueeze(0).to(device)
        gt_hand_idx = sample["hand_idx"].unsqueeze(0).to(device)

        # Ground-truth 3D landmarks, if the sample provides them.
        gt_landmarks_3d = None
        if 'landmarks' in sample:
            gt_landmarks_3d = sample['landmarks'].numpy()

        # Inference.
        with torch.no_grad():
            pred_joints, pred_wrist, skel_scales, pred_hand_logits = model(
                images, view_mask, total_xfs, world_to_ref_camera
            )

            # Map the wrist transform from reference-camera space back to world
            # space; translation scaled by 1000 — presumably meters to
            # millimeters, TODO confirm units.
            pred_wrist = torch.inverse(world_to_ref_camera) @ pred_wrist
            pred_wrist[..., :3, 3] *= 1000

            # Batch the generic hand model for this single sample.
            batch_generic = batch_hand(generic_hand_model, 1)  # batch_size=1

            # Landmarks from the predicted hand pose.
            pred_landmarks_3d = landmarks_from_batch_hand_pose(
                batch_generic, pred_joints, pred_wrist, gt_hand_idx
            )

            # Keep the first 20 landmarks to match the ground-truth layout.
            pred_landmarks_3d = pred_landmarks_3d[:, :20, :]

            # Drop the batch dimension and move to numpy.
            pred_landmarks_3d_np = pred_landmarks_3d.squeeze(0).cpu().numpy()

        # Per-view camera parameters.
        intrinsics = sample['intrinsics'].numpy()  # [V, 3, 3]
        extrinsics = sample['extrinsics'].numpy()  # [V, 4, 4] (camera_to_world)
        num_views = intrinsics.shape[0]
        # Scale camera translations to the same (millimeter) units as the
        # landmarks — presumably meters to millimeters, TODO confirm.
        extrinsics[:, :3, 3] *= 1000
        # Render each view with projected landmarks.
        view_results = []
        original_images_base64 = []
        annotated_images_base64 = []

        for view_idx in range(num_views):
            # Raw grayscale image for this view.
            view_image = sample['images'][view_idx].squeeze().numpy()  # [H, W]

            # Scale normalized [0, 1] images up to uint8 range.
            if view_image.max() <= 1.0:
                view_image_uint8 = (view_image * 255).astype(np.uint8)
            else:
                view_image_uint8 = view_image.astype(np.uint8)

            # To PIL (grayscale).
            pil_img = Image.fromarray(view_image_uint8, mode='L')

            # RGB copy so colored markers can be drawn.
            rgb_img = pil_img.convert('RGB')

            # Camera parameters for this view.
            K = intrinsics[view_idx]  # intrinsic matrix [3, 3]
            extrinsic = extrinsics[view_idx]  # extrinsic matrix [4, 4] (camera_to_world)

            # Invert camera_to_world to get world_to_camera.
            world_to_camera = np.linalg.inv(extrinsic)

            # Project the predicted 3D landmarks into this view.
            pred_points_2d = project_3d_to_2d(pred_landmarks_3d_np, K, world_to_camera)

            gt_points_2d = None
            if gt_landmarks_3d is not None:
                # Landmark reordering to match the prediction layout
                # (adjust to your data if the convention differs).
                order = [4, 8, 12, 16, 20, 0, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19, 1]
                gt_landmarks_reordered = gt_landmarks_3d[order, :]
                gt_landmarks_reordered = gt_landmarks_reordered[:20, :]
                gt_points_2d = project_3d_to_2d(gt_landmarks_reordered, K, world_to_camera)

            # Draw landmarks on the image.
            draw = ImageDraw.Draw(rgb_img)

            # Predicted landmarks in red.
            if pred_points_2d is not None:
                for point in pred_points_2d:
                    if point is not None:
                        x, y = point
                        # red dot
                        draw.ellipse([(x - 3, y - 3), (x + 3, y + 3)], fill='red', outline='red')

            # Ground-truth landmarks in green.
            if gt_points_2d is not None:
                for point in gt_points_2d:
                    if point is not None:
                        x, y = point
                        # green dot
                        draw.ellipse([(x - 2, y - 2), (x + 2, y + 2)], fill='green', outline='green')

            # Encode both images as base64 PNG.
            # Original image.
            buffer_original = io.BytesIO()
            pil_img.save(buffer_original, format="PNG")
            original_base64 = base64.b64encode(buffer_original.getvalue()).decode('utf-8')

            # Annotated image.
            buffer_annotated = io.BytesIO()
            rgb_img.save(buffer_annotated, format="PNG")
            annotated_base64 = base64.b64encode(buffer_annotated.getvalue()).decode('utf-8')

            # Collect per-view results.
            view_result = {
                "view_index": view_idx,
                "intrinsics": convert_numpy_types(K.tolist()),
                "extrinsics": convert_numpy_types(extrinsic.tolist()),
                "predicted_points_2d": convert_numpy_types(
                    [list(p) if p is not None else None for p in pred_points_2d]),
                "ground_truth_points_2d": convert_numpy_types(
                    [list(p) if p is not None else None for p in gt_points_2d]) if gt_points_2d is not None else None
            }

            view_results.append(view_result)
            original_images_base64.append(original_base64)
            annotated_images_base64.append(annotated_base64)

        # 3D error metrics, only when ground truth is available.
        projection_errors = None
        if gt_landmarks_3d is not None:
            # Same reordering as above so prediction and ground truth align.
            order = [4, 8, 12, 16, 20, 0, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19, 1]
            gt_landmarks_reordered = gt_landmarks_3d[order, :]
            gt_landmarks_reordered = gt_landmarks_reordered[:20, :]

            # MPJPE: mean Euclidean distance over landmarks.
            mpjpe = np.mean(np.linalg.norm(pred_landmarks_3d_np - gt_landmarks_reordered, axis=1))

            projection_errors = {
                "mpjpe_3d": float(mpjpe),
                "landmark_count": len(pred_landmarks_3d_np)
            }

        return JSONResponse(content={
            "status": "success",
            "split": split,
            "sample_id": random_sample_id,
            "hand_idx": convert_numpy_types(sample['hand_idx'].item()),
            "reference_camera_idx": convert_numpy_types(sample['reference_camera_idx'].item()),
            "images": {
                "original": original_images_base64,
                "annotated": annotated_images_base64
            },
            "landmarks_3d": {
                "predicted": convert_numpy_types(pred_landmarks_3d_np.tolist()),
                "ground_truth": convert_numpy_types(gt_landmarks_3d.tolist()) if gt_landmarks_3d is not None else None
            },
            "projection_results": view_results,
            "projection_errors": projection_errors
        })

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"推理失败: {str(e)}")


def project_3d_to_2d(points_3d, intrinsic_matrix, world_to_camera, image_size=(96, 96)):
    """Project 3D world points onto a 2D image plane.

    Args:
        points_3d: 3D point coordinates [N, 3].
        intrinsic_matrix: camera intrinsic matrix [3, 3].
        world_to_camera: world-to-camera transform [4, 4].
        image_size: image size (width, height).

    Returns:
        List of length N with (u, v) float tuples; entries are None for points
        behind the camera (z <= 0) or outside the image bounds.
    """
    width, height = image_size
    projected = []

    for pt in points_3d:
        # Homogeneous world coordinates.
        homo = np.array([pt[0], pt[1], pt[2], 1.0])

        # Into camera space.
        cam = world_to_camera @ homo

        # Points at or behind the camera plane cannot be projected.
        if cam[2] <= 0:
            projected.append(None)
            continue

        # Perspective projection through the intrinsics.
        uvw = intrinsic_matrix @ cam[:3]
        u, v = uvw[:2] / uvw[2]

        # Keep only points that land inside the image.
        inside = 0 <= u < width and 0 <= v < height
        projected.append((float(u), float(v)) if inside else None)

    return projected


def tensor_to_base64(tensor):
    """Encode a multi-view image tensor as a list of base64 PNG strings.

    Expects a [NUM_VIEWS, 1, H, W] tensor or array; any other shape yields
    an empty list. Failures are logged and reported as an empty list.
    """
    try:
        # Work on a CPU numpy array regardless of the input container.
        if torch.is_tensor(tensor):
            tensor = tensor.cpu().numpy()

        # Anything other than a 4D multi-view stack is unsupported.
        if len(tensor.shape) != 4:
            return []

        encoded_views = []
        for view in tensor[:, 0]:  # each view is [H, W]
            # Scale normalized [0, 1] images up to the uint8 range.
            if view.max() <= 1.0:
                pixels = (view * 255).astype(np.uint8)
            else:
                pixels = view.astype(np.uint8)

            # Grayscale PNG, then base64.
            buffer = io.BytesIO()
            Image.fromarray(pixels, mode='L').save(buffer, format="PNG")
            encoded_views.append(base64.b64encode(buffer.getvalue()).decode('utf-8'))

        return encoded_views

    except Exception as e:
        print(f"图像转换失败: {e}")
        return []


@router.get("/dataset-info")
async def get_tracking_dataset_info(
        split: str = Query("training", description="数据集分割: training 或 testing")
):
    """Return basic statistics for the tracking dataset.

    Args:
        split: dataset split, "training" or "testing".

    Raises:
        HTTPException: 400 for an invalid split, 500 if the dataset cannot be read.
    """
    if split not in ["training", "testing"]:
        raise HTTPException(status_code=400, detail="split参数必须是'training'或'testing'")

    try:
        # Dataset location. Forward slashes so the path also resolves on POSIX
        # systems (the previous raw backslash string was Windows-only) and to
        # stay consistent with the /infer endpoint.
        data_root = "files/hand/datasets"

        # Instantiate the dataset only to query its statistics.
        dataset = HandPoseDataset(
            root_dir=data_root,
            split=split
        )

        stats = dataset.get_dataset_stats()
        stats = convert_numpy_types(stats)  # make stats JSON-serializable

        return JSONResponse(content={
            "status": "success",
            "dataset_info": {
                "split": split,
                "total_samples": stats['total_samples'],
                "num_files": stats['num_files'],
                "data_dir": stats['data_dir']
            }
        })
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取数据集信息失败: {str(e)}")


@router.get("/acc")
async def test_tracking_accuracy(
        split: str = Query("training", description="数据集分割: training 或 testing"),
        num: Optional[int] = Query(None, description="测试样本数量，为空则使用全部数据")
):
    """Evaluate hand-tracking accuracy over the dataset.

    Args:
        split: dataset split, "training" or "testing".
        num: number of samples to evaluate; None or <= 0 means the full split.

    Returns (JSON):
        - MPJPE (Mean Per Joint Position Error)
        - PA-MPJPE (Procrustes Aligned MPJPE)
        - fingertip MPJPE
        - wrist MPJPE
        plus threshold statistics and a truncated per-sample error distribution.
    """
    global model, device, generic_hand_model

    if model is None or generic_hand_model is None:
        raise HTTPException(status_code=500, detail="模型或手部模型未初始化")

    if split not in ["training", "testing"]:
        raise HTTPException(status_code=400, detail="split参数必须是'training'或'testing'")

    try:
        # Dataset location. Forward slashes so the path also resolves on POSIX
        # systems (the previous raw backslash string was Windows-only) and to
        # stay consistent with the /infer endpoint.
        data_root = "files/hand/datasets"

        # Evaluation dataset.
        test_dataset = HandPoseDataset(
            root_dir=data_root,
            split=split
        )

        # Clamp the requested sample count to the dataset size.
        if num is None or num <= 0:
            num = len(test_dataset)
        else:
            num = min(num, len(test_dataset))

        print(f"开始测试手势追踪精度，使用 {num} 个样本，数据集: {split}")

        # Batched evaluation loader.
        test_dataloader = DataLoader(
            test_dataset,
            batch_size=16,
            shuffle=False,
            num_workers=2,
            pin_memory=True
        )

        # Per-sample error accumulators.
        all_mpjpe_errors = []
        all_pa_mpjpe_errors = []
        all_fingertip_errors = []
        all_wrist_errors = []

        total_samples = 0

        with torch.no_grad():
            pbar = tqdm(test_dataloader, desc="Testing Tracking Accuracy")
            for batch in pbar:
                if total_samples >= num:
                    break

                # Move the batch to the inference device.
                images = batch['images'].to(device)
                view_mask = batch['view_mask'].to(device)
                gt_hand_idx = batch['hand_idx'].to(device)
                total_xfs = batch['total_xfs'].to(device)
                world_to_ref_camera = batch["world_to_ref_camera"].to(device)

                # Skip batches that carry no ground-truth landmarks.
                if 'landmarks' not in batch:
                    continue

                gt_landmarks = batch["landmarks"].to(device)

                # Forward pass.
                pred_joints, pred_wrist, skel_scales, pred_hand_logits = model.forward(
                    images, view_mask, total_xfs, world_to_ref_camera
                )

                # Wrist transform back to world space; translation scaled by
                # 1000 — presumably meters to millimeters, TODO confirm units.
                pred_wrist = torch.inverse(world_to_ref_camera) @ pred_wrist
                pred_wrist[..., :3, 3] *= 1000

                # Batch the generic hand model.
                batch_size = pred_joints.shape[0]
                batch_generic = batch_hand(generic_hand_model, batch_size)

                # Landmarks from the predicted hand pose.
                pred_landmarks = landmarks_from_batch_hand_pose(
                    batch_generic, pred_joints, pred_wrist, gt_hand_idx
                )

                # Keep the first 20 landmarks to match the ground-truth layout.
                pred_landmarks = pred_landmarks[:, :20, :]

                # Landmark reordering to match the prediction layout
                # (adjust to your data if the convention differs).
                order = [4, 8, 12, 16, 20, 0, 2, 3, 5, 6, 7, 9, 10, 11, 13, 14, 15, 17, 18, 19, 1]
                gt_landmarks_reordered = gt_landmarks[:, order, :]
                gt_landmarks_reordered = gt_landmarks_reordered[:, :20, :]

                # Landmark MPJPE and PA-MPJPE.
                mpjpe, per_sample_mpjpe = compute_mpjpe(pred_landmarks, gt_landmarks_reordered)
                all_mpjpe_errors.extend(per_sample_mpjpe.cpu().numpy())

                pa_mpjpe, per_sample_pa_mpjpe = compute_pa_mpjpe(pred_landmarks, gt_landmarks_reordered)
                all_pa_mpjpe_errors.extend(per_sample_pa_mpjpe)

                # Fingertip MPJPE.
                fingertip_indices = [0, 1, 2, 3, 4]  # fingertip indices after reordering
                pred_fingertips = pred_landmarks[:, fingertip_indices, :]
                gt_fingertips = gt_landmarks_reordered[:, fingertip_indices, :]
                fingertip_mpjpe, per_sample_fingertip = compute_mpjpe(pred_fingertips, gt_fingertips)
                all_fingertip_errors.extend(per_sample_fingertip.cpu().numpy())

                # Wrist MPJPE.
                wrist_index = 5  # wrist index after reordering
                pred_wrist_joint = pred_landmarks[:, wrist_index:wrist_index + 1, :]
                gt_wrist_joint = gt_landmarks_reordered[:, wrist_index:wrist_index + 1, :]
                wrist_mpjpe, per_sample_wrist = compute_mpjpe(pred_wrist_joint, gt_wrist_joint)
                all_wrist_errors.extend(per_sample_wrist.cpu().numpy())

                total_samples += batch_size

                # Progress readout.
                pbar.set_postfix({
                    'MPJPE': f'{mpjpe:.3f}',
                    'PA-MPJPE': f'{pa_mpjpe:.3f}',
                    'Fingertip': f'{fingertip_mpjpe:.3f}',
                    'Wrist': f'{wrist_mpjpe:.3f}'
                })

                if total_samples >= num:
                    break

        # Aggregate means (0 when nothing was evaluated).
        avg_mpjpe = np.mean(all_mpjpe_errors) if all_mpjpe_errors else 0
        avg_pa_mpjpe = np.mean(all_pa_mpjpe_errors) if all_pa_mpjpe_errors else 0
        avg_fingertip_mpjpe = np.mean(all_fingertip_errors) if all_fingertip_errors else 0
        avg_wrist_mpjpe = np.mean(all_wrist_errors) if all_wrist_errors else 0

        # Fraction of samples under each error threshold (millimeters).
        thresholds = [10, 20, 30, 40, 50]
        mpjpe_threshold_stats = {}
        pa_mpjpe_threshold_stats = {}

        for threshold in thresholds:
            if all_mpjpe_errors:
                mpjpe_count = np.sum(np.array(all_mpjpe_errors) <= threshold)
                mpjpe_percentage = (mpjpe_count / len(all_mpjpe_errors)) * 100
                mpjpe_threshold_stats[threshold] = {
                    "count": mpjpe_count,
                    "percentage": mpjpe_percentage
                }

            if all_pa_mpjpe_errors:
                pa_mpjpe_count = np.sum(np.array(all_pa_mpjpe_errors) <= threshold)
                pa_mpjpe_percentage = (pa_mpjpe_count / len(all_pa_mpjpe_errors)) * 100
                pa_mpjpe_threshold_stats[threshold] = {
                    "count": pa_mpjpe_count,
                    "percentage": pa_mpjpe_percentage
                }
        return JSONResponse(content={
            "status": "success",
            "test_config": {
                "split": split,
                "num_samples": total_samples,
                "thresholds_mm": thresholds
            },
            "accuracy_metrics": {
                "mpjpe": convert_numpy_types(avg_mpjpe),
                "pa_mpjpe": convert_numpy_types(avg_pa_mpjpe),
                "fingertip_mpjpe": convert_numpy_types(avg_fingertip_mpjpe),
                "wrist_mpjpe": convert_numpy_types(avg_wrist_mpjpe)
            },
            "threshold_statistics": {
                "mpjpe": convert_numpy_types(mpjpe_threshold_stats),
                "pa_mpjpe": convert_numpy_types(pa_mpjpe_threshold_stats)
            },
            "error_distribution": {
                "mpjpe_errors": convert_numpy_types(all_mpjpe_errors[:100]),  # first 100 only, for display
                "pa_mpjpe_errors": convert_numpy_types(all_pa_mpjpe_errors[:100])
            }
        })

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"测试追踪精度失败: {str(e)}")


@router.get("/delay")
async def test_tracking_delay():
    """Measure and return the round-trip delay reported by the ADB manager."""
    measured_delay = adb_manager.test_delay()
    return JSONResponse(content={"delay": measured_delay})


@router.get("/health")
async def tracking_health_check():
    """Health-check endpoint: report model/hand-model/device initialization state."""
    payload = {
        "status": "success",
        "model_initialized": model is not None,
        "hand_model_initialized": generic_hand_model is not None,
        "device": str(device) if device else None,
    }
    return JSONResponse(content=payload)


# Initialize the tracking model once at import time so the endpoints are
# immediately usable when the router is mounted.
try:
    print("正在初始化手势追踪模型...")
    if load_model():
        print("手势追踪模型初始化成功")
    else:
        print("手势追踪模型初始化失败")
except Exception as e:
    print(f"手势追踪模型初始化失败: {e}")
    # Reset the globals so endpoints report "not initialized" (HTTP 500)
    # instead of operating on a half-loaded model.
    model = None
    generic_hand_model = None
