import logging
from typing import Dict, List, Optional, Tuple

import cv2
import numpy as np
import torch

from hand.track.umetrack.camera.camera import PinholePlaneCameraModel, CameraModel
from hand.track.umetrack.hand.hand import HandModel
from hand.track.umetrack.hand.hand_skinning import _skin_points, _get_skinning_weights
from hand.track.umetrack.hand.hand_pose_utils import all_vertices_from_hand_pose, landmarks_from_hand_pose
from hand.track.umetrack.pipelines.track.tracking_result import SingleHandPose

logger = logging.getLogger(__name__)

def draw_hand_keypoints(
    image: np.ndarray,
    keypoints: np.ndarray,
    color: Tuple[int, int, int] = (0, 255, 0),
    radius: int = 3,
    thickness: int = -1,
    camera: "Optional[PinholePlaneCameraModel]" = None
) -> np.ndarray:
    """
    Draw hand keypoints and their skeleton connections on an image.

    Args:
        image: Input image (H, W, 3); drawn on in place.
        keypoints: Keypoint coordinates (N, 3) in world coordinates.
            When no camera is given, the first two columns are used
            directly as pixel coordinates.
        color: BGR color tuple for points and connection lines.
        radius: Circle radius for each keypoint.
        thickness: Circle thickness (-1 means filled).
        camera: Camera model used to project 3D points to 2D image
            coordinates; if None, no projection is performed.

    Returns:
        The image with keypoints and skeleton connections drawn.
    """
    if camera is None:
        # No projection possible — interpret the first two columns as pixels.
        logger.warning("未提供相机模型，将直接使用关键点的前两个坐标作为2D坐标")
        keypoints_2d = keypoints[:, :2].astype(np.int32)
    else:
        # Project world-space points: world -> camera (eye) -> image plane.
        keypoints_eye = camera.world_to_eye(keypoints)
        keypoints_2d = camera.eye_to_window(keypoints_eye).astype(np.int32)

    height, width = image.shape[:2]

    def _in_bounds(pt) -> bool:
        # True when the pixel coordinate lies inside the image.
        return 0 <= pt[0] < width and 0 <= pt[1] < height

    # Draw every keypoint that falls inside the image.
    for point in keypoints_2d:
        if _in_bounds(point):
            cv2.circle(image, tuple(point), radius, color, thickness)

    # Landmark index layout (per the LANDMARK enum ordering):
    #  0-4:   fingertips (thumb, index, middle, ring, pinky)
    #  5:     wrist joint
    #  6-7:   thumb intermediate / distal frames
    #  8-10:  index proximal / intermediate / distal frames
    #  11-13: middle proximal / intermediate / distal frames
    #  14-16: ring proximal / intermediate / distal frames
    #  17-19: pinky proximal / intermediate / distal frames
    #  20:    palm center
    connections = [
        # Thumb chain: wrist -> intermediate -> distal -> tip
        (5, 6), (6, 7), (7, 0),
        # Index chain: wrist -> proximal -> intermediate -> distal -> tip
        (5, 8), (8, 9), (9, 10), (10, 1),
        # Middle chain
        (5, 11), (11, 12), (12, 13), (13, 2),
        # Ring chain
        (5, 14), (14, 15), (15, 16), (16, 3),
        # Pinky chain
        (5, 17), (17, 18), (18, 19), (19, 4),
        # Palm: wrist and each proximal joint to the palm center
        (5, 20), (8, 20), (11, 20), (14, 20), (17, 20),
    ]

    # Draw a bone only when both endpoints exist and are inside the image.
    num_points = len(keypoints_2d)
    for start_idx, end_idx in connections:
        if (start_idx < num_points and end_idx < num_points
                and _in_bounds(keypoints_2d[start_idx])
                and _in_bounds(keypoints_2d[end_idx])):
            cv2.line(
                image,
                tuple(keypoints_2d[start_idx]),
                tuple(keypoints_2d[end_idx]),
                color,
                2
            )

    return image

def draw_hand_mesh(
    image: np.ndarray,
    hand_model: HandModel,
    hand_pose: SingleHandPose,
    hand_idx: int,
    color: Tuple[int, int, int] = (0, 255, 0),
    thickness: int = 1,
    alpha: float = 1.0,
    camera: "Optional[PinholePlaneCameraModel]" = None
) -> np.ndarray:
    """
    Draw the posed hand mesh as a wireframe on an image.

    Args:
        image: Input image (H, W, 3); drawn on in place.
        hand_model: Hand model providing mesh vertices and triangles.
        hand_pose: Hand pose (SingleHandPose object).
        hand_idx: Hand index (0 = right hand, 1 = left hand).
        color: BGR color tuple.
        thickness: Line thickness for triangle edges.
        alpha: Blending factor in [0.0, 1.0]; 1.0 draws opaquely.
        camera: Camera model used to project 3D points to 2D image
            coordinates; if None, only the keypoint skeleton is drawn.

    Returns:
        The image with the hand mesh (or fallback keypoints) drawn.

    Raises:
        TypeError: If the model's triangle data is neither a torch
            tensor nor a numpy array.
    """
    # Work on an overlay so the result can be alpha-blended at the end.
    overlay = image.copy()

    # Without mesh data there is nothing to rasterize.
    if hand_model.mesh_vertices is None or hand_model.mesh_triangles is None:
        logger.warning("手部模型缺少网格数据，无法绘制网格")
        return image

    triangles = hand_model.mesh_triangles
    if isinstance(triangles, torch.Tensor):
        triangles = triangles.detach().cpu().numpy()
    elif not isinstance(triangles, np.ndarray):
        raise TypeError(f"不支持的三角形数据类型: {type(triangles)}")

    if camera is None:
        # No camera: cannot project the mesh, so fall back to the keypoint
        # skeleton.  Uses the module-level landmarks_from_hand_pose import
        # (the previous in-function import from lib.tracker duplicated and
        # conflicted with it).
        logger.warning("未提供相机模型，将只绘制关键点连接")
        keypoints = landmarks_from_hand_pose(hand_model, hand_pose, hand_idx)
        return draw_hand_keypoints(image, keypoints, color, thickness=thickness, camera=None)

    try:
        # Pose all mesh vertices in world space.
        vertices = all_vertices_from_hand_pose(hand_model, hand_pose, hand_idx)  # [V, 3]

        # Project to 2D: world -> camera (eye) -> image plane.
        vertices_eye = camera.world_to_eye(vertices)  # [V, 3]
        vertices_2d = camera.eye_to_window(vertices_eye).astype(np.int32)  # [V, 2]

        for triangle in triangles:
            # Indices must be integer typed for fancy indexing.
            triangle = triangle.astype(np.int32)
            triangle_vertices = vertices_2d[triangle]  # [3, 2]

            # Draw only triangles fully inside the image whose vertices all
            # lie in front of the camera (positive eye-space depth).
            if (np.all((triangle_vertices >= 0) & (triangle_vertices < [image.shape[1], image.shape[0]])) and
                np.all(vertices_eye[triangle, 2] > 0)):
                # Outline the triangle edge by edge.
                for i in range(3):
                    pt1 = tuple(triangle_vertices[i])
                    pt2 = tuple(triangle_vertices[(i + 1) % 3])
                    cv2.line(overlay, pt1, pt2, color, thickness)

    except Exception as e:
        # Log with traceback, then fall back to drawing keypoints so the
        # caller still gets a useful visualization.
        logger.exception(f"绘制网格时出错: {str(e)}")
        keypoints = landmarks_from_hand_pose(hand_model, hand_pose, hand_idx)
        return draw_hand_keypoints(image, keypoints, color, thickness=thickness, camera=camera)

    # Blend the overlay back into the original image.
    if alpha < 1.0:
        cv2.addWeighted(overlay, alpha, image, 1 - alpha, 0, image)
    else:
        image[:] = overlay[:]

    return image

def draw_hand_mesh_multi_view(
    image: np.ndarray,
    hand_model: HandModel,
    hand_pose: SingleHandPose,
    hand_idx: int,
    cameras: List[CameraModel],
    camera_angles: List[float],
    grid_size: Tuple[int, int] = (2, 2),
    view_size: Tuple[int, int] = (640, 480),
    mesh_color: Tuple[int, int, int] = (0, 255, 0),
    keypoint_color: Tuple[int, int, int] = (255, 0, 0),
    mesh_thickness: int = 1,
    keypoint_radius: int = 3,
    alpha: float = 0.8,
    draw_keypoints: bool = True,
    draw_mesh: bool = True,
    background_color: Tuple[int, int, int] = (0, 0, 0)
) -> np.ndarray:
    """
    Render the hand mesh and keypoints from multiple camera views and
    tile the per-view renderings into a single grid image.

    Args:
        image: Input image (H, W, 3). NOTE(review): currently unused by
            the rendering below; kept for interface compatibility.
        hand_model: Hand model providing mesh vertices and triangles.
        hand_pose: Hand pose.
        hand_idx: Hand index (0 = right hand, 1 = left hand).
        cameras: List of camera models, one per view.
        camera_angles: Camera angle (degrees) per view, used for labels.
        grid_size: Grid layout (rows, cols); extra cameras are ignored.
        view_size: Per-view image size (width, height).
        mesh_color: Mesh wireframe color (BGR).
        keypoint_color: Keypoint color (BGR).
        mesh_thickness: Mesh line thickness.
        keypoint_radius: Keypoint circle radius.
        alpha: Blending factor. NOTE(review): currently unused here;
            kept for interface compatibility.
        draw_keypoints: Whether to draw keypoints.
        draw_mesh: Whether to draw the mesh wireframe.
        background_color: Canvas background color (BGR).

    Returns:
        The tiled multi-view image of shape
        (view_height * rows, view_width * cols, 3).
    """
    rows, cols = grid_size
    view_width, view_height = view_size

    # Canvas holding all tiled views.
    canvas = np.full(
        (view_height * rows, view_width * cols, 3),
        background_color,
        dtype=np.uint8
    )

    # Pose vertices/landmarks once in world space; shared by every view.
    vertices = all_vertices_from_hand_pose(hand_model, hand_pose, hand_idx)  # [V, 3]
    keypoints = landmarks_from_hand_pose(hand_model, hand_pose, hand_idx)  # [21, 3]

    for i, (camera, angle) in enumerate(zip(cameras, camera_angles)):
        if i >= rows * cols:
            break  # no more grid cells available

        # Grid cell position of this view on the canvas.
        row = i // cols
        col = i % cols
        y_start = row * view_height
        x_start = col * view_width

        # Per-view canvas, composited into the main canvas at the end.
        view_canvas = np.full((view_height, view_width, 3), background_color, dtype=np.uint8)

        try:
            # Project mesh vertices to 2D for this camera.
            vertices_eye = camera.world_to_eye(vertices)  # [V, 3]
            vertices_2d = camera.eye_to_window(vertices_eye).astype(np.int32)  # [V, 2]

            # Project landmarks to 2D for this camera.
            keypoints_eye = camera.world_to_eye(keypoints)  # [21, 3]
            keypoints_2d = camera.eye_to_window(keypoints_eye).astype(np.int32)  # [21, 2]

            # Wireframe mesh.
            if draw_mesh and hand_model.mesh_triangles is not None:
                triangles = hand_model.mesh_triangles
                if isinstance(triangles, torch.Tensor):
                    triangles = triangles.detach().cpu().numpy()

                for triangle in triangles:
                    triangle = triangle.astype(np.int32)
                    triangle_vertices = vertices_2d[triangle]  # [3, 2]

                    # Only triangles fully inside the view and in front of
                    # the camera (positive eye-space depth).
                    if (np.all((triangle_vertices >= 0) & (triangle_vertices < [view_width, view_height])) and
                        np.all(vertices_eye[triangle, 2] > 0)):
                        for j in range(3):
                            pt1 = tuple(triangle_vertices[j])
                            pt2 = tuple(triangle_vertices[(j + 1) % 3])
                            cv2.line(view_canvas, pt1, pt2, mesh_color, mesh_thickness)

            # Keypoints.  Pair each 2D point with its own index so the
            # depth test reads the matching eye-space z (the previous
            # .tolist().index(...) lookup returned the FIRST equal pixel,
            # giving the wrong depth for duplicate projections).
            if draw_keypoints:
                for k, point in enumerate(keypoints_2d):
                    if (0 <= point[0] < view_width and 0 <= point[1] < view_height and
                        keypoints_eye[k, 2] > 0):
                        cv2.circle(view_canvas, tuple(point), keypoint_radius, keypoint_color, -1)

            # View label (index and camera angle).
            cv2.putText(
                view_canvas,
                f"View {i+1} ({angle:.1f}°)",
                (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (255, 255, 255),
                2
            )

        except Exception as e:
            # Keep rendering the remaining views; mark this one as failed.
            logger.error(f"视角 {i+1} 绘制时出错: {str(e)}")
            cv2.putText(
                view_canvas,
                f"Error in View {i+1}",
                (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX,
                1,
                (0, 0, 255),
                2
            )

        # Composite this view into its grid cell.
        canvas[y_start:y_start + view_height, x_start:x_start + view_width] = view_canvas

    return canvas

def draw_hand_mesh_multi_view_with_crop(
    image: np.ndarray,
    hand_model: HandModel,
    hand_pose: SingleHandPose,
    hand_idx: int,
    cameras: List[CameraModel],
    camera_angles: List[float],
    grid_size: Tuple[int, int] = (2, 2),
    view_size: Tuple[int, int] = (640, 480),
    mesh_color: Tuple[int, int, int] = (0, 255, 0),
    keypoint_color: Tuple[int, int, int] = (255, 0, 0),
    mesh_thickness: int = 1,
    keypoint_radius: int = 3,
    alpha: float = 0.8,
    draw_keypoints: bool = True,
    draw_mesh: bool = True,
    background_color: Tuple[int, int, int] = (0, 0, 0),
    focal_multiplier: float = 0.95,
    min_required_vis_landmarks: int = 19
) -> np.ndarray:
    """
    Render the hand mesh and keypoints from multiple views using
    hand-centered crop cameras, tiled into a single grid image.

    This is a thin wrapper around :func:`draw_hand_mesh_multi_view`:
    it first derives a cropped camera model per input camera (framing
    the posed hand), then delegates the actual rendering.

    Args:
        image: Input image (H, W, 3); forwarded unchanged.
        hand_model: Hand model providing mesh vertices and triangles.
        hand_pose: Hand pose.
        hand_idx: Hand index (0 = right hand, 1 = left hand).
        cameras: Original (uncropped) camera models.
        camera_angles: Camera angle per camera, used for view labels.
        grid_size: Grid layout (rows, cols).
        view_size: Per-view image size (width, height); also the crop
            camera image size.
        mesh_color: Mesh wireframe color (BGR).
        keypoint_color: Keypoint color (BGR).
        mesh_thickness: Mesh line thickness.
        keypoint_radius: Keypoint circle radius.
        alpha: Blending factor (0.0-1.0); forwarded unchanged.
        draw_keypoints: Whether to draw keypoints.
        draw_mesh: Whether to draw the mesh wireframe.
        background_color: Canvas background color (BGR).
        focal_multiplier: Focal-length multiplier for the crop cameras.
        min_required_vis_landmarks: Minimum number of landmarks that
            must be visible for a crop camera to be generated.

    Returns:
        The tiled multi-view image.
    """
    from lib.tracker.perspective_crop import gen_crop_cameras_from_pose

    # Derive a crop camera per view, framed around all 21 landmarks of
    # the posed hand.  Keys identify the source camera index.
    crop_cameras = gen_crop_cameras_from_pose(
        cameras=cameras,
        camera_angles=camera_angles,
        hand_model=hand_model,
        hand_pose=hand_pose,
        hand_idx=hand_idx,
        num_crop_points=21,  # frame all 21 landmarks
        new_image_size=view_size,
        focal_multiplier=focal_multiplier,
        min_required_vis_landmarks=min_required_vis_landmarks
    )

    # Keep only the cameras that produced a valid crop, and look up the
    # matching angle for each one by its source index.
    selected_cameras = list(crop_cameras.values())
    selected_angles = [camera_angles[key] for key in crop_cameras.keys()]

    # Delegate the actual multi-view rendering to the shared routine.
    return draw_hand_mesh_multi_view(
        image=image,
        hand_model=hand_model,
        hand_pose=hand_pose,
        hand_idx=hand_idx,
        cameras=selected_cameras,
        camera_angles=selected_angles,
        grid_size=grid_size,
        view_size=view_size,
        mesh_color=mesh_color,
        keypoint_color=keypoint_color,
        mesh_thickness=mesh_thickness,
        keypoint_radius=keypoint_radius,
        alpha=alpha,
        draw_keypoints=draw_keypoints,
        draw_mesh=draw_mesh,
        background_color=background_color
    )