from typing import Tuple

import cv2
import numpy as np
import torch

from hand.track.umetrack.camera.camera import PinholePlaneCameraModel
from hand.track.umetrack.hand.hand_pose_utils import landmarks_from_hand_pose, all_vertices_from_hand_pose
from .base import Visualizer

# BGR colors used to tell the two tracked hands apart in the overlays.
# NOTE(review): the original comments labeled 0 = left hand and 1 = right
# hand, but the docstrings below (e.g. draw_hand_mesh) say hand_idx 0 is
# the right hand and 1 the left — confirm which convention is correct.
# Also note these are BGR tuples: (255, 0, 0) renders as blue, not red.
HAND_COLORS = {
    0: (0, 255, 0),  # green
    1: (255, 0, 0)  # blue in BGR (original comment said "red" — verify)
}


def draw_hand_keypoints(
        image: np.ndarray,
        keypoints: np.ndarray,
        color: Tuple[int, int, int] = (0, 255, 0),
        radius: int = 3,
        thickness: int = -1,
        camera: PinholePlaneCameraModel = None,
        min_depth: float = 30,
) -> np.ndarray:
    """
    Draw hand keypoints and their skeleton connections on an image (in place).

    Args:
        image: input image (H, W, 3); modified in place.
        keypoints: keypoint coordinates (N, 3) in world space. The skeleton
            connections assume the 21-landmark layout documented below.
        color: BGR color tuple.
        radius: circle radius for each keypoint.
        thickness: circle thickness (-1 means filled).
        camera: camera model used to project 3D world points to 2D pixel
            coordinates. Must not be None.
        min_depth: if ANY keypoint lies closer than this along the camera
            z-axis (same units as the world coordinates), nothing is drawn
            and the image is returned unchanged.

    Returns:
        The image with keypoints and bones drawn (same array as the input).
    """
    # 1. Transform world coordinates into the camera (eye) frame.
    keypoints_eye = camera.world_to_eye(keypoints)
    # Skip the whole hand if any point is too close to (or behind) the camera.
    if (keypoints_eye[:, 2] < min_depth).any():
        return image

    # 2. Project eye coordinates onto the image plane (integer pixels).
    keypoints_2d = camera.eye_to_window(keypoints_eye).astype(np.int32)

    height, width = image.shape[0], image.shape[1]

    def _visible(pt) -> bool:
        # True when the pixel falls inside the image bounds.
        return 0 <= pt[0] < width and 0 <= pt[1] < height

    # Draw each keypoint that lands inside the image.
    for point in keypoints_2d:
        if _visible(point):
            cv2.circle(image, tuple(point), radius, color, thickness)

    # Landmark indices (per the LANDMARK enum ordering):
    #  0-4:  THUMB / INDEX / MIDDLE / RING / PINKY fingertips
    #  5:    WRIST_JOINT
    #  6-7:  thumb intermediate / distal joints
    #  8-10: index proximal / intermediate / distal joints
    # 11-13: middle proximal / intermediate / distal joints
    # 14-16: ring proximal / intermediate / distal joints
    # 17-19: pinky proximal / intermediate / distal joints
    # 20:    PALM_CENTER
    connections = [
        # Thumb: wrist -> intermediate -> distal -> tip
        (5, 6), (6, 7), (7, 0),
        # Index: wrist -> proximal -> intermediate -> distal -> tip
        (5, 8), (8, 9), (9, 10), (10, 1),
        # Middle finger
        (5, 11), (11, 12), (12, 13), (13, 2),
        # Ring finger
        (5, 14), (14, 15), (15, 16), (16, 3),
        # Pinky
        (5, 17), (17, 18), (18, 19), (19, 4),
        # Palm center to wrist and each finger's proximal joint
        (5, 20), (8, 20), (11, 20), (14, 20), (17, 20),
    ]

    # Draw every bone whose endpoints both exist and are on-screen.
    num_points = len(keypoints_2d)
    for start_idx, end_idx in connections:
        if start_idx >= num_points or end_idx >= num_points:
            continue  # fewer landmarks than expected; skip this bone
        start_pt = keypoints_2d[start_idx]
        end_pt = keypoints_2d[end_idx]
        if _visible(start_pt) and _visible(end_pt):
            cv2.line(image, tuple(start_pt), tuple(end_pt), color, 2)

    return image


def draw_hand_mesh(
        image: np.ndarray,
        hand_model,
        hand_pose,
        hand_idx: int,
        color: Tuple[int, int, int] = (0, 255, 0),
        thickness: int = 1,
        alpha: float = 1.0,
        camera: PinholePlaneCameraModel = None
) -> np.ndarray:
    """
    Draw the hand mesh as a wireframe overlay on the image.

    Args:
        image: input image (H, W, 3); modified in place.
        hand_model: hand model providing mesh vertices and triangles.
        hand_pose: hand pose (SingleHandPose object).
        hand_idx: hand index (0 for right hand, 1 for left hand).
        color: BGR color tuple.
        thickness: line thickness for the triangle edges.
        alpha: opacity (0.0-1.0) used to blend the wireframe onto the image.
        camera: camera model used to project 3D points to 2D pixel coordinates.

    Returns:
        The image with the hand wireframe drawn.
    """
    # All drawing happens on a copy so it can be alpha-blended afterwards.
    canvas = image.copy()

    # Without mesh data there is nothing to draw.
    if hand_model.mesh_vertices is None or hand_model.mesh_triangles is None:
        return image

    faces = hand_model.mesh_triangles
    if isinstance(faces, torch.Tensor):
        faces = faces.detach().cpu().numpy()
    elif not isinstance(faces, np.ndarray):
        raise TypeError(f"不支持的三角形数据类型: {type(faces)}")

    # Posed mesh vertex positions in world space: [V, 3].
    world_verts = all_vertices_from_hand_pose(hand_model, hand_pose, hand_idx)

    # Project the vertices: world -> eye -> integer pixel coordinates.
    eye_verts = camera.world_to_eye(world_verts)                    # [V, 3]
    pixel_verts = camera.eye_to_window(eye_verts).astype(np.int32)  # [V, 2]

    img_h, img_w = image.shape[0], image.shape[1]
    for face in faces:
        face = face.astype(np.int32)  # triangle indices must be integral
        corners = pixel_verts[face]   # [3, 2] pixel corners of this face

        # Draw only faces that are fully on-screen and in front of the camera.
        on_screen = np.all((corners >= 0) & (corners < [img_w, img_h]))
        in_front = np.all(eye_verts[face, 2] > 0)
        if on_screen and in_front:
            for k in range(3):
                cv2.line(canvas, tuple(corners[k]),
                         tuple(corners[(k + 1) % 3]), color, thickness)

    # Composite the wireframe layer back onto the original image.
    if alpha < 1.0:
        cv2.addWeighted(canvas, alpha, image, 1 - alpha, 0, image)
    else:
        image[:] = canvas[:]

    return image


def draw_hand_skin(
        image: np.ndarray,
        hand_model,
        hand_pose,
        hand_idx: int,
        color: Tuple[int, int, int] = (0, 255, 0),
        alpha: float = 1.0,
        camera: PinholePlaneCameraModel = None
) -> np.ndarray:
    """
    Render the hand surface by filling the projected mesh triangles.

    Args:
        image: input image (H, W, 3); modified in place.
        hand_model: hand model providing mesh vertices and triangles.
        hand_pose: hand pose (SingleHandPose object).
        hand_idx: hand index (0 for right hand, 1 for left hand).
        color: BGR fill color.
        alpha: opacity (0.0-1.0) used to blend the filled surface.
        camera: camera model used to project 3D points to 2D pixel coordinates.

    Returns:
        The image with the hand surface drawn.
    """
    # Filled triangles go onto a separate layer for alpha compositing.
    layer = image.copy()

    # Bail out when the model carries no mesh data.
    if hand_model.mesh_vertices is None or hand_model.mesh_triangles is None:
        return image

    tris = hand_model.mesh_triangles
    if isinstance(tris, torch.Tensor):
        tris = tris.detach().cpu().numpy()
    elif not isinstance(tris, np.ndarray):
        raise TypeError(f"不支持的三角形数据类型: {type(tris)}")

    # Posed mesh vertices in world space, projected to the image plane.
    verts3d = all_vertices_from_hand_pose(hand_model, hand_pose, hand_idx)  # [V, 3]
    verts_eye = camera.world_to_eye(verts3d)                                # [V, 3]
    verts_px = camera.eye_to_window(verts_eye).astype(np.int32)             # [V, 2]

    bounds = [image.shape[1], image.shape[0]]  # (width, height) for the in-bounds test
    for tri in tris:
        tri = tri.astype(np.int32)  # triangle indices must be integral
        poly = verts_px[tri]        # [3, 2] pixel corners of this face

        # Fill only triangles fully on-screen and in front of the camera.
        if np.all((poly >= 0) & (poly < bounds)) and np.all(verts_eye[tri, 2] > 0):
            cv2.fillPoly(layer, [poly], color)

    # Composite the filled layer onto the original image.
    if alpha < 1.0:
        cv2.addWeighted(layer, alpha, image, 1 - alpha, 0, image)
    else:
        image[:] = layer[:]

    return image


class HandVisualizer(Visualizer):
    """Renders hand tracking results onto camera views and tiles them in a grid."""

    def __call__(self, results):
        # Pipeline hook: log and tag the result for downstream consumers.
        print(f"Visualizing: {results}")
        return f"Final_Output_{results}"

    def visualize(self, input_frame, tracking_result, method="landmarks"):
        """
        Draw every tracked hand onto every camera view and tile the views.

        Args:
            input_frame: frame object exposing `.views`, each with `.image`
                and `.camera` attributes.
            tracking_result: mapping from hand index to a dict with "model"
                and "pose" entries, or None when the hand is not tracked.
            method: one of "landmarks", "mesh", "skin"; any other value
                leaves the views undrawn.

        Returns:
            A single grid image: 2x2 for 4 views, 3x2 for 6 views.

        Raises:
            ValueError: when the number of views is neither 4 nor 6.
        """
        processed_views = []
        for view in input_frame.views:
            view_image = view.image.copy()
            # Normalize every view to 3-channel BGR before drawing.
            if len(view_image.shape) == 2:  # single-channel (grayscale)
                view_image = cv2.cvtColor(view_image, cv2.COLOR_GRAY2BGR)
            elif view_image.shape[2] == 4:  # BGRA (alpha channel)
                view_image = cv2.cvtColor(view_image, cv2.COLOR_BGRA2BGR)

            for hand_idx, hand_result in tracking_result.items():
                color = HAND_COLORS[hand_idx]
                if hand_result is None:
                    continue  # this hand was not tracked in this frame
                if method == "landmarks":
                    landmarks = landmarks_from_hand_pose(hand_result["model"], hand_result["pose"], hand_idx)
                    view_image = self.draw_landmarks(view_image, landmarks, color, view.camera)
                elif method == "mesh":
                    view_image = self.draw_mesh(view_image, hand_result["model"], hand_result["pose"], hand_idx, color,
                                                view.camera)
                elif method == "skin":
                    view_image = self.draw_skin(view_image, hand_result["model"],
                                                hand_result["pose"], hand_idx, color,
                                                view.camera)
                else:
                    # Unknown method: leave the view untouched.
                    pass
            processed_views.append(view_image)

        if len(processed_views) not in (4, 6):
            raise ValueError(f"Expected 4 or 6 views, but got {len(processed_views)} views")

        # 4 views -> 2x2 grid, 6 views -> 3x2 grid (two views per row).
        return self._compose_grid(processed_views, columns=2)

    @staticmethod
    def _compose_grid(views, columns):
        """Resize `views` to a common size and tile them `columns` per row."""
        max_height = max(img.shape[0] for img in views)
        max_width = max(img.shape[1] for img in views)

        # Resize any view that does not already match the largest dimensions.
        resized = [
            img if img.shape[0] == max_height and img.shape[1] == max_width
            else cv2.resize(img, (max_width, max_height))
            for img in views
        ]

        rows = [
            np.hstack(resized[i:i + columns])
            for i in range(0, len(resized), columns)
        ]
        return np.vstack(rows)

    def draw_landmarks(self, view_image, landmarks, color, camera):
        """Draw skeleton keypoints for one hand on one view."""
        view_image = draw_hand_keypoints(
            view_image,
            landmarks,
            color=color,
            camera=camera
        )
        return view_image

    def draw_mesh(self, view_image, model, pose, hand_id, color, camera):
        """Draw the hand mesh wireframe for one hand on one view."""
        view_image = draw_hand_mesh(
            view_image,
            model,
            pose,
            hand_id,
            color=color,
            camera=camera
        )
        return view_image

    def draw_skin(self, view_image, model, pose, hand_id, color, camera):
        """Draw the filled hand surface for one hand on one view."""
        return draw_hand_skin(
            view_image,
            model,
            pose,
            hand_id,
            color=color,
            camera=camera,
            alpha=0.7  # blend so the underlying image stays visible
        )
