import sapien.core as sapien
import numpy as np
from PIL import Image, ImageColor
import cv2
import torch


def compute_pose(points):
    """Derive a 2D pose ``[x, y, theta]`` from three keypoints.

    The pose position is the midpoint of the first two points. The heading
    is taken along the first-to-second point vector (or its reversal),
    chosen so that the dot product with the third point's offset from the
    midpoint is non-positive.  NOTE: despite the original "normal" wording,
    these candidate vectors lie along the segment, not perpendicular to it.
    """
    (ax, ay), (bx, by), (cx, cy) = points
    # Midpoint of the first two keypoints.
    mid_x = (ax + bx) / 2
    mid_y = (ay + by) / 2
    # Vector along the segment a -> b, and its reversal.
    forward = np.array([bx - ax, by - ay])
    backward = -forward
    # Offset of the third keypoint relative to the midpoint.
    offset = np.array([cx - mid_x, cy - mid_y])
    # Keep 'forward' when the third point projects non-positively onto it;
    # otherwise flip so the heading points away from the third keypoint.
    heading = forward if np.dot(offset, forward) <= 0 else backward
    theta = np.arctan2(heading[1], heading[0])

    return [mid_x, mid_y, theta]

def draw_arrow(img, pose, length=30, color=(0, 0, 255), thickness=2):
    """Draw an arrow on ``img`` starting at the pose position and oriented
    along the pose angle.

    Args:
        img: BGR image to draw on (mutated in place).
        pose: ``[x, y, theta]`` — pixel position and angle in radians.
        length: arrow length in pixels.
        color: BGR color tuple.
        thickness: line thickness in pixels.

    Returns:
        The same image object, with the arrow drawn.
    """
    x, y, theta = pose
    start = (int(x), int(y))
    tip_x = x + length * np.cos(theta)
    tip_y = y + length * np.sin(theta)
    tip = (int(tip_x), int(tip_y))
    cv2.arrowedLine(img, start, tip, color, thickness, tipLength=0.3)

    return img

class Camera:
    """Convenience wrapper around a SAPIEN scene camera.

    Renders and fetches RGB, depth, segmentation and keypoint data, and
    provides small OpenCV-based display helpers.
    """

    def __init__(self, scene:sapien.Scene, name:str, pose:sapien.Pose=None, mounted_link=None, 
                    width=640, height=480, fovy=60.0, near=0.05, far=100.0):
        """
        Create a camera on a sapien scene

        Args:
            scene: sapien Scene to add the camera to
            name: name of the camera
            pose: initial camera pose (stored only; applied via update_camera_pose)
            mounted_link: link to mount the camera on (currently stored, unused here)
            width: image width in pixels
            height: image height in pixels
            fovy: vertical field of view passed straight to sapien
                (NOTE(review): confirm whether sapien expects radians — 60.0 looks like degrees)
            near: near clipping plane distance
            far: far clipping plane distance
        """
        self._scene = scene
        self.camera = scene.add_camera(name=name, width=width, height=height, fovy=fovy, near=near, far=far)
        self.pose = pose
        self.mounted_link = mounted_link
        self.width = width
        self.height = height
        self.fovy = fovy

    # ---------------------------------------------------------------------------- #
    # Camera Utils
    # ---------------------------------------------------------------------------- #
    def update_camera_pose(self, camera_pose):
        """
        Move the camera entity to a new pose.

        Rendering must still be refreshed afterwards (scene.update_render +
        take_picture), otherwise fresh RGBD data cannot be collected.
        """
        self.pose = camera_pose
        self.camera.entity.set_pose(self.pose)

    def get_config(self) -> dict:
        """Return intrinsic/extrinsic/model matrices keyed by camera name."""
        res = {}

        def _get_config(camera):
            # Gather calibration matrices straight from the sapien camera.
            return {
                "intrinsic_cv": camera.get_intrinsic_matrix(),
                "extrinsic_cv": camera.get_extrinsic_matrix(),
                "cam2world_gl": camera.get_model_matrix(),
            }
        res[self.camera.name] = _get_config(self.camera)

        return res

    def get_rgb(self) -> dict:
        """Return {camera_name: {"rgb": HxWx3 uint8 image}} (alpha dropped)."""
        rgba = self.get_rgba()
        rgb = {}
        for camera_name, camera_data in rgba.items():
            rgb[camera_name] = {}
            rgb[camera_name]["rgb"] = camera_data["rgba"][:, :, :3]  # Exclude alpha channel
        return rgb

    def disp_rgb(self, image, image_name='RGB Image'):
        """Show an RGB image in a cv2 window (reversed to BGR for OpenCV)."""
        cv2.imshow(image_name, image[:, :, ::-1])
        cv2.waitKey(1)

    # Get Camera RGBA
    def get_rgba(self) -> dict:
        """Return {camera_name: {"rgba": HxWx4 uint8 image}} from the last render."""

        def _get_rgba(camera):
            camera_rgba = camera.get_picture("Color")  # float image, values in [0, 1]
            camera_rgba_img = (camera_rgba * 255).clip(0, 255).astype("uint8")
            return camera_rgba_img
        res = {}
        res[self.camera.name] = {}
        res[self.camera.name]["rgba"] = _get_rgba(self.camera)

        return res

    # Get Camera Segmentation
    def get_segmentation(self, level="mesh") -> dict:
        """Return a colorized segmentation image and the raw label image.

        Args:
            level: "mesh" (per visual shape) or "actor" (per actor).

        Raises:
            ValueError: if level is neither "mesh" nor "actor"
                (the original code died with UnboundLocalError instead).
        """

        def _get_segmentation(camera, level="mesh"):
            # visual_id is the unique id of each visual shape
            seg_labels = camera.get_picture("Segmentation")  # [H, W, 4]
            colormap = sorted(set(ImageColor.colormap.values()))
            color_palette = np.array([ImageColor.getrgb(color) for color in colormap], dtype=np.uint8)
            if level == "mesh":
                label0_image = seg_labels[..., 0].astype(np.uint8)  # mesh-level
            elif level == "actor":
                label0_image = seg_labels[..., 1].astype(np.uint8)  # actor-level
            else:
                raise ValueError(f"Unknown segmentation level: {level!r} (expected 'mesh' or 'actor')")
            return color_palette[label0_image], label0_image

        res = {}
        res[self.camera.name] = {}
        segmentation_image, segmentation_label = _get_segmentation(self.camera, level=level)
        res[self.camera.name][f"{level}_segmentation"] = segmentation_image
        res[self.camera.name][f"{level}_segmentation_label"] = segmentation_label

        return res

    def disp_segmentation(self, image, image_name='Segmentation'):
        """Show a segmentation image in a cv2 window."""
        cv2.imshow(image_name, image)
        cv2.waitKey(1)

    def get_segmentation_keypoint(self, segmentation_label, target_labels:list[int]):
        """Find the pixel centroid of each target label and, when exactly
        three are found, compute a pose from them.

        Args:
            segmentation_label: HxW label image (e.g. from get_segmentation).
            target_labels: labels whose centroids become keypoints.

        Returns:
            (annotated BGR image, list of (cx, cy) centers,
             pose [x, y, theta] or None if fewer than 3 centers found).
        """
        img = np.zeros_like(segmentation_label, dtype=np.uint8)
        img_color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        centers = []
        key_pose = None

        for target_label in target_labels:
            ys, xs = np.where(segmentation_label == target_label)
            if len(xs) > 0:
                # Centroid of all pixels carrying this label.
                cx, cy = int(np.mean(xs)), int(np.mean(ys))
                centers.append((cx, cy))
                img_color[segmentation_label == target_label] = [255] * 3
                cv2.circle(img_color, center=(cx, cy), radius=6, color=(0, 255, 0), thickness=2)

        if len(centers) == 3:
            # compute_pose needs exactly three points.
            key_pose = compute_pose(centers)
            img_color = draw_arrow(img_color, key_pose)

        return img_color, centers, key_pose

    def disp_keypoint(self, image, image_name='Keypoint'):
        """Show a keypoint visualization image in a cv2 window."""
        cv2.imshow(image_name, image)
        cv2.waitKey(1)

    # Get Camera Depth
    def get_depth(self) -> dict:
        """Return {camera_name: {"depth": HxW float64 depth image}}.

        Depth is converted to millimetres and masked by the alpha channel so
        pixels with no geometry (alpha == 0) become zero.
        """

        def _get_depth(camera):
            position = camera.get_picture("Position")  # [H, W, 4] in camera frame
            depth = -position[..., 2]  # z is negative in front of an OpenGL camera
            depth_image = (depth * 1000.0).astype(np.float64)  # metres -> millimetres
            return depth_image
        # NOTE: dead helper `_get_sensor_depth` removed — it was never called.

        res = {}
        rgba = self.get_rgba()
        res[self.camera.name] = {}
        res[self.camera.name]["depth"] = _get_depth(self.camera)
        # Zero out depth where nothing was rendered (alpha mask).
        res[self.camera.name]["depth"] *= rgba[self.camera.name]["rgba"][:, :, 3] / 255

        return res

    def disp_depth(self, image, image_name='Depth', max_depth=6000):
        """Show a depth image as a JET-colormapped cv2 window.

        Args:
            image: HxW depth image in millimetres.
            image_name: cv2 window title.
            max_depth: clipping/normalization ceiling in millimetres.
        """
        # convert the 1 channel depth image to 3 channel colormapped image
        clipped_depth = image.clip(0, max_depth)
        depth_colored = cv2.applyColorMap((clipped_depth / max_depth * 255).astype(np.uint8), cv2.COLORMAP_JET)
        cv2.imshow(image_name, depth_colored)
        cv2.waitKey(1)

    def get_camera_results(self, types=("rgb", "depth", "mesh_segmentation", "keypoint"),
                           pose:sapien.Pose=None, keypoint_labels=(16, 17, 15)):
        """Render once, then collect the requested result types.

        Args:
            types: iterable of result names; any of "rgb", "depth",
                "mesh_segmentation", "keypoint". (Immutable tuple default
                replaces the original shared mutable list default.)
            pose: optional camera pose applied before rendering.
            keypoint_labels: segmentation labels used for keypoint detection
                (previously hard-coded as [16, 17, 15]).

        Returns:
            dict mapping each requested type to its data. "mesh_segmentation"
            also adds "mesh_segmentation_label"; "keypoint" maps to a dict
            with keys "img", "center", "key_pose".
        """
        if pose:
            self.update_camera_pose(camera_pose=pose)
        self._scene.update_render()
        self.camera.take_picture()
        results = {}
        segmentation = None  # cache so segmentation is extracted at most once
        for result_type in types:  # renamed: `type` shadowed the builtin
            if result_type == "rgb":
                results["rgb"] = self.get_rgb()[self.camera.name]["rgb"]
            elif result_type == "depth":
                results["depth"] = self.get_depth()[self.camera.name]["depth"]
            elif result_type == "mesh_segmentation":
                if segmentation is None:
                    segmentation = self.get_segmentation()[self.camera.name]
                results["mesh_segmentation"] = segmentation["mesh_segmentation"]
                results["mesh_segmentation_label"] = segmentation["mesh_segmentation_label"]
        if "keypoint" in types:
            if "mesh_segmentation_label" not in results:
                # Keypoints need the label image even when "mesh_segmentation"
                # was not requested (the original raised KeyError here).
                if segmentation is None:
                    segmentation = self.get_segmentation()[self.camera.name]
                results["mesh_segmentation_label"] = segmentation["mesh_segmentation_label"]
            img, center, key_pose = self.get_segmentation_keypoint(
                segmentation_label=results["mesh_segmentation_label"],
                target_labels=list(keypoint_labels))
            results['keypoint'] = {}
            results["keypoint"]['img'] = img
            results["keypoint"]['center'] = center
            results["keypoint"]['key_pose'] = key_pose
        return results