import os
import numpy as np
import cv2
from PIL import Image
import glob
import matplotlib.pyplot as plt
from pyquaternion import Quaternion
from nuscenes.utils.data_classes import Box as NuScenesBox
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix

from tools.visualization.bev_render import (
    color_mapping, 
    SCORE_THRESH, 
    MAP_SCORE_THRESH,
    CMD_LIST
)

class CamRender:
    """Renders the six-camera mosaic with detection / motion / planning overlays."""

    def __init__(
        self,
        plot_choices,
        out_dir,
        dataset_source='nus',
        is_pipeline=False,
    ):
        # Flags selecting which overlays (det / motion / planning, gt / pred) to draw.
        self.plot_choices = plot_choices
        self.out_dir = out_dir
        # Separate output folders for ground-truth and prediction renderings.
        self.cam_gt_dir = os.path.join(out_dir, "cam_gt")
        self.cam_pred_dir = os.path.join(out_dir, "cam_pred")
        for directory in (self.cam_gt_dir, self.cam_pred_dir):
            os.makedirs(directory, exist_ok=True)
        self.dataset_source = dataset_source
        self.is_pipeline = is_pipeline

        # Display order of the 2x3 camera mosaic (row-major).
        self.CAM_NAMES_NUSC = [
            'FRONT_LEFT',
            'FRONT',
            'FRONT_RIGHT',
            'BACK_RIGHT',
            'BACK',
            'BACK_LEFT',
        ]
        # Order in which cameras are stored inside the data dict; used to map a
        # display name back to its index into data['img_filename'] and friends.
        if dataset_source == 'cyw':
            self.CAM_NAMES_NUSC_converter = [
                'FRONT',
                'BACK',
                'FRONT_RIGHT',
                'FRONT_LEFT',
                'BACK_RIGHT',
                'BACK_LEFT',
            ]
        else:
            self.CAM_NAMES_NUSC_converter = [
                'FRONT',
                'FRONT_RIGHT',
                'FRONT_LEFT',
                'BACK',
                'BACK_LEFT',
                'BACK_RIGHT',
            ]



    def reset_canvas(self):
        """Close the previous figure and start a fresh 2x3 camera mosaic.

        Fix: the old sequence called plt.gca()/plt.axis('off') *before*
        plt.subplots(); gca() spawned a throwaway figure that was never
        closed, leaking one figure per render call. Creating the subplots
        directly avoids the leak (each axis is switched off per-image later).
        """
        plt.close()
        self.fig, self.axes = plt.subplots(2, 3, figsize=(160 / 3, 20))
        plt.tight_layout()

    def render(
        self,
        data,
        result,
        index,
    ):
        """Render the GT and/or prediction camera mosaics for one sample.

        Args:
            data: sample dict (images, calibration, GT annotations). Both
                branches read it, so it must not be None when `result` is given.
            result: model-output dict, or None to skip the prediction image.
            index: nominal frame index. NOTE(review): it is overwritten below
                with the count of jpgs already saved, so the argument value is
                effectively unused.

        Returns:
            (save_path_gt, save_path_pred): saved jpg paths; either may be None.
        """
        save_path_gt = None
        if data is not None:
            self.reset_canvas()
            self.render_image_data(data, index)
            self.draw_detection_gt(data)
            self.draw_motion_gt(data)
            self.draw_planning_gt(data)
            # Name the output after the number of jpgs already in the folder.
            imgs_path = glob.glob(os.path.join(self.cam_gt_dir, '*.jpg'))
            index = len(imgs_path)
            save_path_gt = os.path.join(self.cam_gt_dir, str(index).zfill(4) + '.jpg')
            self.save_fig(save_path_gt)

        save_path_pred = None
        if result is not None:
            self.reset_canvas()
            # NOTE(review): this branch also dereferences `data`; calling with
            # result set but data=None would raise.
            self.render_image_data(data, index)
            self.draw_detection_pred(data, result)
            self.draw_motion_pred(data, result)
            self.draw_planning_pred(data, result)
            imgs_path = glob.glob(os.path.join(self.cam_pred_dir, '*.jpg'))
            index = len(imgs_path)
            save_path_pred = os.path.join(self.cam_pred_dir, str(index).zfill(4) + '.jpg')
            self.save_fig(save_path_pred)

        return save_path_gt, save_path_pred

    def load_image(self, data_path, cam):
        """Read an image from disk and stamp the camera name onto it."""
        # Some CARLA sub-datasets lack the '_normal' augmented variant; fall
        # back to the un-augmented path in that case.
        if not os.path.exists(data_path):
            data_path = data_path.replace('_normal', '')
            assert os.path.exists(data_path)

        image = np.array(Image.open(data_path))
        # Burn the camera label into the top-left corner for identification.
        return cv2.putText(
            image,
            cam,
            (50, 60),                  # text origin
            cv2.FONT_HERSHEY_SIMPLEX,
            1,                         # font scale
            (255, 255, 255),           # white
            4,                         # thickness
            cv2.LINE_AA,
        )

    def update_image(self, image, index, cam):
        """Show *image* on the mosaic tile selected by *index* (``cam`` is unused)."""
        axis = self.get_axis(index)
        axis.imshow(image)
        axis.axis('off')
        axis.grid(False)
        plt.axis('off')

    def get_axis(self, index):
        """Retrieve the corresponding axis based on the index."""
        return self.axes[index//3, index % 3]

    def save_fig(self, filename):
        """Strip all outer margins and spacing, then write the current figure."""
        plt.margins(0, 0)
        plt.subplots_adjust(left=0, right=1, bottom=0, top=1,
                            hspace=0, wspace=0)
        plt.savefig(filename)

    def render_image_data(self, data, index):
        """Fill the 2x3 mosaic with the six camera images (``index`` is unused)."""
        for display_idx, cam in enumerate(self.CAM_NAMES_NUSC):
            data_idx = self.CAM_NAMES_NUSC_converter.index(cam)
            if data_idx >= len(data['img_filename']):
                continue
            if self.is_pipeline:
                # Data already went through the pipeline: undo the channel
                # normalization so the tensor can be displayed as RGB.
                import mmcv
                raw = data['img'][data_idx]
                image = mmcv.imdenormalize(
                    raw,
                    np.array([123.675, 116.28, 103.53]),
                    np.array([58.395, 57.12, 57.375]),
                    True,
                ).astype(np.uint8)
            else:
                image = self.load_image(data['img_filename'][data_idx], cam)
            self.update_image(image, display_idx, cam)

    def draw_detection_gt(self, data):
        """Project the ground-truth 3D boxes into each of the six camera images.

        Runs only when GT plotting and detection plotting are enabled and the
        sample carries 'gt_labels_3d'; boxes labelled -1 are skipped.
        """
        if not (self.plot_choices['draw_gt'] and self.plot_choices['det'] and "gt_labels_3d" in data):
            return

        bboxes = data['gt_bboxes_3d']
        for j, cam in enumerate(self.CAM_NAMES_NUSC):
            # Map the display-order camera name to its index in the data arrays.
            idx = self.CAM_NAMES_NUSC_converter.index(cam)
            if idx>=len(data['img_filename']):
                continue
            cam_intrinsic = data['cam_intrinsic'][idx]

            # Recover the lidar->camera extrinsic by stripping the intrinsic
            # out of the fused lidar2img projection matrix.
            # if self.is_pipeline:
            lidar2img = data['lidar2img'][idx]
            mat4 = np.eye(4)
            mat4[:3, :3] = cam_intrinsic[:3, :3]
            extrinsic = np.linalg.inv(mat4) @ lidar2img

            # Orthonormalize the rotation part via SVD so the Quaternion
            # constructor accepts it despite numerical noise.
            U, S, Vh = np.linalg.svd(extrinsic[:3, :3])
            Q = U @ Vh
            rot = Quaternion(matrix=Q, rtol=0.0001, atol=0.0001)
            trans = extrinsic[:3, 3]
            # else:
            #     extrinsic = data['lidar2cam'][idx]
            #     trans = extrinsic[3, :3]
            #
            #     U, S, Vh = np.linalg.svd(extrinsic[:3, :3])
            #     Q = U @ Vh
            #     rot = Quaternion(matrix=Q).inverse
            #     # rot = Quaternion(matrix=extrinsic[:3, :3]).inverse

            # Raw image size (width, height) per dataset.
            if self.dataset_source == 'nus':
                imsize = (1600, 900)
            elif self.dataset_source == 'cyw':
                imsize = (1280, 720)
            else:
                raise NotImplementedError

            if self.is_pipeline:
                imsize = (704, 256)  # image size after the pipeline resized it

            for i in range(data['gt_labels_3d'].shape[0]):
                label = data['gt_labels_3d'][i]
                if label == -1:
                    continue
                color = color_mapping[label % len(color_mapping)]

                center = bboxes[i, 0 : 3]
                box_dims = bboxes[i, 3 : 6]
                # Swap the first two dims -- presumably (l, w, h) -> (w, l, h)
                # as NuScenesBox expects; confirm the box encoding.
                nusc_dims = box_dims[..., [1, 0, 2]]
                quat = Quaternion(axis=[0, 0, 1], radians=bboxes[i, 6])
                box = NuScenesBox(
                    center,
                    nusc_dims,
                    quat
                )
                # Move the box from the lidar frame into the camera frame.
                box.rotate(rot)
                box.translate(trans)
                if box_in_image(box, cam_intrinsic, imsize):
                    box.render(
                        self.axes[j // 3, j % 3],
                        view=cam_intrinsic,
                        normalize=True,
                        colors=(color, color, color),
                        linewidth=4,
                    )

            # Clip the axis to the image extent (y inverted: image origin is top-left).
            self.axes[j//3, j % 3].set_xlim(0, imsize[0])
            self.axes[j//3, j % 3].set_ylim(imsize[1], 0)

    def draw_detection_pred(self, data, result):
        """Project predicted 3D boxes (score >= SCORE_THRESH) into the cameras.

        Fix: the intrinsic is copied into the 4x4 helper via its top-left 3x3
        sub-block (matching draw_detection_gt), so a 4x4 intrinsic no longer
        breaks the assignment; for a 3x3 intrinsic the change is a no-op.
        """
        if not (self.plot_choices['draw_pred'] and self.plot_choices['det'] and "boxes_3d" in result):
            return

        bboxes = result['boxes_3d'].numpy()
        for j, cam in enumerate(self.CAM_NAMES_NUSC):
            # Map the display-order camera name to its index in the data arrays.
            idx = self.CAM_NAMES_NUSC_converter.index(cam)
            if idx >= len(data['img_filename']):
                continue
            cam_intrinsic = data['cam_intrinsic'][idx]

            if self.is_pipeline:
                # Strip the intrinsic out of the fused lidar2img matrix to
                # recover the lidar->camera extrinsic.
                lidar2img = data['lidar2img'][idx]
                mat4 = np.eye(4)
                mat4[:3, :3] = cam_intrinsic[:3, :3]
                extrinsic = np.linalg.inv(mat4) @ lidar2img

                rot = Quaternion(matrix=extrinsic[:3, :3], rtol=0.0001, atol=0.0001)
                trans = extrinsic[:3, 3]
            else:
                extrinsic = data['lidar2cam'][idx]
                # NOTE(review): translation read from the last ROW plus the
                # inverse rotation -- assumes lidar2cam is stored transposed;
                # confirm against the data loader.
                trans = extrinsic[3, :3]

                # Orthonormalize (SVD) so Quaternion() tolerates a noisy matrix.
                U, S, Vh = np.linalg.svd(extrinsic[:3, :3])
                Q = U @ Vh
                rot = Quaternion(matrix=Q).inverse

            # Raw image size (width, height) per dataset.
            if self.dataset_source == 'nus':
                imsize = (1600, 900)
            elif self.dataset_source == 'cyw':
                imsize = (1280, 720)
            else:
                raise NotImplementedError

            if self.is_pipeline:
                imsize = (704, 256)  # image size after the pipeline resized it

            for i in range(result['labels_3d'].shape[0]):
                score = result['scores_3d'][i]
                if score < SCORE_THRESH:
                    continue
                # Color per tracked instance so boxes match across frames.
                color = color_mapping[result['instance_ids'][i] % len(color_mapping)]

                center = bboxes[i, 0 : 3]
                box_dims = bboxes[i, 3 : 6]
                # Swap the first two dims -- presumably (l, w, h) -> (w, l, h)
                # as NuScenesBox expects; confirm the box encoding.
                nusc_dims = box_dims[..., [1, 0, 2]]
                quat = Quaternion(axis=[0, 0, 1], radians=bboxes[i, 6])
                box = NuScenesBox(
                    center,
                    nusc_dims,
                    quat
                )
                # Move the box from the lidar frame into the camera frame.
                box.rotate(rot)
                box.translate(trans)
                if box_in_image(box, cam_intrinsic, imsize):
                    box.render(
                        self.axes[j // 3, j % 3],
                        view=cam_intrinsic,
                        normalize=True,
                        colors=(color, color, color),
                        linewidth=4,
                    )

            # Clip the axis to the image extent (y inverted: image origin is top-left).
            self.axes[j//3, j % 3].set_xlim(0, imsize[0])
            self.axes[j//3, j % 3].set_ylim(imsize[1], 0)

    def draw_motion_gt(self, data):
        """Project GT agents' future trajectories into every camera image."""
        if not (self.plot_choices['draw_gt'] and self.plot_choices['motion']):
            return
        bboxes = data['gt_bboxes_3d']
        for j, cam in enumerate(self.CAM_NAMES_NUSC):
            # Map the display-order camera name to its index in the data arrays.
            idx = self.CAM_NAMES_NUSC_converter.index(cam)
            if idx >= len(data['img_filename']):
                continue
            cam_intrinsic = data['cam_intrinsic'][idx]

            # Strip the intrinsic out of lidar2img to recover the extrinsic.
            lidar2img = data['lidar2img'][idx]
            mat4 = np.eye(4)
            mat4[:3, :3] = cam_intrinsic[:3, :3]
            extrinsic = np.linalg.inv(mat4) @ lidar2img

            # Orthonormalize (SVD) so Quaternion() tolerates numerical noise.
            U, S, Vh = np.linalg.svd(extrinsic[:3, :3])
            Q = U @ Vh
            rot = Quaternion(matrix=Q, rtol=0.0001, atol=0.0001)
            trans = extrinsic[:3, 3]
            # trans = extrinsic[3, :3]
            # U, S, Vh = np.linalg.svd(extrinsic[:3, :3])
            # Q = U @ Vh
            # rot = Quaternion(matrix=Q).inverse
            # # rot = Quaternion(matrix=extrinsic[:3, :3]).inverse
            # Raw image size (width, height) per dataset.
            if self.dataset_source == 'nus':
                imsize = (1600, 900)
            elif self.dataset_source == 'cyw':
                imsize = (1280, 720)
            else:
                raise NotImplementedError

            for i in range(data['gt_labels_3d'].shape[0]):
                label = data['gt_labels_3d'][i]
                if label == -1:
                    continue
                color = color_mapping[label % len(color_mapping)]

                center = data['gt_bboxes_3d'][i, :2]
                masks = data['gt_agent_fut_masks'][i].astype(bool)
                # Skip agents whose very first future step is invalid.
                if masks[0] == 0:
                    continue
                # Per-step displacements -> absolute waypoints, with the
                # current box center prepended as the trajectory origin.
                traj = data['gt_agent_fut_trajs'][i][masks]
                traj = traj.cumsum(axis=0) + center
                traj = np.concatenate([center.reshape(1, 2), traj], axis=0)
                traj_expand = np.ones((traj.shape[0], 1))
                # Trajectory height: box z minus half the box height --
                # presumably the box bottom (assumes z is the center); confirm.
                traj_expand[:] = bboxes[i, 2] - bboxes[i, 5] / 2
                traj = np.concatenate([traj, traj_expand], axis=1) # lift to 3D with a height column

                center = bboxes[i, 0: 3]
                box_dims = bboxes[i, 3: 6]
                nusc_dims = box_dims[..., [1, 0, 2]]
                quat = Quaternion(axis=[0, 0, 1], radians=bboxes[i, 6])
                box = NuScenesBox(
                    center,
                    nusc_dims,
                    quat
                )
                box.rotate(rot)
                box.translate(trans)
                # Only draw trajectories whose owner box is visible in this view.
                if not box_in_image(box, cam_intrinsic, imsize):
                    continue
                # Transform the waypoints into the camera frame and scatter them.
                traj_points = traj @ extrinsic[:3, :3].T + trans
                self._render_traj(traj_points, cam_intrinsic, j, color=color, s=15)

    def draw_motion_pred(self, data, result, points_per_step=10):
        """Project predicted agents' best-mode trajectories into the cameras.

        NOTE(review): ``points_per_step`` is accepted but never used here --
        densification happens inside _render_traj with its own default.
        """
        if not (self.plot_choices['draw_pred'] and self.plot_choices['motion'] and "trajs_3d" in result):
            return

        bboxes = result['boxes_3d'].numpy()
        for j, cam in enumerate(self.CAM_NAMES_NUSC):
            idx = self.CAM_NAMES_NUSC_converter.index(cam)
            if idx>=len(data['img_filename']):
                continue
            cam_intrinsic = data['cam_intrinsic'][idx]
            lidar2cam = data['lidar2cam']
            extrinsic = lidar2cam[idx]
            # NOTE(review): translation from the last ROW plus the inverse
            # rotation -- assumes lidar2cam is stored transposed; confirm.
            trans = extrinsic[3, :3]
            # Orthonormalize (SVD) so Quaternion() tolerates numerical noise.
            U, S, Vh = np.linalg.svd(extrinsic[:3, :3])
            Q = U @ Vh
            rot = Quaternion(matrix=Q).inverse
            # rot = Quaternion(matrix=extrinsic[:3, :3]).inverse
            # NOTE(review): image size hard-coded to nuScenes here, unlike the
            # detection methods which switch on dataset_source.
            imsize = (1600, 900)

            for i in range(result['labels_3d'].shape[0]):
                score = result['scores_3d'][i]
                if score < SCORE_THRESH:
                    continue
                # Color per tracked instance so trajectories match across frames.
                color = color_mapping[result['instance_ids'][i] % len(color_mapping)]

                traj_score = result['trajs_score'][i].numpy()
                traj = result['trajs_3d'][i].numpy()

                # Keep only the highest-scoring trajectory mode.
                mode_idx = traj_score.argmax()
                traj = traj[mode_idx]
                # Prepend the current box center as the trajectory origin.
                origin = bboxes[i, :2][None]
                traj = np.concatenate([origin, traj], axis=0)
                traj_expand = np.ones((traj.shape[0], 1))
                # Height: box z minus half the box height -- presumably the box
                # bottom (assumes z is the center); confirm.
                traj_expand[:] = bboxes[i, 2] - bboxes[i, 5] / 2
                traj = np.concatenate([traj, traj_expand], axis=1)

                center = bboxes[i, 0 : 3]
                box_dims = bboxes[i, 3 : 6]
                nusc_dims = box_dims[..., [1, 0, 2]]
                quat = Quaternion(axis=[0, 0, 1], radians=bboxes[i, 6])
                box = NuScenesBox(
                    center,
                    nusc_dims,
                    quat
                )
                box.rotate(rot)
                box.translate(trans)
                # Only draw trajectories whose owner box is visible in this view.
                if not box_in_image(box, cam_intrinsic, imsize):
                    continue
                # NOTE(review): no transpose here, unlike draw_motion_gt's
                # `@ extrinsic[:3, :3].T` -- the two paths assume opposite
                # rotation-storage conventions; verify which is correct.
                traj_points = traj @ extrinsic[:3, :3] + trans
                self._render_traj(traj_points, cam_intrinsic, j, color=color, s=15)


    def draw_planning_gt(self, data):
        """Project the ground-truth ego future trajectory into the front camera.

        Cleanup: the unused SVD/quaternion computation (``rot`` was never read)
        and the dead trailing ``pass`` were removed; behavior is unchanged.
        """
        if not (self.plot_choices['draw_gt'] and self.plot_choices['planning']):
            return

        # Planning is only drawn on the front (main) camera.
        idx = 0
        cam_intrinsic = data['cam_intrinsic'][idx]

        # Strip the intrinsic out of lidar2img to recover the lidar->camera
        # extrinsic; only its rotation and translation are needed below.
        lidar2img = data['lidar2img'][idx]
        mat4 = np.eye(4)
        mat4[:3, :3] = cam_intrinsic[:3, :3]
        extrinsic = np.linalg.inv(mat4) @ lidar2img
        trans = extrinsic[:3, 3]

        # Accumulate the per-step displacements into absolute waypoints,
        # prepend the ego origin, and lift to 3D with z = 1.
        masks = data['gt_ego_fut_masks'].astype(bool)
        plan_traj = data['gt_ego_fut_trajs'][masks]
        plan_traj = plan_traj.cumsum(axis=0)
        plan_traj = np.concatenate((np.zeros((1, 2)), plan_traj), axis=0)
        traj_expand = np.ones((plan_traj.shape[0], 1))
        plan_traj = np.concatenate([plan_traj, traj_expand], axis=1)

        # NOTE: the cyw extrinsics follow the standard R|t convention, hence
        # the transpose here (the sparse variant mixes conventions).
        traj_points = plan_traj @ extrinsic[:3, :3].T + trans
        # j=1 -> axes[0, 1], the FRONT tile of the mosaic.
        self._render_traj(traj_points, cam_intrinsic, j=1)


    def draw_planning_pred(self, data, result):
        """Project the predicted ego planning trajectory into the front camera.

        Uses result["final_planning"] (the already-selected trajectory), so no
        command/mode selection happens here. Cleanup: the large block of
        commented-out per-mode selection code and the unused SVD/quaternion
        computation were removed; behavior is unchanged.
        """
        if not (self.plot_choices['draw_pred'] and self.plot_choices['planning'] and "planning" in result):
            return

        # Planning is only drawn on the front (main) camera.
        idx = 0
        cam_intrinsic = data['cam_intrinsic'][idx]
        extrinsic = data['lidar2cam'][idx]
        # NOTE(review): translation read from the last ROW and the
        # non-transposed rotation used below, mirroring the other *_pred
        # methods -- assumes lidar2cam is stored transposed; the GT planning
        # path uses the opposite convention. Verify which is correct.
        trans = extrinsic[3, :3]

        # Prepend the ego origin and lift to 3D with z = 1.
        plan_traj = result["final_planning"]
        plan_traj = np.concatenate((np.zeros((1, 2)), plan_traj), axis=0)
        traj_expand = np.ones((plan_traj.shape[0], 1))
        plan_traj = np.concatenate([plan_traj, traj_expand], axis=1)

        traj_points = plan_traj @ extrinsic[:3, :3] + trans
        # j=1 -> axes[0, 1], the FRONT tile of the mosaic.
        self._render_traj(traj_points, cam_intrinsic, j=1)

    def _render_traj(self, traj_points, cam_intrinsic, j, color=(1, 0.5, 0), s=150, points_per_step=10):
        """Densify a 3D trajectory, project it through the camera and scatter it.

        Args:
            traj_points: (T, 3) waypoints, already in the camera frame.
            cam_intrinsic: camera matrix passed to view_points.
            j: flat index of the target mosaic axis (row = j // 3, col = j % 3).
            color, s: matplotlib scatter color and marker size.
            points_per_step: interpolated samples per trajectory segment.
        """
        total_steps = (len(traj_points) - 1) * points_per_step + 1
        total_xy = np.zeros((total_steps, 3))
        # Linearly interpolate points_per_step samples along every segment.
        for k in range(total_steps - 1):
            seg = k // points_per_step
            frac = k / points_per_step - seg
            total_xy[k] = traj_points[seg] + frac * (traj_points[seg + 1] - traj_points[seg])
        # Fix: the loop above never writes the last row, which stayed all-zero
        # and was masked out below -- silently dropping the final waypoint.
        total_xy[-1] = traj_points[-1]
        # Keep only points safely in front of the camera (z > 0.1); the mask
        # is computed before projection since view_points divides by z.
        in_range_mask = total_xy[:, 2] > 0.1
        projected = view_points(total_xy.T, cam_intrinsic, normalize=True)[:2, :]
        projected = projected[:, in_range_mask]
        self.axes[j // 3, j % 3].scatter(projected[0], projected[1], color=color, s=s)