# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import warnings
import os
from copy import deepcopy

import cv2
from os import path as osp
from pathlib import Path
from PIL import Image
import torch
import mmcv
import numpy as np
import tqdm
from mmcv import Config, DictAction, mkdir_or_exist
from mmdet.core.bbox.assigners.center_region_assigner import bboxes_area

from mmdet3d.core.bbox import (Box3DMode, CameraInstance3DBoxes, Coord3DMode,
                               DepthInstance3DBoxes, LiDARInstance3DBoxes)
from mmdet3d.core.visualizer import (show_multi_modality_result,
                                     show_seg_result)
from tools.visualizer import open3d_vis, zwh_viewer
from mmdet3d.datasets import build_dataset
from tools.visualizer import show_result
from projects.mmdet3d_plugin import *

def parse_args():
    """Parse command-line options for the dataset browser.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    parser = argparse.ArgumentParser(description='Browse a dataset')
    # Positional: path to the training config describing the dataset.
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--skip-type',
        nargs='+',
        type=str,
        default=['Normalize'],
        help='skip some useless pipeline')
    parser.add_argument(
        '--output-dir',
        type=str,
        default=None,
        help='If there is no display interface, you can save it')
    parser.add_argument(
        '--task',
        choices=['det', 'seg', 'multi_modality-det', 'mono-det'],
        type=str,
        help='Determine the visualization method depending on the task.')
    parser.add_argument(
        '--data_type',
        choices=['cyw', 'dr', 'nus'],
        type=str,
        default='cyw',
        help='input data type')
    parser.add_argument(
        '--aug',
        action='store_true',
        help='Whether to visualize augmented datasets or original dataset.')
    parser.add_argument(
        '--online',
        action='store_true',
        help='Whether to perform online visualization. Note that you often '
             'need a monitor to do so.')
    # DictAction lets `key=value` pairs override entries of the loaded config.
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
             'in xxx=yyy format will be merged into config file. If the value to '
             'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
             'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
             'Note that the quotation marks are necessary and that no white space '
             'is allowed.')
    parser.add_argument('--dataset_fun', type=str, default='train')
    parser.add_argument('--save_dir', type=str, default='work_dirs/browse_pics',help='save img with points')
    return parser.parse_args()


def build_data_cfg(config_path, skip_type, aug, cfg_options):
    """Build the data config used to load visualization samples.

    Args:
        config_path (str): Path to the mmcv config file.
        skip_type (list[str]): Pipeline step types to drop (e.g. 'Normalize').
        aug (bool): If True, visualize with the (augmented) train pipeline;
            otherwise use the test pipeline enriched with annotation loading.
        cfg_options (dict | None): Extra key-value overrides merged into the
            config (from ``--cfg-options``).

    Returns:
        Config: Config whose ``cfg.data.train.pipeline`` is the pipeline to
        use for visualization.
    """
    cfg = Config.fromfile(config_path)
    if cfg_options is not None:
        cfg.merge_from_dict(cfg_options)
    # Extract the inner dataset of `RepeatDataset` as `cfg.data.train`
    # so we don't need to worry about it later.
    if cfg.data.train['type'] == 'RepeatDataset':
        cfg.data.train = cfg.data.train.dataset
    # Use only the first dataset for `ConcatDataset`.
    if cfg.data.train['type'] == 'ConcatDataset':
        cfg.data.train = cfg.data.train.datasets[0]
    train_data_cfg = cfg.data.train

    if aug:
        show_pipeline = cfg.train_pipeline
    else:
        # Work on a copy: the previous code aliased `cfg.test_pipeline` and
        # then mutated it in place (insert/append below), silently corrupting
        # the config for any later consumer of `cfg.test_pipeline`.
        show_pipeline = deepcopy(cfg.test_pipeline)
        for i in range(len(cfg.train_pipeline)):
            # Inject annotation loading so GT boxes/labels are available.
            if cfg.train_pipeline[i]['type'] == 'LoadAnnotations3D':
                show_pipeline.insert(i, cfg.train_pipeline[i])
            # Collect points as well as labels.
            if cfg.train_pipeline[i]['type'] == 'Collect3D':
                if show_pipeline[-1]['type'] == 'Collect3D':
                    show_pipeline[-1] = cfg.train_pipeline[i]
                else:
                    show_pipeline.append(cfg.train_pipeline[i])

    train_data_cfg['pipeline'] = [
        x for x in show_pipeline if x['type'] not in skip_type
    ]

    return cfg


def to_depth_mode(points, bboxes):
    """Convert points and bboxes from LiDAR to Depth coordinate mode.

    Either argument may be ``None``, in which case it is passed through
    unchanged. Inputs are copied/cloned, so the originals are not modified.
    """
    converted_points = points
    converted_boxes = bboxes
    if converted_points is not None:
        converted_points = Coord3DMode.convert_point(
            converted_points.copy(), Coord3DMode.LIDAR, Coord3DMode.DEPTH)
    if converted_boxes is not None:
        converted_boxes = Box3DMode.convert(
            converted_boxes.clone(), Box3DMode.LIDAR, Box3DMode.DEPTH)
        # 9-dim boxes carry velocity components: the velocity axes must be
        # transformed too (swap the last two columns, then negate column 7).
        if converted_boxes.shape[1] == 9:
            converted_boxes = converted_boxes[:, [0, 1, 2, 3, 4, 5, 6, 8, 7]]
            converted_boxes[:, 7:8] = -converted_boxes[:, 7:8]
    return converted_points, converted_boxes

# Module-level caches: each call with show=True appends this frame's colored
# points, boxes and ego pose so that all frames seen so far can be fused and
# re-visualized in a common world frame.
points_rbgs=[]
bboxes=[]
poses=[]
def show_det_data(input, out_dir, show=False, data_type='cyw'):
    """Visualize 3D point cloud and 3D bboxes.

    Args:
        input (dict): One dataset sample. Reads 'points', 'img_metas',
            'gt_bboxes_3d', 'gt_labels_3d' and, when present, 'pts_radar',
            'seq' (previous frames), 'img', 'lidar2img', 'gt_depth' and
            'ego_pose'.
        out_dir (str): Output directory (currently unused; the snapshot
            code at the bottom is commented out).
        show (bool): Whether to open interactive viewers.
        data_type (str): 'cyw'/'dr' use the custom camera names below,
            anything else uses the nuScenes camera names.

    Returns:
        None: the ``return imgs`` at the end is commented out, so callers
        must not rely on a return value.
    """
    if data_type == 'cyw' or data_type == 'dr':
        # None entries are skipped below; only 4 of the 6 slots carry cameras.
        cams = [None ,'camera75',None,'camera80','camera77','camera81']
    else:
        cams = ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
                'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT']
    img_metas = [input['img_metas']._data]
    points = input['points']._data.numpy()
    radar = None
    if 'pts_radar' in input.keys():
        radar = input['pts_radar']._data.numpy()
    if 'seq' in input.keys():# fuse multi-frame point clouds into the current frame
        for data in input['seq']:
            # pre2cur: transform from the previous frame to the current one.
            pre2cur = data['pre2cur']
            points_pre = data['points']._data.numpy()
            points_pre = zwh_viewer.transform_3Dpts(points_pre,  [pre2cur])
            points = np.concatenate((points, points_pre), axis=0)
            if 'radar' in data:
                # radar = np.concatenate((radar,data['radar'].tensor.numpy()),axis=0)
                radar_pre = zwh_viewer.tranform_pt(data['radar']._data.numpy(),  [pre2cur])
                radar = np.concatenate((radar, radar_pre), axis=0)
    #points = input['points']._data.numpy()
    gt_bboxes = input['gt_bboxes_3d'].data[0].tensor
    gt_labels = input['gt_labels_3d'].data[0]
    if img_metas[0]['box_mode_3d'] != Box3DMode.DEPTH:
        # Depth-mode conversion is intentionally disabled here.
        # points, gt_bboxes = to_depth_mode(points, gt_bboxes)
        pass
    # filename = osp.splitext(osp.basename(img_metas[0]['filename']))[0]

    # temp test

    if 'img' in input:
        import cv2
        import torch
        # img_cyw = cv2.resize(img_cyw,(704, 256))
        # Keep xyz only, then append a homogeneous 1 for 4x4 projection.
        points = points[:, :3]
        points = np.concatenate([points[:, :3], np.ones((len(points), 1))], axis=1)
        if radar is not None:
            radar = radar[:, :3]
            radar = np.concatenate([radar[:, :3], np.ones((len(radar), 1))], axis=1)
        boxes = gt_bboxes.numpy()[:,:7]
        # Shift z from bottom-center to geometric center for drawing.
        boxes[:, 2] = boxes[:, 2] + boxes[:, 5] * 0.5

        imgs = []
        # Colored point buffer: columns 0-2 xyz, columns 3-5 RGB (default 1s).
        points_rbg = np.ones((len(points), 6))
        points_rbg[:, :3] = points[:, :3]
        idx = -1
        for cam in cams:
            if cam is None:
                continue
            idx+=1
            if idx >= len(input['img'].data):
                continue
            # CHW tensor -> HWC numpy, then undo the mean/std normalization.
            img = input['img'].data[idx].detach().cpu().numpy().transpose(1, 2, 0)
            img_cyw = mmcv.imdenormalize(
                img, np.array([123.675, 116.28, 103.53]), np.array([58.395, 57.12, 57.375]), True).astype(np.uint8)
            img_path = input['img_metas'].data['filename'][idx]
            if not os.path.exists(img_path):
                img_path = img_path.replace('_normal', '')# Carla data may not have the '_normal' variant
            img_cyw2 = cv2.imread(img_path)

            # img_cyw = np.uint8(input['img'].data.permute(0,1,3,4,2).numpy()[0][idx])
            # cv2.imshow('sss',img_cyw)
            # cv2.waitKey()

            lidar2img = input['lidar2img'].data.numpy()[idx]

            # Local import: zwh_viewer must be importable from the CWD/sys.path.
            from zwh_viewer import draw_pts2img, draw_boxes2img, draw_img2pts, just_look, view3d, view_6imgs
            img_new = copy.deepcopy(img_cyw)
            img_new = draw_boxes2img(boxes, img_new, [lidar2img])
            imgs.append(img_new)
            # just_look(img_new)
            # Colorize the 3D points from the image (BGR -> RGB flip).
            draw_img2pts(img_cyw[:,:,::-1],points_rbg,[lidar2img])

        if show:
            view_6imgs(imgs,cams)
        if 'gt_depth' in input:

            def custom_depth_transform(depth, max_depth=255,
                                       near_threshold=20, beta=255):  # 10 m corresponds to a ratio of 10/25.5 (assuming 255 maps to the max depth, e.g. 25.5 m); beta is an amplification factor that makes near-range variation more visible
                """Remap a depth image so near-range differences stand out."""
                # Normalize depth values to [0, 1].
                depth_normalized = depth / max_depth
                # Initialize the transformed depth map.
                transformed_depth = np.zeros_like(depth_normalized, dtype=np.float32)
                # Apply a stronger transform to near-range depths.
                near_mask = depth_normalized < (near_threshold / max_depth)
                far_mask = ~near_mask
                # Near range: log transform to boost contrast.
                transformed_depth[near_mask] = np.log1p(depth_normalized[near_mask] * beta) / np.log(
                    26)
                # Far range: linear transform with the range compressed.
                transformed_depth[far_mask] = (depth_normalized[far_mask] - (near_threshold / max_depth)) / (
                            1 - (near_threshold / max_depth))
                # Remap to [0, 1], then convert back to 0-255.
                transformed_depth = np.clip(transformed_depth, 0, 1)
                transformed_depth = (transformed_depth * 255).astype(np.uint8)
                return transformed_depth
            def merge_depth_with_image(imgs, dep_imgs, alpha=0.5):
                """Alpha-blend colorized depth maps over their camera images."""
                merged_imgs=[]
                for img, dep_img in zip(imgs, dep_imgs):
                    # Apply the custom depth transform.
                    transformed_dep_img = custom_depth_transform(dep_img)

                    # Resize the depth map to match the image size.
                    resized_dep_img = cv2.resize(transformed_dep_img, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_LINEAR)

                    from matplotlib import cm
                    dep_img_colored = cm.viridis(resized_dep_img / 255.0)[..., :3]  # take RGB channels, normalized to 0-1
                    dep_img_colored = (dep_img_colored * 255).astype(np.uint8)  # convert to the 0-255 range

                    # Blend the image with the depth map.
                    merged_img = cv2.addWeighted(img.astype(np.float32), 1 - alpha,
                                                 dep_img_colored.astype(np.float32), alpha, 0)
                    merged_img = merged_img.astype(np.uint8)  # convert back to uint8
                    merged_imgs.append(merged_img)
                return merged_imgs

            dep_imgs = []
            for data in input['gt_depth'][0]:
                dep_imgs.append(data.clip(0,255).astype(np.uint8))
            view_6imgs(dep_imgs,cams)

            merged_imgs = merge_depth_with_image(imgs, dep_imgs)
            view_6imgs(merged_imgs,cams)
        # Shift z back to bottom-center before the 3D view.
        boxes[:,2] -=  boxes[:,5]*0.5
        if show:
            view3d(points_rbg, boxes)
            global points_rbgs
            global bboxes
            global poses
            points_rbgs.append(points_rbg)
            bboxes.append(boxes)
            poses.append(input['ego_pose'].data.detach().cpu().numpy())

            # Fuse every cached frame into a common (world) frame using the
            # stored ego poses, then view the accumulated cloud and boxes.
            new_points_clouds = []
            new_boxes = []
            for points, box, pose in zip(points_rbgs, bboxes, poses):
                tmp = zwh_viewer.tranform_pt_pad(deepcopy(points), [pose])
                tmp_boxes = zwh_viewer.tranform_pt_pad(deepcopy(box), [pose])
                # Rotate box yaw by the ego pose's yaw (z Euler angle).
                xyz = zwh_viewer.rotationMatrixToEulerAngles(pose[:3,:3])
                tmp_boxes[:,6]=tmp_boxes[:,6]+xyz[2]

                new_points_clouds.append(tmp)
                new_boxes.append(tmp_boxes)
            new_points_clouds = np.concatenate(new_points_clouds, axis=0)
            new_boxes = np.concatenate(new_boxes, axis=0)


            view3d(new_points_clouds, new_boxes)
        pass
        # # temp test

    # gt_bboxes[0,0] = 10
    # gt_bboxes[0,1] = 5
    # gt_bboxes[0,2] = 0
    # gt_bboxes[0,3] = 10
    # gt_bboxes[0,4] = 5
    # gt_bboxes[0,5] = 4
    # gt_bboxes[0,6] = 3.14*45/180.0
    # show_result(
    #     points,
    #     gt_bboxes.clone().numpy(),
    #     None,
    #     out_dir,
    #     filename,
    #     radar=radar,
    #     show=show,
    #     snapshot=True,
    #     gt_labels=gt_labels,
    #     show_speed=False)
    #
    # if not show:
    #     return imgs


def _get_rot(h):
    return torch.Tensor(
        [
            [np.cos(h), np.sin(h)],
            [-np.sin(h), np.cos(h)],
        ]
    )

def _img_transform(img, resize, resize_dims, crop, flip, rotate):
    """Apply resize/crop/flip/rotate to a PIL image and track the transform.

    Args:
        img (PIL.Image): Input image.
        resize (float): Scale factor applied to pixel coordinates.
        resize_dims (tuple): Target (width, height) passed to ``Image.resize``.
        crop (tuple): Crop box (left, upper, right, lower) in resized pixels.
        flip (bool): Whether to flip horizontally.
        rotate (float): Rotation angle in degrees.

    Returns:
        tuple: ``(img, ida_mat)`` where ``ida_mat`` is the 3x3 homogeneous
        matrix mapping original pixel coordinates to augmented ones.
    """
    # Accumulators for the 2x2 linear part and 2-vector translation.
    ida_rot = torch.eye(2)
    ida_tran = torch.zeros(2)
    # adjust image
    img = img.resize(resize_dims)
    img = img.crop(crop)
    if flip:
        img = img.transpose(method=Image.FLIP_LEFT_RIGHT)
    img = img.rotate(rotate)

    # post-homography transformation
    ida_rot *= resize
    ida_tran -= torch.Tensor(crop[:2])
    if flip:
        # Mirror x about the vertical axis, then shift back by the crop width.
        A = torch.Tensor([[-1, 0], [0, 1]])
        b = torch.Tensor([crop[2] - crop[0], 0])
        ida_rot = A.matmul(ida_rot)
        ida_tran = A.matmul(ida_tran) + b
    # Rotate about the crop center (PIL's rotate pivots on the image center),
    # hence the -b/+b conjugation below.
    A = _get_rot(rotate / 180 * np.pi)
    b = torch.Tensor([crop[2] - crop[0], crop[3] - crop[1]]) / 2
    b = A.matmul(-b) + b
    ida_rot = A.matmul(ida_rot)
    ida_tran = A.matmul(ida_tran) + b
    # Pack the 2x2 rotation and translation into a 3x3 homogeneous matrix.
    ida_mat = torch.eye(3)
    ida_mat[:2, :2] = ida_rot
    ida_mat[:2, 2] = ida_tran
    return img, ida_mat

def show_points(input, show=False):
    """Visualize a raw 3D point cloud, overlaying radar points if present."""
    points = input['points'][0].data.numpy()
    if not show:
        return
    vis = open3d_vis.Visualizer(points)
    # Radar points are drawn larger and in a distinct color.
    if 'pts_radar' in input.keys():
        radar = input['pts_radar'][0]._data.numpy()
        vis.add_points(radar, points_size=5, point_color=(0.8, 0.2, 0.4))
    vis.show()


def show_seg_data(input, out_dir, show=False):
    """Visualize a 3D point cloud together with its segmentation mask."""
    meta = input['img_metas']._data
    pts = input['points']._data.numpy()
    seg_mask = input['pts_semantic_mask']._data.numpy()
    # Use the point-cloud file's stem as the output name.
    base_name = osp.basename(meta['pts_filename'])
    filename = osp.splitext(base_name)[0]
    show_seg_result(
        pts,
        seg_mask.copy(),
        None,
        out_dir,
        filename,
        np.array(meta['PALETTE']),
        meta['ignore_index'],
        show=show,
        snapshot=True)


def show_proj_bbox_img(input, out_dir, show=False, is_nus_mono=False):
    """Project 3D GT bboxes onto the 2D image and visualize the result.

    The projection matrix and box mode are chosen from the box type; when
    there are no boxes (or the type is unknown) only the image is shown.
    """
    gt_bboxes = input['gt_bboxes_3d']._data
    img_metas = input['img_metas']._data
    # Channel-first tensor -> channel-last array for visualization.
    img = input['img']._data.numpy().transpose(1, 2, 0)
    # No 3D gt bboxes: drop them so we fall through to image-only display.
    if gt_bboxes.tensor.shape[0] == 0:
        gt_bboxes = None
    filename = Path(img_metas['filename']).name

    if isinstance(gt_bboxes, DepthInstance3DBoxes):
        proj_mat, box_mode = None, 'depth'
    elif isinstance(gt_bboxes, LiDARInstance3DBoxes):
        proj_mat, box_mode = img_metas['lidar2img'], 'lidar'
    elif isinstance(gt_bboxes, CameraInstance3DBoxes):
        proj_mat, box_mode = img_metas['cam2img'], 'camera'
    else:
        # Can't project, just show the image.
        warnings.warn(
            f'unrecognized gt box type {type(gt_bboxes)}, only show image')
        show_multi_modality_result(
            img, None, None, None, out_dir, filename, show=show)
        return

    show_multi_modality_result(
        img,
        gt_bboxes,
        None,
        proj_mat,
        out_dir,
        filename,
        box_mode=box_mode,
        img_metas=img_metas,
        show=show)


def main():
    """Entry point: build the dataset from the config and visualize samples."""
    args = parse_args()

    if args.output_dir is not None:
        mkdir_or_exist(args.output_dir)

    cfg = build_data_cfg(args.config, args.skip_type, args.aug,
                         args.cfg_options)

    # Import modules from plugin/xx so custom datasets/pipelines register
    # themselves before build_dataset runs. The two previous branches were
    # identical except for the source path, so they are unified here.
    if hasattr(cfg, 'plugin') and cfg.plugin:
        import importlib
        if hasattr(cfg, 'plugin_dir'):
            plugin_path = cfg.plugin_dir
        else:
            # Import dir defaults to the dirpath of the config file.
            plugin_path = args.config
        _module_path = os.path.dirname(plugin_path).replace('/', '.')
        print(_module_path)
        plg_lib = importlib.import_module(_module_path)

    try:
        dataset = build_dataset(
            cfg.data[args.dataset_fun], default_args=dict(filter_empty_gt=False))
    except TypeError:  # seg dataset doesn't have `filter_empty_gt` key
        dataset = build_dataset(cfg.data[args.dataset_fun])

    dataset_type = cfg.dataset_type
    # Infer the input data flavor from the dataset class-name prefix.
    args.data_type = 'cyw' if dataset_type[0:3] == 'CYW' else 'nus'

    # Configure visualization mode.
    vis_task = args.task  # 'det', 'seg', 'multi_modality-det', 'mono-det'
    # NOTE: this duplicates the tqdm bar below; kept for parity with mmcv tools.
    progress_bar = mmcv.ProgressBar(len(dataset))

    if not args.online:
        # Ensure the snapshot directory exists before any cv2.imwrite call.
        mkdir_or_exist(args.save_dir)

    for idx, input in enumerate(tqdm.tqdm(dataset)):
        if args.dataset_fun in ['val', 'test']:
            show_points(input, show=args.online)
            continue
        if vis_task in ['det', 'multi_modality-det']:
            # Show 3D bboxes on 3D point clouds.
            imgs = show_det_data(
                input, args.output_dir, show=args.online,
                data_type=args.data_type)
            # show_det_data may return None (its snapshot return is currently
            # disabled); guard instead of crashing on imgs[0].
            if not args.online and imgs:
                img = cv2.resize(imgs[0], (720, 540))
                cv2.imwrite(os.path.join(args.save_dir, str(idx) + '.jpg'), img)
        if vis_task in ['multi_modality-det', 'mono-det']:
            # Project 3D bboxes to the 2D image.
            show_proj_bbox_img(
                input,
                args.output_dir,
                show=args.online,
                is_nus_mono=(dataset_type == 'NuScenesMonoDataset'))
        elif vis_task in ['seg']:
            # Show the 3D segmentation mask on 3D point clouds.
            show_seg_data(input, args.output_dir, show=args.online)
        progress_bar.update()


if __name__ == '__main__':
    main()
