import numpy as np
import cv2
import copy
import matplotlib.pyplot as plt
from ..zmath import pad_ones, gen_corners
from .open3d_vis import Visualizer

def tranform_pt(points, tfs: list):
    '''
    Transform homogeneous points from frame a to frame b by chaining transforms.

    Args:
        points: (n, 4) array of homogeneous points.
        tfs: list of 4x4 transform matrices [a2x, x2y, ..., z2b],
             applied in order (a2x is applied first).
    Returns:
        (n, 4) array of transformed points (a new array; the input is
        not modified).
    '''
    assert points.shape[1] == 4
    # Compose the whole chain into one matrix: tf_all = z2b @ ... @ x2y @ a2x
    tf_all = np.eye(4)
    for tf in tfs:
        tf_all = tf @ tf_all
    # (tf_all @ points.T).T expressed as a single right-multiplication
    return points @ tf_all.T

def tranform_pt_pad(points, tfs: list):
    """Transform the xyz columns of `points` in place; extra columns are kept.

    The first three columns are padded to homogeneous coordinates, pushed
    through the transform chain, and written back. Returns the same
    (mutated) array for convenience.
    """
    homogeneous = pad_ones(points[:, :3])
    transformed = tranform_pt(homogeneous, tfs)
    points[:, :3] = transformed[:, :3]
    return points


def tranform_pt_2_pixel(points, tfs: list):
    """Project homogeneous 3D points into pixel coordinates.

    Applies the transform chain, then divides x and y by the depth z
    (perspective division). The depth column is left intact so callers
    can filter points behind the camera.
    """
    assert points.shape[1] == 4
    projected = tranform_pt(points, tfs)
    depth = projected[:, 2:3]
    projected[:, :2] = projected[:, :2] / depth
    return projected


def draw_pts2img(points, image, tfs: list, pts_size=1):
    """Project lidar points onto a copy of `image` and draw them as filled dots.

    Points that project behind the camera (depth <= 0) are skipped.
    The dot colour is a depth-dependent heuristic so near and far points
    are visually distinguishable. Returns the annotated copy of `image`.
    """
    # assert points.shape[1] == 4 or points.shape[1] == 6
    homogeneous = pad_ones(points[:, :3])
    canvas = copy.deepcopy(image)
    pixels = tranform_pt_2_pixel(homogeneous, tfs)
    in_front = pixels[:, 2] > 0
    drawable = pixels[in_front][:, :3].astype(np.int32)
    for x, y, z in drawable:
        color = (int(20 * z), int(10 * z // 5), int(255.0 / 1600 * z * z))
        # draw the lidar point onto the image
        cv2.circle(canvas, (x, y), pts_size, color, thickness=-1, lineType=10)
    return canvas


def draw_boxes_8c2img(boxes_8c, image, tfs: list):
    """Draw 3D boxes given as 8 corners each onto a copy of `image`.

    Args:
        boxes_8c: (n, 8, 3) array of box corner coordinates.
        image: image to annotate (not modified).
        tfs: transform chain from corner coordinates to pixel space.
    Returns:
        A new image with the 12 edges of each box drawn in green.

    Corner layout (edges: verticals 0-4/1-5/2-6/3-7, top ring 0-1-2-3,
    bottom ring 4-5-6-7):
        0 ------ 1
      / |     /  |
    4 ------ 5   |
    |   3 ---|-- 2
    |  /     | /
    7 ------ 6
    """
    canvas = copy.deepcopy(image)
    assert boxes_8c.shape[1] == 8 and boxes_8c.shape[2] == 3
    # the 12 cuboid edges as (start, end) corner-index pairs
    starts = [0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7]
    ends = [4, 5, 6, 7, 1, 2, 3, 0, 5, 6, 7, 4]
    for corners_3d in boxes_8c:
        ones = np.ones(shape=(corners_3d.shape[0], 1))
        corners_h = np.concatenate((corners_3d, ones), axis=1)
        pixels = tranform_pt_2_pixel(corners_h, tfs).astype(int)
        for a, b in zip(pixels[starts], pixels[ends]):
            # skip edges with an endpoint behind the camera
            if a[2] <= 0 or b[2] <= 0:
                continue
            cv2.line(canvas, (a[0], a[1]), (b[0], b[1]), (0, 255, 0), 2, 16)
    return canvas


def draw_boxes2img(boxes, image, tfs: list):
    """Draw parameterized 3D boxes onto a copy of `image`.

    Args:
        boxes: (n, >=7) array of box parameters, expanded to 8 corners
            via gen_corners.
        image: image to annotate (not modified).
        tfs: transform chain from box coordinates to pixel space.
    Returns:
        A new image with the 12 edges of each box drawn in green.

    Corner layout (edges: verticals 0-4/1-5/2-6/3-7, top ring 0-1-2-3,
    bottom ring 4-5-6-7):
        0 ------ 1
      / |     /  |
    4 ------ 5   |
    |   3 ---|-- 2
    |  /     | /
    7 ------ 6
    """
    canvas = copy.deepcopy(image)
    assert boxes.shape[1] >= 7
    # the 12 cuboid edges as (start, end) corner-index pairs
    starts = [0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7]
    ends = [4, 5, 6, 7, 1, 2, 3, 0, 5, 6, 7, 4]
    for box in boxes:
        corners_3d = gen_corners(box).T
        corners_h = pad_ones(corners_3d)
        pixels = tranform_pt_2_pixel(corners_h, tfs).astype(int)
        for a, b in zip(pixels[starts], pixels[ends]):
            # skip edges with an endpoint behind the camera
            if a[2] <= 0 or b[2] <= 0:
                continue
            cv2.line(canvas, (a[0], a[1]), (b[0], b[1]), (0, 255, 0), 2, 16)
    return canvas


def draw_img2pts(image, points_rgb, tfs: list):
    """Colorize points by sampling the image pixels they project onto.

    Args:
        image: HxWx(>=3) image whose colors are sampled.
        points_rgb: (n, 6) array [x, y, z, r, g, b]. The rgb columns of
            points that project inside the image are overwritten IN PLACE.
        tfs: transform chain mapping point coordinates to pixel space.
    Returns:
        The same (mutated) points_rgb array, for convenience.
    """
    assert points_rgb.shape[1] == 6
    points4 = np.concatenate((points_rgb[:, :3], np.ones(shape=(len(points_rgb), 1))), axis=1)
    image_based_points = tranform_pt_2_pixel(points4, tfs)
    # keep only points that land inside the image and in front of the camera
    valid_indices = (image_based_points[:, 0] >= 0) & (image_based_points[:, 0] < image.shape[1]) & \
                    (image_based_points[:, 1] >= 0) & (image_based_points[:, 1] < image.shape[0]) & \
                    (image_based_points[:, 2] >= 0)
    valid_points = image_based_points[valid_indices].astype(np.int32)
    colors = image[valid_points[:, 1], valid_points[:, 0], :3]
    points_rgb[valid_indices, 3:6] = colors
    return points_rgb


def read_label_cyw(file_path):
    '''
    Parse a cyw-format label file into a dict of numpy arrays.

    Each line holds space-separated fields: name x y z h w l rotation_y.
    Returns a dict with keys 'name', 'location', 'dimensions', 'rotation_y'.
    '''
    with open(file_path, 'r') as f:
        rows = [row.strip().split(' ') for row in f.readlines()]
    return {
        'name': np.array([row[0].lower() for row in rows]),
        # lidar-frame coordinates (x, y, z): centre of the bottom face
        'location': np.array([row[1:4] for row in rows], dtype=np.float32),
        # 3D size (dimensions) hwl -> camera coordinates (lhw)
        'dimensions': np.array([row[4:7] for row in rows], dtype=np.float32),
        # rotation angle (radians) about the camera-frame axis
        'rotation_y': np.array([row[7:8] for row in rows], dtype=np.float32),
    }


def read_label_kitti(file_path):
    '''
    Parse a KITTI-format label file into a dict of numpy arrays.

    Each line: name truncated occluded alpha bbox(4) dimensions(hwl)
    location(3, camera frame) rotation_y. Dimensions are reordered to lwh
    and the location is shifted from the bottom-face centre to the box
    centre (camera y points down, hence the minus).

    Returns:
        dict with keys 'name', 'truncated', 'occluded', 'alpha', 'bbox',
        'dimensions', 'location', 'rotation_y'.
    '''
    with open(file_path, 'r') as f:
        lines = f.readlines()
    lines = [line.strip().split(' ') for line in lines]
    annotation = {}
    annotation['name'] = np.array([line[0:1] for line in lines])
    # np.float / np.int were removed in NumPy >= 1.24; use explicit dtypes
    annotation['truncated'] = np.array([line[1:2] for line in lines], dtype=np.float64)  # truncation ratio
    annotation['occluded'] = np.array([line[2:3] for line in lines], dtype=np.int64)  # occlusion level
    annotation['alpha'] = np.array([line[3:4] for line in lines], dtype=np.float64)  # observation angle
    annotation['bbox'] = np.array([line[4:8] for line in lines], dtype=np.float64)  # 2D image bbox, left-top right-bottom
    annotation['dimensions'] = np.array([line[8:11] for line in lines], dtype=np.float64)[:,
                               [2, 1, 0]]  # 3D size (dimensions) hwl -> lwh
    annotation['location'] = np.array([line[11:14] for line in lines],
                                      dtype=np.float64)  # camera-frame coordinates (x, y, z), bottom-face centre
    # KITTI stores the bottom-face centre; camera y points down, so subtracting
    # half the height moves it to the box centre before any lidar conversion.
    annotation['location'][:, 1] = annotation['location'][:, 1] - 0.5 * annotation['dimensions'][:, 2]
    annotation['rotation_y'] = np.array([line[14:15] for line in lines], dtype=np.float64)  # rotation (radians) about the camera y axis

    return annotation


def read_label_dr(file_path):
    '''
    Read a "dr"-format label file and return an annotation dict.

    Each space-separated line needs at least 15 fields: field 0 is the
    class name, fields 8..10 the box size (read reversed via line[10:7:-1]),
    fields 11..13 the camera-frame location and field 14 the yaw angle.
    Locations are converted from the camera frame into the lidar frame
    using a hard-coded extrinsic and an extra 90-degree z rotation, then
    lifted by half the box height so they describe the box centre.

    Returns:
        dict with keys 'name', 'location' (lidar frame, box centre),
        'dimensions' and 'rotation_y'.
    '''
    with open(file_path, 'r') as f:
        lines = f.readlines()
    lines = [line.strip().split(' ') for line in lines]
    annotation = {}
    annotation['name'] = np.array([line[0].lower() for line in lines])
    annotation['location'] = np.array([line[11:14] for line in lines],
                                      dtype=np.float32)  # camera-frame coordinates (x, y, z), bottom-face centre
    annotation['dimensions'] = np.array([line[10:7:-1] for line in lines],
                                        dtype=np.float32)  # 3D size: fields 8..10 reversed (hwl -> lhw-style order)
    # annotation['dimensions'] = annotation['dimensions'][:, [1, 0, 2]]  # 3D size (dimensions) hwl -> camera coordinates (lhw)
    annotation['rotation_y'] = - np.array([line[14:15] for line in lines],
                                          dtype=np.float32)  # rotation (radians) about the camera axis, sign flipped
    annotation['rotation_y'] = annotation['rotation_y'] - np.pi / 2  # correct the rotation by -pi/2 for the frame change
    #
    # Box coordinate transform: camera frame ---> lidar frame
    # hard-coded lidar-to-camera extrinsic (rotation + translation)
    tr_velo2cam = np.array([[0.9994015951319486, -0.03199927972993585, 0.013133839670170688, 0.2710357611083472],
                            [0.013650110615193858, 0.015969147292999922, -0.9997793060545548, -0.11226274034223951],
                            [0.03178248146242525, 0.9993603116151348, 0.01639638498544132, -0.02040423686369943],
                            [0, 0, 0, 1]])
    # extra 90-degree rotation about z applied after the camera->lidar transform
    trans_z = np.array([[0.0, 1.0, 0.0, 0.0], [-1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0, 0, 0, 1]])
    # invert the rigid transform: R' = inv(R), t' = -inv(R) @ t
    tr_cam2velo = tr_velo2cam.copy()
    tr_cam2velo[:3, :3] = np.linalg.inv(tr_velo2cam[:3, :3])
    tr_cam2velo[:3, -1] = np.dot(-tr_cam2velo[:3, :3], tr_velo2cam[:3, -1])

    # transform the 3D box centre coordinates --->
    # box_center = box[:, :3]
    box_center1 = np.concatenate((annotation['location'], np.ones((len(annotation['location']), 1))), axis=1)
    annotation['location'] = (trans_z @ tr_cam2velo @ box_center1.T).T[:, :3]
    # annotation['location'] = (tr_cam2velo @ box_center1.T).T[:, :3]
    annotation['location'][:, 2] += 0.5 * annotation['dimensions'][:, 2]  # height correction: camera labels use the ground plane, lidar boxes use the centre

    return annotation


def just_look_array(img, title: str = '0'):
    """Show `img` in a resizable OpenCV window; blocks until a key is pressed."""
    window_name = title
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.imshow(window_name, img)
    cv2.waitKey()


def kpos_self_response(img_pos_embed, img_id, pix_x, pix_y=0):
    """Cosine-similarity response of one pixel's embedding against all embeddings.

    The query vector is img_pos_embed[img_id, pix_y, pix_x]; similarities are
    rescaled from [-1, 1] into [0, 1] for display.
    """
    import torch.nn.functional as F
    query = img_pos_embed[img_id, pix_y, pix_x, :]
    response = F.cosine_similarity(query, img_pos_embed, dim=-1)
    return response * 0.5 + 0.5


def width_respone(input, img_id, pix_x, fixed_range=True):
    """Visualize the width positional-embedding self-response at pix_x.

    Detaches `input` to CPU, inserts a singleton height axis, computes the
    cosine response and renders it with view_6sims. Returns the similarity map.
    """
    embed = input.detach().cpu()[0].unsqueeze(1)
    sims = kpos_self_response(embed, img_id, pix_x, 0)
    view_6sims(sims, fixed_range=fixed_range)
    return sims


def pic_respone(input, img_id, pix_x, pix_y, fixed_range=True):
    """Visualize the 2D positional-embedding self-response at (pix_x, pix_y).

    Detaches `input` to CPU, computes the cosine response for the queried
    pixel and renders it with view_6sims. Returns the similarity map.
    """
    embed = input.detach().cpu()[0]
    sims = kpos_self_response(embed, img_id, pix_x, pix_y)
    view_6sims(sims, fixed_range=fixed_range)
    return sims


def view_6sims(images, titles=['0', '1', '2', '3', '4', '5'], fixed_range=True):
    """Render up to six similarity maps in a 2x3 grid with the viridis colormap.

    When fixed_range is True every map shares the [0, 1] colour range;
    otherwise each map is normalized to its own min/max. Blocks on plt.show().
    """
    fig, axes = plt.subplots(2, 3, figsize=(24, 9))  # 2 rows x 3 columns
    for axis, sim, caption in zip(axes.flat, images, titles):
        vmin, vmax = (0, 1) if fixed_range else (sim.min(), sim.max())
        axis.imshow(sim, cmap='viridis', vmin=vmin, vmax=vmax)
        axis.set_title(caption)
        axis.axis('off')
    plt.tight_layout()  # fill the figure area
    plt.show()


def view_6imgs(images, titles=['0', '1', '2', '3', '4', '5'], fig_title='Global Title'):
    """Show up to six images in a 2x3 grid under one global title.

    3-channel images are assumed BGR and channel-flipped to RGB for display;
    anything else is shown as-is. Blocks on plt.show().
    """
    fig, axes = plt.subplots(2, 3, figsize=(24, 9))  # 2 rows x 3 columns
    fig.suptitle(fig_title, fontsize=16)  # global title
    for axis, img, caption in zip(axes.flat, images, titles):
        is_bgr = len(img.shape) == 3 and img.shape[2] == 3
        axis.imshow(img[:, :, ::-1] if is_bgr else img)
        axis.set_title(caption)
        axis.axis('off')
    plt.tight_layout()  # fill the figure area
    plt.show()

def view3d(pts, boxes=None, mode='xyzrgb'):
    """Open an interactive Open3D view of a point cloud with optional boxes.

    A coordinate-frame mesh of size 2 is added at the origin for orientation.
    Blocks until the viewer window is closed.
    """
    import open3d as o3d
    vis = Visualizer(pts, boxes, points_size=5, mode=mode)
    origin_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2.0, origin=[0, 0, 0])
    vis.o3d_visualizer.add_geometry(origin_frame)
    vis.show()
