import cv2
import numpy as np
import json
import open3d as o3d


# 立体图像矫正
# Stereo image rectification
def BinocularImageRectify(imageLeft, imageRight, CameraMatrixLeft, distCoeffsLeft, CameraMatrixRight, distCoeffsRight,
                          R, T):
    """
    Rectify a stereo image pair so that corresponding rows become epipolar lines.

    :param imageLeft: image from the left camera of the stereo rig
    :param imageRight: image from the right camera of the stereo rig
    :param CameraMatrixLeft: intrinsic matrix of the left camera
    :param distCoeffsLeft: distortion coefficients of the left camera
    :param CameraMatrixRight: intrinsic matrix of the right camera
    :param distCoeffsRight: distortion coefficients of the right camera
    :param R: rotation matrix between the two cameras
    :param T: translation vector between the two cameras
    :return: rectified left image, rectified right image, reprojection matrix Q
    """
    height, width = imageLeft.shape[:2]
    size = (width, height)

    # Bouguet stereo rectification: per-camera rectification rotations
    # (R1/R2), rectified projection matrices (P1/P2) and the
    # disparity-to-depth reprojection matrix Q.
    R1, R2, P1, P2, Q, _, _ = cv2.stereoRectify(CameraMatrixLeft, distCoeffsLeft, CameraMatrixRight, distCoeffsRight,
                                                size, R, T, flags=0)

    # Build the undistort+rectify lookup maps for each camera ...
    mapLx, mapLy = cv2.initUndistortRectifyMap(CameraMatrixLeft, distCoeffsLeft, R1, P1, size, cv2.CV_32FC1)
    mapRx, mapRy = cv2.initUndistortRectifyMap(CameraMatrixRight, distCoeffsRight, R2, P2, size, cv2.CV_32FC1)

    # ... and warp both input images through them.
    rectifiedLeft = cv2.remap(imageLeft, mapLx, mapLy, cv2.INTER_LINEAR)
    rectifiedRight = cv2.remap(imageRight, mapRx, mapRy, cv2.INTER_LINEAR)

    return rectifiedLeft, rectifiedRight, Q


# 获得视差图像
# Compute a disparity image from a rectified stereo pair
def getDepthImage(imgL, imgR, window_size=3, min_disp=0, num=6, blockSize=22, uniqueness_ratio=10, speckle_range=32,
                  speckle_window_size=100, disp12_max_diff=1):
    """
    Run semi-global block matching (SGBM) on a rectified stereo pair.

    :param imgL: rectified left image (BGR)
    :param imgR: rectified right image (BGR)
    :param window_size: window size used to derive the P1/P2 smoothness penalties
    :param min_disp: minimum possible disparity value
    :param num: disparity search range is 16 * num (kept a multiple of 16)
    :param blockSize: matched block size (NOTE(review): OpenCV documents this as
                      an odd value, 22 is even — confirm intended default)
    :param uniqueness_ratio: margin (%) by which the best match must beat the second best
    :param speckle_range: maximum disparity variation within a connected component
    :param speckle_window_size: maximum size of smooth disparity regions to invalidate
    :param disp12_max_diff: maximum allowed difference in the left-right disparity check
    :return: dis — raw disparity scaled by 16 (divide by 16 before metric use);
             dis_color — color-mapped 8-bit visualization of the disparity
    """
    smooth_p1 = 8 * 3 * window_size ** 2
    smooth_p2 = 32 * 3 * window_size ** 2

    # SGBM matching is done on single-channel images.
    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)

    matcher = cv2.StereoSGBM_create(minDisparity=min_disp,
                                    numDisparities=16 * num,
                                    blockSize=blockSize,
                                    uniquenessRatio=uniqueness_ratio,
                                    speckleRange=speckle_range,
                                    speckleWindowSize=speckle_window_size,
                                    disp12MaxDiff=disp12_max_diff,
                                    P1=smooth_p1,
                                    P2=smooth_p2)
    # compute() returns CV_16S values: fixed-point disparities scaled by 16.
    dis = matcher.compute(grayL, grayR)

    # Stretch to 0-255 and apply a color map so the result is displayable.
    dis_color = cv2.normalize(dis, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    dis_color = cv2.applyColorMap(dis_color, 2)
    return dis, dis_color


# 获得双目理想模型系统参数
# Extract the ideal binocular system parameters from the reprojection matrix
def getBinocularSystemParameter(Q):
    """
    :param Q: 4x4 disparity-to-depth reprojection matrix (from stereoRectify)
    :return: b baseline length, f focal length,
             cx / cy principal point coordinates of the left camera
    """
    # Q stores -cx and -cy in the last column of rows 0/1, the focal
    # length at (2, 3), and -1/baseline at (3, 2).
    baseline = -1 / Q[3, 2]
    focal = Q[2, 3]
    principal_x = -Q[0, 3]
    principal_y = -Q[1, 3]
    return baseline, focal, principal_x, principal_y


# 获得目标物体到相机的距离
# Compute the distance from a target object to the camera
def getTargetObjectDistance(target_disparity, focal_length, baseline):
    """
    Triangulate depth from disparity: Z = B * f / d.

    :param target_disparity: disparity value at the target pixel
    :param focal_length: focal length of the rectified stereo system
    :param baseline: baseline length of the stereo rig
    :return: distance from the target object to the camera
    """
    return focal_length * baseline / target_disparity


# 获得三维图像
# Build a colored 3-D point set from the disparity image
def getThreeDimensionalImage(img, b, f, cx, cy, dis):
    """
    Reproject every valid disparity pixel to a 3-D point with its color.

    :param img: left camera image (BGR, 3 channels)
    :param b: baseline length of the stereo system
    :param f: focal length of the stereo system
    :param cx: x coordinate of the left camera's principal point
    :param cy: y coordinate of the left camera's principal point
    :param dis: disparity image from getDepthImage (values scaled by 16;
                0 and -16 mark invalid pixels)
    :return: (H*W, 6) array; each filled row is [X, Y, Z, R, G, B] in
             row-major pixel order, remaining rows stay zero
    """
    # BGR -> RGB: reversing the channel axis is equivalent to
    # cv2.cvtColor(img, cv2.COLOR_BGR2RGB) for a 3-channel image.
    img_color = img[:, :, ::-1]

    # Size the output to the actual image. The previous hard-coded
    # 640*480 buffer overflowed (IndexError) for larger inputs.
    output_points = np.zeros((dis.shape[0] * dis.shape[1], 6))

    # Valid pixels: 0 and -16 ((minDisparity - 1) * 16 for min_disp=0)
    # mark unmatched disparities and are skipped.
    valid = (dis != 0) & (dis != -16)
    # np.nonzero yields indices in row-major order, matching the
    # original nested row/col loop.
    rows, cols = np.nonzero(valid)
    d = dis[rows, cols].astype(np.float64)

    n = rows.size
    # Disparity is scaled by 16, hence the 16* factor in each coordinate.
    output_points[:n, 0] = 16 * b * (cols - cx) / d
    output_points[:n, 1] = 16 * b * (rows - cy) / d
    output_points[:n, 2] = 16 * b * f / d
    output_points[:n, 3:6] = img_color[rows, cols]

    return output_points


# 读取json文件中的参数
# Read calibration parameters from a json file
def read_camera_parameters(json_file):
    """
    Load stereo calibration data stored as nested lists in a JSON file.

    :param json_file: path to the JSON parameter file
    :return: (camera_matrix1, dist_coeffs1, camera_matrix2, dist_coeffs2,
              rotation_matrix, translation_matrix) as numpy arrays
    """
    with open(json_file, 'r') as fp:
        params = json.load(fp)

    # Convert each stored list back into a numpy array, in the same
    # order the caller unpacks them.
    keys = ('camera_matrix1', 'dist_coeffs1', 'camera_matrix2',
            'dist_coeffs2', 'rotation_matrix', 'translation_matrix')
    return tuple(np.array(params[key]) for key in keys)


def draw_point_cloud(matrix):
    """
    Display an (N, 6) point matrix [x, y, z, r, g, b] with Open3D.

    :param matrix: point data; columns 0-2 are coordinates,
                   columns 3-5 are colors in the 0-255 range
    """
    cloud = o3d.geometry.PointCloud()

    # First three columns are coordinates, last three are colors.
    cloud.points = o3d.utility.Vector3dVector(matrix[:, :3])
    # Open3D expects colors in [0, 1], so normalize the 0-255 values.
    cloud.colors = o3d.utility.Vector3dVector(matrix[:, 3:] / 255.0)

    # Show the cloud in a blocking visualizer window.
    viewer = o3d.visualization.Visualizer()
    viewer.create_window()
    viewer.add_geometry(cloud)
    viewer.run()
    viewer.destroy_window()
