# 双目标定加深度图生成 (stereo calibration and depth-map generation)
import cv2

import cv2.ximgproc
import matplotlib.pyplot as plt # plt 用于显示图片
import matplotlib.image as mpimg # mpimg 用于读取图片
from mpl_toolkits.mplot3d import Axes3D
 
import sys
import numpy as np
import glob
import math

class shuangmu:
    """Parameter store for the stereo rig ("shuangmu" = binocular/stereo).

    Pre-filled with the results of an earlier calibration run; a call to
    StereoCalibration.calibration_photo() overwrites these fields on the
    module-level singleton `stereo`.
    """
    def __init__(self):
        # self.m1 = 0
        # self.m2 = 0
        # self.d1 = 0
        # self.d2 = 0
        # self.R = 0
        # self.T = 0
        # Left camera intrinsic matrix (3x3).
        self.m1 = np.array([[1.27267825e+03, 0.00000000e+00, 9.88387071e+02],
                            [0.00000000e+00, 1.27265778e+03, 6.36566555e+02],
                            [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
        # Right camera intrinsic matrix (3x3).
        self.m2 = np.array([[1.27386637e+03, 0.00000000e+00, 9.65632284e+02],
                            [0.00000000e+00, 1.27367885e+03, 6.39194967e+02],
                            [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
        # Distortion coefficients [k1, k2, p1, p2, k3] for the left / right camera.
        self.d1 = np.array([-1.29939435e-01, 1.51095405e-01, -2.01009309e-04, 2.48735316e-05, -3.90017117e-02])
        self.d2 = np.array([-0.13019095, 0.15059209, 0.00074157, -0.00023195, -0.03407628])
        # Rotation matrix and translation vector of the right camera relative
        # to the left (T presumably in mm, given the 30 mm board squares used
        # for calibration — TODO confirm).
        self.R = np.array([[ 9.99904579e-01, -1.33709919e-02, -3.47135126e-03],
                            [ 1.33741257e-02, 9.99910174e-01, 8.81120659e-04],
                            [ 3.45925798e-03, -9.27462870e-04, 9.99993587e-01]])
        self.T = np.array([[-300.19742601],
                            [-2.21131235],
                            [-1.19155963]])
# Module-level singleton shared by the calibration and rectification code below.
stereo = shuangmu()

class StereoCalibration(object):
    """Stereo calibration from paired chessboard images.

    Loads the left/right calibration image lists on construction;
    calibration_photo() detects the chessboard in every pair, runs the
    per-camera calibration, the stereo calibration, and writes all
    results into the module-level `stereo` parameter store.
    """

    def __init__(self):
        # Sorted file lists so that index i of each list is the same shot.
        self.imagesL = self.read_images('/home/cky/Action/projects/ws10_binocular_camera/data/frame/camL')
        self.imagesR = self.read_images('/home/cky/Action/projects/ws10_binocular_camera/data/frame/camR')
        print(len(self.imagesL))
        print(len(self.imagesR))

    def read_images(self, cal_path):
        """Return the sorted list of *.bmp file paths directly under cal_path."""
        filepath = glob.glob(cal_path + '/*.bmp')
        filepath.sort()
        return filepath

    def calibration_photo(self):
        """Detect corners, calibrate both cameras, then the stereo pair.

        Raises:
            RuntimeError: if the chessboard is not found in any image pair.
        """
        # Inner-corner grid of the calibration chessboard.
        x_nums = 11     # corners per row
        y_nums = 8      # corners per column
        # Object points: an (x, y, 0) grid in the board's own coordinate frame.
        world_point = np.zeros((x_nums * y_nums, 3), np.float32)
        world_point[:, :2] = np.mgrid[:x_nums, :y_nums].T.reshape(-1, 2)

        world_position = []     # object points, one entry per accepted pair
        image_positionl = []    # refined corner pixels, left camera
        image_positionr = []    # refined corner pixels, right camera
        # Termination criteria for the sub-pixel corner refinement.
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        self.world = world_point
        image_shape = None
        # zip() pairs the lists safely even if one directory has extra files
        # (the original index loop raised IndexError in that case).
        for image_path_l, image_path_r in zip(self.imagesL, self.imagesR):
            image_l = cv2.imread(image_path_l)
            image_r = cv2.imread(image_path_r)
            # cv2.imread returns BGR, so convert with COLOR_BGR2GRAY
            # (COLOR_RGB2GRAY would swap the R/B channel weights).
            gray_l = cv2.cvtColor(image_l, cv2.COLOR_BGR2GRAY)
            gray_r = cv2.cvtColor(image_r, cv2.COLOR_BGR2GRAY)
            image_shape = gray_l.shape[::-1]   # (width, height)

            # Find the chessboard inner corners in both views.
            ok1, cornersl = cv2.findChessboardCorners(gray_l, (x_nums, y_nums), None)
            ok2, cornersr = cv2.findChessboardCorners(gray_r, (x_nums, y_nums), None)
            print(ok1 and ok2)
            if ok1 and ok2:
                # Scale the unit grid by the physical square size (30 mm).
                center_spacing = 30
                world_position.append(world_point * center_spacing)
                # Refine corner locations to sub-pixel accuracy.
                exact_cornersl = cv2.cornerSubPix(gray_l, cornersl, (11, 11), (-1, -1), criteria)
                exact_cornersr = cv2.cornerSubPix(gray_r, cornersr, (11, 11), (-1, -1), criteria)
                image_positionl.append(exact_cornersl)
                image_positionr.append(exact_cornersr)

        if not world_position:
            # The original code fell through to calibrateCamera with empty
            # lists (or an unbound gray_l) and failed with an opaque error.
            raise RuntimeError('chessboard was not detected in any image pair; cannot calibrate')

        # Per-camera intrinsics and distortion coefficients.
        retl, mtxl, distl, rvecsl, tvecsl = cv2.calibrateCamera(world_position, image_positionl, image_shape, None, None)
        retr, mtxr, distr, rvecsr, tvecsr = cv2.calibrateCamera(world_position, image_positionr, image_shape, None, None)
        print('ml = ', mtxl)
        print('mr = ', mtxr)
        print('dl = ', distl)
        print('dr = ', distr)
        stereo.m1 = mtxl
        stereo.m2 = mtxr
        stereo.d1 = distl
        stereo.d2 = distr

        # Reprojection error for each camera.
        self.cal_error(world_position, image_positionl, mtxl, distl, rvecsl, tvecsl)
        self.cal_error(world_position, image_positionr, mtxr, distr, rvecsr, tvecsr)

        # Stereo extrinsics (R, T).
        self.stereo_calibrate(world_position, image_positionl, image_positionr, mtxl, distl, mtxr, distr, image_shape)

    def cal_error(self, world_position, image_position, mtx, dist, rvecs, tvecs):
        """Print the mean per-view reprojection error."""
        mean_error = 0
        for i in range(len(world_position)):
            image_position2, _ = cv2.projectPoints(world_position[i], rvecs[i], tvecs[i], mtx, dist)
            error = cv2.norm(image_position[i], image_position2, cv2.NORM_L2) / len(image_position2)
            mean_error += error
        print("total error: ", mean_error / len(image_position))

    def stereo_calibrate(self, objpoints, imgpoints_l, imgpoints_r, M1, d1, M2, d2, dims):
        """Estimate R/T between the cameras and store them in `stereo`.

        Intrinsics are kept fixed at the mono-calibration results.
        NOTE(review): CALIB_FIX_INTRINSIC makes USE_INTRINSIC_GUESS /
        FIX_FOCAL_LENGTH / ZERO_TANGENT_DIST redundant — confirm intent.
        """
        flags = 0
        flags |= cv2.CALIB_FIX_INTRINSIC
        flags |= cv2.CALIB_USE_INTRINSIC_GUESS
        flags |= cv2.CALIB_FIX_FOCAL_LENGTH
        flags |= cv2.CALIB_ZERO_TANGENT_DIST
        stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 1e-5)
        ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
            objpoints, imgpoints_l, imgpoints_r,
            M1, d1, M2, d2, dims,
            criteria=stereocalib_criteria, flags=flags)
        print("R = ", R)
        print("T = ", T)
        stereo.R = R
        stereo.T = T

# Bundle of stereo camera parameters, snapshotted from the module-level
# `stereo` store at construction time.
class stereoCameral(object):
    def __init__(self):
        # Intrinsic matrices of the left / right cameras.
        self.cam_matrix_left = stereo.m1
        self.cam_matrix_right = stereo.m2
        # Distortion coefficients [k1, k2, p1, p2, k3] per camera.
        self.distortion_l = stereo.d1
        self.distortion_r = stereo.d2
        # Extrinsics: rotation matrix and translation vector.
        self.R = stereo.R
        self.T = stereo.T
        # Baseline taken as the first component of T.
        self.baseline = stereo.T[0]
 
# Compute the undistortion/rectification maps and the reprojection matrix Q.
# @param config: object exposing the stereo parameters (see stereoCameral).
def getRectifyTransform(height, width, config):
    """Return (map1x, map1y, map2x, map2y, Q) for rectifying a stereo pair."""
    size = (int(width), int(height))

    # Stereo rectification: rotations/projections per camera plus Q.
    R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
        config.cam_matrix_left, config.distortion_l,
        config.cam_matrix_right, config.distortion_r,
        size, config.R, config.T, alpha=-1)

    # Per-camera remap tables for cv2.remap.
    map1x, map1y = cv2.initUndistortRectifyMap(
        config.cam_matrix_left, config.distortion_l, R1, P1, size, cv2.CV_16SC2)
    map2x, map2y = cv2.initUndistortRectifyMap(
        config.cam_matrix_right, config.distortion_r, R2, P2, size, cv2.CV_16SC2)
    print(size[0], size[1])

    return map1x, map1y, map2x, map2y, Q

# Apply the rectification maps to both views (bilinear interpolation).
def rectifyImage(image1, image2, map1x, map1y, map2x, map2y):
    left = cv2.remap(image1, map1x, map1y, cv2.INTER_LINEAR)
    right = cv2.remap(image2, map2x, map2y, cv2.INTER_LINEAR)
    return left, right
 
# Stack two color images side by side and overlay horizontal reference lines
# (useful for eyeballing epipolar alignment after rectification).
def draw_line1(image1, image2):
    h1, w1 = image1.shape[0], image1.shape[1]
    h2 = image2.shape[0]
    canvas = np.zeros((max(h1, h2), w1 + image2.shape[1], 3), dtype=np.uint8)
    canvas[:h1, :w1] = image1
    canvas[:h2, w1:] = image2

    # One green line every 50 rows; endpoints past the right edge are clipped.
    for row in range(0, canvas.shape[0], 50):
        cv2.line(canvas, (0, row), (2 * canvas.shape[1], row), (0, 255, 0), thickness=2, lineType=cv2.LINE_AA)

    return canvas
# Stack two grayscale images side by side and overlay horizontal reference lines.
def draw_line2(image1, image2):
    h1, w1 = image1.shape[0], image1.shape[1]
    h2 = image2.shape[0]
    canvas = np.zeros((max(h1, h2), w1 + image2.shape[1]), dtype=np.uint8)
    canvas[:h1, :w1] = image1
    canvas[:h2, w1:] = image2

    # One line every 50 rows. NOTE(review): the canvas is single-channel, so
    # the (0, 255, 0) color presumably collapses to its first component —
    # confirm the lines are actually visible.
    for row in range(0, canvas.shape[0], 50):
        cv2.line(canvas, (0, row), (2 * canvas.shape[1], row), (0, 255, 0), thickness=2, lineType=cv2.LINE_AA)

    return canvas
# Disparity computation (SGBM) with WLS-filtered preview windows.
def disparity_SGBM(left_image, right_image, down_scale=False):
    """Compute left/right disparity maps with semi-global block matching.

    Shows three debug windows ("disp", "dis_color", "filtered_disp"); the
    RETURNED maps are the raw 16x fixed-point matcher outputs, not the
    normalized or WLS-filtered previews.

    Args:
        left_image, right_image: rectified views (grayscale or 3-channel).
        down_scale: if True, match on pyrDown'ed images and upscale the result.

    Returns:
        (disparity_left, disparity_right): raw disparity maps.
    """
    # P1/P2 smoothness penalties scale with channel count and block size.
    if left_image.ndim == 2:
        img_channels = 1
    else:
        img_channels = 3
    blockSize = 9
    param = {'minDisparity': 60,
             'numDisparities': 352,
             'blockSize': blockSize,
             'P1': 8 * img_channels * blockSize ** 2,
             'P2': 32 * img_channels * blockSize ** 2,
             'disp12MaxDiff': -1,
             'preFilterCap': 1,
             'uniquenessRatio': 15,
             'speckleWindowSize': 0,
             'speckleRange': 0,
             'mode': cv2.STEREO_SGBM_MODE_SGBM_3WAY
             }

    # Left matcher plus the matching right matcher needed by the WLS filter.
    left_matcher = cv2.StereoSGBM_create(**param)
    right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)

    size = (left_image.shape[1], left_image.shape[0])
    if not down_scale:
        disparity_left = left_matcher.compute(left_image, right_image)
        disparity_right = right_matcher.compute(right_image, left_image)
    else:
        # Match at half resolution, then resize and rescale the disparities.
        left_image_down = cv2.pyrDown(left_image)
        right_image_down = cv2.pyrDown(right_image)
        factor = size[0] / left_image_down.shape[1]
        disparity_left_half = left_matcher.compute(left_image_down, right_image_down)
        disparity_right_half = right_matcher.compute(right_image_down, left_image_down)
        disparity_left = cv2.resize(disparity_left_half, size, interpolation=cv2.INTER_AREA)
        disparity_right = cv2.resize(disparity_right_half, size, interpolation=cv2.INTER_AREA)
        disparity_left *= factor
        disparity_right *= factor

    # Grayscale preview (normalized to 0..255) and a color-mapped version.
    # (The original dead assignments `disp = disparity_left` /
    # `dis_color = disparity_left` were immediately overwritten and removed.)
    disp = cv2.normalize(disparity_left, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    dis_color = cv2.applyColorMap(disp, 2)
    cv2.imshow("disp", disp)
    cv2.imshow("dis_color", dis_color)

    # WLS filter: edge-preserving smoothing of the disparity (preview only).
    wls_filter = cv2.ximgproc.createDisparityWLSFilter(left_matcher)
    wls_filter.setLambda(8000)
    wls_filter.setSigmaColor(2.0)
    filtered_disp = wls_filter.filter(disparity_left, left_image, None, disparity_right, None, right_image)
    filtered_disp = cv2.normalize(filtered_disp, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    cv2.imshow("filtered_disp", filtered_disp)

    return disparity_left, disparity_right
# Disparity computation (block matching).
def disparity_BM(left_image, right_image):
    """Compute the left disparity map with StereoBM and show a preview.

    Returns the RAW 16x fixed-point disparity map; the normalized 8-bit
    version is only displayed in the "disp" window.
    """
    matcher = cv2.StereoBM_create(352, 9)   # numDisparities=352, blockSize=9
    # left_matcher.setROI1((774, 712, 94, 96))
    # left_matcher.setROI2((649, 712, 94, 96))
    raw_disp = matcher.compute(left_image, right_image)
    preview = cv2.normalize(raw_disp, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    cv2.imshow("disp", preview)

    return raw_disp

# Mouse callback: print the 3-D position and range of the clicked pixel.
# `param` is the reprojected 3-D point map passed to cv2.setMouseCallback.
def onmouse_pick_points(event, x, y, flags, param):
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    X = param[y][x][0]
    Y = param[y][x][1]
    Z = param[y][x][2]
    print('\n像素坐标 x = %d, y = %d' % (x, y))
    # print("世界坐标是：", threeD[y][x][0], threeD[y][x][1], threeD[y][x][2], "mm")
    print("世界坐标xyz 是：", X / 1000.0, Y / 1000.0, Z / 1000.0, "m")

    # Euclidean range from the camera, converted mm -> m.
    distance = math.sqrt(X ** 2 + Y ** 2 + Z ** 2)
    distance = distance / 1000.0
    print("距离是：", distance, "m")
        
if __name__ == '__main__':
#   Step 1: calibrate from the chessboard images (commented out; the
#   hard-coded values in `shuangmu` are used instead).
    # biaoding = StereoCalibration()
    # biaoding.calibration_photo()

#   Step 2: validate the calibration parameters on one stereo pair.
    imgL = cv2.imread("/home/cky/Action/projects/ws10_binocular_camera/data/frame/camL/1729333522_283801.bmp")
    imgR = cv2.imread("/home/cky/Action/projects/ws10_binocular_camera/data/frame/camR/1729333522_283801.bmp")
#     imgL , imgR = preprocess(imgL ,imgR )
    unprocessed_frame = draw_line1(imgL , imgR)
    cv2.imshow("unprocessed_frame", unprocessed_frame)
    
    height, width = imgL.shape[0:2]
    config = stereoCameral()    # load camera intrinsics and extrinsics
    
    # Stereo rectification / epipolar alignment of both views.
    map1x, map1y, map2x, map2y, Q = getRectifyTransform(height, width, config)
    iml_rectified, imr_rectified = rectifyImage(imgL, imgR, map1x, map1y, map2x, map2y)
    iml_rectified = cv2.cvtColor(iml_rectified, cv2.COLOR_BGR2GRAY)
    imr_rectified = cv2.cvtColor(imr_rectified, cv2.COLOR_BGR2GRAY)
    linepic = draw_line2(iml_rectified , imr_rectified)
    cv2.imshow("linepic", linepic)
    # Compute disparity (BM here; the SGBM variant is kept for reference).
    # lookdispL,lookdispR = disparity_SGBM(iml_rectified, imr_rectified)
    # linepic2 = draw_line2(lookdispL,lookdispR)
    lookdispL = disparity_BM(iml_rectified, imr_rectified)

    # Reproject the disparity map to 3-D coordinates.
    threeD = cv2.reprojectImageTo3D(lookdispL, Q, handleMissingValues=True)
    # The raw BM disparity is 16x fixed-point, so the reprojected coordinates
    # come out 1/16 of the real distances — scale them back up.
    # (presumably equivalent to dividing the disparity by 16 first — verify)
    threeD = threeD * 16
    # Click a pixel in the "disp" window to print its 3-D position and range.
    cv2.setMouseCallback("disp", onmouse_pick_points, threeD)
    
    # cv2.imshow("linepic2", linepic2)
    
    cv2.waitKey(0)
    cv2.destroyAllWindows()
