import sys
import time
import numpy as np
import cv2
import os
import glob
from PIL import Image
import matplotlib.pyplot as plt
from pypylon import pylon

# Load the camera intrinsic/extrinsic parameters from a text file.
# Each line of Parameters.txt holds one float; see
# getStereoRectifyImageFromCameraParameters for the expected layout.
# FIX: the original left the file handle open forever — use a context manager.
with open('Parameters.txt', 'r') as file:
    P = np.array([x.strip() for x in file.readlines()]).astype(np.float64)
imageSize = (1280, 1024)  # sensor resolution as (width, height)


def getStereoRectifyImageFromCameraParameters(P, imageSize):
    """Build projection matrices and undistort/rectify maps from a flat
    calibration vector.

    Parameters
    ----------
    P : sequence of float, length >= 30
        P[0:4]   left intrinsics  fx, cx, fy, cy
        P[4:8]   right intrinsics fx, cx, fy, cy
        P[8:17]  rotation (right camera -> left camera), row-major 3x3
        P[17:20] translation (right camera -> left camera)
        P[20:25] left distortion  k1, k2, p1, p2, k3
        P[25:30] right distortion k1, k2, p1, p2, k3
    imageSize : tuple
        Pixel resolution of both cameras as (width, height).

    Returns
    -------
    P0, P1 : 3x4 projection matrices of the ORIGINAL (unrectified) pair.
    Pl, Pr : 3x4 projection matrices of the RECTIFIED pair.
    mapLx, mapLy, mapRx, mapRy : float32 remap tables for cv2.remap.
    """
    # 3x4 intrinsic matrices [K | 0] of the left and right cameras
    mtx_l = np.array([[P[0], 0, P[1], 0],
                      [0, P[2], P[3], 0],
                      [0, 0, 1, 0]])
    mtx_r = np.array([[P[4], 0, P[5], 0],
                      [0, P[6], P[7], 0],
                      [0, 0, 1, 0]])
    # Rotation / translation taking right-camera points into the left frame
    R_lr = np.array([[P[8], P[9], P[10]],
                     [P[11], P[12], P[13]],
                     [P[14], P[15], P[16]]])
    T_lr = np.array([[P[17]],
                     [P[18]],
                     [P[19]]])
    cameraMatrixL = mtx_l[:, 0:3]
    cameraMatrixR = mtx_r[:, 0:3]
    # Distortion coefficients (k1, k2, p1, p2, k3)
    distCoeffL = np.array([P[20], P[21], P[22], P[23], P[24]])
    distCoeffR = np.array([P[25], P[26], P[27], P[28], P[29]])
    # Projection matrix of the left camera in its own frame: identity pose,
    # so this is simply mtx_l (the original built an explicit identity R|t).
    P0 = mtx_l @ np.eye(4)
    # Projection matrix of the right camera expressed in the left frame.
    # FIX: np.row_stack was removed in NumPy 2.0 — use np.vstack/np.hstack.
    Rt_lr = np.vstack((np.hstack((R_lr, T_lr)), [0, 0, 0, 1]))
    P1 = mtx_r @ Rt_lr
    # Stereo rectification (alpha=0: keep only valid pixels after remap)
    Rl, Rr, Pl, Pr, Q, validROIL, validROIR = cv2.stereoRectify(cameraMatrixL, distCoeffL,
                                                                cameraMatrixR, distCoeffR,
                                                                imageSize, R_lr, T_lr,
                                                                flags=0,
                                                                alpha=0, newImageSize=(0, 0))

    # Remap tables that undistort + rectify each view
    mapLx, mapLy = cv2.initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, cv2.CV_32FC1)
    mapRx, mapRy = cv2.initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, cv2.CV_32FC1)

    return P0, P1, Pl, Pr, mapLx, mapLy, mapRx, mapRy


def calCalibrationAccuracyFromCameraParameters(P, grayImageL, grayImageR):
    """Evaluate stereo-calibration accuracy on a 15x11 chessboard image pair.

    Rectifies the pair, detects chessboard corners on both the original and
    the rectified images, triangulates every pair of horizontally and
    vertically adjacent corners, and compares the recovered 3-D spacing
    against the 30 mm reference square size.  Several matplotlib figures are
    shown along the way (blocking until closed).

    Parameters
    ----------
    P : flat calibration vector (see getStereoRectifyImageFromCameraParameters).
    grayImageL, grayImageR : single-channel chessboard images of equal size.

    Returns
    -------
    error : max rectified adjacent-corner spacing minus the 30 mm reference.
    """
    # Image resolution as (width, height)
    imageSize = (grayImageL.shape[1], grayImageL.shape[0])
    P0, P1, Pl, Pr, mapLx, mapLy, mapRx, mapRy = getStereoRectifyImageFromCameraParameters(P, imageSize)
    # After remap the left/right images are coplanar and row-aligned
    rectifyImageL = cv2.remap(grayImageL, mapLx, mapLy, cv2.INTER_LINEAR)
    rectifyImageR = cv2.remap(grayImageR, mapRx, mapRy, cv2.INTER_LINEAR)

    # Un-rectified pair: paste side by side and draw evenly spaced horizontal
    # lines to inspect epipolar alignment visually.
    im_L = Image.fromarray(grayImageL)
    im_R = Image.fromarray(grayImageR)
    width = im_L.size[0] * 2
    height = im_R.size[1]
    img_compare1 = Image.new('RGBA', (width, height))
    img_compare1.paste(im_L, box=(0, 0))
    # NOTE(review): the right image is pasted at x=1280, i.e. this assumes
    # 1280-pixel-wide inputs — confirm for other resolutions.
    img_compare1.paste(im_R, box=(1280, 0))
    for i in range(0, 20):
        lenth = 1280 / 20
        plt.axhline(y=i * lenth, color='r', linestyle='-', lw=1)
    plt.imshow(img_compare1)
    plt.title('Image')
    plt.show()

    # Rectified pair: same visual epipolar-line check
    im_L = Image.fromarray(rectifyImageL)
    im_R = Image.fromarray(rectifyImageR)
    width = im_L.size[0] * 2
    height = im_R.size[1]
    img_compare = Image.new('RGBA', (width, height))
    img_compare.paste(im_L, box=(0, 0))
    img_compare.paste(im_R, box=(1280, 0))
    for i in range(0, 20):
        lenth = 1280 / 20
        plt.axhline(y=i * lenth, color='r', linestyle='-', lw=1)
    plt.imshow(img_compare)
    plt.title('rectifyImage')
    plt.show()

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # Detect chessboard corners on the rectified images
    retL, cornersL = cv2.findChessboardCorners(rectifyImageL, (15, 11), None)
    retR, cornersR = cv2.findChessboardCorners(rectifyImageR, (15, 11), None)
    # Refine the corners to sub-pixel accuracy
    cornersL = cv2.cornerSubPix(rectifyImageL, cornersL, (15, 11), (-1, -1), criteria)
    cornersR = cv2.cornerSubPix(rectifyImageR, cornersR, (15, 11), (-1, -1), criteria)

    # Detect chessboard corners on the ORIGINAL images
    retLO, cornersLO = cv2.findChessboardCorners(grayImageL, (15, 11), None)
    retRO, cornersRO = cv2.findChessboardCorners(grayImageR, (15, 11), None)
    # Refine to sub-pixel accuracy
    cornersLO = cv2.cornerSubPix(grayImageL, cornersLO, (15, 11), (-1, -1), criteria)
    cornersRO = cv2.cornerSubPix(grayImageR, cornersRO, (15, 11), (-1, -1), criteria)
    # Mark the detected corners on the left original image
    for i in range(0, len(cornersLO)):
        cv2.circle(grayImageL, (int(cornersLO[i][0][0]), int(cornersLO[i][0][1])), 1, (0, 0, 255))

    # Horizontal spacings: triangulate each corner and its right-hand
    # neighbour, in both the rectified and the original frames.
    i = 0
    w_distance = []
    wO_distance = []
    for i in range(cornersL.shape[0]):
        # Skip the last corner of each 15-corner row: no right neighbour
        if (i + 1) % 15 == 0:
            continue
        # Rectified pair: left/right pixel coordinates of corner i
        l0 = np.array([cornersL[i][0][0], cornersL[i][0][1]])
        r0 = np.array([cornersR[i][0][0], cornersR[i][0][1]])
        # 3-D position of corner i (homogeneous, then de-homogenized;
        # np.max over the scalar scale factor is a no-op)
        s0 = np.array(cv2.triangulatePoints(Pl, Pr, l0, r0)).T
        w0 = s0[0][:-1] / np.max(s0[0][-1])
        # Rectified pair: left/right pixel coordinates of corner i+1
        l1 = np.array([cornersL[i + 1][0][0], cornersL[i + 1][0][1]])
        r1 = np.array([cornersR[i + 1][0][0], cornersR[i + 1][0][1]])
        # 3-D position of corner i+1
        s1 = np.array(cv2.triangulatePoints(Pl, Pr, l1, r1)).T
        w1 = s1[0][:-1] / np.max(s1[0][-1])
        # Euclidean spacing between horizontally adjacent rectified corners
        distance = np.sqrt((w0[0] - w1[0]) ** 2 + (w0[1] - w1[1]) ** 2 + (w0[2] - w1[2]) ** 2)
        w_distance.append(distance)

        # Original pair: left/right pixel coordinates of corner i
        lO0 = np.array([cornersLO[i][0][0], cornersLO[i][0][1]])
        rO0 = np.array([cornersRO[i][0][0], cornersRO[i][0][1]])
        # 3-D position of corner i (original projection matrices)
        sO0 = np.array(cv2.triangulatePoints(P0, P1, lO0, rO0)).T
        wO0 = sO0[0][:-1] / np.max(sO0[0][-1])
        # Original pair: left/right pixel coordinates of corner i+1
        lO1 = np.array([cornersLO[i + 1][0][0], cornersLO[i + 1][0][1]])
        rO1 = np.array([cornersRO[i + 1][0][0], cornersRO[i + 1][0][1]])
        # 3-D position of corner i+1
        sO1 = np.array(cv2.triangulatePoints(P0, P1, lO1, rO1)).T
        wO1 = sO1[0][:-1] / np.max(sO1[0][-1])
        # Euclidean spacing between horizontally adjacent original corners
        distance = np.sqrt((wO0[0] - wO1[0]) ** 2 + (wO0[1] - wO1[1]) ** 2 + (wO0[2] - wO1[2]) ** 2)
        wO_distance.append(distance)

    # Vertical spacings: each corner vs. the one 15 indices (one row) below
    h_distance = []
    hO_distance = []
    for i in range(0, 15):
        for j in range(i, len(cornersR), 15):
            # 15x11 board -> indices >= 150 have no corner one row below
            if j >= 150:
                continue
            # Rectified pair: left/right pixel coordinates of corner j
            l0 = np.array([cornersL[j][0][0], cornersL[j][0][1]])
            r0 = np.array([cornersR[j][0][0], cornersR[j][0][1]])
            # 3-D position of corner j
            s0 = np.array(cv2.triangulatePoints(Pl, Pr, l0, r0)).T
            w0 = s0[0][:-1] / np.max(s0[0][-1])
            # Rectified pair: left/right pixel coordinates of corner j+15
            l1 = np.array([cornersL[j + 15][0][0], cornersL[j + 15][0][1]])
            r1 = np.array([cornersR[j + 15][0][0], cornersR[j + 15][0][1]])
            # 3-D position of corner j+15
            s1 = np.array(cv2.triangulatePoints(Pl, Pr, l1, r1)).T
            w1 = s1[0][:-1] / np.max(s1[0][-1])
            # Euclidean spacing between vertically adjacent rectified corners
            distance = np.sqrt((w0[0] - w1[0]) ** 2 + (w0[1] - w1[1]) ** 2 + (w0[2] - w1[2]) ** 2)
            h_distance.append(distance)

            # Original pair: left/right pixel coordinates of corner j
            lO0 = np.array([cornersLO[j][0][0], cornersLO[j][0][1]])
            rO0 = np.array([cornersRO[j][0][0], cornersRO[j][0][1]])
            # 3-D position of corner j
            sO0 = np.array(cv2.triangulatePoints(P0, P1, lO0, rO0)).T
            wO0 = sO0[0][:-1] / np.max(sO0[0][-1])
            # Original pair: left/right pixel coordinates of corner j+15
            lO1 = np.array([cornersLO[j + 15][0][0], cornersLO[j + 15][0][1]])
            rO1 = np.array([cornersRO[j + 15][0][0], cornersRO[j + 15][0][1]])
            # 3-D position of corner j+15
            sO1 = np.array(cv2.triangulatePoints(P0, P1, lO1, rO1)).T
            wO1 = sO1[0][:-1] / np.max(sO1[0][-1])
            # Euclidean spacing between vertically adjacent original corners
            distance = np.sqrt((wO0[0] - wO1[0]) ** 2 + (wO0[1] - wO1[1]) ** 2 + (wO0[2] - wO1[2]) ** 2)
            hO_distance.append(distance)

    # All adjacent-corner spacings of the rectified board
    distance = np.append(w_distance, h_distance)
    # All adjacent-corner spacings of the original board
    distanceO = np.append(wO_distance, hO_distance)
    # Worst-case absolute deviation from the 30 mm reference square size
    error = (max(distance) - 30)
    # Same for the original (unrectified) reconstruction
    errorO = (max(distanceO) - 30)

    # Plot both spacing series with their worst-case errors in the legend
    plt.figure(1)
    plt.title('Error value of calculation (mm)')
    plt.xlabel("The number of feature points (pcx)")
    plt.ylabel("Corners distance (mm)")
    plt.plot(distance, color='red', label=['Measurement accuracy after rectify', "%.3f" % float(error)])
    plt.plot(distanceO, color='gray', label=['Measurement accuracy before rectify', "%.3f" % float(errorO)])
    plt.legend(loc='best')
    plt.show()

    return error


def _findSpotCentroid(img):
    """Threshold *img*, keep blobs with area > 30 px, annotate them in place
    and return the centroid (x, y) of the LAST qualifying blob.

    Raises
    ------
    ValueError
        If no sufficiently large bright blob is found.  (The original code
        left the centroid variable unbound and crashed later with NameError.)
    """
    _, thr = cv2.threshold(img, 175, 255, cv2.THRESH_BINARY)
    # Find the spot contours and draw them on the image
    contours, _ = cv2.findContours(thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(img, contours=contours, contourIdx=-1, color=[0, 0, 255], thickness=2)
    contours = [cnt for cnt in contours if cv2.contourArea(cnt) > 30]
    if not contours:
        raise ValueError('no spot found: no contour with area > 30 px')
    center = None
    for index, cnt in enumerate(contours):
        # Centroid from image moments
        M = cv2.moments(cnt)
        cx = round(M['m10'] / M['m00'], 3)
        cy = round(M['m01'] / M['m00'], 3)
        # Annotate the centroid coordinates on the image
        cv2.putText(img, "X:" + str(cx) + "   " + "Y:" + str(cy), (50, (index + 1) * 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
        center = np.array([cx, cy])
    return center


def calCoordinateFromSpotcentroid(P, imgL, imgR):
    """Triangulate the 3-D coordinate of a bright spot seen by both cameras.

    Parameters
    ----------
    P : flat calibration vector (see getStereoRectifyImageFromCameraParameters).
    imgL, imgR : single-channel images containing the laser spot.

    Returns
    -------
    W : length-3 array, the de-homogenized 3-D coordinate of the spot.

    Raises
    ------
    ValueError
        If no spot is detected in either image.
    """
    # Image resolution as (width, height)
    imageSize = (imgL.shape[1], imgL.shape[0])
    P0, P1, Pl, Pr, mapLx, mapLy, mapRx, mapRy = getStereoRectifyImageFromCameraParameters(P, imageSize)
    # After remap the left/right images are coplanar and row-aligned
    rectifyimgL = cv2.remap(imgL, mapLx, mapLy, cv2.INTER_LINEAR)
    rectifyimgR = cv2.remap(imgR, mapRx, mapRy, cv2.INTER_LINEAR)
    # Spot centroid in each rectified view
    centerlistL = _findSpotCentroid(rectifyimgL)
    centerlistR = _findSpotCentroid(rectifyimgR)
    # Triangulate and de-homogenize (divide by the scale component)
    sO0 = np.array(cv2.triangulatePoints(Pl, Pr, centerlistL, centerlistR)).T
    W = sO0[0][:-1] / sO0[0][-1]
    return W


def search_get_device():
    """Locate the first two Basler GigE cameras on the network.

    Returns
    -------
    (camera, camera1) : two pylon.InstantCamera objects, in enumeration order.

    Raises
    ------
    EnvironmentError
        If fewer than two GigE devices are connected.  (The original raised
        "no GigE device found" even when exactly one device was present.)
    """
    tl_factory = pylon.TlFactory.GetInstance()
    cameras = []
    for dev_info in tl_factory.EnumerateDevices():
        if dev_info.GetDeviceClass() == 'BaslerGigE':  # Gigabit-Ethernet devices only
            cameras.append(pylon.InstantCamera(tl_factory.CreateDevice(dev_info)))
            if len(cameras) == 2:
                break
    if len(cameras) < 2:
        raise EnvironmentError("need 2 GigE devices, found %d" % len(cameras))

    return cameras[0], cameras[1]


camera, camera1 = search_get_device()
camera.Open()   # open both cameras
camera1.Open()

# Grab continuously with minimal latency (video-style, keep latest frame only)
camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
converter = pylon.ImageFormatConverter()
camera1.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
converter1 = pylon.ImageFormatConverter()
# Convert frames to OpenCV BGR format
converter.OutputPixelFormat = pylon.PixelType_BGR8packed
converter.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
converter1.OutputPixelFormat = pylon.PixelType_BGR8packed
converter1.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned
# Wait for one frame from each camera to warm up; 5000 ms timeout.
grabResult = camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)
grabResult1 = camera1.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException)

i = 50  # index used in the output file names (L50.bmp / R50.bmp)
while camera.IsGrabbing() or camera1.IsGrabbing():
    grabResult = camera.RetrieveResult(1000, pylon.TimeoutHandling_ThrowException)
    grabResult1 = camera1.RetrieveResult(1000, pylon.TimeoutHandling_ThrowException)

    if grabResult.GrabSucceeded():
        # First camera: convert and save as L<i>.bmp
        image = converter.Convert(grabResult)
        frame0 = image.GetArray()
        cv2.imwrite('L' + str(i) + '.bmp', frame0)
        cv2.waitKey(300)

    if grabResult1.GrabSucceeded():
        # Second camera: convert and save as R<i>.bmp.
        # BUG FIX: the original used `converter` (the first camera's
        # converter) here instead of `converter1`.
        imageL = converter1.Convert(grabResult1)
        frame1 = imageL.GetArray()
        cv2.imwrite('R' + str(i) + '.bmp', frame1)
        cv2.waitKey(300)

        k = cv2.waitKey(30)

        # One stereo pair is enough — stop the capture loop
        break

    grabResult.Release()
    grabResult1.Release()
# Release the grab resources and close both cameras
camera.StopGrabbing()
camera1.StopGrabbing()
camera.Close()
camera1.Close()
cv2.waitKey(3)

cv2.destroyAllWindows()

# Re-load the captured stereo pair as grayscale
imgL = cv2.imread('L50.bmp', 0)
imgR = cv2.imread('R50.bmp', 0)

imageSize = (imgL.shape[1], imgL.shape[0])
P0, P1, Pl, Pr, mapLx, mapLy, mapRx, mapRy = getStereoRectifyImageFromCameraParameters(P, imageSize)
# Rectify so the two views are coplanar and row-aligned
rectifyimgL = cv2.remap(imgL, mapLx, mapLy, cv2.INTER_LINEAR)
rectifyimgR = cv2.remap(imgR, mapRx, mapRy, cv2.INTER_LINEAR)
# Threshold segmentation of the left image
ret, thr = cv2.threshold(rectifyimgL, 175, 255, cv2.THRESH_BINARY)
# Find the left-image contours and draw them
contoursL, hie = cv2.findContours(thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(rectifyimgL, contours=contoursL, contourIdx=-1, color=[0, 0, 255], thickness=2)
contoursL = [cnt for cnt in contoursL if cv2.contourArea(cnt) > 30]
# Left-image spot centroids (only the LAST qualifying blob survives)
for index in range(len(contoursL)):
    ML = cv2.moments(contoursL[index])
    cxL = round(ML['m10'] / ML['m00'], 3)
    cyL = round(ML['m01'] / ML['m00'], 3)
    rectifyimgL = cv2.circle(rectifyimgL, (int(cxL), int(cyL)), 1, (0, 0, 255), 2)
    # Annotate the centroid coordinates on the left image
    cv2.putText(rectifyimgL, "X:" + str(cxL) + "   " + "Y:" + str(cyL), (50, (index + 1) * 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
    centerlistL = np.array([cxL, cyL])

ret, thr = cv2.threshold(rectifyimgR, 175, 255, cv2.THRESH_BINARY)
# Find the right-image spot contours and draw them
contoursR, hie = cv2.findContours(thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(rectifyimgR, contours=contoursR, contourIdx=-1, color=[0, 0, 255], thickness=2)
contoursR = [cnt for cnt in contoursR if cv2.contourArea(cnt) > 30]
# Right-image spot centroids
for index in range(len(contoursR)):
    MR = cv2.moments(contoursR[index])
    cxR = round(MR['m10'] / MR['m00'], 3)
    cyR = round(MR['m01'] / MR['m00'], 3)
    rectifyimgR = cv2.circle(rectifyimgR, (int(cxR), int(cyR)), 1, (0, 255, 255), 2)
    # Annotate the centroid coordinates on the right image
    cv2.putText(rectifyimgR, "X:" + str(cxR) + "   " + "Y:" + str(cyR), (50, (index + 1) * 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
    centerlistR = np.array([cxR, cyR])

# Triangulate the spot and de-homogenize
sO0 = np.array(cv2.triangulatePoints(Pl, Pr, centerlistL, centerlistR)).T
W = sO0[0][:-1] / np.max(sO0[0][-1])
print('左光斑质心:', centerlistL, '右光斑质心:', centerlistR)

W[0] = round(W[0], 2)
W[1] = round(W[1], 2)
W[2] = round(W[2], 2)

# BUG FIX: the original called self.ui.label.setText(...) here, but `self`
# is undefined at module level (the lines were pasted from a Qt widget
# class) and raised NameError before the display code below could run.
print('Spot 3-D coordinate (mm):', W[0], W[1], W[2])

# Show the annotated rectified pair side by side at half size
htitch = np.hstack((rectifyimgL, rectifyimgR))
image = cv2.resize(htitch, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
cv2.imshow("ImageCapture", image)
cv2.waitKey(0)

imgL = cv2.imread('L50.bmp', 0)
imgR = cv2.imread('R50.bmp', 0)

# Same measurement via the library function (no annotation side effects)
w = np.array(calCoordinateFromSpotcentroid(P, imgL, imgR))
