from pypylon import pylon
import cv2
import numpy as np
import time
from PIL import Image
import matplotlib.pyplot as plt


def getStereoRectifyImageFromCameraParameters(P, imageSize):
    '''
    Author: SXL
    Date: 2022-09-05
    Purpose: stereo-rectify a binocular rig from its intrinsics, extrinsics
        and distortion coefficients, producing undistorted stereo parameters.

    Parameters
    ----------
    P : flat sequence of 30 floats laid out as:
        P[0:4]   left intrinsics  (fx, cx, fy, cy)
        P[4:8]   right intrinsics (fx, cx, fy, cy)
        P[8:17]  rotation from right to left camera, row-major 3x3
        P[17:20] translation from right to left camera
        P[20:25] left distortion  (k1, k2, p1, p2, k3)
        P[25:30] right distortion (k1, k2, p1, p2, k3)
    imageSize : (width, height) of the camera images.

    Returns
    -------
    P0, P1 : 3x4 projection matrices of the left/right camera (unrectified).
    Pl, Pr : 3x4 projection matrices after rectification.
    mapLx, mapLy, mapRx, mapRy : per-pixel remap tables for cv2.remap.

    DEMO:
        import numpy as np
        import cv2
        import BinocularMeas as bl
        # Stereo intrinsics, extrinsics and distortion as one FLAT sequence
        # (the original demo wrapped this in an extra list, which would make
        # P[0] a list and break the indexing below).
        P = [1.58194505e+03, 6.11360774e+02, 1.58103367e+03, 4.81957409e+02,
             1.58550432e+03, 6.60125535e+02, 1.58560729e+03, 5.30058387e+02,
             0.99935972, 0.00832822, -0.03479633,
             -0.00818233, 0.99995714, 0.00433311,
             0.03483093, -0.00404562, 0.99938503,
             -1.45455737e+02, -1.00152336e-01, 3.81513984e+00,
             -0.10048731, 0.15942811, 0.00067713, -0.00077331, 0.02106416,
             -0.10669631, 0.27109715, 0.00093811, -0.00115607, -0.21457691]
        # Image resolution
        imageSize = (1280, 1024)
        P0,P1,Pl,Pr,mapLx,mapLy,mapRx,mapRy = getStereoRectifyImageFromCameraParameters(P, imageSize)
        print('Left camera projection matrix', P0)
        print('Right camera projection matrix', P1)
        print('Rectified left camera projection matrix', Pl)
        print('Rectified right camera projection matrix', Pr)
        print('Left camera (x,y) maps', mapLx, mapLy)
        print('Right camera (x,y) maps', mapRx, mapRy)
    '''
    # Left camera intrinsic matrix (3x4; last column zero).
    mtx_l = np.array([[P[0], 0, P[1], 0],
                      [0, P[2], P[3], 0],
                      [0, 0, 1, 0]])
    # Right camera intrinsic matrix (3x4; last column zero).
    mtx_r = np.array([[P[4], 0, P[5], 0],
                      [0, P[6], P[7], 0],
                      [0, 0, 1, 0]])
    # Rotation from the right camera to the left camera.
    R_lr = np.array([[P[8], P[9], P[10]],
                     [P[11], P[12], P[13]],
                     [P[14], P[15], P[16]]])
    # Translation from the right camera to the left camera.
    T_lr = np.array([[P[17]],
                     [P[18]],
                     [P[19]]])
    # 3x3 camera matrices expected by the OpenCV calibration APIs.
    cameraMatrixL = mtx_l[:, 0:3]
    cameraMatrixR = mtx_r[:, 0:3]
    # Left camera distortion coefficients (k1, k2, p1, p2, k3).
    distCoeffL = np.array([P[20], P[21], P[22], P[23], P[24]])
    # Right camera distortion coefficients.
    distCoeffR = np.array([P[25], P[26], P[27], P[28], P[29]])
    # Identity pose (left camera relative to itself).
    R_ll = np.eye(3)
    T_ll = np.zeros((3, 1))
    # Projection matrix of the left camera: K_l @ [I | 0].
    # NOTE: np.row_stack was deprecated in NumPy 1.24 and removed in 2.0;
    # np.vstack is the drop-in equivalent.
    temp_R_ll = np.append(R_ll, T_ll, axis=1)
    _temp_R_ll = np.vstack((temp_R_ll, [0, 0, 0, 1]))
    P0 = np.dot(mtx_l, _temp_R_ll)
    # Projection matrix of the right camera: K_r @ [R | T].
    temp_R_lr = np.append(R_lr, T_lr, axis=1)
    _temp_R_lr = np.vstack((temp_R_lr, [0, 0, 0, 1]))
    P1 = np.dot(mtx_r, _temp_R_lr)
    # Stereo rectification: alpha=0 crops to valid pixels only.
    Rl, Rr, Pl, Pr, Q, validROIL, validROIR = cv2.stereoRectify(cameraMatrixL, distCoeffL,
                                                                cameraMatrixR, distCoeffR,
                                                                imageSize, R_lr, T_lr,
                                                                flags=0,
                                                                alpha=0, newImageSize=(0, 0))

    # Build the undistort/rectify remap tables (one per axis, per camera).
    mapLx, mapLy = cv2.initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, cv2.CV_32FC1)
    mapRx, mapRy = cv2.initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, cv2.CV_32FC1)

    return P0, P1, Pl, Pr, mapLx, mapLy, mapRx, mapRy


def getCameraDevice():
    '''
    Author: SXL
    Date: 2022-09-17
    Purpose: enumerate connected Basler cameras and open the two GigE devices.

    Returns
    -------
    (cameraL, cameraR) : pylon.InstantCamera pair. The FIRST GigE device
        enumerated is treated as the right camera, the second as the left
        (enumeration order is what the transport layer reports).

    Raises
    ------
    EnvironmentError : if no GigE device is found, or only one is found.
        (The original for/else raised the generic "no GigE device found"
        even when exactly one device had been detected.)
    '''
    cameraL = None
    cameraR = None
    tl_factory = pylon.TlFactory.GetInstance()
    for dev_info in tl_factory.EnumerateDevices():
        print("DeviceClass:", dev_info.GetDeviceClass())
        # Only Gigabit-Ethernet (GigE) devices belong to the stereo rig.
        if dev_info.GetDeviceClass() == 'BaslerGigE':
            if cameraR is None:
                # First GigE device enumerated -> right camera.
                cameraR = pylon.InstantCamera(tl_factory.CreateDevice(dev_info))
            else:
                # Second GigE device enumerated -> left camera; rig complete.
                cameraL = pylon.InstantCamera(tl_factory.CreateDevice(dev_info))
                break

    if cameraR is None:
        raise EnvironmentError("no GigE device found")
    if cameraL is None:
        raise EnvironmentError("only one GigE device found, two are required")

    return cameraL, cameraR


def _findSpotCentroids(rectified, threshold=175, min_area=30):
    '''
    Detect bright spots in a rectified grayscale image.

    Thresholds at `threshold`, draws ALL contours in red onto a BGR copy
    (matching the original behaviour of drawing before area filtering),
    then returns centroids only for contours with area > `min_area`.

    Returns
    -------
    (colorImage, centroids) : BGR image with contours drawn, and a list of
        np.array([cx, cy]) centroids rounded to 3 decimals.
    '''
    _, binary = cv2.threshold(rectified, threshold, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Promote to 3 channels so contours/text can be drawn in colour.
    colorImage = cv2.cvtColor(rectified, cv2.COLOR_GRAY2BGR)
    colorImage = cv2.drawContours(colorImage, contours=contours, contourIdx=-1,
                                  color=[0, 0, 255], thickness=2)
    centroids = []
    for cnt in contours:
        if cv2.contourArea(cnt) <= min_area:
            continue  # reject small/noisy blobs
        M = cv2.moments(cnt)
        # Centroid from image moments: (m10/m00, m01/m00).
        cx = round(M['m10'] / M['m00'], 3)
        cy = round(M['m01'] / M['m00'], 3)
        centroids.append(np.array([cx, cy]))
    return colorImage, centroids


def getSpotcentroidFromCamera(P):
    '''
    Author: SXL
    Date: 2022-09-17
    Purpose: grab frames from two Basler cameras, locate bright-spot
        centroids in the rectified left/right images and triangulate their
        3-D coordinates, showing and recording a stitched preview.

    Parameters
    ----------
    P : flat sequence of 30 stereo calibration parameters, in the layout
        expected by getStereoRectifyImageFromCameraParameters.

    Returns
    -------
    centerlistL, centerlistR : accumulated spot centroids (pixel coords) in
        the rectified left/right images, across all processed frames.
    coordinate : accumulated 3-D points from triangulation.

    Bug fixes vs. the original:
      * the right image was remapped with the LEFT maps (mapLx/mapLy);
      * cv2.triangulatePoints received the right points as projPoints1
        (paired with Pl) and left points as projPoints2 — swapped;
      * COLOR_RGB2GRAY on BGR frames swapped the R/B luminance weights;
      * rectification maps were recomputed every frame;
      * triangulation iterated ALL accumulated left centroids against
        centerlistR[i], risking IndexError and re-processing stale points;
      * camera/video resources were never released on exit or ESC.
    '''
    # Recorded preview; frame size must match the resized stitched image
    # written below (two 1280x1024 frames side by side, scaled by 0.5).
    outvideo = cv2.VideoWriter('ou2t.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                               10.0, (1280, 512))

    cameraL, cameraR = getCameraDevice()
    cameraL.Open()
    cameraR.Open()
    # Continuous grabbing with minimal latency (keep only the latest frame).
    cameraL.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
    cameraR.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
    converterL = pylon.ImageFormatConverter()
    converterR = pylon.ImageFormatConverter()
    # Convert camera frames to OpenCV's BGR layout.
    for conv in (converterL, converterR):
        conv.OutputPixelFormat = pylon.PixelType_BGR8packed
        conv.OutputBitAlignment = pylon.OutputBitAlignment_MsbAligned

    centerlistL = []
    centerlistR = []
    coordinate = []
    # Rectification parameters depend only on the calibration and the image
    # size, so they are computed once, lazily, from the first left frame.
    rectify = None

    try:
        while cameraR.IsGrabbing() or cameraL.IsGrabbing():
            # Wait up to 1000 ms for the next frame from each camera.
            grabResultL = cameraL.RetrieveResult(1000, pylon.TimeoutHandling_ThrowException)
            grabResultR = cameraR.RetrieveResult(1000, pylon.TimeoutHandling_ThrowException)
            try:
                frameL = None
                frameR = None
                pointsL = []
                pointsR = []

                if grabResultL.GrabSucceeded():
                    imgL = converterL.Convert(grabResultL).GetArray()
                    # Frames are BGR (see converter config), hence BGR2GRAY.
                    grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
                    if rectify is None:
                        imageSize = (grayL.shape[1], grayL.shape[0])
                        _, _, Pl, Pr, mapLx, mapLy, mapRx, mapRy = \
                            getStereoRectifyImageFromCameraParameters(P, imageSize)
                        rectify = (Pl, Pr, mapLx, mapLy, mapRx, mapRy)
                    Pl, Pr, mapLx, mapLy, mapRx, mapRy = rectify
                    # After remap the left/right images are coplanar and
                    # row-aligned.
                    rectifiedL = cv2.remap(grayL, mapLx, mapLy, cv2.INTER_LINEAR)
                    frameL, pointsL = _findSpotCentroids(rectifiedL)
                    for idx, pt in enumerate(pointsL):
                        cv2.putText(frameL,
                                    "ul:" + str(pt[0]) + "   " + "vl:" + str(pt[1]),
                                    (50, (idx + 1) * 50),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
                    centerlistL.extend(pointsL)

                if grabResultR.GrabSucceeded() and rectify is not None:
                    imgR = converterR.Convert(grabResultR).GetArray()
                    grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
                    Pl, Pr, mapLx, mapLy, mapRx, mapRy = rectify
                    # BUG FIX: the right image must use the RIGHT maps.
                    rectifiedR = cv2.remap(grayR, mapRx, mapRy, cv2.INTER_LINEAR)
                    frameR, pointsR = _findSpotCentroids(rectifiedR)
                    for idx, pt in enumerate(pointsR):
                        cv2.putText(frameR,
                                    "ur:" + str(pt[0]) + "   " + "vr:" + str(pt[1]) + "                  ",
                                    (50, (idx + 1) * 50),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
                    centerlistR.extend(pointsR)

                # Triangulate only this frame's spot pairs (left with left
                # projection, right with right — the original had them
                # swapped and iterated the full accumulated history).
                if frameL is not None and frameR is not None:
                    for ptL, ptR in zip(pointsL, pointsR):
                        homog = np.array(cv2.triangulatePoints(Pl, Pr, ptL, ptR)).T
                        # Dehomogenise: divide (x, y, z) by w.
                        w = homog[0][:3] / homog[0][3]
                        coordinate.append(w)
                        print(ptL, ptR, "%.3f" % float(w[0]), "%.3f" % float(w[1]),
                              "%.3f" % float(w[2]))
                        # Stitch left/right previews side by side, half size.
                        htitch = np.hstack((frameL, frameR))
                        image = cv2.resize(htitch, None, fx=0.5, fy=0.5,
                                           interpolation=cv2.INTER_LINEAR)
                        cv2.putText(image,
                                    "X:" + str("%.3f" % float(w[0])) + "   " +
                                    "Y:" + str("%.3f" % float(w[1])) + "   " +
                                    "Z:" + str("%.3f" % float(w[2])), (400, 500),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
                        cv2.imshow("image", image)
                        outvideo.write(image)

                    # Single waitKey (the original called it twice); ESC quits.
                    if cv2.waitKey(30) == 27:
                        break
            finally:
                # Always return the grab buffers, even on break/exception.
                grabResultR.Release()
                grabResultL.Release()
    finally:
        cameraR.StopGrabbing()
        cameraL.StopGrabbing()
        cameraR.Close()
        cameraL.Close()
        outvideo.release()
        cv2.destroyAllWindows()

    return centerlistL, centerlistR, coordinate
