import os
from Modules.FolderProcess import MyOS as fdp
import cv2
import numpy as np


class MyCV2(object):
    """Collection of OpenCV helper routines: image I/O with non-ASCII path
    support, display, feature extraction (SIFT/ORB), FLANN matching,
    RANSAC filtering and match visualisation.

    All functionality is exposed as static methods; the class only acts as
    a namespace, so instantiating it is optional.
    """

    def __init__(self):
        super().__init__()

    @staticmethod
    def cv_show(name, img):
        """Display an image in a resizable window and block until a key is pressed.

        :param name: window title (str)
        :param img: image matrix (np.uint8)
        :return: None
        """
        cv2.namedWindow(name, cv2.WINDOW_NORMAL)
        cv2.imshow(name, img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    @staticmethod
    def cv_imread(file_path):
        """Read an image from disk, supporting non-ASCII (e.g. Chinese) paths.

        cv2.imread cannot handle non-ASCII paths on Windows, so the bytes are
        read with numpy first and decoded with cv2.imdecode.

        :param file_path: path to the image file (str)
        :return: decoded image matrix (np.uint8), channels as stored on disk
        """
        raw = np.fromfile(file_path, dtype=np.uint8)
        return cv2.imdecode(raw, -1)

    @staticmethod
    def im_list(folder):  #
        '''
        读取文件夹下的图片(jpg、png、bmp)
        :param folder: 文件夹路径str
        :return: img_list 图片列表list
        '''
        folder_list = os.listdir(folder)
        img_list = []
        for i in folder_list:
            if i[-3:] == "jpg" or i[-3:] == "png" or i[-3:] == "bmp":
                img_list.append(i)
        return img_list

    @staticmethod
    def erro_drawlines(left_img_you, right_img_you, left_nei_kps, right_nei_kps, path=""):
        '''
        Draw matched keypoint pairs of two images side by side.

        The images are concatenated horizontally; a circle is drawn on every
        keypoint and a green line connects each left/right pair.
        :param left_img_you: left image (np.uint8); assumed same height as the
                             right one -- TODO confirm, np.concatenate requires it
        :param right_img_you: right image (np.uint8)
        :param left_nei_kps: Nx2 array of (x, y) points in the left image
        :param right_nei_kps: Nx2 array of (x, y) points in the right image
        :param path: output folder; when empty each step is shown on screen
        :return: the annotated side-by-side image
        '''
        trans_left = np.copy(left_nei_kps)
        trans_right = np.copy(right_nei_kps)
        show_img = np.concatenate([left_img_you, right_img_you], 1)
        show_img2 = np.copy(show_img)
        # BUG FIX: shift right-image x coordinates by the *width* of the left
        # image.  len(img) is the number of rows (the height), which placed
        # the right-hand points at a wrong horizontal offset.
        trans_right[:, 0] = trans_right[:, 0] + left_img_you.shape[1]
        trans_left = trans_left.astype(int).tolist()
        trans_right = trans_right.astype(int).tolist()
        fdp.makefolder(path)
        for i, (lp, rp) in enumerate(zip(trans_left, trans_right)):
            show_img = cv2.circle(show_img, tuple(lp), 7, (0, 0, 255), 0)
            show_img = cv2.circle(show_img, tuple(rp), 7, (0, 0, 255), 0)
            show_img = cv2.line(show_img, tuple(lp), tuple(rp), (0, 255, 0), 1)
            show_img2 = cv2.circle(show_img2, tuple(lp), 7, (0, 0, 255), 0)
            show_img2 = cv2.circle(show_img2, tuple(rp), 7, (0, 0, 255), 0)
            if path:
                # Only i == 20 satisfies this, so exactly one intermediate
                # snapshot is written.
                if 10 < i < 30 and i % 20 == 0:
                    cv2.imwrite(os.path.join(path, str(i) + "first.jpg"), show_img)
            else:
                MyCV2.cv_show('name', show_img)
        if path:
            cv2.imwrite(os.path.join(path, "result.jpg"), show_img)
            cv2.imwrite(os.path.join(path, "keypoints.jpg"), show_img2)
            print('绘图结束,输出到', path)
        return show_img

    @staticmethod
    def im_write(name, img, path=''):
        '''
        Save an image as JPEG, supporting non-ASCII (e.g. Chinese) paths.
        :param name: file name (str)
        :param img: image matrix (np)
        :param path: destination folder (str); created if missing, empty
                     means the current working directory
        :return:
        '''
        if path:
            if not os.path.isdir(path):
                fdp.makefolder(path)
            cv2.imencode('.jpg', img)[1].tofile(path + '\\' + name)
        else:
            cv2.imencode('.jpg', img)[1].tofile(name)
            print('保存路径默认为当前工作目录')

    @staticmethod
    def get_feature(img, draw=0, way='sift'):
        '''
        Detect keypoints and compute descriptors on a BGR image.
        :param img: input BGR image (np.uint8)
        :param draw: when truthy, additionally return an image with the
                     keypoints drawn on it
        :param way: 'sift' or 'orb'
        :return: (kp, des) or (kp, des, kp_image) when draw is truthy
        :raises ValueError: if `way` is neither 'sift' nor 'orb'
        '''
        gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if way == 'sift':
            sift = cv2.SIFT_create()
            kp, des = sift.detectAndCompute(gray_image, None)
            if draw:
                kp_image = cv2.drawKeypoints(img, kp, None)
                return kp, des, kp_image
            return kp, des
        elif way == 'orb':
            orb = cv2.ORB_create(
                nfeatures=30000, scaleFactor=1.2, nlevels=8,
                edgeThreshold=31,
                firstLevel=0,
                WTA_K=2,
                patchSize=31,
                fastThreshold=20)
            kp, des = orb.detectAndCompute(gray_image, None)
            if draw:
                # ORB keypoints are drawn in green, unlike the SIFT branch.
                kp_image = cv2.drawKeypoints(img, kp, None, color=(0, 255, 0), flags=0)
                return kp, des, kp_image
            return kp, des
        # Previously an unknown `way` fell off the end and silently returned
        # None, which crashed callers later; fail loudly instead.
        raise ValueError("way must be 'sift' or 'orb', got %r" % (way,))

    @staticmethod
    def draw_match(left_img, left_kps, right_img, right_kps, good_matches):
        '''
        Render matches between two images: green match lines, red single points.
        :param left_kps/right_kps: cv2.KeyPoint lists of each image
        :param good_matches: flat sequence of cv2.DMatch objects
        :return: visualisation image produced by cv2.drawMatchesKnn
        '''
        # drawMatchesKnn expects a list of match *lists*; wrap each DMatch.
        knn_matches = np.expand_dims(good_matches, 1)
        return cv2.drawMatchesKnn(
            left_img, left_kps, right_img, right_kps, knn_matches, None,
            matchColor=(0, 255, 0),
            singlePointColor=(0, 0, 255),
            flags=2)
    @staticmethod
    def draw_feature(img, kps, out_path='', color=(0, 0, 255)):
        '''
        Draw filled circles at the given keypoint locations.
        :param img: image to draw on (a copy is made; the input is untouched)
        :param kps: Nx2 array of (x, y) coordinates
        :param out_path: when non-empty, the result is also saved there
        :param color: BGR circle colour
        :return: the annotated copy of the image
        '''
        draw = np.copy(img)
        for pt in kps.astype(int).tolist():
            draw = cv2.circle(draw, tuple(pt), 7, color, -1)
        if out_path:
            # im_write creates the folder itself if it is missing, so the
            # extra makefolder call was redundant.  Also give the file its
            # proper .jpg extension (it previously had none although JPEG
            # data was written).
            MyCV2.im_write('extract_feature_result.jpg', draw, out_path)
        return draw
    @staticmethod
    def sift_match(left_img, left_kps, left_des, right_img, right_kps, right_des, draw=1):
        '''
        Match SIFT descriptors of two images with FLANN + Lowe's ratio test,
        drop duplicate right-image points, then keep only the inliers of a
        RANSAC fundamental-matrix fit.
        :param left_img/right_img: source images (only used for drawing)
        :param left_kps/right_kps: cv2.KeyPoint lists
        :param left_des/right_des: corresponding descriptor arrays
        :param draw: when truthy also return the match visualisation
        :return: (left_pts, right_pts, left_des, right_des, matches[, vis]);
                 empty lists when no match passes the ratio test
        '''
        FLANN_INDEX_KDTREE = 1;  # algorithm id of the kd-tree matcher
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5);
        search_params = dict(checks=50);
        flann = cv2.FlannBasedMatcher(index_params, search_params);
        matches = flann.knnMatch(left_des, right_des, 2);
        # Lowe ratio: nearest / second-nearest distance (epsilon avoids /0).
        value = np.array([i.distance / (j.distance+1e-5) for i, j in matches])
        index = np.where(value < 0.7)[0]
        if not len(index):
            if draw:
                return [],[],[],[],[],left_img
            return [],[],[],[],[]
        np_matches = np.array(matches)
        good_matches = np_matches[index][:, 0]
        good_left_kps = cv2.KeyPoint_convert(left_kps)
        good_right_kps = cv2.KeyPoint_convert(right_kps)
        right_index = np.array([i.trainIdx for i in good_matches])
        # Several left points can match the same right point; x + y is used
        # as a cheap fingerprint so np.unique keeps each right point once.
        temp = good_right_kps[right_index][:, 0] + good_right_kps[right_index][:, 1]
        _, indices = np.unique(temp, return_index=True)
        # NOTE(review): indexing left kps/des with the match index assumes
        # matches[k].queryIdx == k, which holds for FLANN knnMatch output.
        good_left_kps = good_left_kps[index[indices]]
        good_left_des = left_des[index[indices]]
        good_matches = good_matches[indices]
        good_right_kps = good_right_kps[right_index[indices]]
        good_right_des = right_des[right_index[indices]]
        F, mask = cv2.findFundamentalMat(good_left_kps, good_right_kps, cv2.FM_RANSAC, 0.9, 0.99);
        good_left_kps = good_left_kps[mask.ravel() == 1];  # keep RANSAC inliers only
        good_right_kps = good_right_kps[mask.ravel() == 1]
        good_left_des= good_left_des[mask.ravel() == 1]
        good_right_des = good_right_des[mask.ravel() == 1]
        good_matches = good_matches[mask.ravel() == 1]
        if draw:
            match_result = MyCV2.draw_match(left_img, left_kps, right_img, right_kps, good_matches)
            return good_left_kps, good_right_kps, good_left_des, good_right_des, good_matches, match_result
        return good_left_kps, good_right_kps, good_left_des, good_right_des, good_matches
    @staticmethod
    def orb_match(img1, kp1, des1, img2, kp2, des2, draw=1):
        '''
        Match ORB descriptors with FLANN (LSH index) and filter by distance.

        A match is kept when its distance is at most max(2 * min_distance, 30),
        where min_distance is the smallest distance among all candidates and
        30 is an empirical floor (min_distance can be very small).
        :param img1/img2: source images (only used for drawing)
        :param kp1/kp2: cv2.KeyPoint lists
        :param des1/des2: ORB descriptor arrays
        :param draw: when truthy also return the match visualisation
        :return: (left_pts Nx2, right_pts Nx2, left_des, right_des[, vis])
        '''
        FLANN_INDEX_LSH = 6
        index_params = dict(algorithm=FLANN_INDEX_LSH,
                            table_number=6,  # 12
                            key_size=12,  # 20
                            multi_probe_level=1)  # 2
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        # The LSH matcher can return empty candidate tuples; drop them so the
        # x[0] accesses below are safe.
        matches = [x for x in flann.knnMatch(des1, des2, 1) if x]
        # BUG FIX: min_distance was initialised to 0 and updated with "<", so
        # it never changed and the threshold silently degenerated to the
        # constant 30.  Track the true minimum distance instead.
        min_distance = min((x[0].distance for x in matches), default=0)
        good_match = []
        left_kp = []
        right_kp = []
        left_des_index = []
        right_des_index = []
        for x in matches:
            if x[0].distance <= max(2 * min_distance, 30):
                good_match.append(x[0])
                left_kp.append(kp1[x[0].queryIdx].pt)
                right_kp.append(kp2[x[0].trainIdx].pt)
                left_des_index.append(x[0].queryIdx)
                right_des_index.append(x[0].trainIdx)
        # dtype=int keeps the fancy indexing valid even when no match
        # survived (np.array([]) defaults to float and would raise).
        left_des = des1[np.array(left_des_index, dtype=int)]
        right_des = des2[np.array(right_des_index, dtype=int)]
        if draw:
            outimage = MyCV2.draw_match(img1, kp1, img2, kp2, good_match)
            return (np.array(left_kp)).reshape(-1, 2), (np.array(right_kp)).reshape(-1, 2), left_des, right_des, outimage
        return (np.array(left_kp)).reshape(-1, 2), (np.array(right_kp)).reshape(-1, 2), left_des, right_des
    @staticmethod
    def hexian_ransac1(
            left_nei_kps, good_left_des, right_nei_kps, good_right_des, K, good_matches, left_kps, left_img, right_kps, right_img, draw=1
    ):
        '''
        Filter matched points with two RANSAC stages: fundamental matrix
        first, then essential matrix using the camera matrix K.

        (An unreachable cv2.recoverPose block that followed the returns in
        the previous version has been removed.)
        :param left_nei_kps/right_nei_kps: Nx2 matched point arrays
        :param good_left_des/good_right_des: descriptors aligned with them
        :param K: 3x3 camera intrinsic matrix
        :param good_matches: np.array of cv2.DMatch aligned with the points
        :param left_kps/right_kps: full keypoint lists (for visualisation)
        :param left_img/right_img: source images (for visualisation)
        :param draw: when truthy also return the match visualisation
        :return: (left_pts, right_pts, left_des, right_des[, match_result])
        '''
        F, maskF = cv2.findFundamentalMat(
            left_nei_kps, right_nei_kps, cv2.FM_RANSAC, 1.5, 0.95
        )
        # Keep only RANSAC inliers of the fundamental-matrix fit.
        keep = maskF.ravel() == 1
        left_nei_kps = left_nei_kps[keep]
        right_nei_kps = right_nei_kps[keep]
        good_left_des = good_left_des[keep]
        good_right_des = good_right_des[keep]
        E, maskE = cv2.findEssentialMat(
            left_nei_kps, right_nei_kps, K, cv2.FM_RANSAC, 0.95, 1.5
        )
        # Second pass: inliers of the essential-matrix fit.
        keep = maskE.ravel() == 1
        left_nei_kps = left_nei_kps[keep]
        right_nei_kps = right_nei_kps[keep]
        good_left_des = good_left_des[keep]
        good_right_des = good_right_des[keep]
        good_matches = good_matches[keep]
        if draw:
            match_result = MyCV2.draw_match(left_img, left_kps, right_img, right_kps, good_matches.tolist())
            return left_nei_kps, right_nei_kps, good_left_des, good_right_des, match_result
        return left_nei_kps, right_nei_kps, good_left_des, good_right_des

    @staticmethod
    def matches(left_img, right_img, K, draw=1):
        '''
        Full SIFT matching pipeline for an image pair: detect features,
        kd-tree match with ratio test, then RANSAC filtering.
        :param left_img/right_img: BGR images
        :param K: 3x3 camera intrinsic matrix
        :param draw: unused here; kept for interface compatibility
        :return: (left_pts, right_pts, left_des, right_des); four empty lists
                 when fewer than 30 raw matches or fewer than 20 RANSAC
                 inliers remain
        '''
        left_gray_you = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)
        right_gray_you = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)
        detector = cv2.SIFT_create()
        left_kps, left_des = detector.detectAndCompute(left_gray_you, None)
        right_kps, right_des = detector.detectAndCompute(right_gray_you, None)
        good_left_kps, good_right_kps, good_left_des, good_right_des = MyCV2.kdTree(
            left_des, right_des, left_kps, right_kps, 0
        )
        # Guard clauses instead of the original nested if/else.
        if len(good_left_kps) < 30:
            return [], [], [], []
        left_nei_kps, right_nei_kps, good_left_des, good_right_des = MyCV2.hexian_ransac(
            good_left_kps, good_right_kps, left_gray_you,
            good_left_des, good_right_des, 1, K,
        )
        if len(left_nei_kps) < 20:
            return [], [], [], []
        return left_nei_kps, right_nei_kps, good_left_des, good_right_des

    @staticmethod
    def kdTree(left_des, right_des, left_kps, right_kps, no_car=0):
        '''
        FLANN kd-tree descriptor matching with Lowe's ratio test and
        duplicate removal.
        :param left_des/right_des: SIFT descriptor arrays
        :param left_kps/right_kps: corresponding cv2.KeyPoint lists
        :param no_car: 0 keeps everything; 1 and 2 restrict left-image
                       keypoints to fixed rectangles (presumably to exclude a
                       vehicle visible in the frames -- the pixel bounds are
                       empirical, verify against the capture setup)
        :return: (left_pts Nx2, right_pts Nx2, left_des, right_des)
        '''
        # Regions as [x_min, x_max, y_max] for the two no_car modes.
        hou_nocar = [450, 1350, 1090]
        zheng_nocar = [190, 1340, 1150]
        FLANN_INDEX_KDTREE = 1  # kd-tree algorithm id in the matcher
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        """
        Matching can produce duplicates: left points A and B may both find
        the same right point C, but C can only be the correspondence of one
        of them, so duplicates are pruned below.
        """
        # Query with the smaller descriptor set (saves roughly 0.1 s).
        # mn records the direction: 1 -> queryIdx indexes the left image,
        # 0 -> query/train are swapped.
        if len(left_des) > len(right_des):
            matches = flann.knnMatch(right_des, left_des, 2)
            mn = 0
        else:
            matches = flann.knnMatch(left_des, right_des, 2)
            mn = 1
        good_left_kps = []
        good_right_kps = []
        good_left_des_index = []
        good_right_des_index = []
        if no_car == 1:
            region = zheng_nocar
        elif no_car == 2:
            region = hou_nocar
        else:
            region = None
        # The previous version expanded the six combinations of direction and
        # region filter into near-identical copy-pasted loops; this single
        # loop preserves the behaviour of all of them.
        for m, n in matches:
            li = m.queryIdx if mn else m.trainIdx  # index into the left image
            ri = m.trainIdx if mn else m.queryIdx  # index into the right image
            if region is not None:
                pt = left_kps[li].pt
                # Only the left image is checked: when the left point passes,
                # its right-hand partner is assumed to as well.
                if not (pt[1] < region[2] and region[0] < pt[0] < region[1]):
                    continue
            if m.distance < 0.7 * n.distance:  # Lowe's ratio test
                good_left_kps.append(left_kps[li].pt)
                good_right_kps.append(right_kps[ri].pt)
                good_left_des_index.append(li)
                good_right_des_index.append(ri)
        if not len(good_left_kps):
            # Deliberate fallback of the original code: return one far-away
            # random point so downstream geometry code does not crash on an
            # empty array.
            a = np.random.random([1, 2]) * 10000
            return a, a, a, a
        good_left_des = left_des[np.array(good_left_des_index), :]
        good_right_des = right_des[np.array(good_right_des_index), :]
        left_nei_kps = np.array(good_left_kps)
        right_nei_kps = np.array(good_right_kps)
        # x + y acts as a cheap 1-D fingerprint for duplicate removal
        # (np.unique needs a flat array).
        temp = left_nei_kps[:, 0] + left_nei_kps[:, 1]
        _, index = np.unique(temp, return_index=True)
        left_nei_kps = left_nei_kps[index]
        right_nei_kps = right_nei_kps[index]
        good_left_des = good_left_des[index]
        good_right_des = good_right_des[index]
        return left_nei_kps, right_nei_kps, good_left_des, good_right_des
    @staticmethod
    def hexian_ransac(
            left_nei_kps, right_nei_kps, img, good_left_des, good_right_des, out_path, K
    ):
        '''
        Two-stage RANSAC filter of matched points: fundamental matrix first
        (threshold 1.5 px, confidence 0.99), then the essential matrix using
        the camera matrix K.
        :param left_nei_kps/right_nei_kps: Nx2 matched point arrays
        :param img: unused; kept for interface compatibility
        :param good_left_des/good_right_des: descriptors aligned with points
        :param out_path: unused; kept for interface compatibility
        :param K: 3x3 camera intrinsic matrix
        :return: (left_pts, right_pts, left_des, right_des), inliers only
        '''
        F, maskF = cv2.findFundamentalMat(
            left_nei_kps, right_nei_kps, cv2.FM_RANSAC, 1.5, 0.99
        )
        keep = maskF.ravel() == 1
        left_nei_kps = left_nei_kps[keep]
        right_nei_kps = right_nei_kps[keep]
        good_left_des = good_left_des[keep]
        good_right_des = good_right_des[keep]
        E, maskE = cv2.findEssentialMat(
            left_nei_kps, right_nei_kps, K, cv2.FM_RANSAC, 0.99, 1.5
        )
        keep = maskE.ravel() == 1
        left_nei_kps = left_nei_kps[keep]
        right_nei_kps = right_nei_kps[keep]
        good_left_des = good_left_des[keep]
        good_right_des = good_right_des[keep]
        return left_nei_kps, right_nei_kps, good_left_des, good_right_des
    @staticmethod
    def same_shape(left_img_you, right_img_you):
        '''
        Pad two images with black borders so they share the same size.
        :param left_img_you: left image
        :param right_img_you: right image
        :return:
        left: resized (padded) left image
        right: resized (padded) right image
        left_change: padding of the left image, [top pixels, left pixels]
        right_change: padding of the right image, same layout
        '''
        left = np.copy(left_img_you)
        right = np.copy(right_img_you)
        target_h = max(left.shape[0], right.shape[0])
        target_w = max(left.shape[1], right.shape[1])

        def _pad(img):
            # Split the deficit between the two sides; the odd pixel (if any)
            # goes to the top/left via ceil.
            top = int(np.ceil((target_h - img.shape[0]) / 2))
            bottom = int((target_h - img.shape[0]) / 2)
            lft = int(np.ceil((target_w - img.shape[1]) / 2))
            rgt = int((target_w - img.shape[1]) / 2)
            padded = cv2.copyMakeBorder(img, top, bottom, lft, rgt,
                                        cv2.BORDER_CONSTANT)
            return padded, [top, lft]

        left, left_change = _pad(left)
        right, right_change = _pad(right)
        return left, right, left_change, right_change

    @staticmethod
    def drawlines(left_img_you, right_img_you, left_nei_kps, right_nei_kps, path=""):
        '''
        Draw matched keypoint pairs of two images side by side.  The images
        are first padded to the same size with same_shape and the keypoint
        coordinates are shifted into the padded, concatenated frame.
        :param left_img_you: left image
        :param right_img_you: right image
        :param left_nei_kps: Nx2 (x, y) points in the left image
        :param right_nei_kps: Nx2 (x, y) points in the right image
        :param path: output folder; when empty nothing is written to disk
        :return: the annotated side-by-side image (now returned in every
                 case; previously None was returned when path was given)
        '''
        trans_left = np.copy(left_nei_kps)
        trans_right = np.copy(right_nei_kps)
        left, right, left_change, right_change = MyCV2.same_shape(left_img_you, right_img_you)
        show_img = np.concatenate([left, right], 1)
        show_img2 = np.copy(show_img)
        # Shift coordinates: right points move past the left image plus their
        # own padding; both sets absorb their top/left padding offsets.
        trans_right[:, 0] = trans_right[:, 0] + left.shape[1] + right_change[1]
        trans_right[:, 1] = trans_right[:, 1] + right_change[0]
        trans_left[:, 0] = trans_left[:, 0] + left_change[1]
        trans_left[:, 1] = trans_left[:, 1] + left_change[0]
        trans_left = trans_left.astype(int).tolist()
        trans_right = trans_right.astype(int).tolist()
        if path:
            # FIX: create the output folder once, before drawing -- the
            # previous version called fdp.makefolder(path) on every loop
            # iteration, and never at all when there were zero keypoints,
            # which then broke the final imwrite calls.
            fdp.makefolder(path)
        for i, (lp, rp) in enumerate(zip(trans_left, trans_right)):
            show_img = cv2.circle(show_img, tuple(lp), 7, (0, 0, 255), 0)
            show_img = cv2.circle(show_img, tuple(rp), 7, (0, 0, 255), 0)
            show_img = cv2.line(show_img, tuple(lp), tuple(rp), (0, 255, 0), 1)
            show_img2 = cv2.circle(show_img2, tuple(lp), 7, (0, 0, 255), 0)
            show_img2 = cv2.circle(show_img2, tuple(rp), 7, (0, 0, 255), 0)
            # Only i == 20 satisfies this: one intermediate snapshot is saved.
            if path and 10 < i < 30 and i % 20 == 0:
                cv2.imwrite(os.path.join(path, str(i) + "first.jpg"), show_img)
        if path:
            cv2.imwrite(os.path.join(path, "result.jpg"), show_img)
            cv2.imwrite(os.path.join(path, "keypoints.jpg"), show_img2)
        return show_img
if __name__ == '__main__':
    # Ad-hoc smoke test: load precomputed keypoints (first two columns of a
    # text file), draw them on an image and show the result.
    # NOTE(review): the input paths are machine-specific absolute paths and
    # will only work on the original author's machine.
    a = np.loadtxt(r"D:\BaiduSyncdisk\python\python_program\2023\others\3dtiles-vpr-master\3dtiles-vpr-master\test.txt")[:,0:2]
    imp = MyCV2()
    img = imp.cv_imread(r'I:\zzdx_3dtiles\zzdx\zzdx\database\Tile_+000_+000\Tile_+000_+000_L22_000001200.b3dm.glb.jpg')
    c = imp.draw_feature(img,a)
    imp.cv_show('a',c)