import  cv2
import numpy as np
import os
import time

# Module-level brute-force matcher (default L2 norm) shared by sift_points below.
bf = cv2.BFMatcher()
def getkp(img):
    """Detect SIFT keypoints and compute their descriptors for one image.

    Args:
        img: input image (as loaded by cv2.imread).

    Returns:
        (keypoints, descriptors) as produced by SIFT.detectAndCompute.
    """
    detector = cv2.SIFT_create()
    keypoints, descriptors = detector.detectAndCompute(img, None)
    return keypoints, descriptors

# Correction via SIFT: estimate the mean displacement between matched point sets.
def sift_uv(good1, good2):
    """Return the mean (u, v) displacement from points `good1` to `good2`.

    Args:
        good1: (N, 2) array of source coordinates.
        good2: (N, 2) array of destination coordinates, row-aligned with good1.

    Returns:
        (2,) array: the average displacement vector good2 - good1.
    """
    uv_all = good2 - good1
    # Idiomatic replacement for np.sum(...) / len(good1); identical for N >= 1
    # (the original implicitly assumed len(good1) == len(good2) anyway).
    return np.mean(uv_all, axis=0)


def sift_points(kp1, des1, kp2, des2):
    """Match SIFT descriptors between two keypoint sets via kNN + ratio test.

    Uses the module-level BFMatcher `bf`. A match is kept when the best
    distance is below 0.5x the second-best (strict Lowe ratio test), and
    duplicate matches / duplicate source points are dropped.

    Args:
        kp1, des1: keypoints and descriptors of the first image.
        kp2, des2: keypoints and descriptors of the second image.

    Returns:
        (good0, good1): (N, 2) arrays of matched coordinates in image 1
        and image 2 respectively, row-aligned.
    """
    knn_pairs = bf.knnMatch(des1, des2, k=2)

    src_pts = []
    dst_pts = []
    seen = []

    for best, second in knn_pairs:
        # Ratio test: smaller descriptor distance means a better match.
        if best.distance >= 0.5 * second.distance:
            continue
        if best in seen:  # drop duplicate match objects
            continue
        seen.append(best)
        p1 = kp1[best.queryIdx].pt  # queryIdx indexes keypoints of image 1
        p2 = kp2[best.trainIdx].pt  # trainIdx indexes keypoints of image 2
        if p1 not in src_pts:  # drop duplicate source coordinates
            src_pts.append((p1[0], p1[1]))
            dst_pts.append((p2[0], p2[1]))

    good0 = np.array(src_pts)  # matched coordinates, image 1
    good1 = np.array(dst_pts)  # matched coordinates, image 2
    return good0, good1

def feature_inbox(box, kp, des):
    """Filter keypoints and descriptors to those lying inside a bounding box.

    Args:
        box: array of box corner coordinates; box[0] is taken as the
             top-left (x_min, y_min) and box[3] as the bottom-right
             (x_max, y_max) corner — assumes this corner ordering.
        kp: sequence of keypoints exposing a `.pt` (x, y) attribute.
        des: sequence of descriptors, row-aligned with `kp`.

    Returns:
        (kp_in, des_in): arrays of the keypoints/descriptors inside the box
        (boundary inclusive).
    """
    x_min, y_min = box[0, :2]
    x_max, y_max = box[3, :2]

    coords = np.array([p.pt for p in kp])
    inside = (
        (coords[:, 0] >= x_min) & (coords[:, 0] <= x_max)
        & (coords[:, 1] >= y_min) & (coords[:, 1] <= y_max)
    )
    idx = np.flatnonzero(inside)

    kp_in = np.array(kp)[idx]
    des_in = np.array(des)[idx]
    return kp_in, des_in


def draw_sift(good1, good2, img1, img2):
    """Draw green lines between matched points on a side-by-side canvas.

    Args:
        good1: (N, 2) coordinates in img1.
        good2: (N, 2) coordinates in img2, row-aligned with good1.
        img1, img2: the two images (same height).

    Returns:
        The concatenated image with one line per match.
    """
    canvas = np.hstack((img1, img2))
    left = good1.astype(int)
    right = good2.astype(int)
    offset = img1.shape[1]  # img2 points are shifted right by img1's width
    for (x1, y1), (x2, y2) in zip(left, right):
        cv2.line(canvas, (x1, y1), (x2 + offset, y2), color=(0, 255, 0))
    return canvas

def sift_match(img1, img2, i):
    """SIFT keypoint matching between two images.

    The detection and ratio-test matching logic was a verbatim duplicate of
    getkp() + sift_points(); delegate to them so the logic lives in one place.

    Args:
        img1, img2: input images.
        i: frame index; unused, kept for caller compatibility (it was only
           read by the commented-out timing instrumentation).

    Returns:
        (good0, good1): (N, 2) arrays of matched coordinates in img1 and
        img2 respectively, row-aligned.
    """
    kp1, des1 = getkp(img1)
    kp2, des2 = getkp(img2)
    return sift_points(kp1, des1, kp2, des2)


def orb_match(img1, img2, i):
    """ORB feature matching between two images, with timing instrumentation.

    Args:
        img1, img2: input images.
        i: frame index, written to the timing log files.

    Returns:
        (img3, good0, good1): visualization of the top-40 matches, plus
        (N, 2) coordinate arrays of the distance-filtered matches in img1
        and img2 respectively.

    Raises:
        IndexError: if no matches are found at all.
    """
    time0 = time.time()
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)

    time1 = time.time()
    time_ = time1 - time0
    # NOTE(review): hardcoded developer-specific log path — fails on other machines.
    with open("/home/ltt/point_track/time/time_orb.txt", 'a') as fd:
        fd.write(str(i) + ' ' + str(time_) + '\n')

    # Hamming norm + cross-check is the standard pairing for binary ORB descriptors.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des1, des2), key=lambda x: x.distance)  # ascending
    min_distance = matches[0].distance

    # Keep matches within twice the best distance, with a floor of 30 so a
    # near-perfect best match does not reject everything else.
    threshold = max(2 * min_distance, 30)
    pt1_list = []
    pt2_list = []
    for x in matches:
        if x.distance <= threshold:
            pt1_list.append(kp1[x.queryIdx].pt)
            pt2_list.append(kp2[x.trainIdx].pt)
    # BUGFIX: these array conversions were inside the loop body, rebuilding
    # the arrays on every iteration (accidental O(n^2)); also dropped the
    # unused good_match accumulator.
    good0 = np.array(pt1_list)  # matched coordinates in img1
    good1 = np.array(pt2_list)  # matched coordinates in img2

    time2 = time.time()
    time_match = time2 - time1
    with open("/home/ltt/point_track/time/time_orbmatch.txt", 'a') as fd:
        fd.write(str(i) + ' ' + str(time_match) + '\n')
    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches[:40], None, flags=2)
    return img3, good0, good1



if __name__ == '__main__':
    # Demo: detect + match SIFT features between two frames of a sequence,
    # draw the matches on a side-by-side canvas, save and display the result.
    # NOTE(review): every input/output path below is hardcoded to a specific
    # machine — parameterize before reuse.
    q=1
    # img0 = cv2.imread("/home/ltt/point_track/huaxi/image01_z/00000.jpg", flags=1)
    # img1 = cv2.imread("/home/ltt/point_track/huaxi/image01_z/00082.jpg", flags=1)
    img0_pth = "/data_big/ltt/STIRD/25/left/seq15/frames/00002.png"
    img1_pth = "/data_big/ltt/STIRD/25/left/seq15/frames/00092.png"

    img0 = cv2.imread(img0_pth, flags=1)  # flags=1: load as 3-channel BGR
    img1 = cv2.imread(img1_pth, flags=1)

    w=img0.shape[1]  # width of the left image: x-offset for points drawn from img1
    kp0,des0=getkp(img0)
    kp1,des1=getkp(img1)
    good00,good01=sift_points(kp0,des0,kp1,des1)
    # Side-by-side canvas; matched points in img1 are shifted right by w.
    img = np.concatenate((img0, img1), axis=1)
    for i in range(0,len(good00),1):
        cv2.line(img,(int(good00[i,0]),int(good00[i,1])),(int(good01[i,0])+w,int(good01[i,1])),(255, 0, 0),2)
    cv2.imwrite("/home/ltt/ref_code/EfficientLoFTR/result/sift_stir.png", img)
    #
    cv2.namedWindow("1", cv2.WINDOW_NORMAL)
    cv2.imshow("1", img)
    cv2.waitKey(0)
    #
    # H, mask = cv2.findHomography(good00,good01, method=cv2.RANSAC, ransacReprojThreshold=2.0, maxIters=5000,
    #                           confidence=0.9999)
    # mask=np.array(mask)
    # a=sum(mask==1)
    #
    # for i in range(0,len(good00),5):
    #     box2_x1, box2_x1_x, box2_x1_y =patch_track.H_point(H, good00[i])
    #     cv2.line(img1,(int(good00[i,0]),int(good00[i,1])),(int(box2_x1_x+img0.shape[1]),int(box2_x1_y)),(0,200,0))
    #
    # cv2.namedWindow("1", cv2.WINDOW_NORMAL)
    # cv2.imshow("1", img)
    #
    # cv2.namedWindow("2", cv2.WINDOW_NORMAL)
    # cv2.imshow("2", img1)
    # cv2.waitKey(0)
    #
    #
    # img3=draw_sift(good00,good01,img0,img1)
    #
    # img4, good0_, good1_=orb_match(img0,img1)
    # img5 = draw_sift(good0_, good1_, img0, img1)
    # cv2.imwrite(os.path.join('/home/ltt/point_track/track_image/mask1/sift.jpg'), img3)
    # cv2.imwrite(os.path.join('/home/ltt/point_track/track_image/mask1/orb.jpg'), img5)
    #
    # a=1