import cv2
import glob
import numpy as np
import math


# SURF is a variant of SIFT that trades some accuracy for speed.
def SURF_detect(img1, img2, hessian_threshold=1000):
    """Detect, match, and visualize SURF features between two BGR images.

    ``hessian_threshold`` is the Hessian-determinant response threshold:
    higher values keep fewer but stronger keypoints (roughly 1000-2500 is
    a reasonable range).

    Side effects: shows the match visualization in a window named 'img3'
    and writes it to ``surf_matching.jpg``.

    NOTE(review): ``cv2.xfeatures2d_SURF`` requires an opencv-contrib
    build with the non-free modules enabled.
    """
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    surf = cv2.xfeatures2d_SURF.create(hessian_threshold)
    keypoints1, descriptors1 = surf.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = surf.detectAndCompute(gray2, None)

    flann = cv2.FlannBasedMatcher()
    matches = flann.knnMatch(descriptors1, descriptors2, k=2)

    # Lowe's ratio test. Guard on len(pair) == 2: knnMatch may return
    # fewer than k neighbors for sparse descriptor sets, and unpacking
    # such a pair directly would raise ValueError.
    good = [[pair[0]] for pair in matches
            if len(pair) == 2 and pair[0].distance < 0.5 * pair[1].distance]
    img3 = cv2.drawMatchesKnn(img1, keypoints1, img2, keypoints2, good, None, flags=2)

    img3 = cv2.resize(img3, (1920, 540))

    cv2.imshow('img3', img3)
    cv2.imwrite("surf_matching.jpg", img3)


def ORB_detect(img1, img2, nfeatures=100):
    """Detect, match, and visualize ORB features between two BGR images.

    ``nfeatures`` is the maximum number of keypoints ORB will retain per
    image.

    Side effects: shows the match visualization in a window named 'img3'
    and writes it to ``orb_matching.jpg``.
    """
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    orb = cv2.ORB.create(nfeatures)

    kp1, descriptors1 = orb.detectAndCompute(gray1, None)
    kp2, descriptors2 = orb.detectAndCompute(gray2, None)

    # ORB produces binary descriptors, so they must be compared with the
    # Hamming distance; plain 'BruteForce' would use the L2 norm, which
    # gives meaningless distances for binary strings.
    bf = cv2.DescriptorMatcher.create('BruteForce-Hamming')
    matches = bf.match(descriptors1, descriptors2)

    img3 = cv2.drawMatches(img1, kp1, img2, kp2, matches, None, flags=2)

    img3 = cv2.resize(img3, (1920, 540))

    cv2.imshow('img3', img3)
    cv2.imwrite("orb_matching.jpg", img3)


def sift(img1, img2, nfeatures=1000):
    """Detect, match, and visualize SIFT features between two BGR images.

    ``nfeatures`` is the maximum number of keypoints SIFT will retain per
    image.

    Side effects: shows the match visualization in a window named 'img3'
    and writes it to ``sift_matching.jpg``.
    """
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # Name the detector 'detector', not 'sift', to avoid shadowing this
    # function's own name inside its body.
    detector = cv2.xfeatures2d_SIFT.create(nfeatures=nfeatures)
    kp1, descriptors1 = detector.detectAndCompute(gray1, None)
    kp2, descriptors2 = detector.detectAndCompute(gray2, None)

    flann = cv2.FlannBasedMatcher()
    matches = flann.knnMatch(descriptors1, descriptors2, k=2)

    # Lowe's ratio test. Guard on len(pair) == 2: knnMatch may return
    # fewer than k neighbors for sparse descriptor sets, and unpacking
    # such a pair directly would raise ValueError.
    good = [[pair[0]] for pair in matches
            if len(pair) == 2 and pair[0].distance < 0.5 * pair[1].distance]
    img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=2)

    img3 = cv2.resize(img3, (1920, 540))

    cv2.imshow('img3', img3)
    cv2.imwrite("sift_matching.jpg", img3)


def run():
    """Load the stereo image pair and run the selected matching demo."""
    img1 = cv2.imread('./imgs_featureMatching/l.jpg')
    img2 = cv2.imread('./imgs_featureMatching/r.jpg')

    # cv2.imread returns None instead of raising when a file is missing
    # or unreadable; fail fast here rather than letting cvtColor die with
    # a cryptic assertion later.
    if img1 is None or img2 is None:
        raise FileNotFoundError(
            "could not read './imgs_featureMatching/l.jpg' and/or "
            "'./imgs_featureMatching/r.jpg'")

    # Three corner detection/matching algorithms were tried; SURF and
    # SIFT both give good accuracy.
    # SURF_detect(img1, img2)
    # ORB_detect(img1, img2)
    sift(img1, img2)

    # Harris is not combined with SIFT here: Harris yields an ndarray of
    # corner responses, while SIFT's compute expects KeyPoint objects.
    cv2.waitKey(0)
