import cv2

# Task 2: photograph a book from two different poses, detect feature points
# with any matching method, perform the matching, and visualize the result.

# Load the two views.
imgL = cv2.imread('left.jpg')
imgR = cv2.imread('right.jpg')
# cv2.imread returns None (no exception) when a file is missing or unreadable;
# fail fast here instead of with a confusing cvtColor error later.
if imgL is None or imgR is None:
    raise FileNotFoundError("could not read 'left.jpg' and/or 'right.jpg'")
# Feature detection operates on single-channel images.
grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
# SURF detector (opencv-contrib xfeatures2d). hessianThreshold tunes precision:
# larger values keep fewer, stronger keypoints.
surf = cv2.xfeatures2d.SURF_create(hessianThreshold=800)
kL, dL = surf.detectAndCompute(grayL, None)   # keypoints, float descriptors (left)
kR, dR = surf.detectAndCompute(grayR, None)   # keypoints, float descriptors (right)

# --- Brute-force matching ---------------------------------------------------
# SURF descriptors are float vectors, so L2 is the appropriate distance norm.
bf = cv2.BFMatcher(cv2.NORM_L2)
# Match left descriptors against right descriptors (one best match each).
matches = bf.match(dL, dR)
# Sort by descriptor distance so the strongest correspondences come first.
matches = sorted(matches, key=lambda m: m.distance)
# Draw matched keypoint pairs; the named constant replaces the magic flags=2
# (suppresses drawing of unmatched single keypoints).
img3 = cv2.drawMatches(imgL, kL, imgR, kR, matches, None,
                       flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv2.namedWindow("SURF", 0)            # resizable window
cv2.resizeWindow("SURF", 640, 480)
cv2.imshow("SURF", img3)
# ----------------------------------------------------------------------------
# FLANN-based matching
# BUG FIX: in FLANN's algorithm enum the KD-tree id is 1; the original value 0
# selects FLANN_INDEX_LINEAR (brute-force), silently defeating FLANN's purpose.
FLANN_INDEX_KDTREE = 1
# KD-tree index parameters (number of parallel trees to build).
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
# Search parameters: more checks -> better accuracy, slower search.
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
# k-NN match: the 2 nearest neighbours per descriptor feed Lowe's ratio test.
matches = flann.knnMatch(dL, dR, k=2)
# Lowe's ratio test — keep a match only when the best neighbour is clearly
# better than the runner-up. Guard against entries with fewer than 2
# neighbours, which knnMatch can return and which would break unpacking.
good = [pair[0] for pair in matches
        if len(pair) == 2 and pair[0].distance < 0.6 * pair[1].distance]
# Draw the surviving matches (flags value equivalent to the original flags=2).
img3 = cv2.drawMatches(imgL, kL, imgR, kR, good, None,
                       flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

cv2.namedWindow("SURF-FLANN", 0)      # resizable window
cv2.resizeWindow("SURF-FLANN", 640, 480)
cv2.imshow("SURF-FLANN", img3)
cv2.waitKey(0)