import cv2
import numpy as np
import matplotlib.pyplot as plt

MIN_MATCH_COUNT = 10  # minimum number of good matches required to estimate a homography

# Read both images in color (flag=1) and shrink them so that feature
# detection and matching run on smaller data.
# BUG FIX: cv2.imread returns None silently when the file is missing or
# unreadable; without these checks the script crashes later inside
# cv2.resize with a cryptic error.
img1 = cv2.imread('1.jpg', 1)
if img1 is None:
    raise FileNotFoundError("cannot read image file '1.jpg'")
img1 = cv2.resize(img1, dsize=None, fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)

img2 = cv2.imread('2.jpg', 1)
if img2 is None:
    raise FileNotFoundError("cannot read image file '2.jpg'")
img2 = cv2.resize(img2, dsize=None, fx=0.9, fy=0.9, interpolation=cv2.INTER_LINEAR)




# Create the SIFT detector. SIFT keypoints are scale/rotation invariant,
# which makes them suitable for image registration and object matching.
sift = cv2.SIFT_create()

# Detect keypoints and compute their descriptors for both images.
# kp*: list of cv2.KeyPoint; des*: float32 descriptor matrix (one row per keypoint).
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
# Report how many keypoints were detected in each image.
print("图1检测到的特征点数目为", len(kp1))
print("图2检测到的特征点数目为", len(kp2))

# Draw the keypoints onto copies of the images and save them.
# BUG FIX: the original passed img1 itself as the outImage argument, which
# painted the rich keypoint circles onto img1 in place and corrupted every
# later match visualisation that reuses img1. Passing None makes OpenCV
# allocate a fresh output image instead.
img1_result = cv2.drawKeypoints(img1, kp1, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('result/kp_img1.jpg', img1_result)
img2_result = cv2.drawKeypoints(img2, kp2, None)
cv2.imwrite('result/kp_img2.jpg', img2_result)


# FLANN matcher: fast approximate nearest-neighbour search over the
# descriptor sets, using randomized k-d trees.
FLANN_INDEX_KDTREE = 1

index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)  # build 5 k-d trees
search_params = dict(checks=50)  # visit at most 50 tree leaves per query

# Construct the FLANN-based matcher from the two parameter dicts.
flann = cv2.FlannBasedMatcher(index_params, search_params)

# k=2: for every descriptor in des1 retrieve its two nearest descriptors
# in des2 (both neighbours are needed below for Lowe's ratio test).
matches = flann.knnMatch(des1, des2, k=2)
# One entry per query descriptor, so len(matches) == len(des1);
# each entry is itself a list of k DMatch objects.
print("匹配数目为", len(matches))

# Lowe's ratio test: keep a match only when the best neighbour is clearly
# closer than the second-best (distance ratio < 0.7).
matchesMask = [[0, 0] for _ in range(len(matches))]
for i, (m, n) in enumerate(matches):
    # m and n are DMatch objects carrying query/train indices and distances;
    # m is the closer of the two neighbours.
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]  # draw only the best match of this pair
# BUG FIX: the original printed len(matchesMask), which is the TOTAL number
# of knn pairs, not the number of matches that passed the ratio test.
valid_count = sum(flag for flag, _ in matchesMask)
print("有效匹配数目为", valid_count)

# Draw the surviving matches in green and unmatched keypoints in red.
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=(255, 0, 0),
                   matchesMask=matchesMask,
                   flags=cv2.DrawMatchesFlags_DEFAULT)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)
# OpenCV images are BGR; convert to RGB so matplotlib shows true colours.
plt.imshow(cv2.cvtColor(img3, cv2.COLOR_BGR2RGB))
plt.show()


# Alternative method: brute-force matcher. bf.match() returns the single
# best match per descriptor; bf.knnMatch() returns the k best.
bf = cv2.BFMatcher()
matches_bf = bf.knnMatch(des1, des2, k=2)
# BUG FIX: the original printed an empty line here although the comment
# asked for the match count (one entry per descriptor in des1).
print("暴力匹配数目为", len(matches_bf))

# Apply Lowe's ratio test (slightly looser 0.75 threshold here).
good = []
for m, n in matches_bf:
    if m.distance < 0.75 * n.distance:
        good.append([m])  # drawMatchesKnn expects a list of lists
print("暴力匹配好的点数目为 ", len(good))

# cv2.drawMatchesKnn expects a list of lists as matches.
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None,
                          flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv2.imwrite(r'./result/bfmatcher.jpg', img3, None)
# Convert BGR -> RGB so matplotlib displays correct colours.
plt.imshow(cv2.cvtColor(img3, cv2.COLOR_BGR2RGB))
plt.show()


# Compute a homography from the FLANN matches that pass Lowe's ratio test.
good = [m for m, n in matches if m.distance < 0.7 * n.distance]

if len(good) > MIN_MATCH_COUNT:
    # Pixel coordinates of the matched keypoints, shaped (N, 1, 2) as
    # required by findHomography / perspectiveTransform.
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # RANSAC with a 5-pixel reprojection threshold separates inliers from outliers.
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    if M is None:
        # BUG FIX: findHomography can fail (degenerate point configuration)
        # even with enough matches; the original would then crash on
        # mask.ravel() / perspectiveTransform.
        print("findHomography failed")
        matchesMask = None
    else:
        matchesMask = mask.ravel().tolist()
        # Number of RANSAC inliers (point count after findHomography, as
        # requested by the original TODO comment).
        print("RANSAC内点数目为", int(mask.sum()))
        h, w, _ = img1.shape
        # Project img1's corners into img2 and outline the detected region.
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
else:
    print( "Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT) )
    matchesMask = None

# Draw inliers in green (or all good matches if homography estimation failed).
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=None,
                   matchesMask=matchesMask,
                   flags=2)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
# Convert BGR -> RGB so matplotlib displays correct colours.
plt.imshow(cv2.cvtColor(img3, cv2.COLOR_BGR2RGB))
plt.show()




