import argparse
import cv2
import numpy as np
import imageio
import plotmatch
from lib.cnn_feature import cnn_feature_extract
import matplotlib.pyplot as plt
import time
from skimage import measure
from skimage import transform
from lib.sp_net import SuperPointFeture

# ---------------------------------------------------------------------------
# Configuration: timing start, RANSAC residual threshold, and input images.
# ---------------------------------------------------------------------------
#time count
start = time.perf_counter()

# Maximum residual (in pixels) for a correspondence to count as an inlier
# in the skimage RANSAC affine-model fit performed later in this script.
_RESIDUAL_THRESHOLD = 30
#Test1nThbg6kXUpJWGl7E1IGOCspRomTxdCARLviKw6E5SY8
# imgfile1 = 'E:/dataspace/yu/20210704/images/sim2.jpg'
# imgfile2 = 'E:/dataspace/yu/20210704/images/d2.jpg'
#imgfile1 = 'df-ms-data/1/df-uav-sar-500.jpg'

# imgfile1 = 'E:/dataspace/yu/20210801175104/test/0006sim.jpg'
# imgfile2 = 'E:/dataspace/yu/20210801175104/test/0006.jpg'

# Active image pair: imgfile1 is the reference (left/query) image,
# imgfile2 is the image matched against it (right/train).
imgfile1 = 'E:/dataspace/yu/20210801175104/simtest/1.jpg'
imgfile2 = 'E:/dataspace/yu/20210801175104/simtest/3.jpg'

# imgfile1 = 'E:/dataspace/yu/6.0.png'
# imgfile2 = 'E:/dataspace/yu/6.png'

# NOTE(review): `start` is re-assigned here, which makes the assignment at the
# top of this section redundant; only this value feeds the read-image timing.
start = time.perf_counter()

# Load the two input images to be registered.
image1 = imageio.imread(imgfile1)
image2 = imageio.imread(imgfile2)

print('read image time is %6.3f' % (time.perf_counter() - start))

start0 = time.perf_counter()
# (Alternative feature extractors — SuperPoint via SuperPointFeture, and the
# D2-Net CNN extractor — are available in this repo; SIFT is used here.)

# SIFT keypoint detection + descriptor computation, one pass per image.
# Each image's matches must later be drawn with that image's own keypoints.
sift = cv2.SIFT_create()
kps_left, des_left = sift.detectAndCompute(image1, None)
kps_right, des_right = sift.detectAndCompute(image2, None)

###D2
# kps_left, sco_left, des_left = cnn_feature_extract(image1,  nfeatures = -1)
# kps_right, sco_right, des_right = cnn_feature_extract(image2,  nfeatures = -1)
# kps1 = []
# kps2 = []
# for k in kps_left:
#     p1 = cv2.KeyPoint(k[0], k[1], 1)
#     kps1.append(p1)
# for k in kps_right:
#     p2 = cv2.KeyPoint(k[0], k[1], 1)
#     kps2.append(p2)
# kps_left = kps1
# kps_right = kps2

# --- FLANN-based descriptor matching (KD-tree) with Lowe's ratio test. ---
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=40)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des_left, des_right, k=2)

good_matches = []          # DMatch objects that pass the ratio test
locations_1_to_use = []    # (x, y) of accepted keypoints in image1
locations_2_to_use = []    # (x, y) of accepted keypoints in image2
for pair in matches:
    # Robustness fix: knnMatch may return fewer than 2 neighbours for a
    # descriptor (e.g. when the other image has very few features); the
    # original `for m, n in matches` unpacking would raise ValueError there.
    if len(pair) < 2:
        continue
    m, n = pair
    # Lowe's ratio test. The loose 0.9 threshold keeps more tentative
    # matches; RANSAC geometric verification below prunes the outliers.
    if m.distance < 0.9 * n.distance:
        good_matches.append(m)
        p1 = kps_left[m.queryIdx]
        p2 = kps_right[m.trainIdx]
        locations_1_to_use.append([p1.pt[0], p1.pt[1]])
        locations_2_to_use.append([p2.pt[0], p2.pt[1]])

# Convert to arrays for skimage's RANSAC consumption below.
locations_1_to_use = np.array(locations_1_to_use)
locations_2_to_use = np.array(locations_2_to_use)

# Geometric verification: robustly fit an affine model to the tentative
# correspondences with RANSAC and keep only the inlier matches.
_, inliers = measure.ransac(
    (locations_1_to_use, locations_2_to_use),
    transform.AffineTransform,
    min_samples=3,
    residual_threshold=_RESIDUAL_THRESHOLD,
    max_trials=1000)

# Indices of the correspondences the fitted model accepts.
inlier_idxs = np.nonzero(inliers)[0]

# Final match set. Each point is wrapped in an extra list level so that
# np.float32(pts1) later produces shape (N, 1, 2) — the layout expected by
# cv2.findHomography.
pts1 = [[locations_1_to_use[i]] for i in inlier_idxs]
pts2 = [[locations_2_to_use[i]] for i in inlier_idxs]
good_matches_2 = [good_matches[i] for i in inlier_idxs]

# --- Visualize the verified matches, then align image2 onto image1. ---
resultimage = cv2.drawMatches(image1, kps_left, image2, kps_right,
                              good_matches_2, None, flags=2)
plt.rcParams['savefig.dpi'] = 200             # saved-figure resolution
plt.rcParams['figure.dpi'] = 200              # display resolution
plt.rcParams['figure.figsize'] = (16.0, 9.0)  # figure size in inches
plt.imshow(resultimage)
plt.show()

# Inlier correspondences as float32 arrays of shape (N, 1, 2).
ptsA = np.float32(pts1)
ptsB = np.float32(pts2)

# Estimate the homography H mapping image1 points -> image2 points.
# BUG FIX: ransacReprojThreshold was defined but never passed, so OpenCV
# silently fell back to its default threshold (3.0); pass it explicitly.
ransacReprojThreshold = 4
# H is the 3x3 homography; status is OpenCV's per-point inlier mask.
H, status = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, ransacReprojThreshold)

# Warp image2 into image1's frame. Since H maps image1 -> image2, use the
# inverse map to pull image2 pixels back into image1 coordinates.
imgOut = cv2.warpPerspective(image2, H, (image1.shape[1], image1.shape[0]),
                             flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)

# 50/50 blend of the reference image and the warped image as a visual
# check of the registration quality.
overlapping = cv2.addWeighted(image1, 0.5, imgOut, 0.5, 0)
plt.rcParams['savefig.dpi'] = 100
plt.rcParams['figure.dpi'] = 100
plt.rcParams['figure.figsize'] = (16.0, 9.0)
plt.imshow(overlapping)
plt.show()





#bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# bf = cv2.BFMatcher()
# matches = bf.knnMatch(des_left,des_right,k=2)
#matches = sorted(matches, key = lambda x:x.distance)

#matcher = cv2.DescriptorMatcher_create("BruteForce")

# kps_left, sco_left, des_left = cnn_feature_extract(image1,  nfeatures = -1)
# kps_right, sco_right, des_right = cnn_feature_extract(image2,  nfeatures = -1)
#
# goodMatch,matches = []
# def get_good_match(des1,des2):
#     bf = cv2.BFMatcher()
#     matches = bf.knnMatch(des1, des2, k=2)
#     for m, n in matches:
#         if m.distance < 0.75 * n.distance:
#             goodMatch.append(m)
#     return goodMatch,matches
#
# print('Feature_extract time is %6.3f, left: %6.3f,right %6.3f' % ((time.perf_counter() - start), len(kps_left), len(kps_right)))
# start = time.perf_counter()
#
#
# locations_1_to_use = []
# locations_2_to_use = []
#
# for m, n in goodMatch:
#     #自适应阈值
#     if n.distance > m.distance + disdif_avg:
#         goodMatch.append(m)
#         p2 = cv2.KeyPoint(kps_right[m.trainIdx][0],  kps_right[m.trainIdx][1],  1)
#         p1 = cv2.KeyPoint(kps_left[m.queryIdx][0], kps_left[m.queryIdx][1], 1)
#         locations_1_to_use.append([p1.pt[0], p1.pt[1]])
#         locations_2_to_use.append([p2.pt[0], p2.pt[1]])
# #goodMatch = sorted(goodMatch, key=lambda x: x.distance)
# print('match num is %d' % len(goodMatch))
# locations_1_to_use = np.array(locations_1_to_use)
# locations_2_to_use = np.array(locations_2_to_use)
#
# # Perform geometric verification using RANSAC.
# _, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
#                           transform.AffineTransform,
#                           min_samples=3,
#                           residual_threshold=_RESIDUAL_THRESHOLD,
#                           max_trials=1000)
#
# print('Found %d inliers' % sum(inliers))
#
# inlier_idxs = np.nonzero(inliers)[0]
# #最终匹配结果
# matches = np.column_stack((inlier_idxs, inlier_idxs))
# print('whole time is %6.3f' % (time.perf_counter() - start0))
#
# # Visualize correspondences, and save to file.
# #1 绘制匹配连线
# plt.rcParams['savefig.dpi'] = 100 #图片像素
# plt.rcParams['figure.dpi'] = 100 #分辨率
# plt.rcParams['figure.figsize'] = (4.0, 3.0) # 设置figure_size尺寸
# _, ax = plt.subplots()
# plotmatch.plot_matches(
#     ax,
#     image1,
#     image2,
#     locations_1_to_use,
#     locations_2_to_use,
#     np.column_stack((inlier_idxs, inlier_idxs)),
#     plot_matche_points = False,
#     matchline = True,
#     matchlinewidth = 0.3)
# ax.axis('off')
# ax.set_title('')
# plt.show()