import argparse
import cv2
import numpy as np
import imageio

import matplotlib.pyplot as plt
import pnp.services.plotmatch as plotmatch
import time
from skimage import measure
from skimage import transform
from pnp.lib.sp_net import SuperPointFeture
from pnp.lib.cnn_feature import cnn_feature_extract
from pnp.lib.coord import Coord3D
import pnp.cfg as cfg
import pnp.registry as registry
import pnp.services.MyRansac as MyRansac

#time count
# start = time.perf_counter()
#
# _RESIDUAL_THRESHOLD = 30
#Test1nThbg6kXUpJWGl7E1IGOCspRomTxdCARLviKw6E5SY8
# imgfile1 = 'E:/dataspace/yu/20210704/images/sim2.jpg'
# imgfile2 = 'E:/dataspace/yu/20210704/images/d2.jpg'
#imgfile1 = 'df-ms-data/1/df-uav-sar-500.jpg'
class RansacMatchingPhone:
    """Estimate the pose of a phone photo against a simulated base image.

    Pipeline (see ``exec``): extract SuperPoint features from both images,
    match descriptors with FLANN, filter matches with an adaptive ratio
    test, verify geometry with an affine-model RANSAC, look up 3-D world
    coordinates of the base-image inliers from a depth/coordinate CSV, and
    solve a RANSAC PnP problem to recover the camera rotation and position.
    """

    def __init__(self):
        # UAV/phone imagery (university scene) — hard-coded test data paths.
        # TODO(review): these should come from setOptions()/registry instead
        # of being baked in.
        self._targetImagePath = 'E:/dataspace/mpos/experiment/phone/1.jpg'
        self._baseImagePath = 'E:/dataspace/mpos/experiment/phone/1sim.jpg'
        self._baseDepthPath = 'E:/dataspace/mpos/experiment/phone/1d.txt'
        self._width = 768
        self._height = 1024
        self._coord = None

        # Camera intrinsics: the original calibration scaled by 0.2 (image
        # downsampled), with fx additionally scaled by 0.75 — presumably to
        # account for the portrait orientation; confirm against calibration.
        self._cameraMatrix = np.array([
            [5697.18160099225 * 0.75 * 0.2, 0, 0.2 * 2655.52222688525],
            [0, 5697.18160099225 * 0.2, 0.2 * 3658.29679303204],
            [0, 0, 1]], dtype=np.float32)
        # Distortion coefficients (k1, k2, p1, p2, k3).
        self._distCoeffs = np.array(
            [0.04804117734, 0.106330487415, 0, 0, -0.348382351964332],
            dtype=np.float32)

    def setOptions(self, options):
        """Placeholder: configure image/depth paths and size from *options*.

        Currently a no-op; the paths set in ``__init__`` are used instead.
        """
        return

    def initData(self):
        """Placeholder: load the 3-D coordinate grid and calibration files.

        Currently a no-op; ``exec`` builds its own ``Coord3D`` instance.
        """
        return

    def exec(self):
        """Run the full matching and pose-estimation pipeline.

        Returns:
            (t, r): camera translation as a list of 3 floats (world units,
            after the OpenCV->Cesium axis flip) and the 3x3 rotation matrix
            as nested lists.

        Raises:
            ValueError: if no descriptor matches are found or too few 3D/2D
            correspondences survive filtering for solvePnPRansac.
        """
        start = time.perf_counter()

        # Read the target (phone photo) and base (simulated render) images.
        image1 = imageio.imread(self._targetImagePath)
        image2 = imageio.imread(self._baseImagePath)

        print('read image time is %6.3f' % (time.perf_counter() - start))

        # Per-pixel 3-D world coordinates of the base image, loaded from the
        # depth/coordinate CSV that accompanies the simulated render.
        baseShp = image2.shape
        coord3d = Coord3D()
        coord3d.setSize(baseShp[1], baseShp[0])
        coord3d.loadcsv(self._baseDepthPath)

        cameraMatrix = self._cameraMatrix
        distCoeffs = self._distCoeffs

        # SuperPoint keypoints and descriptors for both images.
        # (Alternative extractors tried during development: SIFT, D2-Net.)
        spf = SuperPointFeture()
        kps_left, des_left = spf.feature_extract(self._targetImagePath)
        kps_right, des_right = spf.feature_extract(self._baseImagePath)

        # FLANN (KD-tree) 2-nearest-neighbour descriptor matching.
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=40)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des_left, des_right, k=2)
        if not matches:
            # Guard: the adaptive-threshold average below would divide by zero.
            raise ValueError('no descriptor matches between target and base image')
        g_tnm = len(matches)  # total number of matches (TNM)

        # Adaptive ratio test: keep a match only when the gap between the
        # best and second-best distance exceeds the average gap.
        disdif_avg = sum(n.distance - m.distance for m, n in matches) / len(matches)

        good_matches = []
        locations_1_to_use = []
        locations_2_to_use = []
        for m, n in matches:
            if n.distance > m.distance + disdif_avg:
                good_matches.append(m)
                p2 = kps_right[m.trainIdx]
                p1 = kps_left[m.queryIdx]
                locations_1_to_use.append([p1.pt[0], p1.pt[1]])
                locations_2_to_use.append([p2.pt[0], p2.pt[1]])

        locations_1_to_use = np.array(locations_1_to_use)
        locations_2_to_use = np.array(locations_2_to_use)

        # Geometric verification: affine-model RANSAC over the filtered pairs.
        _, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
                                    transform.AffineTransform,
                                    min_samples=3,
                                    residual_threshold=15,
                                    max_trials=1000)

        inlier_idxs = np.nonzero(inliers)[0]
        pts3d = []          # world coordinates of base-image inliers
        pts2d = []          # matching pixel coordinates in the target image
        good_matches_2 = []  # surviving cv2.DMatch objects (for display)
        for idx in inlier_idxs:
            pt1 = locations_1_to_use[idx]
            pt2 = locations_2_to_use[idx]
            # Look up the 3-D point at the rounded base pixel; coordinates
            # with |component| <= 1 appear to mean "no depth data" and are
            # skipped — TODO confirm against Coord3D.
            xyz = coord3d.getCoord3d(int(pt2[0] + 0.5), int(pt2[1] + 0.5))
            if abs(xyz[0]) > 1 and abs(xyz[1]) > 1 and abs(xyz[2]) > 1:
                pts3d.append(xyz)
                pts2d.append(pt1.copy())
                good_matches_2.append(good_matches[idx])

        g_ncm = len(good_matches_2)  # number of correct matches (NCM)

        # Recentre the world points on the depth-map centre and scale by
        # 0.01 (presumably cm -> m) to keep solvePnP well-conditioned; the
        # inverse transform is applied to tvec after PnP.
        basexyz = coord3d.getCenter()
        for p in pts3d:
            p[0] = (p[0] - basexyz[0]) * 0.01
            p[1] = (p[1] - basexyz[1]) * 0.01
            p[2] = (p[2] - basexyz[2]) * 0.01

        ptspnp3d = np.float32(pts3d)
        ptspnp2d = np.float32(pts2d)
        if len(ptspnp3d) < 4:
            # EPnP needs at least 4 correspondences.
            raise ValueError('too few 3D/2D correspondences for solvePnPRansac')

        retval, rvec, tvec, inners = cv2.solvePnPRansac(
            ptspnp3d, ptspnp2d, cameraMatrix, distCoeffs,
            flags=cv2.SOLVEPNP_EPNP)

        # FIX: previously this was assigned only inside the ShowMatch branch,
        # raising NameError at the final print when cfg.ShowMatch was False.
        g_pnp_ncm = 0 if inners is None else len(inners)

        # Per-correspondence reprojection error of the recovered pose.
        imgpoints3v, _ = cv2.projectPoints(ptspnp3d, rvec, tvec,
                                           cameraMatrix, distCoeffs)
        imgpoints2v = np.reshape(imgpoints3v, (-1, 2))
        error_list = [cv2.norm(imgpoints2v[i], ptspnp2d[i], cv2.NORM_L2)
                      for i in range(len(ptspnp2d))]
        # FIX: g_rmse was printed but never computed.
        g_rmse = float(np.sqrt(np.mean(np.square(error_list)))) if error_list else 0.0

        # 1 = reprojection error below 15 px (drawn as a good match).
        # FIX: np.int was removed in NumPy 1.24; use the builtin int.
        flag_arr = np.zeros(len(good_matches_2), dtype=int)
        for i, e in enumerate(error_list):
            if e < 15:
                flag_arr[i] = 1

        if cfg.ShowMatch:
            # Visualize the matches, colouring by reprojection-error flag.
            pts_left_arr = np.float32([[kp.pt[0], kp.pt[1]] for kp in kps_left])
            pts_right_arr = np.float32([[kp.pt[0], kp.pt[1]] for kp in kps_right])
            pts_matches_left_arr = np.array([m.queryIdx for m in good_matches_2])
            pts_matches_right_arr = np.array([m.trainIdx for m in good_matches_2])

            plt.rcParams['savefig.dpi'] = 100   # image pixels
            plt.rcParams['figure.dpi'] = 100    # resolution
            plt.rcParams['figure.figsize'] = (16.0, 5.0)  # figure size
            _, ax = plt.subplots()
            plotmatch.plot_matches_inners(
                ax,
                image1,
                image2,
                pts_left_arr,
                pts_right_arr,
                np.column_stack((pts_matches_left_arr, pts_matches_right_arr)),
                flag_arr,
                plot_matche_points=False,
                matchline=True,
                matches_color=[0, 1, 0],
                matchlinewidth=1
            )
            ax.axis('off')
            ax.set_title('')
            plt.show()

        # Rodrigues vector -> (rotation matrix, jacobian) tuple.
        R = cv2.Rodrigues(rvec)
        # World coordinates transform into the camera frame via Xw * R|T = Xc.

        # Undo the 0.01 scale and recentring applied to the 3-D points.
        tvec[0] = tvec[0] * 100 + basexyz[0]
        tvec[1] = tvec[1] * 100 + basexyz[1]
        tvec[2] = tvec[2] * 100 + basexyz[2]

        # OpenCV camera axes: X right, Y down, Z forward.
        # Convert to the Cesium convention: X right, Y up, Z backward.
        r = R[0].copy()
        flip = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=np.float64)
        r = flip.dot(r).dot(flip)
        tvec = flip.dot(tvec)
        t = [tvec[0][0], tvec[1][0], tvec[2][0]]

        # Camera position in world coordinates: -R^T * t.
        pt = np.dot(r.T, -tvec)
        print(pt)
        print(r.T)
        print("TNM:%d" % (g_tnm))
        print("NCM:%d" % (g_ncm))
        print("PNP_NCM:%d" % (g_pnp_ncm))
        print("CMR:%f" % (g_ncm / g_tnm))
        print("RMSE:%f" % (g_rmse))

        return t, r.tolist()



#bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# bf = cv2.BFMatcher()
# matches = bf.knnMatch(des_left,des_right,k=2)
#matches = sorted(matches, key = lambda x:x.distance)

#matcher = cv2.DescriptorMatcher_create("BruteForce")

# kps_left, sco_left, des_left = cnn_feature_extract(image1,  nfeatures = -1)
# kps_right, sco_right, des_right = cnn_feature_extract(image2,  nfeatures = -1)
#
# goodMatch,matches = []
# def get_good_match(des1,des2):
#     bf = cv2.BFMatcher()
#     matches = bf.knnMatch(des1, des2, k=2)
#     for m, n in matches:
#         if m.distance < 0.75 * n.distance:
#             goodMatch.append(m)
#     return goodMatch,matches
#
# print('Feature_extract time is %6.3f, left: %6.3f,right %6.3f' % ((time.perf_counter() - start), len(kps_left), len(kps_right)))
# start = time.perf_counter()
#
#
# locations_1_to_use = []
# locations_2_to_use = []
#
# for m, n in goodMatch:
#     #自适应阈值
#     if n.distance > m.distance + disdif_avg:
#         goodMatch.append(m)
#         p2 = cv2.KeyPoint(kps_right[m.trainIdx][0],  kps_right[m.trainIdx][1],  1)
#         p1 = cv2.KeyPoint(kps_left[m.queryIdx][0], kps_left[m.queryIdx][1], 1)
#         locations_1_to_use.append([p1.pt[0], p1.pt[1]])
#         locations_2_to_use.append([p2.pt[0], p2.pt[1]])
# #goodMatch = sorted(goodMatch, key=lambda x: x.distance)
# print('match num is %d' % len(goodMatch))
# locations_1_to_use = np.array(locations_1_to_use)
# locations_2_to_use = np.array(locations_2_to_use)
#
# # Perform geometric verification using RANSAC.
# _, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
#                           transform.AffineTransform,
#                           min_samples=3,
#                           residual_threshold=_RESIDUAL_THRESHOLD,
#                           max_trials=1000)
#
# print('Found %d inliers' % sum(inliers))
#
# inlier_idxs = np.nonzero(inliers)[0]
# #最终匹配结果
# matches = np.column_stack((inlier_idxs, inlier_idxs))
# print('whole time is %6.3f' % (time.perf_counter() - start0))
#
# # Visualize correspondences, and save to file.
# #1 绘制匹配连线
# plt.rcParams['savefig.dpi'] = 100 #图片像素
# plt.rcParams['figure.dpi'] = 100 #分辨率
# plt.rcParams['figure.figsize'] = (4.0, 3.0) # 设置figure_size尺寸
# _, ax = plt.subplots()
# plotmatch.plot_matches(
#     ax,
#     image1,
#     image2,
#     locations_1_to_use,
#     locations_2_to_use,
#     np.column_stack((inlier_idxs, inlier_idxs)),
#     plot_matche_points = False,
#     matchline = True,
#     matchlinewidth = 0.3)
# ax.axis('off')
# ax.set_title('')
# plt.show()