# -*- coding: utf-8 -*-
"""
Created on Mon May  2 20:46:21 2022

@author: hailiyue
"""
#import io
#from PIL import Image, ImageTk
#import tkinter as tk 
import cv2
import numpy as np
import numba as nb

# Input image pair.  NOTE(review): hard-coded absolute Windows paths — adjust
# to your own setup.
imgpath1 = "D:/2022/3DRCON/3DRecon_by_python/img1.jpg"
imgpath2 = "D:/2022/3DRCON/3DRecon_by_python/img2.jpg"
img1 = cv2.imread(imgpath1)
img2 = cv2.imread(imgpath2)
# cv2.imread silently returns None on a missing/unreadable file; fail fast
# here instead of crashing later inside cvtColor with an opaque error.
if img1 is None or img2 is None:
    raise FileNotFoundError(
        "could not read input images: %s / %s" % (imgpath1, imgpath2))
g1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
g2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

# Create the SIFT feature extractor.  SIFT moved into the main cv2 namespace
# once the patent expired (OpenCV >= 4.4); fall back to the old contrib
# location for older builds so the script runs on both.
sift = cv2.SIFT_create() if hasattr(cv2, "SIFT_create") else cv2.xfeatures2d.SIFT_create()
# Detect keypoints and compute their 128-dimensional descriptors.
kp1, de1 = sift.detectAndCompute(g1, None)
kp2, de2 = sift.detectAndCompute(g2, None)

'''定义特征之间的距离评价指标'''
def FeaturesDist(A, B):
    """Return the Euclidean (L2) distance between two SIFT descriptors.

    Uses vectorized NumPy operations instead of the Python builtin ``sum``
    over an array, which was slow and hostile to the numba-jitted caller
    ``FeaturesMatch`` (builtin ``sum`` over an ndarray iterates in Python).
    """
    d = A - B
    return np.sqrt(np.sum(d * d))

'''进行特征匹配'''
# NOTE(review): @nb.jit() without nopython relies on numba's object-mode
# fallback (reflected lists, lambda key); numba >= 0.59 makes nopython the
# default and this may stop compiling — TODO confirm against the installed
# numba version.
@nb.jit()
def FeaturesMatch(de1, de2, thresold, RatioTest = False, RatioTestThresold = 0.5):
    """Brute-force match the descriptors of de1 against de2.

    For each descriptor in de1, every descriptor in de2 within ``thresold``
    (Euclidean distance, via FeaturesDist) is collected as a candidate; the
    nearest one is accepted.  With ``RatioTest`` enabled, Lowe's ratio test
    additionally requires nearest/second-nearest < ``RatioTestThresold``
    (so at least two candidates are needed for a ratio-tested match).

    Returns a list of ``[index_in_de1, index_in_de2]`` pairs.

    Fix: removed the per-iteration ``print(i)`` debug output, which spammed
    stdout and dominated the runtime of the inner loop.
    """
    matchRes = []
    for i in range(len(de1)):
        # Candidates for descriptor i as [index_in_de2, distance].
        candidates = []
        for j in range(len(de2)):
            dist = FeaturesDist(de1[i], de2[j])
            if dist < thresold:
                candidates.append([j, dist])
        if len(candidates) == 1:
            # A lone candidate is accepted unconditionally: the ratio test
            # needs a second-best distance to compare against.
            matchRes.append([i, candidates[0][0]])
        elif len(candidates) >= 2:
            # Sort candidates by ascending distance; best match first.
            candidates = sorted(candidates, key=lambda x: x[1])
            if RatioTest:
                # Lowe's ratio test: keep only clearly-best matches.
                if abs(candidates[0][1] / candidates[1][1]) < RatioTestThresold:
                    matchRes.append([i, candidates[0][0]])
            else:
                matchRes.append([i, candidates[0][0]])
    return matchRes

'''互惠滤波'''
def ReciprocityFilter(de1, de2, thresold, RatioTest = False, RatioTestThresold = 0.5):
    """Cross-check (mutual consistency) filter on brute-force matches.

    Matches de1 -> de2 and de2 -> de1 with FeaturesMatch (forwarding all
    threshold/ratio parameters) and keeps only the pairs found in both
    directions.

    Returns a list of ``[index_in_de1, index_in_de2]`` pairs.

    Fixes: the reverse check used a per-match ``[j, i] in list`` scan, making
    this step O(n^2); a set of tuples gives O(1) lookups.  Leftover debug
    prints of the intermediate match counts were removed.
    """
    de1Tode2 = FeaturesMatch(de1, de2, thresold, RatioTest, RatioTestThresold)
    de2Tode1 = FeaturesMatch(de2, de1, thresold, RatioTest, RatioTestThresold)
    # Hash the reverse matches once for O(1) membership tests.
    reverse_pairs = {(m[0], m[1]) for m in de2Tode1}
    matchRes = []
    for i1, i2 in de1Tode2:
        # Keep the forward match only if the reverse direction agrees.
        if (i2, i1) in reverse_pairs:
            matchRes.append([i1, i2])
    return matchRes

'''绘制匹配结果'''
def DrawMatchs(img1, kp1, img2, kp2, matchs):
    """Render the two images side by side and, for each match, draw a
    connecting line plus a circle on each endpoint in a random colour.

    ``matchs`` is a sequence of [index_into_kp1, index_into_kp2] pairs.
    Returns the composited BGR (or grayscale, if the inputs are) canvas.
    """
    h1, w1 = img1.shape[0], img1.shape[1]
    h2, w2 = img2.shape[0], img2.shape[1]

    # Side-by-side canvas with a one-pixel gap between the two images.
    canvas = np.zeros((max(h1, h2), w1 + w2 + 1), np.uint8)
    if len(img1.shape) == 3:
        canvas = cv2.cvtColor(canvas, cv2.COLOR_GRAY2BGR)
    canvas[:h1, :w1] = img1
    canvas[:h2, w1 + 1:w1 + w2 + 1] = img2

    for left_idx, right_idx in matchs:
        x1, y1 = kp1[left_idx].pt
        x2, y2 = kp2[right_idx].pt

        # Line endpoints; the right-hand point is shifted past img1 + gap.
        segment = np.int32([[x1, y1], [x2 + w1 + 1, y2]])

        # One random BGR colour per match.
        b = np.random.randint(0, 256)
        g = np.random.randint(0, 256)
        r = np.random.randint(0, 256)
        cv2.polylines(canvas, [segment], True, (b, g, r), thickness=1)

        cv2.circle(canvas, (int(np.round(x1)), int(np.round(y1))), 3, (b, g, r), 1)
        cv2.circle(canvas, (int(np.round(x2)) + w1 + 1, int(np.round(y2))), 3, (b, g, r), 1)
    return canvas

'''提取矩阵'''
def find_transform(K, p1, p2):
    """Recover the relative camera pose between two views.

    K is the 3x3 intrinsic matrix; p1, p2 are matched point coordinates.
    Returns (R, T, mask) where mask flags the inliers that survived the
    cheirality check inside cv2.recoverPose.
    """
    # Single focal value: average of the two focal terms in K.
    focal = 0.5 * (K[0, 0] + K[1, 1])
    pp = (K[0, 2], K[1, 2])
    # mask has one entry per point pair: 0 for outliers, 1 for inliers.
    E, mask = cv2.findEssentialMat(p1, p2, focal, pp, cv2.RANSAC, 0.999, 1.0)
    cameraMatrix = np.array([
        [focal, 0, pp[0]],
        [0, focal, pp[1]],
        [0, 0, 1],
    ])
    # recoverPose disambiguates the four (R, T) decompositions of E and
    # returns the one whose triangulated points lie in front of both cameras.
    pass_count, R, T, mask = cv2.recoverPose(E, p1, p2, cameraMatrix, mask)

    return R, T, mask


# Match descriptors in both directions and keep only mutual matches.
# NOTE(review): a distance threshold of 10 is very tight for SIFT
# descriptors (typical L2 distances are in the hundreds) — confirm intent.
matchs = ReciprocityFilter(de1, de2, 10, False, 0.5);
'''
特征距离阈值越小   比值阈值越小   →   匹配结果越少
比值滤波的阈值，[0-1]  否则0将会全部进行过滤 1不进行滤波
之所以有输出，是因为距离阈值限制后只有唯一一个匹配项，比值滤波是在有两个匹配项时，比较两个的距离差异
若要查看比值滤波的效果 尽量选择较大的距离阈值
'''
# (Block above, paraphrased: smaller distance / ratio thresholds mean fewer
#  matches; the ratio threshold lives in [0, 1] (0 filters everything,
#  1 filters nothing); pick a larger distance threshold to actually see
#  the ratio test in action.)
print('matchs:',len(matchs))
out = DrawMatchs(img1, kp1, img2, kp2, matchs)

'''计算相机基础矩阵'''
# Collect the matched pixel coordinates from both images as Nx2 arrays.
points1 = []
points2 = []
for match in matchs:
    points1.append(kp1[match[0]].pt)
    points2.append(kp2[match[1]].pt)
points1 = np.array(points1)
points2 = np.array(points2)

# Fundamental matrix via RANSAC.
# NOTE(review): fm and this mask are never used afterwards — the pose below
# is recovered from the essential matrix instead; dead result?
fm, mask = cv2.findFundamentalMat(points1, points2, method = cv2.FM_RANSAC,ransacReprojThreshold=0.9, confidence=0.99)

# Rough intrinsics assumed from a screen DPI and a 35 mm focal length.
dpi = 96;
f = 35; # focal length in mm
dx = 25.4 / dpi; # pixel width in mm (25.4 mm per inch / dpi)
dy = dx;
h = img1.shape[0]
w = img1.shape[1]
'''相机内参矩阵'''
# K = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]], principal point at image centre.
cameraIntrinsicMatrix = np.array([[f/dx,0,w/2],[0,f/dy,h/2],[0,0,1]]);
# Essential matrix + relative pose (R, T) between the two views; mask1
# marks RANSAC inliers, the final mask marks cheirality-checked inliers.
E,mask1 = cv2.findEssentialMat(points1, points2, cameraIntrinsicMatrix, cv2.RANSAC, 0.999, 1.0)
pass_count, R, T, mask = cv2.recoverPose(E, points1, points2, cameraIntrinsicMatrix, mask1)


# Show the match visualisation until the user closes the window.
cv2.imshow('img1', out)
while True:
    if cv2.getWindowProperty('img1', 0) == -1: # -1 once the window is closed, 0 while shown
        break
    else:
        cv2.waitKey(1)
cv2.destroyAllWindows()

'''   
print('直接匹配:',len(FeaturesMatch(de1, de2, 100)));
print('比值滤波:',len(FeaturesMatch(de1, de2, 100, True, 10)));
print('互惠滤波未比值:',len(ReciprocityFilter(de1, de2, 100)));
print('互惠滤波+比值:',len(ReciprocityFilter(de1, de2, 100, True, 10)));
'''

'''
#利用函数DMatch
#创建sift特征提取器
sift = cv2.xfeatures2d.SIFT_create()
#提取特征 及其描述子 128的大小 array大小
kp1, de1 = sift.detectAndCompute(g1,None)
kp2, de2 = sift.detectAndCompute(g2,None)
match = cv2.FlannBasedMatcher(dict(algorithm =2, trees =1), {})
m = match.knnMatch(de1, de2, 2)
m = sorted(m,key = lambda x:x[0].distance)
ok = [m1 for (m1, m2) in m if m1.distance < 0.7 * m2.distance]
med = cv2.drawMatches(img1, kp1, img2, kp2, ok, None)

#从DMacth中恢复匹配点坐标
def RecoverMatchPos(kp1, kp2, DMatch): 
    points1 = []
    points2 = []
    for match in DMatch:
        idx1 = match.queryIdx;
        idx2 = match.trainIdx;
        points1.append(kp1[idx1].pt)
        points2.append(kp2[idx2].pt)
    points1 = np.array(points1)
    points2 = np.array(points2)
    return points1, points2
'''










