#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
1. 使用同一相机，在两个不同的位姿分别拍摄同一个棋盘格，然后使用本质矩阵估计两次拍摄间的平移和旋转
(提示：可参考https://blog.csdn.net/qqh19910525/article/details/52240521)
2. 使用一本书，在两个不同位姿拍摄，然后使用任一种匹配方法计算特征点，完成匹配，并标出匹配结果。
'''

import cv2
import numpy as np
from matplotlib import pyplot as plt
import glob

# --- Load the two views of the same scene as grayscale images -------------
img1 = cv2.imread('img/08.jpg', 0)   # left image
img2 = cv2.imread('img/09.jpg', 0)   # right image

# Detect SIFT keypoints and compute descriptors in both images.
# NOTE(review): cv2.xfeatures2d needs opencv-contrib; on OpenCV >= 4.4 the
# patent-free cv2.SIFT_create() is the preferred spelling.
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# FLANN-based matcher with a KD-tree index (appropriate for SIFT's float
# descriptors); 'checks' trades search accuracy against speed.
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)

flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

good = []   # matches surviving the ratio test
pts1 = []   # matched point coordinates in img1
pts2 = []   # matched point coordinates in img2

# Lowe's ratio test: keep a match only when its best neighbour is clearly
# better than the second best.  knnMatch does not guarantee exactly k
# results per query, so guard the unpack against short result lists.
for pair in matches:
    if len(pair) < 2:
        continue
    m, n = pair
    if m.distance < 0.8 * n.distance:
        good.append(m)
        pts2.append(kp2[m.trainIdx].pt)
        pts1.append(kp1[m.queryIdx].pt)

# Estimate the fundamental matrix from the matched points with RANSAC
# (reprojection threshold 1 px, confidence 0.99); 'mask' flags inliers.
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC, 1, 0.99)
print("=========fundamental mat=========")
print(F)
# NOTE(review): 'images' is never used below -- kept for compatibility,
# candidate for removal.
images = glob.glob('picture_read/*.jpg')


# Load the camera intrinsics produced by a previous calibration run.
with np.load('calibrate.npz') as X:
    mtx, dist, _, _ = [X[i] for i in ('mtx', 'dist', 'rvecs', 'tvecs')]

k = mtx  # 3x3 camera intrinsic matrix

# Estimate the essential matrix from the matched points.  cv2.RANSAC is
# the correct flag for findEssentialMat (the original passed cv2.FM_RANSAC,
# a findFundamentalMat flag that merely happens to share the same value).
E, mask0 = cv2.findEssentialMat(pts1, pts2, k, cv2.RANSAC)
print("=========essential mat=========")
print(E)

print("=========camera mat=========")
print(k)

# Decompose E into the relative rotation and translation between the two
# camera poses.  recoverPose resolves the four-fold decomposition ambiguity
# via a cheirality check on the point correspondences.
rst = cv2.recoverPose(E, pts1, pts2, k)
R3x3Mat = rst[1]   # 3x3 rotation matrix
tMat = rst[2]      # unit-norm translation direction (scale unobservable)

print('\nessentialMat = \n', E)
print('\nR3x3Mat = \n', R3x3Mat)
print('\ntMat = \n', tMat)


# 2. Photograph a book from two different poses, compute and match feature
#    points with any matching method, and visualise the matches.

# Keep only the RANSAC inliers of the fundamental-matrix fit for the
# epipolar-line visualisation below.
pts1 = pts1[mask.ravel() == 1]
pts2 = pts2[mask.ravel() == 1]

def drawlines(img1, img2, lines, pts1, pts2, color_list):
    """Draw epilines on ``img1`` and mark the matched point pairs on both.

    img1, img2 : single-channel (grayscale) images; converted to BGR here.
                 img1 is the image on which the epilines for the points of
                 img2 are drawn.
    lines      : Nx3 array of epiline coefficients [a, b, c] (a*x+b*y+c=0)
    pts1, pts2 : matched integer point coordinates in img1 / img2
    color_list : one BGR color tuple per match (same color for a point and
                 its epiline)
    Returns the two annotated BGR images.
    """
    height, width = img1.shape
    img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
    for (a, b, c), pt1, pt2, color in zip(lines, pts1, pts2, color_list):
        if b != 0:
            # Intersect a*x + b*y + c = 0 with the left and right borders.
            x0, y0 = 0, int(-c / b)
            x1, y1 = width, int(-(c + a * width) / b)
        else:
            # Vertical epiline (b == 0): x = -c/a.  The original code
            # divided by zero in this case.
            x0 = x1 = int(-c / a)
            y0, y1 = 0, height
        img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)
        img1 = cv2.circle(img1, tuple(pt1), 5, color, -1)
        img2 = cv2.circle(img2, tuple(pt2), 5, color, -1)
    return img1, img2
# Generate one random BGR color per inlier match so a point and its epiline
# share the same color across the two views.
color_list = [tuple(np.random.randint(0, 255, 3).tolist()) for _ in pts1]

# Epilines in the LEFT image corresponding to the points of the right
# image (index 2 = points belong to the second image) ...
lines1 = cv2.computeCorrespondEpilines(pts2.reshape(-1, 1, 2), 2, F)
lines1 = lines1.reshape(-1, 3)
img5, img6 = drawlines(img1, img2, lines1, pts1, pts2, color_list)

# ... and epilines in the RIGHT image for the points of the left image.
lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
lines2 = lines2.reshape(-1, 3)
img3, img4 = drawlines(img2, img1, lines2, pts2, pts1, color_list)

# Show both annotated views side by side.  NOTE(review): imshow expects
# RGB while OpenCV produces BGR, so displayed colors are channel-swapped;
# harmless here since the colors are random.
plt.subplot(121), plt.imshow(img5)
plt.subplot(122), plt.imshow(img3)
plt.show()