'''
原理可参考https://zhuanlan.zhihu.com/p/30033898
'''
import os
import cv2
import math
import sfm_config
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import least_squares
import open3d as o3d
##########################
#两张图之间的特征提取及匹配
##########################
def extract_features(image_names):
    """Detect SIFT keypoints, descriptors and per-keypoint BGR colors.

    Images that fail to load or yield <= 10 keypoints are skipped, so the
    returned sequences may be shorter than ``image_names``.

    Returns (key_points_for_all, descriptor_for_all, colors_for_all,
    key_points_for_all); the 4th element duplicates the 1st for backward
    compatibility with callers that unpacked a separate list.
    """
    sift = cv2.SIFT_create(0, 3, 0.04, 10)  # OpenCV default SIFT parameters
    key_points_for_all = []
    descriptor_for_all = []
    colors_for_all = []
    for image_name in image_names:
        image = cv2.imread(image_name)
        if image is None:
            continue
        key_points, descriptor = sift.detectAndCompute(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), None)
        # Too few keypoints makes the downstream pairwise matching useless.
        if len(key_points) <= 10:
            continue
        key_points_for_all.append(key_points)
        descriptor_for_all.append(descriptor)
        # Sample the image color at each keypoint location; pt is (x, y).
        colors = np.zeros((len(key_points), 3))
        for i, key_point in enumerate(key_points):
            x, y = key_point.pt
            colors[i] = image[int(y)][int(x)]
        colors_for_all.append(colors)
    # BUGFIX: the original wrapped these ragged per-image lists in np.array(),
    # which raises ValueError on modern NumPy. Plain lists support the same
    # indexing, len() and iteration used by every caller in this file.
    return key_points_for_all, descriptor_for_all, colors_for_all, key_points_for_all

def match_features(query, train):
    """Match two SIFT descriptor sets with a brute-force matcher plus
    Lowe's ratio test.

    Returns (matches_as_ndarray, matches_as_list) of accepted cv2.DMatch
    objects.
    """
    # Brute-force matching tries every candidate pair, so it always finds
    # the true nearest neighbour (at O(N*M) cost).
    bf = cv2.BFMatcher(cv2.NORM_L2)
    knn_matches = bf.knnMatch(query, train, k=2)
    matches = []
    # Lowe's ratio test (MRT): with nearest distance d1 and second-nearest
    # distance d2, accept the match only when d1 < MRT * d2. The threshold
    # sfm_config.MRT is tunable; there is no single canonical value.
    for pair in knn_matches:
        # BUGFIX: knnMatch can return fewer than 2 neighbours per query
        # (e.g. a tiny train set); the original unconditional `for m, n in`
        # unpacking raised ValueError in that case.
        if len(pair) == 2 and pair[0].distance < sfm_config.MRT * pair[1].distance:
            matches.append(pair[0])

    return np.array(matches), matches

def match_all_features(descriptor_for_all):
    """Match the descriptors of each consecutive image pair.

    Element i of the returned sequences holds the matches between image i
    and image i+1, so both have ``len(descriptor_for_all) - 1`` entries.
    """
    matches_for_all = []
    matches_for_all_list = []
    for i in range(len(descriptor_for_all) - 1):
        matches, matches_list = match_features(descriptor_for_all[i], descriptor_for_all[i + 1])
        matches_for_all.append(matches)
        matches_for_all_list.append(matches_list)
    # BUGFIX: do not wrap in np.array() — per-pair match counts differ, and
    # constructing a ragged ndarray raises ValueError on modern NumPy. The
    # list supports the same indexing/slicing the callers use.
    return matches_for_all, matches_for_all_list

# 可视化匹配结果
def vis_match(image_names, key_points_for_all, matches_for_all):
    """Display the matches of each consecutive image pair; any key advances."""
    pair_count = len(key_points_for_all) - 1
    for idx in range(pair_count):
        left = cv2.imread(image_names[idx])
        right = cv2.imread(image_names[idx + 1])
        # Only a slice of the matches is drawn to keep the picture readable.
        shown = matches_for_all[idx][640:680]
        # flags=2 draws only the matched keypoints, not every detected one.
        canvas = cv2.drawMatches(left, key_points_for_all[idx],
                                 right, key_points_for_all[idx + 1],
                                 shown, None, flags=2)
        cv2.imshow('result.jpg', canvas)
        cv2.waitKey(0)


######################
# 寻找图与图之间的对应相机旋转角度以及相机平移
######################
def find_transform(K, p1, p2):  
    # Estimate the relative pose (R, T) between two views from matched
    # keypoint coordinates p1, p2 using the essential matrix.
    # Returns (R, T, mask): mask flags the inliers that survived both RANSAC
    # and the cheirality check.
    # Average the two focal terms of K (assumes near-square pixels — TODO confirm).
    focal_length = 0.5 * (K[0, 0] + K[1, 1])
    principle_point = (K[0, 2], K[1, 2])
    # mask: one entry per point pair; 0 marks an outlier, 1 an inlier.
    E,mask = cv2.findEssentialMat(p1, p2, focal_length, principle_point, cv2.RANSAC, 0.999, 1.0)
    cameraMatrix = np.array([[focal_length, 0, principle_point[0]], [0, focal_length, principle_point[1]], [0, 0, 1]])
    # recoverPose disambiguates the four possible (R, T) decompositions of E
    # internally and rejects the three wrong ones.
    # The output mask keeps only inliers that pass the cheirality test
    # (points reconstruct in front of both cameras).
    pass_count, R, T, mask = cv2.recoverPose(E, p1, p2, cameraMatrix, mask)
    
    return R, T, mask

def get_matched_points(p1, p2, matches):
    """Return the (source, destination) pixel coordinates of matched keypoints.

    p1/p2 are keypoint lists; only the keypoints referenced by `matches`
    are kept, in match order.
    """
    src = []
    dst = []
    for match in matches:
        src.append(p1[match.queryIdx].pt)
        dst.append(p2[match.trainIdx].pt)

    return np.asarray(src), np.asarray(dst)

def get_matched_colors(c1, c2, matches):
    """Return the per-keypoint colors of both images, restricted to matches."""
    query_colors = []
    train_colors = []
    for match in matches:
        query_colors.append(c1[match.queryIdx])
        train_colors.append(c2[match.trainIdx])

    return np.asarray(query_colors), np.asarray(train_colors)

#选择重合的点（去除不符合要求的点（即mask=0的点））
def maskout_points(p1, mask):
    """Keep only the entries of `p1` whose mask value is positive (inliers)."""
    kept = [point for point, flag in zip(p1, mask) if flag > 0]
    return np.array(kept)
    
def init_structure(K, key_points_for_all, colors_for_all, matches_for_all):  
    """Bootstrap the reconstruction from the first two images.

    Only this first pair recovers (R, T) via the essential matrix; all later
    frames use PnP against the accumulated structure. The inlier matches are
    triangulated and the keypoint -> 3-D-point index bookkeeping is set up.

    Returns (structure, correspond_struct_idx, colors, rotations, motions).
    """
    p1, p2 = get_matched_points(key_points_for_all[0], key_points_for_all[1], matches_for_all[0])
    c1, c2 = get_matched_colors(colors_for_all[0], colors_for_all[1], matches_for_all[0])

    # BUGFIX: the original wrote `if find_transform(...)` — a non-empty tuple
    # is always truthy, so the else-branch was dead code and the pose was
    # computed twice. Call it exactly once.
    R, T, mask = find_transform(K, p1, p2)

    # Drop outliers (mask == 0) rejected by RANSAC / the cheirality check.
    p1 = maskout_points(p1, mask)
    p2 = maskout_points(p2, mask)
    colors = maskout_points(c1, mask)

    # The first camera defines the world frame: identity rotation, zero
    # translation; every later pose is expressed relative to it.
    R0 = np.eye(3, 3)
    T0 = np.zeros((3, 1))
    structure = reconstruct(K, R0, T0, R, T, p1, p2)  # triangulated 3-D points
    rotations = [R0, R]
    motions = [T0, T]
    print(rotations)

    # correspond_struct_idx[i][k] maps keypoint k of image i to a row of
    # `structure`, or -1 when not reconstructed. Kept as a plain list because
    # keypoint counts differ per image (a ragged np.array would raise on
    # modern NumPy).
    correspond_struct_idx = [np.full(len(key_p), -1, dtype=np.float64)
                             for key_p in key_points_for_all]

    idx = 0
    matches = matches_for_all[0]  # matches between frame 0 and frame 1
    for i, match in enumerate(matches):
        if mask[i] == 0:
            continue
        correspond_struct_idx[0][int(match.queryIdx)] = idx
        correspond_struct_idx[1][int(match.trainIdx)] = idx
        idx += 1
    # The surviving matches now occupy indices 0..idx-1 of `structure`;
    # every other keypoint still maps to -1.
    return structure, correspond_struct_idx, colors, rotations, motions
    
#############
#三维重建
#############
def reconstruct(K, R1, T1, R2, T2, p1, p2):
    """Triangulate the matched points p1/p2 seen by cameras (R1,T1), (R2,T2).

    Returns an (N, 3) array of 3-D points.
    """
    intrinsics = np.float32(K)
    # Build the two 3x4 camera matrices P = K [R | t].
    pose1 = np.zeros((3, 4))
    pose2 = np.zeros((3, 4))
    pose1[:, :3] = np.float32(R1)
    pose1[:, 3] = np.float32(T1.T)
    pose2[:, :3] = np.float32(R2)
    pose2[:, 3] = np.float32(T2.T)
    proj1 = intrinsics.dot(pose1)
    proj2 = intrinsics.dot(pose2)

    # Homogeneous 4xN coordinates of the triangulated points.
    homog = cv2.triangulatePoints(proj1, proj2, p1.T, p2.T)
    # De-homogenize each column: divide by w, keep (x, y, z).
    structure = [list(homog[:3, i] / homog[3, i]) for i in range(homog.shape[1])]

    return np.array(structure)

###########################
#将已作出的点云进行融合
###########################
def fusion_structure(matches, struct_indices, next_struct_indices, structure, next_structure, colors, next_colors):
    """Merge the freshly triangulated points of a new frame into the global
    structure.

    For each match: if the query keypoint already has a 3-D point, propagate
    its index to the train keypoint; otherwise register the new point (and
    its color) and record its index on both keypoints.

    Returns the updated (struct_indices, next_struct_indices, structure,
    colors).
    """
    # Collect new rows and append once at the end — the original called
    # np.append() inside the loop, copying the whole array per new point
    # (accidentally O(n^2)).
    new_points = []
    new_colors = []
    base = len(structure)
    for i, match in enumerate(matches):
        query_idx = match.queryIdx
        train_idx = match.trainIdx
        struct_idx = struct_indices[query_idx]
        if struct_idx >= 0:
            # Already reconstructed: just link the next frame's keypoint.
            next_struct_indices[train_idx] = struct_idx
            continue
        new_idx = base + len(new_points)
        new_points.append(next_structure[i])
        new_colors.append(next_colors[i])
        struct_indices[query_idx] = next_struct_indices[train_idx] = new_idx
    if new_points:
        structure = np.append(structure, new_points, axis=0)
        colors = np.append(colors, new_colors, axis=0)
    return struct_indices, next_struct_indices, structure, colors

#制作图像点以及空间点
def get_objpoints_and_imgpoints(matches, struct_indices, structure, key_points):
    """Collect 2D-3D correspondences for PnP.

    For every match whose query keypoint already has a reconstructed 3-D
    point (struct index >= 0), pair that 3-D point with the matched
    keypoint's pixel position in the next image. `structure` accumulates
    from the first frame, so the 3-D points stay in the first frame's
    world coordinates.
    """
    object_points = []
    image_points = []
    for match in matches:
        struct_idx = struct_indices[match.queryIdx]
        if struct_idx < 0:
            continue
        object_points.append(structure[int(struct_idx)])
        image_points.append(key_points[match.trainIdx].pt)

    return np.array(object_points), np.array(image_points)

########################
#bundle adjustment
########################

# 这部分中，函数get_3dpos是原方法中对某些点的调整，而get_3dpos2是根据笔者的需求进行的修正，即将原本需要修正的点全部删除。
# bundle adjustment请参见https://www.cnblogs.com/zealousness/archive/2018/12/21/10156733.html

def get_3dpos(pos, ob, r, t, K):
    """Refine one 3-D point by minimizing its reprojection error.

    `ob` is the observed 2-D pixel position; r, t are the camera's Rodrigues
    rotation vector and translation. Returns the optimized 3-D position.
    """
    def residual(x):
        projected, _ = cv2.projectPoints(x.reshape(1, 1, 3), r, t, K, np.array([]))
        return ob - projected.reshape(2)

    return least_squares(residual, pos).x

def get_3dpos_v1(pos, ob, r, t, K):
    """Reject a 3-D point whose reprojection error exceeds the configured
    thresholds (sfm_config.x / sfm_config.y).

    Returns `pos` unchanged when the point is acceptable, else None.
    """
    projected, _ = cv2.projectPoints(pos.reshape(1, 1, 3), r, t, K, np.array([]))
    error = ob - projected.reshape(2)
    too_far = abs(error[0]) > sfm_config.x or abs(error[1]) > sfm_config.y
    return None if too_far else pos

def bundle_adjustment(rotations, motions, K, correspond_struct_idx, key_points_for_all, structure):
    """Filter the reconstructed points by reprojection error.

    NOTE: mutates `rotations` in place — each 3x3 matrix is replaced by its
    Rodrigues vector; downstream code that reads `rotations` afterwards
    relies on this.

    Points whose reprojection error is too large are marked NaN in
    `structure` so the caller can delete them afterwards.
    """
    for i in range(len(rotations)):
        r, _ = cv2.Rodrigues(rotations[i])
        rotations[i] = r
    for i in range(len(correspond_struct_idx)):
        point3d_ids = correspond_struct_idx[i]
        key_points = key_points_for_all[i]
        r = rotations[i]
        t = motions[i]
        for j in range(len(point3d_ids)):
            point3d_id = int(point3d_ids[j])
            if point3d_id < 0:
                continue
            new_point = get_3dpos_v1(structure[point3d_id], key_points[j].pt, r, t, K)
            if new_point is None:
                # BUGFIX: the original assigned None into a float ndarray
                # row, which raises TypeError on modern NumPy. Write NaN
                # explicitly — the caller's cleanup pass looks for NaN rows.
                structure[point3d_id] = np.nan
            else:
                structure[point3d_id] = new_point

    return structure


def main():
    """Run the incremental SfM pipeline over every image in
    sfm_config.image_dir and save structure/colors/poses under ../data/."""
    imgdir = sfm_config.image_dir
    # os.path.join is robust to a missing trailing separator in image_dir
    # (the original relied on `imgdir + name`).
    img_names = [os.path.join(imgdir, name) for name in sorted(os.listdir(imgdir))]

    # K is the camera intrinsic matrix.
    K = sfm_config.K

    key_points_for_all, descriptor_for_all, colors_for_all, key_points_for_all_list = extract_features(img_names)
    matches_for_all, matches_for_all_list = match_all_features(descriptor_for_all)
    structure, correspond_struct_idx, colors, rotations, motions = init_structure(K, key_points_for_all, colors_for_all, matches_for_all)

    for i in range(1, len(matches_for_all)):
        # Inputs: 3-D points already reconstructed for image i, all keypoints
        # of image i+1, and the matches between images i and i+1.
        object_points, image_points = get_objpoints_and_imgpoints(matches_for_all[i], correspond_struct_idx[i], structure, key_points_for_all[i + 1])

        # solvePnPRansac (Python binding) needs more than 7 points; pad by
        # repeating the first point. BUGFIX: fail loudly when there are no
        # correspondences at all — the original crashed on object_points[0]
        # with an opaque IndexError.
        if len(image_points) == 0:
            raise RuntimeError('no 2D-3D correspondences between image %d and image %d' % (i, i + 1))
        while len(image_points) < 7:
            object_points = np.append(object_points, [object_points[0]], axis=0)
            image_points = np.append(image_points, [image_points[0]], axis=0)

        _, r, T, _ = cv2.solvePnPRansac(object_points, image_points, K, np.array([]))
        R, _ = cv2.Rodrigues(r)
        rotations.append(R)
        print("-----------------rotations-----------------")
        print(rotations)
        motions.append(T)

        p1, p2 = get_matched_points(key_points_for_all[i], key_points_for_all[i + 1], matches_for_all[i])
        c1, c2 = get_matched_colors(colors_for_all[i], colors_for_all[i + 1], matches_for_all[i])
        # Triangulate against the first frame's world coordinates: both poses
        # are expressed relative to frame 0.
        next_structure = reconstruct(K, rotations[i], motions[i], R, T, p1, p2)

        correspond_struct_idx[i], correspond_struct_idx[i + 1], structure, colors = fusion_structure(
            matches_for_all[i], correspond_struct_idx[i], correspond_struct_idx[i + 1],
            structure, next_structure, colors, c1)

    print("--------rotations---------------")
    print(rotations)
    # Saved before bundle_adjustment, which mutates `rotations` in place
    # (matrices become Rodrigues vectors).
    np.savetxt(r'../data/rotations.txt', np.reshape(np.asarray(rotations), (-1, 9)))

    print("--------motions---------------")
    print(motions)
    structure = bundle_adjustment(rotations, motions, K, correspond_struct_idx, key_points_for_all, structure)

    # Drop the points bundle_adjustment rejected (rows whose x is NaN).
    # Vectorized filter replaces the original O(n^2) delete-in-a-loop.
    valid = ~np.isnan(structure[:, 0])
    structure = structure[valid]
    colors = colors[valid]

    print("-----------------structure-----------------")
    print(structure.shape)
    print("-----------------colors-----------------")
    print(colors.shape)
    np.savetxt(r'../data/structures.txt', structure)
    np.savetxt(r'../data/colors.txt', colors)
    np.savetxt(r'../data/motions.txt', np.reshape(np.asarray(motions), (-1, 3)))
'''
注意：structure是通过不断的三角化恢复新的三维点，而三角化需要的两帧的变换矩阵，这个变换矩阵是相对于同一个世界坐标系的（也就是第一帧图像的相机坐标系）
因为求解PnP时的object_points虽然为上一帧的点，但是它的坐标系是初始帧的坐标系（object_points来源于不断累加的structure）
得到的motions和rotations应该是每一帧相对于固定的初始帧坐标系的平移和旋转
'''

# Entry point: run the full SfM pipeline only when executed as a script.
if __name__ == '__main__':
    main()
