import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
class Window_3d:
    """Lightweight matplotlib window that redraws a 3D trajectory each frame."""

    def __init__(self, width=800, height=600):
        # Honor the requested pixel size. Divisor 125 keeps the original
        # default figsize: 800/125 = 6.4, 600/125 = 4.8 inches.
        self.fig = plt.figure(figsize=(width / 125, height / 125))
        self.ax1 = self.fig.add_subplot(1, 1, 1, projection='3d')

    def update_win(self, line1, prev_frame, curr_frame, length=200):
        """Plot up to roughly `length` points of the (N, 3) trajectory `line1`.

        `prev_frame`/`curr_frame` are accepted for interface compatibility
        but are not used by this implementation.
        """
        if line1 is not None and len(line1) > 0:
            # Subsample long trajectories so redrawing stays fast.
            step = 1
            if len(line1) > length:
                step = len(line1) // length
            line1 = line1[::step]
            x, y, z = line1[:, 0], line1[:, 1], line1[:, 2]
            self.ax1.plot(x, y, z)

        plt.show(block=False)
        plt.pause(0.01)
        # Clear our own axes explicitly (not whatever axes happen to be
        # "current") so the next call starts from a blank plot.
        self.ax1.cla()

def clear_data(gt_path='slam_dataset/Monocular_Visual_Odometry_Dataset/TUM/sequence_01/groundtruthSync.txt',
               new_gt_path='slam_dataset/Monocular_Visual_Odometry_Dataset/TUM/sequence_01/gt.txt'):
    """Copy groundtruth lines to `new_gt_path`, dropping any row containing 'NaN'.

    Parameters default to the original hard-coded dataset paths so existing
    callers are unaffected.

    Returns the number of lines kept.
    """
    # Context managers guarantee both files are closed (the original leaked
    # the read handle and closed the write handle manually).
    with open(gt_path, 'r') as gt_file:
        # split('\n')[:-1] drops the empty element after the trailing newline.
        gt_list = gt_file.read().split('\n')[:-1]
    num = 0
    with open(new_gt_path, 'w') as new_gt_file:
        for line in gt_list:
            if 'NaN' in line:
                continue
            new_gt_file.write(line + '\n')
            num += 1
    print('gt_number', num)
    return num

def main():
    """Run a minimal monocular visual-odometry loop over a TUM sequence.

    For each consecutive image pair: detect ORB features, match them with a
    brute-force Hamming matcher, estimate the essential matrix, recover the
    relative pose, integrate it into a global trajectory, and visualize the
    inlier matches (OpenCV window) plus the 3D path (matplotlib window).
    """
    win = Window_3d()
    # Overlay-text parameters for the match visualization.
    position = (50, 100)              # text origin
    font = cv2.FONT_HERSHEY_SIMPLEX   # font face
    font_scale = 2                    # font scale
    color = (0, 255, 0)               # text color (green, BGR)
    thickness = 2                     # stroke thickness

    # Camera intrinsics; the distortion row exists in camera.txt but is unused.
    data_dir = 'slam_dataset/Monocular_Visual_Odometry_Dataset/TUM/sequence_01/'
    with open(data_dir + 'camera.txt', 'r') as f:
        cam = f.read().split('\n')[:-1]
    print(cam)
    cx, cy, fx, fy, m = cam[0].split('\t')
    w, h = cam[1].split(' ')
    print('w', w, 'h', h)
    # k1,k2,p1,p2,k3 = cam[2].split(' ')
    K = np.array([[float(fx), 0, float(cx)],
                  [0, float(fy), float(cy)],
                  [0, 0, float(m)]])
    # dist_coeffs = np.array([k1, k2, p1, p2])

    with open(data_dir + 'times.txt', 'r') as f:
        img_list = f.read().split('\n')[:-1]
    num_frames = len(img_list)
    num_image = len(os.listdir(data_dir + 'images/'))
    print('num_time', num_frames, 'num_image', num_image)
    assert num_frames == num_image, 'times.txt length not comparable images number'

    # Hoisted loop invariants: creating the detector and matcher once instead
    # of once per frame avoids needless re-allocation inside the loop.
    orb = cv2.ORB_create()
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    # First frame: extract features only; no pose can be estimated yet.
    img_path = data_dir + 'images/{}.jpg'.format(img_list.pop(0).split(' ')[0])
    prev_frame = cv2.imread(img_path)
    prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
    prev_keypoints, prev_descriptors = orb.detectAndCompute(prev_gray, None)

    # Global pose state: accumulated translation in `pose`, accumulated
    # orientation in `rotation_matrix`; `path` records one 3D point per frame.
    pose = np.zeros((3, 1))
    path = []
    rotation_matrix = np.eye(3)

    # Process the remaining frames one by one.
    for line in img_list:
        img_path = data_dir + 'images/{}.jpg'.format(line.split(' ')[0])
        print(img_path.split('/')[-1])

        curr_frame = cv2.imread(img_path)
        curr_gray = cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)
        curr_keypoints, curr_descriptors = orb.detectAndCompute(curr_gray, None)

        # Brute-force matching with cross-check between consecutive frames.
        matches = bf.match(prev_descriptors, curr_descriptors)

        # Estimate the relative camera motion from the matched points.
        src_pts = np.float32([prev_keypoints[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([curr_keypoints[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
        E, mask = cv2.findEssentialMat(dst_pts, src_pts, K, method=cv2.RANSAC, prob=0.999, threshold=1.0)
        # Keep only RANSAC inliers for the match visualization below.
        src_pts_filtered = src_pts[mask.ravel() == 1]
        dst_pts_filtered = dst_pts[mask.ravel() == 1]
        src_pts_filtered = np.round(src_pts_filtered).astype(np.int16).squeeze()
        dst_pts_filtered = np.round(dst_pts_filtered).astype(np.int16).squeeze()
        _, R, t, mask = cv2.recoverPose(E, dst_pts, src_pts, K)

        # Integrate the relative motion into the global pose.
        pose += rotation_matrix.dot(t)
        rotation_matrix = R.dot(rotation_matrix)

        # Append a copy, since `pose` is mutated in place above.
        path.append(pose[:, 0].copy())

        # Draw the inlier keypoints on both frames using a random color
        # per match so correspondences can be eyeballed.
        matches_num = len(src_pts_filtered)
        print('matches num:', matches_num)
        colors = np.random.randint(0, 256, (matches_num, 3), dtype=np.uint8)
        p_f, c_f = prev_frame.copy(), curr_frame.copy()
        for i, (p1, p2) in enumerate(zip(src_pts_filtered, dst_pts_filtered)):
            clr = colors[i]
            clr = clr.tolist()
            cv2.circle(p_f, tuple(p1), 4, color=tuple(clr), thickness=-1)
            cv2.putText(p_f, 'prev frame', position, font, font_scale, color, thickness)

            cv2.circle(c_f, tuple(p2), 4, color=tuple(clr), thickness=-1)
            cv2.putText(c_f, 'curr frame', position, font, font_scale, color, thickness)
        # Side-by-side view, downsampled 2x in both axes for display.
        result = np.hstack((p_f, c_f))[::2, ::2]
        cv2.imshow('match', result)
        cv2.waitKey(30)
        win.update_win(line1=np.asarray(path), prev_frame=None, curr_frame=None)

        # Current frame becomes the reference for the next iteration.
        # (The original also carried `prev_gray` forward, but it was never
        # read again — dead store removed.)
        prev_frame = curr_frame
        prev_keypoints = curr_keypoints
        prev_descriptors = curr_descriptors
    cv2.destroyAllWindows()

if __name__ == '__main__':
    # clear_data()  # optional one-time preprocessing: strip NaN rows from groundtruthSync.txt
    main()