
import time
import matplotlib.pyplot as plt

import numpy as np
import cv2
def print_red_text(text):
    """Print `text` to stdout wrapped in ANSI escape codes for red."""
    print(f"\033[31m{text}\033[0m")

class Window_3d:
    """Minimal matplotlib-based 3D viewer used to display the SLAM map/trajectory.

    NOTE: `width`/`height` are kept for interface compatibility but are not
    applied — the figure size is hard-coded in inches (matplotlib uses figsize,
    not pixels).
    """

    def __init__(self, width=800, height=600):
        self.fig = plt.figure(figsize=(6.4, 4.8))
        self.ax = self.fig.add_subplot(111, projection='3d')
        # self.sc = self.ax.scatter3D([], [], [], c=[], cmap='viridis')

    def update_win(self, points, colors, lines=None):
        """Redraw one frame of the 3D view.

        points/colors are accepted for interface compatibility but currently
        unused (the scatter plot above is disabled); only `lines` — an (N, 3)
        array of trajectory positions — is drawn as a polyline.
        """
        if lines is not None and len(lines) > 0:
            x, y, z = lines[:, 0], lines[:, 1], lines[:, 2]
            self.ax.plot(x, y, z)
        plt.show(block=False)
        plt.pause(0.01)  # yield to the GUI event loop so the draw is visible
        plt.cla()  # clear the axes so the next frame starts fresh

def save_camera():
    """Return the hard-coded TUM camera calibrations.

    Each entry maps a sequence name ('f1'/'f2') to
    [fx, fy, cx, cy, k1, k2, p1, p2, k3] — intrinsics followed by the five
    radial/tangential distortion coefficients.

    Bug fix: the dict was previously built and then discarded (no return),
    making the function a no-op for every caller.
    """
    camera_dict = {}
    camera_dict['f1'] = [591.1, 590.1, 331.0, 234.0, -0.041, 0.3286, 0.0087, 0.0051, -0.5643]
    camera_dict['f2'] = [580.8, 581.8, 308.8, 253.0, -0.2297, 1.4766, 0.0005, -0.0075, -3.4194]
    return camera_dict

def rgbd2xyz(img, depth, fx, fy, cx, cy, step=4):
    """Back-project a dense RGB-D image pair into a colored 3D point cloud.

    Parameters
    ----------
    img   : color image, indexed as img[row, col]
    depth : raw TUM depth map (uint16, value/5000 = meters)
    fx, fy, cx, cy : pinhole intrinsics
    step  : pixel-grid subsampling stride in both axes

    Returns (points, colors): points is an (N, 3) float array of (x, y, z),
    colors the matching pixel values. Pixels with depth 0 (invalid) are kept,
    producing points at the origin plane — same as the original behavior.

    Rewritten from a per-pixel Python double loop to a vectorized NumPy
    computation; output ordering (row-major, v outer / u inner) is preserved.
    """
    factor = 5000  # TUM depth scale: raw / 5000 = meters
    vs = np.arange(0, depth.shape[0], step)
    us = np.arange(0, depth.shape[1], step)
    # indexing='ij' keeps v as the slow axis, matching the original loop order.
    vv, uu = np.meshgrid(vs, us, indexing='ij')
    z = depth[vv, uu] / factor * 1000  # same scaling as the original code
    x = (uu - cx) * z / fx
    y = (vv - cy) * z / fy
    points = np.stack((x, y, z), axis=-1).reshape(-1, 3)
    if img.ndim == 3:
        colors = img[vv, uu].reshape(-1, img.shape[-1])
    else:
        colors = img[vv, uu].reshape(-1)
    return points, colors


def rgbd2xyz_from_points_list(pts, img, depth, fx, fy, cx, cy, step=1):
    """Back-project a sparse list of pixel coordinates into 3D points.

    Parameters
    ----------
    pts   : iterable of (u, v) pixel positions as produced by cv2 KeyPoint.pt,
            i.e. (column, row)
    img   : color image indexed as img[row, col]
    depth : raw TUM depth map (uint16, value/5000 = meters)
    step  : subsampling stride; overridden so at most ~500 points are produced

    Returns (points, colors) as arrays, or ([], []) when pts is empty.

    Bug fix: the original swapped u/v before back-projecting, so x was computed
    from the ROW and y from the COLUMN — transposed relative to rgbd2xyz's
    convention. Depth/color indexing ([row, col]) was already correct and is
    unchanged.
    """
    factor = 5000  # TUM depth scale: raw / 5000 = meters
    points = []
    colors = []
    num = len(pts)
    if num == 0:
        return [], []
    # Cap the output at roughly 500 points to keep visualization fast.
    step = num // 500 if num > 500 else step
    for u, v in pts[::step]:  # u = column, v = row
        u, v = int(u), int(v)
        z = depth[v, u] / factor * 1000  # image indexed [row, col]
        x = (u - cx) * z / fx  # x from the column offset
        y = (v - cy) * z / fy  # y from the row offset
        points.append((x, y, z))
        colors.append(img[v, u])
    return np.asarray(points), np.asarray(colors)


def updat_3d_lines(ax, points):
    """Draw `points` (an (N, 3) array) as a polyline on the 3D axes `ax`,
    rescale the view to fit, and refresh the canvas."""
    ax.plot3D(points[:, 0], points[:, 1], points[:, 2])
    ax.relim()
    ax.autoscale_view(True, True, True)
    plt.draw()
    plt.pause(0.001)  # give the GUI event loop time to paint


class Monocular_SLAM:
    """Minimal ORB-based monocular visual odometry.

    Tracks the camera pose frame-to-frame via essential-matrix decomposition
    and accumulates a naive point "map" of matched keypoints. Pose state is a
    3x1 translation plus a 3x3 rotation, chained per frame.
    """

    def __init__(self, k, dist_coeffs):
        # k: 3x3 camera intrinsic matrix; dist_coeffs: lens distortion coeffs.
        super().__init__()
        self.k = k
        self.dist_coeffs = dist_coeffs
        self.prev_frame = None
        self.prev_keypoints = None
        self.prev_descriptors = None
        self.orb = cv2.ORB_create()

        # Initialize the camera pose
        self.pose = np.zeros((3, 1))  # camera translation
        self.rotation_matrix = np.identity(3)  # camera rotation
        self.trajectory = []  # one 3-vector position per processed frame

        # Create the map
        self.map_points = []  # stores (x, y) PIXEL tuples, not 3D points — see note below
        self.map_ref_frames = []  # reference-frame index for each map point
        self.map_colors = []  # per-point color (for visualization)


    def extract_points(self, img):
        """Process one BGR frame: undistort, extract ORB features, and (from
        the second frame on) estimate relative motion against the previous
        frame and accumulate pose + map points."""
        # Image preprocessing (grayscale, undistortion, etc.)
        # Undistort
        t = time.time()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        undistorted = cv2.undistort(gray, self.k, self.dist_coeffs)
        print('undistort:{:4f}s'.format(time.time() - t))

        # Show the undistorted image (stacked under the original)
        pic = np.vstack((img,cv2.cvtColor(undistorted,cv2.COLOR_GRAY2BGR)))
        cv2.imshow('Undistorted Image', pic)
        cv2.waitKey(10)

        # Feature extraction and matching
        keypoints, descriptors = self.orb.detectAndCompute(undistorted, None)

        if self.prev_frame is not None:
            # Feature matching (brute force, Hamming distance for ORB)
            t = time.time()
            matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
            matches = matcher.match(self.prev_descriptors, descriptors)
            print_red_text('match: {:4f}s'.format(time.time()-t))

            # Estimate camera motion from the matched point pairs
            t = time.time()
            ref_pts = np.float32([self.prev_keypoints[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
            curr_pts = np.float32([keypoints[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
            E, mask = cv2.findEssentialMat(curr_pts, ref_pts, self.k, method=cv2.RANSAC, prob=0.999, threshold=1.0)
            _, R, T, mask = cv2.recoverPose(E, curr_pts, ref_pts, self.k, mask=mask)
            print_red_text('compute camera motion: {:4f}s'.format(time.time() - t))

            # Update the camera pose by chaining the relative motion.
            # NOTE(review): translation T from recoverPose is unit-scale
            # (monocular scale ambiguity) — presumably accepted here; confirm.

            self.pose += self.rotation_matrix.dot(T)
            self.rotation_matrix = R.dot(self.rotation_matrix)
            # print(self.pose)
            self.trajectory.append(self.pose[:,0].copy())

            # Update the map with RANSAC-inlier matches
            t = time.time()
            for i, match in enumerate(matches):
                if mask[i] == 1:
                    map_idx = match.trainIdx
                    # NOTE(review): map_points holds (x, y) pixel TUPLES while
                    # map_idx is an int keypoint index, so this membership test
                    # can never be True and every inlier is appended each frame
                    # — looks like a bug (dedup never happens); confirm intent.
                    if map_idx not in self.map_points:
                        self.map_points.append(keypoints[map_idx].pt)
                        self.map_ref_frames.append(len(self.trajectory) - 1)
                        self.map_colors.append(img[int(keypoints[map_idx].pt[1]), int(keypoints[map_idx].pt[0])].tolist())
            print_red_text('update map: {:4f}s'.format(time.time() - t))

            t = time.time()
            print_red_text('map points:{}'.format(len(self.map_colors)))
            # print(self.map_points)
            # print(np.array(self.trajectory))
        # Remember this frame's keypoints/descriptors for the next iteration
        self.prev_frame = undistorted
        self.prev_keypoints = keypoints
        self.prev_descriptors = descriptors

def main():
    """Run monocular SLAM over a TUM RGB-D sequence and visualize the result.

    Reads the rgb/depth file lists and camera intrinsics from disk, then for
    each frame: runs visual odometry, back-projects the tracked map points
    using the depth image, and draws the cloud + trajectory in a 3D window.
    """
    data_dir = 'slam_dataset/rgbd_slam_dataset/TUM/'
    img_dir = data_dir + 'rgbd_dataset_freiburg1_xyz/'
    # TUM list files: skip the 3 comment/header lines and the trailing empty
    # line produced by the final newline. Use `with` so handles are closed
    # (the original leaked all three file objects).
    with open(img_dir + 'rgb.txt', 'r') as rgb_list_txt:
        rgb_list = rgb_list_txt.read().split('\n')[3:-1]
    with open(img_dir + 'depth.txt', 'r') as depth_list_txt:
        depth_list = depth_list_txt.read().split('\n')[3:-1]

    # Camera intrinsics + distortion, parsed from the second line of camera.txt
    with open(data_dir + 'camera.txt', 'r') as f:
        fx, fy, cx, cy, d0, d1, d2, d3, d4 = f.read().split('\n')[1].split(' ')[1:]

    # Camera intrinsic matrix
    K = np.array([[float(fx), 0, float(cx)],
                  [0, float(fy), float(cy)],
                  [0, 0, 1]])

    # Distortion coefficients
    dist_coeffs = np.array([float(d0), float(d1), float(d2), float(d3), float(d4)])

    # Create the 3D viewer window (matplotlib-based)
    win = Window_3d()

    # Create the SLAM tracker
    mo_slam = Monocular_SLAM(K, dist_coeffs)
    print(len(rgb_list), len(depth_list))
    for (rgb_line, depth_line) in zip(rgb_list, depth_list):
        ts, rgb_path = rgb_line.split(' ')
        _, depth_path = depth_line.split(' ')
        # Read the frame pair (IMREAD_UNCHANGED keeps the 16-bit depth)
        t = time.time()
        rgb = cv2.imread(img_dir + rgb_path)
        depth = cv2.imread(img_dir + depth_path, cv2.IMREAD_UNCHANGED)
        print('read:{:4f}s'.format(time.time()-t))

        # Visual odometry
        t = time.time()
        mo_slam.extract_points(rgb)
        print('compute visual odometry:{:4f}s'.format(time.time() - t))

        # Recover 3D coordinates of tracked points from the depth image
        t = time.time()
        points, colors = rgbd2xyz_from_points_list(mo_slam.map_points, rgb, depth, float(fx), float(fy), float(cx), float(cy))
        print('compute 3d:{:4f}s'.format(time.time() - t))
        print('pose', mo_slam.pose)
        # Translate the cloud by the current camera position
        points = np.array([mo_slam.pose[:, 0] + p for p in points])

        # Display the 3D cloud and trajectory
        t = time.time()
        win.update_win(np.array(points), np.array(colors), np.array(mo_slam.trajectory))
        print('show 3d:{:4f}s'.format(time.time() - t))

    cv2.destroyAllWindows()

def undistort_images(rgb, K, dist_coeffs):
    """Return `rgb` with lens distortion removed, given the intrinsic matrix
    `K` and distortion coefficients `dist_coeffs`."""
    return cv2.undistort(rgb, K, dist_coeffs)

# Entry point when executed as a script (not on import).
if __name__ == '__main__':
    main()
