import json
import numpy as np
import os
import re
import copy
import csv
import shutil
import argparse
from scipy.spatial.transform import Rotation
from my_io import readcsv

def process_folder(folder_path):
    # 读取文件夹路径
    assert os.path.exists(folder_path)

    def extract_number(name):
        """Return the first run of digits in *name* as an int.

        Falls back to +infinity when no digits exist, so digit-less names
        sort to the end when this is used as a sort key.
        """
        found = re.search(r'\d+', name)
        if found is None:
            return float('inf')
        return int(found.group())

    def bin_timestamp(folder_path):
        """Write the numerically sorted .bin timestamps to txt1.txt.

        Lists the files in *folder_path*, sorts them by their embedded
        number (the timestamp), strips the .bin extension and reinserts a
        decimal point after the 10th character — assumes names are
        13-digit unix-millisecond stamps, giving "seconds.milliseconds"
        (TODO confirm stems are always >= 10 characters).  One value per
        line is written to txt1.txt in the current working directory.
        """
        # os.scandir() returns a closeable iterator; use the context
        # manager so the OS directory handle is released deterministically
        # (the original leaked it until garbage collection).
        with os.scandir(folder_path) as dir_entries:
            file_names = [entry.name for entry in dir_entries if entry.is_file()]

        # Sort by the embedded number rather than lexically.
        sorted_file_names = sorted(file_names, key=extract_number)

        timestamps = []
        for file_name in sorted_file_names:
            # Only .bin point-cloud files carry timestamps we care about.
            if not file_name.endswith('.bin'):
                continue
            stem = os.path.splitext(file_name)[0]
            timestamps.append(stem[:10] + '.' + stem[10:])

        with open('txt1.txt', 'w') as txt1_file:
            for timestamp in timestamps:
                txt1_file.write(timestamp + '\n')

    def pose_timestamp(txt_pose_file_path):
        """Match each timestamp in txt1.txt to its closest pose row.

        Reads the raw pose file, keeping columns 0-6 (translation +
        quaternion) plus the last column (timestamp) from every line with
        at least 8 columns, and dumps them to txt2.txt.  Then, for every
        timestamp in txt1.txt, the pose row whose timestamp is numerically
        closest is written to filtered_txt2_data.txt (one row per input
        timestamp, first row wins on ties).
        """
        pose_data = []
        with open(txt_pose_file_path, 'r') as pose_file:
            for line in pose_file:
                data = line.strip().split()
                if len(data) >= 8:  # need 7 pose columns plus a timestamp
                    pose_data.append(data[:7] + [data[-1]])

        # Intermediate dump kept for debugging / pipeline compatibility.
        with open('txt2.txt', 'w') as txt2_file:
            for data in pose_data:
                txt2_file.write(' '.join(data) + '\n')

        # The original opened these files without closing them; use
        # context managers so the handles are released.
        with open('txt1.txt', 'r') as f:
            txt1_data = [line.strip() for line in f]
        with open('txt2.txt', 'r') as f:
            txt2_last_column = [line.strip().split()[-1] for line in f]

        # Parse the pose timestamps once instead of once per comparison.
        pose_times = [float(value) for value in txt2_last_column]

        # For each bin timestamp pick the index of the nearest pose
        # timestamp; min() keeps the first minimum, matching the original
        # strict '<' linear scan.
        closest_indices = []
        for ts in txt1_data:
            target = float(ts)
            closest_indices.append(
                min(range(len(pose_times)), key=lambda i: abs(target - pose_times[i]))
            )

        with open('filtered_txt2_data.txt', 'w') as filtered_txt2_file:
            for index in closest_indices:
                filtered_txt2_file.write(' '.join(pose_data[index]) + '\n')

        print("处理完成，结果已保存到filtered_txt2_data.txt文件中。")

    def pose_extract():
        """Convert each filtered pose row into a 4x4 homogeneous transform.

        Each line of filtered_txt2_data.txt holds tx ty tz qw qx qy qz.
        The quaternion (stored scalar-first) is turned into a rotation
        matrix and combined with the translation; every resulting matrix
        is written to full_pose.txt as four space-separated rows.
        """
        with open('filtered_txt2_data.txt', 'r') as src:
            lines = src.readlines()

        transformation_matrices = []
        for line in lines:
            fields = line.strip().split()

            # Translation column vector from the first three fields.
            translation = np.array([float(v) for v in fields[:3]]).reshape(3, 1)

            # Quaternion is stored w-first; scipy expects x, y, z, w.
            qw, qx, qy, qz = (float(fields[i]) for i in range(3, 7))
            rotation = Rotation.from_quat(np.array([qx, qy, qz, qw])).as_matrix()

            # Assemble the homogeneous 4x4 transform.
            transform = np.vstack((
                np.hstack((rotation, translation)),
                np.array([0, 0, 0, 1]).reshape(1, 4),
            ))
            transformation_matrices.append(transform)

        with open('full_pose.txt', 'w') as dst:
            for matrix in transformation_matrices:
                dst.write('\n'.join(' '.join(map(str, row)) for row in matrix) + '\n')

        print('处理完成，结果已保存到full_pose.txt文件中。')

    def pose_flatten():
        """Flatten the 4x4 transforms from full_pose.txt into single lines.

        Writes two files (paths taken from the enclosing ``args``):
        - args.full_bin_pose_dir: all 16 matrix values per line;
        - args.full_bin_pose_flatten_dir: the first 3 rows only
          (12 values), i.e. without the constant '0 0 0 1' bottom row.
        """
        with open('full_pose.txt', 'r') as file:
            lines = file.readlines()

        # Regroup consecutive runs of 4 text rows back into 4x4 matrices.
        matrices = []
        matrix_lines = []
        for line in lines:
            matrix_lines.append(line.strip().split())
            if len(matrix_lines) == 4:
                matrices.append(np.array(matrix_lines, dtype=float))
                matrix_lines = []

        # A trailing partial matrix indicates a malformed input file; it
        # is reported and dropped rather than padded.
        if matrix_lines:
            print("Warning: The last matrix is incomplete and will be ignored.")

        # Full 16-value rows.
        with open(args.full_bin_pose_dir, 'w') as file:
            for matrix in matrices:
                file.write(' '.join(map(str, matrix.flatten())) + '\n')

        # 12-value rows: drop the homogeneous bottom row.
        with open(args.full_bin_pose_flatten_dir, 'w') as file:
            for matrix in matrices:
                file.write(' '.join(map(str, matrix[:-1].flatten())) + '\n')

        print('处理完成，结果已保存到full_pose_flatten.txt文件中。')

    def key_frame_extraction(lidar_bin_path):
        """Select key frames from the pose sequence and copy their data.

        A frame becomes a key frame when, relative to the previous key
        frame, at least 500 time units have elapsed or the squared planar
        (x, y) translation change is >= 5.  The matching .bin/.pcd/.jpg
        files are copied into the samples/ folders, README.csv's
        Samples_Size entry is updated, and the key-frame poses (without
        timestamps) are written to args.sampled_bin_pose_dir.
        """
        # Point-cloud filenames define the timestamp ordering.
        point_cloud_files = sorted([f for f in os.listdir(lidar_bin_path) if f.endswith('.bin')])

        with open(args.full_bin_pose_dir, 'r') as f:
            poses = [list(map(float, line.split())) for line in f.readlines()]

        with open(args.full_bin_pose_flatten_dir, 'r') as f:
            poses_flatten = [list(map(float, line.split())) for line in f.readlines()]

        timestamps = [os.path.splitext(filename)[0] for filename in point_cloud_files]

        # Re-save the full 16-value poses as {timestamp: 4x4 matrix} JSON.
        poses_dict = {timestamp: np.array(pose).reshape(4, 4).tolist() for timestamp, pose in zip(timestamps, poses)}
        with open(args.full_bin_pose_dir, 'w') as f:
            json.dump(poses_dict, f, indent=4)

        # Prepend the timestamp to every flattened (12-value) pose row.
        poses_flatten_with_timestamps = [[timestamp] + pose for timestamp, pose in zip(timestamps, poses_flatten)]

        with open(args.full_bin_pose_flatten_dir, 'w') as f:
            for pose in poses_flatten_with_timestamps:
                f.write(' '.join(map(str, pose)) + '\n')

        # Round-trip through the file so every field (incl. the timestamp,
        # originally a string) comes back as float.
        with open(args.full_bin_pose_flatten_dir, 'r') as f:
            poses_flatten_with_timestamps = [list(map(float, line.split())) for line in f.readlines()]

        # Row layout after the timestamp is prepended:
        #   [t, r00, r01, r02, tx, r10, r11, r12, ty, r20, r21, r22, tz]
        # so the planar translation lives at indices 4 (x) and 8 (y).
        def _position(row):
            return np.array([row[4], row[8]])

        # The first frame is always a key frame.
        keyframes = [poses_flatten_with_timestamps[0]]

        for current_pose in poses_flatten_with_timestamps[1:]:
            last_keyframe = keyframes[-1]

            timestamp_diff = current_pose[0] - last_keyframe[0]
            # BUG FIX: the original compared row[1:3], i.e. the rotation
            # entries r00/r01, while its comments said "position"; compare
            # the actual x/y translation components instead.
            position_diff = np.sum(np.square(_position(current_pose) - _position(last_keyframe)))

            if timestamp_diff >= 500 or position_diff >= 5:
                keyframes.append(current_pose)

        with open('key_frame_pose.txt', 'w') as f:
            for keyframe in keyframes:
                f.write(' '.join(map(str, keyframe)) + '\n')

        print('处理完成，结果已保存到key_frame_pose.txt文件中。')

        # Make sure every output folder exists.
        for out_dir in (args.key_frame_bin_dir, args.key_frame_pcd_dir,
                        args.key_frame_jpg_dir, args.label_dir):
            os.makedirs(out_dir, exist_ok=True)

        # Re-read the key frames (all-float rows).
        with open('key_frame_pose.txt', 'r') as f:
            keyframes = [list(map(float, line.split())) for line in f.readlines()]

        # Back to the integer-string form used in the data filenames.
        keyframe_timestamps = [str(int(kf[0])) for kf in keyframes]

        # Copy the bin/pcd/jpg triplet belonging to each key frame; missing
        # files are reported and skipped (best effort, as before).
        for timestamp in keyframe_timestamps:
            sources = [
                (os.path.join(args.lidar_bin_dir, f"{timestamp}.bin"), args.key_frame_bin_dir),
                (os.path.join(args.lidar_pcd_dir, f"{timestamp}.pcd"), args.key_frame_pcd_dir),
                (os.path.join(args.lidar_jpg_dir, f"{timestamp}.jpg"), args.key_frame_jpg_dir),
            ]
            for source_file, dest_dir in sources:
                if os.path.exists(source_file):
                    shutil.copy2(source_file, os.path.join(dest_dir, os.path.basename(source_file)))
                else:
                    print(f"File {source_file} does not exist, skipping.")

        print("关键帧点云提取完成")

        # Update the Samples_Size entry in README.csv.
        fieldnames = ['Item', 'Details']
        csv_datas = readcsv(os.path.join(args.readme_csv_dir, 'README.csv'), fieldnames)
        assert 'Samples_Size' in csv_datas
        csv_datas['Samples_Size'] = str(len(keyframe_timestamps))
        # Context manager so the rewritten CSV is flushed and closed (the
        # original left the handle open).
        with open(os.path.join(args.readme_csv_dir, 'README.csv'), mode='w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            # NOTE(review): no writeheader() call here, matching the
            # original behaviour — presumably readcsv() already returns the
            # header row as an entry; verify against my_io.readcsv.
            for key in csv_datas:
                writer.writerow({'Item': key, 'Details': csv_datas[key]})

        print("更新README.csv文件")

        # Drop the timestamp column before writing the sampled poses.
        poses_without_timestamps = [[float(data) for data in kf[1:]] for kf in keyframes]

        with open(args.sampled_bin_pose_dir, 'w') as f:
            for pose in poses_without_timestamps:
                f.write(' '.join(map(str, pose)) + '\n')

        print("关键帧位姿提取完成")

    def main(args):
        """Run the full bin → pose → key-frame pipeline for one folder."""
        # Fail fast with a real exception: `assert` would silently vanish
        # under `python -O`, and these are genuine input-validation checks.
        if not os.path.exists(args.lidar_bin_dir):
            raise FileNotFoundError(args.lidar_bin_dir)
        if not os.path.exists(args.pose_path):
            raise FileNotFoundError(args.pose_path)

        bin_timestamp(args.lidar_bin_dir)
        pose_timestamp(args.pose_path)
        pose_extract()
        pose_flatten()
        key_frame_extraction(args.lidar_bin_dir)

    if __name__ == '__main__':
        # NOTE: this guard sits *inside* process_folder(), so the pipeline
        # below only runs when the file is executed as a script.
        parser = argparse.ArgumentParser(description='Configuration Parameters')

        # (flag, default path, help text or None) for every CLI option.
        option_specs = [
            ('--readme_csv-dir', os.path.join(folder_path), 'readme_csv'),
            ('--lidar_bin-dir', os.path.join(folder_path, 'lidar_bin'), 'lidar_bin'),
            ('--lidar_pcd-dir', os.path.join(folder_path, 'lidar'), 'lidar_pcd'),
            ('--lidar_jpg-dir', os.path.join(folder_path, 'camera/camera75'), 'jpg'),
            ('--pose-path', os.path.join(folder_path, 'localization/pose.txt'), None),
            ('--key_frame_bin-dir', os.path.join(folder_path, 'samples/lidar_bin'), 'key_frame_lidar_bin'),
            ('--key_frame_pcd-dir', os.path.join(folder_path, 'samples/lidar'), 'key_frame_lidar_pcd'),
            ('--label-dir', os.path.join(folder_path, 'samples/label'), 'for label'),
            ('--key_frame_jpg-dir', os.path.join(folder_path, 'samples/camera/camera75'), 'key_frame_sjpg'),
            ('--full_bin_pose_flatten-dir', os.path.join(folder_path, 'localization/full_bin_pose_flatten.txt'), 'full_bin_pose_flatten'),
            ('--full_bin_pose-dir', os.path.join(folder_path, 'localization/full_bin_pose.json'), 'full_bin_pose'),
            ('--sampled_bin_pose-dir', os.path.join(folder_path, 'localization/sampled_bin_pose.txt'), 'sampled_bin_pose'),
        ]
        for flag, default, help_text in option_specs:
            if help_text is None:
                parser.add_argument(flag, default=default)
            else:
                parser.add_argument(flag, default=default, help=help_text)

        args = parser.parse_args()

        main(args)

# Process every capture sub-folder under the hard-coded result root.
base_path = '/home/shn/Project/parse_result'
for entry in os.listdir(base_path):
    candidate = os.path.join(base_path, entry)
    if os.path.isdir(candidate):
        process_folder(candidate)