import json

import numpy as np
import os
import copy
import csv
import argparse


def euler_to_rotation_matrix(roll, pitch, yaw):
    """Return the 3x3 rotation matrix for the given Euler angles (radians).

    Composition order is ZYX: R = Rz(yaw) @ Ry(pitch) @ Rx(roll), i.e. the
    roll rotation is applied first, then pitch, then yaw.
    """
    cr, sr = np.cos(roll), np.sin(roll)
    cp, sp = np.cos(pitch), np.sin(pitch)
    cy, sy = np.cos(yaw), np.sin(yaw)

    rot_x = np.array([[1.0, 0.0, 0.0],
                      [0.0, cr, -sr],
                      [0.0, sr, cr]])
    rot_y = np.array([[cp, 0.0, sp],
                      [0.0, 1.0, 0.0],
                      [-sp, 0.0, cp]])
    rot_z = np.array([[cy, -sy, 0.0],
                      [sy, cy, 0.0],
                      [0.0, 0.0, 1.0]])
    return rot_z @ rot_y @ rot_x

def process_folder(folder_path):
    """Convert one capture folder's localization CSV into pose files.

    Pipeline (each step writes a file the next step reads):
      1. extract_data_from_csv  -> localization_1.txt (timestamp + raw fields)
                                   and localization_2.txt (4x4 matrices, 4 lines each)
      2. pose_flatten           -> localization_3.txt (top 3 rows flattened to one line)
      3. merge_time_stamp_and_pose -> full_bin_pose_flatten.txt (timestamp + flat pose)
      4. sampled_pose           -> full_bin_pose.json and sampled_bin_pose.txt,
                                   matching each .bin file to its nearest-in-time pose.

    NOTE(review): this function embeds an ``if __name__ == '__main__'`` guard
    and its own argparse setup at the end of its body. The nested helpers
    close over the ``args`` name bound inside that guard, so the pipeline
    only runs when the file is executed as a script — imported use is a no-op
    beyond the existence assert. localization_1/2/3.txt are written to the
    current working directory, not to ``folder_path``.
    """
    assert os.path.exists(folder_path)

    def extract_data_from_csv():
        # Read the localization CSV; emit localization_1.txt (selected raw
        # columns per row) and localization_2.txt (one 4x4 homogeneous
        # transform per row, written as 4 space-separated lines).
        with open(args.localization_dir, 'r', newline='', encoding='utf-8') as csv_file:

            csv_reader = csv.reader(csv_file)
            with open('localization_1.txt', 'w', encoding='utf-8') as txt_file:
                next(csv_reader)  # skip the header row
                transformation_matrices = []
                for row in csv_reader:
                    # Column layout: 0 = timestamp, 4-6 = x/y/z translation,
                    # 13-15 = roll/pitch/yaw in radians — assumed from how the
                    # values are used below; TODO confirm against the CSV schema.
                    extracted_data = [row[0].strip(), row[4].strip(), row[5].strip(), row[6].strip(), row[13].strip(),
                                      row[14].strip(), row[15].strip()]
                    txt_file.write(' '.join(map(str, extracted_data)) + '\n')
                    T = np.array([float(row[4]), float(row[5]), float(row[6])]).reshape(3, 1)
                    R = euler_to_rotation_matrix(float(row[13]), float(row[14]), float(row[15]))
                    # Assemble [R | T; 0 0 0 1] as a 4x4 homogeneous transform.
                    transform_matrix = np.vstack((np.hstack((R, T)), np.array([0, 0, 0, 1]).reshape(1, 4)))
                    transformation_matrices.append(transform_matrix)
                with open('localization_2.txt', 'w') as file:
                    for matrix in transformation_matrices:
                        # Serialize the matrix as space-separated rows, one row per line.
                        file.write('\n'.join([' '.join(map(str, row)) for row in matrix]) + '\n')

    def pose_flatten():
        # Read localization_2.txt back and flatten each 4x4 matrix's top
        # three rows (the [R | T] part) into a single 12-number line.
        with open('localization_2.txt', 'r') as file:
            lines = file.readlines()

        # Group every 4 consecutive lines into one matrix.
        matrices = []
        matrix_lines = []
        for line in lines:
            matrix_lines.append(line.strip().split())
            if len(matrix_lines) == 4:
                matrices.append(np.array(matrix_lines, dtype=float))
                matrix_lines = []

        # A trailing partial matrix (fewer than 4 lines) is dropped with a warning.
        if matrix_lines:
            # If the last matrix is incomplete, error handling or padding may be needed here.
            print("Warning: The last matrix is incomplete and will be ignored.")

        # matrix[:-1] drops the constant [0 0 0 1] bottom row before flattening.
        transformed_matrices_flat = [
            ' '.join(map(str, matrix[:-1].flatten())) for matrix in matrices
        ]

        # Write the flattened transforms, one per line.
        with open('localization_3.txt', 'w') as file:
            for matrix in transformed_matrices_flat:
                file.write(matrix + '\n')

    def merge_time_stamp_and_pose():
        # Prepend each flattened pose line with its timestamp (first field of
        # the corresponding localization_1.txt line); the two files are
        # assumed to be line-aligned from the earlier steps.
        with open('localization_1.txt', 'r') as f1:
            time_stamps = [line.strip().split()[0] for line in f1]
        with open('localization_3.txt', 'r') as f2, open(args.full_bin_pose_flatten_dir, 'w') as f3:
            for time_stamp, line in zip(time_stamps, f2):
                f3.write(f'{time_stamp} {line}')
        print(f'已将时间戳和位姿数据合并到full_bin_pose.txt 文件中。')

    def sampled_pose():
        # For every sampled .bin point-cloud file, find the pose whose
        # timestamp is numerically closest to the file's name, then emit
        # the matches as JSON (4x4) and as flattened text lines.
        with open(args.full_bin_pose_flatten_dir, 'r') as file:
            lines = file.readlines()
            poses = {}

            for line in lines:
                parts = line.strip().split()
                if len(parts) >= 5:
                    time_stamp = float(parts[0])
                    pose = tuple(float(x) for x in parts[1:])
                    poses[time_stamp] = pose

        with open(args.full_bin_pose_dir, 'w') as new_file:
            # .bin filenames are assumed to be integer timestamps — TODO confirm.
            point_cloud_files = sorted([f for f in os.listdir(args.sampled_lidar_bin_dir) if f.endswith('.bin')])
            timestamps = [int(os.path.splitext(filename)[0]) for filename in point_cloud_files]
            poses_dict = {}
            closest_poses = []
            for filename in point_cloud_files:
                timestamp = float(os.path.splitext(filename)[0])

                # Nearest-neighbor match on absolute timestamp difference.
                keys = np.array(list(poses.keys()))
                diff = np.abs(keys - timestamp)
                closest_idx = np.argmin(diff)
                closest_timestamp = keys[closest_idx]
                closest_pose = poses[closest_timestamp]
                # Rebuild the full 4x4 by re-appending the homogeneous bottom row.
                closest_pose = np.array(closest_pose).reshape(3, 4)
                closest_pose = np.vstack((closest_pose, [0, 0, 0, 1]))
                closest_poses.append(closest_pose)
                # pose_dict = {timestamp: np.array(closest_pose).reshape(4, 4).tolist()}
            poses_dict = {timestamp: np.array(closest_pose).reshape(4, 4).tolist() for timestamp, closest_pose in
                          zip(timestamps, closest_poses)}
            json.dump(poses_dict, new_file, indent=4)

        with open(args.sampled_bin_pose_flatten_dir, 'w') as new_file:
            # Same nearest-timestamp lookup, but written as flat 12-number lines.
            point_cloud_files = sorted([f for f in os.listdir(args.sampled_lidar_bin_dir) if f.endswith('.bin')])
            for filename in point_cloud_files:
                timestamp = float(os.path.splitext(filename)[0])

                keys = np.array(list(poses.keys()))
                diff = np.abs(keys - timestamp)
                closest_idx = np.argmin(diff)
                closest_timestamp = keys[closest_idx]
                closest_pose = poses[closest_timestamp]

                new_file.write(f"{' '.join(map(str, closest_pose))}\n")

    def main(args):
        # Run the four pipeline stages in order; each stage consumes the
        # previous stage's output file.
        assert os.path.exists(args.localization_dir)
        assert os.path.exists(args.sampled_lidar_bin_dir)

        extract_data_from_csv()
        pose_flatten()
        merge_time_stamp_and_pose()
        sampled_pose()

        return

    # NOTE(review): a __main__ guard inside a function body is unconventional;
    # it fires on every call when the file is run as a script. argparse
    # converts the hyphenated option names to underscored attributes
    # (e.g. --localization-dir -> args.localization_dir).
    if __name__ == '__main__':
        parser = argparse.ArgumentParser(description='Configuration Parameters')

        parser.add_argument('--localization-dir',
                            default=os.path.join(folder_path, 'localization/localization.csv'),
                            help='localization_file_path')
        parser.add_argument('--sampled_lidar_bin-dir',
                            default=os.path.join(folder_path, 'lidar_bin'),
                            help='sampled_lidar_bin_path')
        parser.add_argument('--full_bin_pose_flatten-dir',
                            default=os.path.join(folder_path, 'localization/full_bin_pose_flatten.txt'),
                            help='full_bin_pose_flatten_dir')
        parser.add_argument('--full_bin_pose-dir',
                            default=os.path.join(folder_path, 'localization/full_bin_pose.json'),
                            help='full_bin_pose_dir')
        parser.add_argument('--sampled_bin_pose_flatten-dir',
                            default=os.path.join(folder_path, 'localization/sampled_bin_pose.txt'),
                            help='sampled_bin_pose_flatten')
        args = parser.parse_args()

        main(args)

# Process every sub-directory of the hard-coded base path.
base_path = '/home/shn/tmp'
for entry in os.listdir(base_path):
    folder_path = os.path.join(base_path, entry)
    if not os.path.isdir(folder_path):
        continue  # skip plain files
    process_folder(folder_path)