import os
import pickle
import scipy
import numpy as np

from bisect import bisect_left
from matplotlib import pyplot as plt
from dateutil import parser

def read_bin_file_and_split_packages(filepath):
    """Read one LiDAR .bin file and split its point cloud into 125-point packages.

    File layout (little-endian): int32 packet_psn, two float32 values
    (pack_min, pack_max), int32 points_count, then points_count * 3 float32
    coordinates (x, y, z).

    Returns a list of package dicts (each holding exactly 125 points plus the
    shared header fields), or None on any read/parse failure.
    """
    try:
        with open(filepath, 'rb') as f:
            packet_psn = np.frombuffer(f.read(4), dtype='<i4')[0]
            pack_min, pack_max = np.frombuffer(f.read(8), dtype='<f4')
            points_count = np.frombuffer(f.read(4), dtype='<i4')[0]

            points = np.frombuffer(f.read(points_count * 3 * 4), dtype='<f4')
            points = points.reshape(-1, 3)

        # Validate BEFORE building packages: the format guarantees whole
        # 125-point packages, so any remainder means a corrupt/truncated file.
        # (The original built all packages first and only then raised.)
        if points.shape[0] % 125 != 0:
            raise ValueError("LiDAR points are not *125.")

        packages = []
        for start_idx in range(0, points.shape[0], 125):
            packages.append({
                "packet_psn": packet_psn,
                "pack_min": pack_min,
                "pack_max": pack_max,
                "points_count": 125,
                "points": points[start_idx:start_idx + 125],
            })
        return packages
    except Exception as e:
        print(f"read {filepath} failed: {e}")
        return None

def read_txt_file_timestamps(filepath):
    """Return one stripped timestamp string per line of *filepath*, or None
    if the file cannot be read."""
    try:
        with open(filepath, 'r') as src:
            return [line.strip() for line in src]
    except Exception as e:
        print(f"read txt file {filepath} failed: {e}")
        return None

def read_mat_files(file_path):
    """Load a MATLAB .mat file into a dict via scipy, or None on failure.

    The module top only does `import scipy`, which does not reliably bind the
    `scipy.io` subpackage on all scipy versions; import it explicitly here.
    """
    import scipy.io
    try:
        mat_data = scipy.io.loadmat(file_path)
        return mat_data
    except Exception as e:
        print(f"Error reading {file_path}: {e}")
        return None
    
def _index_files_by_prefix(filenames):
    # Map the numeric filename prefix (text before the first '_') to the
    # filename, built in ascending numeric-prefix order so a duplicate prefix
    # deterministically keeps the last (highest-sorted) file.
    indexed = {}
    for name in sorted(filenames, key=lambda x: int(x.split('_')[0])):
        indexed[name.split('_')[0]] = name
    return indexed


def process_files_in_folder(folder_path):
    """Pair matching <prefix>_*.bin and <prefix>_*.txt files in *folder_path*
    and return a list of (timestamp, package) tuples sorted by timestamp.

    Each .bin file holds LiDAR packages and the same-prefix .txt file holds
    one ISO timestamp per line; packages are zipped with timestamps up to the
    shorter of the two sequences. Unreadable or unpaired files are skipped
    with a console message.
    """
    files = os.listdir(folder_path)
    # The original duplicated this prefix-indexing loop for both extensions.
    bin_files_dict = _index_files_by_prefix([f for f in files if f.endswith('.bin')])
    txt_files_dict = _index_files_by_prefix([f for f in files if f.endswith('.txt')])

    all_data_list = []
    for prefix, bin_filename in bin_files_dict.items():
        if prefix not in txt_files_dict:
            print(f"未找到 bin 文件 {bin_filename} 对应的 txt 文件")
            continue

        packages = read_bin_file_and_split_packages(os.path.join(folder_path, bin_filename))
        timestamps = read_txt_file_timestamps(os.path.join(folder_path, txt_files_dict[prefix]))
        if packages is None or timestamps is None:
            continue

        # zip() truncates to the shorter sequence, matching the original
        # min(num_packages, num_timestamps) index loop.
        for timestamp_str, package in zip(timestamps, packages):
            try:
                all_data_list.append((parser.isoparse(timestamp_str), package))
            except Exception as e:
                print(f"解析时间戳 {timestamp_str} 失败: {e}")

    all_data_list.sort(key=lambda x: x[0])
    return all_data_list

def save_dataset_to_file(dataset, filepath):
    """Pickle *dataset* to *filepath*; failures are logged, never raised."""
    try:
        with open(filepath, 'wb') as out:
            pickle.dump(dataset, out)
    except Exception as e:
        print(f"Error saving dataset: {e}")

def load_dataset_from_file(filepath):
    """Unpickle and return the dataset stored at *filepath*, or None on error.

    NOTE(review): pickle.load on an untrusted file can execute arbitrary
    code; this is only safe on datasets this pipeline wrote itself.
    """
    try:
        with open(filepath, 'rb') as src:
            return pickle.load(src)
    except Exception as e:
        print(f"Error loading dataset: {e}")
        return None

def get_nearest_packages_points(input_time_str, num_package, data_list):
    """Collect the points of *num_package* consecutive packages centred on
    the entry of *data_list* whose timestamp is closest to *input_time_str*.

    data_list must be sorted ascending by timestamp (as produced by
    process_files_in_folder). Raises TypeError when even the closest entry is
    more than 0.1 s away from the requested time.
    """
    target = parser.isoparse(input_time_str)
    times = [entry[0] for entry in data_list]

    # Binary search for the insertion point, then pick whichever neighbour
    # is closer in time.
    pos = bisect_left(times, target)
    if pos == 0:
        nearest = 0
    elif pos == len(times):
        nearest = len(times) - 1
    else:
        gap_left = abs((times[pos - 1] - target).total_seconds())
        gap_right = abs((times[pos] - target).total_seconds())
        nearest = pos - 1 if gap_left <= gap_right else pos

    if abs((times[nearest] - target).total_seconds()) > 1e-1:
        raise TypeError("The target time is out of range")

    # Centre a window of num_package entries on the nearest index, clamping
    # it to the list bounds.
    start = max(0, nearest - num_package // 2)
    end = start + num_package
    if end > len(data_list):
        end = len(data_list)
        start = max(0, end - num_package)

    collected = []
    for _, package in data_list[start:end]:
        collected.extend(package["points"])
    return collected

def remap_histogram(image):
    """Rank-remap *image*: each distinct value is replaced by its rank among
    the unique values, then the ranks are stretched to the full 0-255 uint8
    range.

    Fix: a constant image (a single unique value) previously divided by zero
    while normalising; it now maps to all zeros.
    """
    # np.unique with return_inverse directly yields each pixel's rank among
    # the sorted unique values, replacing the Python-level value->rank dict
    # loop of the original.
    _, inverse = np.unique(image, return_inverse=True)
    remapped_image = inverse.reshape(image.shape)

    span = remapped_image.max() - remapped_image.min()
    if span == 0:
        # Single-valued image: nothing to stretch; avoid division by zero.
        return np.zeros(image.shape, dtype=np.uint8)
    normalized_image = ((remapped_image - remapped_image.min()) / span * 255).astype(np.uint8)
    return normalized_image

def filter_points_by_range(points, x_range, y_range):
    """Keep only the points whose x and y coordinates fall inside the given
    inclusive (low, high) ranges."""
    x_lo, x_hi = x_range
    y_lo, y_hi = y_range
    kept = []
    for p in points:
        if x_lo <= p[0] <= x_hi and y_lo <= p[1] <= y_hi:
            kept.append(p)
    return kept

def generate_lidar_image(points, x_range, y_range, utc_time, output_folder):
    """Rasterise *points* into a 1024x1024 max-height grid over the given
    x/y ranges and save it as <output_folder>/<utc_time>.png (':' -> '_').

    Each grid cell keeps the maximum z of the points that land in it; the
    grid is then rank-remapped to 0-255 and rendered with the jet colormap.
    """
    grid_shape = (1024, 1024)
    x_bins = np.linspace(x_range[0], x_range[1], grid_shape[1] + 1)
    y_bins = np.linspace(y_range[0], y_range[1], grid_shape[0] + 1)
    grid = np.zeros(grid_shape)

    x_indices = np.digitize([p[0] for p in points], x_bins) - 1
    y_indices = np.digitize([p[1] for p in points], y_bins) - 1

    for xi, yi, p in zip(x_indices, y_indices, points):
        if 0 <= xi < grid_shape[1] and 0 <= yi < grid_shape[0]:
            # Fix: read and write the SAME cell. The original wrote
            # grid[xi, yi] but read grid[yi, xi], so the running max was
            # taken against the transposed (wrong) cell.
            grid[xi, yi] = max(grid[xi, yi], p[2])

    grid = remap_histogram(grid)

    # Build the figure with a single full-bleed axes. The original used
    # plt.subplots and then covered its axes with add_axes, leaving a stray
    # unused axes in the figure.
    fig = plt.figure(figsize=(10, 10), dpi=1024 / 10)
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_axis_off()
    ax.imshow(grid.T, origin='lower', extent=[x_range[0], x_range[1], y_range[0], y_range[1]], cmap='jet', aspect='auto')

    sanitized_utc_time = utc_time.replace(':', '_')
    output_file = os.path.join(output_folder, f"{sanitized_utc_time}.png")
    fig.savefig(output_file, dpi=1024 / 10, bbox_inches='tight', pad_inches=0, facecolor='white')
    plt.close(fig)  # close this figure explicitly, not just the "current" one

def print_min_max(points):
    """Print the minimum and maximum of each coordinate dimension of
    *points* (one line per dimension)."""
    for idx, dim in enumerate(list(zip(*points))):
        print(f"Dimension {idx + 1}: Min = {min(dim)}, Max = {max(dim)}")

def transform_points(points):
    """Apply a fixed axis swap and offset to every point:
    (x, y, z) -> (-y - 0.5, x + 2.52, z + 100).

    Also prints the per-dimension min/max of the input for inspection.
    """
    print_min_max(points)
    return [[-p[1] - 0.5, p[0] + 2.52, p[2] + 100] for p in points]

if __name__ == "__main__":
    folder_LiDAR_path = "./workspace/data/LiDAR"
    folder_output_path = "./workspace/image/LiDAR/height"
    os.makedirs(folder_output_path, exist_ok=True)
    dataset_filepath = os.path.join(folder_LiDAR_path, "LiDAR_dataset.pkl")

    # Build the (timestamp, package) dataset once and cache it as a pickle.
    if os.path.exists(dataset_filepath):
        LiDAR_dataset = load_dataset_from_file(dataset_filepath)
    else:
        LiDAR_dataset = process_files_in_folder(folder_LiDAR_path)
        save_dataset_to_file(LiDAR_dataset, dataset_filepath)

    folder_SAR_path = "./workspace/data/SAR"

    for root, _, files in os.walk(folder_SAR_path):
        for file in files:
            if not file.endswith('.mat'):
                continue  # only .mat files go through loadmat
            radar_data = read_mat_files(os.path.join(root, file))
            if radar_data is None:
                # Fix: read_mat_files returns None on failure; the original
                # subscripted it unconditionally and crashed.
                continue
            target_time_str = radar_data["utcTimeChar"][0]

            sanitized_utc_time = target_time_str.replace(':', '_')
            output_file = os.path.join(folder_output_path, f"{sanitized_utc_time}.png")
            if os.path.exists(output_file):
                continue  # image already rendered for this capture

            collected_points = get_nearest_packages_points(target_time_str, 630, LiDAR_dataset)

            if collected_points is not None:
                x_range = (-32, 32)
                y_range = (0, 64)

                collected_points = transform_points(collected_points)
                filtered_points = filter_points_by_range(collected_points, x_range, y_range)
                # Fix: render the range-filtered points; the original computed
                # filtered_points and then passed the unfiltered list anyway.
                generate_lidar_image(filtered_points, x_range, y_range, target_time_str, folder_output_path)
