import os
import cv2
import numpy as np

def get_image_paths(directory):
    """Recursively collect image file paths under *directory*, sorted by full path.

    A file counts as an image when its (case-insensitive) extension is one of
    the common raster formats listed below.
    """
    # Common raster image extensions treated as images.
    exts = (".png", ".jpg", ".jpeg", ".gif", ".bmp", ".tiff", ".webp")

    found = [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(directory)
        for name in names
        if name.lower().endswith(exts)
    ]
    # Sort by the complete path so results are deterministic.
    return sorted(found)


def load_depth_map(depth_path: str, depth_height: int = 480, depth_width: int = 640) -> np.ndarray:
    """Read a raw uint16 depth buffer from disk.

    The file is interpreted as a flat array of uint16 values and reshaped to
    (depth_height, depth_width). Raises ValueError if the file size does not
    match the requested dimensions.
    """
    flat = np.fromfile(depth_path, dtype=np.uint16)
    return flat.reshape((depth_height, depth_width))


def get_contours(dst_mask_path):
    """Load a mask image and return its external contours.

    The mask is converted to grayscale and binarized at gray level 50 before
    contour extraction.
    """
    mask_img = cv2.imread(dst_mask_path)

    # Collapse to a single channel for thresholding.
    gray_img = cv2.cvtColor(mask_img, cv2.COLOR_BGR2GRAY)

    # Separate foreground from background: pixels >= 50 become 255.
    _, bin_img = cv2.threshold(gray_img, 50, 255, cv2.THRESH_BINARY)

    # Outer boundaries only, with compressed (simplified) point chains.
    found_contours, _ = cv2.findContours(bin_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    return found_contours



def get_xyz_from_pixel(pixel: tuple, depth_map: np.ndarray, K: np.ndarray, D: np.ndarray) -> tuple:
    """Back-project a pixel to a 3D point in the robot frame.

    Args:
        pixel: (u, v) pixel coordinates, u = column, v = row.
        depth_map: depth image indexed as depth_map[v][u]; values assumed to be
            in millimeters (converted to meters here).
        K: 3x3 fisheye camera intrinsic matrix.
        D: fisheye distortion coefficients (k1..k4).

    Returns:
        (x, y, z) in the robot frame (via the module-level T_v_cam extrinsic),
        or the sentinel (0, 0, 0) when the depth value is missing or beyond
        1.5 m. Callers must filter out the sentinel.
    """
    u, v = pixel
    # Depth is stored in millimeters; convert to meters. Row-major: [row=v][col=u].
    z = depth_map[v][u] / 1000.0

    # Invalid or out-of-range depth -> sentinel instead of raising, so one bad
    # pixel does not abort processing of the whole contour.
    if z <= 0 or z > 1.50:
        return 0, 0, 0

    # Undistort with the fisheye model; the result is a normalized ray
    # (x/z, y/z), which we scale by the measured depth.
    pts = np.array([[[u, v]]], dtype=np.float32)  # cv2 expects shape (1, 1, 2)
    undistorted_pts = cv2.fisheye.undistortPoints(pts, K, D)
    x = undistorted_pts[0][0][0] * z
    y = undistorted_pts[0][0][1] * z

    # Transform the camera-frame point into the robot/vehicle frame.
    return cam_2_robot(T_v_cam, (x, y, z))



def get_fuse_xyz_rpy(file_path):
    """Parse a fused-pose text file into a list of 7-field string rows.

    Each valid line carries exactly 7 whitespace-separated values. The first
    field's first '.' is removed so the timestamp can later be compared as an
    integer. Malformed lines are skipped.
    """
    with open(file_path, 'r') as fh:
        content = fh.read()

    rows = []
    for raw_line in content.strip().split('\n'):
        fields = raw_line.split()
        # Skip lines that do not hold exactly 7 values.
        if len(fields) != 7:
            continue
        # Remove only the first decimal point of the timestamp field.
        fields[0] = fields[0].replace('.', '', 1)
        rows.append(fields)

    return rows

def find_closest_timestamp_incremental(fuse_list, obts_time_stamp, start=0):
    """Scan forward through the time-ordered fuse_list for a timestamp within 30 ms.

    Resumes from *start* (the index returned by the previous call) so repeated
    lookups over increasing targets stay O(n) overall.

    Args:
        fuse_list: list of 7-element rows; element 0 is a timestamp string,
            sorted in increasing order. Units appear to be microseconds
            (the 30 ms threshold is expressed as 30*1000) — confirm upstream.
        obts_time_stamp: target timestamp, string or int.
        start: index to resume scanning from (default 0).

    Returns:
        (matched_timestamp, index, index) for the first entry whose absolute
        difference from the target is <= 30 ms (NOT necessarily the minimum
        difference), or (None, None, start) when no entry qualifies. The third
        element is the resume index for the next call.
    """
    target = int(obts_time_stamp)
    max_diff = 30 * 1000  # 30 ms threshold

    for i in range(start, len(fuse_list)):
        current = int(fuse_list[i][0])
        if abs(current - target) <= max_diff:
            return current, i, i

        # List is sorted: once past the target, later entries only diverge more.
        if current > target:
            break

    return None, None, start



def cam_2_robot(T_v_cam, point):
    """Apply the 4x4 homogeneous transform T_v_cam to a camera-frame point.

    Args:
        T_v_cam: 4x4 homogeneous transform matrix.
        point: (x, y, z) coordinates in the camera frame.

    Returns:
        The transformed (x, y, z) in the vehicle/robot frame.
    """
    # Promote to homogeneous coordinates (x, y, z, 1).
    homogeneous = np.append(np.asarray(point, dtype=float), 1.0)
    transformed = T_v_cam @ homogeneous
    return transformed[0], transformed[1], transformed[2]

# Root directory of the recorded session results (contains mask/, depth/,
# left_undistort_fisheye/ subdirectories used by the main script below).
main_path = '/media/ai/0bea6433-71ce-4bf1-a689-3b0348c3c57b/vslam/0423/965/results'

# Camera-to-vehicle extrinsic: 4x4 homogeneous transform mapping points from
# the camera frame into the robot/vehicle frame (consumed by cam_2_robot).
T_v_cam = np.array([
[0.00274878, -0.141637, 0.989915, 0.466449],
[-0.999923, 0.0115661, 0.00443145, 0.0242619],
[-0.0120771, -0.989851, -0.141595, 0.111014],
[0.0, 0.0, 0.0, 1.0]
])
if __name__ == '__main__':
    # Per-frame data directories produced by the SLAM pipeline.
    path_mask = os.path.join(main_path, "mask")
    path_depth = os.path.join(main_path, "depth")
    path_left_undistort_fisheye = os.path.join(main_path, "left_undistort_fisheye")
    fuse_txt = '/media/ai/0bea6433-71ce-4bf1-a689-3b0348c3c57b/vslam/0423/965/results/test/fusedpose.txt'

    img_paths = get_image_paths(path_left_undistort_fisheye)

    # Fisheye camera intrinsics and distortion coefficients (k1..k4).
    K = np.array([[380.514984, 0, 512.130066],
                  [0, 380.059082, 332.302795],
                  [0, 0, 1]])
    D = np.array([0.0422173887, -0.000108877211, -0.000979754375, -0.000220517832])

    last_index = 0  # resume index for the incremental timestamp search
    fuse_list = get_fuse_xyz_rpy(fuse_txt)

    # Dump all fused-pose timestamps for offline inspection.
    with open("fuse_list.txt", "w") as fuse_file:
        for row in fuse_list:
            fuse_file.write(row[0] + "\n")

    # Dump all image file IDs (timestamps) for offline inspection.
    with open("file_id.txt", "w") as file_id_file:
        for img_path in img_paths:
            file_id = os.path.splitext(os.path.basename(img_path))[0]
            file_id_file.write(file_id + "\n")

    print("fuse_list 和 file_id 已成功写入文件。")

    valid_len = 0  # number of frames that matched a fused pose
    FuseData = []

    for img_path in img_paths:
        # The image file name (without extension) is the observation timestamp
        # and also names the matching depth (.raw) and mask (.jpg) files.
        file_id = os.path.splitext(os.path.basename(img_path))[0]
        dst_raw_path = os.path.join(path_depth, file_id + ".raw")
        dst_mask_path = os.path.join(path_mask, file_id + ".jpg")

        obst = []
        closest_time, close_id, last_index = find_closest_timestamp_incremental(
            fuse_list, file_id, last_index)

        tmp_dict = {}
        if close_id is None:
            # No fused pose within tolerance for this frame; record an empty entry.
            print(f'time {file_id} step')
        else:
            valid_len += 1
            tmp_dict["fusepose"] = fuse_list[close_id]
            tmp_dict["ai_time"] = file_id

            # Load the matching depth frame and mask contours when present.
            resized_depth_map = None
            if os.path.exists(dst_raw_path):
                raw_data = load_depth_map(dst_raw_path)
                # Upscale depth to the RGB resolution; nearest-neighbor keeps
                # the raw depth values unchanged.
                resized_depth_map = cv2.resize(raw_data, (1024, 768),
                                               interpolation=cv2.INTER_NEAREST)

            contours = None
            if os.path.exists(dst_mask_path):
                contours = get_contours(dst_mask_path)

            if resized_depth_map is not None and contours is not None:
                print("Both resized_depth_map and contours exist.")
                for contour in contours:
                    # Skip tiny contours (likely noise).
                    if contour.shape[0] < 10:
                        continue
                    for pt in contour:
                        u, v = pt[0][0], pt[0][1]
                        x, y, z = get_xyz_from_pixel((u, v), resized_depth_map, K, D)
                        # (0, 0, 0) is the invalid-depth sentinel; drop it.
                        if (x, y, z) != (0, 0, 0):
                            obst.append((x, y, z))
            else:
                print("One or both of resized_depth_map and contours do not exist.")

            tmp_dict["ai_obts"] = obst

            # Report the time offset between the fused pose and the frame, in ms.
            diff = abs(closest_time - int(file_id)) / 1000
            print(f'fuse_time {closest_time} file_id {file_id} diff {int(diff)}')

        FuseData.append(tmp_dict)

    print("sucess")
    print(f'valid_len {valid_len}')
    print(f'paths {len(img_paths)}')

    output_file = "output2.txt"

    # Persist every frame's matched pose and obstacle points, one key per line.
    with open(output_file, "w") as f:
        for entry in FuseData:
            for key, value in entry.items():
                if isinstance(value, list):
                    # Lists (pose rows, obstacle tuples) become comma-joined strings.
                    f.write(f"{key}: {', '.join(map(str, value))}\n")
                else:
                    f.write(f"{key}: {value}\n")

    print(f"字典已成功写入到 {output_file}")