import open3d as o3d
import numpy as np
import cv2
import sys
import json
import time
import os
from pathlib import Path
from interact import demo_manual_registration, demo_crop_geometry
from cam_yaml_new import transform_yaml
from cam_view_pcd import visualize_camera_view

# "D:\ANACONDA3\envs\gyh\python.exe"


# This script adjusts the extrinsic parameters of a camera manually.
#
# Inputs:
# 1. approximate extrinsic parameters of the camera
# 2. depth image taken by the real camera
#
# Process:
# 1. load the fixed .pcd file to get point cloud (1)
# 2. use the approximate extrinsic parameters and the depth image to generate point cloud (2)
# 3. put the two point clouds into one scene and render views from several camera angles
# 4. show the views to the user in one picture and let the user adjust the extrinsic parameters
# 5. return to step 2
#
# Output:
# adjusted extrinsic parameters of the camera
#
#
from save_file import save_info
from intrinsics import intrinsics_sim, intrinsics_real
# Axis-aligned crop box applied during depth back-projection.
# Tighter alternative: focused_bbox = [[-0.8, -0.3, 0.74], [0.8, 0.3, 3.0]]
use_bbox = True  # when True, depth_to_point_cloud drops points outside focused_bbox
focused_bbox = [[-2, -0.5, 0], [2, 0.5, 3.0]]  # [min_corner, max_corner]

# Distance of the virtual cameras in save_multiview_image (0.8 recommended).
camera_distance = 5.0
bbox = []  # NOTE(review): appears unused in this file — candidate for removal

def load_point_cloud(pcd_path):
    """Read and return the point cloud stored at *pcd_path* via Open3D."""
    return o3d.io.read_point_cloud(pcd_path)


def is_point_in_bbox(point, bbox):
    """Return True if *point* (x, y, z) lies inside the axis-aligned box.

    *bbox* is ``[min_corner, max_corner]``; both bounds are inclusive.
    """
    lo, hi = bbox
    return all(lo[axis] <= point[axis] <= hi[axis] for axis in range(3))



def depth_to_point_cloud(depth_img, intrinsics):
    """Back-project a depth image into an Open3D point cloud (camera frame).

    Parameters
    ----------
    depth_img : np.ndarray
        Either a plain (H, W) depth map, or an (H, W, 4) RGB-D array whose
        first three channels are 0-255 RGB and whose channel 3 is depth.
        Depth is assumed to be in millimetres — TODO confirm with callers.
    intrinsics : sequence of 4 floats
        Pinhole intrinsics (fx, fy, cx, cy).

    Returns
    -------
    o3d.geometry.PointCloud
        Points in metres; colored from the RGB channels when present,
        otherwise uniform green. Pixels with zero depth are skipped, as are
        points outside ``focused_bbox`` when ``use_bbox`` is set.
    """
    print(f"depth_to_point_cloud shape: {depth_img.shape}")
    is_rgb = depth_img.ndim != 2
    if is_rgb:
        h, w = depth_img.shape[:2]
        depth_channel = depth_img[:, :, 3]
    else:
        # Plain (H, W) depth map. The original 3-way shape unpacking raised
        # ValueError here, making the grayscale branch unreachable.
        h, w = depth_img.shape
        depth_channel = depth_img
    fx, fy, cx, cy = intrinsics
    points = []
    colors = []
    for v in range(h):
        for u in range(w):
            z = depth_channel[v, u] * 0.001  # mm -> m
            if z == 0:
                continue  # zero depth marks an invalid pixel
            if is_rgb:
                rgb = depth_img[v, u, :3] / 255.0  # assume 0-255 RGB, normalize
            else:
                rgb = [0.0, 1.0, 0.0]  # uniform green for depth-only input
            x = (u - cx) * z / fx
            y = (v - cy) * z / fy
            pt = np.array([x, y, z])
            if use_bbox and not is_point_in_bbox(pt, focused_bbox):
                continue
            points.append(pt)
            colors.append(rgb)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(np.array(points))
    if len(colors) > 0:
        pcd.colors = o3d.utility.Vector3dVector(np.array(colors))
    return pcd


def extrinsics_dict_to_matrix(extrinsics_dict):
    """Build a 4x4 homogeneous transform from an extrinsics dict.

    *extrinsics_dict* holds ``translation`` (x, y, z) and ``rotation``
    (roll, pitch, yaw in degrees). The rotation is composed as
    ``Rz @ Ry @ Rx``.

    NOTE(review): the elementary rotations pair "pitch" with the X axis,
    "yaw" with the Y axis and "roll" with the Z axis — unusual naming,
    preserved as-is; confirm the convention against the producer of
    to_adjust.json.
    """
    trans = extrinsics_dict["translation"]
    tx, ty, tz = trans["x"], trans["y"], trans["z"]
    print("get translation")

    angles = extrinsics_dict["rotation"]
    pitch = np.deg2rad(angles["pitch"])
    yaw = np.deg2rad(angles["yaw"])
    roll = np.deg2rad(angles["roll"])
    print(f"get rotation: roll={roll}, pitch={pitch}, yaw={yaw}")

    cp, sp = np.cos(pitch), np.sin(pitch)
    ca, sa = np.cos(yaw), np.sin(yaw)
    cr, sr = np.cos(roll), np.sin(roll)
    Rx = np.array([[1, 0, 0],
                   [0, cp, -sp],
                   [0, sp, cp]])
    Ry = np.array([[ca, 0, sa],
                   [0, 1, 0],
                   [-sa, 0, ca]])
    Rz = np.array([[cr, -sr, 0],
                   [sr, cr, 0],
                   [0, 0, 1]])
    R = Rz @ Ry @ Rx

    print(f"get rotation matrix:\n {R}")
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = [tx, ty, tz]
    return T


# put two point clouds into one scene, take photos from front, back, left, right, top, bottom
# put photos into one single png for user
# put two point clouds into one scene, take photos from front, back, left, right, top, bottom
# put photos into one single png for user
def save_multiview_image(pcd1, pcd2, dir_path, width=640, height=480):
    """Render both point clouds from six axis-aligned viewpoints and tile
    the renders into one annotated 2x3 composite image.

    The composite is written to ``{dir_path}/views.png``.
    """
    views = [
        {"pos": [0, 1, 0], "up": [0, 0, 1]},  # front
        {"pos": [0, -1, 0], "up": [0, 0, 1]},  # back
        {"pos": [-1, 0, 0], "up": [0, 0, 1]},  # left
        {"pos": [1, 0, 0], "up": [0, 0, 1]},  # right
        {"pos": [0, 0, 1], "up": [0, -1, 0]},  # top
        {"pos": [0, 0, -1], "up": [0, 1, 0]},  # bottom
    ]

    center = [0, 0, 0.80]

    vis = o3d.visualization.Visualizer()
    vis.create_window(visible=True, width=width, height=height)
    # Add the geometries once. The previous code re-added the same two
    # geometries on every loop iteration, which duplicates the adds and
    # resets the view control each pass.
    vis.add_geometry(pcd1)
    vis.add_geometry(pcd2)

    imgs = []
    for idx, view in enumerate(views):
        print(f"Rendering view {idx + 1}/{len(views)}: {view}")

        ctr = vis.get_view_control()
        ctr.set_lookat(center)
        # Camera looks along -pos, i.e. toward the lookat point.
        ctr.set_front([-view["pos"][i] for i in range(3)])
        ctr.set_up(view["up"])
        ctr.set_zoom(0.7)
        # NOTE(review): an eye position derived from camera_distance was
        # computed here before but never applied; zoom alone controls the
        # apparent distance.
        vis.poll_events()
        vis.update_renderer()
        img = vis.capture_screen_float_buffer(False)
        imgs.append((np.asarray(img) * 255).astype(np.uint8))

    vis.destroy_window()

    view_names = ["Front", "Back", "Left", "Right", "Top", "Bottom"]
    rows, cols = 2, 3
    h, w, c = imgs[0].shape
    canvas = np.zeros((rows * h, cols * w, c), dtype=np.uint8)
    for idx, img in enumerate(imgs):
        y = (idx // cols) * h
        x = (idx % cols) * w
        canvas[y : y + h, x : x + w] = img
        # Black border around each tile.
        cv2.rectangle(
            canvas,
            (x, y),
            (x + w - 1, y + h - 1),
            (0, 0, 0),
            4,
        )
        # View-name label in the tile's top-left corner.
        cv2.putText(
            canvas,
            view_names[idx],
            (x + 10, y + 30),
            cv2.FONT_HERSHEY_SIMPLEX,
            1.0,
            (0, 0, 0),
            2,
            cv2.LINE_AA,
        )

    cv2.imwrite(f"{dir_path}/views.png", canvas)
    print(f"Saved multi-view image to {dir_path}/views.png")


def show_interactive_pointclouds(
    pcd1, pcd2, extrinsics_dict=None, width=640, height=480
):
    """Open an interactive Open3D window showing both point clouds.

    The user may freely rotate, pan and zoom; the program resumes once the
    window is closed. When *extrinsics_dict* is given, a red sphere marks
    its translation. A blue sphere always marks the world origin.
    """
    import open3d as o3d

    geometries = [pcd1, pcd2]
    if extrinsics_dict is not None:
        # Red marker at the extrinsics translation.
        trans = extrinsics_dict["translation"]
        extr_marker = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)
        extr_marker.translate([trans["x"], trans["y"], trans["z"]])
        extr_marker.paint_uniform_color([1, 0, 0])
        geometries.append(extr_marker)
    # Blue marker at the origin [0, 0, 0] (the original comment said green,
    # but the color actually set is blue).
    origin_marker = o3d.geometry.TriangleMesh.create_sphere(radius=0.05)
    origin_marker.paint_uniform_color([0, 0, 1])
    geometries.append(origin_marker)

    print("请在弹出的窗口中自由拖动、缩放、旋转视角，关闭窗口后程序继续。")
    o3d.visualization.draw_geometries(
        geometries,
        window_name="自由视角点云",
        width=width,
        height=height,
        left=50,
        top=50,
        point_show_normal=False,
    )


# 修改 watch_and_visualize 调用
# 修改 watch_and_visualize 调用
def watch_and_visualize(dir_path, sim_pcd, intrinsics_real, intrinsics_sim, real_pcd):
    """Run manual point-pair registration between the real and simulated
    clouds and write the result to ``{dir_path}/to_adjust.json``."""
    extrinsics_path = f"{dir_path}/to_adjust.json"
    print("请按照顺时针选取点云中的点，至少选择3个对应关系")
    # Back-project both RGB-D arrays with no extrinsics applied.
    pcd_real = depth_to_point_cloud(real_pcd, intrinsics_real)
    pcd_sim = depth_to_point_cloud(sim_pcd, intrinsics_sim)

    registration = demo_manual_registration(pcd_real, pcd_sim)
    with open(extrinsics_path, "w") as f:
        json.dump(registration, f, ensure_ascii=False, indent=2)


# CLI help text, printed by main() when no directory argument is supplied.
usage = """
Usage: python adjust.py <dir_path>

参数说明：
    <dir_path>    包含待调整外参的 to_adjust.json、深度图 real.npy 的文件夹路径

示例：
    python adjust.py test

文件结构要求：
    <dir_path>/to_adjust.json   # 相机外参（平移+旋转）JSON文件
    <dir_path>/real.npy        # 深度图（480x640, 单位mm, numpy数组）
    <dir_path>/sim.npy        # 参考点云（480x640, 单位mm, numpy数组）

程序会监控 to_adjust.json 文件变化，自动刷新点云可视化窗口。
"""
def save_rgb_d_from_npy(npy, tag, dir_path):
    """Split an (H, W, 4) RGB-D array into its RGB and depth parts and
    save each via save_info, using *tag* as the filename prefix."""
    rgb_part = npy[:, :, :3]
    depth_part = npy[:, :, 3]
    print("rgb shape:", rgb_part.shape)
    print("depth shape:", depth_part.shape)
    if rgb_part.dtype == np.uint16:
        # NOTE(review): a plain astype truncates uint16 values above 255 —
        # confirm whether a rescale was intended here.
        rgb_part = rgb_part.astype(np.uint8)
    save_info("rgb", dir_path / f"{tag}_rgb", rgb_part)
    save_info("depth", dir_path / f"{tag}_depth", depth_part)
def main():
    """Entry point: load the sim/real RGB-D arrays, run manual
    registration, export the camera-frame reference cloud and update the
    camera YAML from the adjusted extrinsics."""
    if len(sys.argv) < 2:
        print(usage)
        sys.exit(1)

    dir_path = Path(__file__).parent / sys.argv[1]
    sim_npy = np.load(dir_path / "sim.npy")
    print("loading data")

    real_npy = np.load(dir_path / "real.npy")
    # Dump RGB/depth previews of both arrays (sim: 240x320, real: 480x640).
    save_rgb_d_from_npy(sim_npy, "sim", dir_path)
    save_rgb_d_from_npy(real_npy, "real", dir_path)
    print("real_npy new shape:", real_npy.shape)
    watch_and_visualize(dir_path, sim_npy, intrinsics_real, intrinsics_sim, real_npy)

    # Rebuild an (H, W, 4) RGB-D stack and back-project it into a
    # camera-frame reference point cloud.
    sim_depth = sim_npy[:, :, 3]
    sim_rgbd = np.concatenate(
        [sim_npy[:, :, :3], sim_depth[:, :, np.newaxis]], axis=2
    )
    ref_cf_pcd = depth_to_point_cloud(sim_rgbd, intrinsics_sim)
    o3d.io.write_point_cloud(str(dir_path / "ref-camframe.pcd"), ref_cf_pcd)
    print(f"Generated ref-camframe.pcd from SIM RGB and depth data, saved to {dir_path / 'ref-camframe.pcd'}")

    transform_yaml(dir_path / "cam_pfl.yaml", dir_path / "to_adjust.json")



# Run only when executed as a script (not on import).
if __name__ == "__main__":
    main()
