import argparse
from pathlib import Path
import json
import numpy as np
import open3d as o3d
import cv2
import cv2


def read_intrinsics(dataset_dir: Path):
    """Load pinhole camera intrinsics and the depth scale factor.

    Reads ``camera_intrinsics.json`` from *dataset_dir* and returns a tuple
    ``(o3d.camera.PinholeCameraIntrinsic, depth_map_factor)``. The depth
    factor defaults to 1000.0 (millimeters) when absent from the file.
    Exits the program with a message if the JSON file is missing.
    """
    path = dataset_dir / 'camera_intrinsics.json'
    if not path.exists():
        raise SystemExit('缺少 camera_intrinsics.json（请先用采集脚本录制或自行提供）')
    meta = json.loads(path.read_text(encoding='utf-8'))
    width = int(meta['width'])
    height = int(meta['height'])
    fx = float(meta['fx'])
    fy = float(meta['fy'])
    cx = float(meta['cx'])
    cy = float(meta['cy'])
    intrinsics = o3d.camera.PinholeCameraIntrinsic(width, height, fx, fy, cx, cy)
    return intrinsics, float(meta.get('depth_map_factor', 1000.0))


def load_pairs(dataset_dir: Path):
    """Parse TUM-style ``associations.txt`` into (rgb_path, depth_path) pairs.

    Each data line is expected as ``ts_rgb rgb_rel_path ts_depth depth_rel_path``
    (extra trailing fields are ignored). Blank lines, ``#`` comments, and
    malformed lines with fewer than four fields are skipped.

    Raises SystemExit when the file does not exist.
    """
    assoc = dataset_dir / 'associations.txt'
    if not assoc.exists():
        raise SystemExit('缺少 associations.txt')
    pairs = []
    for line in assoc.read_text(encoding='utf-8').splitlines():
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        fields = line.split()
        if len(fields) < 4:
            # Previously this raised ValueError on unpack; skip malformed rows instead.
            continue
        _ts_rgb, rgb, _ts_depth, depth = fields[:4]
        pairs.append((dataset_dir / rgb, dataset_dir / depth))
    return pairs

def paint_if_empty(pcd: o3d.geometry.PointCloud) -> o3d.geometry.PointCloud:
    """Give the cloud a depth-based false-color gradient when it has no colors.

    A no-op if the cloud already carries colors or contains no points.
    Colors are derived from the z coordinate normalized to [0, 1].
    Mutates *pcd* in place and returns it.
    """
    already_colored = pcd.has_colors() and len(np.asarray(pcd.colors)) > 0
    if already_colored:
        return pcd
    xyz = np.asarray(pcd.points)
    if xyz.size == 0:
        return pcd
    depth = xyz[:, 2]
    span = depth.max() - depth.min()
    # Guard against a degenerate (all-equal) depth range with a tiny epsilon.
    t = (depth - depth.min()) / max(1e-6, span)
    colors = np.stack([t, 1.0 - t, 0.5 * (1.0 - np.abs(t - 0.5))], axis=1)
    pcd.colors = o3d.utility.Vector3dVector(colors)
    return pcd


def _compose_rotation_fix(args) -> np.ndarray:
    """Compose an optional output-orientation fix from the --rotate180_* flags (product of 180° axis rotations)."""
    R_fix = np.eye(3, dtype=float)
    if args.rotate180_x:
        R_fix = np.diag([1.0, -1.0, -1.0]) @ R_fix
    if args.rotate180_y:
        R_fix = np.diag([-1.0, 1.0, -1.0]) @ R_fix
    if args.rotate180_z:
        R_fix = np.diag([-1.0, -1.0, 1.0]) @ R_fix
    return R_fix


def _apply_orientation(geom, R_fix: np.ndarray, center: bool):
    """Rotate *geom* in place by R_fix (when non-identity) and optionally move its centroid to the origin."""
    if not np.allclose(R_fix, np.eye(3)):
        geom.rotate(R_fix, center=(0.0, 0.0, 0.0))
    if center:
        c = geom.get_center()
        geom.translate((-c[0], -c[1], -c[2]))
    return geom


def main():
    """Run frame-to-frame RGB-D odometry and TSDF fusion over a recorded dataset.

    Reads intrinsics and RGB/depth pairs from --dataset, integrates each
    processed frame into a ScalableTSDFVolume, then writes a point cloud
    (and optionally a triangle mesh) with optional axis flips / recentering.
    """
    ap = argparse.ArgumentParser(description='Open3D RGB-D 里程计 + TSDF 演示（无需 ORB-SLAM3 可执行）')
    ap.add_argument('--dataset', default='data/room1')
    ap.add_argument('--voxel', type=float, default=0.02)
    ap.add_argument('--sdf_trunc', type=float, default=0.08)
    ap.add_argument('--max_depth', type=float, default=4.0)
    ap.add_argument('--frame_step', type=int, default=1, help='每隔多少帧处理一帧以提速')
    ap.add_argument('--resize_scale', type=float, default=1.0, help='下采样比例(0.5/0.75/1.0)')
    ap.add_argument('--median_ksize', type=int, default=3, help='深度中值滤波核(奇数，0关闭)')
    ap.add_argument('--out', default=None)
    ap.add_argument('--out_mesh', default=None)
    ap.add_argument('--rotate180_x', action='store_true', help='对输出绕X轴旋转180°')
    ap.add_argument('--rotate180_y', action='store_true', help='对输出绕Y轴旋转180°')
    ap.add_argument('--rotate180_z', action='store_true', help='对输出绕Z轴旋转180°')
    ap.add_argument('--center', action='store_true', help='将结果平移到质心为原点，便于查看')
    args = ap.parse_args()

    dataset_dir = Path(args.dataset)
    intr, depth_factor = read_intrinsics(dataset_dir)
    pairs = load_pairs(dataset_dir)

    volume = o3d.pipelines.integration.ScalableTSDFVolume(
        voxel_length=args.voxel,
        sdf_trunc=args.sdf_trunc,
        color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8,
    )

    odo_init = np.eye(4)   # initial guess for each pairwise odometry solve
    prev_rgbd = None       # last successfully processed frame
    T_w_c = np.eye(4)      # camera-to-world pose of the current frame

    for idx, (rgb_path, depth_path) in enumerate(pairs):
        if args.frame_step > 1 and (idx % args.frame_step) != 0:
            continue
        # Faster I/O path: read both images with OpenCV.
        color_bgr = cv2.imread(str(rgb_path), cv2.IMREAD_COLOR)
        if color_bgr is None:
            continue
        depth_np = cv2.imread(str(depth_path), cv2.IMREAD_UNCHANGED)
        if depth_np is None:
            continue
        if depth_np.dtype != np.uint16:
            depth_np = depth_np.astype(np.uint16)

        # Light median filter: edge-preserving denoise of the depth map.
        if args.median_ksize and args.median_ksize % 2 == 1:
            depth_np = cv2.medianBlur(depth_np, args.median_ksize)

        intr_use = intr
        if 0.0 < args.resize_scale < 1.0:
            color_bgr = cv2.resize(color_bgr, (0, 0), fx=args.resize_scale, fy=args.resize_scale, interpolation=cv2.INTER_AREA)
            depth_np = cv2.resize(depth_np, (0, 0), fx=args.resize_scale, fy=args.resize_scale, interpolation=cv2.INTER_NEAREST)
            # Scale the intrinsics to match the resized images.
            intr_use = o3d.camera.PinholeCameraIntrinsic(
                int(round(intr.width * args.resize_scale)),
                int(round(intr.height * args.resize_scale)),
                intr.get_focal_length()[0] * args.resize_scale,
                intr.get_focal_length()[1] * args.resize_scale,
                intr.get_principal_point()[0] * args.resize_scale,
                intr.get_principal_point()[1] * args.resize_scale,
            )
        # Convert to Open3D images (Open3D expects RGB channel order).
        color_rgb = cv2.cvtColor(color_bgr, cv2.COLOR_BGR2RGB)
        color = o3d.geometry.Image(color_rgb)
        depth_raw = o3d.geometry.Image(depth_np)
        rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
            color, depth_raw, depth_scale=depth_factor, depth_trunc=args.max_depth, convert_rgb_to_intensity=False
        )

        if prev_rgbd is None:
            # The first usable frame defines the world origin.
            volume.integrate(rgbd, intr_use, np.linalg.inv(T_w_c))
            prev_rgbd = rgbd
            continue

        # BUGFIX: odometry must use the (possibly rescaled) intrinsics that
        # match the frames being compared; the original passed the
        # full-resolution `intr`, which is wrong when --resize_scale < 1.0.
        success, T_delta, _ = o3d.pipelines.odometry.compute_rgbd_odometry(
            rgbd, prev_rgbd, intr_use, odo_init,
            o3d.pipelines.odometry.RGBDOdometryJacobianFromHybridTerm(),
            o3d.pipelines.odometry.OdometryOption()
        )
        if success:
            # T_delta: from prev to curr in camera coordinates
            T_w_c = T_w_c @ np.linalg.inv(T_delta)
            volume.integrate(rgbd, intr_use, np.linalg.inv(T_w_c))
            prev_rgbd = rgbd

    pcd = volume.extract_point_cloud()
    # Optionally fix the orientation (composed 180° rotations) and recenter.
    R_fix = _compose_rotation_fix(args)
    pcd = _apply_orientation(pcd, R_fix, args.center)
    pcd = paint_if_empty(pcd)
    pcd.estimate_normals()
    out = Path(args.out) if args.out else dataset_dir / 'tsdf_pointcloud_demo.ply'
    o3d.io.write_point_cloud(str(out), pcd)
    print(f'已保存演示点云: {out}')

    if args.out_mesh is not None:
        mesh = volume.extract_triangle_mesh()
        mesh = _apply_orientation(mesh, R_fix, args.center)
        mesh.compute_vertex_normals()
        # If the mesh lacks vertex colors, transfer them from the point cloud
        # via nearest-neighbor lookup. Best-effort: keep an uncolored mesh on failure.
        try:
            if not mesh.has_vertex_colors() or len(np.asarray(mesh.vertex_colors)) == 0:
                if pcd.has_colors():
                    kdtree = o3d.geometry.KDTreeFlann(pcd)
                    verts = np.asarray(mesh.vertices)
                    pcols = np.asarray(pcd.colors)
                    colors = []
                    for v in verts:
                        _, nn_idx, _ = kdtree.search_knn_vector_3d(v, 1)
                        colors.append(pcols[nn_idx[0]])
                    mesh.vertex_colors = o3d.utility.Vector3dVector(np.asarray(colors))
        except Exception:
            pass
        outm = Path(args.out_mesh)
        o3d.io.write_triangle_mesh(str(outm), mesh)
        print(f'已保存演示网格: {outm}')


# Script entry point: run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()


