from dust3r.inference import inference
from dust3r.model import AsymmetricCroCo3DStereo
from dust3r.utils.image import load_images
from dust3r.image_pairs import make_pairs
from dust3r.cloud_opt import global_aligner, GlobalAlignerMode
import numpy as np

if __name__ == '__main__':
    device = 'cuda'
    batch_size = 1
    schedule = 'cosine'  # learning-rate schedule for the global alignment
    lr = 0.01
    niter = 300          # optimization iterations for global alignment

    # model_name = "naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt"
    model_name = "checkpoints/DUSt3R_ViTLarge_BaseDecoder_512_dpt.pth"

    # Load the pretrained DUSt3R model onto the target device (GPU).
    model = AsymmetricCroCo3DStereo.from_pretrained(model_name).to(device)

    # load_images can take a list of images or a directory
    # images = load_images(['croco/assets/Chateau1.png', 'croco/assets/Chateau2.png'], size=512)
    # Load a pair of images of the same scene, resized to 512 on the long side.
    images = load_images(['assets/1.jpg', 'assets/2.jpg'], size=512)

    # Build image pairs for matching; 'complete' connects every image with every other,
    # and symmetrize=True adds both (im1, im2) and (im2, im1) orderings.
    pairs = make_pairs(images, scene_graph='complete', prefilter=None, symmetrize=True)

    # Run the network on all pairs: per-pair 3D point maps and confidences.
    output = inference(pairs, model, device, batch_size=batch_size)

    # at this stage, you have the raw dust3r predictions
    view1, pred1 = output['view1'], output['pred1']
    view2, pred2 = output['view2'], output['pred2']
    # here, view1, pred1, view2, pred2 are dicts of lists of len(2)
    #  -> because we symmetrize we have (im1, im2) and (im2, im1) pairs
    # in each view you have:
    # an integer image identifier: view1['idx'] and view2['idx']
    # the img: view1['img'] and view2['img']
    # the image shape: view1['true_shape'] and view2['true_shape']
    # an instance string output by the dataloader: view1['instance'] and view2['instance']
    # pred1 and pred2 contains the confidence values: pred1['conf'] and pred2['conf']
    # pred1 contains 3D points for view1['img'] in view1['img'] space: pred1['pts3d']
    # pred2 contains 3D points for view2['img'] in view1['img'] space: pred2['pts3d_in_other_view']

    # next we'll use the global_aligner to align the predictions
    # depending on your task, you may be fine with the raw output and not need it
    # with only two input images, you could use GlobalAlignerMode.PairViewer: it would just convert the output
    # if using GlobalAlignerMode.PairViewer, no need to run compute_global_alignment
    # Globally align the per-pair predictions into one consistent 3D scene.
    scene = global_aligner(output, device=device, mode=GlobalAlignerMode.PointCloudOptimizer)
    loss = scene.compute_global_alignment(init="mst", niter=niter, schedule=schedule, lr=lr)

    # Retrieve useful values from the aligned scene: images, per-camera focal
    # lengths, camera poses, 3D point maps, and per-pixel confidence masks.
    imgs = scene.imgs
    focals = scene.get_focals()
    poses = scene.get_im_poses()
    pts3d = scene.get_pts3d()
    confidence_masks = scene.get_masks()

    # Visualize the 3D reconstruction in an interactive viewer.
    scene.show()

    # Find 2D-2D matches between the two images via reciprocal nearest
    # neighbors in the aligned 3D point clouds.
    from dust3r.utils.geometry import find_reciprocal_matches, xy_grid
    pts2d_list, pts3d_list = [], []
    for i in range(2):
        conf_i = confidence_masks[i].cpu().numpy()
        pts2d_list.append(xy_grid(*imgs[i].shape[:2][::-1])[conf_i])  # imgs[i].shape[:2] = (H, W)
        pts3d_list.append(pts3d[i].detach().cpu().numpy()[conf_i])
    reciprocal_in_P2, nn2_in_P1, num_matches = find_reciprocal_matches(*pts3d_list)
    print(f'found {num_matches} matches')
    matches_im1 = pts2d_list[1][reciprocal_in_P2]
    matches_im0 = pts2d_list[0][nn2_in_P1][reciprocal_in_P2]

    # Visualize a few matches: pad the two images to the same height, place
    # them side by side, and draw colored lines between corresponding points.
    from matplotlib import pyplot as pl
    # Clamp to the number of matches actually found so linspace never
    # produces duplicate (or negative) indices when matches are scarce.
    n_viz = min(10, num_matches)
    match_idx_to_viz = np.round(np.linspace(0, num_matches - 1, n_viz)).astype(int)
    viz_matches_im0, viz_matches_im1 = matches_im0[match_idx_to_viz], matches_im1[match_idx_to_viz]

    H0, W0, H1, W1 = *imgs[0].shape[:2], *imgs[1].shape[:2]
    img0 = np.pad(imgs[0], ((0, max(H1 - H0, 0)), (0, 0), (0, 0)), 'constant', constant_values=0)
    img1 = np.pad(imgs[1], ((0, max(H0 - H1, 0)), (0, 0), (0, 0)), 'constant', constant_values=0)
    img = np.concatenate((img0, img1), axis=1)
    pl.figure()
    pl.imshow(img)
    cmap = pl.get_cmap('jet')
    for i in range(n_viz):
        (x0, y0), (x1, y1) = viz_matches_im0[i].T, viz_matches_im1[i].T
        # max(..., 1) guards the division when only one match is shown.
        pl.plot([x0, x1 + W0], [y0, y1], '-+', color=cmap(i / max(n_viz - 1, 1)), scalex=False, scaley=False)
    pl.show(block=True)

# # Sketch: export the 3D reconstruction to COLMAP-style files.
# # NOTE(review): this is illustrative pseudo-code only — the real pycolmap
# # package does not expose `COLMAP`/`Project`/`add_point_observation` APIs
# # like these; verify against the actual pycolmap documentation before use.
# from pycolmap import COLMAP, Image, Camera, Point3D, Project
#
# # The following variables come from the dust3r reconstruction above:
# # imgs: image array
# # focals: focal-length array
# # poses: camera-pose array
# # pts3d: 3D-point array
# # confidence_masks: confidence-mask array
#
# # Initialize the COLMAP project.
# project = Project()
#
# # Add a camera model.
# camera_model = Camera(model_type="SIMPLE_PINHOLE", params=[focal_length, 0, 0, 0, 0])
# project.add_camera_model(camera_model)
#
# # Add images and 3D points.
# for i, (img, focal, pose, pts3d_i, conf_mask) in enumerate(zip(imgs, focals, poses, pts3d, confidence_masks)):
#     # Create the image object.
#     image = Image(path=img['path'], camera_model_id=camera_model.id)
#     project.add_image(image)
#
#     # Set the camera parameters for this image.
#     project.set_image_camera_parameters(image.id, camera_model, [focal] + list(pose))
#
#     # Keep only high-confidence 3D points.
#     pts3d_i = pts3d_i[conf_mask]
#
#     # Create an observation for each point and add it to the project.
#     for j, (x, y, z) in enumerate(pts3d_i):
#         point3D = Point3D(X=x, Y=y, Z=z)
#         observation = image.add_point_observation(point3D, j)
#         project.add_point2D(observation)
#         project.add_point3D(point3D)
#
# # Save the project to the given directory.
# project.save_sparse("path/to/save/colmap/project/sparse", image_directories=["path/to/image/directory"])