# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import os
from pathlib import Path
import re
import sys
from typing import Any, Dict, List, Tuple

import hydra
import numpy as np
import rootutils
import torch
from omegaconf import DictConfig
from pytorch_lightning import LightningDataModule, LightningModule
from torchvision.utils import save_image
from tqdm import tqdm

rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True)
# ------------------------------------------------------------------------------------ #
from fast3r.utils import (
    RankedLogger,
    extras,
    task_wrapper,
)
from fast3r.dust3r.utils.geometry import geotrf
from fast3r.eval.utils import align_local_pts3d_to_global, estimate_camera_poses
sys.path.append("/lc/code/repos/fast3r")
from fast3r.models.fast3r import Fast3R
from fast3r.models.multiview_dust3r_module import MultiViewDUSt3RLitModule

log = RankedLogger(__name__, rank_zero_only=True)


@task_wrapper
def generate(cfg: DictConfig) -> Tuple[Dict[str, Any], Dict[str, Any]]:  # noqa: C901
    """
    Generates 3D point clouds from a given checkpoint and saves the input image tensor
    and output pts3d tensor to a target directory, using original filenames.

    :param cfg: DictConfig configuration composed by Hydra. Must provide
        ``target_dir``, ``data.data_module``, ``model``, and (optionally) ``ckpt_path``.
    :return: A ``(metric_dict, object_dict)`` pair; both empty, as ``task_wrapper`` expects.
    """
    assert cfg.target_dir, "Target directory must be provided."

    log.info(f"Instantiating datamodule <{cfg.data.data_module._target_}>")
    datamodule: LightningDataModule = hydra.utils.instantiate(cfg.data.data_module)
    datamodule.setup(stage="test")

    log.info(f"Instantiating model <{cfg.model._target_}>")
    model: LightningModule = hydra.utils.instantiate(cfg.model)
    model.setup('test')

    if cfg.ckpt_path is not None:
        log.info(f"Loading checkpoint from: {cfg.ckpt_path}")
        checkpoint = torch.load(cfg.ckpt_path, map_location="cpu", weights_only=False)
        # Lightning checkpoints nest the weights under "state_dict"; raw ones don't.
        state_dict = checkpoint.get("state_dict", checkpoint)

        # Keep only the network weights, stripping Lightning's "net." prefix.
        filtered_state_dict = {k.removeprefix("net."): v for k, v in state_dict.items() if k.startswith("net.")}
        if not filtered_state_dict:
            # Raw (non-Lightning) checkpoint: drop only the loss criterion weights.
            filtered_state_dict = {k: v for k, v in state_dict.items() if not k.startswith("criterion")}
        model.net.load_state_dict(filtered_state_dict, strict=False)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()

    log.info("Starting generation...")
    dataloader = datamodule.test_dataloader()

    # Wrap the dataloader with iter() to prevent tqdm from calling len() on CombinedLoader
    for batch_idx, combined_output in enumerate(tqdm(iter(dataloader), desc="Generating point clouds")):
        # The actual batch data is the first element of the tuple from CombinedLoader
        batch = combined_output[0]
        for i, view in enumerate(batch):
            for name in ("img", "camera_pose", "camera_intrinsics", "valid_mask"):
                if name in view:
                    batch[i][name] = view[name].to(device, non_blocking=True)

        with torch.no_grad():
            preds = model.net(batch)

        # Align local points to global coordinates and estimate camera poses only
        # when the model produced both heads. NOTE: the original left
        # poses_c2w_batch/estimated_focals unbound on the else-path, so every
        # sample hit a NameError (swallowed below) and nothing was ever saved.
        poses_c2w_batch = None
        estimated_focals = None
        if "pts3d_in_self_view" in preds[0] and "pts3d_in_other_view" in preds[0]:
            align_local_pts3d_to_global(
                preds=preds,
                views=batch,
                min_conf_thr_percentile=85,
                conf_keys={'local': 'conf_self', 'global': 'conf'},
                result_keys={'local': 'pts3d_in_self_view', 'global': 'pts3d_in_other_view'}
            )

            # Estimate camera poses
            poses_c2w_batch, estimated_focals = estimate_camera_poses(
                preds,
                views=batch,
                niter_PnP=100,
                focal_length_estimation_method='first_view_from_global_head',
                conf_keys={'local': 'conf_self', 'global': 'conf'},
                result_keys={'local': 'pts3d_in_self_view', 'global': 'pts3d_in_other_view'}
            )

        batch_size = batch[0]["img"].shape[0]
        for i in range(batch_size):
            try:
                unique_id = f"{batch_idx:06d}_{i:02d}"
                camera_poses = poses_c2w_batch[i] if poses_c2w_batch is not None else None
                focals = estimated_focals[i] if estimated_focals is not None else None

                # Save each view's image tensor and predicted pts3d
                for view_idx, view_data in enumerate(batch):
                    _save_view_outputs(
                        cfg, preds, view_data, view_idx, i, unique_id, camera_poses, focals
                    )
            except Exception as e:
                # Best-effort per sample: log and continue with the rest of the batch.
                log.warning(f"Could not process sample {i} in batch (batch_idx {batch_idx}). Error: {e}")

    return {}, {}


def _save_view_outputs(
    cfg: DictConfig,
    preds: List[Dict[str, Any]],
    view_data: Dict[str, Any],
    view_idx: int,
    sample_idx: int,
    unique_id: str,
    camera_poses: Any,
    focals: Any,
) -> None:
    """Save one view's RGB image plus an .npz of GT, estimated, and predicted tensors.

    :param cfg: Hydra config providing ``target_dir``.
    :param preds: Per-view prediction dicts from the network.
    :param view_data: The batch dict for this view (labels, images, optional GT).
    :param view_idx: Index of the view within the batch's view list.
    :param sample_idx: Index of the sample within the batch.
    :param unique_id: Unique subdirectory name for this (batch, sample) pair.
    :param camera_poses: Estimated c2w poses for this sample, or None if unavailable.
    :param focals: Estimated focal lengths for this sample, or None if unavailable.
    """
    # Get the label to extract the original filename. Labels look like "scene-image".
    label = view_data["label"][sample_idx]
    match = re.match(r"(.*)-(.*)", label)
    if match:
        scenename = match.group(1).replace('/', '_')
        img_filename = match.group(2)
    else:
        # Original bug: on a non-matching label, stale names from the previous
        # view were silently reused (or a NameError hit on the first view).
        # Fall back to a sanitized form of the whole label instead.
        scenename = "unknown_scene"
        img_filename = label.replace('/', '_')
    sample_out_dir = Path(cfg.target_dir) / view_data["dataset"][sample_idx] / scenename / unique_id
    sample_out_dir.mkdir(parents=True, exist_ok=True)
    img_filename_stem = img_filename.removesuffix('.png').removesuffix('_rgb')

    # Save the image tensor
    img_tensor = view_data["img"][sample_idx].cpu()
    save_image(img_tensor, sample_out_dir / f'{img_filename_stem}.png')

    # Collect all data to save in one npz file
    save_data = {}

    # Add GT data when the dataset provides it
    if "camera_pose" in view_data and "camera_intrinsics" in view_data:
        save_data["pose_gt"] = view_data["camera_pose"][sample_idx].cpu().numpy()
        save_data["intrinsics_gt"] = view_data["camera_intrinsics"][sample_idx].cpu().numpy()
    if "pts3d" in view_data:
        save_data["pts3d_gt"] = view_data["pts3d"][sample_idx].cpu().numpy()

    # Add estimated pose and focal, if pose estimation ran for this batch
    if camera_poses is not None:
        save_data["pose"] = camera_poses[view_idx]
    if focals and focals[view_idx] is not None:
        save_data["focal_length"] = focals[view_idx]

    # Add all tensor predictions for this view
    for k, v in preds[view_idx].items():
        if isinstance(v, torch.Tensor):
            save_data[k] = v[sample_idx].cpu().numpy()

    # Save to npz
    np.savez(sample_out_dir / f"{img_filename_stem}.npz", **save_data)


@hydra.main(version_base="1.3", config_path="../configs", config_name="generate_pts3d.yaml")
def main(cfg: DictConfig) -> None:
    """Main entry point for generation.

    Defaults ``target_dir`` to ``<ckpt_dir>/../pts_pred`` when the config leaves it unset.

    :param cfg: DictConfig configuration composed by Hydra.
    """
    if cfg.target_dir is None:
        # OmegaConf only accepts primitive value types; the original assigned a
        # pathlib.Path object, which OmegaConf rejects (UnsupportedValueType).
        cfg.target_dir = str(Path(cfg.ckpt_path).parent.parent / "pts_pred")
    extras(cfg)
    generate(cfg)


# Invoke the Hydra-decorated entry point only when executed as a script.
if __name__ == "__main__":
    main()