import sys
import os
import argparse
import torch
import numpy as np
from pathlib import Path
from torchvision.utils import save_image
import torchvision.transforms as tvf

sys.path.append("/home/liucong/codes/repos-3D/fast3r")
from fast3r.dust3r.inference_multiview import inference
from fast3r.models.fast3r import Fast3R
from fast3r.models.multiview_dust3r_module import MultiViewDUSt3RLitModule
sys.path.append("/home/liucong/codes/3d/a3R")
sys.path.append("/home/liucong/codes/3d/a3R/src")
from src.dust3r.utils.image import ImgNorm, ImgnetNorm
from src.dust3r.datasets.image_folder_dataset import ImageFolderDataset


def get_args_parser():
    """Build and return the argument parser for the Fast3R evaluation CLI."""
    parser = argparse.ArgumentParser("Fast3R Manual Evaluation")

    # Paths: checkpoint, input scenes, and where results are written.
    parser.add_argument(
        "--model_path", type=str,
        default="/home/liucong/codes/pretrained/fast3r",
        help="Path to the model checkpoint.",
    )
    parser.add_argument(
        "--data_root", type=str,
        default="/home/liucong/codes/repos-3D/vggt-training/examples",
        help="Root directory of the data to process.",
    )
    parser.add_argument(
        "--out_root", type=str,
        default="/home/liucong/data/3d/eval_outdir/",
        help="Root directory for saving outputs.",
    )

    # Runtime / sampling configuration.
    parser.add_argument(
        "--device", type=str, default="cuda",
        help="Device to run the model on ('cuda' or 'cpu').",
    )
    parser.add_argument(
        "--image_size", type=int, nargs=2, default=(512, 384),
        help="Size to which images are resized as (width, height).",
    )
    parser.add_argument(
        "--num_views", type=int, default=6,
        help="Number of views to sample from each scene. 0 means all views.",
    )
    parser.add_argument(
        "--samples_per_scene", type=int, default=1,
        help="Number of samples to generate per scene.",
    )
    return parser


def run_evaluation(args, model, lit_module, device) -> None:
    """Run Fast3R inference over every scene under ``args.data_root``.

    For each scene discovered by ``ImageFolderDataset``, this samples subsets
    of views, runs multi-view inference, aligns the local point maps to the
    global head, estimates per-view camera poses, and writes the un-normalized
    input images plus one ``.npz`` of predictions per view under
    ``args.out_root``.

    Args:
        args: Parsed CLI namespace from :func:`get_args_parser`.
        model: A ``Fast3R`` network already moved to ``device``.
        lit_module: ``MultiViewDUSt3RLitModule`` wrapping ``model``.
        device: Torch device on which inference runs.
    """
    data_root_path = Path(args.data_root)
    out_root_path = Path(args.out_root)

    # Instantiate the dataset once.
    #   - A very large `num_views` ensures all images/frames in a scene are
    #     indexed.
    #   - `samples_per_scene=1` makes each dataset index map to one scene.
    resolution = tuple(args.image_size)
    dataset = ImageFolderDataset(ROOT=args.data_root, resolution=resolution, num_views=9999, samples_per_scene=1, useImgnet=False)
    if not dataset.scenes:
        print(f"No valid scenes found in {args.data_root}. Exiting.")
        return

    # Seeded RNG so view sampling is reproducible across runs.
    rng = np.random.default_rng(seed=42)

    for i in range(len(dataset)):  # iterates over scenes (samples_per_scene=1)
        scene_name = dataset.scenes[i]
        available_views = len(dataset.scene_data[scene_name])

        # Clamp the requested view count; 0 means "use every view".
        num_views_to_sample = args.num_views
        if num_views_to_sample == 0 or num_views_to_sample > available_views:
            num_views_to_sample = available_views

        # Draw roughly enough samples to cover all views of the scene.
        # (Fixed: the previous `** 1.0` exponent was a no-op; `max(1, ...)`
        # guards against a zero sample count.)
        samples_per_scene = max(1, round(available_views / num_views_to_sample))
        for sample_idx in range(samples_per_scene):
            # Sample a subset of views from this scene.
            scene_views = dataset._get_views(i, resolution, rng, num_views_to_sample)

            if not scene_views:
                print(f"Skipping scene {scene_name}, _get_views returned empty.")
                continue

            # Normalize each image and add a leading batch dimension.
            for view in scene_views:
                view["img"] = dataset.transform(view["img"])[None]

            # Derive the output directory from the first view's label.
            scene_name_from_label = os.path.dirname(scene_views[0]['label'])
            outdir = out_root_path / data_root_path.name / scene_name_from_label / str(sample_idx)
            outdir.mkdir(parents=True, exist_ok=True)

            print(f"Processing scene '{scene_name_from_label}' ({len(scene_views)} images) -> {outdir}")

            # Run multi-view inference.
            # (Fixed: removed a leftover `breakpoint()` and debug prints that
            # halted/polluted non-interactive runs.)
            output_dict = inference(
                scene_views,
                model,
                device,
                dtype=torch.float32,
                verbose=True,
                profiling=False,
            )

            # Align the local-head point maps to the global head in place.
            lit_module.align_local_pts3d_to_global(
                preds=output_dict['preds'],
                views=output_dict['views'],
                min_conf_thr_percentile=85
            )
            # NOTE(review): `estimated_focals` is currently unused and the
            # saved "intrinsics" entry is None — confirm whether the focals
            # should be persisted instead.
            poses_c2w_batch, estimated_focals = MultiViewDUSt3RLitModule.estimate_camera_poses(
                output_dict['preds'],
                niter_PnP=100,
                focal_length_estimation_method='first_view_from_global_head'
            )
            camera_poses = poses_c2w_batch[0]

            # Move tensors to CPU as NumPy arrays (dropping the batch dim).
            preds_cpu = [{k: v.cpu().numpy().squeeze(0) if isinstance(v, torch.Tensor) else v for k, v in p.items()} for p in output_dict['preds']]

            _save_scene_outputs(dataset, outdir, preds_cpu, camera_poses, scene_views)


def _save_scene_outputs(dataset, outdir, preds_cpu, camera_poses, scene_views):
    """Write the un-normalized input image and one ``.npz`` per view."""
    for pred, pose, image_info in zip(preds_cpu, camera_poses, scene_views):
        # The label encodes `<scene>/<filename>`; keep the original filename.
        img_filename = os.path.basename(image_info['label'])
        img_to_save = _unnormalize_image(dataset.transform, image_info['img'])
        save_image(img_to_save.clamp(0, 1), outdir / img_filename)

        pred_filename = Path(img_filename).stem + ".npz"
        pred["pose"] = pose
        # NOTE(review): storing None makes np.savez pickle it, so loading
        # requires allow_pickle=True — confirm downstream readers handle this.
        pred["intrinsics"] = None
        np.savez(outdir / pred_filename, **pred)


def _unnormalize_image(transform, img_tensor):
    """Invert the dataset's Normalize step (if present) to ~[0, 1] pixels."""
    if hasattr(transform, 'transforms') and len(transform.transforms) > 1 and isinstance(transform.transforms[1], tvf.Normalize):
        norm_transform = transform.transforms[1]
        mean = torch.tensor(norm_transform.mean, device=img_tensor.device).view(3, 1, 1)
        std = torch.tensor(norm_transform.std, device=img_tensor.device).view(3, 1, 1)
        return img_tensor.clone().mul_(std).add_(mean)
    # Fallback for simple transforms or if no normalization was applied.
    return img_tensor

if __name__ == "__main__":
    cli_args = get_args_parser().parse_args()

    # --- Setup ---
    # Fall back to CPU when CUDA is unavailable, regardless of the flag.
    run_device = torch.device(cli_args.device if torch.cuda.is_available() else "cpu")
    net = Fast3R.from_pretrained(cli_args.model_path).to(run_device)
    lit_wrapper = MultiViewDUSt3RLitModule.load_for_inference(net)

    # Inference mode: disable dropout / batch-norm updates.
    net.eval()
    lit_wrapper.eval()

    run_evaluation(cli_args, net, lit_wrapper, run_device)