"""
CUDA_VISIBLE_DEVICES=5 python /data0/liqifeng/ZYC/vggt/tools/save_outputs.py \
  --data-root /data1/datasets/distill_libero_90_3/libero_90 \
  --output-dir /data1/datasets/VGGT_outputs \
  --checkpoint /data0/liqifeng/ZYC/model.pt \
  --num-iterations 50 \
  --min-images 3 \
  --max-images 8 \
  --preprocess-mode crop
"""
import argparse
import json
import logging
import os
import random
import glob
import shutil
from contextlib import nullcontext
import sys

sys.path.insert(0, os.path.abspath('/data0/liqifeng/ZYC/vggt'))   # repository root, so `vggt` imports resolve
import numpy as np
import torch
from tqdm import tqdm

from vggt.models.vggt import VGGT
from vggt.utils.load_fn import load_and_preprocess_images


logger = logging.getLogger(__name__)


def configure_logging(verbose: bool = False) -> None:
    """Initialise root logging: DEBUG when *verbose* is set, otherwise INFO."""
    logging.basicConfig(
        level=logging.DEBUG if verbose else logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
    )


def get_device_and_dtype():
    """
    Choose the compute device and the autocast dtype.

    Returns ("cuda", bfloat16) on Ampere-or-newer GPUs (compute capability >= 8),
    ("cuda", float16) on older GPUs, and ("cpu", float16) without CUDA.
    """
    if torch.cuda.is_available():
        major_cc = torch.cuda.get_device_capability()[0]
        return "cuda", (torch.bfloat16 if major_cc >= 8 else torch.float16)
    return "cpu", torch.float16


def load_vggt_model(checkpoint_path: str, device: str) -> VGGT:
    """
    Build a VGGT model, load weights from *checkpoint_path*, freeze all
    parameters, and move it to *device* in eval mode.
    """
    logger.info("Loading VGGT model from %s", checkpoint_path)
    model = VGGT()

    raw = torch.load(checkpoint_path, map_location=device)
    # Some checkpoints nest the weights under a "model" key; fall back to the raw dict.
    weights = raw.get("model", raw)
    missing, unexpected = model.load_state_dict(weights, strict=False)
    if missing:
        logger.debug("Missing keys during load: %s", missing)
    if unexpected:
        logger.debug("Unexpected keys during load: %s", unexpected)

    # Inference only: freeze every parameter.
    model.requires_grad_(False)

    model.eval()
    model.to(device)
    return model


def _sample_dirs_in(path: str):
    """Return the 'sample_*' subdirectories of *path*, sorted by name."""
    return sorted(
        (d for d in os.scandir(path) if d.is_dir() and d.name.startswith("sample_")),
        key=lambda entry: entry.name,
    )


def _sample_image_paths(sample_dir: str):
    """Return sorted cam_*_rgb.{png,jpg,jpeg} image paths inside *sample_dir*."""
    image_paths = []
    for ext in ("png", "jpg", "jpeg"):
        pattern = os.path.join(sample_dir, f"cam_*_rgb.{ext}")
        image_paths.extend(sorted(glob.glob(pattern)))
    return image_paths


def collect_dataset_structure(data_root: str):
    """
    Collect the dataset directory structure.

    Two layouts are supported:
    1. data_root/suite_name/scene_name/sample_xxxx/cam_xxx_rgb.png
    2. data_root/scene_name/sample_xxxx/cam_xxx_rgb.png (samples directly
       under the scene; the basename of data_root is used as the suite name)

    Returns:
        dict: structure[suite][scene][sample] -> list of image file paths.
              Samples without any matching images are omitted.

    Raises:
        FileNotFoundError: if *data_root* is not an existing directory.
    """
    structure = {}
    if not os.path.isdir(data_root):
        raise FileNotFoundError(f"Data root does not exist: {data_root}")

    first_level_dirs = sorted((d for d in os.scandir(data_root) if d.is_dir()), key=lambda entry: entry.name)
    logger.info("Found %d directories under %s", len(first_level_dirs), data_root)

    # normpath so a trailing slash on data_root does not produce an empty suite name.
    root_suite_name = os.path.basename(os.path.normpath(data_root))

    for first_entry in first_level_dirs:
        # If the first-level directory directly contains sample_* dirs, it is a
        # scene (layout 2); otherwise treat it as a suite (layout 1).
        sample_dirs = _sample_dirs_in(first_entry.path)

        if sample_dirs:
            # Layout 2: first_entry is a scene; the data root acts as the suite.
            suite_name = root_suite_name
            scene_name = first_entry.name
            structure.setdefault(suite_name, {}).setdefault(scene_name, {})

            for sample_entry in sample_dirs:
                image_paths = _sample_image_paths(sample_entry.path)
                if not image_paths:
                    continue
                structure[suite_name][scene_name][sample_entry.name] = image_paths
        else:
            # Layout 1: first_entry is a suite containing scene directories.
            suite_name = first_entry.name
            structure.setdefault(suite_name, {})

            scene_dirs = sorted(
                (d for d in os.scandir(first_entry.path) if d.is_dir()),
                key=lambda entry: entry.name,
            )
            for scene_entry in scene_dirs:
                scene_name = scene_entry.name
                structure[suite_name].setdefault(scene_name, {})

                for sample_entry in _sample_dirs_in(scene_entry.path):
                    image_paths = _sample_image_paths(sample_entry.path)
                    if not image_paths:
                        continue
                    structure[suite_name][scene_name][sample_entry.name] = image_paths

    return structure


def process_sample(model, image_paths, device, dtype, min_images, max_images, preprocess_mode):
    """
    Run one forward pass of *model* on a random subset of *image_paths*.

    Args:
        model: VGGT model (callable on a batch of preprocessed images).
        image_paths: candidate image file paths for this sample.
        device: "cuda" or "cpu".
        dtype: autocast dtype used on CUDA (bfloat16 or float16).
        min_images / max_images: inclusive bounds on how many images to draw
            (clamped to the number of available images).
        preprocess_mode: passed to load_and_preprocess_images ("crop" or "pad").

    Returns:
        dict with the selected paths, the preprocess mode, and the model
        outputs converted to float16 numpy arrays.

    Raises:
        ValueError: if no images are available, or min_images resolves to zero.
    """
    if not image_paths:
        raise ValueError("No images available for the sample")

    # Randomly choose how many images to use, clamped to what is available.
    upper = min(max_images, len(image_paths))
    lower = min(min_images, upper)
    if lower == 0:
        raise ValueError("min_images resolved to zero; check dataset contents")

    num_images = random.randint(lower, upper)
    selected_indices = sorted(random.sample(range(len(image_paths)), num_images))
    selected_paths = [image_paths[i] for i in selected_indices]

    # Load and preprocess the selected images.
    images = load_and_preprocess_images(selected_paths, mode=preprocess_mode).to(device)

    # Forward pass. torch.cuda.amp.autocast is deprecated; torch.autocast is
    # the supported API and behaves identically here.
    autocast_ctx = torch.autocast(device_type="cuda", dtype=dtype) if device == "cuda" else nullcontext()
    with torch.no_grad(), autocast_ctx:
        output = model(images)

    result = {
        'image_paths': selected_paths,
        'preprocess_mode': preprocess_mode,
    }

    def _to_numpy(value):
        """Detach a tensor and convert it to a float16 numpy array; pass through non-tensors."""
        if isinstance(value, torch.Tensor):
            return value.detach().to(torch.float16).cpu().numpy()
        return value

    # Extract the requested model outputs.
    output_keys = ['pose_enc', 'pose_enc_list', 'depth', 'depth_conf', 'world_points', 'world_points_conf']
    for key in output_keys:
        if key not in output:
            logger.warning(f"Output key '{key}' not found in model output")
            continue
        value = output[key]
        if isinstance(value, list):
            # List-valued outputs (e.g. pose_enc_list): convert each item.
            result[key] = [_to_numpy(item) for item in value]
        else:
            result[key] = _to_numpy(value)

    return result


def save_iteration_results(output_dir, suite_name, scene_name, sample_name, iteration, result):
    """
    Persist one iteration's model outputs as a compressed NPZ plus a
    metadata.json describing what was saved.

    Layout: output_dir/suite/scene/sample/iter_NNNN/{outputs.npz, metadata.json}.
    Any existing iteration directory is removed and rewritten from scratch.
    """
    iteration_dir = os.path.join(
        output_dir, suite_name, scene_name, sample_name, f"iter_{iteration:04d}"
    )
    if os.path.exists(iteration_dir):
        shutil.rmtree(iteration_dir)  # start clean on re-runs
    os.makedirs(iteration_dir, exist_ok=True)

    output_keys = ['pose_enc', 'pose_enc_list', 'depth', 'depth_conf', 'world_points', 'world_points_conf']

    # Flatten plain arrays and list-of-array outputs into one NPZ payload.
    save_data = {}
    for key in output_keys:
        value = result.get(key)
        if isinstance(value, np.ndarray):
            save_data[key] = value
        elif isinstance(value, list):
            for idx, item in enumerate(value):
                if isinstance(item, np.ndarray):
                    save_data[f'{key}_item_{idx}'] = item

    # Compressed NPZ with all arrays for this iteration.
    np.savez_compressed(os.path.join(iteration_dir, "outputs.npz"), **save_data)

    # Record array shapes so consumers can inspect outputs without loading the NPZ.
    shapes = {}
    for key in ['pose_enc', 'depth', 'depth_conf', 'world_points', 'world_points_conf']:
        if isinstance(result.get(key), np.ndarray):
            shapes[key] = list(result[key].shape)
    pose_enc_list = result.get('pose_enc_list')
    if isinstance(pose_enc_list, list):
        shapes['pose_enc_list'] = [
            list(item.shape) if isinstance(item, np.ndarray) else None
            for item in pose_enc_list
        ]

    metadata = {
        "suite_name": suite_name,
        "scene_name": scene_name,
        "sample_name": sample_name,
        "iteration": iteration,
        "num_images": len(result["image_paths"]),
        "image_paths": result["image_paths"],
        "preprocess_mode": result.get("preprocess_mode"),
        "storage_format": "npz_compressed",
        "output_shapes": shapes,
        "saved_outputs": list(save_data.keys()),
        "npz_file": "outputs.npz",
    }

    with open(os.path.join(iteration_dir, "metadata.json"), "w", encoding="utf-8") as f:
        json.dump(metadata, f, indent=2)

    logger.debug(f"Saved iteration {iteration} for {suite_name}/{scene_name}/{sample_name}")


def is_sample_already_processed(output_dir, suite_name, scene_name, sample_name, num_iterations):
    """Return True when every iteration of the sample already has a readable metadata.json on disk."""
    sample_dir = os.path.join(output_dir, suite_name, scene_name, sample_name)
    if not os.path.exists(sample_dir):
        return False

    def _iteration_done(iteration):
        # An iteration counts as complete only when its metadata.json parses.
        meta_path = os.path.join(sample_dir, f"iter_{iteration:04d}", "metadata.json")
        if not os.path.exists(meta_path):
            return False
        try:
            with open(meta_path, "r", encoding="utf-8") as f:
                json.load(f)
        except Exception:
            return False
        return True

    completed = sum(1 for i in range(num_iterations) if _iteration_done(i))
    return completed >= num_iterations


def parse_args():
    """Build and parse the command-line arguments for the extraction script."""
    ap = argparse.ArgumentParser(description="Extract VGGT outputs for Libero dataset")
    ap.add_argument("--data-root",
                    default="/data1/datasets/distill_libero_10_goal",
                    help="Root directory of the dataset")
    ap.add_argument("--output-dir",
                    default="/data1/datasets/VGGT_outputs",
                    help="Directory to store extracted outputs")
    ap.add_argument("--checkpoint",
                    default="/data0/liqifeng/ZYC/model.pt",
                    help="Path to the VGGT checkpoint")
    ap.add_argument("--num-iterations", type=int, default=50,
                    help="Iterations per sample (number of times to sample images)")
    ap.add_argument("--min-images", type=int, default=3,
                    help="Minimum images per iteration")
    ap.add_argument("--max-images", type=int, default=8,
                    help="Maximum images per iteration")
    ap.add_argument("--preprocess-mode", choices=["crop", "pad"], default="crop",
                    help="Image preprocessing mode")
    ap.add_argument("--seed", type=int, default=2025,
                    help="Random seed for reproducibility")
    ap.add_argument("--verbose", action="store_true",
                    help="Enable debug logging")
    return ap.parse_args()


def main():
    """Entry point: load the model, walk the dataset, and dump VGGT outputs per sample."""
    args = parse_args()
    configure_logging(args.verbose)

    # Seed all RNGs so image subsets are reproducible across runs.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # Pick compute device and autocast dtype.
    device, dtype = get_device_and_dtype()
    logger.info("Using device %s with dtype %s", device, dtype)

    # Load the frozen VGGT model.
    model = load_vggt_model(args.checkpoint, device)

    try:
        # Discover suites/scenes/samples under the data root.
        structure = collect_dataset_structure(args.data_root)
        total_samples = sum(len(samples) for scenes in structure.values() for samples in scenes.values())
        logger.info("Collected %d samples in total", total_samples)

        for suite_name, scenes in structure.items():
            logger.info("Processing suite %s", suite_name)

            for scene_name, samples in scenes.items():
                if not samples:
                    continue
                logger.info("  Scene %s (%d samples)", scene_name, len(samples))

                sample_iter = tqdm(samples.items(), desc=f"Scene {scene_name}", leave=False)
                for sample_name, image_paths in sample_iter:
                    sample_iter.set_postfix(sample=sample_name)

                    # Resume support: skip samples whose iterations are all on disk.
                    # Lazy %-style args keep logging calls consistent with the rest
                    # of this function and avoid formatting when the level is off.
                    if is_sample_already_processed(args.output_dir, suite_name, scene_name,
                                                   sample_name, args.num_iterations):
                        logger.info("Skipping already processed sample: %s/%s/%s",
                                    suite_name, scene_name, sample_name)
                        continue

                    # Re-sample images and run the model num_iterations times per sample.
                    for iteration in range(args.num_iterations):
                        result = process_sample(
                            model,
                            image_paths,
                            device,
                            dtype,
                            args.min_images,
                            args.max_images,
                            args.preprocess_mode,
                        )
                        save_iteration_results(
                            args.output_dir,
                            suite_name,
                            scene_name,
                            sample_name,
                            iteration,
                            result,
                        )

    except Exception as e:
        logger.error("Error during processing: %s", str(e), exc_info=True)
        raise

    logger.info("Output extraction complete")


if __name__ == "__main__":
    main()
