"""
CUDA_VISIBLE_DEVICES=0 python /data0/liqifeng/ZYC/vggt/tools/extract_aggregator_features.py \
  --data-root /data1/datasets/distill2 \
  --output-dir /data1/datasets/VGGT_features \
  --checkpoint /data0/liqifeng/ZYC/model.pt \
  --num-iterations 50 \
  --min-images 3 \
  --max-images 8 \
  --preprocess-mode crop
"""
import argparse
import json
import logging
import os
import random
import glob
import shutil
from contextlib import nullcontext
import sys
from importlib import import_module
from importlib import util as importlib_util

# h5py is optional at import time; save_iteration_results raises RuntimeError
# later if it turns out to be missing when HDF5 output is actually needed.
h5py_spec = importlib_util.find_spec("h5py")
if h5py_spec is not None:
    h5py = import_module("h5py")
else:
    h5py = None

sys.path.insert(0, os.path.abspath('/data0/liqifeng/ZYC/vggt'))   # repo root, so `vggt.*` imports resolve
import numpy as np
import torch
from tqdm import tqdm

from vggt.models.vggt import VGGT
from vggt.utils.load_fn import load_and_preprocess_images


logger = logging.getLogger(__name__)
_AGGREGATOR_BUFFER = []


def configure_logging(verbose: bool = False) -> None:
    """Initialise root logging: DEBUG when *verbose*, otherwise INFO."""
    logging.basicConfig(
        level=logging.DEBUG if verbose else logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
    )


def quantize_to_int8(array: np.ndarray) -> tuple[np.ndarray, float]:
    """Symmetrically quantise *array* to int8 with max-abs scaling.

    Returns ``(codes, scale)`` such that ``codes * scale`` approximates the
    input. Empty inputs, all-zero inputs, and inputs whose peak magnitude is
    non-finite all map to an all-zero int8 array with a scale of 1.0.
    """
    values = np.asarray(array, dtype=np.float32)
    if values.size == 0:
        return np.zeros_like(values, dtype=np.int8), 1.0

    peak = float(np.max(np.abs(values)))
    # Degenerate peak: nothing meaningful to scale against.
    if not np.isfinite(peak) or peak < 1e-12:
        return np.zeros_like(values, dtype=np.int8), 1.0

    scale = peak / 127.0
    codes = np.clip(np.round(values / scale), -128, 127).astype(np.int8)
    return codes, scale


def get_device_and_dtype():
    """Pick the compute device and autocast dtype.

    Uses bfloat16 only on GPUs with compute capability >= 8 (Ampere or
    newer); otherwise float16. Falls back to CPU when CUDA is unavailable.
    """
    has_cuda = torch.cuda.is_available()
    device = "cuda" if has_cuda else "cpu"
    prefers_bf16 = has_cuda and torch.cuda.get_device_capability()[0] >= 8
    return device, (torch.bfloat16 if prefers_bf16 else torch.float16)


def load_vggt_model(checkpoint_path: str, device: str) -> VGGT:
    """Build a frozen, eval-mode VGGT from *checkpoint_path* on *device*.

    Accepts checkpoints that store the weights either at the top level or
    under a "model" key. Loading is non-strict; any mismatched keys are
    reported at DEBUG level.
    """
    logger.info("Loading VGGT model from %s", checkpoint_path)
    model = VGGT()
    raw = torch.load(checkpoint_path, map_location=device)
    weights = raw.get("model", raw)
    missing, unexpected = model.load_state_dict(weights, strict=False)
    if missing:
        logger.debug("Missing keys during load: %s", missing)
    if unexpected:
        logger.debug("Unexpected keys during load: %s", unexpected)

    # Inference only: freeze all parameters before moving to the device.
    model.requires_grad_(False)
    model.eval()
    model.to(device)
    return model


def _aggregator_forward_hook(module, inputs, output):
    """Forward hook that captures aggregator outputs into _AGGREGATOR_BUFFER.

    Stores the final layer's tokens plus a fixed subset of intermediate
    layers (converted to float16 numpy arrays on the host) together with
    bookkeeping metadata. Does nothing if the aggregator produced no layers.
    """
    del module, inputs
    tokens_list, patch_start_idx = output
    if not tokens_list:
        logger.warning("Aggregator returned no features")
        return

    def _to_host_fp16(tensor):
        # Detach and down-cast before the device-to-host copy.
        return tensor.detach().to(torch.float16).cpu().numpy()

    layer_features = {}
    for idx in (4, 11, 17, 23):
        if idx < len(tokens_list):
            layer_features[f"layer_{idx}"] = _to_host_fp16(tokens_list[idx])
        else:
            logger.warning(
                "Requested layer index %d but only %d layers available",
                idx, len(tokens_list),
            )

    _AGGREGATOR_BUFFER.append(
        {
            "features": _to_host_fp16(tokens_list[-1]),
            "layer_features": layer_features,
            "patch_start_idx": int(patch_start_idx),
            "num_iterations": len(tokens_list),
        }
    )


def register_aggregator_hook(model: VGGT):
    """Attach the feature-capture hook to the model's aggregator.

    Returns the hook handle so callers can remove it when done.
    """
    handle = model.aggregator.register_forward_hook(_aggregator_forward_hook)
    return handle


def collect_dataset_structure(data_root: str):
    """Walk *data_root* and return ``{split: {scene: {sample: [image paths]}}}``.

    Only sample directories named ``sample_*`` that contain at least one
    ``cam_*_rgb.{png,jpg,jpeg}`` image are recorded; scenes and splits are
    still present as (possibly empty) dicts even when they hold no samples.

    Raises:
        FileNotFoundError: when *data_root* is not a directory.
    """
    if not os.path.isdir(data_root):
        raise FileNotFoundError(f"Data root does not exist: {data_root}")

    def _sorted_subdirs(path, prefix=""):
        # Immediate subdirectories, name-sorted, optionally filtered by prefix.
        entries = (d for d in os.scandir(path) if d.is_dir() and d.name.startswith(prefix))
        return sorted(entries, key=lambda entry: entry.name)

    structure = {}
    splits = _sorted_subdirs(data_root)
    logger.info("Found %d splits under %s", len(splits), data_root)

    for split in splits:
        scenes = structure.setdefault(split.name, {})
        for scene in _sorted_subdirs(split.path):
            samples = scenes.setdefault(scene.name, {})
            for sample in _sorted_subdirs(scene.path, prefix="sample_"):
                # Images are grouped by extension, each group name-sorted.
                found = []
                for ext in ("png", "jpg", "jpeg"):
                    pattern = os.path.join(sample.path, f"cam_*_rgb.{ext}")
                    found.extend(sorted(glob.glob(pattern)))
                if found:
                    samples[sample.name] = found

    return structure


def process_sample(model, image_paths, device, dtype, min_images, max_images, preprocess_mode):
    """Run one randomized forward pass over a subset of a sample's images.

    A random count in ``[min_images, max_images]`` (clamped to the number of
    available images) is drawn, that many images are sampled in path order,
    preprocessed, and pushed through *model*. The aggregator forward hook
    fills ``_AGGREGATOR_BUFFER``; its captured features are returned together
    with the selected paths and metadata.

    Raises:
        ValueError: when no images are available, or min_images resolves to 0.
        RuntimeError: when the aggregator hook captured nothing.
    """
    if not image_paths:
        raise ValueError("No images available for the sample")

    upper = min(max_images, len(image_paths))
    lower = min(min_images, upper)
    if lower == 0:
        raise ValueError("min_images resolved to zero; check dataset contents")

    num_images = random.randint(lower, upper)
    selected_indices = sorted(random.sample(range(len(image_paths)), num_images))
    selected_paths = [image_paths[i] for i in selected_indices]

    images = load_and_preprocess_images(selected_paths, mode=preprocess_mode).to(device)

    _AGGREGATOR_BUFFER.clear()

    # torch.cuda.amp.autocast is deprecated; torch.autocast is the supported API.
    autocast_ctx = (
        torch.autocast(device_type="cuda", dtype=dtype) if device == "cuda" else nullcontext()
    )
    with torch.no_grad(), autocast_ctx:
        _ = model(images)

    if not _AGGREGATOR_BUFFER:
        raise RuntimeError("Aggregator hook did not capture any features")

    result = _AGGREGATOR_BUFFER.pop()
    return {
        "image_paths": selected_paths,
        "features": result["features"],
        "layer_features": result.get("layer_features", {}),
        "patch_start_idx": result["patch_start_idx"],
        "num_iterations": result["num_iterations"],
        "preprocess_mode": preprocess_mode,
    }


def save_iteration_results(output_dir, split_name, scene_name, sample_name, iteration, result):
    """Persist one iteration's features into a per-scene HDF5 file plus a JSON sidecar.

    The final and per-layer features are int8-quantized (symmetric max-abs)
    and written to ``<output_dir>/<split>/<scene>/scene_features.h5`` under
    the group ``<sample>/iter_NNNN`` (any existing group is replaced). A
    ``metadata.json`` describing the stored arrays is written to
    ``<scene_dir>/<sample>/iter_NNNN/``; its presence is what
    ``is_sample_already_processed`` checks for resume support.

    Raises:
        RuntimeError: when h5py is not installed.
    """
    if h5py is None:
        raise RuntimeError("h5py is required for HDF5 output.")

    scene_dir = os.path.join(output_dir, split_name, scene_name)
    os.makedirs(scene_dir, exist_ok=True)
    scene_file_path = os.path.join(scene_dir, "scene_features.h5")

    sample_group_name = sample_name
    iteration_group_name = f"iter_{iteration:04d}"
    saved_layers = sorted(result.get("layer_features", {}).keys())
    layer_scales: dict[str, float] = {}

    with h5py.File(scene_file_path, "a") as scene_file:
        sample_group = scene_file.require_group(sample_group_name)
        # Replace any previous run's data for this iteration.
        if iteration_group_name in sample_group:
            del sample_group[iteration_group_name]
        iter_group = sample_group.create_group(iteration_group_name)

        quant_features, feature_scale = quantize_to_int8(result["features"])
        iter_group.create_dataset(
            "features",
            data=quant_features,
            compression="gzip",
            compression_opts=9,
        )
        iter_group.attrs["feature_scale"] = feature_scale
        iter_group.attrs["patch_start_idx"] = result["patch_start_idx"]
        iter_group.attrs["num_aggregator_iterations"] = result["num_iterations"]
        iter_group.attrs["preprocess_mode"] = result.get("preprocess_mode")
        iter_group.attrs["storage_dtype"] = "int8"
        iter_group.attrs["quantization"] = "symmetric_max_abs"

        # Variable-length UTF-8 strings for the source image paths.
        string_dtype = h5py.string_dtype(encoding="utf-8")
        iter_group.create_dataset(
            "image_paths",
            data=np.array(result["image_paths"], dtype=string_dtype),
        )

        if saved_layers:
            layer_group = iter_group.create_group("layer_features")
            for layer_name in saved_layers:
                quant_layer, layer_scale = quantize_to_int8(result["layer_features"][layer_name])
                ds = layer_group.create_dataset(
                    layer_name,
                    data=quant_layer,
                    compression="gzip",
                    compression_opts=9,
                )
                ds.attrs["scale"] = layer_scale
                ds.attrs["quantization"] = "symmetric_max_abs"
                layer_scales[layer_name] = layer_scale

    # Recreate the sidecar directory from scratch so stale metadata never lingers.
    save_dir = os.path.join(scene_dir, sample_name, iteration_group_name)
    if os.path.exists(save_dir):
        shutil.rmtree(save_dir)
    os.makedirs(save_dir, exist_ok=True)

    metadata = {
        # Relative path from the sidecar dir back to the HDF5 file.
        "scene_file": os.path.relpath(scene_file_path, start=save_dir),
        "sample_group": sample_group_name,
        "iteration_group": iteration_group_name,
        "num_images": len(result["image_paths"]),
        "feature_shape": list(result["features"].shape),
        "patch_start_idx": result["patch_start_idx"],
        "num_aggregator_iterations": result["num_iterations"],
        "saved_layers": saved_layers,
        "preprocess_mode": result.get("preprocess_mode"),
        "storage_format": "scene_hdf5_int8",
        "quantization": {"dtype": "int8", "method": "symmetric_max_abs"},
        "feature_scale": feature_scale,
        "layer_scales": layer_scales,
        "image_paths": result["image_paths"],
    }

    with open(os.path.join(save_dir, "metadata.json"), "w", encoding="utf-8") as f:
        json.dump(metadata, f, indent=2)


def is_sample_already_processed(output_dir, split_name, scene_name, sample_name, num_iterations):
    """Return True when the sample already has valid metadata for every iteration.

    An iteration counts as complete when its ``metadata.json`` exists and
    parses as valid JSON; unreadable files are silently treated as missing.
    """
    sample_dir = os.path.join(output_dir, split_name, scene_name, sample_name)
    if not os.path.exists(sample_dir):
        return False

    valid_count = 0
    for idx in range(num_iterations):
        meta_path = os.path.join(sample_dir, f"iter_{idx:04d}", "metadata.json")
        if not os.path.exists(meta_path):
            continue
        try:
            with open(meta_path, "r", encoding="utf-8") as handle:
                json.load(handle)
        except Exception:
            # Corrupt/partial metadata: treat as not done, best-effort.
            continue
        valid_count += 1

    return valid_count >= num_iterations


def parse_args():
    """Build and parse the command-line arguments for feature extraction."""
    parser = argparse.ArgumentParser(description="Extract VGGT aggregator features for Libero dataset")
    add = parser.add_argument
    add("--data-root", default="/data1/datasets/distill_libero", help="Root directory of the dataset")
    add("--output-dir", default="/data1/datasets/VGGT_features", help="Directory to store extracted features")
    add("--checkpoint", default="/data0/liqifeng/ZYC/model.pt", help="Path to the VGGT checkpoint")
    add("--num-iterations", type=int, default=1, help="Iterations per sample")
    add("--min-images", type=int, default=1, help="Minimum images per iteration")
    add("--max-images", type=int, default=5, help="Maximum images per iteration")
    add("--preprocess-mode", choices=["crop", "pad"], default="crop", help="Image preprocessing mode")
    add("--seed", type=int, default=2025, help="Random seed for reproducibility")
    add("--verbose", action="store_true", help="Enable debug logging")
    return parser.parse_args()


def main():
    """Entry point: extract aggregator features for every sample in the dataset.

    Seeds all RNGs, loads the model, walks the dataset structure, skips
    samples whose output is already complete (resume support), and writes one
    HDF5 iteration group per forward pass. The aggregator hook is removed
    even if processing fails partway.
    """
    args = parse_args()
    configure_logging(args.verbose)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    device, dtype = get_device_and_dtype()
    logger.info("Using device %s with dtype %s", device, dtype)

    model = load_vggt_model(args.checkpoint, device)
    hook_handle = register_aggregator_hook(model)

    try:
        structure = collect_dataset_structure(args.data_root)
        total_samples = sum(len(samples) for scenes in structure.values() for samples in scenes.values())
        logger.info("Collected %d samples in total", total_samples)

        for split_name, scenes in structure.items():
            logger.info("Processing split %s", split_name)
            for scene_name, samples in scenes.items():
                if not samples:
                    continue
                logger.info("  Scene %s (%d samples)", scene_name, len(samples))
                sample_iter = tqdm(samples.items(), desc=f"Scene {scene_name}", leave=False)
                for sample_name, image_paths in sample_iter:
                    sample_iter.set_postfix(sample=sample_name)

                    # Resume support: skip samples whose iterations are all complete.
                    # Lazy %-formatting for consistency with the rest of the file.
                    if is_sample_already_processed(args.output_dir, split_name, scene_name, sample_name, args.num_iterations):
                        logger.info(
                            "Skipping already processed sample: %s/%s/%s",
                            split_name, scene_name, sample_name,
                        )
                        continue

                    for iteration in range(args.num_iterations):
                        result = process_sample(
                            model,
                            image_paths,
                            device,
                            dtype,
                            args.min_images,
                            args.max_images,
                            args.preprocess_mode,
                        )
                        save_iteration_results(
                            args.output_dir,
                            split_name,
                            scene_name,
                            sample_name,
                            iteration,
                            result,
                        )
    finally:
        # Always detach the forward hook, even on error.
        hook_handle.remove()

    logger.info("Feature extraction complete")


if __name__ == "__main__":
    main()
