import os
import itertools
import numpy as np
import torch
import glob
import json
from PIL import Image

import torch.nn.functional as F
from vggt.models.vggt import VGGT
from vggt.utils.load_fn import load_and_preprocess_images
from vggt.utils.pose_enc import pose_encoding_to_extri_intri
from vggt.utils.geometry import unproject_depth_map_to_point_map
import logging
from tqdm import tqdm

# ===============================
# Basic configuration
# ===============================
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# bfloat16 requires CUDA compute capability >= 8 (Ampere or newer);
# otherwise fall back to float16 for mixed-precision inference.
device = "cuda" if torch.cuda.is_available() else "cpu"
supports_bfloat16 = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8
dtype = torch.bfloat16 if supports_bfloat16 else torch.float16
logger.info(f"Using device: {device}, dtype: {dtype}")

# Module-level caches populated by the forward hooks below:
#   attention_hooks: hook handles keyed by layer name (kept for later removal)
#   frame_attention_maps / global_attention_maps: layer name -> numpy attention array
attention_hooks = {}
frame_attention_maps = {}
global_attention_maps = {}

# ===============================
# Hook definitions
# ===============================
def get_attention_hook(layer_name, is_frame_attention=True):
    """Build a forward hook that captures a block's attention output.

    Args:
        layer_name: Key under which the captured map is stored.
        is_frame_attention: If True, store into ``frame_attention_maps``;
            otherwise into ``global_attention_maps``.

    Returns:
        A callable suitable for ``nn.Module.register_forward_hook``.
    """
    def hook(module, _input, output):
        # logger.debug instead of print: this fires on every forward pass,
        # so the noise must be controllable via the logging configuration.
        logger.debug(f"[{layer_name}] hook triggered!")
        attn = None
        if isinstance(output, (tuple, list)):
            # Convention assumed here: the attention tensor is the last
            # element of a tuple/list output — TODO confirm against the
            # attention module's forward signature.
            attn = output[-1]
        elif isinstance(output, torch.Tensor):
            attn = output

        if attn is None:
            logger.warning(f"[{layer_name}] 无法解析注意力输出类型: {type(output)}")
            return

        # Upcast half precision before the numpy round-trip; bfloat16 in
        # particular has no native numpy dtype.
        if attn.dtype in (torch.float16, torch.bfloat16):
            attn = attn.to(torch.float32)

        attn_np = attn.detach().cpu().numpy()
        if is_frame_attention:
            frame_attention_maps[layer_name] = attn_np
        else:
            global_attention_maps[layer_name] = attn_np

    return hook
        

def register_attention_hooks(model, target_layers):
    """Register frame-level and global attention hooks on selected blocks.

    Args:
        model: VGGT model whose ``aggregator`` exposes ``frame_blocks`` and
            (optionally) ``global_blocks``.
        target_layers: Collection of block indices to instrument.

    Side effects:
        Stores hook handles in the module-level ``attention_hooks`` dict.
    """
    logger.debug(f"target_layers = {target_layers}")
    logger.debug(f"frame_blocks  = {len(model.aggregator.frame_blocks)}")

    for i, block in enumerate(model.aggregator.frame_blocks):
        if i in target_layers:
            name = f"frame_block_{i}"
            attention_hooks[name] = block.attn.register_forward_hook(
                get_attention_hook(name, True)
            )
    # global_blocks may be absent on some configurations; guard every access
    # (the original printed len(global_blocks) unguarded before this check,
    # which would have raised AttributeError first).
    if hasattr(model.aggregator, 'global_blocks'):
        logger.debug(f"global_blocks = {len(model.aggregator.global_blocks)}")
        for i, block in enumerate(model.aggregator.global_blocks):
            if i in target_layers:
                name = f"global_block_{i}"
                attention_hooks[name] = block.attn.register_forward_hook(
                    get_attention_hook(name, False)
                )
    logger.debug(f"registered keys: {list(attention_hooks.keys())}")


# ===============================
# Model loading
# ===============================
def load_vggt_model(checkpoint_path="/data1/lqf/model.pt", target_layers=None):
    """Load a VGGT checkpoint, move it to ``device`` and attach attention hooks.

    Args:
        checkpoint_path: Path to the ``.pt`` checkpoint; the file may be a raw
            state dict or a dict containing a "model" entry.
        target_layers: Optional collection of block indices to instrument.
            Defaults to {4, 11, 17, 23, 24} (backward compatible).

    Returns:
        The model in eval mode on ``device`` with hooks registered.
    """
    model = VGGT()
    # NOTE(review): torch.load without weights_only=True can execute arbitrary
    # code from an untrusted checkpoint — acceptable for this local file, but
    # verify before pointing it at downloaded weights.
    ckpt = torch.load(checkpoint_path, map_location=device)
    state_dict = ckpt["model"] if "model" in ckpt else ckpt
    model.load_state_dict(state_dict, strict=False)
    model.eval().to(device)
    if target_layers is None:
        target_layers = {4, 11, 17, 23, 24}
    register_attention_hooks(model, target_layers)
    return model

# ===============================
# Dataset structure parsing
# ===============================
def get_image_dirs(data_root):
    """Map each class name to its multi-view folder(s).

    Args:
        data_root: Directory whose immediate subdirectories are object classes.

    Returns:
        {class_name: [folder_path, ...]} — one folder per class, where each
        folder holds one group of multi-view images.
    """
    return {
        os.path.basename(entry): [entry]
        for entry in glob.glob(os.path.join(data_root, "*"))
        if os.path.isdir(entry)
    }

# ===============================
# Process a single group of images
# ===============================
def process_images(model, img_paths):
    """Run VGGT on one group of views; return depth, points and attention maps.

    Args:
        model: VGGT model with attention hooks already registered.
        img_paths: List of image file paths forming one multi-view combination.

    Returns:
        Dict with "depth", "point_cloud", and snapshots of the frame/global
        attention caches captured by the hooks during this forward pass.
    """
    images = load_and_preprocess_images(img_paths).to(device)
    with torch.no_grad():
        # torch.cuda.amp.autocast is deprecated; torch.amp.autocast with an
        # explicit device_type is the supported spelling and behaves the same.
        with torch.amp.autocast(device_type="cuda", dtype=dtype):
            preds = model(images)

    extrinsic, intrinsic = pose_encoding_to_extri_intri(preds["pose_enc"], images.shape[-2:])
    preds["extrinsic"], preds["intrinsic"] = extrinsic, intrinsic

    # Move every tensor to CPU numpy and drop the leading batch dimension.
    # (Reassigning existing keys while iterating items() is safe: the dict
    # is not resized.)
    for k, v in preds.items():
        if isinstance(v, torch.Tensor):
            preds[k] = v.cpu().numpy().squeeze(0)
    preds["pose_enc_list"] = None  # not needed downstream; drop the reference

    depth = preds["depth"]
    point_cloud = unproject_depth_map_to_point_map(depth, preds["extrinsic"], preds["intrinsic"])

    # Copy the hook caches so the caller owns a snapshot that survives the
    # clear_attention_maps() call made before the next combination.
    result = {
        "depth": depth,
        "point_cloud": point_cloud,
        "frame_attns": {k: v.copy() for k, v in frame_attention_maps.items()},
        "global_attns": {k: v.copy() for k, v in global_attention_maps.items()},
    }
    return result

# ===============================
# Save results
# ===============================
def save_results(save_dir, img_paths, results, combo_idx):
    """Persist one combination's outputs under ``save_dir/combo_<idx>/``.

    Writes the image path list as JSON, depth and point-cloud arrays as .npy,
    and one .npy per captured frame/global attention map.
    """
    combo_dir = os.path.join(save_dir, f"combo_{combo_idx}")
    # makedirs creates save_dir as an intermediate directory as well.
    os.makedirs(combo_dir, exist_ok=True)

    with open(os.path.join(combo_dir, "image_paths.json"), "w") as fh:
        json.dump(img_paths, fh, indent=2)

    np.save(os.path.join(combo_dir, "depth.npy"), results["depth"])
    np.save(os.path.join(combo_dir, "point_cloud.npy"), results["point_cloud"])

    for prefix, result_key in (("frame_attn", "frame_attns"),
                               ("global_attn", "global_attns")):
        for layer_name, attn in results[result_key].items():
            np.save(os.path.join(combo_dir, f"{prefix}_{layer_name}.npy"), attn)

# ===============================
# Clear caches
# ===============================
def clear_attention_maps():
    """Empty both attention caches between combinations."""
    for cache in (frame_attention_maps, global_attention_maps):
        cache.clear()

# ===============================
# Main execution logic
# ===============================
def main(
    data_root="/data1/lqf/ZYC/pybullet/dataset_multiview_objects",
    output_root="/data1/lqf/ZYC/VGGT_results_pybullet"
):
    """Run VGGT over every object folder, saving results per view combination.

    For each class folder, enumerates all combinations of 1-4 images, runs
    the model on each combination, and saves depth/point-cloud/attention
    outputs under ``output_root/<class_name>/combo_<idx>/``.
    """
    os.makedirs(output_root, exist_ok=True)
    model = load_vggt_model()
    dataset = get_image_dirs(data_root)
    logger.info(f"共发现 {len(dataset)} 个物体类别")

    for class_name, dirs in dataset.items():
        for folder in dirs:
            img_paths = sorted(glob.glob(os.path.join(folder, "*.png")) +
                               glob.glob(os.path.join(folder, "*.jpg")))
            if not img_paths:
                continue

            logger.info(f"[{class_name}] 处理 {len(img_paths)} 张多视角图片...")

            # All combinations of 1-4 images, per the stated intent. The
            # original looped up to len(img_paths), which contradicts that
            # intent and blows up combinatorially (2^N - 1 combos) on
            # folders with many views.
            max_combo_size = min(4, len(img_paths))
            all_combos = []
            for n in range(1, max_combo_size + 1):
                all_combos.extend(itertools.combinations(img_paths, n))

            logger.info(f"共 {len(all_combos)} 种组合")

            save_dir = os.path.join(output_root, class_name)
            for combo_idx, combo in enumerate(tqdm(all_combos, desc=class_name)):
                results = process_images(model, list(combo))
                save_results(save_dir, list(combo), results, combo_idx)
                # Reset hook caches so stale maps never leak into the next combo.
                clear_attention_maps()

    # Detach all hooks once the sweep finishes.
    for hook in attention_hooks.values():
        hook.remove()
    logger.info("✅ 全部完成")

# ===============================
if __name__ == "__main__":
    main()
