
import os
import random
import numpy as np
import torch
import glob
import json
from PIL import Image
from torchvision import transforms
import torch.nn.functional as F
from vggt.models.vggt import VGGT
from vggt.utils.load_fn import load_and_preprocess_images
from vggt.utils.pose_enc import pose_encoding_to_extri_intri
from vggt.utils.geometry import unproject_depth_map_to_point_map
import logging
from tqdm import tqdm

# Logging setup: timestamped INFO-level messages for pipeline progress.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Device configuration: prefer CUDA when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
# BFloat16 requires CUDA compute capability >= 8 (Ampere or newer);
# the `and` short-circuit keeps get_device_capability() from running on CPU-only hosts.
supports_bfloat16 = torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8
dtype = torch.bfloat16 if supports_bfloat16 else torch.float16
logger.info(f"Using device: {device}, dtype: {dtype}")

# Module-level stores populated by the attention forward hooks below.
attention_hooks = {}          # layer name -> hook handle (kept so hooks can be removed later)
frame_attention_maps = {}     # layer name -> latest frame-attention map (numpy array)
global_attention_maps = {}    # layer name -> latest global-attention map (numpy array)

def get_attention_hook(layer_name, is_frame_attention=True):
    """Build a forward hook that snapshots a layer's attention output.

    The returned hook unwraps tuple/list outputs (taking the last element,
    assumed to be the attention weights — TODO confirm against the attn
    module's return convention), upcasts half-precision tensors to float32,
    and stores the result as a numpy array in `frame_attention_maps` or
    `global_attention_maps` keyed by `layer_name`.
    """
    def hook(module, input, output):
        # The attn module may return a bare tensor or a tuple/list of tensors.
        if isinstance(output, (tuple, list)):
            attn = output[-1]
        elif isinstance(output, torch.Tensor):
            attn = output
        else:
            attn = None

        if attn is None:
            logger.warning(f"[{layer_name}] 无法解析注意力输出类型: {type(output)}")
            return

        # Upcast bf16/fp16 so the numpy conversion below is lossless and portable.
        if attn.dtype in (torch.bfloat16, torch.float16):
            attn = attn.to(torch.float32)

        store = frame_attention_maps if is_frame_attention else global_attention_maps
        store[layer_name] = attn.detach().cpu().numpy()

    return hook


def register_attention_hooks(model, target_layers):
    """Attach attention-capturing forward hooks to selected aggregator blocks.

    For every block index in `target_layers`, a hook is registered on the
    block's `attn` submodule and its handle is recorded in `attention_hooks`
    so it can be removed later. Frame blocks and (if present) global blocks
    are both covered.
    """
    def _attach(blocks, prefix, is_frame):
        # One-line purpose: hook every block whose index is in target_layers.
        for index, block in enumerate(blocks):
            if index not in target_layers:
                continue
            layer_name = f"{prefix}_{index}"
            attention_hooks[layer_name] = block.attn.register_forward_hook(
                get_attention_hook(layer_name, is_frame_attention=is_frame)
            )

    _attach(model.aggregator.frame_blocks, "frame_block", True)

    # Some model variants may lack global attention blocks entirely.
    if hasattr(model.aggregator, 'global_blocks'):
        _attach(model.aggregator.global_blocks, "global_block", False)

def load_vggt_model(checkpoint_path="/data1/lqf/model.pt"):
    """Instantiate VGGT, load local checkpoint weights, and attach attention hooks.

    Args:
        checkpoint_path: Path to a torch checkpoint; may be a raw state dict
            or a dict wrapping the weights under a "model" key.

    Returns:
        The model in eval mode on the module-level `device`, with forward
        hooks registered on aggregator blocks 4, 11, 17, 23 and 24.
    """
    model = VGGT()
    checkpoint = torch.load(checkpoint_path, map_location=device)
    # Unwrap trainer-style checkpoints that nest weights under "model".
    state_dict = checkpoint["model"] if "model" in checkpoint else checkpoint
    # strict=False tolerates missing/unexpected keys — mismatches are silent.
    model.load_state_dict(state_dict, strict=False)
    model.eval()
    model = model.to(device)

    # Capture attention at these aggregator block indices.
    register_attention_hooks(model, {4, 11, 17, 23, 24})

    return model

def get_image_paths(data_root):
    """Discover dataset images laid out as data_root/<class>/<sequence>/images/.

    Only files matching frame*.jpg or frame*.png inside each sequence's
    "images" directory count. Classes and sequences with nothing valid are
    skipped (with a warning/debug log).

    Returns:
        dict mapping class_name -> {seq_name: sorted list of image paths}.
    """
    sequence_paths = {}
    all_class_dirs = glob.glob(os.path.join(data_root, "*"))

    logger.debug(f"在数据根目录发现 {len(all_class_dirs)} 个直接子项")

    for idx, class_dir in enumerate(all_class_dirs, 1):
        logger.debug(f"\n处理第 {idx}/{len(all_class_dirs)} 个子项: {class_dir}")

        if not os.path.isdir(class_dir):
            logger.warning(f"跳过非目录项: {class_dir}")
            continue

        class_name = os.path.basename(class_dir)
        seq_dirs = glob.glob(os.path.join(class_dir, "*"))

        if not seq_dirs:
            logger.warning(f"类别目录 {class_name} 下未发现任何序列目录")
            continue

        # Collect this class's valid sequences locally; commit only if non-empty.
        valid_sequences = {}

        for seq_dir in seq_dirs:
            if not os.path.isdir(seq_dir):
                logger.debug(f"跳过非目录项: {seq_dir}")
                continue

            seq_name = os.path.basename(seq_dir)
            img_dir = os.path.join(seq_dir, "images")

            if not os.path.exists(img_dir):
                logger.debug(f"序列 {seq_name} 缺少images目录，跳过")
                continue

            # Both extensions are globbed, then sorted together for a stable order.
            img_paths = sorted(
                glob.glob(os.path.join(img_dir, "frame*.jpg")) +
                glob.glob(os.path.join(img_dir, "frame*.png"))
            )

            if not img_paths:
                logger.debug(f"序列 {seq_name} 的images目录中未发现符合条件的图片")
                continue

            valid_sequences[seq_name] = img_paths
            logger.debug(f"序列 {seq_name} 包含 {len(img_paths)} 张有效图片")

        if valid_sequences:
            sequence_paths[class_name] = valid_sequences
        else:
            logger.warning(f"类别 {class_name} 下未发现任何包含有效图片的序列")

    return sequence_paths

def save_results(output_dir, class_name, seq_name, iteration, img_paths, depth, point_cloud,
                 frame_attns, global_attns):
    """Persist one iteration's outputs under output_dir/class/seq/iter_<n>/.

    Writes image_paths.json, depth.npy, point_cloud.npy, and one
    frame_attention_<layer>.npy / global_attention_<layer>.npy file per
    captured attention map. Directories are created as needed.
    """
    save_dir = os.path.join(output_dir, class_name, seq_name, f"iter_{iteration}")
    os.makedirs(save_dir, exist_ok=True)

    def _dump(filename, array):
        # One-line purpose: save a numpy array into this iteration's folder.
        np.save(os.path.join(save_dir, filename), array)

    # Record which source images produced these outputs.
    with open(os.path.join(save_dir, "image_paths.json"), "w") as f:
        json.dump(img_paths, f, indent=2)

    _dump("depth.npy", depth)
    _dump("point_cloud.npy", point_cloud)

    for layer_name, attn in frame_attns.items():
        _dump(f"frame_attention_{layer_name}.npy", attn)

    for layer_name, attn in global_attns.items():
        _dump(f"global_attention_{layer_name}.npy", attn)

def process_sequence(model, img_paths, num_images):
    """Run the model on a random subset of a sequence's frames.

    Fixes over the previous version: the autocast dtype probe no longer calls
    `torch.cuda.get_device_capability()` on CPU-only hosts (which raised),
    autocast is only entered when CUDA is actually available, the deprecated
    `torch.cuda.amp.autocast` API is replaced by `torch.autocast`, and a
    leftover debug print was removed.

    Args:
        model: VGGT model already on `device` with attention hooks attached.
        img_paths: All frame paths of the sequence.
        num_images: How many frames to sample (capped at len(img_paths)).

    Returns:
        dict with keys "img_paths" (selected paths), "depth" (numpy depth map),
        "point_cloud" (unprojected points), "frame_attns" / "global_attns"
        (copies of the hook-captured attention maps for this forward pass).
    """
    # Randomly choose distinct frames; sampling never exceeds what's available.
    selected_indices = random.sample(range(len(img_paths)), min(num_images, len(img_paths)))
    selected_paths = [img_paths[i] for i in selected_indices]

    images = load_and_preprocess_images(selected_paths).to(device)

    with torch.no_grad():
        if torch.cuda.is_available():
            # bf16 needs compute capability >= 8 (Ampere); older GPUs get fp16.
            amp_dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16
            with torch.autocast(device_type="cuda", dtype=amp_dtype):
                predictions = model(images)
        else:
            # No autocast on CPU: run the model at full precision.
            predictions = model(images)

    # Convert the pose encoding into camera extrinsic/intrinsic matrices.
    extrinsic, intrinsic = pose_encoding_to_extri_intri(predictions["pose_enc"], images.shape[-2:])
    predictions["extrinsic"] = extrinsic
    predictions["intrinsic"] = intrinsic

    # Move all tensors to numpy and drop the leading batch dimension.
    for key in predictions.keys():
        if isinstance(predictions[key], torch.Tensor):
            predictions[key] = predictions[key].cpu().numpy().squeeze(0)
    predictions['pose_enc_list'] = None  # remove pose_enc_list; not needed downstream

    # Unproject the depth map into a 3D point cloud using the predicted cameras.
    depth_map = predictions["depth"]
    point_cloud = unproject_depth_map_to_point_map(depth_map, predictions["extrinsic"], predictions["intrinsic"])

    # Snapshot the hook-populated maps so the next forward pass can't overwrite them.
    current_frame_attns = {k: v.copy() for k, v in frame_attention_maps.items()}
    current_global_attns = {k: v.copy() for k, v in global_attention_maps.items()}

    return {
        "img_paths": selected_paths,
        "depth": depth_map,
        "point_cloud": point_cloud,
        "frame_attns": current_frame_attns,
        "global_attns": current_global_attns
    }

def main(data_root="/data1/lqf/ZYC/co3d_subset", output_dir="/data1/lqf/ZYC/VGGT_results", num_iterations=200):
    """Run the full extraction pipeline over every class/sequence in the dataset.

    For each sequence, performs `num_iterations` forward passes on random
    1-3 frame subsets and saves depth, point cloud and attention maps.

    Fix over the previous version: the registered forward hooks are now
    removed in a `finally` block, so an exception anywhere in the (long)
    processing loops no longer leaks them on the model.
    """
    os.makedirs(output_dir, exist_ok=True)

    logger.info("Loading VGGT model from local checkpoint...")
    model = load_vggt_model()

    logger.info("Collecting image paths...")
    sequence_paths = get_image_paths(data_root)
    logger.info(f"Found {len(sequence_paths)} classes with sequences")

    try:
        for class_name, sequences in sequence_paths.items():
            logger.info(f"Processing class: {class_name}")

            for seq_name, img_paths in sequences.items():
                logger.info(f"Processing sequence: {seq_name} with {len(img_paths)} images")

                for i in tqdm(range(num_iterations), desc=f"Sequence {seq_name} iterations"):
                    num_images = random.randint(1, 3)
                    results = process_sequence(model, img_paths, num_images)

                    save_results(
                        output_dir,
                        class_name,
                        seq_name,
                        i,
                        results["img_paths"],
                        results["depth"],
                        results["point_cloud"],
                        results["frame_attns"],
                        results["global_attns"]
                    )
    finally:
        # Always detach the attention hooks, even if processing failed,
        # and clear the handle registry so a re-run starts clean.
        for hook in attention_hooks.values():
            hook.remove()
        attention_hooks.clear()

    logger.info("Processing complete")

# Script entry point: run the pipeline with the default dataset/output paths.
if __name__ == "__main__":
    main()
