#!/usr/bin/env python3
"""
验证提取的特征数据是否正确

Usage:
    python /data0/liqifeng/ZYC/vggt/mini_vggt/Distill/verify_features.py
"""

import contextlib
import json
import os
import sys
from pathlib import Path

import h5py
import numpy as np
import torch

# Add paths
REPO_ROOT = Path(__file__).resolve().parents[2]
sys.path.insert(0, str(REPO_ROOT))

from vggt.models.vggt import VGGT
from vggt.utils.load_fn import load_and_preprocess_images


def check_feature_file(metadata_path):
    """检查单个特征文件的完整性"""
    print(f"\n{'='*80}")
    print(f"检查文件: {metadata_path}")
    print('='*80)
    
    # 1. 读取 metadata
    with open(metadata_path, 'r') as f:
        metadata = json.load(f)
    
    print("\n📋 Metadata 信息:")
    print(f"  - storage_format: {metadata.get('storage_format')}")
    print(f"  - num_images: {metadata.get('num_images')}")
    print(f"  - feature_shape: {metadata.get('feature_shape')}")
    print(f"  - saved_layers: {metadata.get('saved_layers')}")
    print(f"  - preprocess_mode: {metadata.get('preprocess_mode')}")
    print(f"  - num_aggregator_iterations: {metadata.get('num_aggregator_iterations')}")
    
    # 2. 读取 HDF5 文件
    scene_file = metadata.get('scene_file')
    if not os.path.isabs(scene_file):
        scene_file = os.path.normpath(os.path.join(os.path.dirname(metadata_path), scene_file))
    
    print(f"\n📦 HDF5 文件: {scene_file}")
    
    sample_group = metadata.get('sample_group')
    iteration_group = metadata.get('iteration_group')
    
    with h5py.File(scene_file, 'r') as f:
        sample_grp = f[sample_group]
        iter_grp = sample_grp[iteration_group]
        
        # 检查 features
        print(f"\n🔍 检查 features:")
        features_ds = iter_grp['features']
        print(f"  - Shape: {features_ds.shape}")
        print(f"  - Dtype: {features_ds.dtype}")
        print(f"  - Scale: {iter_grp.attrs.get('feature_scale')}")
        
        # 反量化并检查数值
        quant_features = features_ds[...]
        feature_scale = float(iter_grp.attrs.get('feature_scale', 1.0))
        features = quant_features.astype('float32') * feature_scale
        
        print(f"  - Min: {features.min():.6f}")
        print(f"  - Max: {features.max():.6f}")
        print(f"  - Mean: {features.mean():.6f}")
        print(f"  - Std: {features.std():.6f}")
        print(f"  - Has NaN: {np.isnan(features).any()}")
        print(f"  - Has Inf: {np.isinf(features).any()}")
        
        # 检查 layer_features
        if 'layer_features' in iter_grp:
            layer_grp = iter_grp['layer_features']
            print(f"\n🔍 检查 layer_features:")
            
            for layer_name in metadata.get('saved_layers', []):
                if layer_name not in layer_grp:
                    print(f"  ❌ {layer_name}: NOT FOUND!")
                    continue
                
                layer_ds = layer_grp[layer_name]
                layer_scale = float(layer_ds.attrs.get('scale', 1.0))
                
                # 反量化
                quant_layer = layer_ds[...]
                layer_data = quant_layer.astype('float32') * layer_scale
                
                print(f"\n  ✅ {layer_name}:")
                print(f"     - Shape: {layer_ds.shape}")
                print(f"     - Dtype: {layer_ds.dtype}")
                print(f"     - Scale: {layer_scale}")
                print(f"     - Min: {layer_data.min():.6f}")
                print(f"     - Max: {layer_data.max():.6f}")
                print(f"     - Mean: {layer_data.mean():.6f}")
                print(f"     - Std: {layer_data.std():.6f}")
                print(f"     - Has NaN: {np.isnan(layer_data).any()}")
                print(f"     - Has Inf: {np.isinf(layer_data).any()}")
                
                # 检查是否所有值都是0
                if np.allclose(layer_data, 0):
                    print(f"     ⚠️  WARNING: All values are zero!")
        
        # 检查 image_paths
        if 'image_paths' in iter_grp:
            print(f"\n📸 Image paths:")
            image_paths_ds = iter_grp['image_paths']
            paths = []
            for item in image_paths_ds[...]:
                if isinstance(item, (bytes, bytearray)):
                    paths.append(item.decode('utf-8'))
                else:
                    paths.append(str(item))
            
            for i, path in enumerate(paths):
                print(f"  {i+1}. {path}")
                if not os.path.exists(path):
                    print(f"     ❌ File does not exist!")


def _compare_feature_pair(saved, fresh, indent):
    """Compare a saved (dequantized) array against a freshly computed one.

    Both arrays may carry a leading batch axis of size 1, which is removed
    before the comparison. Prints shapes, absolute differences, and a warning
    when the mean difference is large.
    """
    # Drop only the leading batch axis. The original code used a bare
    # `.squeeze()` on the saved array, which could also remove a singleton
    # frame axis (single-image scenes) and break the shape comparison.
    fresh_cmp = np.squeeze(fresh, axis=0) if fresh.shape[0] == 1 else fresh
    saved_cmp = np.squeeze(saved, axis=0) if saved.shape[0] == 1 else saved

    print(f"{indent}保存的 shape: {saved.shape} -> {saved_cmp.shape}")
    print(f"{indent}新推理 shape: {fresh_cmp.shape}")

    if saved_cmp.shape != fresh_cmp.shape:
        print(f"{indent}❌ Shape 不匹配!")
        return

    diff = np.abs(saved_cmp - fresh_cmp)
    print(f"{indent}✅ Shape 匹配")
    print(f"{indent}平均差异: {diff.mean():.6f}")
    print(f"{indent}最大差异: {diff.max():.6f}")
    print(f"{indent}相对误差: {(diff.mean() / (np.abs(saved_cmp).mean() + 1e-8)):.6f}")

    if diff.mean() > 0.1:
        print(f"{indent}⚠️  WARNING: 差异较大!")


def verify_with_original_model(metadata_path, checkpoint_path):
    """Re-run the original VGGT model and compare against the saved features.

    Loads the checkpoint, re-runs inference on the images recorded in the
    feature file's metadata, captures the aggregator outputs via a forward
    hook, and prints per-tensor differences versus the stored (dequantized)
    features.

    Fixes vs. the previous version:
      * ``fresh.squeeze(0)`` discarded its result (numpy squeeze is not
        in-place), so layer comparisons ran on the un-squeezed array and
        could spuriously report shape mismatches.
      * ``torch.cuda.get_device_capability()`` was called unconditionally and
        raised on CPU-only machines even though ``device`` fell back to "cpu".
    """
    print(f"\n{'='*80}")
    print(f"🔬 使用原始 VGGT 模型验证特征")
    print('='*80)

    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Load the model weights; the checkpoint may wrap the state dict in "model".
    print("\n加载 VGGT 模型...")
    model = VGGT()
    checkpoint = torch.load(checkpoint_path, map_location=device)
    state_dict = checkpoint.get("model", checkpoint)
    model.load_state_dict(state_dict, strict=False)
    model.eval()
    model.to(device)

    with open(metadata_path, 'r') as f:
        metadata = json.load(f)

    # Resolve the HDF5 path (may be stored relative to the metadata file).
    scene_file = metadata.get('scene_file')
    if not os.path.isabs(scene_file):
        scene_file = os.path.normpath(os.path.join(os.path.dirname(metadata_path), scene_file))

    sample_group = metadata.get('sample_group')
    iteration_group = metadata.get('iteration_group')

    # Recover the image list the features were extracted from.
    with h5py.File(scene_file, 'r') as f:
        iter_grp = f[sample_group][iteration_group]
        image_paths = []
        for item in iter_grp['image_paths'][...]:
            if isinstance(item, (bytes, bytearray)):
                image_paths.append(item.decode('utf-8'))
            else:
                image_paths.append(str(item))

    print(f"\n加载 {len(image_paths)} 张图片...")
    preprocess_mode = metadata.get('preprocess_mode', 'crop')
    images = load_and_preprocess_images(image_paths, mode=preprocess_mode).to(device)

    # Forward hook: grab the aggregator's intermediate and final token maps.
    captured_features = {}

    def hook_fn(module, inputs, output):
        aggregated_tokens_list, patch_start_idx = output
        selected_indices = [4, 11, 17, 23]

        for idx in selected_indices:
            if idx < len(aggregated_tokens_list):
                # Keep the same shape as at save time (no squeeze here).
                captured_features[f"layer_{idx}"] = aggregated_tokens_list[idx].detach().cpu().numpy()

        captured_features["final"] = aggregated_tokens_list[-1].detach().cpu().numpy()
        captured_features["num_iterations"] = len(aggregated_tokens_list)

    handle = model.aggregator.register_forward_hook(hook_fn)

    # Inference. Autocast only applies on CUDA; querying device capability on
    # a CPU-only machine raises, so fall back to a no-op context there.
    print("运行推理...")
    try:
        with torch.no_grad():
            if device == "cuda":
                dtype = torch.bfloat16 if torch.cuda.get_device_capability()[0] >= 8 else torch.float16
                autocast_ctx = torch.cuda.amp.autocast(dtype=dtype)
            else:
                autocast_ctx = contextlib.nullcontext()
            with autocast_ctx:
                _ = model(images)
    finally:
        # Always detach the hook, even if inference fails.
        handle.remove()

    # Load the stored (quantized) features and dequantize them.
    print("\n读取保存的特征...")
    with h5py.File(scene_file, 'r') as f:
        iter_grp = f[sample_group][iteration_group]

        quant_features = iter_grp['features'][...]
        feature_scale = float(iter_grp.attrs.get('feature_scale', 1.0))
        saved_final = quant_features.astype('float32') * feature_scale

        saved_layers = {}
        if 'layer_features' in iter_grp:
            layer_grp = iter_grp['layer_features']
            for layer_name in metadata.get('saved_layers', []):
                if layer_name in layer_grp:
                    layer_ds = layer_grp[layer_name]
                    layer_scale = float(layer_ds.attrs.get('scale', 1.0))
                    saved_layers[layer_name] = layer_ds[...].astype('float32') * layer_scale

    print(f"\n{'='*80}")
    print("📊 对比结果")
    print('='*80)

    print(f"\n🔍 Final Features:")
    _compare_feature_pair(saved_final, captured_features["final"], "  ")

    print(f"\n🔍 Layer Features:")
    for layer_name in ['layer_4', 'layer_11', 'layer_17', 'layer_23']:
        if layer_name not in saved_layers:
            print(f"\n  ❌ {layer_name}: 保存的特征中不存在")
            continue

        if layer_name not in captured_features:
            print(f"\n  ❌ {layer_name}: 新推理中未捕获")
            continue

        print(f"\n  {layer_name}:")
        _compare_feature_pair(saved_layers[layer_name], captured_features[layer_name], "    ")

    print(f"\n迭代次数: {captured_features['num_iterations']} (预期: {metadata.get('num_aggregator_iterations')})")


def main(feature_root="/data1/datasets/VGGT_features",
         checkpoint_path="/data0/liqifeng/ZYC/model.pt",
         max_files=3):
    """Locate extracted feature files and sanity-check the first few.

    Args:
        feature_root: Directory tree searched recursively for ``metadata.json``
            files (previously hard-coded).
        checkpoint_path: VGGT checkpoint used for the model-based verification
            of the first file (previously hard-coded).
        max_files: How many feature files to inspect. The progress banner now
            reflects the actual count instead of a hard-coded "/3".
    """
    print("🔍 搜索特征文件...")
    metadata_files = sorted(
        os.path.join(root, 'metadata.json')
        for root, dirs, files in os.walk(feature_root)
        if 'metadata.json' in files
    )

    if not metadata_files:
        print(f"❌ 未找到任何 metadata.json 文件在 {feature_root}")
        return

    print(f"✅ 找到 {len(metadata_files)} 个特征文件")

    to_check = metadata_files[:max_files]
    for i, metadata_path in enumerate(to_check):
        print(f"\n\n{'#'*80}")
        print(f"# 检查文件 {i+1}/{len(to_check)}")
        print('#'*80)

        check_feature_file(metadata_path)

        # Only the first file gets the (expensive) model-based verification;
        # failures there are reported but do not abort the remaining checks.
        if i == 0:
            try:
                verify_with_original_model(metadata_path, checkpoint_path)
            except Exception as e:
                print(f"\n❌ 验证失败: {e}")
                import traceback
                traceback.print_exc()


# Run the verification only when executed as a script, not on import.
if __name__ == "__main__":
    main()
