import os
import pickle
import numpy as np
from typing import Any, Dict, List

def analyze_data_structure(data: Any, prefix: str = "", max_depth: int = 5) -> List[str]:
    """Recursively describe the structure of *data* as human-readable lines.

    Args:
        data: Arbitrary object to inspect. Dicts, lists/tuples, numpy
            arrays, objects with a ``__dict__``, and plain scalars each
            get a dedicated description format.
        prefix: Dotted/indexed path label for the current node
            (e.g. ``"root.key[0]"``).
        max_depth: Remaining recursion budget; descent stops at 0.

    Returns:
        A list of ``"path: description"`` strings, one per inspected node.
    """
    info: List[str] = []

    if max_depth <= 0:
        return [f"{prefix}: [Max depth reached]"]

    if isinstance(data, dict):
        info.append(f"{prefix}: Dict with {len(data)} keys")
        for key, value in data.items():
            info.extend(analyze_data_structure(value, f"{prefix}.{key}", max_depth - 1))
    elif isinstance(data, (list, tuple)):
        info.append(f"{prefix}: {type(data).__name__} with {len(data)} items")
        if len(data) > 0:
            # Only the first element is analyzed in depth; remaining
            # elements are assumed to share the same structure.
            info.extend(analyze_data_structure(data[0], f"{prefix}[0]", max_depth - 1))
            if len(data) > 1:
                info.append(f"{prefix}[1...{len(data)-1}]: Similar items")
    elif isinstance(data, np.ndarray):
        info.append(f"{prefix}: numpy.ndarray, shape={data.shape}, dtype={data.dtype}")
        if data.size <= 10:  # small arrays: show the actual values
            info.append(f"{prefix}.values: {data.flatten()}")
    elif hasattr(data, '__dict__'):
        info.append(f"{prefix}: {type(data).__name__} object")
        for attr_name in dir(data):
            if not attr_name.startswith('_'):
                try:
                    attr_value = getattr(data, attr_name)
                    if not callable(attr_value):
                        info.extend(analyze_data_structure(attr_value, f"{prefix}.{attr_name}", max_depth - 1))
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed; property getters may
                # still raise arbitrary Exceptions, which we report inline.
                except Exception:
                    info.append(f"{prefix}.{attr_name}: [Error accessing]")
    else:
        info.append(f"{prefix}: {type(data).__name__} = {data}")

    return info

def analyze_pkl_file(file_path: str) -> Dict[str, Any]:
    """Load a single .pkl file and summarize its contents.

    Args:
        file_path: Path to the pickle file to inspect.

    Returns:
        A result dict with keys:
        ``file_path``, ``file_size`` (bytes), ``success`` (bool),
        ``error`` (message string on failure, else None),
        ``structure`` (lines from :func:`analyze_data_structure`), and
        ``summary`` (type/keys info, plus FrankMocap-specific fields
        when a ``pred_output_list`` key is present).
    """
    result: Dict[str, Any] = {
        'file_path': file_path,
        'file_size': os.path.getsize(file_path),
        'success': False,
        'error': None,
        'structure': [],
        'summary': {}
    }

    try:
        # SECURITY: pickle.load can execute arbitrary code — only run this
        # on trusted files (here: our own mocap output directory).
        with open(file_path, 'rb') as f:
            data = pickle.load(f)

        result['success'] = True
        result['structure'] = analyze_data_structure(data, "root")

        # Build the summary section.
        if isinstance(data, dict):
            result['summary']['type'] = 'dict'
            result['summary']['keys'] = list(data.keys())
            # len(data) instead of len(data.keys()): same value, no view.
            result['summary']['key_count'] = len(data)

            # Special-case the FrankMocap output layout.
            if 'pred_output_list' in data:
                pred_list = data['pred_output_list']
                result['summary']['pred_output_count'] = len(pred_list) if pred_list else 0
                # A truthy list implies len > 0, so the explicit length
                # check from the original is redundant.
                if pred_list and pred_list[0] is not None:
                    first_pred = pred_list[0]
                    if isinstance(first_pred, dict):
                        result['summary']['prediction_keys'] = list(first_pred.keys())
        else:
            result['summary']['type'] = type(data).__name__

    except Exception as e:
        result['error'] = str(e)

    return result

def main():
    """Scan the mocap output directory for .pkl files, print an analysis
    of each one, and write a detailed report to analyze_results.txt."""
    # Directory produced by the FrankMocap pipeline.
    mocap_dir = './mocap_output_full/mocap'

    if not os.path.exists(mocap_dir):
        print(f"错误: 目录 {mocap_dir} 不存在")
        return

    # Collect all pickle files, in a stable (sorted) order.
    pkl_files = sorted([f for f in os.listdir(mocap_dir) if f.endswith('.pkl')])

    if not pkl_files:
        print(f"在目录 {mocap_dir} 中没有找到 .pkl 文件")
        return

    print(f"找到 {len(pkl_files)} 个 PKL 文件")
    print("=" * 80)

    # Analyze each file.
    all_results = []
    for i, filename in enumerate(pkl_files):
        file_path = os.path.join(mocap_dir, filename)
        # BUG FIX: the original printed the literal text "(unknown)"
        # instead of interpolating the current file name.
        print(f"\n[{i+1}/{len(pkl_files)}] 分析文件: {filename}")
        print("-" * 50)

        result = analyze_pkl_file(file_path)
        all_results.append(result)

        if result['success']:
            print(f"文件大小: {result['file_size']} bytes")
            print(f"数据类型: {result['summary'].get('type', 'Unknown')}")

            if 'keys' in result['summary']:
                print(f"主要键值: {result['summary']['keys']}")

            if 'pred_output_count' in result['summary']:
                print(f"预测输出数量: {result['summary']['pred_output_count']}")

            if 'prediction_keys' in result['summary']:
                print(f"预测数据键值: {result['summary']['prediction_keys']}")

            print("\n详细结构:")
            for info_line in result['structure']:  # full structure dump
                print(f"  {info_line}")
        else:
            print(f"错误: {result['error']}")

    # Summary report.
    print("\n" + "=" * 80)
    print("总结报告")
    print("=" * 80)

    successful_files = [r for r in all_results if r['success']]
    failed_files = [r for r in all_results if not r['success']]

    print(f"成功分析: {len(successful_files)} 个文件")
    print(f"失败: {len(failed_files)} 个文件")

    if failed_files:
        print("\n失败的文件:")
        for result in failed_files:
            print(f"  {os.path.basename(result['file_path'])}: {result['error']}")

    if successful_files:
        print("\n文件内容一致性检查:")

        # Union of top-level keys across all successfully read files.
        all_keys = set()
        for result in successful_files:
            if 'keys' in result['summary']:
                all_keys.update(result['summary']['keys'])

        print(f"所有文件中出现的键值: {sorted(all_keys)}")

        # Range of prediction-output counts.
        pred_counts = [r['summary'].get('pred_output_count', 0) for r in successful_files]
        if pred_counts:
            print(f"预测输出数量范围: {min(pred_counts)} - {max(pred_counts)}")

        # Union of per-prediction keys.
        all_pred_keys = set()
        for result in successful_files:
            if 'prediction_keys' in result['summary']:
                all_pred_keys.update(result['summary']['prediction_keys'])

        if all_pred_keys:
            print(f"预测数据中的键值: {sorted(all_pred_keys)}")

    # Write the detailed report to disk.
    with open('analyze_results.txt', 'w', encoding='utf-8') as f:
        f.write("PKL 文件分析结果\n")
        f.write("=" * 80 + "\n\n")

        for i, result in enumerate(all_results):
            filename = os.path.basename(result['file_path'])
            # BUG FIX: the original wrote the literal "(unknown)" and never
            # used the computed filename.
            f.write(f"[{i+1}] {filename}\n")
            f.write("-" * 50 + "\n")

            if result['success']:
                f.write(f"文件大小: {result['file_size']} bytes\n")
                f.write(f"摘要: {result['summary']}\n\n")
                f.write("详细结构:\n")
                for info_line in result['structure']:
                    f.write(f"  {info_line}\n")
            else:
                f.write(f"错误: {result['error']}\n")

            f.write("\n" + "=" * 80 + "\n\n")

    # FIX: announce success only after the report has actually been written.
    print(f"\n分析完成! 详细信息已保存到 analyze_results.txt")

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()