#!/usr/bin/env python3
"""
对比原始ONNX模型和LayerNorm拆分后模型的输出结果
"""

import onnx
import numpy as np
import onnxruntime as ort
import argparse
import os

def load_input_data(input_path, input_shape=(1, 3, 750, 1333)):
    """Load raw float32 binary data from *input_path* and reshape it.

    Args:
        input_path: Path to a raw binary file of float32 values.
        input_shape: Target tensor shape. Defaults to the model's expected
            input of (1, 3, 750, 1333); previously this was hard-coded.

    Returns:
        np.ndarray of dtype float32 with shape *input_shape*. If the file
        holds more elements than needed, the extra tail is dropped; if it
        holds fewer, the remainder is zero-padded. A warning is printed in
        both cases.

    Raises:
        FileNotFoundError: If *input_path* does not exist.
    """
    print(f"加载输入数据: {input_path}")
    if not os.path.exists(input_path):
        raise FileNotFoundError(f"输入文件不存在: {input_path}")
    
    # Read the raw bytes and reinterpret them as float32.
    with open(input_path, 'rb') as f:
        data = f.read()
    
    # Cast to int: np.prod returns a numpy scalar, a plain int is cleaner
    # for comparisons and np.zeros below.
    total_elements = int(np.prod(input_shape))
    input_data = np.frombuffer(data, dtype=np.float32)
    
    if len(input_data) != total_elements:
        print(f"警告: 输入数据大小不匹配，期望 {total_elements}，实际 {len(input_data)}")
        if len(input_data) > total_elements:
            # Too much data: keep only the leading elements.
            input_data = input_data[:total_elements]
        else:
            # Too little data: zero-pad the tail.
            padded_data = np.zeros(total_elements, dtype=np.float32)
            padded_data[:len(input_data)] = input_data
            input_data = padded_data
    
    input_data = input_data.reshape(input_shape)
    print(f"输入数据形状: {input_data.shape}")
    print(f"输入数据范围: [{input_data.min():.6f}, {input_data.max():.6f}]")
    print(f"输入数据均值: {input_data.mean():.6f}")
    print(f"输入数据标准差: {input_data.std():.6f}")
    
    return input_data

def run_model_inference(model_path, input_data):
    """Run a single inference pass on an ONNX model and collect its outputs.

    Args:
        model_path: Path to the .onnx model file.
        input_data: numpy array fed to the model's (single) input.

    Returns:
        dict mapping each output name to the numpy array the model produced.
    """
    print(f"\n运行模型推理: {model_path}")
    
    # Build the ONNX Runtime session for this model.
    session = ort.InferenceSession(model_path)
    
    # Collect I/O metadata; assumes the model exposes exactly one input.
    input_name = session.get_inputs()[0].name
    output_names = [out.name for out in session.get_outputs()]
    
    print(f"模型输入名称: {input_name}")
    print(f"模型输出名称: {output_names}")
    
    # Execute the graph once, requesting every declared output.
    raw_outputs = session.run(output_names, {input_name: input_data})
    
    results = {}
    for idx, (name, tensor) in enumerate(zip(output_names, raw_outputs), start=1):
        results[name] = tensor
        print(f"输出 {idx} ({name}): 形状={tensor.shape}, 范围=[{tensor.min():.6f}, {tensor.max():.6f}], 均值={tensor.mean():.6f}")
    
    return results

def compare_outputs(original_outputs, split_outputs, tolerance=1e-6):
    """Compare two models' output dicts element-wise.

    Args:
        original_outputs: dict of output name -> np.ndarray from the
            reference model.
        split_outputs: dict of output name -> np.ndarray from the modified
            (LayerNorm-split) model.
        tolerance: maximum allowed absolute element-wise difference.

    Returns:
        True iff both dicts contain exactly the same output names, every
        paired output has an identical shape, and the maximum absolute
        difference of each pair is within *tolerance*.
    """
    print(f"\n=== 输出对比 (容差: {tolerance}) ===")
    
    all_match = True
    
    # Fix: the original comparison was asymmetric — outputs that exist only
    # in the split model were silently ignored, so a split model with extra
    # (spurious) outputs could still be reported as a full match.
    for extra_name in sorted(set(split_outputs) - set(original_outputs)):
        print(f"❌ 输出 {extra_name} 仅存在于拆分模型中")
        all_match = False
    
    for output_name in original_outputs.keys():
        if output_name not in split_outputs:
            print(f"❌ 输出 {output_name} 在拆分模型中不存在")
            all_match = False
            continue
        
        original_output = original_outputs[output_name]
        split_output = split_outputs[output_name]
        
        # Shapes must match before an element-wise diff makes sense.
        if original_output.shape != split_output.shape:
            print(f"❌ 输出 {output_name} 形状不匹配:")
            print(f"   原始模型: {original_output.shape}")
            print(f"   拆分模型: {split_output.shape}")
            all_match = False
            continue
        
        # Element-wise absolute difference statistics.
        diff = np.abs(original_output - split_output)
        max_diff = np.max(diff)
        mean_diff = np.mean(diff)
        std_diff = np.std(diff)
        
        # The worst-case element decides pass/fail against the tolerance.
        if max_diff <= tolerance:
            print(f"✅ 输出 {output_name}: 完全匹配")
            print(f"   最大差异: {max_diff:.2e}")
            print(f"   平均差异: {mean_diff:.2e}")
            print(f"   差异标准差: {std_diff:.2e}")
        else:
            print(f"❌ 输出 {output_name}: 差异超出容差")
            print(f"   最大差异: {max_diff:.2e} (容差: {tolerance:.2e})")
            print(f"   平均差异: {mean_diff:.2e}")
            print(f"   差异标准差: {std_diff:.2e}")
            all_match = False
        
        # Value ranges help diagnose scale/offset mismatches at a glance.
        print(f"   原始输出范围: [{original_output.min():.6f}, {original_output.max():.6f}]")
        print(f"   拆分输出范围: [{split_output.min():.6f}, {split_output.max():.6f}]")
        print()
    
    return all_match

def main():
    """CLI entry point: parse arguments, run both models, compare outputs."""

    def _banner(title):
        # Print a titled section separator (50 '=' chars around the title).
        print("\n" + "=" * 50)
        print(title)
        print("=" * 50)

    parser = argparse.ArgumentParser(description='对比ONNX模型输出')
    parser.add_argument('--original', default='co_detr_optimized.onnx',
                        help='原始ONNX模型路径')
    parser.add_argument('--split', default='co_detr_layernorm_split_fixed_v5.onnx',
                        help='拆分后ONNX模型路径')
    parser.add_argument('--input', default='../data/input_data.bin',
                        help='输入数据路径')
    parser.add_argument('--tolerance', type=float, default=1e-6,
                        help='数值容差')

    args = parser.parse_args()

    # Bail out on the first missing file.
    required = ((args.original, '原始模型'), (args.split, '拆分模型'), (args.input, '输入数据'))
    for file_path, label in required:
        if not os.path.exists(file_path):
            print(f"错误: {label}文件不存在: {file_path}")
            return

    try:
        # Load the shared input tensor once and feed it to both models.
        input_data = load_input_data(args.input)

        _banner("原始模型推理")
        original_outputs = run_model_inference(args.original, input_data)

        _banner("拆分模型推理")
        split_outputs = run_model_inference(args.split, input_data)

        _banner("结果对比")
        match = compare_outputs(original_outputs, split_outputs, args.tolerance)

        if match:
            print("\n🎉 所有输出都完全匹配！LayerNorm拆分成功！")
        else:
            print("\n⚠️  存在输出差异，需要进一步检查。")

    except Exception as e:
        # Top-level boundary: report the failure with a full traceback.
        print(f"错误: {e}")
        import traceback
        traceback.print_exc()

# Standard script entry guard: run the CLI only when executed directly.
if __name__ == "__main__":
    main() 