#!/usr/bin/env python3
"""
ONNX模型可视化工具
支持netron可视化和shape分析
"""

import os
import sys
import subprocess
import argparse
import onnx
import onnxruntime
import numpy as np

def install_netron():
    """Make sure the ``netron`` package is importable, pip-installing it on demand.

    Returns:
        bool: True if netron is (now) available, False if installation failed.
    """
    try:
        import netron  # noqa: F401 -- availability probe only
    except ImportError:
        # Not present: install into the current interpreter's environment.
        print("📦 正在安装netron...")
        try:
            subprocess.check_call([sys.executable, "-m", "pip", "install", "netron"])
            import netron  # noqa: F401 -- verify the fresh install is importable
        except Exception as e:
            print(f"❌ Netron安装失败: {e}")
            return False
        print("✅ Netron安装完成")
        return True
    print("✅ Netron已安装")
    return True

def _tensor_shape(value_info):
    """Return the shape of a graph input/output as a list of ints / symbolic names."""
    shape = []
    for dim in value_info.type.tensor_type.shape.dim:
        # Each dim is either symbolic (dim_param, e.g. "batch_size") or fixed (dim_value).
        shape.append(dim.dim_param if dim.dim_param else dim.dim_value)
    return shape


def analyze_onnx_model(onnx_file):
    """Print a structural report for an ONNX model.

    Reports model metadata (IR/opset/producer), input and output tensor shapes,
    per-operator node counts, a check for ops commonly problematic in TensorRT,
    and the on-disk file size. Errors are reported, never raised.

    Args:
        onnx_file: Path to the .onnx model file.
    """
    print(f"\n🔍 分析ONNX模型: {onnx_file}")
    print("="*60)

    if not os.path.exists(onnx_file):
        print(f"❌ 文件不存在: {onnx_file}")
        return

    try:
        # Load the model protobuf.
        onnx_model = onnx.load(onnx_file)

        # Basic metadata. Guard against models that declare no opset imports,
        # which would otherwise raise IndexError on opset_import[0].
        print(f"📋 模型信息:")
        print(f"   IR版本: {onnx_model.ir_version}")
        opset = onnx_model.opset_import[0].version if onnx_model.opset_import else "unknown"
        print(f"   Opset版本: {opset}")
        print(f"   Producer: {onnx_model.producer_name}")
        print()

        # Graph inputs
        print("📥 输入信息:")
        for input_info in onnx_model.graph.input:
            print(f"  名称: {input_info.name}")
            print(f"  类型: {input_info.type.tensor_type.elem_type}")
            print(f"  Shape: {_tensor_shape(input_info)}")
            print()

        # Graph outputs
        print("📤 输出信息:")
        for output_info in onnx_model.graph.output:
            print(f"  名称: {output_info.name}")
            print(f"  类型: {output_info.type.tensor_type.elem_type}")
            print(f"  Shape: {_tensor_shape(output_info)}")
            print()

        # Per-operator node frequency.
        print("🔧 节点统计:")
        node_types = {}
        for node in onnx_model.graph.node:
            node_types[node.op_type] = node_types.get(node.op_type, 0) + 1

        total_nodes = sum(node_types.values())
        print(f"  总节点数: {total_nodes}")
        print("  算子类型:")
        for op_type, count in sorted(node_types.items()):
            # Guard against an empty graph to avoid ZeroDivisionError.
            percentage = (count / total_nodes) * 100 if total_nodes else 0.0
            print(f"    {op_type}: {count} ({percentage:.1f}%)")

        # Flag ops this tool's authors consider problematic for TensorRT export.
        # NOTE(review): support for these varies by TensorRT version — the list
        # is a heuristic, not a guarantee.
        print("\n⚠️  TensorRT兼容性检查:")
        tensorrt_unsupported = ['If', 'CumSum', 'TopK', 'GridSample', 'Loop', 'Scan']
        for op in tensorrt_unsupported:
            count = node_types.get(op, 0)
            if count > 0:
                print(f"    ❌ {op}: {count} 个 (TensorRT不支持)")
            else:
                print(f"    ✅ {op}: 0 个")

        # On-disk size in MB.
        file_size = os.path.getsize(onnx_file) / (1024 * 1024)  # MB
        print(f"\n💾 模型大小: {file_size:.2f} MB")

    except Exception as e:
        # Best-effort CLI tool: report the failure instead of crashing.
        print(f"❌ 模型分析失败: {e}")

def test_onnx_inference(onnx_file):
    """测试ONNX模型推理"""
    print(f"\n🧪 测试推理: {onnx_file}")
    print("="*60)
    
    try:
        # 创建ONNX Runtime会话
        ort_session = onnxruntime.InferenceSession(onnx_file)
        
        # 获取输入信息
        input_info = ort_session.get_inputs()[0]
        input_shape = input_info.shape
        input_name = input_info.name
        
        print(f"📥 输入信息:")
        print(f"  名称: {input_name}")
        print(f"  Shape: {input_shape}")
        print(f"  类型: {input_info.type}")
        
        # 创建测试输入
        if input_shape[0] == 'batch_size' or input_shape[0] == -1:
            batch_size = 1
        else:
            batch_size = input_shape[0]
        
        test_input = np.random.randn(batch_size, 3, 750, 1333).astype(np.float32)
        
        # 运行推理
        print(f"\n🔄 运行推理测试...")
        outputs = ort_session.run(None, {input_name: test_input})
        
        print("✅ 推理成功!")
        print(f"   输出数量: {len(outputs)}")
        
        # 分析输出
        for i, output in enumerate(outputs):
            print(f"   输出 {i}: shape={output.shape}, dtype={output.dtype}")
            
            # 数值范围分析
            if output.dtype in [np.float32, np.float64]:
                print(f"     数值范围: [{output.min():.4f}, {output.max():.4f}]")
                print(f"     均值: {output.mean():.4f}")
                print(f"     标准差: {output.std():.4f}")
            else:
                print(f"     数值范围: [{output.min()}, {output.max()}]")
        
    except Exception as e:
        print(f"❌ 推理测试失败: {e}")

def run_shape_inference(onnx_file):
    """Run ONNX shape inference and save the annotated model next to the input.

    Args:
        onnx_file: Path to the .onnx model file.

    Returns:
        Path of the saved shape-inferred model, or None on failure.
    """
    print(f"\n🔮 Shape推理: {onnx_file}")
    print("="*60)

    try:
        # Load and run ONNX's built-in shape-inference pass.
        onnx_model = onnx.load(onnx_file)

        print("🔄 运行shape推理...")
        inferred_model = onnx.shape_inference.infer_shapes(onnx_model)

        # Derive the output path from the file stem. (A naive
        # str.replace('.onnx', ...) would corrupt paths containing '.onnx'
        # elsewhere, e.g. a directory named 'model.onnx/'.)
        root, ext = os.path.splitext(onnx_file)
        inferred_file = root + '_inferred' + ext
        onnx.save(inferred_model, inferred_file)
        print(f"✅ Shape推理完成，保存到: {inferred_file}")

        # Report the (possibly newly concrete) output shapes.
        print("\n📊 推理后的Shape信息:")
        for output_info in inferred_model.graph.output:
            print(f"  输出: {output_info.name}")
            shape = []
            for dim in output_info.type.tensor_type.shape.dim:
                # Symbolic dim name if present, else the fixed dim value.
                if dim.dim_param:
                    shape.append(dim.dim_param)
                else:
                    shape.append(dim.dim_value)
            print(f"  Shape: {shape}")

        return inferred_file

    except Exception as e:
        # Best-effort CLI tool: report the failure instead of crashing.
        print(f"❌ Shape推理失败: {e}")
        return None

def visualize_with_netron(onnx_file, port=8080):
    """Serve the model in a local Netron viewer; blocks until Ctrl+C.

    Args:
        onnx_file: Path to the .onnx model file.
        port: TCP port for the local Netron web server.
    """
    print(f"\n🌐 Netron可视化: {onnx_file}")
    print("="*60)

    if not install_netron():
        return

    try:
        import netron

        # Announce connection details before the blocking server call.
        for line in (
            f"🌐 启动Netron服务器...",
            f"   模型文件: {onnx_file}",
            f"   端口: {port}",
            f"   浏览器地址: http://localhost:{port}",
            "   按 Ctrl+C 停止服务器",
        ):
            print(line)

        netron.start(onnx_file, port=port)  # blocks until interrupted

    except KeyboardInterrupt:
        print("\n🛑 Netron服务器已停止")
    except Exception as e:
        print(f"❌ Netron可视化失败: {e}")

def main():
    """CLI entry point: parse arguments and dispatch the requested actions."""
    parser = argparse.ArgumentParser(description='ONNX模型可视化工具')
    parser.add_argument('onnx_file', help='ONNX模型文件路径')
    parser.add_argument('--analyze', action='store_true', help='分析模型结构')
    parser.add_argument('--test', action='store_true', help='测试模型推理')
    parser.add_argument('--infer-shapes', action='store_true', help='运行shape推理')
    parser.add_argument('--visualize', action='store_true', help='启动netron可视化')
    parser.add_argument('--port', type=int, default=8080, help='netron服务器端口')
    parser.add_argument('--all', action='store_true', help='执行所有操作')

    args = parser.parse_args()

    # Bail out early if the model file is missing.
    if not os.path.exists(args.onnx_file):
        print(f"❌ 文件不存在: {args.onnx_file}")
        return

    print(f"🎯 ONNX模型可视化工具")
    print(f"📁 模型文件: {args.onnx_file}")
    print("="*60)

    requested = (args.analyze, args.test, args.infer_shapes, args.visualize)

    # --all implies every individual action.
    if args.all or args.analyze:
        analyze_onnx_model(args.onnx_file)
    if args.all or args.test:
        test_onnx_inference(args.onnx_file)
    if args.all or args.infer_shapes:
        run_shape_inference(args.onnx_file)
    if args.all or args.visualize:
        visualize_with_netron(args.onnx_file, args.port)

    # No action flags at all: default to a structural analysis plus a hint.
    if not (args.all or any(requested)):
        analyze_onnx_model(args.onnx_file)
        print(f"\n💡 提示: 使用 --help 查看所有选项")

if __name__ == "__main__":
    main() 