#!/usr/bin/env python3
"""
修复版ONNX LayerNorm算子拆分脚本
解决拓扑排序和模型结构问题
添加敏感层识别和FP32精度设置
"""

import onnx
import numpy as np
from onnx import helper, numpy_helper
import argparse
import os
import re
import json

# Layer (op) types that are numerically sensitive to FP16 precision.
# Maps ONNX op_type -> {'reason', 'solution'} metadata used when reporting
# sensitive layers and when generating the TensorRT config/build script.
# NOTE: the 'reason'/'solution' strings are runtime data (they are written
# into JSON configs and generated shell scripts), so they are kept verbatim.
SENSITIVE_LAYER_TYPES = {
    'LayerNormalization': {
        'reason': '层归一化对数值精度非常敏感，容易在FP16下出现数值不稳定',
        'solution': '保持为FP32精度'
    },
    'Softmax': {
        'reason': '指数运算在FP16下容易溢出，导致数值不稳定',
        'solution': '保持为FP32精度或使用数值稳定的实现'
    },
    'LogSoftmax': {
        'reason': '对数指数运算在FP16下精度损失严重',
        'solution': '保持为FP32精度'
    },
    'Exp': {
        'reason': '指数函数在FP16下容易溢出',
        'solution': '保持为FP32精度'
    },
    'Log': {
        'reason': '对数函数在FP16下精度损失',
        'solution': '保持为FP32精度'
    },
    'Sqrt': {
        'reason': '平方根在FP16下可能出现数值不稳定',
        'solution': '保持为FP32精度'
    },
    'Pow': {
        'reason': '幂运算在FP16下精度损失',
        'solution': '保持为FP32精度'
    },
    'ReduceMean': {
        'reason': '均值计算在FP16下可能累积误差',
        'solution': '保持为FP32精度'
    },
    'ReduceSum': {
        'reason': '求和运算在FP16下可能累积误差',
        'solution': '保持为FP32精度'
    }
}

def identify_sensitive_layers(model):
    """Collect metadata for every graph node whose op type is FP16-sensitive.

    Returns a list of dicts (name/type/reason/solution/inputs/outputs),
    one per matching node, in graph order.
    """
    return [
        {
            'name': node.name,
            'type': node.op_type,
            'reason': SENSITIVE_LAYER_TYPES[node.op_type]['reason'],
            'solution': SENSITIVE_LAYER_TYPES[node.op_type]['solution'],
            'inputs': list(node.input),
            'outputs': list(node.output),
        }
        for node in model.graph.node
        if node.op_type in SENSITIVE_LAYER_TYPES
    ]

def generate_tensorrt_config(sensitive_layers, output_path):
    """Write a TensorRT build-configuration JSON next to *output_path*.

    The JSON records the FP16-sensitive layers, the layer names that should
    stay in FP32, trtexec build options, and general recommendations.
    The config file path is derived by replacing the '.onnx' suffix.

    Returns the config dict that was serialized.
    """
    fp32_layer_names = [layer['name'] for layer in sensitive_layers]

    config = {
        "sensitive_layers": sensitive_layers,
        "fp32_layers": fp32_layer_names,
        "build_config": {
            "fp16": True,
            "fp32_layers": fp32_layer_names,
            "workspace_size": "16GB",
            "max_batch_size": 1,
            "optimization_level": 5,
        },
        "recommendations": [
            "将识别出的敏感层保持为FP32精度",
            "使用混合精度训练",
            "对关键层使用数值稳定的实现",
            "监控推理过程中的数值稳定性",
        ],
    }

    # Persist alongside the model file.
    config_file = output_path.replace('.onnx', '_tensorrt_config.json')
    with open(config_file, 'w', encoding='utf-8') as f:
        json.dump(config, f, indent=2, ensure_ascii=False)

    print(f"✅ TensorRT配置文件已保存: {config_file}")
    return config

def generate_tensorrt_build_script(sensitive_layers, model_path, output_dir):
    """Generate a bash script that builds mixed-precision and FP32 TensorRT
    engines for *model_path* via trtexec, then prints the sensitive layers.

    The script is written to ``build_tensorrt_engines.sh`` in the CURRENT
    working directory (not *output_dir*) and made executable.

    BUGFIX: the original emitted ``echo "\\n=== ... ==="``; plain bash
    ``echo`` (without ``-e``) prints the two characters ``\\n`` literally
    instead of a newline. Replaced with an explicit empty ``echo``.

    NOTE(review): despite the comments, the trtexec invocation does not
    actually pin the sensitive layers to FP32 (no --layerPrecisions /
    --precisionConstraints flags) — confirm whether that is intended.

    Returns the script filename.
    """
    script_content = f"""#!/bin/bash
# 自动生成的TensorRT构建脚本
# 针对敏感层进行FP32精度设置

export TENSORRT_ROOT=/data2/xd/TensorRT-8.5.1.7.Linux.x86_64-gnu.cuda-11.8.cudnn8.6/TensorRT-8.5.1.7
export PATH="$PATH:$TENSORRT_ROOT/bin"
export LD_LIBRARY_PATH="$TENSORRT_ROOT/lib:$LD_LIBRARY_PATH"

MODEL_PATH="{model_path}"
OUTPUT_DIR="{output_dir}"

echo "=== 构建Co-DETR TensorRT引擎 ==="
echo "模型文件: $MODEL_PATH"
echo "输出目录: $OUTPUT_DIR"

# 创建输出目录
mkdir -p $OUTPUT_DIR

# 构建FP16引擎（敏感层保持FP32）
echo "构建FP16引擎（敏感层FP32）..."
trtexec \\
    --onnx=$MODEL_PATH \\
    --saveEngine=$OUTPUT_DIR/co_detr_mixed_precision.plan \\
    --memPoolSize=workspace:16000 \\
    --fp16 \\
    --noBuilderCache \\
    --verbose \\
    --dumpProfile \\
    --dumpLayerInfo

if [ $? -eq 0 ]; then
    echo "✅ 混合精度引擎构建成功!"
else
    echo "❌ 混合精度引擎构建失败!"
fi

# 构建纯FP32引擎作为对比
echo "构建FP32引擎..."
trtexec \\
    --onnx=$MODEL_PATH \\
    --saveEngine=$OUTPUT_DIR/co_detr_fp32.plan \\
    --memPoolSize=workspace:16000 \\
    --noBuilderCache \\
    --verbose \\
    --dumpProfile \\
    --dumpLayerInfo

if [ $? -eq 0 ]; then
    echo "✅ FP32引擎构建成功!"
else
    echo "❌ FP32引擎构建失败!"
fi

echo "=== 构建完成 ==="
echo "检查生成的引擎文件..."
ls -la $OUTPUT_DIR/*.plan 2>/dev/null || echo "没有生成引擎文件"

# 敏感层信息
echo ""
echo "=== 敏感层信息 ==="
"""

    # Append one echo section per sensitive layer.
    # NOTE(review): layer names are interpolated unescaped into
    # double-quoted bash strings; names containing `"` or `$` would
    # break the script — confirm node names are benign.
    for layer in sensitive_layers:
        script_content += f"""echo "层名称: {layer['name']}"
echo "  类型: {layer['type']}"
echo "  原因: {layer['reason']}"
echo "  解决方案: {layer['solution']}"
echo ""
"""

    # Write into the current directory and mark executable (rwxr-xr-x).
    script_file = "build_tensorrt_engines.sh"
    with open(script_file, 'w', encoding='utf-8') as f:
        f.write(script_content)

    os.chmod(script_file, 0o755)

    print(f"✅ TensorRT构建脚本已生成: {script_file}")
    return script_file

def extract_layernorm_parameters(node):
    """Read axis/epsilon/normalized_shape attributes plus optional
    gamma/beta input names from a LayerNormalization node.

    Missing attributes fall back to defaults (axis=-1, epsilon=1e-5,
    normalized_shape=None); gamma/beta stay None when the node has
    fewer than 2/3 inputs.
    """
    params = {
        'axis': -1,
        'epsilon': 1e-5,
        'normalized_shape': None,
        'gamma': None,
        'beta': None,
    }

    for attr in node.attribute:
        attr_name = attr.name
        if attr_name == 'axis':
            params['axis'] = attr.i
        elif attr_name == 'epsilon':
            params['epsilon'] = attr.f
        elif attr_name == 'normalized_shape':
            params['normalized_shape'] = list(attr.ints)

    # Inputs beyond the data tensor are the scale (gamma) and bias (beta).
    inputs = list(node.input)
    if len(inputs) > 1:
        params['gamma'] = inputs[1]
    if len(inputs) > 2:
        params['beta'] = inputs[2]

    return params

def create_fixed_layernorm_replacement(node, model):
    """Decompose one LayerNormalization node into primitive ONNX ops.

    Pipeline: ReduceMean -> Sub -> Pow(2) -> ReduceMean (variance)
              -> Add(eps) -> Sqrt -> Div -> Mul(gamma) -> Add(beta)

    Returns a (nodes, initializers) pair. Both lists are empty when the
    input tensor's shape cannot be resolved; the caller is responsible
    for handling that failure.
    """
    nodes = []
    initializers = []
    
    input_name = node.input[0]
    output_name = node.output[0]
    params = extract_layernorm_parameters(node)
    
    # Resolve the input tensor shape from graph inputs / value_info.
    input_shape = get_node_input_shape(model, input_name)
    if input_shape is None:
        print(f"警告: 无法获取节点 {node.name} 的输入形状")
        return [], []
    
    # Convert a negative axis into an absolute dimension index.
    axis = params['axis']
    if axis < 0:
        axis = len(input_shape) + axis
    
    normalized_shape = params['normalized_shape']
    if normalized_shape is None:
        # Default: normalize over the last dimension only.
        # NOTE(review): input_shape[-1] may be a dim_param *string* for
        # dynamic models — the default gamma/beta initializers below would
        # then fail. Confirm shapes are static at this point.
        normalized_shape = [input_shape[-1]]
    
    print(f"  LayerNorm参数:")
    print(f"    轴: {axis}")
    print(f"    归一化形状: {normalized_shape}")
    print(f"    epsilon: {params['epsilon']}")
    
    # Names for the intermediate tensors of the sub-graph.
    mean_name = f"{node.name}_mean"
    sub_mean_name = f"{node.name}_sub_mean"
    pow_name = f"{node.name}_pow"
    var_name = f"{node.name}_var"
    add_eps_name = f"{node.name}_add_eps"
    sqrt_name = f"{node.name}_sqrt"
    div_name = f"{node.name}_div"
    mul_name = f"{node.name}_mul"
    # The final Add writes directly to the original output name so that
    # downstream consumers remain connected.
    final_add_name = output_name
    
    # 1. Mean along the normalization axis.
    # NOTE(review): `axes` as an attribute is only valid for opset <= 17;
    # opset 18+ takes axes as a tensor input — confirm the target opset.
    mean_node = helper.make_node(
        'ReduceMean',
        inputs=[input_name],
        outputs=[mean_name],
        name=f"{node.name}_reduce_mean",
        axes=[axis],
        keepdims=1
    )
    nodes.append(mean_node)
    
    # 2. Center the input: x - mean.
    sub_node = helper.make_node(
        'Sub',
        inputs=[input_name, mean_name],
        outputs=[sub_mean_name],
        name=f"{node.name}_sub"
    )
    nodes.append(sub_node)
    
    # 3. Square the centered values.
    # Constant exponent tensor for the Pow op.
    exponent_initializer = numpy_helper.from_array(
        np.array([2.0], dtype=np.float32), 
        f"{node.name}_exponent"
    )
    initializers.append(exponent_initializer)
    pow_node = helper.make_node(
        'Pow',
        inputs=[sub_mean_name, f"{node.name}_exponent"],
        outputs=[pow_name],
        name=f"{node.name}_pow"
    )
    nodes.append(pow_node)
    
    # 4. Variance = mean of squared deviations (biased estimator).
    var_node = helper.make_node(
        'ReduceMean',
        inputs=[pow_name],
        outputs=[var_name],
        name=f"{node.name}_reduce_mean_var",
        axes=[axis],
        keepdims=1
    )
    nodes.append(var_node)
    
    # 5. Add epsilon for numerical stability.
    eps_initializer = numpy_helper.from_array(
        np.array([params['epsilon']], dtype=np.float32), 
        f"{node.name}_eps"
    )
    initializers.append(eps_initializer)
    add_eps_node = helper.make_node(
        'Add',
        inputs=[var_name, f"{node.name}_eps"],
        outputs=[add_eps_name],
        name=f"{node.name}_add_eps"
    )
    nodes.append(add_eps_node)
    
    # 6. Standard deviation = sqrt(variance + eps).
    sqrt_node = helper.make_node(
        'Sqrt',
        inputs=[add_eps_name],
        outputs=[sqrt_name],
        name=f"{node.name}_sqrt"
    )
    nodes.append(sqrt_node)
    
    # 7. Normalize: (x - mean) / std.
    div_node = helper.make_node(
        'Div',
        inputs=[sub_mean_name, sqrt_name],
        outputs=[div_name],
        name=f"{node.name}_div"
    )
    nodes.append(div_node)
    
    # 8. Scale by gamma.
    if params['gamma'] is not None:
        # Reuse the original gamma tensor from the source node.
        mul_node = helper.make_node(
            'Mul',
            inputs=[div_name, params['gamma']],
            outputs=[mul_name],
            name=f"{node.name}_mul"
        )
    else:
        # No gamma input: synthesize a default all-ones scale.
        gamma_name = f"{node.name}_gamma"
        gamma_initializer = numpy_helper.from_array(
            np.ones(normalized_shape, dtype=np.float32), gamma_name
        )
        initializers.append(gamma_initializer)
        mul_node = helper.make_node(
            'Mul',
            inputs=[div_name, gamma_name],
            outputs=[mul_name],
            name=f"{node.name}_mul"
        )
    nodes.append(mul_node)
    
    # 9. Shift by beta.
    if params['beta'] is not None:
        # Reuse the original beta tensor from the source node.
        add_beta_node = helper.make_node(
            'Add',
            inputs=[mul_name, params['beta']],
            outputs=[final_add_name],
            name=f"{node.name}_add_beta"
        )
    else:
        # No beta input: synthesize a default all-zeros bias.
        beta_name = f"{node.name}_beta"
        beta_initializer = numpy_helper.from_array(
            np.zeros(normalized_shape, dtype=np.float32), beta_name
        )
        initializers.append(beta_initializer)
        add_beta_node = helper.make_node(
            'Add',
            inputs=[mul_name, beta_name],
            outputs=[final_add_name],
            name=f"{node.name}_add_beta"
        )
    nodes.append(add_beta_node)
    
    return nodes, initializers

def find_layernorm_nodes(model):
    """Return every LayerNormalization node in the graph, in graph order."""
    return [n for n in model.graph.node if n.op_type == 'LayerNormalization']

def get_node_input_shape(model, input_name):
    """Look up *input_name* among the graph inputs and value_info entries.

    Returns the shape as a list — ints for static dims, dim_param strings
    for dynamic dims — or None when the tensor is not found. Graph inputs
    are searched before value_info, matching the original lookup order.
    """
    for info in list(model.graph.input) + list(model.graph.value_info):
        if info.name != input_name:
            continue
        dims = info.type.tensor_type.shape.dim
        # A non-empty dim_param marks a symbolic (dynamic) dimension.
        return [d.dim_param if d.dim_param else d.dim_value for d in dims]
    return None

def analyze_model_structure(model):
    """Print a structural summary of *model* and return its sensitive layers.

    Reports I/O counts, an operator-type histogram, up to 5 LayerNorm node
    names, and up to 10 FP16-sensitive layers with reasons/solutions.
    """
    graph = model.graph
    print("=== 模型结构分析 ===")
    print(f"模型输入: {len(graph.input)}")
    print(f"模型输出: {len(graph.output)}")
    print(f"节点总数: {len(graph.node)}")
    print(f"初始化器数量: {len(graph.initializer)}")

    # Histogram of op types across the graph.
    op_types = {}
    for node in graph.node:
        op_types[node.op_type] = op_types.get(node.op_type, 0) + 1

    print("\n算子类型统计:")
    for op_type in sorted(op_types):
        print(f"  {op_type}: {op_types[op_type]}")

    # Show at most the first five LayerNorm nodes.
    layernorm_nodes = find_layernorm_nodes(model)
    if layernorm_nodes:
        print(f"\n找到 {len(layernorm_nodes)} 个LayerNorm节点:")
        for idx, node in enumerate(layernorm_nodes[:5], start=1):
            print(f"  {idx}. {node.name}")

    # Show at most the first ten FP16-sensitive layers.
    sensitive_layers = identify_sensitive_layers(model)
    if sensitive_layers:
        print(f"\n找到 {len(sensitive_layers)} 个敏感层:")
        for idx, layer in enumerate(sensitive_layers[:10], start=1):
            print(f"  {idx}. {layer['name']} ({layer['type']})")
            print(f"     原因: {layer['reason']}")
            print(f"     解决方案: {layer['solution']}")

    return sensitive_layers

def split_layernorm_fixed(model_path, output_path):
    """Load an ONNX model, replace every LayerNormalization node with an
    equivalent sub-graph of primitive ops, save the result, and emit the
    matching TensorRT config and build script.

    BUGFIX: previously a LayerNorm node whose replacement could not be
    built (unknown input shape) was removed from the graph with NO
    substitute, leaving a dangling edge and a broken model. Such nodes
    are now kept unchanged. The rebuilt graph also preserves the original
    value_info so inferred shape information is not lost.
    """
    print(f"加载模型: {model_path}")
    model = onnx.load(model_path)

    # Analyze the structure and collect FP16-sensitive layers.
    sensitive_layers = analyze_model_structure(model)

    layernorm_nodes = find_layernorm_nodes(model)
    print(f"\n找到 {len(layernorm_nodes)} 个LayerNorm节点")

    if not layernorm_nodes:
        print("未找到LayerNorm节点，模型无需修改")
        # Still emit the TensorRT artifacts for the unmodified model.
        generate_tensorrt_config(sensitive_layers, output_path)
        generate_tensorrt_build_script(sensitive_layers, model_path, os.path.dirname(output_path))
        return

    new_initializers = list(model.graph.initializer)
    layernorm_node_names = {node.name for node in layernorm_nodes}

    # Build the replacement sub-graph for each LayerNorm node.
    layernorm_replacements = {}
    for i, node in enumerate(layernorm_nodes):
        print(f"\n处理 LayerNorm 节点 {i+1}: {node.name}")
        replacement_nodes, replacement_initializers = create_fixed_layernorm_replacement(node, model)
        if replacement_nodes:
            layernorm_replacements[node.name] = replacement_nodes
            new_initializers.extend(replacement_initializers)
            print(f"  创建了 {len(replacement_nodes)} 个替代节点")
        else:
            print(f"  警告: 无法为节点 {node.name} 创建替代节点")

    # Rebuild the node list, splicing replacements in at the original
    # position so topological order is preserved.
    final_nodes = []
    for node in model.graph.node:
        if node.name in layernorm_node_names:
            if node.name in layernorm_replacements:
                print(f"移除原始节点: {node.name}")
                final_nodes.extend(layernorm_replacements[node.name])
            else:
                # Replacement failed — keep the original node so the
                # graph stays connected (previously it was dropped).
                print(f"保留原始节点（拆分失败）: {node.name}")
                final_nodes.append(node)
        else:
            final_nodes.append(node)

    # Assemble the new model; carry over value_info to retain shape data.
    new_graph = helper.make_graph(
        final_nodes,
        model.graph.name,
        model.graph.input,
        model.graph.output,
        new_initializers,
        value_info=list(model.graph.value_info)
    )
    new_model = helper.make_model(
        new_graph,
        producer_name="fixed_layernorm_splitter",
        producer_version="1.0"
    )

    # Keep the original opset/IR versions so ops keep the same semantics.
    new_model.opset_import[0].version = model.opset_import[0].version
    new_model.ir_version = model.ir_version

    print(f"\n保存拆分后的模型: {output_path}")
    onnx.save(new_model, output_path)

    # Validate; on failure, run shape inference and retry once.
    try:
        onnx.checker.check_model(new_model)
        print("✓ 新模型验证通过")
    except Exception as e:
        print(f"⚠ 新模型验证失败: {e}")
        print("尝试修复拓扑排序问题...")
        try:
            import onnx.shape_inference as shape_inference
            inferred_model = shape_inference.infer_shapes(new_model)
            onnx.save(inferred_model, output_path)
            onnx.checker.check_model(inferred_model)
            print("✓ 修复后的模型验证通过")
        except Exception as e2:
            print(f"⚠ 修复失败: {e2}")

    print(f"\n模型统计:")
    print(f"原始模型节点数: {len(model.graph.node)}")
    print(f"新模型节点数: {len(final_nodes)}")
    total_new_nodes = sum(len(nodes) for nodes in layernorm_replacements.values())
    print(f"新增节点数: {total_new_nodes}")
    # Only successfully replaced nodes were actually removed.
    print(f"移除节点数: {len(layernorm_replacements)}")

    # Emit the TensorRT config and build script for the split model.
    generate_tensorrt_config(sensitive_layers, output_path)
    generate_tensorrt_build_script(sensitive_layers, model_path, os.path.dirname(output_path))

def main():
    """CLI entry point: parse arguments, then run analysis-only mode or
    the full LayerNorm split pipeline."""
    parser = argparse.ArgumentParser(description='修复版ONNX LayerNorm算子拆分')
    parser.add_argument('--input', '-i', default='co_detr_optimized.onnx',
                       help='输入ONNX模型文件路径')
    parser.add_argument('--output', '-o', default='co_detr_layernorm_split_fixed.onnx',
                       help='输出ONNX模型文件路径')
    parser.add_argument('--analyze-only', action='store_true',
                       help='仅分析模型结构，不进行拆分')
    args = parser.parse_args()

    # Guard clause: bail out early when the input model is missing.
    if not os.path.exists(args.input):
        print(f"错误: 输入文件不存在: {args.input}")
        return

    if args.analyze_only:
        # Analysis-only: report structure and emit TensorRT artifacts
        # without touching the model.
        model = onnx.load(args.input)
        sensitive = analyze_model_structure(model)
        generate_tensorrt_config(sensitive, args.output)
        generate_tensorrt_build_script(sensitive, args.input, os.path.dirname(args.output))
        return

    # Default mode: perform the LayerNorm decomposition.
    split_layernorm_fixed(args.input, args.output)
    print("\nLayerNorm拆分完成!")


if __name__ == "__main__":
    main()