#!/bin/bash
# Auto-generated TensorRT build script for Co-DETR.
# Builds an FP16 engine and an FP32 reference engine, then reports the
# layers that are numerically sensitive in FP16 and should stay FP32.

# TensorRT installation layout; adjust TENSORRT_ROOT for other machines.
export TENSORRT_ROOT=/data2/xd/TensorRT-8.5.1.7.Linux.x86_64-gnu.cuda-11.8.cudnn8.6/TensorRT-8.5.1.7
export PATH="$PATH:$TENSORRT_ROOT/bin"
# ${LD_LIBRARY_PATH:-} keeps this safe when the variable is unset (e.g. set -u).
export LD_LIBRARY_PATH="$TENSORRT_ROOT/lib:${LD_LIBRARY_PATH:-}"

MODEL_PATH="co_detr_sim.onnx"
# Fix: this used to be the empty string, which made `mkdir -p ""` fail and
# sent the engine files to the filesystem root ("/co_detr_*.plan").
OUTPUT_DIR="./engines"

echo "=== 构建Co-DETR TensorRT引擎 ==="
echo "模型文件: $MODEL_PATH"
echo "输出目录: $OUTPUT_DIR"

# Create the output directory (quoted so paths with spaces survive; `--`
# guards against a name starting with "-").
mkdir -p -- "$OUTPUT_DIR"

# Build the FP16 engine.
# NOTE(review): despite the header comment, no --layerPrecisions /
# --precisionConstraints flags are passed, so the "sensitive layers"
# reported below are NOT actually pinned to FP32 by this command — add
# e.g. `--precisionConstraints=obey --layerPrecisions=<name>:fp32` if
# true mixed precision is required.
echo "构建FP16引擎（敏感层FP32）..."
# Test the command directly instead of inspecting $? afterwards; all
# variable expansions are quoted.
if trtexec \
    --onnx="$MODEL_PATH" \
    --saveEngine="$OUTPUT_DIR/co_detr_mixed_precision.plan" \
    --memPoolSize=workspace:16000 \
    --fp16 \
    --noBuilderCache \
    --verbose \
    --dumpProfile \
    --dumpLayerInfo; then
    echo "✅ 混合精度引擎构建成功!"
else
    echo "❌ 混合精度引擎构建失败!"
fi

# Build a pure-FP32 engine as the accuracy baseline for comparison.
echo "构建FP32引擎..."
# Test the command directly instead of inspecting $? afterwards; all
# variable expansions are quoted.
if trtexec \
    --onnx="$MODEL_PATH" \
    --saveEngine="$OUTPUT_DIR/co_detr_fp32.plan" \
    --memPoolSize=workspace:16000 \
    --noBuilderCache \
    --verbose \
    --dumpProfile \
    --dumpLayerInfo; then
    echo "✅ FP32引擎构建成功!"
else
    echo "❌ FP32引擎构建失败!"
fi

echo "=== 构建完成 ==="
echo "检查生成的引擎文件..."
# Quote the variable but leave *.plan outside the quotes so the glob still
# expands; fall back to a message when nothing matched.
ls -la "$OUTPUT_DIR"/*.plan 2>/dev/null || echo "没有生成引擎文件"

# ---------------------------------------------------------------------------
# Sensitive-layer report: layers that are numerically unstable in FP16 and
# should be kept in FP32. The original script emitted 190 hand-duplicated
# echo blocks; the list is fully regular, so it is generated here instead
# (identical output, same order).
# NOTE(review): these layers are only *reported* — they are not passed to
# trtexec via --layerPrecisions, so the builds above do not actually pin
# them to FP32.
# ---------------------------------------------------------------------------
# Fix: the original `echo "\n=== ... ==="` printed a literal "\n" because
# bash echo does not interpret escapes without -e; printf is used instead.
printf '\n=== 敏感层信息 ===\n'

# Print one sensitive-layer record (name, type, reason, solution, blank line).
#   $1 - layer name
#   $2 - layer type: Softmax | ReduceSum | ReduceMean | Pow | Sqrt | Log
print_sensitive_layer() {
    local name=$1 type=$2 reason solution
    case "$type" in
        Softmax)
            reason="指数运算在FP16下容易溢出，导致数值不稳定"
            solution="保持为FP32精度或使用数值稳定的实现"
            ;;
        ReduceSum)
            reason="求和运算在FP16下可能累积误差"
            solution="保持为FP32精度"
            ;;
        ReduceMean)
            reason="均值计算在FP16下可能累积误差"
            solution="保持为FP32精度"
            ;;
        Pow)
            reason="幂运算在FP16下精度损失"
            solution="保持为FP32精度"
            ;;
        Sqrt)
            reason="平方根在FP16下可能出现数值不稳定"
            solution="保持为FP32精度"
            ;;
        Log)
            reason="对数函数在FP16下精度损失"
            solution="保持为FP32精度"
            ;;
    esac
    echo "层名称: $name"
    echo "  类型: $type"
    echo "  原因: $reason"
    echo "  解决方案: $solution"
    echo ""
}

# A decomposed LayerNormalization contributes four sensitive ops, always in
# this order: reduce_mean, pow, reduce_mean_var, sqrt.
#   $1 - layer-name prefix (e.g. ".../norms.0")
print_layernorm() {
    local prefix=$1
    print_sensitive_layer "$prefix/LayerNormalization_reduce_mean" "ReduceMean"
    print_sensitive_layer "$prefix/LayerNormalization_pow" "Pow"
    print_sensitive_layer "$prefix/LayerNormalization_reduce_mean_var" "ReduceMean"
    print_sensitive_layer "$prefix/LayerNormalization_sqrt" "Sqrt"
}

# Encoder: 6 layers, each with attention Softmax + ReduceSum and two LayerNorms.
for i in 0 1 2 3 4 5; do
    enc="/transformer/encoder/layers.$i"
    print_sensitive_layer "$enc/attentions.0/Softmax" "Softmax"
    print_sensitive_layer "$enc/attentions.0/ReduceSum" "ReduceSum"
    print_layernorm "$enc/norms.0"
    print_layernorm "$enc/norms.1"
done
print_layernorm "/transformer/enc_output_norm"

# Decoder: a Log node precedes layers 0-4 (named Log, Log_1 .. Log_4);
# layers.5 has none. Each layer then contributes: self-attn Softmax,
# norms.0, cross-attn Softmax + ReduceSum, norms.1, norms.2.
for i in 0 1 2 3 4 5; do
    dec="/transformer/decoder/layers.$i"
    if [ "$i" -eq 0 ]; then
        print_sensitive_layer "/transformer/decoder/Log" "Log"
    elif [ "$i" -le 4 ]; then
        print_sensitive_layer "/transformer/decoder/Log_$i" "Log"
    fi
    print_sensitive_layer "$dec/attentions.0/attn/Softmax" "Softmax"
    print_layernorm "$dec/norms.0"
    print_sensitive_layer "$dec/attentions.1/Softmax" "Softmax"
    print_sensitive_layer "$dec/attentions.1/ReduceSum" "ReduceSum"
    print_layernorm "$dec/norms.1"
    print_layernorm "$dec/norms.2"
done

# Trailing decoder norms (norm_5 down to norm_1, then norm) and the final
# decoder Log node.
for n in 5 4 3 2 1; do
    print_layernorm "/transformer/decoder/norm_$n"
done
print_layernorm "/transformer/decoder/norm"
print_sensitive_layer "/transformer/decoder/Log_5" "Log"

# Top-level Log nodes, listed from Log_5 down to Log.
for n in 5 4 3 2 1; do
    print_sensitive_layer "/Log_$n" "Log"
done
print_sensitive_layer "/Log" "Log"
