#!/bin/bash
# Build debug TensorRT engines (FP16 and FP32) for the grid_sampler plugin
# from debug_gridsample.onnx. Requires trtexec from TENSORRT_ROOT and the
# compiled plugin shared library.

# Error on unset variables; a pipeline fails if any stage fails.
# NOTE: 'set -e' is deliberately omitted — on FP16 build failure the script
# must fall through to the FP32 build and the ONNX inspection below.
set -u -o pipefail

# TensorRT installation paths
export TENSORRT_ROOT=/data2/xd/TensorRT-8.5.1.7.Linux.x86_64-gnu.cuda-11.8.cudnn8.6/TensorRT-8.5.1.7
export PATH="$PATH:$TENSORRT_ROOT/bin"
# ${LD_LIBRARY_PATH:-} keeps this safe under 'set -u' when the var is unset
export LD_LIBRARY_PATH="$TENSORRT_ROOT/lib:${LD_LIBRARY_PATH:-}"

# Path to the custom grid_sampler TensorRT plugin
readonly PLUGIN_PATH="/data2/xd/Co-DETR-TensorRT/co_detr_ros_ws/src/co_detr_ros/grid_sampler/build/libtrtgrid_sampler.so"

# Abort early if the plugin library is missing (diagnostics go to stderr)
if [[ ! -f "$PLUGIN_PATH" ]]; then
    echo "错误: 插件文件 $PLUGIN_PATH 不存在" >&2
    exit 1
fi

# Abort if the debug ONNX model is missing (diagnostics go to stderr)
readonly ONNX_PATH="/data2/xd/Co-DETR-TensorRT/nouse/debug_gridsample.onnx"
if [[ ! -f "$ONNX_PATH" ]]; then
    echo "错误: ONNX文件 $ONNX_PATH 不存在" >&2
    exit 1
fi

echo "=== 使用debug_gridsample.onnx构建引擎 ==="
echo "ONNX文件: $ONNX_PATH"
echo "插件路径: $PLUGIN_PATH"

# Show the plugin file's permissions and dynamic-library dependencies;
# unresolved deps in the 'ldd' output explain plugin load failures in trtexec.
echo "检查插件文件..."
ls -la "$PLUGIN_PATH"
echo "检查插件依赖..."
ldd "$PLUGIN_PATH"

# Build the FP16 engine. The trtexec exit status directly drives the
# success/failure branch (no fragile '$?' check).
echo "构建FP16引擎..."
mkdir -p models  # --saveEngine fails if the output directory does not exist
if trtexec \
    --onnx="$ONNX_PATH" \
    --saveEngine=models/debug_gridsample_fp16.plan \
    --memPoolSize=workspace:8000 \
    --fp16 \
    --plugins="$PLUGIN_PATH" \
    --tacticSources=+CUBLAS,+CUBLAS_LT,+CUDNN \
    --noBuilderCache \
    --verbose \
    --dumpProfile \
    --dumpLayerInfo; then
    echo "✅ FP16引擎构建成功!"
    echo "引擎文件: models/debug_gridsample_fp16.plan"
else
    echo "❌ FP16引擎构建失败!"
    echo "尝试构建FP32引擎..."
fi

# Build the FP32 engine. This is always attempted (even when FP16 succeeded)
# so both precisions are available for comparison.
echo "构建FP32引擎..."
mkdir -p models  # --saveEngine fails if the output directory does not exist
if trtexec \
    --onnx="$ONNX_PATH" \
    --saveEngine=models/debug_gridsample_fp32.plan \
    --memPoolSize=workspace:8000 \
    --plugins="$PLUGIN_PATH" \
    --tacticSources=+CUBLAS,+CUBLAS_LT,+CUDNN \
    --noBuilderCache \
    --verbose \
    --dumpProfile \
    --dumpLayerInfo; then
    echo "✅ FP32引擎构建成功!"
    echo "引擎文件: models/debug_gridsample_fp32.plan"
else
    echo "❌ FP32引擎构建失败!"
    echo "检查ONNX文件格式..."
    # Inspect the ONNX model to help diagnose parser failures.
    # The path is passed as argv[1] instead of being interpolated into the
    # script text, and the heredoc delimiter is quoted so the Python source
    # needs no shell escaping.
    python3 - "$ONNX_PATH" <<'PY'
import sys
import onnx
try:
    model = onnx.load(sys.argv[1])
    print('ONNX文件格式正确')
    print('IR version:', model.ir_version)
    print('Opset version:', model.opset_import[0].version)
    print('Producer:', model.producer_name)
    print('Inputs:')
    for inp in model.graph.input:
        print('  ', inp.name, ':', [d.dim_value for d in inp.type.tensor_type.shape.dim])
    print('Outputs:')
    for out in model.graph.output:
        print('  ', out.name, ':', [d.dim_value for d in out.type.tensor_type.shape.dim])
    print('Nodes:')
    for node in model.graph.node:
        print('  ', node.name, ':', node.op_type)
except Exception as e:
    print('ONNX文件检查失败:', e)
PY
fi

# Summarize which engine files (if any) were produced.
echo "=== 引擎构建完成 ==="
echo "检查生成的引擎文件..."
if ! ls -la models/debug_gridsample_*.plan 2>/dev/null; then
    echo "没有生成引擎文件"
fi