#!/bin/bash

# Jetson Nano TensorRT environment setup script
# For YOLOv5 + TensorRT inference

set -e

echo "🚀 Jetson Nano TensorRT环境配置脚本"
echo "=========================================="

# ANSI color codes (literal backslash escapes, interpreted by `echo -e`).
# readonly: these are constants and must never be reassigned below.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m' # No Color (reset)

# 函数定义
# Print an informational message (blue, info icon) to stdout.
print_info() {
    local msg=$1
    echo -e "${BLUE}ℹ️  ${msg}${NC}"
}

# Print a success message (green, check mark) to stdout.
print_success() {
    local msg=$1
    echo -e "${GREEN}✅ ${msg}${NC}"
}

# Print a warning message (yellow, warning sign) to stdout.
print_warning() {
    local msg=$1
    echo -e "${YELLOW}⚠️  ${msg}${NC}"
}

# Print an error message (red, cross mark) to STDERR, so failures stay
# visible even when the script's stdout is piped or redirected to a log.
print_error() {
    echo -e "${RED}❌ $1${NC}" >&2
}

# 检查是否在Jetson设备上
check_jetson() {
    print_info "检查Jetson设备..."
    if [ -f /etc/nv_tegra_release ]; then
        jetson_info=$(cat /etc/nv_tegra_release)
        print_success "检测到Jetson设备: $jetson_info"
        
        # 检测JetPack版本
        if dpkg -l | grep -q nvidia-jetpack; then
            jetpack_version=$(dpkg -l | grep nvidia-jetpack | awk '{print $3}')
            print_info "JetPack版本: $jetpack_version"
        fi
    else
        print_warning "未检测到Jetson设备，继续安装..."
    fi
}

# 检查CUDA环境
check_cuda() {
    print_info "检查CUDA环境..."
    
    if command -v nvcc &> /dev/null; then
        cuda_version=$(nvcc --version | grep "release" | awk '{print $6}' | cut -d',' -f1)
        print_success "CUDA版本: $cuda_version"
        
        # 检查CUDA库路径
        if [ -d "/usr/local/cuda" ]; then
            print_success "CUDA安装路径: /usr/local/cuda"
        fi
    else
        print_error "CUDA未安装或未正确配置"
        print_info "请安装JetPack或CUDA Toolkit"
        exit 1
    fi
}

# 检查TensorRT
check_tensorrt() {
    print_info "检查TensorRT环境..."
    
    # 检查TensorRT库
    if ls /usr/lib/aarch64-linux-gnu/libnvinfer* 1> /dev/null 2>&1; then
        tensorrt_version=$(dpkg -l | grep tensorrt | head -1 | awk '{print $3}')
        print_success "TensorRT已安装: $tensorrt_version"
    else
        print_error "TensorRT未安装"
        print_info "请通过JetPack安装TensorRT"
        exit 1
    fi
}

# 配置虚拟环境
setup_venv() {
    print_info "配置虚拟环境..."
    
    # 返回工作空间根目录
    cd /home/wuyin/ros_ws
    
    # 检查虚拟环境
    if [ ! -d ".venv" ]; then
        print_info "创建虚拟环境..."
        python3 -m venv .venv
    fi
    
    # 激活虚拟环境
    source .venv/bin/activate
    print_success "虚拟环境已激活"
    
    # 升级pip
    print_info "升级pip..."
    pip install --upgrade pip
}

# 安装PyTorch for Jetson
install_pytorch() {
    print_info "安装PyTorch for Jetson..."
    
    # 检查架构
    arch=$(uname -m)
    if [ "$arch" != "aarch64" ]; then
        print_warning "非ARM64架构，使用标准PyTorch安装"
        pip install torch torchvision torchaudio
        return
    fi
    
    # Jetson Nano适用的PyTorch版本
    print_info "下载Jetson优化的PyTorch..."
    
    # PyTorch 1.13.0 for Jetson (Python 3.8+)
    torch_wheel_url="https://developer.download.nvidia.com/compute/redist/jp/v50/pytorch/torch-1.13.0a0+d0d6b1f2.nv22.09-cp38-cp38-linux_aarch64.whl"
    
    # 检查Python版本
    python_version=$(python3 -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')")
    
    if [ "$python_version" = "3.10" ]; then
        # 对于Python 3.10，尝试安装兼容版本
        print_info "Python 3.10检测到，尝试安装兼容的PyTorch..."
        pip install torch==1.13.1 torchvision==0.14.1 --index-url https://download.pytorch.org/whl/cu117
    else
        # 尝试直接安装wheel
        print_info "尝试安装预编译的PyTorch wheel..."
        pip install "$torch_wheel_url" || {
            print_warning "预编译wheel安装失败，尝试从源码安装..."
            pip install torch torchvision torchaudio
        }
    fi
}

# 安装依赖
install_dependencies() {
    print_info "安装项目依赖..."
    
    cd /home/wuyin/ros_ws/src/yolov5-ukf-imm
    
    # 安装基础依赖
    pip install -r requirements.txt
    
    # 安装额外的Jetson依赖
    print_info "安装Jetson特定依赖..."
    pip install opencv-python
    pip install numpy>=1.18.5
    pip install pillow
    pip install pyyaml
    pip install scipy
    pip install matplotlib
    pip install tqdm
    
    # 尝试安装TensorRT Python绑定
    print_info "配置TensorRT Python绑定..."
    
    # 检查现有的TensorRT Python包
    if python3 -c "import tensorrt" 2>/dev/null; then
        print_success "TensorRT Python绑定已可用"
    else
        print_warning "TensorRT Python绑定不可用，尝试安装..."
        
        # 查找TensorRT wheel文件
        tensorrt_wheel=$(find /opt/nvidia -name "tensorrt-*-py3-none-any.whl" 2>/dev/null | head -1)
        
        if [ -n "$tensorrt_wheel" ]; then
            print_info "安装TensorRT wheel: $tensorrt_wheel"
            pip install "$tensorrt_wheel"
        else
            print_warning "未找到TensorRT wheel文件，请手动安装"
        fi
    fi
}

# 配置环境变量
setup_environment() {
    print_info "配置环境变量..."
    
    # 创建环境配置文件
    cat > /home/wuyin/ros_ws/.venv/bin/setup_tensorrt_env << 'EOF'
#!/bin/bash
# TensorRT环境配置

# CUDA路径
export CUDA_HOME=/usr/local/cuda
export PATH=$CUDA_HOME/bin:$PATH
export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH

# TensorRT路径
export TENSORRT_HOME=/usr/lib/aarch64-linux-gnu
export LD_LIBRARY_PATH=$TENSORRT_HOME:$LD_LIBRARY_PATH

# Python路径
export PYTHONPATH=$PYTHONPATH:/usr/lib/python3/dist-packages

echo "✅ TensorRT环境已配置"
EOF
    
    chmod +x /home/wuyin/ros_ws/.venv/bin/setup_tensorrt_env
    print_success "环境配置脚本已创建"
}

# 测试安装
test_installation() {
    print_info "测试安装..."
    
    cd /home/wuyin/ros_ws/src/yolov5-ukf-imm
    
    # 加载环境
    source /home/wuyin/ros_ws/.venv/bin/setup_tensorrt_env
    
    # 测试PyTorch
    print_info "测试PyTorch..."
    python3 -c "
import torch
print(f'PyTorch版本: {torch.__version__}')
print(f'CUDA可用: {torch.cuda.is_available()}')
if torch.cuda.is_available():
    print(f'CUDA设备数量: {torch.cuda.device_count()}')
    print(f'当前CUDA设备: {torch.cuda.get_device_name(0)}')
" && print_success "PyTorch测试通过" || print_error "PyTorch测试失败"
    
    # 测试TensorRT
    print_info "测试TensorRT..."
    python3 -c "
try:
    import tensorrt as trt
    print(f'TensorRT版本: {trt.__version__}')
    print('TensorRT测试通过')
except ImportError as e:
    print(f'TensorRT导入失败: {e}')
" && print_success "TensorRT测试通过" || print_warning "TensorRT Python绑定不可用"
    
    # 测试OpenCV
    print_info "测试OpenCV..."
    python3 -c "
import cv2
print(f'OpenCV版本: {cv2.__version__}')
print('OpenCV测试通过')
" && print_success "OpenCV测试通过" || print_error "OpenCV测试失败"
}

# 创建启动脚本
create_launcher() {
    print_info "创建启动脚本..."
    
    cat > /home/wuyin/ros_ws/src/yolov5-ukf-imm/run_tensorrt_inference.sh << 'EOF'
#!/bin/bash
# TensorRT推理启动脚本

echo "🚀 启动TensorRT推理系统"

# 进入项目目录
cd /home/wuyin/ros_ws/src/yolov5-ukf-imm

# 激活虚拟环境
source ../../.venv/bin/activate

# 加载TensorRT环境
source ../../.venv/bin/setup_tensorrt_env

# 检查模型文件
if [ ! -f "weights/yolo11n.engine" ]; then
    echo "❌ TensorRT引擎文件不存在: weights/yolo11n.engine"
    exit 1
fi

# 启动推理
echo "📷 启动摄像头推理..."
python3 simple_tensorrt_inference.py --engine weights/yolo11n.engine --camera 0 --show-fps

echo "🎉 推理系统已退出"
EOF
    
    chmod +x /home/wuyin/ros_ws/src/yolov5-ukf-imm/run_tensorrt_inference.sh
    print_success "启动脚本已创建: run_tensorrt_inference.sh"
}

# 主函数
main() {
    echo
    print_info "开始配置TensorRT环境..."
    echo
    
    check_jetson
    check_cuda
    check_tensorrt
    setup_venv
    install_pytorch
    install_dependencies
    setup_environment
    test_installation
    create_launcher
    
    echo
    print_success "TensorRT环境配置完成！"
    echo
    print_info "使用方法:"
    echo "1. 运行推理: ./run_tensorrt_inference.sh"
    echo "2. 或手动运行: python3 simple_tensorrt_inference.py"
    echo "3. 高级系统: python3 camera_detect_advanced.py"
    echo
    print_info "如果遇到问题，请检查:"
    echo "- JetPack是否正确安装"
    echo "- CUDA和TensorRT环境是否配置"
    echo "- 虚拟环境是否正确激活"
    echo
}

# Entry point: invoke main, forwarding all command-line arguments.
main "$@"