#!/bin/bash
#===============================================================================
# MPI性能诊断自动化测试脚本（Slurm版本）
# 功能：通过Slurm调度系统自动运行不同核数的MPI性能测试，收集性能数据并生成报告
#
# 使用方法：
#   ./run_slurm_mpi_scaling_test.sh
#
# 说明：
#   - 脚本会自动生成Slurm作业脚本并提交
#   - 测试多个核数配置的MPI性能
#   - 生成详细的性能报告和诊断建议
#   - 帮助判断是硬件问题还是算法问题
#===============================================================================

# ANSI color codes for colored terminal output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Absolute path of the directory containing this script; all work happens there
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "${SCRIPT_DIR}"

# Core counts to test (adjust to your cluster).
# Suggested sweep: small to large, to observe the scaling curve.
# CORE_COUNTS=(9 18 36 72 144 288 576)
CORE_COUNTS=(128)

# Output locations: results directory, generated Slurm scripts, aggregate
# log file, and CSV summary of extracted metrics
RESULT_DIR="${SCRIPT_DIR}/results"
SLURM_DIR="${SCRIPT_DIR}/slurm"
RESULT_FILE="${RESULT_DIR}/mpi_performance_slurm.txt"
SUMMARY_FILE="${RESULT_DIR}/scaling_summary_slurm.csv"

# Slurm submission parameters (site-specific; edit for your machine)
PARTITION="intel"  # partition name
ACCOUNT=""         # account/charge code (leave empty if not required)

# Create output directories ('slog' holds Slurm stdout/stderr files)
mkdir -p "${RESULT_DIR}"
mkdir -p "${SLURM_DIR}/slog"

#===============================================================================
# 函数定义
#===============================================================================

# Print an informational message to stdout, prefixed with a blue [INFO] tag.
# %b interprets backslash escapes, matching the original 'echo -e' behavior.
print_info() {
    printf '%b[INFO]%b %b\n' "${BLUE}" "${NC}" "$1"
}

# Print a success message to stdout, prefixed with a green [SUCCESS] tag.
print_success() {
    printf '%b[SUCCESS]%b %b\n' "${GREEN}" "${NC}" "$1"
}

# Print a warning message to stdout, prefixed with a yellow [WARNING] tag.
print_warning() {
    printf '%b[WARNING]%b %b\n' "${YELLOW}" "${NC}" "$1"
}

# Print an error message, prefixed with a red [ERROR] tag.
# NOTE: goes to stdout (not stderr), matching the script's original behavior.
print_error() {
    printf '%b[ERROR]%b %b\n' "${RED}" "${NC}" "$1"
}

# Verify that the mpiTest benchmark binary exists next to this script.
# Exits the whole script with status 1 (after printing build instructions)
# when the binary is missing; returns 0 otherwise.
check_executable() {
    local exe="${SCRIPT_DIR}/mpiTest"
    [ -f "$exe" ] && return 0
    print_error "未找到可执行文件 mpiTest"
    print_info "请先编译程序: mpicxx -O3 -march=native -fopenmp -o mpiTest mpiTest.cpp -lm"
    exit 1
}

#######################################
# Generate a Slurm job script for one core count, submit it, and block until
# the job drops a completion flag file, then collect its log.
# Globals:
#   SCRIPT_DIR, RESULT_DIR, SLURM_DIR, RESULT_FILE, PARTITION, ACCOUNT (read)
# Arguments:
#   $1 - number of MPI ranks to run
# Returns:
#   0 when the per-run log was collected, 1 on failure
#######################################
run_slurm_test() {
    local nproc=$1
    local flag_file="${RESULT_DIR}/.job_mpi_${nproc}_complete"
    local slurm_script="${SLURM_DIR}/job_mpi_${nproc}.slurm"
    local log_file="${RESULT_DIR}/mpi_test_${nproc}cores.log"

    print_info "=========================================="
    print_info "准备提交 ${nproc} 核 Slurm 作业"
    print_info "=========================================="

    # Remove a stale completion flag left over from a previous run
    rm -f "$flag_file"

    print_info "生成 Slurm 作业脚本: ${slurm_script}"

    # Node layout: assume 128 physical cores per node — TODO confirm for site
    local nodes=1
    local ntasks_per_node=$nproc
    if [ "$nproc" -gt 128 ]; then
        nodes=$(( (nproc + 127) / 128 ))  # ceiling division
        ntasks_per_node=128
    fi

    # Header: unquoted delimiter, expanded NOW so paths/counts are baked in.
    # FIX: nodes/ntasks_per_node were computed but never emitted as #SBATCH
    # directives, so multi-node runs relied on Slurm's arbitrary task packing.
    cat > "$slurm_script" <<EOF
#!/bin/bash

#SBATCH --job-name=mpi_${nproc}
#SBATCH --partition=${PARTITION}
#SBATCH -n ${nproc}
#SBATCH --nodes=${nodes}
#SBATCH --ntasks-per-node=${ntasks_per_node}
#SBATCH --output=${SLURM_DIR}/slog/mpi_${nproc}.out
#SBATCH --error=${SLURM_DIR}/slog/mpi_${nproc}.err
EOF

    # Optional accounting directive
    if [ -n "$ACCOUNT" ]; then
        echo "#SBATCH --account=${ACCOUNT}" >> "$slurm_script"
    fi

    # Runtime section: QUOTED delimiter, so ${SLURM_*}, $(date) etc. are
    # expanded when the job executes, not when this script generates it.
    cat >> "$slurm_script" <<'EOF'

ulimit -s unlimited
ulimit -l unlimited

module purge
source $HOME/data/jiangxu/etc/bashrc_of2406

# MPI/OpenMP runtime environment
export OMP_NUM_THREADS=1
export I_MPI_PIN_DOMAIN=core
export UCX_RC_VERBS_TX_MIN_SGE=2
export UCX_UD_VERBS_TX_MIN_SGE=1

# Record start time
start_time=$(date +%s)
echo "=========================================="
echo "MPI性能测试 - ${SLURM_NTASKS} 核"
echo "开始时间: $(date)"
echo "节点列表: ${SLURM_JOB_NODELIST}"
echo "=========================================="
echo ""

echo "开始运行 mpiTest..."
EOF

    # The cd target is expanded at generation time: absolute path guaranteed
    echo "cd ${SCRIPT_DIR}" >> "$slurm_script"
    # FIX: '--bind-to core --map-by socket:PE=1' previously appeared AFTER
    # './mpiTest', which passed them as arguments to the program instead of
    # to mpirun; launcher options must precede the executable to take effect.
    echo "mpirun -np \${SLURM_NTASKS} -x PATH -x LD_LIBRARY_PATH --bind-to core --map-by socket:PE=1 ./mpiTest > ${log_file} 2>&1" >> "$slurm_script"

    # Tail section: unquoted delimiter with escaped \$ so that only
    # ${flag_file} is expanded at generation time.
    cat >> "$slurm_script" <<EOF

exit_code=\$?

# Record end time
end_time=\$(date +%s)
wall_time=\$((end_time - start_time))

echo ""
echo "=========================================="
echo "测试完成！"
echo "结束时间: \$(date)"
echo "墙钟时间: \${wall_time} 秒"
echo "退出代码: \${exit_code}"
echo "=========================================="

# Create the completion flag file watched by the submitting script
touch ${flag_file}

exit \${exit_code}
EOF

    chmod +x "$slurm_script"

    # Submit the job and try to capture its ID (split decl/assignment so the
    # pipeline's status isn't masked by 'local')
    print_info "提交 Slurm 作业..."
    local job_id
    job_id=$(sbatch "$slurm_script" 2>&1 | grep -oP 'Submitted batch job \K\d+')

    if [ -n "$job_id" ]; then
        print_success "作业已提交，Job ID: ${job_id}"
    else
        print_warning "作业已提交（无法获取 Job ID）"
    fi

    # Poll for the completion flag every 10 seconds
    print_info "等待作业完成（每10秒检测一次）..."
    local wait_count=0
    local gone_count=0
    while [ ! -f "$flag_file" ]; do
        sleep 10
        wait_count=$((wait_count + 1))
        if [ $((wait_count % 6)) -eq 0 ]; then
            print_info "已等待 $((wait_count * 10)) 秒..."
            # Cross-check the queue so a dead job can't hang us forever
            if [ -n "$job_id" ]; then
                if ! squeue -j "$job_id" > /dev/null 2>&1; then
                    print_warning "作业 ${job_id} 不在队列中，可能已完成或失败"
                    # FIX: the original loop could spin forever if the job
                    # died without touching the flag file; give up after the
                    # job has been missing from the queue for three checks
                    # (grace period for filesystem sync of the flag).
                    gone_count=$((gone_count + 1))
                    if [ "$gone_count" -ge 3 ]; then
                        print_error "作业 ${job_id} 已退出但未生成完成标志，停止等待"
                        return 1
                    fi
                fi
            fi
        fi
    done

    print_success "${nproc}核作业已完成！"

    # Clean up the flag for the next run
    rm -f "$flag_file"

    # Collect the per-run log into the aggregate result file
    if [ -f "${log_file}" ]; then
        print_info "日志文件: ${log_file}"

        echo "========================================" >> "${RESULT_FILE}"
        echo "测试配置: ${nproc} 核" >> "${RESULT_FILE}"
        echo "完成时间: $(date)" >> "${RESULT_FILE}"

        # Pull the wall-clock time out of the Slurm stdout file, if present
        local slurm_out="${SLURM_DIR}/slog/mpi_${nproc}.out"
        if [ -f "$slurm_out" ]; then
            local wall_time
            wall_time=$(grep "墙钟时间:" "$slurm_out" | awk '{print $2}')
            if [ -n "$wall_time" ]; then
                echo "墙钟时间: ${wall_time} 秒" >> "${RESULT_FILE}"
            fi
        fi

        echo "========================================" >> "${RESULT_FILE}"
        cat "${log_file}" >> "${RESULT_FILE}"
        echo "" >> "${RESULT_FILE}"

        return 0
    else
        print_error "日志文件不存在: ${log_file}"
        return 1
    fi
}

# Parse every per-core-count log file and distill the key performance
# metrics into one CSV row per configuration. Missing metrics become "N/A".
# Globals: CORE_COUNTS, RESULT_DIR, SUMMARY_FILE (read/write)
generate_summary() {
    print_info "生成性能汇总表..."

    # CSV header row
    echo "核数,点对点延迟(us),点对点带宽(MB/s),内存带宽(GB/s),内存带宽差异率(%),负载不均衡度(%),并行效率(%),强扩展计算时间(s)" > "${SUMMARY_FILE}"

    local nc log p2p_row lat bw membw membwdiff imb eff ctime
    for nc in "${CORE_COUNTS[@]}"; do
        log="${RESULT_DIR}/mpi_test_${nc}cores.log"
        [ -f "$log" ] || continue

        # Point-to-point section: the last numeric row corresponds to the
        # largest message size; column 3 is latency, column 2 is bandwidth.
        p2p_row=$(grep -A 10 "测试1：点对点通信" "$log" | grep -E "^[0-9]+" | tail -1)
        lat=$(printf '%s\n' "$p2p_row" | awk '{print $3}')
        bw=$(printf '%s\n' "$p2p_row" | awk '{print $2}')

        # Memory-bandwidth metrics
        membw=$(grep "平均内存带宽:" "$log" | awk '{print $2}')
        membwdiff=$(grep "带宽差异率:" "$log" | awk '{print $2}' | sed 's/%//')

        # Load-balance metrics
        imb=$(grep "负载不均衡度:" "$log" | awk '{print $2}' | sed 's/%//')
        eff=$(grep "并行效率:" "$log" | awk '{print $2}' | sed 's/%//')

        # Strong-scaling compute time (last occurrence)
        ctime=$(grep "计算时间:" "$log" | tail -1 | awk '{print $2}')

        # Emit the CSV row, substituting N/A for anything not found
        printf '%s\n' "${nc},${lat:-N/A},${bw:-N/A},${membw:-N/A},${membwdiff:-N/A},${imb:-N/A},${eff:-N/A},${ctime:-N/A}" >> "${SUMMARY_FILE}"
    done

    print_success "汇总表已保存: ${SUMMARY_FILE}"
}

#######################################
# Render the CSV summary as a human-readable table file and echo it.
# FIX: the original piped the CSV into a while-loop that filled four arrays;
# the loop ran in a pipeline subshell so the arrays were discarded at once
# (ShellCheck SC2031) — and they were never read anyway. Dead code removed.
# Globals: RESULT_DIR, SUMMARY_FILE (read)
#######################################
generate_table() {
    local table_file="${RESULT_DIR}/mpi_performance_table.txt"

    print_info "生成格式化表格..."

    {
        echo "================================================================================"
        echo "MPI并行性能诊断测试结果表格"
        echo "================================================================================"
        echo "测试时间: $(date '+%Y-%m-%d %H:%M:%S')"
        echo "测试工具: mpiTest (MPI性能诊断工具)"
        echo ""
        echo "--------------------------------------------------------------------------------"
        echo "详细性能指标请查看CSV文件: ${SUMMARY_FILE}"
        echo "--------------------------------------------------------------------------------"
        echo ""

        # Align the CSV columns; fall back to the raw file if the util-linux
        # 'column' tool is not installed on this host
        if command -v column > /dev/null 2>&1; then
            column -t -s ',' "${SUMMARY_FILE}"
        else
            cat "${SUMMARY_FILE}"
        fi

        echo ""
        echo "================================================================================"
        echo ""
        echo "说明："
        echo "  - 点对点延迟：越低越好，< 2us 为优秀"
        echo "  - 点对点带宽：越高越好，取决于网络类型"
        echo "  - 内存带宽：鲲鹏920理论峰值 ~150 GB/s per socket"
        echo "  - 带宽差异率：< 10% 优秀，10-20% 可接受，> 20% 需检查NUMA配置"
        echo "  - 负载不均衡度：< 5% 优秀，5-10% 良好，> 10% 需优化"
        echo "  - 并行效率：> 90% 优秀，80-90% 良好，< 80% 需分析原因"
        echo ""
    } > "$table_file"

    # Echo the table to the terminal as well
    cat "$table_file"

    print_success "表格已保存到: ${table_file}"
}

#######################################
# Compute speedup, ideal speedup, and parallel efficiency per core count from
# the summary CSV and write a strong-scaling report.
# FIX: the original piped 'tail' into the while-loop, which therefore ran in
# a pipeline subshell — cores/times were ALWAYS empty afterwards and the
# report was never produced (ShellCheck SC2031). Process substitution keeps
# the loop in the current shell so the arrays survive.
# Globals: RESULT_DIR, SUMMARY_FILE (read)
#######################################
generate_scaling_analysis() {
    local analysis_file="${RESULT_DIR}/scaling_analysis.txt"

    print_info "生成扩展性分析..."

    # Collect (core count, compute time) pairs, skipping missing values
    local -a cores=()
    local -a times=()
    local core p2p_lat p2p_bw mem_bw mem_bw_diff load_imb par_eff comp_time

    while IFS=',' read -r core p2p_lat p2p_bw mem_bw mem_bw_diff load_imb par_eff comp_time; do
        if [ "$comp_time" != "N/A" ] && [ -n "$comp_time" ]; then
            cores+=("$core")
            times+=("$comp_time")
        fi
    done < <(tail -n +2 "${SUMMARY_FILE}")

    # Only produce the report when at least one data point exists
    if [ ${#cores[@]} -gt 0 ]; then
        # The first (smallest) configuration is the strong-scaling baseline
        local base_time=${times[0]}
        local base_cores=${cores[0]}

        {
            echo "================================================================================"
            echo "强扩展性分析（固定问题规模）"
            echo "================================================================================"
            echo ""
            printf "%-12s %-20s %-15s %-15s %-15s\n" "核心数" "计算时间(s)" "加速比" "理想加速比" "并行效率(%)"
            echo "--------------------------------------------------------------------------------"

            local i speedup ideal efficiency
            for i in "${!cores[@]}"; do
                # speedup = baseline time / current time
                speedup=$(awk -v base="$base_time" -v curr="${times[$i]}" 'BEGIN{printf "%.2f", base/curr}')

                # ideal speedup = ratio of core counts
                ideal=$(awk -v base="$base_cores" -v curr="${cores[$i]}" 'BEGIN{printf "%.2f", curr/base}')

                # parallel efficiency = actual / ideal × 100%
                efficiency=$(awk -v sp="$speedup" -v id="$ideal" 'BEGIN{printf "%.2f", sp/id*100}')

                printf "%-12s %-20s %-15s %-15s %-15s\n" "${cores[$i]}" "${times[$i]}" "$speedup" "$ideal" "$efficiency"
            done

            echo "================================================================================"
            echo ""
            echo "说明："
            echo "  - 加速比：实际性能提升倍数，理想情况下等于核数比"
            echo "  - 并行效率：实际加速比与理想加速比的比值"
            echo "  - 并行效率 > 80% 为良好扩展性"
            echo ""
        } > "$analysis_file"

        print_success "扩展性分析已保存: ${analysis_file}"
        cat "$analysis_file"
    fi
}

# Generate the final diagnostic report: run configuration plus the formatted
# contents of the summary CSV, written to a file and echoed to the terminal.
# NOTE: a large advisory section below is currently disabled (commented out)
# and is preserved verbatim for possible re-enablement.
generate_diagnostic_report() {
    local report_file="${RESULT_DIR}/diagnostic_report_slurm.txt"

    print_info "生成诊断报告..."

    {
        echo "================================================================================"
        echo "  MPI并行性能诊断报告 - 超算环境（Slurm）"
        echo "================================================================================"
        echo "生成时间: $(date)"
        echo "测试核数范围: ${CORE_COUNTS[@]}"
        echo "分区: ${PARTITION}"
        echo ""
        echo "================================================================================"
        echo "  性能指标汇总"
        echo "================================================================================"
        echo ""

        # Read the summary CSV and display it with aligned columns
        if [ -f "${SUMMARY_FILE}" ]; then
            column -t -s ',' "${SUMMARY_FILE}"
        fi

        # Disabled advisory text (Chinese), kept verbatim:
        # echo ""
        # echo "================================================================================"
        # echo "  诊断建议"
        # echo "================================================================================"
        # echo ""
        # echo "1. 通信性能分析："
        # echo "   - 检查点对点通信延迟是否随核数增加而上升"
        # echo "   - 同节点通信延迟应 < 1us"
        # echo "   - 跨节点通信延迟（InfiniBand）应 < 2us"
        # echo "   - 如果延迟 > 5us，检查网络配置和MPI绑定策略"
        # echo ""
        # echo "2. 内存带宽分析："
        # echo "   - 鲲鹏920理论内存带宽: ~150 GB/s per socket"
        # echo "   - 实测带宽应达到理论值的60-80%"
        # echo "   - 如果带宽差异率 > 20%，检查NUMA绑定策略"
        # echo "   - 使用 numactl --hardware 查看NUMA拓扑"
        # echo ""
        # echo "3. 负载均衡分析："
        # echo "   - 负载不均衡度应 < 5%（优秀）或 < 10%（良好）"
        # echo "   - 如果 > 10%，可能原因："
        # echo "     * 进程绑定不当（多个进程共享CPU核心）"
        # echo "     * CPU频率不一致（Turbo Boost效应）"
        # echo "     * NUMA远程访问延迟"
        # echo "     * 系统后台任务干扰"
        # echo ""
        # echo "4. 扩展性分析："
        # echo "   - 绘制加速比曲线：加速比 = T(基准) / T(当前)"
        # echo "   - 理想扩展性：加速比 = 核数比"
        # echo "   - 并行效率 = 加速比 / 核数比 × 100%"
        # echo "   - 如果并行效率快速下降："
        # echo "     * < 80%：可能有通信瓶颈或负载不均衡"
        # echo "     * < 60%：严重的扩展性问题，需详细分析"
        # echo ""
        # echo "5. 计算密集型任务分析："
        # echo "   - 检查矩阵乘法的GFLOPS是否随核数增长"
        # echo "   - 理想情况：GFLOPS线性增长"
        # echo "   - 如果增长缓慢，可能是："
        # echo "     * 内存带宽瓶颈（多核竞争）"
        # echo "     * 缓存冲突"
        # echo "     * NUMA配置不当"
        # echo ""
        # echo "================================================================================"
        # echo "  性能优化建议"
        # echo "================================================================================"
        # echo ""
        # echo "1. MPI进程绑定优化："
        # echo "   - OpenMPI: mpirun --bind-to core --map-by socket"
        # echo "   - Intel MPI: export I_MPI_PIN_DOMAIN=core"
        # echo "   - 确保每个进程绑定到独立的物理核心"
        # echo ""
        # echo "2. NUMA优化："
        # echo "   - 使用 numactl 限制进程的NUMA域"
        # echo "   - 例如：numactl --cpunodebind=0 --membind=0 mpirun ..."
        # echo "   - 避免跨NUMA域的内存访问"
        # echo ""
        # echo "3. 网络优化："
        # echo "   - InfiniBand: 检查 UCX 或 verbs 配置"
        # echo "   - 设置环境变量优化MPI通信"
        # echo "   - 例如：export UCX_NET_DEVICES=mlx5_0:1"
        # echo ""
        # echo "4. Slurm作业配置优化："
        # echo "   - 使用 --ntasks-per-node 控制每节点进程数"
        # echo "   - 使用 --cpus-per-task 为每个任务分配CPU"
        # echo "   - 使用 --exclusive 独占节点，避免干扰"
        # echo ""
        # echo "================================================================================"
        # echo "  进一步测试建议"
        # echo "================================================================================"
        # echo ""
        # echo "1. 如果怀疑是硬件问题："
        # echo "   - 运行STREAM内存带宽测试"
        # echo "   - 运行HPL (High Performance Linpack)"
        # echo "   - 使用 likwid-topology 检查CPU拓扑"
        # echo "   - 使用 ibdiagnet 检查InfiniBand网络"
        # echo ""
        # echo "2. 如果怀疑是算法问题："
        # echo "   - 使用性能分析工具（TAU, Intel VTune, Score-P）"
        # echo "   - 分析通信-计算比"
        # echo "   - 检查集合通信的频率和大小"
        # echo "   - 考虑使用异步通信（MPI_Isend/Irecv）"
        # echo ""
        # echo "3. 如果怀疑是MPI配置问题："
        # echo "   - 测试不同的MPI实现（OpenMPI vs Intel MPI）"
        # echo "   - 调整MPI缓冲区大小"
        # echo "   - 尝试不同的进程布局策略"
        # echo "   - 查看MPI库的性能调优文档"
        # echo ""
        # echo "================================================================================"
        # echo "  相关命令参考"
        # echo "================================================================================"
        # echo ""
        # echo "查看作业状态："
        # echo "  squeue -u \$USER"
        # echo ""
        # echo "查看节点信息："
        # echo "  sinfo -N -l"
        # echo ""
        # echo "查看NUMA拓扑："
        # echo "  numactl --hardware"
        # echo ""
        # echo "查看CPU信息："
        # echo "  lscpu"
        # echo ""
        # echo "取消作业："
        # echo "  scancel <job_id>"
        # echo ""
        # echo "================================================================================"

    } > "${report_file}"

    print_success "诊断报告已生成: ${report_file}"

    # Display the report on the terminal as well
    cat "${report_file}"
}

#===============================================================================
# 主程序
#===============================================================================

#######################################
# Entry point: check prerequisites, run every configured core count through
# Slurm, then generate the CSV summary, formatted table, scaling analysis,
# and diagnostic report.
# Globals: CORE_COUNTS, PARTITION, RESULT_DIR, RESULT_FILE, SUMMARY_FILE,
#          SLURM_DIR (read)
#######################################
main() {
    print_info "=========================================="
    print_info "MPI并行性能诊断自动化测试（Slurm版）"
    print_info "=========================================="
    echo ""

    # Abort early if the benchmark binary is missing (exits on failure)
    check_executable

    # Initialize the aggregate result file
    echo "MPI并行性能诊断测试结果（Slurm）" > "${RESULT_FILE}"
    echo "开始时间: $(date)" >> "${RESULT_FILE}"
    echo "测试核数: ${CORE_COUNTS[*]}" >> "${RESULT_FILE}"
    echo "分区: ${PARTITION}" >> "${RESULT_FILE}"
    echo "" >> "${RESULT_FILE}"

    # Show the configuration being tested
    print_info "测试配置:"
    print_info "  - 测试核数: ${CORE_COUNTS[*]}"
    print_info "  - Slurm分区: ${PARTITION}"
    print_info "  - 结果目录: ${RESULT_DIR}"
    echo ""

    # Submit and wait for each test in turn.
    # FIX: test the call directly instead of inspecting $? afterwards, and
    # avoid ((failed_tests++)), whose exit status is 1 on the first increment.
    local failed_tests=0
    local nproc
    for nproc in "${CORE_COUNTS[@]}"; do
        if ! run_slurm_test "$nproc"; then
            failed_tests=$((failed_tests + 1))
            print_warning "测试失败，继续下一个..."
        fi

        echo ""
        sleep 2  # brief pause between submissions
    done

    # Generate summaries and reports
    print_info "=========================================="
    print_info "所有测试已完成，开始生成分析报告"
    print_info "=========================================="
    echo ""

    generate_summary
    generate_table
    generate_scaling_analysis
    generate_diagnostic_report

    # Final pointers to every artifact produced
    print_success "=========================================="
    print_success "所有测试和分析完成！"
    print_success "=========================================="
    print_info "完整日志: ${RESULT_FILE}"
    print_info "性能汇总CSV: ${SUMMARY_FILE}"
    print_info "性能表格: ${RESULT_DIR}/mpi_performance_table.txt"
    print_info "扩展性分析: ${RESULT_DIR}/scaling_analysis.txt"
    print_info "诊断报告: ${RESULT_DIR}/diagnostic_report_slurm.txt"

    if [ "${failed_tests}" -gt 0 ]; then
        print_warning "警告: ${failed_tests} 个测试失败"
    fi

    echo ""
    print_info "后续操作建议："
    print_info "  1. 查看CSV文件: cat ${SUMMARY_FILE}"
    print_info "  2. 使用Excel打开CSV文件，绘制扩展性曲线"
    print_info "  3. 查看详细日志: cat ${RESULT_DIR}/mpi_test_*cores.log"
    print_info "  4. 查看Slurm输出: cat ${SLURM_DIR}/slog/mpi_*.out"

    print_success "测试流程完成！"
}

# Run the main program, forwarding any command-line arguments
main "$@"

