#!/bin/bash
# Trace executed commands for debugging. Setting -x explicitly (instead of in
# the shebang) keeps tracing active even when run as `bash script.sh`, where
# shebang options are ignored.
set -x

# Pin every CPU's cpufreq governor to "performance" so frequency scaling does
# not throttle cores during training. Writing the sysfs files requires root.
# Globals: LOG (log file to append to; never set in this script — default to
#          /dev/null pending confirmation of the intended path)
function set_cpu_performance()
{
        local i cpu_num
        # grep -c counts matching lines directly (replaces cat | grep | wc -l).
        cpu_num=$(grep -c '^processor' /proc/cpuinfo)
        echo "set cpu performance mode" | tee -a "${LOG:-/dev/null}"
        for ((i = 0; i < cpu_num; i++)); do
                local gov="/sys/devices/system/cpu/cpu${i}/cpufreq/scaling_governor"
                if [ -f "${gov}" ]; then
                        # BUG FIX: the original piped this redirection into tee,
                        # but a redirected echo produces no stdout, so tee
                        # logged nothing. Write the governor file directly.
                        echo performance > "${gov}"
                fi
        done
}

set_cpu_performance

# Single-node, 16-GPU launch configuration for torchrun / llamafactory-cli.
GPUS_PER_NODE=16
NNODES=1
# BUG FIX: the node rank must lie in [0, NNODES); with NNODES=1 the only valid
# rank is 0 (the original value 16 would break torchrun's rendezvous).
NODE_RANK=0
MASTER_ADDR=127.0.0.1
MASTER_PORT=12345

# Timestamped directory for per-rank training logs.
LOG_DIR="logs_$(date +%m%d%H%M%S)"
mkdir -p "${LOG_DIR}"


## Dependency installation
# BUG FIX: -y makes the uninstall non-interactive; without it pip prompts for
# confirmation and hangs an unattended run.
pip3 uninstall -y autoawq
# Install all locally vendored wheels/sdists from the pypi/ directory.
pip3 install pypi/*


## Weight format conversion: HF -> MG
TP=1
PP=4
# Path to the fine-tuning (HuggingFace-format) weights
HF_LOAD_PATH="/home/weights/Qwen2.5-7B-Instruct/"
# Destination path for the converted Megatron-format weights
MG_SAVE_PATH="/home/weights/Qwen2.5-7B-Instruct_MG/"
# Keep the following flags in sync with
# sft_configs/megatron_lm/megatron_config_qwen2_7b.json:
# --target-tensor-model-parallel-size
# --target-pipeline-model-parallel-size
# --num-layers-per-stage
# BUG FIX: removed the dangling trailing backslash after the last argument —
# it continued the command into the following blank lines, so any text later
# inserted there would silently be swallowed as extra arguments.
python3 convert_weight.py \
	--target-tensor-model-parallel-size "${TP}" \
	--target-pipeline-model-parallel-size "${PP}" \
	--load "${HF_LOAD_PATH}" \
	--save "${MG_SAVE_PATH}" \
	--dtype bf16 \
	--format "hf2mg" \
	--num-layers-per-stage 1 6 2 8 1 6


## Qwen2.5-7B SFT single-node multi-GPU training
export CUDA_DEVICE_MAX_CONNECTIONS=1
export NCCL_ALGO=Ring
export OMP_NUM_THREADS=4
# Launch via llamafactory-cli with per-invocation torchrun environment;
# mirror all output (stdout + stderr) into a per-rank log file.
FORCE_TORCHRUN=1 \
NNODES=${NNODES} \
RANK=${NODE_RANK} \
MASTER_ADDR=${MASTER_ADDR} \
MASTER_PORT=${MASTER_PORT} \
	llamafactory-cli train sft_configs/train_full/qwen2_7b_full_sft_mg.yaml 2>&1 \
	| tee "${LOG_DIR}/train_rank${NODE_RANK}.log"


## Weight format conversion: MG -> HF
TP=1
PP=4
# Original HF weights (supplies the model config), trained MG checkpoint to
# load, and destination for the exported HF-format weights.
HF_LOAD_PATH="/home/weights/Qwen2.5-7B-Instruct/"
MG_LOAD_PATH="./saves/qwen2.5-7b/full/sft/checkpoint-8/"
HF_SAVE_PATH="./saves/qwen2.5-7b/full/Qwen2.5-7B-Instruct-SFT/"
python3 convert_weight.py \
	--target-tensor-model-parallel-size "${TP}" \
	--target-pipeline-model-parallel-size "${PP}" \
	--config_path "${HF_LOAD_PATH}" \
	--load "${MG_LOAD_PATH}" \
	--save "${HF_SAVE_PATH}" \
	--dtype bf16 \
	--format "mg2hf" \
	--num-layers-per-stage 1 6 2 8 1 6
