## 这里使用Pai-Megatron-Patch这个开源库
## flash-attention-3的使用需要H系列的GPU芯片，配置参考如下：
# pip install "git+https://github.com/Dao-AILab/flash-attention.git#egg=flashattn-hopper&subdirectory=hopper"
# python_path=`python -c "import site; print(site.getsitepackages()[0])"`
# mkdir -p $python_path/flashattn_hopper
# wget -P $python_path/flashattn_hopper https://raw.githubusercontent.com/Dao-AILab/flash-attention/main/hopper/flash_attn_interface.py
## 脚本测试的环境是V100芯片，所以不做FA-3的配置

import modelscope
import os

# Force MKL to use the GNU threading layer to avoid libgomp/libiomp conflicts
# when Megatron and its dependencies are loaded together.
os.environ['MKL_THREADING_LAYER'] = 'GNU'

## path definitions -------------------------------------------------------
# Project root is three directory levels above this script.
_this_file = os.path.abspath(__file__)
root_path = os.path.dirname(os.path.dirname(os.path.dirname(_this_file)))
# Script name without extension; reused to namespace checkpoints/models/logs.
filename = os.path.basename(__file__).partition('.')[0]
dataset_path = os.path.join(root_path, "datasets/pretrain/wiki-datasets")
model_ID = 'Qwen/Qwen2.5-0.5B'

model_path = os.path.join(root_path, f"llms/{model_ID}")

# Checkpoint from a previous pretraining run used as the initial state.
# NOTE(review): if you have not pretrained before, point this elsewhere.
original_checkpoint_path = os.path.join(root_path, "checkpoints/test/Qwen2.5-0.5B")
# Output directory for checkpoints produced by this run.
target_checkpoint_path = os.path.join(root_path, f"checkpoints/{filename}")
model_save_path = os.path.join(root_path, f"llms/{filename}")
log_path = os.path.join(root_path, f"logs/{filename}")

# Download the model config and tokenizer from ModelScope and save local
# copies under model_path for the conversion/training steps below.
config = modelscope.AutoConfig.from_pretrained(model_ID)
config.save_pretrained(model_path)
tokenizer = modelscope.AutoTokenizer.from_pretrained(model_ID)
tokenizer.save_pretrained(model_path)

## Pai-Megatron-Patch库提供了数据预处理的脚本工具，可以提前对数据进行token处理来加速训练效率，当然这不是必须的
## 如果不用提前处理，可以直接基于example中的Qwen2/pretrain_qwen.py的代码进行自定义魔改[除非你有极端的技术追求，其实LLM的预训练代码都大差不差]
## 我们遵循Megatron-Core官方库的设定只对库的参数设定进行调整来适配Qwen2.5-0.5B的预训练

# ### Data Process
# Split the raw wikipedia jsonl dump into NUM_PIECE roughly equal chunks so
# the later compression/tokenization steps can process pieces in parallel.
# NOTE(review): os.popen only captures stdout; the command's stderr and exit
# status are not checked, so shell failures pass silently — TODO consider
# subprocess.run with error checking.
split_data_cmd = f"""
NUM_PIECE=10

# 查询数据总长度，对数据进行拆分
NUM=$(sed -n '$=' {dataset_path}/wikipedia-zh-cn-20241020.json)
echo "total line of dataset is $NUM, data will be split into $NUM_PIECE pieces for processing"
NUM=`expr $NUM / $NUM_PIECE`
echo "each group is processing $NUM sample"
mkdir -p {dataset_path}/split
split -l $NUM --numeric-suffixes --additional-suffix=.jsonl {dataset_path}/wikipedia-zh-cn-20241020.json {dataset_path}/split/
"""
with os.popen(split_data_cmd, 'r') as file:
    output = file.read()
    print(output)

# Compress each split chunk to .zst in parallel (one background zstd per
# file), then remove the uncompressed split directory.
# Bug fix: the original ran `rm -rf` immediately after launching the
# background zstd jobs, racing with them — input files could be deleted
# before a job opened them, and the next stage could observe partially
# written .zst files. `wait` blocks until every background job finishes.
zst_data_cmd = f"""
# 数据压缩
o_path={dataset_path}/cleaned_zst/
mkdir -p $o_path
files=$(ls {dataset_path}/split/*.jsonl)
for filename in $files
do
   f=$(basename $filename)
   zstd -z $filename -o $o_path/$f.zst &
done
# wait for all background zstd jobs before deleting their input files
wait
rm -rf {dataset_path}/split
"""
with os.popen(zst_data_cmd, 'r') as file:
    output = file.read()
    print(output)

# Tokenize the compressed corpus into Megatron idxmap-format files using the
# preprocessing toolkit shipped with Pai-Megatron-Patch, then drop the
# intermediate compressed data.
process_data_cmd = f"""
# 数据处理 生成idxmap格式文件
cd {root_path}/frameworks/Pai-Megatron-Patch/toolkits/pretrain_data_preprocessing

bash run_make_pretraining_dataset.sh \
../.. \
{dataset_path}/cleaned_zst/ \
qwenbpe \
{dataset_path}/ \
{model_path} 

rm -rf {dataset_path}/cleaned_zst
"""
pipe = os.popen(process_data_cmd, 'r')
try:
    output = pipe.read()
    print(output)
finally:
    pipe.close()
    
### Model Pretrain with Megatron
# Continue pretraining from a previously trained checkpoint.
# Step 1: convert the HF-format checkpoint into a Megatron-Core dense
# checkpoint (TP=1, PP=1, fp16) via the Pai-Megatron-Patch convertor script.
ck_to_megatron_core_cmd = f"""
cd {root_path}/frameworks/Pai-Megatron-Patch/toolkits/model_checkpoints_convertor/qwen

bash hf2mcore_qwen2.5_convertor.sh \
0.5B \
{original_checkpoint_path}/ \
{target_checkpoint_path}/  \
1  \
1  \
fp16 \
true \
false 
"""
with os.popen(ck_to_megatron_core_cmd, 'r') as pipe:
    result = pipe.read()
print(result)

# Step 2: launch distributed pretraining through Megatron. There is no PAI
# DSW/DLC environment here, but reading the launcher shows "dsw" means a
# single node while "dlc" means multi-node — so "dsw" is used.
train_megatron_cmd = f"""
cd {root_path}/frameworks/Pai-Megatron-Patch/examples/qwen2_5

bash run_mcore_qwen.sh  \
dsw  \
0.5B   \
1    \
8 \
1e-5   \
1e-6   \
128  \
128  \
bf16  \
1   \
1  \
1 \
true \
true   \
true \
false \
false   \
false \
100000  \
{dataset_path}/wudao_qwenbpe_content_document   \
{dataset_path}/wudao_qwenbpe_content_document   \
{target_checkpoint_path}  \
10000  \
100   \
{model_save_path}
"""
stream = os.popen(train_megatron_cmd, 'r')
output = stream.read()
print(output)
stream.close()
