# CUDA_VISIBLE_DEVICES=7 llamafactory-cli train \
#     --stage sft \
#     --do_train \
#     --model_name_or_path /llm/nankai/xuyang_space/LLMs/Qwen2-7B-Instruct/ \
#     --dataset my_dpo_data \
#     --dataset_dir /llm/nankai/xuyang_space/project/LLaMA-Factory/data/ \
#     --template qwen \
#     --finetuning_type lora \
#     --output_dir ./saves/Qwen2-7B-Instruct/lora/sft \
#     --overwrite_cache \
#     --overwrite_output_dir \
#     --cutoff_len 1024 \
#     --preprocessing_num_workers 16 \
#     --per_device_train_batch_size 2 \
#     --per_device_eval_batch_size 1 \
#     --gradient_accumulation_steps 8 \
#     --lr_scheduler_type cosine \
#     --logging_steps 50 \
#     --warmup_steps 20 \
#     --save_steps 100 \
#     --eval_steps 50 \
#     --evaluation_strategy steps \
#     --load_best_model_at_end \
#     --learning_rate 5e-5 \
#     --num_train_epochs 5.0 \
#     --max_samples 1000 \
#     --val_size 0.1 \
#     --plot_loss \
#     --fp16

# LoRA training
# CUDA_VISIBLE_DEVICES=5 llamafactory-cli train examples/train_lora/my_qwen2.5_lora_dpo.yaml

# LoRA merge (export merged weights)
# llamafactory-cli export examples/merge_lora/my_qwen2.5_lora_dpo.yaml

# Full-parameter training
# Full-parameter DPO training run.
# GPUs default to 5,6; export CUDA_VISIBLE_DEVICES before invoking this
# script to target different devices without editing the file.
CUDA_VISIBLE_DEVICES="${CUDA_VISIBLE_DEVICES:-5,6}" llamafactory-cli train \
  examples/train_full/qwen2.5_full_dpo.yaml \
  || { printf 'error: llamafactory-cli train failed\n' >&2; exit 1; }


# llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
# llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml