# Use `--loss_scale ignore_empty_think`
# to avoid losing the thinking capability: the loss on empty `<think>\n\n</think>\n\n` blocks is ignored.
# This method also applies to the DeepSeek-R1 series of models.
CUDA_VISIBLE_DEVICES=0 \
swift sft \
    --model Qwen/Qwen3-8B \
    --train_type lora \
    --dataset 'swift/Qwen3-SFT-Mixin#2000' \
              'swift/self-cognition:empty_think#600' \
    --torch_dtype bfloat16 \
    --num_train_epochs 1 \
    --per_device_train_batch_size 1 \
    --per_device_eval_batch_size 1 \
    --learning_rate 1e-4 \
    --lora_rank 8 \
    --lora_alpha 32 \
    --target_modules all-linear \
    --gradient_accumulation_steps 16 \
    --eval_steps 50 \
    --save_steps 50 \
    --save_total_limit 2 \
    --logging_steps 5 \
    --max_length 2048 \
    --output_dir output \
    --warmup_ratio 0.05 \
    --dataloader_num_workers 4 \
    --use_liger_kernel true \
    --loss_scale ignore_empty_think \
    --model_author swift \
    --model_name swift-robot
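
For intuition, here is a minimal sketch of what `ignore_empty_think` does conceptually: tokens belonging to an empty `<think>\n\n</think>\n\n` block receive label `-100`, so they are excluded from the cross-entropy loss while the rest of the response is trained normally. This is an illustration only, not ms-swift's actual implementation, and the sample response string is made up.

```python
# Conceptual sketch of the ignore_empty_think loss scale (not ms-swift's code).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")

# Hypothetical assistant response that starts with an empty think block.
response = "<think>\n\n</think>\n\nHello! I am swift-robot."
input_ids = tokenizer(response, add_special_tokens=False)["input_ids"]
labels = list(input_ids)

# Mask the empty think block out of the loss (-100 is ignored by
# cross-entropy). We assume here that the block tokenizes to a stable
# prefix of the full response; ms-swift handles this internally.
empty_think = "<think>\n\n</think>\n\n"
prefix_ids = tokenizer(empty_think, add_special_tokens=False)["input_ids"]
if input_ids[: len(prefix_ids)] == prefix_ids:
    labels[: len(prefix_ids)] = [-100] * len(prefix_ids)

print(labels)  # -100 over the empty think block, real token ids elsewhere
```

The effect is that the model is not rewarded for emitting the empty think template itself, so its underlying thinking behavior is not trained away while the self-cognition content still contributes loss.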