bf16: true
cutoff_len: 4096
dataset: LangGPT_community,LangGPT_alpaca,LangGPT_seed
dataset_dir: /datas/wangm/LLM4LangGPT
do_train: true
finetuning_type: lora
flash_attn: auto
gradient_accumulation_steps: 8
learning_rate: 5.0e-05
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: qkv_proj
lr_scheduler_type: cosine
max_grad_norm: 1.0
max_samples: 100000
model_name_or_path: microsoft/Phi-3-mini-4k-instruct
num_train_epochs: 5.0
optim: adamw_torch
output_dir: /datas/wangm/LLM4LangGPT/output/Phi-3-mini-4k-instruct
packing: false
per_device_train_batch_size: 2
report_to: none
save_steps: 100
stage: sft
template: phi
warmup_steps: 0
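The keys above (stage, finetuning_type, template, dataset_dir) match LLaMA-Factory's SFT argument schema, so a minimal sketch of launching this run is given below. The filename phi3_lora_sft.yaml is a hypothetical name for the config above; the sketch also computes the effective per-device batch size implied by the accumulation settings.

```python
# Minimal sketch, assuming the YAML above is saved as phi3_lora_sft.yaml
# (hypothetical filename) and LLaMA-Factory is installed, providing the
# `llamafactory-cli` entry point. Requires PyYAML.
import subprocess

import yaml

CONFIG_PATH = "phi3_lora_sft.yaml"  # hypothetical path to the config above

with open(CONFIG_PATH) as f:
    cfg = yaml.safe_load(f)

# Effective per-device batch size: 2 samples x 8 accumulation steps = 16.
effective_bs = cfg["per_device_train_batch_size"] * cfg["gradient_accumulation_steps"]
print(f"effective per-device batch size: {effective_bs}")

# LLaMA-Factory reads the full argument set directly from the YAML file.
subprocess.run(["llamafactory-cli", "train", CONFIG_PATH], check=True)
```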