```yaml
bf16: true
cutoff_len: 1024
dataset: chapterization
dataset_dir: data
do_train: true
finetuning_type: lora
flash_attn: fa2
gradient_accumulation_steps: 4
learning_rate: 0.0002
logging_steps: 5
lora_alpha: 16
lora_dropout: 0
lora_rank: 8
lora_target: q_proj,v_proj
lr_scheduler_type: cosine
max_grad_norm: 0.3
max_samples: 100000
model_name_or_path: mistralai/Mistral-7B-Instruct-v0.2
num_train_epochs: 1.0
optim: adamw_torch
output_dir: saves/Mistral-7B-v0.2-Chat/lora/Chapterization_Mistral-7B-v0.2-Chat_0.1.0
packing: false
per_device_train_batch_size: 16
quantization_bit: 4
report_to: none
save_steps: 100
stage: sft
template: mistral
warmup_steps: 0
```
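These keys follow LLaMA-Factory's SFT argument names, so the config can presumably be launched with that framework's CLI (e.g. `llamafactory-cli train <path/to/this.yaml>`). For readers who want to see what the quantization and LoRA settings above amount to, here is a minimal sketch of how they map onto the underlying Hugging Face `transformers`/`peft` objects. This is an assumed translation, not the framework's actual code path; in particular, the NF4 quant type is a bitsandbytes default assumed here, not stated in the config.

```python
# Sketch: load Mistral-7B-Instruct-v0.2 in 4-bit and attach the LoRA adapters
# described by the config above (quantization_bit: 4, lora_rank: 8, etc.).
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

# quantization_bit: 4 -> 4-bit loading via bitsandbytes; NF4 is an assumption
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,  # bf16: true
)

model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2",   # model_name_or_path
    quantization_config=bnb_config,
    attn_implementation="flash_attention_2",  # flash_attn: fa2
    torch_dtype=torch.bfloat16,
)

# lora_rank / lora_alpha / lora_dropout / lora_target from the config
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.0,
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the rank-8 adapters are trainable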