base_model: gpt2-xl
model_type: GPT2LMHeadModel
tokenizer_type: GPT2Tokenizer
is_gpt2_derived_model: true
load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: train_naming.csv
    type: csv
    column_names:
      - Label
      - Text
dataset_prepared_path:
val_set_size: 0.05
output_dir: ./outputs/qlora-out

adapter: qlora
lora_model_dir:

# gpt2-xl's learned position embeddings support at most 1024 tokens
sequence_len: 1024
sample_packing: true
pad_to_sequence_len: true

lora_r: 32
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project:
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
mlflow_experiment_name: colab-example

gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 4
max_steps: 20
optimizer: paged_adamw_32bit
lr_scheduler: cosine
learning_rate: 5e-5

train_on_inputs: false
group_by_length: false
bf16: false
fp16: true
tf32: false

gradient_checkpointing: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: false
flash_attention: true

warmup_steps: 10
evals_per_epoch:
saves_per_epoch:
debug:
deepspeed:
weight_decay: 0.1
fsdp:
fsdp_config:
special_tokens:
save_safetensors: true
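
# A minimal sketch of how a config like this is typically launched with
# Axolotl, assuming the file above is saved as qlora.yml (the filename is
# an assumption; use whatever path you saved it under):
#
#   # optional: tokenize and pack the dataset ahead of time
#   python -m axolotl.cli.preprocess qlora.yml
#
#   # fine-tune
#   accelerate launch -m axolotl.cli.train qlora.yml
#
#   # run inference with the trained adapter
#   accelerate launch -m axolotl.cli.inference qlora.yml \
#     --lora_model_dir="./outputs/qlora-out"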