base_model: codellama/CodeLlama-7b-hf
base_model_config: codellama/CodeLlama-7b-hf
model_type: LlamaForCausalLM
tokenizer_type: CodeLlamaTokenizer
load_in_8bit: false
load_in_4bit: true
strict: false

datasets:
  - path: /home/interstellarninja/projects/axolotl/dataset/gorilla-16k.json
    type: alpaca
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
output_dir: ./qlora-out

adapter: qlora
lora_model_dir:

sequence_len: 2048
max_packed_sequence_len:
lora_r: 64
lora_alpha: 16
lora_dropout: 0.00
lora_target_modules:
  - gate_proj
  - down_proj
  - up_proj
  - q_proj
  - v_proj
  - k_proj
  - o_proj
lora_target_linear: true
lora_fan_in_fan_out:

wandb_project: llama-2-toolformer
wandb_watch:
wandb_log_model:

data_seed: 42
seed: 42

gradient_accumulation_steps: 4
micro_batch_size: 4
num_epochs: 1
optimizer: adamw_bnb_8bit
lr_scheduler: constant_with_warmup
learning_rate: 0.00002

train_on_inputs: false
group_by_length: false
bf16: true
fp16: false
tf32: false

gradient_checkpointing: true
early_stopping_patience: 5
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention: false
flash_attention: true

eval_steps: 200
save_steps: 200
save_total_limit: 5
load_best_model_at_end: true
greater_is_better: false
metric_for_best_model: eval_loss
do_mmlu_eval: true

debug:
deepspeed:
weight_decay: 0.0
fsdp:
fsdp_config:
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"
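
# Usage note (assumption, not part of the original config): with a working axolotl
# install, a config like this is typically launched through accelerate, e.g.
#   accelerate launch -m axolotl.cli.train qlora.yml
# (older axolotl versions used `accelerate launch scripts/finetune.py qlora.yml`);
# the file name qlora.yml is hypothetical. The QLoRA adapter written to ./qlora-out
# can then be loaded on top of the 4-bit base model with peft's
# PeftModel.from_pretrained for inference or merging.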