base_model: /workspace/mistral-11b-base
model_type: MistralForCausalLM
tokenizer_type: LlamaTokenizer
is_mistral_derived_model: true

load_in_8bit: false
load_in_4bit: false
strict: false

datasets:
  - path: Open-Orca/SlimOrca
    type: sharegpt
    conversation: mistral
dataset_prepared_path:
val_set_size: 0.005
output_dir: ./out

sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true

wandb_project: mistral-11b-noresidual
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:

gradient_accumulation_steps: 16
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_bnb_8bit
lr_scheduler: cosine
learning_rate: 0.000005

train_on_inputs: true
group_by_length: false
bf16: true
fp16: false
tf32: false

gradient_checkpointing: true
flash_attention: true

logging_steps: 1
warmup_steps: 10
eval_steps: 100
save_steps: 100
weight_decay: 0.0
special_tokens:
  bos_token: "<s>"
  eos_token: "</s>"
  unk_token: "<unk>"
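
For orientation, the effective optimization batch in this config is gradient_accumulation_steps × micro_batch_size × number of GPUs. Below is a minimal Python sketch that loads the config and reports the tokens processed per optimizer step; the filename config.yml and the single-GPU assumption are illustrative, not part of the config above.

# Minimal sketch: parse the axolotl config and report the effective batch.
# Assumes the YAML above is saved as "config.yml" and a single-GPU run;
# both are assumptions, not stated in the original config.
import yaml

with open("config.yml") as f:
    cfg = yaml.safe_load(f)

num_gpus = 1  # assumption: adjust for multi-GPU launches
global_batch = cfg["gradient_accumulation_steps"] * cfg["micro_batch_size"] * num_gpus
# With sample_packing enabled, each sequence is packed close to sequence_len,
# so this is a near-exact token count per optimizer step.
tokens_per_step = global_batch * cfg["sequence_len"]

print(f"global batch size: {global_batch}")               # 16 * 1 * 1 = 16
print(f"tokens per optimizer step: {tokens_per_step:,}")  # 16 * 8192 = 131,072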