# axolotl2 / config.yml
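# Full-parameter finetune of PygmalionAI/pyggel-ckpt-2947 (no adapter section;
# 8-bit/4-bit loading both disabled) on a ShareGPT-style roleplay dataset,
# with checkpoints pushed to Alignment-Lab-AI/pygmalion-3-1m-2.4 on the Hub.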
base_model: PygmalionAI/pyggel-ckpt-2947
load_in_8bit: false
load_in_4bit: false
strict: false
datasets:
  - path: PygmalionAI/unified-rp-dataset
    type: sharegpt
chat_template: llama3
dataset_prepared_path: ./datasetstuff3
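# ShareGPT-formatted conversations are rendered with the Llama 3 chat template;
# tokenized data is cached in dataset_prepared_path. A minimal preprocessing
# sketch, assuming the standard axolotl CLI:
#   python -m axolotl.cli.preprocess config.yml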
hub_model_id: Alignment-Lab-AI/pygmalion-3-1m-2.4
wandb_project: pyg-1-m-2.3
hf_use_auth_token: true
wandb_watch: all
hub_private_repo: true
hub_strategy: all_checkpoints
push_to_hub: true
output_dir: ./pyggel
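# hub_strategy: all_checkpoints pushes every saved checkpoint to the private
# Hub repo as training runs, authenticated via the logged-in HF token.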
max_grad_norm: 0.6
sequence_len: 8192
sample_packing: true
pad_to_sequence_len: true
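# Sample packing concatenates multiple short conversations into each
# 8192-token sequence to cut padding waste; pad_to_sequence_len keeps every
# packed batch at the full sequence_len.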
micro_batch_size: 1
gradient_accumulation_steps: 1
num_epochs: 3
learning_rate: 0.0001
optimizer: adamw_bnb_8bit
optim_args:
  amsgrad: true
lr_scheduler: cosine
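# bitsandbytes' 8-bit AdamW keeps optimizer state in 8-bit to save memory;
# optim_args passes extra settings through to the optimizer (here requesting
# AMSGrad), and the cosine scheduler decays the LR after warmup.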
train_on_inputs: true
group_by_length: false
bfloat16: false
fp16:
tf32: false
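# No reduced-precision mode is explicitly enabled above (bfloat16 off, fp16
# unset, TF32 matmuls disabled). NEFTune (next line) injects noise into
# embedding activations during training; alpha 15 sits at the top of the
# commonly used 5-15 range.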
neftune_noise_alpha: 15
gradient_checkpointing: unsloth
gradient_checkpointing_kwargs:
  use_reentrant: true
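# gradient_checkpointing: unsloth selects Unsloth's checkpointing variant,
# which recomputes activations in the backward pass to trade compute for VRAM;
# use_reentrant: true opts into PyTorch's reentrant checkpoint implementation.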
logging_steps: 1
xformers_attention:
flash_attention: true
unsloth_cross_entropy_loss: true
#unsloth_lora_mlp: true
#unsloth_lora_qkv: true
#unsloth_lora_o: true
flash_attn_cross_entropy: false
flash_attn_rms_norm: true
flash_attn_fuse_qkv: false
flash_attn_fuse_mlp: true
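# Kernel switches: FlashAttention for attention, Unsloth's fused cross-entropy
# loss, and the flash-attn integration's fused RMSNorm and fused MLP; QKV
# fusion and flash-attn cross-entropy remain off.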
warmup_ratio: 0.5
evals_per_step: 0.025
eval_table_size:
saves_per_epoch: 15
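# The LR warms up over the first half of training (warmup_ratio: 0.5), and 15
# checkpoints are saved per epoch, each pushed by hub_strategy above.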
debug:
torch_compile: true
rank:
deepspeed: deepspeed_configs/zero1.json
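# DeepSpeed ZeRO stage 1 shards optimizer states across data-parallel ranks
# (see deepspeed_configs/zero1.json).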
weight_decay: 0.01
special_tokens:
  eos_token: "<|eot_id|>"
  pad_token: "<|end_of_text|>"
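# A minimal launch sketch, assuming the standard axolotl CLI and an accelerate
# setup matching the DeepSpeed config above:
#   accelerate launch -m axolotl.cli.train config.yml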