# phi3-4x4b-v1 / axolotl_config.yml
# Someone asked for the training config, so here it is.
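# The base model appears to be a sparse-MoE build of Phi-3 (hence the Mixtral
# architecture class here and the MixtralSparseMoeBlock FSDP wrap further down).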
base_model: Fizzarolli/phi3-4x4b-uninitialized
model_type: MixtralForCausalLM
tokenizer_type: AutoTokenizer # PreTrainedTokenizerFast
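# QLoRA setup: the frozen base weights are loaded in 4-bit; only the LoRA
# adapters (adapter: qlora below) are trained in higher precision.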
load_in_8bit: false
load_in_4bit: true
strict: false
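# Raw-text continued pretraining: both datasets are consumed as plain completion
# corpora on their `text` field, with no prompt or chat template applied.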
datasets:
  - path: NeelNanda/pile-10k
    type: completion
    field: text
  - path: BEE-spoke-data/gutenberg-en-v1-clean
    type: completion
    field: text
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
output_dir: ./lora-out
adapter: qlora
lora_model_dir:
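# Sample packing concatenates multiple short documents into each 4072-token
# sequence so the context window stays full.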
sequence_len: 4072
sample_packing: true
pad_to_sequence_len: true
lora_r: 32
lora_alpha: 64
lora_dropout: 0.05
lora_target_modules:
  - k_proj
  - q_proj
  - v_proj
  - o_proj
lora_target_linear: true
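# Note: lora_target_linear: true makes axolotl adapt every linear layer, so the
# explicit q/k/v/o list above is effectively a subset of what gets trained.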
lora_fan_in_fan_out:
wandb_project: phixtral3
wandb_entity:
wandb_watch:
wandb_name:
wandb_log_model:
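# Effective batch size: 6 micro-batches x 4 accumulation steps = 24 packed
# sequences per device per optimizer step, trained for a single epoch.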
gradient_accumulation_steps: 4
micro_batch_size: 6
num_epochs: 1
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 0.000001
train_on_inputs: false
group_by_length: false
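# Precision: bf16: auto selects bfloat16 when the hardware supports it; fp16 is
# left unset and TF32 matmuls stay disabled.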
bf16: auto
fp16:
tf32: false
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: true
early_stopping_patience:
resume_from_checkpoint:
local_rank:
logging_steps: 1
xformers_attention:
flash_attention: true
warmup_steps: 10
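# A 5% validation split is prepared (val_set_size above), but evals_per_epoch: 0
# skips evaluation during training; 20 checkpoints are saved across the epoch.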
evals_per_epoch: 0
eval_table_size:
saves_per_epoch: 20
debug:
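# FSDP: fully shard parameters, gradients, and optimizer state across GPUs with
# CPU parameter offload, wrapping at each MixtralSparseMoeBlock (one FSDP unit
# per sparse-MoE block).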
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_transformer_layer_cls_to_wrap: MixtralSparseMoeBlock
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_sharding_strategy: FULL_SHARD
  fsdp_forward_prefetch: false
  fsdp_backward_prefetch: BACKWARD_PRE
weight_decay: 0.0
special_tokens: