# LoRA/Alpha_config/config_file.toml
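# LoRA training configuration in the TOML format read by kohya-ss sd-scripts.
# A minimal launch sketch (an assumption inferred from the /content Colab paths below;
# the actual dataset definition is expected in a separate dataset config file):
#   accelerate launch train_network.py \
#     --config_file=config_file.toml \
#     --dataset_config=dataset_config.toml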
[model_arguments]
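# Base checkpoint and VAE used for training. AnyLoRA is an SD 1.x anime checkpoint,
# so v2 and v_parameterization stay false.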
v2 = false
v_parameterization = false
pretrained_model_name_or_path = "/content/pretrained_model/anylora_novaeFp16Pruned.ckpt"
vae = "/content/vae/anime.vae.pt"
[additional_network_arguments]
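# LoRA settings: rank 32 with alpha 16, so LoRA updates are scaled by
# network_alpha / network_dim = 0.5. unet_lr and text_encoder_lr override the
# optimizer's learning_rate for their respective modules; both halves are trained.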
no_metadata = false
unet_lr = 0.0001
text_encoder_lr = 5e-5
network_module = "networks.lora"
network_dim = 32
network_alpha = 16
network_train_unet_only = false
network_train_text_encoder_only = false
[optimizer_arguments]
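# 8-bit AdamW (bitsandbytes) with a cosine-with-restarts schedule:
# 300 warmup steps, then 3 restart cycles over the run.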
optimizer_type = "AdamW8bit"
learning_rate = 0.0001
max_grad_norm = 1.0
lr_scheduler = "cosine_with_restarts"
lr_warmup_steps = 300
lr_scheduler_num_cycles = 3
[dataset_arguments]
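# Only the debug flag is set here; image folders, resolution, and repeats are
# presumably supplied through a separate dataset config passed at launch.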
debug_dataset = false
[training_arguments]
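# 10 epochs at batch size 3 with fp16 mixed precision and xformers attention.
# clip_skip = 2 and max_token_length = 225 are the usual choices for anime-style
# SD 1.x models; a checkpoint is saved every 3 epochs.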
output_dir = "/content/drive/MyDrive/LoRA/output"
output_name = "Alpha"
save_precision = "fp16"
save_every_n_epochs = 3
train_batch_size = 3
max_token_length = 225
mem_eff_attn = false
xformers = true
max_train_epochs = 10
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
gradient_checkpointing = false
gradient_accumulation_steps = 1
mixed_precision = "fp16"
clip_skip = 2
logging_dir = "/content/LoRA/logs"
log_prefix = "Alpha"
lowram = true
[sample_prompt_arguments]
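# sample_every_n_epochs = 999999 effectively disables sample image generation.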
sample_every_n_epochs = 999999
sample_sampler = "ddim"
[saving_arguments]
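# The trained LoRA is written as a .safetensors file.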
save_model_as = "safetensors"