{
"pretrained_model_name_or_path": "G:/sd/repo/models/Stable-diffusion/nai-animefull-final-pruned.safetensors",
"v2": false,
"v_parameterization": false,
"logging_dir": "",
"train_data_dir": "G:/sd/training/datasets/iroha",
"reg_data_dir": "G:/sd/training/datasets/regempty",
"output_dir": "G:/sd/repo/extensions/sd-webui-additional-networks/models/lora",
"max_resolution": "768,768",
"lr_scheduler": "constant_with_warmup",
"lr_warmup": "5",
"train_batch_size": 3,
"epoch": "3",
"save_every_n_epochs": "1",
"mixed_precision": "fp16",
"save_precision": "fp16",
"seed": "23",
"num_cpu_threads_per_process": 32,
"cache_latent": true,
"caption_extention": ".txt",
"enable_bucket": true,
"gradient_checkpointing": false,
"full_fp16": false,
"no_token_padding": false,
"stop_text_encoder_training": 0,
"use_8bit_adam": true,
"xformers": true,
"save_model_as": "safetensors",
"shuffle_caption": true,
"save_state": false,
"resume": "",
"prior_loss_weight": 1.0,
"text_encoder_lr": "1e-5",
"unet_lr": "1e-4",
"network_dim": 128,
"lora_network_weights": "",
"color_aug": false,
"flip_aug": false,
"clip_skip": 2,
"gradient_accumulation_steps": 1.0,
"mem_eff_attn": false,
"output_name": "iroha-v1-NAI-VAE-768px"
}