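# LoRA training config for kohya-ss sd-scripts, using the LyCORIS network module.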
[Basics]
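# Base checkpoint, dataset, and run length. In sd-scripts, max_train_epochs
# overrides max_train_steps, so the step cap here is a never-reached ceiling.
# clip_skip = 2 matches NAI-lineage anime models trained on the penultimate CLIP layer.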
pretrained_model_name_or_path = "D:\\stable-diffusion-webui\\models\\Stable-diffusion\\animefull-latest.ckpt"
train_data_dir = "D:\\train_data\\surtr_arknights"
resolution = "512,768"
seed = 23
max_train_steps = 99999
max_train_epochs = 24
clip_skip = 2
[Save]
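# fp16 .safetensors checkpoints every 2 epochs; the step-based save is a
# fallback for very long runs, and save_last_n_steps prunes step-saved
# checkpoints older than 200 steps. save_state is off, so the *_state keys are inert.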
output_dir = "D:\\train_out\\lora"
output_name = "surtr_arknights"
save_precision = "fp16"
save_model_as = "safetensors"
save_every_n_epochs = 2
save_every_n_steps = 9999
save_state = false
save_last_n_steps_state = 1
save_last_n_steps = 200
[SDv2]
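# All off: the base model is SD1.x, not SD2.x / v-prediction.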
v2 = false
v_parameterization = false
scale_v_pred_loss_like_noise_pred = false
[Network_setup]
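# Rank (dim) 4 with alpha 2 gives an effective scale of alpha/dim = 0.5.
# Only the U-Net is trained; the text encoder stays frozen, so the
# text_encoder_lr below has no effect.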
network_dim = 4
network_alpha = 2
dim_from_weights = false
network_dropout = 0
network_train_unet_only = true
network_train_text_encoder_only = false
resume = false
[LyCORIS]
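# LyCORIS builds the network. preset=attn-mlp places modules on the attention
# and feed-forward (MLP) layers of the transformer blocks only, and algo=lora
# selects the plain LoRA algorithm.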
network_module = "lycoris.kohya"
network_args = [ "preset=attn-mlp", "algo=lora",]
[Optimizer]
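# 8-bit AdamW from bitsandbytes; gradient checkpointing trades extra compute
# for lower VRAM, which makes batch size 8 feasible. optimizer_args are passed
# straight to the optimizer constructor: weight decay 0.1, betas (0.9, 0.99).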
train_batch_size = 8
gradient_checkpointing = true
gradient_accumulation_steps = 1
optimizer_type = "AdamW8bit"
unet_lr = 0.0006
text_encoder_lr = 0.0006
max_grad_norm = 1.0
optimizer_args = [ "weight_decay=0.1", "betas=0.9,0.99",]
[Lr_scheduler]
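# Constant learning rate with no warmup; num_cycles and power only apply to
# the cosine_with_restarts and polynomial schedulers respectively.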
lr_scheduler_type = ""
lr_scheduler = "constant"
lr_warmup_steps = 0
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1.0
[Training_precision]
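# fp16 mixed precision. full_fp16/full_bf16 would also hold gradients in
# 16-bit to save more VRAM, at some cost to numerical stability.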
mixed_precision = "fp16"
full_fp16 = false
full_bf16 = false
[Further_improvement]
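# min_snr_gamma = 0 effectively disables Min-SNR loss weighting. Multires
# "pyramid" noise is on: 6 iterations with a 0.3 discount per scale.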
min_snr_gamma = 0
multires_noise_discount = 0.3
multires_noise_iterations = 6
[ARB]
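# Aspect Ratio Bucketing: images are grouped into buckets from 320 to 960 px
# (64-px steps) and batched per bucket instead of being cropped to one size.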
enable_bucket = true
min_bucket_reso = 320
max_bucket_reso = 960
bucket_reso_steps = 64
bucket_no_upscale = false
[Captions]
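# Tags are shuffled every step; keep_tokens = 1 pins the first tag (the
# trigger word) in place. Whole captions drop out 5% of the time, and
# max_token_length raises the CLIP token limit from the default 75 to 150.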
shuffle_caption = true
caption_extension = ".txt"
keep_tokens = 1
caption_dropout_rate = 0.05
caption_dropout_every_n_epochs = 0
caption_tag_dropout_rate = 0.0
max_token_length = 150
weighted_captions = false
token_warmup_min = 1
token_warmup_step = 0
[Attention]
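# xformers memory-efficient attention; mem_eff_attn is the slower built-in
# alternative and stays off.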
mem_eff_attn = false
xformers = true
[Data_augmentation]
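# All augmentations off; color_aug and random_crop are incompatible with the
# cached latents below.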
color_aug = false
flip_aug = false
random_crop = false
[Cache_latents]
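# VAE latents are encoded once and cached to disk, so the VAE does not run
# during training steps.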
cache_latents = true
vae_batch_size = 1
cache_latents_to_disk = true
[Sampling_during_training]
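# No sample_prompts or sample_every_n_* keys are set, so no preview images
# are generated; the sampler choice is inert here.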
sample_sampler = "ddim"
[Logging]
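# TensorBoard logs under logs_training/; run directories get the lora_ prefix.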
logging_dir = "logs_training"
log_with = "tensorboard"
log_prefix = "lora_"
[Dataset]
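# Eight DataLoader workers, kept alive between epochs to avoid respawn cost.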
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
dataset_repeats = 1
[Regularization]
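# Weight on the regularization-image loss; only relevant when a reg_data_dir
# is supplied.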
prior_loss_weight = 1.0
[Huggingface]
save_state_to_huggingface = false
resume_from_huggingface = false
async_upload = false
[Debugging]
debug_dataset = false
[Deprecated]
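# Superseded flags, kept off: optimizer_type above replaces use_8bit_adam and
# use_lion_optimizer, and unet_lr/text_encoder_lr override learning_rate.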
use_8bit_adam = false
use_lion_optimizer = false
learning_rate = 0.0002
[Others]
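# training_comment is embedded in the saved model's metadata.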
lowram = false
training_comment = "nebulae"