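# LoRA training configuration in the TOML layout used by LoRA Easy Training Scripts
# (kohya sd-scripts backend). Comments below are descriptive annotations only.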
[[subsets]]
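# Dataset subset: image folder, caption file extension, and per-epoch repeat count.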
caption_extension = ".txt"
image_dir = "E:/Everything artificial intelligence/loradataset/2_ohwx sweetonedollar"
name = "5_ohwx kiken"
num_repeats = 2

[general_args.args]
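# Core training settings: base checkpoint and VAE, fp16 mixed precision, clip skip 2,
# extended 225-token captions, xformers attention, latent caching, 10 epochs.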
max_data_loader_n_workers = 1
persistent_data_loader_workers = true
pretrained_model_name_or_path = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Stable-diffusion/animefull-final-pruned-fp16.safetensors"
vae = "E:/Everything artificial intelligence/stable-diffusion-webui/models/VAE/klF8Anime2VAE_klF8Anime2VAE.safetensors"
clip_skip = 2
mixed_precision = "fp16"
seed = 23
max_token_length = 225
prior_loss_weight = 1.0
xformers = true
max_train_epochs = 10
cache_latents = true

[general_args.dataset_args]
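# Dataset-wide settings: base training resolution and batch size.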
resolution = 768
batch_size = 2

[network_args.args]
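# LoRA network settings: rank (dim) 16 with alpha 8, dropout, input-perturbation noise,
# and the full 0-1000 timestep range.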
ip_noise_gamma = 0.1
network_dropout = 0.3
network_dim = 16
network_alpha = 8.0
min_timestep = 0
max_timestep = 1000

[optimizer_args.args]
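# Optimizer settings: Adafactor with the matching "adafactor" scheduler, a separate
# (very low) text encoder learning rate, min-SNR loss weighting, and weight-norm scaling.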
learning_rate = 0.001
warmup_ratio = 0.15
text_encoder_lr = 1e-6
scale_weight_norms = 5.0
max_grad_norm = 1.0
min_snr_gamma = 8
optimizer_type = "AdaFactor"
lr_scheduler = "adafactor"

[saving_args.args]
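# Output settings: save an fp16 .safetensors checkpoint (plus this config) every epoch.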
save_precision = "fp16"
save_model_as = "safetensors"
save_every_n_epochs = 1
save_toml = true
output_dir = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Lora/sweetonedollar/sd15-test5"
save_toml_location = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Lora/sweetonedollar/sd15-test5"
output_name = "sweetonedollartest5"

[noise_args.args]
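# Noise offset added to latents during training (helps with very dark/bright images).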
noise_offset = 0.0357

[bucket_args.dataset_args]
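# Aspect-ratio bucketing: group images into resolution buckets instead of cropping to one size.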
enable_bucket = true
bucket_no_upscale = true
min_bucket_reso = 512
max_bucket_reso = 2048
bucket_reso_steps = 64

[network_args.args.network_args]
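# Additional network args: apply LoRA to convolution layers (LoCon-style) with their own dim/alpha.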
conv_dim = 24
conv_alpha = 12.0

[optimizer_args.args.optimizer_args]
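# Extra keyword arguments forwarded to the optimizer (serialized as strings by the UI).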
weight_decay = "0.05"
betas = "0.9,0.99"