AICUaki committed
Commit d7d7f62
1 Parent(s): 4917667

LuC4 test4

LuC4_config/config_file.toml CHANGED
@@ -1,38 +1,37 @@
  [sdxl_arguments]
- cache_text_encoder_outputs = true
+ cache_text_encoder_outputs = false
  no_half_vae = true
  min_timestep = 0
  max_timestep = 1000
- shuffle_caption = false
+ shuffle_caption = true
+ lowram = true

  [model_arguments]
- pretrained_model_name_or_path = "/content/pretrained_model/sd_xl_base_0.9.safetensors"
+ pretrained_model_name_or_path = "Linaqruf/animagine-xl"
  vae = "/content/vae/sdxl_vae.safetensors"

  [dataset_arguments]
  debug_dataset = false
- in_json = "/content/fine_tune/meta_lat.json"
- train_data_dir = "/content/fine_tune/train_data"
+ in_json = "/content/LoRA/meta_lat.json"
+ train_data_dir = "/content/LoRA/train_data"
  dataset_repeats = 1
  keep_tokens = 0
  resolution = "1024,1024"
- caption_dropout_rate = 0
- caption_tag_dropout_rate = 0
- caption_dropout_every_n_epochs = 0
  color_aug = false
  token_warmup_min = 1
  token_warmup_step = 0

  [training_arguments]
- output_dir = "/content/drive/MyDrive/kohya-trainer/output"
+ output_dir = "/content/drive/MyDrive/kohya-trainer/output/LuC4"
  output_name = "LuC4"
  save_precision = "fp16"
- save_every_n_steps = 1000
- train_batch_size = 1
+ save_every_n_epochs = 1
+ train_batch_size = 4
  max_token_length = 225
  mem_eff_attn = false
- xformers = true
- max_train_steps = 1000
+ sdpa = true
+ xformers = false
+ max_train_epochs = 10
  max_data_loader_n_workers = 8
  persistent_data_loader_workers = true
  gradient_checkpointing = true
@@ -42,10 +41,10 @@ mixed_precision = "fp16"
  [logging_arguments]
  log_with = "wandb"
  log_tracker_name = "LuC4"
- logging_dir = "/content/fine_tune/logs"
+ logging_dir = "/content/LoRA/logs"

  [sample_prompt_arguments]
- sample_every_n_steps = 100
+ sample_every_n_epochs = 1
  sample_sampler = "euler_a"

  [saving_arguments]
@@ -53,11 +52,24 @@ save_model_as = "safetensors"

  [optimizer_arguments]
  optimizer_type = "AdaFactor"
- learning_rate = 4e-7
- train_text_encoder = false
+ learning_rate = 0.0001
  max_grad_norm = 0
  optimizer_args = [ "scale_parameter=False", "relative_step=False", "warmup_init=False",]
  lr_scheduler = "constant_with_warmup"
  lr_warmup_steps = 100

+ [additional_network_arguments]
+ no_metadata = false
+ network_module = "networks.lora"
+ network_dim = 32
+ network_alpha = 16
+ network_args = []
+ network_train_unet_only = true
+
  [advanced_training_config]
+ save_state = false
+ save_last_n_epochs_state = false
+ caption_dropout_rate = 0
+ caption_tag_dropout_rate = 0.5
+ caption_dropout_every_n_epochs = 0
+ min_snr_gamma = 5
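
The net effect of this change is a switch from full SDXL fine-tuning to LoRA training: the new [additional_network_arguments] table loads networks.lora at rank 32 with alpha 16 (an effective weight scale of alpha/dim = 16/32 = 0.5), and min_snr_gamma = 5 enables Min-SNR loss weighting. A minimal sketch, assuming Python 3.11+ (for the stdlib tomllib parser) and this repo's file layout, of reading those new settings back out:

import tomllib

# Parse the updated config and show the settings that turn this run into
# LoRA training. The file path assumes this repo's layout (an assumption,
# not something stated in the commit itself).
with open("LuC4_config/config_file.toml", "rb") as f:
    cfg = tomllib.load(f)

net = cfg["additional_network_arguments"]
scale = net["network_alpha"] / net["network_dim"]  # LoRA weight scale, alpha/dim
print(net["network_module"], net["network_dim"], net["network_alpha"], scale)
# -> networks.lora 32 16 0.5

adv = cfg["advanced_training_config"]
print(adv["min_snr_gamma"], adv["caption_tag_dropout_rate"])
# -> 5 0.5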
LuC4_config/sample_prompt.toml CHANGED
@@ -1,8 +1,9 @@
  [prompt]
+ negative_prompt = "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, "
  width = 1024
  height = 1024
- scale = 7
+ scale = 12
  sample_steps = 28
  [[prompt.subset]]
- prompt = "1boy, aqua eyes, baseball cap, red hair, closed mouth, earrings, green background, hat, hoop earrings, jewelry, looking at viewer, shirt, short hair, simple background, solo, upper body, yellow shirt"
+ prompt = "masterpiece, best quality, face focus, cute, 1girl, green hair, sweater, looking at viewer, upper body, beanie, outdoors, night, turtleneck"

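
For context, the two files travel together: the trainer reads config_file.toml for its arguments and sample_prompt.toml for the prompts used to render samples during training (sample_every_n_epochs = 1 above). A hedged sketch of a typical launch, assuming the kohya-ss sd-scripts entry point sdxl_train_network.py and its --config_file / --sample_prompts options; the paths follow this repo's layout:

import subprocess

# Hedged launch sketch: flag names follow kohya-ss/sd-scripts' train_network
# entry points; adjust the script path to your sd-scripts checkout.
subprocess.run(
    [
        "accelerate", "launch", "sdxl_train_network.py",
        "--config_file", "LuC4_config/config_file.toml",
        "--sample_prompts", "LuC4_config/sample_prompt.toml",
    ],
    check=True,
)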