kilog committed on
Commit 92df8d6
1 Parent(s): b681eb8

feat: upload LSHAW lora model

LSHAW_config/config_file.toml ADDED
@@ -0,0 +1,61 @@
+ [model_arguments]
+ v2 = false
+ v_parameterization = false
+ pretrained_model_name_or_path = "/content/pretrained_model/AOM3A3.safetensors"
+ vae = "/content/vae/anime.vae.pt"
+
+ [additional_network_arguments]
+ no_metadata = false
+ unet_lr = 0.00025
+ text_encoder_lr = 2e-5
+ network_module = "lycoris.kohya"
+ network_dim = 16
+ network_alpha = 8
+ network_args = [ "conv_dim=8", "conv_alpha=1", "algo=loha",]
+ network_train_unet_only = false
+ network_train_text_encoder_only = false
+
+ [optimizer_arguments]
+ min_snr_gamma = 5
+ optimizer_type = "AdamW8bit"
+ learning_rate = 0.00025
+ max_grad_norm = 1.0
+ lr_scheduler = "cosine_with_restarts"
+ lr_warmup_steps = 300
+ lr_scheduler_num_cycles = 3
+
+ [dataset_arguments]
+ cache_latents = true
+ debug_dataset = false
+ vae_batch_size = 2
+
+ [training_arguments]
+ output_dir = "/content/LoRA/output"
+ output_name = "LSHAW"
+ save_precision = "fp16"
+ save_every_n_epochs = 2
+ train_batch_size = 2
+ max_token_length = 225
+ mem_eff_attn = false
+ xformers = true
+ max_train_epochs = 50
+ max_data_loader_n_workers = 8
+ persistent_data_loader_workers = true
+ gradient_checkpointing = false
+ gradient_accumulation_steps = 1
+ mixed_precision = "fp16"
+ clip_skip = 2
+ logging_dir = "/content/LoRA/logs"
+ log_prefix = "LSHAW"
+ noise_offset = 0.03
+ lowram = true
+
+ [sample_prompt_arguments]
+ sample_every_n_epochs = 999999
+ sample_sampler = "ddim"
+
+ [dreambooth_arguments]
+ prior_loss_weight = 1.0
+
+ [saving_arguments]
+ save_model_as = "safetensors"
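As a reading aid, here is a minimal sketch of how the `[additional_network_arguments]` block above can be unpacked in Python, in particular the `network_args` list of `key=value` strings passed to `lycoris.kohya`. The `tomllib` loader and the `describe_network` helper are illustrative assumptions, not part of this upload.

```python
# Minimal sketch: read config_file.toml and unpack the LyCORIS network settings.
# Assumes Python 3.11+ (tomllib in the standard library); the path and helper
# below are illustrative, not repository code.
import tomllib

def describe_network(config_path: str) -> dict:
    """Return the network module name plus its parsed network_args."""
    with open(config_path, "rb") as f:
        cfg = tomllib.load(f)
    net = cfg["additional_network_arguments"]
    # network_args is a list of "key=value" strings, e.g. "algo=loha".
    extra = dict(item.split("=", 1) for item in net["network_args"])
    return {
        "module": net["network_module"],   # "lycoris.kohya"
        "dim": net["network_dim"],         # 16
        "alpha": net["network_alpha"],     # 8
        **extra,                           # conv_dim, conv_alpha, algo
    }

if __name__ == "__main__":
    print(describe_network("LSHAW_config/config_file.toml"))
    # -> {'module': 'lycoris.kohya', 'dim': 16, 'alpha': 8,
    #     'conv_dim': '8', 'conv_alpha': '1', 'algo': 'loha'}
```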
LSHAW_config/dataset_config.toml ADDED
@@ -0,0 +1,22 @@
+ [[datasets]]
+ resolution = 896
+ min_bucket_reso = 320
+ max_bucket_reso = 1280
+ caption_dropout_rate = 0
+ caption_tag_dropout_rate = 0
+ caption_dropout_every_n_epochs = 0
+ flip_aug = false
+ color_aug = false
+ [[datasets.subsets]]
+ image_dir = "/content/LoRA/train_data"
+ class_tokens = "1male"
+ num_repeats = 4
+
+
+ [general]
+ enable_bucket = true
+ caption_extension = ".txt"
+ shuffle_caption = true
+ keep_tokens = 0
+ bucket_reso_steps = 64
+ bucket_no_upscale = false
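The training length implied by these files follows from `num_repeats` (dataset_config.toml) together with `train_batch_size`, `gradient_accumulation_steps`, and `max_train_epochs` (config_file.toml). A minimal sketch, assuming a hypothetical image count, since the training-set size is not recorded in this upload; bucketing can shift the exact step count slightly, so treat the result as an estimate.

```python
# Minimal sketch: estimate the optimizer-step budget implied by the two configs.
# The image count is a parameter here because it is not part of this repository.
import math

def estimate_steps(num_images: int,
                   num_repeats: int = 4,        # dataset_config.toml
                   train_batch_size: int = 2,   # config_file.toml
                   grad_accum_steps: int = 1,
                   max_train_epochs: int = 50) -> tuple[int, int]:
    """Return (steps_per_epoch, total_steps) for the given dataset size."""
    samples_per_epoch = num_images * num_repeats
    steps_per_epoch = math.ceil(samples_per_epoch / (train_batch_size * grad_accum_steps))
    return steps_per_epoch, steps_per_epoch * max_train_epochs

if __name__ == "__main__":
    # e.g. a hypothetical 100-image training set:
    print(estimate_steps(100))   # -> (200, 10000)
```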
LSHAW_config/sample_prompt.txt ADDED
@@ -0,0 +1,2 @@
+
+ masterpiece, best quality, 1girl, aqua eyes, baseball cap, blonde hair, closed mouth, earrings, green background, hat, hoop earrings, jewelry, looking at viewer, shirt, short hair, simple background, solo, upper body, yellow shirt --n lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry --w 512 --h 768 --l 7 --s 28
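The prompt above uses kohya-style inline flags: `--n` negative prompt, `--w`/`--h` width and height, `--l` guidance scale, and `--s` sampling steps. A minimal parsing sketch, assuming that flag layout; the helper is illustrative and not part of this upload.

```python
# Minimal sketch: split a kohya-style sample prompt line into the positive prompt,
# the negative prompt (--n), and the numeric options (--w, --h, --l, --s).
import re

def parse_sample_prompt(line: str) -> dict:
    """Split the prompt line on its inline -- flags."""
    parts = re.split(r"\s--(\w)\s+", line.strip())
    result = {"prompt": parts[0].strip()}
    flags = {"n": "negative_prompt", "w": "width", "h": "height",
             "l": "cfg_scale", "s": "steps"}
    for flag, value in zip(parts[1::2], parts[2::2]):
        result[flags.get(flag, flag)] = value.strip()
    return result

if __name__ == "__main__":
    with open("LSHAW_config/sample_prompt.txt") as f:
        for line in f:
            if line.strip():
                print(parse_sample_prompt(line))
```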