lip421 committed
Commit ade7fe9
1 Parent(s): cca9ea9

Upload gongzhuqun LoRA models from Kaggle.

configs/datasets_config.toml ADDED
@@ -0,0 +1,25 @@
+ [[datasets]]
+ resolution = [ 512, 768,]
+ min_bucket_reso = 256
+ max_bucket_reso = 2048
+ caption_dropout_rate = 0.1
+ caption_tag_dropout_rate = 0.1
+ caption_dropout_every_n_epochs = 0
+ flip_aug = false
+ color_aug = false
+ random_crop = false
+ token_warmup_min = 0
+ token_warmup_step = 0
+ [[datasets.subsets]]
+ image_dir = "/kaggle/input/gongzhuqun/gongzhuqun"
+ class_tokens = "gongzhuqun"
+ num_repeats = 20
+
+
+ [general]
+ enable_bucket = true
+ caption_extension = ".txt"
+ shuffle_caption = true
+ keep_tokens = 1
+ bucket_reso_steps = 64
+ bucket_no_upscale = true
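
The dataset config above follows the kohya-ss sd-scripts dataset TOML layout: one [[datasets]] block with a [[datasets.subsets]] entry pointing at the Kaggle image directory, plus [general] bucketing defaults. A minimal sketch for sanity-checking the file before a run, assuming Python 3.11+ (stdlib tomllib) and the configs/ path used in this commit:

```python
# Minimal sketch: load the dataset config and print what will be trained on.
# Assumes Python 3.11+ for tomllib and the relative path from this commit.
import tomllib

with open("configs/datasets_config.toml", "rb") as f:
    cfg = tomllib.load(f)

print("bucketing enabled:", cfg["general"]["enable_bucket"])
for ds in cfg["datasets"]:
    print("resolution:", ds["resolution"])
    for subset in ds["subsets"]:
        print(f'  {subset["image_dir"]} -> class_tokens={subset["class_tokens"]}, '
              f'num_repeats={subset["num_repeats"]}')
```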
configs/sample_prompts.txt ADDED
@@ -0,0 +1,3 @@
+
+ gongzhuqun, 1girl, solo, standing, bare shoulders, full body, white footwear, high heels, hand on hip, --n lowres,blurry,low quality,monochrome,grayscale,worstquality, --w 512 --h 768 --l 8 --s 30
+
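
The sample prompt uses the sd-scripts sample-prompt syntax: the positive prompt comes first, then inline flags (--n negative prompt, --w/--h width and height, --l CFG scale, --s sampling steps). A small sketch that splits one such line, purely to illustrate the syntax; the parsing helper itself is an illustration, not part of the trainer:

```python
# Minimal sketch: split a kohya-style sample prompt line into its positive prompt
# and its flags (--n negative prompt, --w/--h size, --l CFG scale, --s steps).
line = (
    "gongzhuqun, 1girl, solo, standing, bare shoulders, full body, white footwear, "
    "high heels, hand on hip, --n lowres,blurry,low quality,monochrome,grayscale,"
    "worstquality, --w 512 --h 768 --l 8 --s 30"
)

positive, *flag_parts = line.split(" --")
flags = {}
for part in flag_parts:
    key, _, value = part.partition(" ")
    flags[key] = value.strip().rstrip(",")

print(positive.rstrip(", "))  # gongzhuqun, 1girl, ..., hand on hip
print(flags)                  # {'n': 'lowres,...,worstquality', 'w': '512', 'h': '768', 'l': '8', 's': '30'}
```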
configs/training_config.toml ADDED
@@ -0,0 +1,107 @@
+ [model_arguments]
+ v2 = false
+ v_parameterization = false
+ pretrained_model_name_or_path = "/kaggle/working/pretrained_model/chilloutmix_NiPrunedFp32Fix.safetensors"
+ vae = "/kaggle/working/vae/vae-ft-mse-840000-ema-pruned.safetensors"
+
+ [additional_network_arguments]
+ unet_lr = 5e-5
+ text_encoder_lr = 1e-5
+ network_module = "networks.lora"
+ network_dim = 64
+ network_alpha = 32
+ network_args = []
+ scale_weight_norms = 1.0
+ network_train_unet_only = false
+ network_train_text_encoder_only = false
+ no_metadata = true
+ training_comment = "Trained by Kohya Trainer Script by DukeG_89"
+
+ [sai_model_spec]
+ metadata_title = "SD1.5 LoRA"
+ metadata_author = "lip421"
+
+ [optimizer_arguments]
+ min_snr_gamma = 5
+ optimizer_type = "Lion"
+ learning_rate = 1.0
+ max_grad_norm = 1.0
+ optimizer_args = [ "weight_decay=0.01", "betas=.95,.98",]
+ lr_scheduler = "cosine_with_restarts"
+ lr_warmup_steps = 0
+ lr_scheduler_num_cycles = 0
+
+ [dataset_arguments]
+ cache_latents = true
+ cache_latents_to_disk = false
+ debug_dataset = false
+ vae_batch_size = 1
+
+ [training_arguments]
+ output_dir = "/kaggle/working/Train_Results/outputs"
+ output_name = "gongzhuqun"
+ save_every_n_epochs = 1
+ save_state = false
+ train_batch_size = 1
+ max_token_length = 225
+ xformers = true
+ max_train_epochs = 20
+ max_data_loader_n_workers = 8
+ persistent_data_loader_workers = true
+ seed = 10086
+ gradient_checkpointing = true
+ mixed_precision = "fp16"
+ save_precision = "fp16"
+ clip_skip = 2
+ logging_dir = "/kaggle/working/Train_Results/logs"
+ log_prefix = "gongzhuqun"
+ lowram = true
+
+ [v_pred_loss]
+ scale_v_pred_loss_like_noise_pred = false
+
+ [noise_offset_arguments]
+
+ [pyramid_noise_arguments]
+ multires_noise_iterations = 8
+ multires_noise_discount = 0.3
+
+ [input_perturbation_noise]
+ ip_noise_gamma = 0.1
+
+ [debiased_estimation_loss]
+ debiased_estimation_loss = true
+
+ [sample_prompts_arguments]
+ sample_every_n_steps = 0
+ sample_every_n_epochs = 1
+ sample_sampler = "euler_a"
+ sample_at_first = true
+
+ [sdxl_arguments]
+
+ [dreambooth_arguments]
+ prior_loss_weight = 1.0
+
+ [SDXL_Controlnet_lllite]
+
+ [saving_arguments]
+ save_model_as = "safetensors"
+
+ [DDP_training_arguments]
+
+ [Torch_Compile_arguments]
+ dynamo_backend = "inductor"
+
+ [huggingface_arguments]
+ huggingface_token = "aGZfTmJOcEVZaldaVEdHb3lKSVFhbmlzRUNjSHl3bGRzZGJocg=="
+ huggingface_repo_id = "lip421/gongzhuqun"
+ huggingface_repo_type = "model"
+ huggingface_path_in_repo = "output"
+ huggingface_repo_visibility = "private"
+ async_upload = true
+
+ [wandb_arguments]
+ log_with = "all"
+ wandb_api_key = "OTAxZWM2NDUyNGY1OWVkOWQwMDQ0NmU0Nzc4OGQxMzVkNTNlZTZiNg=="
+ log_tracker_name = "gongzhuqun"
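
Together, the three files cover a full LoRA run: training_config.toml holds the hyperparameters (the section headers group related arguments for readability), datasets_config.toml the bucketing and subset layout, and sample_prompts.txt the per-epoch preview prompts. A minimal sketch of how they are typically wired together, assuming the kohya-ss sd-scripts trainer (train_network.py) with its --config_file, --dataset_config and --sample_prompts options; the working-directory layout is an assumption, not part of this commit:

```python
# Minimal sketch: launch a LoRA training run that consumes the three uploaded
# config files. Assumes kohya-ss sd-scripts and accelerate are installed and
# that train_network.py accepts the flags below; paths mirror this commit.
import subprocess

subprocess.run(
    [
        "accelerate", "launch", "train_network.py",
        "--config_file=configs/training_config.toml",     # hyperparameters
        "--dataset_config=configs/datasets_config.toml",   # buckets, subsets, repeats
        "--sample_prompts=configs/sample_prompts.txt",     # previews each epoch (sample_every_n_epochs = 1)
    ],
    check=True,
)
```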