kanaejun committed on
Commit
6965127
1 Parent(s): b904804

Upload hypno_230527d_hugging.json

Files changed (1)
  1. hypno_230527d_hugging.json +81 -0
hypno_230527d_hugging.json ADDED
@@ -0,0 +1,81 @@
+ {
+ "pretrained_model_name_or_path": "C:/**************Stable-diffusion/AC_hypnobase_v_230528f-12.safetensors",
+ "v2": false,
+ "v_parameterization": false,
+ "logging_dir": "",
+ "train_data_dir": "D:/********************",
+ "reg_data_dir": "",
+ "output_dir": "D:/***********/_output",
+ "max_resolution": "512,512",
+ "learning_rate": "",
+ "lr_scheduler": "cosine",
+ "lr_warmup": "10",
+ "train_batch_size": 1,
+ "epoch": 20,
+ "save_every_n_epochs": 2,
+ "mixed_precision": "fp16",
+ "save_precision": "fp16",
+ "seed": "",
+ "num_cpu_threads_per_process": 3,
+ "cache_latents": true,
+ "caption_extension": ".txt",
+ "enable_bucket": true,
+ "gradient_checkpointing": false,
+ "full_fp16": false,
+ "no_token_padding": false,
+ "stop_text_encoder_training": 0,
+ "xformers": false,
+ "save_model_as": "safetensors",
+ "shuffle_caption": false,
+ "save_state": false,
+ "resume": "",
+ "prior_loss_weight": 1.0,
+ "text_encoder_lr": "5e-5",
+ "unet_lr": "1e-4",
+ "network_dim": 256,
+ "lora_network_weights": "",
+ "color_aug": false,
+ "flip_aug": false,
+ "clip_skip": "1",
+ "gradient_accumulation_steps": 1.0,
+ "mem_eff_attn": false,
+ "output_name": "tareme_eyes",
+ "model_list": "custom",
+ "max_token_length": "75",
+ "max_train_epochs": "",
+ "max_data_loader_n_workers": "",
+ "network_alpha": 128,
+ "training_comment": "",
+ "keep_tokens": "0",
+ "lr_scheduler_num_cycles": "",
+ "lr_scheduler_power": "",
+ "persistent_data_loader_workers": false,
+ "bucket_no_upscale": true,
+ "random_crop": false,
+ "bucket_reso_steps": 64.0,
+ "caption_dropout_every_n_epochs": 0.0,
+ "caption_dropout_rate": 0,
+ "optimizer": "AdamW",
+ "optimizer_args": "",
+ "noise_offset": "",
+ "LoRA_type": "Standard",
+ "conv_dim": 1,
+ "conv_alpha": 1,
+ "sample_every_n_steps": 0,
+ "sample_every_n_epochs": 0,
+ "sample_sampler": "euler_a",
+ "sample_prompts": "",
+ "additional_parameters": "",
+ "vae_batch_size": 0,
+ "min_snr_gamma": 0,
+ "down_lr_weight": "1,0,0,0,0,0,0,0,0,0,0,0",
+ "mid_lr_weight": "0",
+ "up_lr_weight": "1,1,1,1,1,1,1,1,1,1,1,1",
+ "block_lr_zero_threshold": "",
+ "block_dims": "",
+ "block_alphas": "",
+ "conv_dims": "",
+ "conv_alphas": "",
+ "weighted_captions": false,
+ "unit": 1
+ }
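
The keys above appear to follow the training-settings JSON that the kohya_ss GUI saves for LoRA runs. A minimal sketch of loading and inspecting the uploaded file with Python's standard json module, assuming the filename from this commit and printing a few keys taken from the config above:

import json

# Load the uploaded training config (filename assumed from this commit).
with open("hypno_230527d_hugging.json", encoding="utf-8") as f:
    config = json.load(f)

# Print a few of the settings that define this LoRA run.
for key in ("pretrained_model_name_or_path", "LoRA_type", "network_dim",
            "network_alpha", "unet_lr", "text_encoder_lr", "epoch"):
    print(f"{key}: {config[key]}")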