feat: upload lora model
Browse files- _config/config_file.toml +3 -3
- _config/sample_prompt.toml +1 -1
_config/config_file.toml
CHANGED
@@ -22,8 +22,8 @@ token_warmup_min = 1
|
|
22 |
token_warmup_step = 0
|
23 |
|
24 |
[training_arguments]
|
25 |
-
output_dir = "/content/LoRA/output/"
|
26 |
-
output_name = ""
|
27 |
save_precision = "fp16"
|
28 |
save_every_n_epochs = 2
|
29 |
train_batch_size = 4
|
@@ -41,7 +41,7 @@ mixed_precision = "fp16"
|
|
41 |
[logging_arguments]
|
42 |
log_with = "tensorboard"
|
43 |
logging_dir = "/content/LoRA/logs"
|
44 |
-
log_prefix = ""
|
45 |
|
46 |
[sample_prompt_arguments]
|
47 |
sample_every_n_epochs = 1
|
|
|
22 |
token_warmup_step = 0
|
23 |
|
24 |
[training_arguments]
|
25 |
+
output_dir = "/content/LoRA/output/mutsuki"
|
26 |
+
output_name = "mutsuki"
|
27 |
save_precision = "fp16"
|
28 |
save_every_n_epochs = 2
|
29 |
train_batch_size = 4
|
|
|
41 |
[logging_arguments]
|
42 |
log_with = "tensorboard"
|
43 |
logging_dir = "/content/LoRA/logs"
|
44 |
+
log_prefix = "mutsuki"
|
45 |
|
46 |
[sample_prompt_arguments]
|
47 |
sample_every_n_epochs = 1
|
_config/sample_prompt.toml
CHANGED
@@ -5,5 +5,5 @@ height = 1024
|
|
5 |
scale = 12
|
6 |
sample_steps = 28
|
7 |
[[prompt.subset]]
|
8 |
-
prompt = "masterpiece,best quality,"
|
9 |
|
|
|
5 |
scale = 12
|
6 |
sample_steps = 28
|
7 |
[[prompt.subset]]
|
8 |
+
prompt = "masterpiece,best quality,mutsuki,1girl"
|
9 |
|