Gary0076 committed
Commit 06d2bb5 · 1 Parent(s): 18ba4c3

feat: upload ohishi-izumi lora model

ohishi-izumi_config/config_file.toml CHANGED
@@ -1,7 +1,7 @@
 [model_arguments]
 v2 = false
 v_parameterization = false
-pretrained_model_name_or_path = "/content/pretrained_model/AnyLoRA.safetensors"
+pretrained_model_name_or_path = "/content/pretrained_model/Animefull-final-pruned.ckpt"
 
 [additional_network_arguments]
 no_metadata = false
@@ -18,7 +18,7 @@ optimizer_type = "AdamW8bit"
 learning_rate = 0.0001
 max_grad_norm = 1.0
 lr_scheduler = "constant_with_warmup"
-lr_warmup_steps = 3
+lr_warmup_steps = 5
 
 [dataset_arguments]
 debug_dataset = false
@@ -39,7 +39,7 @@ token_warmup_step = 0
 output_dir = "/content/LoRA/output"
 output_name = "ohishi-izumi"
 save_precision = "fp16"
-save_every_n_epochs = 6
+save_every_n_epochs = 4
 train_batch_size = 3
 max_token_length = 225
 mem_eff_attn = false
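
For reference, the committed values can be verified locally by parsing the TOML. The sketch below is not part of the commit: it assumes Python 3.11+ (for the standard-library tomllib) and the file path shown in the diff header, and it searches every table for the three keys this commit changes rather than hard-coding section names, since only [model_arguments], [additional_network_arguments], and [dataset_arguments] appear in the context lines.

# Minimal verification sketch (assumes Python 3.11+ and the repo layout from the diff).
import tomllib

CONFIG_PATH = "ohishi-izumi_config/config_file.toml"  # path as shown in the diff header
CHANGED_KEYS = {"pretrained_model_name_or_path", "lr_warmup_steps", "save_every_n_epochs"}

with open(CONFIG_PATH, "rb") as f:  # tomllib requires a binary file handle
    config = tomllib.load(f)

# Walk each [table] in the config and print the keys this commit modified.
for table_name, table in config.items():
    if not isinstance(table, dict):
        continue
    for key, value in table.items():
        if key in CHANGED_KEYS:
            print(f"[{table_name}] {key} = {value!r}")

With the committed config this should report the new pretrained_model_name_or_path ("/content/pretrained_model/Animefull-final-pruned.ckpt"), lr_warmup_steps = 5, and save_every_n_epochs = 4.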