naumnaum committed
Commit 5f52f15 · verified · 1 Parent(s): 74fb21a

Upload v1/training_config.toml with huggingface_hub

Files changed (1):
  v1/training_config.toml (+56, -0)
v1/training_config.toml ADDED
@@ -0,0 +1,56 @@
+ pretrained_model_name_or_path = "/workspace/models/flux1-dev.safetensors"
+ ae = "/workspace/models/ae.safetensors"
+ t5xxl = "/workspace/models/t5xxl_fp16.safetensors"
+ clip_l = "/workspace/models/clip_l.safetensors"
+ output_dir = "/workspace/kohya_models/rita-v1"
+ dataset_config = "/workspace/kohya_models/rita-v1/dataset_config.json"
+ network_dim = 16
+ network_alpha = 16
+ train_batch_size = 2
+ optimizer_type = "Adamw8bit"
+ unet_lr = 0.0005
+ epoch = 12
+ max_train_steps = 1500
+ apply_t5_attn_mask = false
+ cache_latents = true
+ cache_latents_to_disk = true
+ cache_text_encoder_outputs = true
+ cache_text_encoder_outputs_to_disk = true
+ clip_skip = 1
+ discrete_flow_shift = 3.1582
+ full_bf16 = true
+ mixed_precision = "bf16"
+ gradient_accumulation_steps = 1
+ gradient_checkpointing = true
+ guidance_scale = 1.0
+ highvram = true
+ huber_c = 0.1
+ huber_schedule = "snr"
+ loss_type = "l2"
+ lr_scheduler = "cosine_with_restarts"
+ lr_scheduler_args = []
+ lr_scheduler_num_cycles = 3
+ lr_scheduler_power = 1
+ max_data_loader_n_workers = 0
+ max_grad_norm = 1
+ max_timestep = 1000
+ min_snr_gamma = 5
+ model_prediction_type = "raw"
+ network_args = [ "train_double_block_indices=all", "train_single_block_indices=all",]
+ network_module = "networks.lora_flux"
+ network_train_unet_only = true
+ noise_offset = 0.1
+ noise_offset_type = "Original"
+ optimizer_args = []
+ prior_loss_weight = 1
+ sample_sampler = "euler"
+ sdpa = true
+ seed = 42
+ t5xxl_max_token_length = 512
+ text_encoder_lr = []
+ timestep_sampling = "sigmoid"
+ output_name = "lora"
+ save_state_to_huggingface = true
+ save_model_as = "safetensors"
+ save_every_n_epochs = 1
+ save_precision = "bf16"
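
The file above is a kohya-ss sd-scripts-style FLUX LoRA training config (it targets networks.lora_flux against a flux1-dev checkpoint). Below is a minimal sketch for loading and sanity-checking it before a run; it assumes Python 3.11+ (for the standard-library tomllib) and assumes the file is checked out locally at v1/training_config.toml, which is not part of the commit itself.

    # Minimal sketch: load and sanity-check the uploaded TOML before launching training.
    # The path below is an assumption about the local checkout, not part of the commit.
    import tomllib

    with open("v1/training_config.toml", "rb") as f:
        cfg = tomllib.load(f)

    # Print the hyperparameters most likely to change between runs.
    for key in ("pretrained_model_name_or_path", "network_dim", "network_alpha",
                "unet_lr", "train_batch_size", "max_train_steps", "lr_scheduler"):
        print(f"{key} = {cfg[key]}")

    # Basic consistency checks drawn from the values in this config.
    assert cfg["network_alpha"] <= cfg["network_dim"], "alpha above dim is unusual for LoRA"
    assert cfg["mixed_precision"] == "bf16" and cfg["full_bf16"], "bf16 settings should match"

In sd-scripts these keys mirror the training script's command-line arguments, so the same TOML can typically be passed to the trainer unchanged via its --config_file option.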