naumnaum committed (verified)
Commit 766fe33 · 1 Parent(s): 3c5eaed

Upload rita-v15-rembg-upscaled-8gpu/training_config.toml with huggingface_hub

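For context, a minimal sketch of how a file like this can be pushed with huggingface_hub's HfApi.upload_file. The repo id below is hypothetical; the commit page does not name the target repository, only the commit message is taken from it.

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or HF_TOKEN
api.upload_file(
    path_or_fileobj="rita-v15-rembg-upscaled-8gpu/training_config.toml",
    path_in_repo="rita-v15-rembg-upscaled-8gpu/training_config.toml",
    repo_id="naumnaum/rita-kohya-tests",  # hypothetical repo id, not stated in the commit
    repo_type="model",
    commit_message="Upload rita-v15-rembg-upscaled-8gpu/training_config.toml with huggingface_hub",
)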
rita-v15-rembg-upscaled-8gpu/training_config.toml ADDED
@@ -0,0 +1,62 @@
+ pretrained_model_name_or_path = "/home/ubuntu/sv-training-texas/models/flux1-dev.safetensors"
+ ae = "/home/ubuntu/sv-training-texas/models/ae.safetensors"
+ t5xxl = "/home/ubuntu/sv-training-texas/models/t5xxl_fp16.safetensors"
+ clip_l = "/home/ubuntu/sv-training-texas/models/clip_l.safetensors"
+ output_dir = "/home/ubuntu/sv-training-texas/kohya_models/rita-kohya-tests/rita-v15-rembg-upscaled-8gpu"
+ dataset_repeats = 10
+ resolution = "512,512"
+ train_batch_size = 2
+ network_dim = 16
+ network_alpha = 16
+ optimizer_type = "adamwschedulefree"
+ unet_lr = 0.0005
+ epoch = 12
+ max_train_steps = 100
+ apply_t5_attn_mask = false
+ cache_latents = true
+ cache_latents_to_disk = true
+ cache_text_encoder_outputs = true
+ cache_text_encoder_outputs_to_disk = true
+ clip_skip = 1
+ discrete_flow_shift = 3.1582
+ full_bf16 = true
+ mixed_precision = "bf16"
+ gradient_accumulation_steps = 1
+ gradient_checkpointing = true
+ guidance_scale = 1.0
+ highvram = true
+ huber_c = 0.1
+ huber_schedule = "snr"
+ loss_type = "l2"
+ lr_scheduler = "cosine_with_restarts"
+ lr_scheduler_args = []
+ lr_scheduler_num_cycles = 3
+ lr_scheduler_power = 1
+ max_data_loader_n_workers = 0
+ max_grad_norm = 1
+ max_timestep = 1000
+ min_snr_gamma = 5
+ model_prediction_type = "raw"
+ network_module = "networks.lora_flux"
+ network_train_unet_only = true
+ noise_offset = 0.1
+ noise_offset_type = "Original"
+ optimizer_args = []
+ prior_loss_weight = 1
+ sdpa = true
+ seed = 42
+ t5xxl_max_token_length = 512
+ text_encoder_lr = []
+ timestep_sampling = "sigmoid"
+ output_name = "lora"
+ save_state_to_huggingface = true
+ save_model_as = "safetensors"
+ save_every_n_epochs = 2
+ save_precision = "fp16"
+ caption_extension = ".txt"
+ xformers = "sdpa"
+ enable_bucket = false
+ dataset_config = "/home/ubuntu/sv-training-texas/kohya_models/rita-kohya-tests/rita-v15-rembg-upscaled-8gpu/dataset_config.json"
+ sample_sampler = "euler"
+ sample_prompts = "kohya_configs/sample_prompts.txt"
+ wandb_api_key = ""
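
For reference, a config in this form is typically passed to kohya-ss sd-scripts through its --config_file option. A minimal launch sketch, assuming the flux_train_network.py entry point and the eight-GPU setup implied by the directory name; the process count and script location are assumptions, not stated in this commit.

import subprocess

config_path = (
    "/home/ubuntu/sv-training-texas/kohya_models/rita-kohya-tests/"
    "rita-v15-rembg-upscaled-8gpu/training_config.toml"
)

# accelerate spawns one process per GPU; bf16 matches mixed_precision in the config
subprocess.run(
    [
        "accelerate", "launch",
        "--num_processes", "8",        # assumed from the "8gpu" directory name
        "--mixed_precision", "bf16",
        "flux_train_network.py",       # kohya-ss sd-scripts FLUX script (assumed path)
        "--config_file", config_path,
    ],
    check=True,
)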