pretrained_model_name_or_path = "/workspace/models/flux1-dev.safetensors" ae = "/workspace/models/ae.safetensors" t5xxl = "/workspace/models/t5xxl_fp16.safetensors" clip_l = "/workspace/models/clip_l.safetensors" output_dir = "/workspace/kohya_models/rita-v5" dataset_repeats = 16 resolution = "512,512" train_batch_size = 2 network_dim = 16 network_alpha = 16 optimizer_type = "Adamw8bit" unet_lr = 0.0005 epoch = 12 max_train_steps = 1500 apply_t5_attn_mask = false cache_latents = true cache_latents_to_disk = true cache_text_encoder_outputs = true cache_text_encoder_outputs_to_disk = true clip_skip = 1 discrete_flow_shift = 3.1582 full_bf16 = true mixed_precision = "bf16" gradient_accumulation_steps = 1 gradient_checkpointing = true guidance_scale = 1.0 highvram = true huber_c = 0.1 huber_schedule = "snr" loss_type = "l2" lr_scheduler = "cosine_with_restarts" lr_scheduler_args = [] lr_scheduler_num_cycles = 3 lr_scheduler_power = 1 max_data_loader_n_workers = 0 max_grad_norm = 1 max_timestep = 1000 min_snr_gamma = 5 model_prediction_type = "raw" network_args = [ "train_double_block_indices=all", "train_single_block_indices=all",] network_module = "networks.lora_flux" network_train_unet_only = true noise_offset = 0.1 noise_offset_type = "Original" optimizer_args = [] prior_loss_weight = 1 sample_sampler = "euler" sdpa = true seed = 42 t5xxl_max_token_length = 512 text_encoder_lr = [] timestep_sampling = "sigmoid" output_name = "lora" save_state_to_huggingface = true save_model_as = "safetensors" save_every_n_epochs = 1 save_precision = "bf16" caption_extension = ".txt" xformers = "sdpa" enable_bucket = false dataset_config = "/workspace/kohya_models/rita-v5/dataset_config.json"