# LoRA training configuration.
# NOTE(review): key names match kohya-ss sd-scripts / its GUI wrappers
# (train_network) — confirm against the actual consumer.
# Reformatted to valid TOML: the original packed all 55 key/value pairs onto a
# single line, which is a TOML syntax error (one pair per line is required).
# All keys and values are unchanged.

# Model & paths
pretrained_model_name_or_path = "./sd-model/v6.safetensors"
train_data_dir = "./train/aki/honkai_impact_3rd"
output_dir = "./outputs"
output_name = "benghua3_pony"
logging_dir = "./outputs/logs"
sample_prompts = "./outputs/prompt.txt"

# Dataset & aspect-ratio bucketing
resolution = "1024,1024"  # "W,H" string — format expected by the trainer
enable_bucket = true
bucket_reso_steps = 32
min_bucket_reso = 384
max_bucket_reso = 2176
caption_extension = ".txt"
cache_latents = true
cache_latents_to_disk = true
max_data_loader_n_workers = 0

# LoRA network
network_module = "networks.lora"
network_dim = 16
network_alpha = 8
network_args = []

# Training schedule
epoch = 20
max_train_epochs = 20
train_batch_size = 20
gradient_accumulation_steps = 1
seed = 12345

# Optimizer & learning rates
optimizer_type = "AdamW"
optimizer_args = []
learning_rate = 0.0002
unet_lr = 0.0001
text_encoder_lr = 0.0001
lr_scheduler = "constant"
lr_scheduler_args = []
lr_scheduler_num_cycles = 1
lr_scheduler_power = 1
max_grad_norm = 1

# Loss & noise settings
loss_type = "l2"
huber_c = 0.1          # only used by Huber-family losses — presumably inert with loss_type = "l2"
huber_schedule = "snr"
prior_loss_weight = 1
noise_offset = 0.035
noise_offset_type = "Original"
max_timestep = 1000

# Precision & performance
mixed_precision = "fp16"
gradient_checkpointing = true
xformers = true
no_half_vae = true
dynamo_backend = "no"
clip_skip = 2
max_token_length = 75

# Saving & sampling
save_model_as = "safetensors"
save_precision = "fp16"
save_every_n_epochs = 5
save_last_n_steps_state = 1
sample_every_n_epochs = 1
sample_sampler = "euler_a"

# Misc
training_comment = "example"