# LoRA training configuration (kohya sd-scripts style, SDXL / Pony Diffusion V6 base).
# Reformatted to valid TOML: one key/value per line, one table header per line,
# sub-tables grouped directly under their parents.

[[subsets]]
num_repeats = 1
caption_extension = ".txt"
shuffle_caption = true
flip_aug = false
is_reg = false
# Literal string: no backslash escaping needed for the Windows separator.
image_dir = 'E:/Everything artificial intelligence/loradataset\5_ohwx sweetonedollar'
keep_tokens = 0

# Intentionally empty: sample generation during training is disabled.
[sample_args]

[general_args.args]
pretrained_model_name_or_path = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Stable-diffusion/ponyDiffusionV6XL_v6StartWithThisOne.safetensors"
mixed_precision = "fp16"
seed = 24
max_data_loader_n_workers = 1
persistent_data_loader_workers = true
max_token_length = 225
prior_loss_weight = 1.0
vae = "E:/Everything artificial intelligence/stable-diffusion-webui/models/VAE/sdxl_vae.safetensors"
sdxl = true
xformers = true
cache_latents = true
cache_latents_to_disk = true
gradient_accumulation_steps = 8
max_train_epochs = 10

[general_args.dataset_args]
resolution = 1024
batch_size = 1

[network_args.args]
network_dim = 8
network_alpha = 4.0
min_timestep = 0
max_timestep = 1000
network_dropout = 0.3

[network_args.args.network_args]
conv_dim = 12
conv_alpha = 6.0
module_dropout = 0.25

[optimizer_args.args]
optimizer_type = "AdamW8bit"
lr_scheduler = "cosine"
learning_rate = 0.0001
max_grad_norm = 1.0
lr_scheduler_type = "LoraEasyCustomOptimizer.CustomOptimizers.CosineAnnealingWarmupRestarts"
lr_scheduler_num_cycles = 4
text_encoder_lr = 5e-5
warmup_ratio = 0.1
min_snr_gamma = 8
scale_weight_norms = 5.0
unet_lr = 0.0003

[optimizer_args.args.lr_scheduler_args]
gamma = 0.7
min_lr = 1e-6

# NOTE(review): values are kept as strings — the trainer appears to forward
# optimizer kwargs verbatim; confirm before converting to float/array types.
[optimizer_args.args.optimizer_args]
weight_decay = "0.1"
betas = "0.9,0.99"

[saving_args.args]
output_dir = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Lora/sweetonedollar"
save_precision = "fp16"
save_model_as = "safetensors"
output_name = "sweetonedollar"
save_toml = true
save_toml_location = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Lora/sweetonedollar"
save_every_n_epochs = 1

[bucket_args.dataset_args]
enable_bucket = true
min_bucket_reso = 512
max_bucket_reso = 2048
bucket_reso_steps = 64
bucket_no_upscale = true

[noise_args.args]
noise_offset = 0.03

[logging_args.args]
logging_dir = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Lora/sweetonedollar"
log_with = "tensorboard"