# Kohya sd-scripts LoRA training configuration (SD1.5 base model).
# Reformatted to valid TOML: one key/value pair per line (the collapsed
# single-line form is rejected by TOML 1.0 parsers).

[model_arguments]
v2 = false
v_parameterization = false
pretrained_model_name_or_path = "/kaggle/working/pretrained_model/chilloutmix_NiPrunedFp32Fix.safetensors"
vae = "/kaggle/working/vae/vae-ft-mse-840000-ema-pruned.safetensors"

[additional_network_arguments]
# Separate learning rates for the two trained components.
unet_lr = 5e-5
text_encoder_lr = 1e-5
network_module = "networks.lora"
network_dim = 64
network_alpha = 32
network_args = []
scale_weight_norms = 1.0
network_train_unet_only = false
network_train_text_encoder_only = false
no_metadata = true
training_comment = "Trained by Kohya Trainer Script by DukeG_89"

[sai_model_spec]
metadata_title = "SD1.5 LoRA"
metadata_author = "lip421"

[optimizer_arguments]
min_snr_gamma = 5
optimizer_type = "Lion"
# NOTE(review): learning_rate = 1.0 alongside explicit unet_lr/text_encoder_lr —
# presumably the per-component rates take precedence; verify against the trainer.
learning_rate = 1.0
max_grad_norm = 1.0
# Passed through to the optimizer constructor as strings.
optimizer_args = ["weight_decay=0.01", "betas=.95,.98"]
lr_scheduler = "cosine_with_restarts"
lr_warmup_steps = 0
# NOTE(review): 0 cycles with cosine_with_restarts looks unintended —
# confirm whether the trainer substitutes a default here.
lr_scheduler_num_cycles = 0

[dataset_arguments]
cache_latents = true
cache_latents_to_disk = false
debug_dataset = false
vae_batch_size = 1

[training_arguments]
output_dir = "/kaggle/working/Train_Results/outputs"
output_name = "gongzhuqun"
save_every_n_epochs = 1
save_state = false
train_batch_size = 1
max_token_length = 225
xformers = true
max_train_epochs = 20
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
seed = 10086
gradient_checkpointing = true
mixed_precision = "fp16"
save_precision = "fp16"
clip_skip = 2
logging_dir = "/kaggle/working/Train_Results/logs"
log_prefix = "gongzhuqun"
lowram = true

[v_pred_loss]
scale_v_pred_loss_like_noise_pred = false

# Empty table: noise offset left at trainer defaults.
[noise_offset_arguments]

[pyramid_noise_arguments]
multires_noise_iterations = 8
multires_noise_discount = 0.3

[input_perturbation_noise]
ip_noise_gamma = 0.1

[debiased_estimation_loss]
debiased_estimation_loss = true

[sample_prompts_arguments]
# sample_every_n_steps = 0 disables step-based sampling; epoch-based is used.
sample_every_n_steps = 0
sample_every_n_epochs = 1
sample_sampler = "euler_a"
sample_at_first = true

# Empty tables: sections unused for this SD1.5 run, kept for schema completeness.
[sdxl_arguments]

[dreambooth_arguments]
prior_loss_weight = 1.0

[SDXL_Controlnet_lllite]
[saving_arguments]
save_model_as = "safetensors"

# Empty table: DDP options left at trainer defaults.
[DDP_training_arguments]

[Torch_Compile_arguments]
dynamo_backend = "inductor"

[huggingface_arguments]
# SECURITY(review): this is a base64-encoded Hugging Face access token committed
# in plaintext config — base64 is encoding, not encryption. Rotate the token and
# inject it via an environment variable / secret store instead of this file.
huggingface_token = "aGZfTmJOcEVZaldaVEdHb3lKSVFhbmlzRUNjSHl3bGRzZGJocg=="
huggingface_repo_id = "lip421/gongzhuqun"
huggingface_repo_type = "model"
huggingface_path_in_repo = "output"
huggingface_repo_visibility = "private"
async_upload = true

[wandb_arguments]
log_with = "all"
# SECURITY(review): base64-encoded W&B API key in plaintext — same concern as
# above; rotate and supply via WANDB_API_KEY environment variable.
wandb_api_key = "OTAxZWM2NDUyNGY1OWVkOWQwMDQ0NmU0Nzc4OGQxMzVkNTNlZTZiNg=="
log_tracker_name = "gongzhuqun"