[model_arguments]
v2 = false
v_parameterization = false
pretrained_model_name_or_path = "/content/pretrained_model/model.ckpt"

[additional_network_arguments]
no_metadata = false
unet_lr = 1.0
text_encoder_lr = 1.0
network_module = "networks.lora"
network_dim = 64
network_alpha = 1
network_train_unet_only = false
network_train_text_encoder_only = false

[optimizer_arguments]
optimizer_type = "Prodigy"
learning_rate = 1.0
max_grad_norm = 1.0
optimizer_args = [
  "decouple=True",
  "weight_decay=0.01",
  "d_coef=2",
  "use_bias_correction=True",
  "safeguard_warmup=True",
  "betas=0.9,0.99",
]
lr_scheduler = "constant_with_warmup"
lr_warmup_steps = 100

[dataset_arguments]
cache_latents = true
debug_dataset = false
vae_batch_size = 4

[training_arguments]
output_dir = "/content/LoRA/output"
output_name = "otogi"
save_precision = "fp16"
save_every_n_epochs = 10
train_batch_size = 5
max_token_length = 225
mem_eff_attn = false
xformers = true
max_train_epochs = 20
max_data_loader_n_workers = 8
persistent_data_loader_workers = true
seed = 31337
gradient_checkpointing = false
gradient_accumulation_steps = 1
mixed_precision = "fp16"
clip_skip = 2
logging_dir = "/content/LoRA/logs"
log_prefix = "otogi"
lowram = true
multires_noise_discount = 0.3

[sample_prompt_arguments]
sample_every_n_epochs = 5
sample_sampler = "ddim"

[dreambooth_arguments]
prior_loss_weight = 1.0

[saving_arguments]
save_model_as = "safetensors"
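This layout matches the TOML consumed by kohya-ss sd-scripts' train_network.py through its --config_file option. Below is a minimal launch sketch, assuming the scripts are checked out at /content/kohya-trainer and that the config above has been saved to disk alongside a dataset_config.toml and a sample prompt file; all of those paths are placeholders, not values taken from the config itself.

```python
import subprocess

# Assumed locations; only output_dir and logging_dir are known from the TOML above.
SCRIPTS_DIR = "/content/kohya-trainer"
CONFIG_FILE = "/content/LoRA/config/config_file.toml"        # the TOML above, saved to disk
DATASET_CONFIG = "/content/LoRA/config/dataset_config.toml"  # image folder / caption definitions
SAMPLE_PROMPTS = "/content/LoRA/config/sample_prompt.txt"    # prompts used every sample_every_n_epochs

# Launch LoRA training through Accelerate; every hyperparameter comes from the TOML.
subprocess.run(
    [
        "accelerate", "launch",
        "--num_cpu_threads_per_process=1",
        "train_network.py",
        f"--config_file={CONFIG_FILE}",
        f"--dataset_config={DATASET_CONFIG}",
        f"--sample_prompts={SAMPLE_PROMPTS}",
    ],
    cwd=SCRIPTS_DIR,
    check=True,
)
```

Note that learning_rate, unet_lr, and text_encoder_lr are all 1.0 because Prodigy estimates the step size adaptively; the optimizer_args entry d_coef=2 scales that estimate rather than setting an absolute rate.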