adam_beta1: 0.9
adam_beta2: 0.999
adam_epsilon: 1.0e-08
adam_weight_decay: 0.01
allow_tf32: false
center_crop: false
checkpointing_steps: 500
checkpoints_total_limit: null
class_data_dir: class_images
class_prompt: illustration style
crops_coords_top_left_h: 0
crops_coords_top_left_w: 0
dataloader_num_workers: 0
enable_xformers_memory_efficient_attention: false
gradient_accumulation_steps: 4
gradient_checkpointing: false
hub_model_id: null
hub_token: null
instance_data_dir: training_images
instance_prompt: in the style of wlat_mntn
learning_rate: 1.0e-06
local_rank: -1
logging_dir: logs
lr_num_cycles: 1
lr_power: 1.0
lr_scheduler: constant
lr_warmup_steps: 0
max_grad_norm: 1.0
max_train_steps: 800
mixed_precision: fp16
num_class_images: 18
num_train_epochs: 160
num_validation_images: 4
output_dir: vectors-training-sdxl-1.0
pretrained_model_name_or_path: stabilityai/stable-diffusion-xl-base-1.0
pretrained_vae_model_name_or_path: null
prior_generation_precision: null
prior_loss_weight: 1.0
push_to_hub: true
report_to: tensorboard
resolution: 512
resume_from_checkpoint: null
revision: null
sample_batch_size: 4
scale_lr: false
seed: 512351235
train_batch_size: 1
train_text_encoder: false
use_8bit_adam: false
validation_epochs: 50
validation_prompt: in the style of wlat_mntn, illustration of a tree
with_prior_preservation: true
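
These saved hyperparameters can be turned back into a launch command. Below is a minimal sketch, assuming each key maps one-to-one onto a `--key value` flag of the DreamBooth SDXL training script; the script name `train_dreambooth_sdxl.py` and the file name `dreambooth_config.yaml` are assumptions for illustration, boolean keys are treated as store-true switches, and `null` values are skipped.

```python
import shlex
import yaml

# Load the hyperparameter dump shown above (file name is an assumption for
# illustration; the trainer writes these values out alongside its checkpoints).
with open("dreambooth_config.yaml") as f:
    config = yaml.safe_load(f)

# Rebuild the command-line arguments, assuming each config key corresponds to a
# `--key` flag of the training script.
args = []
for key, value in config.items():
    if value is None:
        # Unset options (e.g. hub_model_id, resume_from_checkpoint) are omitted.
        continue
    if isinstance(value, bool):
        # Booleans such as with_prior_preservation are assumed to be
        # store-true switches: emit the flag only when the value is true.
        if value:
            args.append(f"--{key}")
        continue
    args.extend([f"--{key}", str(value)])

# Script name is a placeholder for whichever DreamBooth SDXL trainer you use.
cmd = ["accelerate", "launch", "train_dreambooth_sdxl.py", *args]
print(shlex.join(cmd))
```

Printing the command rather than executing it makes it easy to double-check the flags (prompt strings with spaces are quoted by `shlex.join`) before starting a run.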