# NOTE: Hugging Face Hub page chrome captured along with this file (not part
# of the config) — author: nob; commit message: "End of training";
# commit: 12e14ba; page links: raw / history / blame; file size: 1.28 kB.
# Hyperparameter dump for a LoRA fine-tuning run on Stable Diffusion XL.
# Keys are alphabetical and mirror the CLI flags of a DreamBooth-style
# training script (looks like diffusers' train_dreambooth_lora_sdxl.py —
# TODO(review): confirm which script produced this file).
# AdamW optimizer coefficients.
adam_beta1: 0.9
adam_beta2: 0.999
adam_epsilon: 1.0e-08
adam_weight_decay: 0.01
allow_tf32: false
center_crop: false
checkpointing_steps: 500  # save a checkpoint every 500 optimizer steps — presumably; confirm in script
checkpoints_total_limit: null  # null appears to mean "no cap on retained checkpoints" — verify
# NOTE(review): class_data_dir is identical to instance_data_dir below, and
# with_prior_preservation is false at the bottom of this file — the class_* and
# prior_* settings are presumably inactive for this run; confirm against the script.
class_data_dir: data/RD_img_left_resize
class_prompt: fundus
# SDXL micro-conditioning crop coordinates (top-left corner at the origin).
crops_coords_top_left_h: 0
crops_coords_top_left_w: 0
dataloader_num_workers: 0  # 0 typically means data loading in the main process — confirm
enable_xformers_memory_efficient_attention: false
gradient_accumulation_steps: 4  # effective batch = train_batch_size x 4 — presumably
gradient_checkpointing: false
# Hub upload settings; push_to_hub is true below but no explicit model id/token here.
hub_model_id: null
hub_token: null
instance_data_dir: data/RD_img_left_resize
instance_prompt: sks fundus  # "sks" reads as a DreamBooth rare-token subject identifier — confirm intent
learning_rate: 0.0001
local_rank: 0
logging_dir: logs
# LR schedule: constant. lr_num_cycles / lr_power / lr_warmup_steps are set but
# presumably unused by a constant schedule — verify against the scheduler code.
lr_num_cycles: 1
lr_power: 1.0
lr_scheduler: constant
lr_warmup_steps: 10
max_grad_norm: 1.0  # gradient-clipping threshold — presumably; confirm
max_train_steps: 1000
mixed_precision: bf16
num_class_images: 100  # relevant only with prior preservation, which is off below — presumably
num_train_epochs: 17
num_validation_images: 4
output_dir: lora-trained-xl
pretrained_model_name_or_path: stabilityai/stable-diffusion-xl-base-1.0
pretrained_vae_model_name_or_path: null  # null appears to mean "use the VAE bundled with the base model" — verify
prior_generation_precision: null
prior_loss_weight: 1.0  # inert while with_prior_preservation is false — presumably
push_to_hub: true
report_to: tensorboard
resolution: 1024
resume_from_checkpoint: null
revision: null
sample_batch_size: 4
scale_lr: false
seed: 0
train_batch_size: 1  # per-device batch size — presumably; see gradient_accumulation_steps above
train_text_encoder: false  # text encoder frozen; only the diffusion-model LoRA is trained — presumably
validation_epochs: 25
validation_prompt: sks fundus
with_prior_preservation: false