training:
  use_ema: True                # maintain an exponential moving average of the model weights
  batch_size: 8
  # AdamW optimizer hyperparameters
  adam_beta1: 0.9
  adam_beta2: 0.999
  adam_weight_decay: 1e-2
  adam_epsilon: 1e-08
  lr_scheduler: constant       # constant learning-rate schedule
  lr_warmup_steps: 0
  max_train_steps: 50000
  text_finetune_step: 50
  unet_finetune_step: 50
  alpha: 0.1
  min_lr: 1e-6
  warmup_epochs: 0
  num_train_epochs: 300
  accumulate_step: 1           # gradient-accumulation steps
  lr: 1e-6
  resume: ' '                  # checkpoint path to resume from; left blank here
  # attention controls for the UNet down/mid/up blocks; -1 appears to disable them
  down_attn_shift: -1
  down_attn_map: -1
  mid_attn_shift: -1
  mid_attn_map: -1
  up_attn_shift: -1
  up_attn_map: -1

inference:
  loss_scale: 30
  batch_size: 1
  loss_threshold: 0.2
  max_iter: 5
  index_step: 10
  start_pair: 800
  iteration_interval: 400
  infer_iter: 0
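
# Usage sketch (comment only, not part of the config). A minimal way to load
# this file, assuming it is saved as config.yaml and read with PyYAML; the
# project may instead use OmegaConf or argparse.
#
#   import yaml
#
#   with open("config.yaml") as f:
#       cfg = yaml.safe_load(f)
#
#   train = cfg["training"]
#   # Caveat: under YAML 1.1 (which PyYAML implements), scientific notation
#   # without a decimal point, such as 1e-6 or 1e-08 above, loads as a string
#   # rather than a float, so coerce numeric fields explicitly:
#   lr = float(train["lr"])                  # 1e-06
#   steps = int(train["max_train_steps"])    # 50000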