{
  "pretrained_model_name_or_path": "/data/gyl/sdft/experiments/dataset_p1/outputs/3000",
  "pretrained_vae_name_or_path": null,
  "revision": null,
  "tokenizer_name": null,
  "instance_data_dir": "/data3/zilun/gyl_sd/dataset_p1",
  "class_data_dir": null,
  "instance_prompt": null,
  "class_prompt": null,
  "save_sample_prompt": null,
  "save_sample_negative_prompt": null,
  "n_save_sample": 4,
  "save_guidance_scale": 7.5,
  "save_infer_steps": 20,
  "pad_tokens": false,
  "with_prior_preservation": false,
  "prior_loss_weight": 1.0,
  "num_class_images": 100,
  "output_dir": "/data/gyl/sdft/experiments/dataset_p1/outputs_3000",
  "seed": null,
  "resolution": 768,
  "center_crop": false,
  "train_text_encoder": true,
  "train_batch_size": 60,
  "sample_batch_size": 4,
  "num_train_epochs": 27,
  "max_train_steps": 22000,
  "gradient_accumulation_steps": 1,
  "gradient_checkpointing": true,
  "learning_rate": 1e-06,
  "scale_lr": false,
  "lr_scheduler": "constant",
  "lr_warmup_steps": 0,
  "use_8bit_adam": false,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_weight_decay": 0.01,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "push_to_hub": false,
  "hub_token": null,
  "hub_model_id": null,
  "logging_dir": "logs",
  "log_interval": 10,
  "save_interval": 1000,
  "save_min_steps": 0,
  "mixed_precision": "fp16",
  "not_cache_latents": false,
  "hflip": false,
  "local_rank": -1,
  "concepts_list": [
    {
      "instance_prompt": null,
      "class_prompt": null,
      "instance_data_dir": "/data3/zilun/gyl_sd/dataset_p1",
      "class_data_dir": null
    }
  ],
  "read_prompts_from_txts": true
}
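
A minimal Python sketch of how a saved argument dump like the one above can be reloaded and inspected; the "args.json" filename and the checkpoint-step arithmetic are assumptions for illustration, not part of the training script itself.

import json

# Load the saved training-argument dump (filename is an assumption;
# point this at wherever the JSON above is actually stored).
with open("args.json") as f:
    args = json.load(f)

# Quantities derived directly from the values in the dump.
steps = args["max_train_steps"]       # 22000
save_every = args["save_interval"]    # 1000
checkpoint_steps = list(range(save_every, steps + 1, save_every))

print(f"Base model:  {args['pretrained_model_name_or_path']}")
print(f"Output dir:  {args['output_dir']}")
print(f"Batch size:  {args['train_batch_size']} at {args['resolution']}px")
print(f"Checkpoints expected at steps {checkpoint_steps[0]}..{checkpoint_steps[-1]}")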