command:
- accelerate
- launch
- --config_file=/content/kohya-trainer/accelerate_config/config.yaml
- --num_cpu_threads_per_process=8
- train_network.py
- --output_name=mandoLORA
- --pretrained_model_name_or_path=/content/pretrained_model/Stable-Diffusion-v1-5.safetensors
- --vae=/content/vae/stablediffusion.vae.pt
- --train_data_dir=/content/LoRA/train_data
- --in_json=/content/LoRA/meta_lat.json
- --output_dir=/content/LoRA/output
- --network_dim=128
- --network_alpha=128
- --network_module=networks.lora
- --optimizer_type=AdamW8bit
- --learning_rate=0.0001
- --unet_lr=0.0001
- --text_encoder_lr=5e-05
- --lr_scheduler=constant
- --dataset_repeats=120
- --resolution=512
- --noise_offset=0.1
- --train_batch_size=1
- --max_train_steps=1
- --mixed_precision=fp16
- --save_precision=fp16
- --save_n_epoch_ratio=3
- --save_model_as=safetensors
- --max_token_length=225
- --gradient_checkpointing
- --gradient_accumulation_steps=1
- --clip_skip=1
- --logging_dir=/content/LoRA/logs
- --log_prefix=mandoLORA
- --shuffle_caption