accelerate launch \
  --mixed_precision bf16 \
  --multi_gpu \
  --gpu_ids 0,1 \
  --num_processes 2 \
  --num_cpu_threads_per_process 1 \
  sd-scripts/flux_train_network.py \
  --pretrained_model_name_or_path "/tmp/fluxgym/models/unet/flux1-dev-fp8.safetensors" \
  --clip_l "/tmp/fluxgym/models/clip/clip_l.safetensors" \
  --t5xxl "/tmp/fluxgym/models/clip/t5xxl_fp8_e4m3fn.safetensors" \
  --ae "/tmp/fluxgym/models/vae/ae.sft" \
  --cache_latents_to_disk \
  --save_model_as safetensors \
  --sdpa --persistent_data_loader_workers \
  --max_data_loader_n_workers 2 \
  --seed 42 \
  --gradient_checkpointing \
  --mixed_precision bf16 \
  --save_precision fp16 \
  --network_module networks.lora_flux \
  --network_dim 32 \
  --optimizer_type adafactor \
  --optimizer_args "relative_step=False" "scale_parameter=False" "warmup_init=False" \
  --lr_scheduler constant_with_warmup \
  --max_grad_norm 0.0 \
  --learning_rate 1e-4 \
  --cache_text_encoder_outputs \
  --cache_text_encoder_outputs_to_disk \
  --fp8_base \
  --highvram \
  --max_train_epochs 10 \
  --save_every_n_epochs 1 \
  --dataset_config "/tmp/fluxgym/outputs/aidka/dataset.toml" \
  --output_dir "/tmp/fluxgym/outputs/aidka" \
  --output_name aidka \
  --timestep_sampling shift \
  --discrete_flow_shift 3.1582 \
  --model_prediction_type raw \
  --guidance_scale 1 \
  --blocks_to_swap 10 \
  --loss_type l2
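The file passed to --dataset_config above is a standard sd-scripts TOML dataset definition. As a rough sketch only, assuming the usual [general] / [[datasets]] / [[datasets.subsets]] layout: the resolution, batch size, repeat count, class token, and image directory below are illustrative placeholders, not values recovered from this run.

[general]
shuffle_caption = false
caption_extension = '.txt'
keep_tokens = 1

[[datasets]]
# placeholder training resolution and batch size
resolution = 512
batch_size = 1

  [[datasets.subsets]]
  # placeholder folder holding the training images and their matching .txt captions
  image_dir = '/tmp/fluxgym/datasets/aidka'
  class_tokens = 'aidka'
  num_repeats = 10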