---
# FLUX.1-dev LoRA training configuration (ai-toolkit `sd_trainer` job).
# NOTE(review): structure reconstructed from a whitespace-mangled one-line file;
# nesting follows the standard sd_trainer schema — confirm against the consumer.
job: extension
config:
  name: lora
  process:
    - type: sd_trainer
      training_folder: output
      performance_log_every: 1000
      device: 'cuda:0'  # quoted defensively; contains a colon
      # LoRA adapter hyperparameters
      network:
        type: lora
        linear: 16
        linear_alpha: 16
      # checkpointing policy
      save:
        dtype: float16
        save_every: 250
        max_step_saves_to_keep: 10
        push_to_hub: false
      datasets:
        - folder_path: dataset
          caption_ext: txt
          caption_dropout_rate: 0.05
          shuffle_tokens: false
          cache_latents_to_disk: true
          # multi-resolution bucketing
          resolution:
            - 512
            - 768
            - 1024
      train:
        batch_size: 2
        steps: 6000
        gradient_accumulation_steps: 1
        train_unet: true
        train_text_encoder: false
        content_or_style: style
        gradient_checkpointing: true
        noise_scheduler: flowmatch
        optimizer: adamw8bit
        lr: 0.0001
        skip_first_sample: true
        linear_timesteps: true
        ema_config:
          use_ema: true
          ema_decay: 0.99
        dtype: bf16
      # base model
      model:
        name_or_path: black-forest-labs/FLUX.1-dev
        is_flux: true
        quantize: true
      # periodic sample generation during training
      sample:
        sampler: flowmatch
        sample_every: 250
        width: 1024
        height: 1024
        prompts:
          - Bilibin, woman with red hair, playing chess at the park
        neg: ''
        seed: 42
        walk_seed: true
        guidance_scale: 4
        sample_steps: 20
meta:
  name: lora
  version: '1.0'