{
  "project": "fluxdev-lora",
  "base_model": "black-forest-labs/FLUX.1-dev",
  "weights_hint": "flux1-dev-fp8.safetensors",
  "task": "dreambooth-lora",
  "resolution": 768,
  "rank": 16,
  "lora_alpha": 16,
  "lora_layers": "attn.to_k,attn.to_q,attn.to_v,attn.to_out.0",
  "train_transformer_frac": 1.0,
  "train_text_encoder_ti": true,
  "enable_t5_ti": false,
  "train_text_encoder_ti_frac": 0.25,
  "optimizer": "prodigy",
  "learning_rate": 1.0,
  "lr_scheduler": "constant",
  "max_train_steps": 1000,
  "train_batch_size": 1,
  "gradient_accumulation_steps": 1,
  "mixed_precision": "bf16",
  "guidance_scale": 1,
  "seed": 42,
  "dataset": {
    "type": "image-caption",
    "path_or_repo": "REPLACE_WITH_DATASET_OR_PATH",
    "image_column": "image",
    "caption_column": "caption",
    "instance_prompt": "TOK",
    "token_abstraction": "TOK",
    "repeats": 1,
    "shuffle": true
  },
  "logging": {
    "wandb": false,
    "report_to": "tensorboard",
    "logging_steps": 25
  },
  "checkpointing": {
    "output_dir": "outputs/fluxdev-lora",
    "save_steps": 200,
    "push_to_hub": true,
    "hub_model_id": "Lachter0808/fluxdev-lora",
    "save_safetensors": true
  },
  "hardware": {
    "gradient_checkpointing": true,
    "use_8bit_adam": false
  },
  "notes": "Config afgestemd op diffusers Advanced Flux Dreambooth LoRA; attention-only LoRA, pivotal tuning op CLIP. Gebruik het base_model voor training; fp8-bestand is een referentie voor inferentie."
}