Upload folder using huggingface_hub
- README.md +33 -0
- cat14-2.safetensors +3 -0
- dataset.toml +14 -0
- sample/cat14-2_000300_00_20241119102258.png +0 -0
- sample/cat14-2_000600_00_20241119104741.png +0 -0
- sample/cat14-2_000900_00_20241119111224.png +0 -0
- sample/cat14-2_001200_00_20241119113706.png +0 -0
- sample_prompts.txt +1 -0
- train.sh +34 -0
README.md
ADDED
@@ -0,0 +1,33 @@
+---
+tags:
+- text-to-image
+- flux
+- lora
+- diffusers
+- template:sd-lora
+- fluxgym
+widget:
+- output:
+    url: sample/cat14-2_001200_00_20241119113706.png
+  text: cat cat14 walking through grass
+base_model: black-forest-labs/FLUX.1-dev
+instance_prompt: cat14
+license: other
+license_name: flux-1-dev-non-commercial-license
+license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
+---
+
+# cat14_2
+
+A Flux LoRA trained on a local computer with [Fluxgym](https://github.com/cocktailpeanut/fluxgym)
+
+<Gallery />
+
+## Trigger words
+
+You should use `cat14` to trigger the image generation.
+
+## Download model and use it with ComfyUI, AUTOMATIC1111, SD.Next, Invoke AI, Forge, etc.
+
+Weights for this model are available in Safetensors format.
+
cat14-2.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43a971693ed5ea78d15bc04069519697004dcb5de5ffaea83ccd90504fc18e1d
+size 39759992
dataset.toml
ADDED
@@ -0,0 +1,14 @@
+[general]
+shuffle_caption = false
+caption_extension = '.txt'
+keep_tokens = 1
+
+[[datasets]]
+resolution = 1024
+batch_size = 1
+keep_tokens = 1
+
+[[datasets.subsets]]
+image_dir = '/app/fluxgym/datasets/cat14-2'
+class_tokens = 'cat14'
+num_repeats = 100
sample/cat14-2_000300_00_20241119102258.png
ADDED
sample/cat14-2_000600_00_20241119104741.png
ADDED
sample/cat14-2_000900_00_20241119111224.png
ADDED
sample/cat14-2_001200_00_20241119113706.png
ADDED
sample_prompts.txt
ADDED
@@ -0,0 +1 @@
+cat cat14 walking through grass
train.sh
ADDED
@@ -0,0 +1,34 @@
+accelerate launch \
+  --mixed_precision bf16 \
+  --num_cpu_threads_per_process 1 \
+  sd-scripts/flux_train_network.py \
+  --pretrained_model_name_or_path "/app/fluxgym/models/unet/flux1-dev.sft" \
+  --clip_l "/app/fluxgym/models/clip/clip_l.safetensors" \
+  --t5xxl "/app/fluxgym/models/clip/t5xxl_fp16.safetensors" \
+  --ae "/app/fluxgym/models/vae/ae.sft" \
+  --cache_latents_to_disk \
+  --save_model_as safetensors \
+  --sdpa --persistent_data_loader_workers \
+  --max_data_loader_n_workers 2 \
+  --seed 42 \
+  --gradient_checkpointing \
+  --mixed_precision bf16 \
+  --save_precision bf16 \
+  --network_module networks.lora_flux \
+  --network_dim 4 \
+  --optimizer_type adamw8bit --sample_prompts="/app/fluxgym/outputs/cat14-2/sample_prompts.txt" --sample_every_n_steps="300" \
+  --learning_rate 8e-4 \
+  --cache_text_encoder_outputs \
+  --cache_text_encoder_outputs_to_disk \
+  --fp8_base \
+  --highvram \
+  --max_train_epochs 1 \
+  --save_every_n_epochs 4 \
+  --dataset_config "/app/fluxgym/outputs/cat14-2/dataset.toml" \
+  --output_dir "/app/fluxgym/outputs/cat14-2" \
+  --output_name cat14-2 \
+  --timestep_sampling shift \
+  --discrete_flow_shift 3.1582 \
+  --model_prediction_type raw \
+  --guidance_scale 1 \
+  --loss_type l2