AlekseyCalvin committed on
Commit
c7dea59
1 Parent(s): ae4f16f

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. README.md +46 -3
  2. config.yaml +86 -0
  3. lora.safetensors +3 -0
README.md CHANGED
@@ -1,3 +1,46 @@
1
- ---
2
- license: apache-2.0
3
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ license_name: flux-1-dev-non-commercial-license
4
+ license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md
5
+ language:
6
+ - en
7
+ tags:
8
+ - flux
9
+ - diffusers
10
+ - lora
11
+ - replicate
12
+ base_model: "black-forest-labs/FLUX.1-dev"
13
+ pipeline_tag: text-to-image
14
+ # widget:
15
+ # - text: >-
16
+ # prompt
17
+ # output:
18
+ # url: https://...
19
+ instance_prompt: TSVETAEVA
20
+ ---
21
+
22
+ # Tsvetaevaflux
23
+
24
+ <!-- <Gallery /> -->
25
+
26
+ Trained on Replicate using:
27
+
28
+ https://replicate.com/ostris/flux-dev-lora-trainer/train
29
+
30
+
31
+ ## Trigger words
32
+ You should use `TSVETAEVA` to trigger the image generation.
33
+
34
+
35
+ ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
36
+
37
+ ```py
38
+ from diffusers import AutoPipelineForText2Image
39
+ import torch
40
+
41
+ pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.float16).to('cuda')
42
+ pipeline.load_lora_weights('AlekseyCalvin/TsvetaevaFlux', weight_name='lora.safetensors')
43
+ image = pipeline('your prompt').images[0]
44
+ ```
45
+
46
+ For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
config.yaml ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ job: custom_job
2
+ config:
3
+ name: flux_train_replicate
4
+ process:
5
+ - type: custom_sd_trainer
6
+ training_folder: output
7
+ device: cuda:0
8
+ trigger_word: TSVETAEVA
9
+ network:
10
+ type: lora
11
+ linear: 24
12
+ linear_alpha: 24
13
+ network_kwargs:
14
+ only_if_contains:
15
+ - transformer.single_transformer_blocks.7.norm.linear
16
+ - transformer.single_transformer_blocks.7.proj_mlp
17
+ - transformer.single_transformer_blocks.7.proj_out
18
+ - transformer.single_transformer_blocks.7.attn.to_q
19
+ - transformer.single_transformer_blocks.7.attn.to_k
20
+ - transformer.single_transformer_blocks.7.attn.to_v
21
+ - transformer.single_transformer_blocks.12.norm.linear
22
+ - transformer.single_transformer_blocks.12.proj_mlp
23
+ - transformer.single_transformer_blocks.12.proj_out
24
+ - transformer.single_transformer_blocks.12.attn.to_q
25
+ - transformer.single_transformer_blocks.12.attn.to_k
26
+ - transformer.single_transformer_blocks.12.attn.to_v
27
+ - transformer.single_transformer_blocks.16.norm.linear
28
+ - transformer.single_transformer_blocks.16.proj_mlp
29
+ - transformer.single_transformer_blocks.16.proj_out
30
+ - transformer.single_transformer_blocks.16.attn.to_q
31
+ - transformer.single_transformer_blocks.16.attn.to_k
32
+ - transformer.single_transformer_blocks.16.attn.to_v
33
+ - transformer.single_transformer_blocks.20.norm.linear
34
+ - transformer.single_transformer_blocks.20.proj_mlp
35
+ - transformer.single_transformer_blocks.20.proj_out
36
+ - transformer.single_transformer_blocks.20.attn.to_q
37
+ - transformer.single_transformer_blocks.20.attn.to_k
38
+ - transformer.single_transformer_blocks.20.attn.to_v
39
+ save:
40
+ dtype: float16
41
+ save_every: 1001
42
+ max_step_saves_to_keep: 1
43
+ datasets:
44
+ - folder_path: input_images
45
+ caption_ext: txt
46
+ caption_dropout_rate: 0.05
47
+ shuffle_tokens: false
48
+ cache_latents_to_disk: false
49
+ cache_latents: true
50
+ resolution:
51
+ - 512
52
+ - 768
53
+ - 1024
54
+ train:
55
+ batch_size: 1
56
+ steps: 1000
57
+ gradient_accumulation_steps: 1
58
+ train_unet: true
59
+ train_text_encoder: false
60
+ content_or_style: balanced
61
+ gradient_checkpointing: true
62
+ noise_scheduler: flowmatch
63
+ optimizer: adamw8bit
64
+ lr: 0.0005
65
+ ema_config:
66
+ use_ema: true
67
+ ema_decay: 0.99
68
+ dtype: bf16
69
+ model:
70
+ name_or_path: FLUX.1-dev
71
+ is_flux: true
72
+ quantize: true
73
+ sample:
74
+ sampler: flowmatch
75
+ sample_every: 1001
76
+ width: 1024
77
+ height: 1024
78
+ prompts: []
79
+ neg: ''
80
+ seed: 42
81
+ walk_seed: true
82
+ guidance_scale: 3.5
83
+ sample_steps: 28
84
+ meta:
85
+ name: flux_train_replicate
86
+ version: '1.0'
lora.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87f4d0dca8b368c8195f925f855c8d696b2b436f27c50d26fe9342818e88da76
3
+ size 12393232