Update README.md
Browse files
README.md
CHANGED
@@ -2,197 +2,159 @@
|
|
2 |
library_name: diffusers
|
3 |
---
|
4 |
|
5 |
-
#
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
[
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
[
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
[
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
[
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
[
|
129 |
-
|
130 |
-
|
131 |
-
|
132 |
-
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
|
137 |
-
|
138 |
-
|
139 |
-
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
-
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
#### Hardware
|
163 |
-
|
164 |
-
[More Information Needed]
|
165 |
-
|
166 |
-
#### Software
|
167 |
-
|
168 |
-
[More Information Needed]
|
169 |
-
|
170 |
-
## Citation [optional]
|
171 |
-
|
172 |
-
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
|
173 |
-
|
174 |
-
**BibTeX:**
|
175 |
-
|
176 |
-
[More Information Needed]
|
177 |
-
|
178 |
-
**APA:**
|
179 |
-
|
180 |
-
[More Information Needed]
|
181 |
-
|
182 |
-
## Glossary [optional]
|
183 |
-
|
184 |
-
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
|
185 |
-
|
186 |
-
[More Information Needed]
|
187 |
-
|
188 |
-
## More Information [optional]
|
189 |
-
|
190 |
-
[More Information Needed]
|
191 |
-
|
192 |
-
## Model Card Authors [optional]
|
193 |
-
|
194 |
-
[More Information Needed]
|
195 |
-
|
196 |
-
## Model Card Contact
|
197 |
-
|
198 |
-
[More Information Needed]
|
|
|
2 |
library_name: diffusers
|
3 |
---
|
4 |
|
5 |
+
# yujiepan/FLUX.1-dev-tiny-random
|
6 |
+
|
7 |
+
This pipeline is intended for debugging. It is adapted from [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) with a smaller size and randomly initialized parameters.
|
8 |
+
|
9 |
+
## Usage
|
10 |
+
```python
|
11 |
+
import torch
|
12 |
+
from diffusers import FluxPipeline
|
13 |
+
|
14 |
+
pipe = FluxPipeline.from_pretrained("yujiepan/FLUX.1-dev-tiny-random", torch_dtype=torch.bfloat16)
|
15 |
+
pipe.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power
|
16 |
+
prompt = "A cat holding a sign that says hello world"
|
17 |
+
image = pipe(
|
18 |
+
prompt,
|
19 |
+
height=1024,
|
20 |
+
width=1024,
|
21 |
+
guidance_scale=3.5,
|
22 |
+
num_inference_steps=50,
|
23 |
+
max_sequence_length=512,
|
24 |
+
generator=torch.Generator("cpu").manual_seed(0)
|
25 |
+
).images[0]
|
26 |
+
# image.save("flux-dev.png")
|
27 |
+
```
|
28 |
+
|
29 |
+
## Codes
|
30 |
+
```python
|
31 |
+
import importlib
|
32 |
+
|
33 |
+
import torch
|
34 |
+
import transformers
|
35 |
+
|
36 |
+
import diffusers
|
37 |
+
import rich
|
38 |
+
|
39 |
+
|
40 |
+
def get_original_model_configs(
    pipeline_cls: type[diffusers.FluxPipeline],
    pipeline_id: str
):
    """Collect the configs of every model component of a hub pipeline.

    Reads the pipeline's ``model_index.json`` via ``load_config``, resolves
    each component's class from its ``(module, class_name)`` import strings,
    and loads that component's config from the hub repo.

    Returns a dict mapping subfolder name -> config object
    (``transformers.PretrainedConfig`` for transformers models, a plain config
    dict for diffusers models). Schedulers and tokenizers carry no model
    config and are skipped.
    """
    pipeline_config: dict[str, list[str]] = pipeline_cls.load_config(pipeline_id)

    configs = {}
    for subfolder, import_strings in pipeline_config.items():
        # Entries like "_class_name" / "_diffusers_version" are metadata, not components.
        if subfolder.startswith("_"):
            continue

        component_module = importlib.import_module(".".join(import_strings[:-1]))
        component_cls = getattr(component_module, import_strings[-1])

        if issubclass(component_cls, transformers.PreTrainedModel):
            config_cls: transformers.PretrainedConfig = component_cls.config_class
            configs[subfolder] = config_cls.from_pretrained(pipeline_id, subfolder=subfolder)
        elif issubclass(component_cls, diffusers.ModelMixin) and issubclass(component_cls, diffusers.ConfigMixin):
            configs[subfolder] = component_cls.load_config(pipeline_id, subfolder=subfolder)
        elif subfolder in ['scheduler', 'tokenizer', 'tokenizer_2', 'tokenizer_3']:
            # No model weights/config to shrink for these components.
            pass
        else:
            raise NotImplementedError(f"unknown {subfolder}: {import_strings}")

    return configs
|
67 |
+
|
68 |
+
|
69 |
+
def load_pipeline(pipeline_cls: type[diffusers.DiffusionPipeline], pipeline_id: str, model_configs: dict[str, dict]):
    """Instantiate a pipeline with freshly-initialized components.

    Models (transformers / diffusers) are built from the (possibly shrunken)
    configs in ``model_configs`` with random weights; tokenizers and
    schedulers, which carry no weights, are loaded as-is from ``pipeline_id``.

    Raises:
        NotImplementedError: if a component class is none of the known kinds.
    """
    pipeline_config: dict[str, list[str]] = pipeline_cls.load_config(pipeline_id)
    components = {}
    for subfolder, import_strings in pipeline_config.items():
        # Skip metadata entries such as "_class_name".
        if subfolder.startswith("_"):
            continue
        module = importlib.import_module(".".join(import_strings[:-1]))
        cls = getattr(module, import_strings[-1])
        print("Loading:", ".".join(import_strings))
        if issubclass(cls, transformers.PreTrainedModel):
            # Random init from the shrunken config (no pretrained weights).
            config = model_configs[subfolder]
            component = cls(config)
        elif issubclass(cls, transformers.PreTrainedTokenizerBase):
            component = cls.from_pretrained(pipeline_id, subfolder=subfolder)
        elif issubclass(cls, diffusers.ModelMixin) and issubclass(cls, diffusers.ConfigMixin):
            config = model_configs[subfolder]
            component = cls.from_config(config)
        elif issubclass(cls, diffusers.SchedulerMixin) and issubclass(cls, diffusers.ConfigMixin):
            component = cls.from_pretrained(pipeline_id, subfolder=subfolder)
        else:
            # BUG FIX: original code did `raise (f"...")`, which raises a str and
            # fails with TypeError; raise a proper exception instead.
            raise NotImplementedError(f"unknown {subfolder}: {import_strings}")
        components[subfolder] = component
        if 'transformer' in component.__class__.__name__.lower():
            print(component)
    pipeline = pipeline_cls(**components)
    return pipeline
|
96 |
+
|
97 |
+
|
98 |
+
def get_pipeline():
    """Build a tiny, randomly-initialized FLUX.1-dev pipeline for debugging."""
    torch.manual_seed(42)
    pipeline_id = "black-forest-labs/FLUX.1-dev"
    pipeline_cls = diffusers.FluxPipeline
    model_configs = get_original_model_configs(pipeline_cls, pipeline_id)

    HIDDEN_SIZE = 8

    # Shrink the CLIP text encoder.
    text_encoder_cfg = model_configs["text_encoder"]
    text_encoder_cfg.hidden_size = HIDDEN_SIZE
    text_encoder_cfg.intermediate_size = HIDDEN_SIZE * 2
    text_encoder_cfg.num_attention_heads = 2
    text_encoder_cfg.num_hidden_layers = 2
    text_encoder_cfg.projection_dim = HIDDEN_SIZE

    # Shrink the T5 text encoder.
    t5_cfg = model_configs["text_encoder_2"]
    t5_cfg.d_model = HIDDEN_SIZE
    t5_cfg.d_ff = HIDDEN_SIZE * 2
    t5_cfg.d_kv = HIDDEN_SIZE // 2
    t5_cfg.num_heads = 2
    t5_cfg.num_layers = 2

    # Shrink the flux transformer (config is a plain dict).
    transformer_cfg = model_configs["transformer"]
    transformer_cfg["num_layers"] = 2
    transformer_cfg["num_single_layers"] = 4
    transformer_cfg["num_attention_heads"] = 2
    transformer_cfg["attention_head_dim"] = HIDDEN_SIZE
    transformer_cfg["pooled_projection_dim"] = HIDDEN_SIZE
    transformer_cfg["joint_attention_dim"] = HIDDEN_SIZE
    transformer_cfg["axes_dims_rope"] = (4, 2, 2)
    # model_configs["transformer"]["caption_projection_dim"] = HIDDEN_SIZE

    # Shrink the VAE (config is a plain dict).
    vae_cfg = model_configs["vae"]
    vae_cfg["layers_per_block"] = 1
    vae_cfg["block_out_channels"] = [HIDDEN_SIZE] * 4
    vae_cfg["norm_num_groups"] = 2
    vae_cfg["latent_channels"] = 16

    return load_pipeline(pipeline_cls, pipeline_id, model_configs)
|
133 |
+
|
134 |
+
|
135 |
+
# Build the tiny pipeline and cast everything to bfloat16.
pipe = get_pipeline()
pipe = pipe.to(torch.bfloat16)

# Save locally so it can be reloaded and pushed.
from pathlib import Path
save_folder = '/tmp/yujiepan/FLUX.1-dev-tiny-random'
Path(save_folder).mkdir(parents=True, exist_ok=True)
pipe.save_pretrained(save_folder)

# Smoke-test: reload from disk and run one generation end to end.
pipe = diffusers.FluxPipeline.from_pretrained(save_folder, torch_dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()
prompt = "A cat holding a sign that says hello world"
image = pipe(
    prompt,
    height=1024,
    width=1024,
    guidance_scale=3.5,
    num_inference_steps=50,
    max_sequence_length=512,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]

# Print the saved configs for inspection, then upload to the hub.
configs = get_original_model_configs(diffusers.FluxPipeline, save_folder)
rich.print(configs)

pipe.push_to_hub(save_folder.removeprefix('/tmp/'))
|
160 |
+
```
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|