carsonyang blanchon committed on
Commit 4af2ded
0 Parent(s):

Duplicate from blanchon/qrcode-diffusion

Co-authored-by: Julien BLANCHON <blanchon@users.noreply.huggingface.co>

Files changed (8)
  1. .gitattributes +34 -0
  2. .gitignore +2 -0
  3. Dockerfile +14 -0
  4. README.md +43 -0
  5. app.py +241 -0
  6. cache/.gitkeep +0 -0
  7. docker-compose.yml +14 -0
  8. requirements.txt +7 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
gradio_cached_examples/*
cache/models*
Dockerfile ADDED
@@ -0,0 +1,14 @@
FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime

# Install dependencies
COPY requirements.txt /tmp/requirements.txt
RUN python -m pip install --upgrade pip && \
    python -m pip install -r /tmp/requirements.txt

# Copy source code
WORKDIR /app
RUN mkdir /app/cache
COPY ./app.py /app/app.py

# Run the application
CMD ["python", "app.py"]
README.md ADDED
@@ -0,0 +1,43 @@
---
title: QrCode Diffusion
emoji: 📱
colorFrom: red
colorTo: yellow
python_version: 3.10.11
sdk: gradio
sdk_version: 3.34.0
app_file: app.py
tags:
  - qrcode
  - stable-diffusion
  - controlnet
pinned: true
duplicated_from: blanchon/qrcode-diffusion
---

# QrCode Diffusion

## Description

This is a simple application that generates a QR code and restyles it with Stable Diffusion, conditioning the generation on the QR code image through ControlNet.

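For reference, here is a minimal sketch of what happens under the hood. It uses the same models and default settings as `app.py` below; it is an illustration of the idea rather than the full application, and the output filename is arbitrary.

```python
import qrcode
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"

# Render the data as a QR code with the highest error-correction level (H),
# then resize it to the 512x512 resolution expected by SD 1.5.
qr = qrcode.QRCode(error_correction=qrcode.constants.ERROR_CORRECT_H, box_size=11, border=9)
qr.add_data("https://twitter.com/JulienBlanchon")
qr.make(fit=True)
control_image = qr.make_image(fill_color="black", back_color="white").convert("RGB")
control_image = control_image.resize((512, 512), Image.LANCZOS)

# Two ControlNets condition the generation on the QR image (tile + brightness), as in app.py.
controlnets = [
    ControlNetModel.from_pretrained("lllyasviel/control_v11f1e_sd15_tile", use_safetensors=False),
    ControlNetModel.from_pretrained("ioclab/control_v1p_sd15_brightness", use_safetensors=True),
]
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "Lykon/DreamShaper", controlnet=controlnets
).to(device)

image = pipe(
    "Japanese ramen with chopsticks, egg and steam, ultra detailed 8k",
    [control_image, control_image],
    num_inference_steps=100,
    guidance_scale=9,
    controlnet_conditioning_scale=[0.25, 0.45],
).images[0]
image.save("qrcode_art.png")
```
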
## How to use

```bash
python app.py
```

Then open the link that appears in the terminal.

## References

- ControlNet
- Stable Diffusion

## Credits

The original idea is from [nhciao](https://www.reddit.com/user/nhciao/) ([Twitter](https://twitter.com/nhciao)) and [this post](https://www.reddit.com/r/StableDiffusion/comments/141hg9x/controlnet_for_qr_code/).

## Other

<https://qrbtf.com/> is also fun, and it is [open source](https://github.com/ciaochaos/qrbtf).
app.py ADDED
@@ -0,0 +1,241 @@
from typing import Optional

import gradio as gr
import qrcode
import torch
from diffusers import (
    ControlNetModel,
    EulerAncestralDiscreteScheduler,
    StableDiffusionControlNetPipeline,
)
from gradio.components import Image, Radio, Slider, Textbox, Number
from PIL import Image as PilImage
from typing_extensions import Literal


def main():
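    # Pick the best available device: CUDA GPU, then Apple Silicon (MPS), then CPU.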
    device = (
        'cuda' if torch.cuda.is_available()
        else 'mps' if torch.backends.mps.is_available()
        else 'cpu'
    )

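    # Two ControlNets condition the generation on the QR code image: the SD 1.5 "tile" model and the "brightness" model.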
    controlnet_tile = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11f1e_sd15_tile",
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
        use_safetensors=False,
        cache_dir="./cache"
    ).to(device)

    controlnet_brightness = ControlNetModel.from_pretrained(
        "ioclab/control_v1p_sd15_brightness",
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
        use_safetensors=True,
        cache_dir="./cache"
    ).to(device)

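    # Build a pipeline for a given checkpoint, wired to both ControlNets and using the Euler Ancestral scheduler.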
    def make_pipe(hf_repo: str, device: str) -> StableDiffusionControlNetPipeline:
        pipe = StableDiffusionControlNetPipeline.from_pretrained(
            hf_repo,
            controlnet=[controlnet_tile, controlnet_brightness],
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            cache_dir="./cache",
        )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        # pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
        return pipe.to(device)

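    # Only the DreamShaper checkpoint is enabled; the alternatives below are left commented out.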
    pipes = {
        "DreamShaper": make_pipe("Lykon/DreamShaper", device),
        # "DreamShaper": make_pipe("Lykon/DreamShaper", "cpu"),
        # "Realistic Vision V1.4": make_pipe("SG161222/Realistic_Vision_V1.4", "cpu"),
        # "OpenJourney": make_pipe("prompthero/openjourney", "cpu"),
        # "Anything V3": make_pipe("Linaqruf/anything-v3.0", "cpu"),
    }

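    # Swap the requested pipeline onto the active device and park the others on the CPU
    # (only useful when several checkpoints are enabled).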
    def move_pipe(hf_repo: str):
        for pipe_name, pipe in pipes.items():
            if pipe_name != hf_repo:
                pipe.to("cpu")
        return pipes[hf_repo].to(device)

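    # Generate one stylized QR code image from the UI inputs.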
    def predict(
        model: Literal[
            "DreamShaper",
            # "Realistic Vision V1.4",
            # "OpenJourney",
            # "Anything V3"
        ],
        qrcode_data: str,
        prompt: str,
        negative_prompt: Optional[str] = None,
        num_inference_steps: int = 100,
        guidance_scale: int = 9,
        controlnet_conditioning_tile: float = 0.25,
        controlnet_conditioning_brightness: float = 0.45,
        seed: int = 1331,
    ) -> PilImage:
        generator = torch.Generator(device).manual_seed(seed)
        if model == "DreamShaper":
            pipe = pipes["DreamShaper"]
            # pipe = move_pipe("DreamShaper Vision V1.4")
        # elif model == "Realistic Vision V1.4":
        #     pipe = move_pipe("Realistic Vision V1.4")
        # elif model == "OpenJourney":
        #     pipe = move_pipe("OpenJourney")
        # elif model == "Anything V3":
        #     pipe = move_pipe("Anything V3")

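        # Build the QR code with the highest error-correction level (H), which tolerates
        # the most damage, then resize it to the 512x512 input size used by SD 1.5.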
        qr = qrcode.QRCode(
            error_correction=qrcode.constants.ERROR_CORRECT_H,
            box_size=11,
            border=9,
        )
        qr.add_data(qrcode_data)
        qr.make(fit=True)
        qrcode_image = qr.make_image(
            fill_color="black",
            back_color="white"
        ).convert("RGB")
        qrcode_image = qrcode_image.resize((512, 512), PilImage.LANCZOS)

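        # Run the diffusion: the same QR image feeds both ControlNets, each with its own conditioning scale.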
        image = pipe(
            prompt,
            [qrcode_image, qrcode_image],
            num_inference_steps=num_inference_steps,
            generator=generator,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            controlnet_conditioning_scale=[
                controlnet_conditioning_tile,
                controlnet_conditioning_brightness
            ]
        ).images[0]

        return image

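    # Gradio interface: the inputs map one-to-one onto predict()'s parameters.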
    ui = gr.Interface(
        fn=predict,
        inputs=[
            Radio(
                value="DreamShaper",
                label="Model",
                choices=[
                    "DreamShaper",
                    # "Realistic Vision V1.4",
                    # "OpenJourney",
                    # "Anything V3"
                ],
            ),
            Textbox(
                value="https://twitter.com/JulienBlanchon",
                label="QR Code Data",
            ),
            Textbox(
                value="Japanese ramen with chopsticks, egg and steam, ultra detailed 8k",
                label="Prompt",
            ),
            Textbox(
                value="logo, watermark, signature, text, BadDream, UnrealisticDream",
                label="Negative Prompt",
                optional=True
            ),
            Slider(
                value=100,
                label="Number of Inference Steps",
                minimum=10,
                maximum=400,
                step=1,
            ),
            Slider(
                value=9,
                label="Guidance Scale",
                minimum=1,
                maximum=20,
                step=1,
            ),
            Slider(
                value=0.25,
                label="Controlnet Conditioning Tile",
                minimum=0.0,
                maximum=1.0,
                step=0.05,
            ),
            Slider(
                value=0.45,
                label="Controlnet Conditioning Brightness",
                minimum=0.0,
                maximum=1.0,
                step=0.05,
            ),
            Number(
                value=1,
                label="Seed",
                precision=0,
            ),
        ],
        outputs=Image(
            label="Generated Image",
            type="pil",
        ),
        examples=[
            [
                "DreamShaper",
                "https://twitter.com/JulienBlanchon",
                "rock, mountain",
                "",
                100,
                9,
                0.25,
                0.45,
                1,
            ],
            [
                "DreamShaper",
                "https://twitter.com/JulienBlanchon",
                "Japanese ramen with chopsticks, egg and steam, ultra detailed 8k",
                "logo, watermark, signature, text, BadDream, UnrealisticDream",
                100,
                9,
                0.25,
                0.45,
                1,
            ],
            # [
            #     "Anything V3",
            #     "https://twitter.com/JulienBlanchon",
            #     "Japanese ramen with chopsticks, egg and steam, ultra detailed 8k",
            #     "logo, watermark, signature, text, BadDream, UnrealisticDream",
            #     100,
            #     9,
            #     0.25,
            #     0.60,
            #     1,
            # ],
            [
                "DreamShaper",
                "https://twitter.com/JulienBlanchon",
                "processor, chipset, electricity, black and white board",
                "logo, watermark, signature, text, BadDream, UnrealisticDream",
                300,
                9,
                0.50,
                0.30,
                1,
            ],
        ],
        cache_examples=True,
        title="Stable Diffusion QR Code Controlnet",
        description="Generate QR Code with Stable Diffusion and Controlnet",
        allow_flagging="never",
        max_batch_size=1,
    )

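    # Note: when running inside Docker (see the Dockerfile and docker-compose.yml),
    # Gradio may need launch(server_name="0.0.0.0") to be reachable from outside the container.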
    ui.queue(concurrency_count=10).launch()


if __name__ == "__main__":
    main()
cache/.gitkeep ADDED
File without changes
docker-compose.yml ADDED
@@ -0,0 +1,14 @@
services:
  gradio:
    build: .
    ports:
      - "7860:7860"
    volumes:
      - ./cache:/app/cache
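    # The GPU reservation below requires the NVIDIA Container Toolkit on the host.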
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [ gpu ]
requirements.txt ADDED
@@ -0,0 +1,7 @@
diffusers
transformers
accelerate
safetensors
gradio
qrcode
opencv-python-headless