gaparmar committed on
Commit
98322fb
1 Parent(s): 9214a02

adding initial demo

Files changed (2)
  1. app.py +272 -0
  2. requirements.txt +28 -0
app.py ADDED
@@ -0,0 +1,272 @@
+ """
+ Gradio demo for sketch-to-image generation with pix2pix-turbo (built against gradio 3.43.1).
+ """
+
+ import os
+ import sys
+ import pdb
+ import random
+ import numpy as np
+ from PIL import Image
+ import base64
+ from io import BytesIO
+
+ import torch
+ from torchvision import transforms
+ import torchvision.transforms.functional as TF
+ import gradio as gr
+
+ from src.model import make_1step_sched
+ from src.pix2pix_turbo import Pix2Pix_Turbo
+
+ model = Pix2Pix_Turbo("sketch_to_image_stochastic")
+
+ style_list = [
+     {
+         "name": "Cinematic",
+         "prompt": "cinematic still {prompt} . emotional, harmonious, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy",
+     },
+     {
+         "name": "3D Model",
+         "prompt": "professional 3d model {prompt} . octane render, highly detailed, volumetric, dramatic lighting",
+     },
+     {
+         "name": "Anime",
+         "prompt": "anime artwork {prompt} . anime style, key visual, vibrant, studio anime, highly detailed",
+     },
+     {
+         "name": "Digital Art",
+         "prompt": "concept art {prompt} . digital artwork, illustrative, painterly, matte painting, highly detailed",
+     },
+     {
+         "name": "Photographic",
+         "prompt": "cinematic photo {prompt} . 35mm photograph, film, bokeh, professional, 4k, highly detailed",
+     },
+     {
+         "name": "Pixel art",
+         "prompt": "pixel-art {prompt} . low-res, blocky, pixel art style, 8-bit graphics",
+     },
+     {
+         "name": "Fantasy art",
+         "prompt": "ethereal fantasy concept art of {prompt} . magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
+     },
+     {
+         "name": "Neonpunk",
+         "prompt": "neonpunk style {prompt} . cyberpunk, vaporwave, neon, vibes, vibrant, stunningly beautiful, crisp, detailed, sleek, ultramodern, magenta highlights, dark purple shadows, high contrast, cinematic, ultra detailed, intricate, professional",
+     },
+     {
+         "name": "Manga",
+         "prompt": "manga style {prompt} . vibrant, high-energy, detailed, iconic, Japanese comic style",
+     },
+ ]
+
+ styles = {k["name"]: k["prompt"] for k in style_list}
+ STYLE_NAMES = list(styles.keys())
+ DEFAULT_STYLE_NAME = "Fantasy art"
+ MAX_SEED = np.iinfo(np.int32).max
+
+
+ def pil_image_to_data_uri(img, format='PNG'):
+     buffered = BytesIO()
+     img.save(buffered, format=format)
+     img_str = base64.b64encode(buffered.getvalue()).decode()
+     return f"data:image/{format.lower()};base64,{img_str}"
+
+
+ def run(image, prompt, prompt_template, style_name, seed, val_r):
+     print(f"prompt: {prompt}")
+     print("sketch updated")
+     if image is None:
+         # nothing drawn yet: return a blank canvas and blank download links
+         ones = Image.new("L", (512, 512), 255)
+         blank_uri = pil_image_to_data_uri(ones)
+         return ones, gr.update(link=blank_uri), gr.update(link=blank_uri)
+     prompt = prompt_template.replace("{prompt}", prompt)
+     image = image.convert("RGB")
+     image_t = TF.to_tensor(image) > 0.5  # binarize the sketch
+     image_pil = TF.to_pil_image(image_t.to(torch.float32))
+     print(f"r_val={val_r}, seed={seed}")
+     with torch.no_grad():
+         c_t = image_t.unsqueeze(0).cuda().float()
+         torch.manual_seed(int(seed))  # the seed arrives as a string from the Textbox
+         B, C, H, W = c_t.shape
+         noise = torch.randn((1, 4, H // 8, W // 8), device=c_t.device)
+         output_image = model(c_t, prompt, deterministic=False, r=val_r, noise_map=noise)
+     output_pil = TF.to_pil_image(output_image[0].cpu() * 0.5 + 0.5)
+     input_sketch_uri = pil_image_to_data_uri(Image.fromarray(255 - np.array(image)))
+     output_image_uri = pil_image_to_data_uri(output_pil)
+     return output_pil, gr.update(link=input_sketch_uri), gr.update(link=output_image_uri)
+
+
+ def update_canvas(use_line, use_eraser):
+     if use_eraser:
+         _color = "#ffffff"
+         brush_size = 20
+     if use_line:
+         _color = "#000000"
+         brush_size = 4
+     return gr.update(brush_radius=brush_size, brush_color=_color, interactive=True)
+
+
+ def upload_sketch(file):
+     _img = Image.open(file.name)
+     _img = _img.convert("L")
+     return gr.update(value=_img, source="upload", interactive=True)
+
+
+ scripts = """
+ async () => {
+     globalThis.theSketchDownloadFunction = () => {
+         console.log("test")
+         var link = document.createElement("a");
+         dataUri = document.getElementById('download_sketch').href
+         link.setAttribute("href", dataUri)
+         link.setAttribute("download", "sketch.png")
+         document.body.appendChild(link); // Required for Firefox
+         link.click();
+         document.body.removeChild(link); // Clean up
+         return false
+     }
+
+     globalThis.theOutputDownloadFunction = () => {
+         console.log("test output download function")
+         var link = document.createElement("a");
+         dataUri = document.getElementById('download_output').href
+         link.setAttribute("href", dataUri);
+         link.setAttribute("download", "output.png");
+         document.body.appendChild(link); // Required for Firefox
+         link.click();
+         document.body.removeChild(link); // Clean up
+         return false
+     }
+
+     globalThis.UNDO_SKETCH_FUNCTION = () => {
+         console.log("undo sketch function")
+         var button_undo = document.querySelector('#input_image > div.image-container.svelte-p3y7hu > div.svelte-s6ybro > button:nth-child(1)');
+         // Create a new 'click' event
+         var event = new MouseEvent('click', {
+             'view': window,
+             'bubbles': true,
+             'cancelable': true
+         });
+         button_undo.dispatchEvent(event);
+     }
+
+     globalThis.DELETE_SKETCH_FUNCTION = () => {
+         console.log("delete sketch function")
+         var button_del = document.querySelector('#input_image > div.image-container.svelte-p3y7hu > div.svelte-s6ybro > button:nth-child(2)');
+         // Create a new 'click' event
+         var event = new MouseEvent('click', {
+             'view': window,
+             'bubbles': true,
+             'cancelable': true
+         });
+         button_del.dispatchEvent(event);
+     }
+
+     globalThis.togglePencil = () => {
+         el_pencil = document.getElementById('my-toggle-pencil');
+         el_pencil.classList.toggle('clicked');
+         // simulate a click on the gradio button
+         btn_gradio = document.querySelector("#cb-line > label > input");
+         var event = new MouseEvent('click', {
+             'view': window,
+             'bubbles': true,
+             'cancelable': true
+         });
+         btn_gradio.dispatchEvent(event);
+         if (el_pencil.classList.contains('clicked')) {
+             document.getElementById('my-toggle-eraser').classList.remove('clicked');
+             document.getElementById('my-div-pencil').style.backgroundColor = "gray";
+             document.getElementById('my-div-eraser').style.backgroundColor = "white";
+         }
+         else {
+             document.getElementById('my-toggle-eraser').classList.add('clicked');
+             document.getElementById('my-div-pencil').style.backgroundColor = "white";
+             document.getElementById('my-div-eraser').style.backgroundColor = "gray";
+         }
+     }
+
+     globalThis.toggleEraser = () => {
+         element = document.getElementById('my-toggle-eraser');
+         element.classList.toggle('clicked');
+         // simulate a click on the gradio button
+         btn_gradio = document.querySelector("#cb-eraser > label > input");
+         var event = new MouseEvent('click', {
+             'view': window,
+             'bubbles': true,
+             'cancelable': true
+         });
+         btn_gradio.dispatchEvent(event);
+         if (element.classList.contains('clicked')) {
+             document.getElementById('my-toggle-pencil').classList.remove('clicked');
+             document.getElementById('my-div-pencil').style.backgroundColor = "white";
+             document.getElementById('my-div-eraser').style.backgroundColor = "gray";
+         }
+         else {
+             document.getElementById('my-toggle-pencil').classList.add('clicked');
+             document.getElementById('my-div-pencil').style.backgroundColor = "gray";
+             document.getElementById('my-div-eraser').style.backgroundColor = "white";
+         }
+     }
+ }
+ """
+
+ with gr.Blocks(css="style.css") as demo:
+     gr.Markdown("# Pix2pix-Turbo: **Sketch**", elem_id="description")
+     # hidden checkboxes used by the custom toolbar buttons to trigger canvas changes
+     line = gr.Checkbox(label="line", value=False, elem_id="cb-line")
+     eraser = gr.Checkbox(label="eraser", value=False, elem_id="cb-eraser")
+     with gr.Row(elem_id="main_row"):
+         with gr.Column(elem_id="column_input"):
+             gr.Markdown("## INPUT", elem_id="input_header")
+             image = gr.Image(
+                 source="canvas", tool="color-sketch", type="pil", image_mode="L",
+                 invert_colors=True, shape=(512, 512), brush_radius=4, height=440, width=440,
+                 brush_color="#000000", interactive=True, show_download_button=True,
+                 elem_id="input_image", show_label=False)
+             download_sketch = gr.Button("Download sketch", scale=1, elem_id="download_sketch")
+
+             gr.HTML("""
+                 <div class="button-row">
+                     <div id="my-div-pencil" class="pad2"> <button id="my-toggle-pencil" onclick="return togglePencil(this)"></button> </div>
+                     <div id="my-div-eraser" class="pad2"> <button id="my-toggle-eraser" onclick="return toggleEraser(this)"></button> </div>
+                     <div class="pad2"> <button id="my-button-undo" onclick="return UNDO_SKETCH_FUNCTION(this)"></button> </div>
+                     <div class="pad2"> <button id="my-button-clear" onclick="return DELETE_SKETCH_FUNCTION(this)"></button> </div>
+                     <div class="pad2"> <button href="TODO" download="image" id="my-button-down" onclick='return theSketchDownloadFunction()'></button> </div>
+                 </div>
+             """)
+             gr.Markdown("## Prompt", elem_id="tools_header")
+             prompt = gr.Textbox(label=None, value="", show_label=False)
+             with gr.Row():
+                 style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME, scale=1)
+                 prompt_temp = gr.Textbox(label="Prompt Style Template", value=styles[DEFAULT_STYLE_NAME], scale=2, max_lines=1)
+
+             with gr.Row():
+                 val_r = gr.Slider(label="Sketch guidance (r)", show_label=True, minimum=0, maximum=1, value=0.4, step=0.01, scale=3)
+                 seed = gr.Textbox(label="Seed", value=42, scale=1, min_width=50)
+                 randomize_seed = gr.Button("Random", scale=1, min_width=50)
+
+         with gr.Column(elem_id="column_process", min_width=50, scale=0.2):
+             run_button = gr.Button("Run", min_width=50)
+
+         with gr.Column(elem_id="column_output"):
+             gr.Markdown("## OUTPUT", elem_id="output_header")
+             result = gr.Image(label="Result", height=440, width=440, elem_id="output_image", show_label=False, show_download_button=True)
+             download_output = gr.Button("Download output", elem_id="download_output")
+
+     # keep the pencil/eraser checkboxes mutually exclusive and update the canvas brush
+     eraser.change(fn=lambda x: gr.update(value=not x), inputs=[eraser], outputs=[line], queue=False, api_name=False).then(update_canvas, [line, eraser], [image])
+     line.change(fn=lambda x: gr.update(value=not x), inputs=[line], outputs=[eraser], queue=False, api_name=False).then(update_canvas, [line, eraser], [image])
+
+     demo.load(None, None, None, _js=scripts)
+     randomize_seed.click(lambda: random.randint(0, MAX_SEED), inputs=[], outputs=seed, queue=False, api_name=False)
+     inputs = [image, prompt, prompt_temp, style, seed, val_r]
+     outputs = [result, download_sketch, download_output]
+     prompt.submit(fn=run, inputs=inputs, outputs=outputs, api_name=False)
+     style.change(lambda x: styles[x], inputs=[style], outputs=[prompt_temp], queue=False, api_name=False).then(
+         fn=run, inputs=inputs, outputs=outputs, api_name=False)
+     val_r.change(run, inputs=inputs, outputs=outputs, queue=False, api_name=False)
+     run_button.click(fn=run, inputs=inputs, outputs=outputs, api_name=False)
+     image.change(run, inputs=inputs, outputs=outputs, queue=False, api_name=False)
+
+ if __name__ == "__main__":
+     demo.queue().launch(debug=True, share=True)
+
requirements.txt ADDED
@@ -0,0 +1,28 @@
+ clip @ git+https://github.com/openai/CLIP.git
+ einops>=0.6.1
+ numpy>=1.24.4
+ open-clip-torch>=2.20.0
+ opencv-python==4.6.0.66
+ pillow>=9.5.0
+ scipy==1.11.1
+ timm>=0.9.2
+ tokenizers==0.12.1
+ torch>=2.0.1
+
+ torchaudio>=2.0.2
+ torchdata==0.6.1
+ torchmetrics>=1.0.1
+ torchvision>=0.15.2
+
+ tqdm>=4.65.0
+ transformers==4.35.2
+ triton==2.0.0
+ urllib3<1.27,>=1.25.4
+ xformers>=0.0.20
+ streamlit-keyup==0.2.0
+ lpips
+ clean-fid
+ peft
+ dominate
+ diffusers==0.25.1
+ gradio==3.43.1