zenafey committed on
Commit
bf9ad1e
1 Parent(s): f507805

Add "send to txt2img" button to PNGInfo

Browse files
Files changed (1) hide show
  1. app.py +202 -89
app.py CHANGED
@@ -10,6 +10,54 @@ import PIL
10
  from PIL.ExifTags import TAGS
11
  import html
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
  class Prodia:
15
  def __init__(self, api_key, base=None):
@@ -80,6 +128,68 @@ def image_to_base64(image_path):
80
 
81
  return img_str.decode('utf-8') # Convert bytes to string
82
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
 
84
 
85
  prodia_client = Prodia(api_key=os.getenv("PRODIA_API_KEY"))
@@ -110,7 +220,7 @@ css = """
110
 
111
  with gr.Blocks(css=css) as demo:
112
 
113
-
114
  with gr.Row():
115
  with gr.Column(scale=6):
116
  model = gr.Dropdown(interactive=True,value="absolutereality_v181.safetensors [3d9d4d2b]", show_label=True, label="Stable Diffusion Checkpoint", choices=prodia_client.list_models())
@@ -118,96 +228,99 @@ with gr.Blocks(css=css) as demo:
118
  with gr.Column(scale=1):
119
  gr.Markdown(elem_id="powered-by-prodia", value="AUTOMATIC1111 Stable Diffusion Web UI.<br>Powered by [Prodia](https://prodia.com).")
120
 
121
- with gr.Tab("txt2img"):
122
- with gr.Row():
123
- with gr.Column(scale=6, min_width=600):
124
- prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k", placeholder="Prompt", show_label=False, lines=3)
125
- negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
126
- with gr.Column():
127
- text_button = gr.Button("Generate", variant='primary', elem_id="generate")
128
-
129
- with gr.Row():
130
- with gr.Column(scale=3):
131
- with gr.Tab("Generation"):
132
- with gr.Row():
133
- with gr.Column(scale=1):
134
- sampler = gr.Dropdown(value="Euler a", show_label=True, label="Sampling Method", choices=[
135
- "Euler",
136
- "Euler a",
137
- "LMS",
138
- "Heun",
139
- "DPM2",
140
- "DPM2 a",
141
- "DPM++ 2S a",
142
- "DPM++ 2M",
143
- "DPM++ SDE",
144
- "DPM fast",
145
- "DPM adaptive",
146
- "LMS Karras",
147
- "DPM2 Karras",
148
- "DPM2 a Karras",
149
- "DPM++ 2S a Karras",
150
- "DPM++ 2M Karras",
151
- "DPM++ SDE Karras",
152
- "DDIM",
153
- "PLMS",
154
- ])
 
 
 
 
 
 
 
 
 
155
 
156
- with gr.Column(scale=1):
157
- steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=30, value=25, step=1)
158
-
159
- with gr.Row():
160
- with gr.Column(scale=1):
161
- width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
162
- height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
163
-
164
- with gr.Column(scale=1):
165
- batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
166
- batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
167
-
168
- cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
169
- seed = gr.Number(label="Seed", value=-1)
170
-
171
-
172
- with gr.Column(scale=2):
173
- image_output = gr.Image(value="https://images.prodia.xyz/8ede1a7c-c0ee-4ded-987d-6ffed35fc477.png")
174
-
175
- text_button.click(flip_text, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed], outputs=image_output)
176
 
177
- with gr.Tab("PNG Info"):
178
- def plaintext_to_html(text, classname=None):
179
- content = "<br>\n".join(html.escape(x) for x in text.split('\n'))
180
-
181
- return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"
182
-
183
-
184
- def get_exif_data(image):
185
- items = image.info
186
-
187
- info = ''
188
- for key, text in items.items():
189
- info += f"""
190
- <div>
191
- <p><b>{plaintext_to_html(str(key))}</b></p>
192
- <p>{plaintext_to_html(str(text))}</p>
193
- </div>
194
- """.strip()+"\n"
195
-
196
- if len(info) == 0:
197
- message = "Nothing found in the image."
198
- info = f"<div><p>{message}<p></div>"
199
-
200
- return info
201
-
202
- with gr.Row():
203
- with gr.Column():
204
- image_input = gr.Image(type="pil")
205
-
206
- with gr.Column():
207
- exif_output = gr.HTML(label="EXIF Data")
208
-
209
- image_input.upload(get_exif_data, inputs=[image_input], outputs=exif_output)
210
-
 
 
 
 
 
 
 
 
 
 
 
211
 
212
  demo.queue(concurrency_count=32)
213
  demo.launch()
 
10
  from PIL.ExifTags import TAGS
11
  import html
12
 
13
+ model_names = {
14
+ 'absolutereality_V16': 'absolutereality_V16.safetensors [37db0fc3]',
15
+ 'absolutereality_v181': 'absolutereality_v181.safetensors [3d9d4d2b]',
16
+ 'analog-diffusion-1.0': 'analog-diffusion-1.0.ckpt [9ca13f02]',
17
+ 'anythingv3_0-pruned': 'anythingv3_0-pruned.ckpt [2700c435]',
18
+ 'anything-v4.5-pruned': 'anything-v4.5-pruned.ckpt [65745d25]',
19
+ 'anythingV5_PrtRE': 'anythingV5_PrtRE.safetensors [893e49b9]',
20
+ 'AOM3A3_orangemixs': 'AOM3A3_orangemixs.safetensors [9600da17]',
21
+ 'childrensStories_v13D': 'childrensStories_v13D.safetensors [9dfaabcb]',
22
+ 'childrensStories_v1SemiReal': 'childrensStories_v1SemiReal.safetensors [a1c56dbb]',
23
+ 'childrensStories_v1ToonAnime': 'childrensStories_v1ToonAnime.safetensors [2ec7b88b]',
24
+ 'cyberrealistic_v33': 'cyberrealistic_v33.safetensors [82b0d085]',
25
+ 'deliberate_v2': 'deliberate_v2.safetensors [10ec4b29]',
26
+ 'deliberate_v3': 'deliberate_v3.safetensors [afd9d2d4]',
27
+ 'dreamlike-anime-1.0': 'dreamlike-anime-1.0.safetensors [4520e090]',
28
+ 'dreamlike-diffusion-1.0': 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]',
29
+ 'dreamlike-photoreal-2.0': 'dreamlike-photoreal-2.0.safetensors [fdcf65e7]',
30
+ 'dreamshaper_6BakedVae': 'dreamshaper_6BakedVae.safetensors [114c8abb]',
31
+ 'dreamshaper_7': 'dreamshaper_7.safetensors [5cf5ae06]',
32
+ 'dreamshaper_8': 'dreamshaper_8.safetensors [9d40847d]',
33
+ 'edgeOfRealism_eorV20': 'edgeOfRealism_eorV20.safetensors [3ed5de15]',
34
+ 'EimisAnimeDiffusion_V1': 'EimisAnimeDiffusion_V1.ckpt [4f828a15]',
35
+ 'elldreths-vivid-mix': 'elldreths-vivid-mix.safetensors [342d9d26]',
36
+ 'epicrealism_naturalSinRC1VAE': 'epicrealism_naturalSinRC1VAE.safetensors [90a4c676]',
37
+ 'ICantBelieveItsNotPhotography_seco': 'ICantBelieveItsNotPhotography_seco.safetensors [4e7a3dfd]',
38
+ 'juggernaut_aftermath': 'juggernaut_aftermath.safetensors [5e20c455]',
39
+ 'lyriel_v16': 'lyriel_v16.safetensors [68fceea2]',
40
+ 'mechamix_v10': 'mechamix_v10.safetensors [ee685731]',
41
+ 'meinamix_meinaV9': 'meinamix_meinaV9.safetensors [2ec66ab0]',
42
+ 'meinamix_meinaV11': 'meinamix_meinaV11.safetensors [b56ce717]',
43
+ 'openjourney_V4': 'openjourney_V4.ckpt [ca2f377f]',
44
+ 'portraitplus_V1.0': 'portraitplus_V1.0.safetensors [1400e684]',
45
+ 'Realistic_Vision_V1.4-pruned-fp16': 'Realistic_Vision_V1.4-pruned-fp16.safetensors [8d21810b]',
46
+ 'Realistic_Vision_V2.0': 'Realistic_Vision_V2.0.safetensors [79587710]',
47
+ 'Realistic_Vision_V4.0': 'Realistic_Vision_V4.0.safetensors [29a7afaa]',
48
+ 'Realistic_Vision_V5.0': 'Realistic_Vision_V5.0.safetensors [614d1063]',
49
+ 'redshift_diffusion-V10': 'redshift_diffusion-V10.safetensors [1400e684]',
50
+ 'revAnimated_v122': 'revAnimated_v122.safetensors [3f4fefd9]',
51
+ 'rundiffusionFX25D_v10': 'rundiffusionFX25D_v10.safetensors [cd12b0ee]',
52
+ 'rundiffusionFX_v10': 'rundiffusionFX_v10.safetensors [cd4e694d]',
53
+ 'sdv1_4': 'sdv1_4.ckpt [7460a6fa]',
54
+ 'v1-5-pruned-emaonly': 'v1-5-pruned-emaonly.safetensors [d7049739]',
55
+ 'shoninsBeautiful_v10': 'shoninsBeautiful_v10.safetensors [25d8c546]',
56
+ 'theallys-mix-ii-churned': 'theallys-mix-ii-churned.safetensors [5d9225a4]',
57
+ 'timeless-1.0': 'timeless-1.0.ckpt [7c4971d4]',
58
+ 'toonyou_beta6': 'toonyou_beta6.safetensors [980f6b15]'
59
+ }
60
+
61
 
62
  class Prodia:
63
  def __init__(self, api_key, base=None):
 
128
 
129
  return img_str.decode('utf-8') # Convert bytes to string
130
 
131
+ def get_data(text):
132
+ results = {}
133
+ patterns = {
134
+ 'prompt': r'(.*)',
135
+ 'negative_prompt': r'Negative prompt: (.*)',
136
+ 'steps': r'Steps: (\d+),',
137
+ 'seed': r'Seed: (\d+),',
138
+ 'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
139
+ 'model': r'Model:\s*([^\s,]+)',
140
+ 'cfg_scale': r'CFG scale:\s*([\d\.]+)',
141
+ 'size': r'Size:\s*([0-9]+x[0-9]+)'
142
+ }
143
+ for key in ['prompt', 'negative_prompt', 'steps', 'seed', 'sampler', 'model', 'cfg_scale', 'size']:
144
+ match = re.search(patterns[key], text)
145
+ if match:
146
+ results[key] = match.group(1)
147
+ else:
148
+ results[key] = None
149
+ if results['size'] is not None:
150
+ w, h = results['size'].split("x")
151
+ results['w'] = w
152
+ results['h'] = h
153
+ else:
154
+ results['w'] = None
155
+ results['h'] = None
156
+ return results
157
+
158
+ def send_to_txt2img(image):
159
+
160
+ result = {tabs: gr.Tabs.update(selected="t2i")}
161
+
162
+ try:
163
+ text = image.info['parameters']
164
+ data = get_data(text)
165
+ result[prompt] = gr.update(value=data['prompt'])
166
+ result[negative_prompt] = gr.update(value=data['negative_prompt']) if data['negative_prompt'] is not None else gr.update()
167
+ result[steps] = gr.update(value=int(data['steps'])) if data['steps'] is not None else gr.update()
168
+ result[seed] = gr.update(value=int(data['seed'])) if data['seed'] is not None else gr.update()
169
+ result[cfg_scale] = gr.update(value=float(data['cfg_scale'])) if data['cfg_scale'] is not None else gr.update()
170
+ result[width] = gr.update(value=int(data['w'])) if data['w'] is not None else gr.update()
171
+ result[height] = gr.update(value=int(data['h'])) if data['h'] is not None else gr.update()
172
+ result[sampler] = gr.update(value=data['sampler']) if data['sampler'] is not None else gr.update()
173
+ if model in model_names:
174
+ result[model] = gr.update(value=model_names[model])
175
+ else:
176
+ result[model] = gr.update()
177
+ return result
178
+
179
+ except Exception as e:
180
+ print(e)
181
+ result[prompt] = gr.update()
182
+ result[negative_prompt] = gr.update()
183
+ result[steps] = gr.update()
184
+ result[seed] = gr.update()
185
+ result[cfg_scale] = gr.update()
186
+ result[width] = gr.update()
187
+ result[height] = gr.update()
188
+ result[sampler] = gr.update()
189
+ result[model] = gr.update()
190
+
191
+ return result
192
+
193
 
194
 
195
  prodia_client = Prodia(api_key=os.getenv("PRODIA_API_KEY"))
 
220
 
221
  with gr.Blocks(css=css) as demo:
222
 
223
+
224
  with gr.Row():
225
  with gr.Column(scale=6):
226
  model = gr.Dropdown(interactive=True,value="absolutereality_v181.safetensors [3d9d4d2b]", show_label=True, label="Stable Diffusion Checkpoint", choices=prodia_client.list_models())
 
228
  with gr.Column(scale=1):
229
  gr.Markdown(elem_id="powered-by-prodia", value="AUTOMATIC1111 Stable Diffusion Web UI.<br>Powered by [Prodia](https://prodia.com).")
230
 
231
+ with gr.Tabs() as tabs:
232
+ with gr.Tab("txt2img", id='t2i'):
233
+ with gr.Row():
234
+ with gr.Column(scale=6, min_width=600):
235
+ prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k", placeholder="Prompt", show_label=False, lines=3)
236
+ negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
237
+ with gr.Column():
238
+ text_button = gr.Button("Generate", variant='primary', elem_id="generate")
239
+
240
+ with gr.Row():
241
+ with gr.Column(scale=3):
242
+ with gr.Tab("Generation"):
243
+ with gr.Row():
244
+ with gr.Column(scale=1):
245
+ sampler = gr.Dropdown(value="Euler a", show_label=True, label="Sampling Method", choices=[
246
+ "Euler",
247
+ "Euler a",
248
+ "LMS",
249
+ "Heun",
250
+ "DPM2",
251
+ "DPM2 a",
252
+ "DPM++ 2S a",
253
+ "DPM++ 2M",
254
+ "DPM++ SDE",
255
+ "DPM fast",
256
+ "DPM adaptive",
257
+ "LMS Karras",
258
+ "DPM2 Karras",
259
+ "DPM2 a Karras",
260
+ "DPM++ 2S a Karras",
261
+ "DPM++ 2M Karras",
262
+ "DPM++ SDE Karras",
263
+ "DDIM",
264
+ "PLMS",
265
+ ])
266
+
267
+ with gr.Column(scale=1):
268
+ steps = gr.Slider(label="Sampling Steps", minimum=1, maximum=30, value=25, step=1)
269
+
270
+ with gr.Row():
271
+ with gr.Column(scale=1):
272
+ width = gr.Slider(label="Width", maximum=1024, value=512, step=8)
273
+ height = gr.Slider(label="Height", maximum=1024, value=512, step=8)
274
 
275
+ with gr.Column(scale=1):
276
+ batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
277
+ batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
278
 
279
+ cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, step=1)
280
+ seed = gr.Number(label="Seed", value=-1)
281
+
282
+
283
+ with gr.Column(scale=2):
284
+ image_output = gr.Image(value="https://images.prodia.xyz/8ede1a7c-c0ee-4ded-987d-6ffed35fc477.png")
285
+
286
+ text_button.click(flip_text, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed], outputs=image_output)
287
+
288
+ with gr.Tab("PNG Info"):
289
+ def plaintext_to_html(text, classname=None):
290
+ content = "<br>\n".join(html.escape(x) for x in text.split('\n'))
291
+
292
+ return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"
293
+
294
+
295
+ def get_exif_data(image):
296
+ items = image.info
297
+
298
+ info = ''
299
+ for key, text in items.items():
300
+ info += f"""
301
+ <div>
302
+ <p><b>{plaintext_to_html(str(key))}</b></p>
303
+ <p>{plaintext_to_html(str(text))}</p>
304
+ </div>
305
+ """.strip()+"\n"
306
+
307
+ if len(info) == 0:
308
+ message = "Nothing found in the image."
309
+ info = f"<div><p>{message}<p></div>"
310
+
311
+ return info
312
+
313
+ with gr.Row():
314
+ with gr.Column():
315
+ image_input = gr.Image(type="pil")
316
+
317
+ with gr.Column():
318
+ exif_output = gr.HTML(label="EXIF Data")
319
+ send_to_txt2img_btn = gr.Button("Send to txt2img")
320
+
321
+ image_input.upload(get_exif_data, inputs=[image_input], outputs=exif_output)
322
+ send_to_txt2img_btn.click(send_to_txt2img, inputs=[image_input], outputs=[tabs, prompt, negative_prompt, steps, seed,
323
+ model, sampler, width, height, cfg_scale])
324
 
325
  demo.queue(concurrency_count=32)
326
  demo.launch()