ehristoforu committed on
Commit
87854ee
1 Parent(s): cac8990

Upload 6 files

Files changed (6)
  1. MagicPrompt.txt +36 -0
  2. Upscaler.txt +8 -0
  3. app.txt +343 -0
  4. ideas (1).txt +0 -0
  5. requirements.txt +11 -0
  6. style.txt +16 -0
MagicPrompt.txt ADDED
@@ -0,0 +1,36 @@
+ from transformers import pipeline, set_seed
+ import gradio as gr, random, re
+
+
+ def MagicPromptSD(current_MagicPrompt, starting_text):
+     # Build a GPT-2 text-generation pipeline for the selected MagicPrompt model.
+     gpt2_pipe = pipeline('text-generation', model=current_MagicPrompt, tokenizer='gpt2')
+     with open("ideas.txt", "r") as f:
+         lines = f.readlines()
+
+     # Try up to four times to produce a non-empty set of prompts.
+     for count in range(4):
+         seed = random.randint(100, 1000000)
+         set_seed(seed)
+
+         # If the user gave no idea, pick a random one from ideas.txt.
+         if starting_text == "":
+             starting_text = lines[random.randrange(0, len(lines))].replace("\n", "").lower().capitalize()
+             starting_text = re.sub(r"[,:\-–.!;?_]", '', starting_text)
+             print(starting_text)
+
+         response = gpt2_pipe(starting_text, max_length=random.randint(60, 90), num_return_sequences=4)
+         response_list = []
+         for x in response:
+             resp = x['generated_text'].strip()
+             # Keep only completions that actually extend the input and do not end mid-clause.
+             if resp != starting_text and len(resp) > (len(starting_text) + 4) and not resp.endswith((":", "-", "—")):
+                 response_list.append(resp + '\n')
+
+         response_end = "\n".join(response_list)
+         response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)
+         response_end = response_end.replace("<", "").replace(">", "")
+
+         if response_end != "":
+             return response_end
+         if count == 3:
+             # Last attempt: return whatever is left, even if empty.
+             return response_end
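A minimal usage sketch, not part of the commit: it assumes the module is saved as MagicPrompt.py and that an ideas.txt file (one idea per line) sits in the working directory; the model name is one of the choices offered in the app's MagicPrompt tab.

from MagicPrompt import MagicPromptSD

# Downloads the GPT-2 based MagicPrompt model on first use (assumes transformers and torch are installed).
result = MagicPromptSD("Gustavosta/MagicPrompt-Stable-Diffusion", "a castle in the clouds")
print(result)  # up to four expanded prompt variants, newline-separated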
Upscaler.txt ADDED
@@ -0,0 +1,8 @@
+ import gradio as gr
+ import cv2
+ import numpy as np
+
+ def upscale_image(input_image, radio_input):
+     # Resize the image by the chosen factor using bicubic interpolation.
+     upscale_factor = radio_input
+     output_image = cv2.resize(input_image, None, fx=upscale_factor, fy=upscale_factor, interpolation=cv2.INTER_CUBIC)
+     return output_image
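A standalone sketch of the helper outside Gradio, assuming the module is saved as Upscaler.py and a local input.png exists (both assumptions, not part of this commit):

import cv2
from Upscaler import upscale_image

img = cv2.imread("input.png")   # hypothetical local file, loaded as a BGR array
big = upscale_image(img, 4)     # factor matches one of the app's radio choices: 2, 4, 6, 8
cv2.imwrite("input_x4.png", big)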
app.txt ADDED
@@ -0,0 +1,343 @@
+ """
+ This is the NEW release of DreamDrop V2.0!
+
+ Features added:
+ 1. Generate up to 10 images at a time
+ 2. Image Upscaler (up to x8)
+ 3. Integrated MagicPrompt (for Stable Diffusion and for DALL·E)
+ 4. Generation parameters menu (Steps, Sampler and CFG Scale)
+
+ Enjoy!
+ """
+
+
+ import numpy as np
+ import gradio as gr
+ import requests
+ import time
+ import json
+ import base64
+ import os
+ from io import BytesIO
+ import PIL
+ from PIL.ExifTags import TAGS
+ import html
+ import re
+
+ from MagicPrompt import MagicPromptSD
+ from Upscaler import upscale_image
+
+ batch_count = 1
+ batch_size = 1
+
+ i2i_batch_count = 1
+ i2i_batch_size = 1
+
+ class Prodia:
+     # Thin client for the Prodia Stable Diffusion REST API.
+     def __init__(self, api_key, base=None):
+         self.base = base or "https://api.prodia.com/v1"
+         self.headers = {
+             "X-Prodia-Key": api_key
+         }
+
+     def generate(self, params):
+         response = self._post(f"{self.base}/sd/generate", params)
+         return response.json()
+
+     def transform(self, params):
+         response = self._post(f"{self.base}/sd/transform", params)
+         return response.json()
+
+     def controlnet(self, params):
+         response = self._post(f"{self.base}/sd/controlnet", params)
+         return response.json()
+
+     def get_job(self, job_id):
+         response = self._get(f"{self.base}/job/{job_id}")
+         return response.json()
+
+     def wait(self, job):
+         # Poll the job until it either succeeds or fails.
+         job_result = job
+
+         while job_result['status'] not in ['succeeded', 'failed']:
+             time.sleep(0.25)
+             job_result = self.get_job(job['job'])
+
+         return job_result
+
+     def list_models(self):
+         response = self._get(f"{self.base}/sd/models")
+         return response.json()
+
+     def list_samplers(self):
+         response = self._get(f"{self.base}/sd/samplers")
+         return response.json()
+
+     def _post(self, url, params):
+         headers = {
+             **self.headers,
+             "Content-Type": "application/json"
+         }
+         response = requests.post(url, headers=headers, data=json.dumps(params))
+
+         if response.status_code != 200:
+             raise Exception(f"Bad Prodia Response: {response.status_code}")
+
+         return response
+
+     def _get(self, url):
+         response = requests.get(url, headers=self.headers)
+
+         if response.status_code != 200:
+             raise Exception(f"Bad Prodia Response: {response.status_code}")
+
+         return response
+
+
+ def image_to_base64(image):
+     # Serialize the PIL image to PNG bytes in memory.
+     buffered = BytesIO()
+     image.save(buffered, format="PNG")
+
+     # Encode the bytes as base64 and return a UTF-8 string.
+     img_str = base64.b64encode(buffered.getvalue())
+
+     return img_str.decode('utf-8')
+
+ def remove_id_and_ext(text):
+     # Strip the trailing "[model id]" and the file extension from a model name.
+     text = re.sub(r'\[.*\]$', '', text).strip()
+     if text.endswith(".safetensors"):
+         text = text[:-len(".safetensors")]
+     elif text.endswith(".ckpt"):
+         text = text[:-len(".ckpt")]
+     return text
+
+ def get_data(text):
+     results = {}
+     patterns = {
+         'prompt': r'(.*)',
+         'negative_prompt': r'Negative prompt: (.*)',
+         'steps': r'Steps: (\d+),',
+         'seed': r'Seed: (\d+),',
+         'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
+         'model': r'Model:\s*([^\s,]+)',
+         'cfg_scale': r'CFG scale:\s*([\d\.]+)',
+         'size': r'Size:\s*([0-9]+x[0-9]+)'
+     }
+     for key in ['prompt', 'negative_prompt', 'steps', 'seed', 'sampler', 'model', 'cfg_scale', 'size']:
+         match = re.search(patterns[key], text)
+         if match:
+             results[key] = match.group(1)
+         else:
+             results[key] = None
+     if results['size'] is not None:
+         w, h = results['size'].split("x")
+         results['w'] = w
+         results['h'] = h
+     else:
+         results['w'] = None
+         results['h'] = None
+     return results
+
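+ # Illustrative example (assumed A1111-style metadata, not taken from this repo):
+ # for a parameters string such as
+ #   "a cute cat\nNegative prompt: blurry\nSteps: 25, Sampler: DPM++ SDE, CFG scale: 7, Seed: 123, Size: 768x768, Model: absolutereality_v181"
+ # get_data() returns prompt="a cute cat", negative_prompt="blurry", steps="25",
+ # sampler="DPM++ SDE", cfg_scale="7", seed="123", model="absolutereality_v181",
+ # size="768x768", w="768", h="768" (all values as strings).
+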
+ def send_to_txt2img(image):
+
+     result = {tabs: gr.Tabs.update(selected="t2i")}
+
+     try:
+         text = image.info['parameters']
+         data = get_data(text)
+         result[prompt] = gr.update(value=data['prompt'])
+         result[negative_prompt] = gr.update(value=data['negative_prompt']) if data['negative_prompt'] is not None else gr.update()
+         result[steps] = gr.update(value=int(data['steps'])) if data['steps'] is not None else gr.update()
+         result[seed] = gr.update(value=int(data['seed'])) if data['seed'] is not None else gr.update()
+         result[cfg_scale] = gr.update(value=float(data['cfg_scale'])) if data['cfg_scale'] is not None else gr.update()
+         result[width] = gr.update(value=int(data['w'])) if data['w'] is not None else gr.update()
+         result[height] = gr.update(value=int(data['h'])) if data['h'] is not None else gr.update()
+         result[sampler] = gr.update(value=data['sampler']) if data['sampler'] is not None else gr.update()
+         # Look the parsed model name up in the local short-name -> full-name map.
+         if data['model'] in model_names:
+             result[model] = gr.update(value=model_names[data['model']])
+         else:
+             result[model] = gr.update()
+         return result
+
+     except Exception as e:
+         print(e)
+         result[prompt] = gr.update()
+         result[negative_prompt] = gr.update()
+         result[steps] = gr.update()
+         result[seed] = gr.update()
+         result[cfg_scale] = gr.update()
+         result[width] = gr.update()
+         result[height] = gr.update()
+         result[sampler] = gr.update()
+         result[model] = gr.update()
+
+         return result
+
+
+ prodia_client = Prodia(api_key=os.environ.get("API_X_KEY"))  # You can get the API key at https://docs.prodia.com/reference/getting-started-guide
+ model_list = prodia_client.list_models()
+ model_names = {}
+
+ for model_name in model_list:
+     name_without_ext = remove_id_and_ext(model_name)
+     model_names[name_without_ext] = model_name
+
+ def txt2img(prompt, negative_prompt, model, sampler, steps, cfg_scale, width, height, num_images):
+     generated_images = []
+     for _ in range(int(num_images)):
+         result = prodia_client.generate({
+             "prompt": prompt,
+             "negative_prompt": negative_prompt,
+             "model": model,
+             "steps": steps,
+             "sampler": sampler,
+             "cfg_scale": cfg_scale,
+             "width": width,
+             "height": height,
+             "seed": -1
+         })
+
+         job = prodia_client.wait(result)
+         generated_images.append(job["imageUrl"])
+
+     return generated_images
+
+
+ def img2img(input_image, denoising, prompt, negative_prompt, model, sampler, steps, cfg_scale, i2i_width, i2i_height):
+     result = prodia_client.transform({
+         "imageData": image_to_base64(input_image),
+         "denoising_strength": denoising,
+         "prompt": prompt,
+         "negative_prompt": negative_prompt,
+         "model": model,
+         "steps": steps,
+         "sampler": sampler,
+         "cfg_scale": cfg_scale,
+         "width": i2i_width,
+         "height": i2i_height,
+         "seed": -1
+     })
+
+     job = prodia_client.wait(result)
+
+     return job["imageUrl"]
+
+
+ with gr.Blocks(css="style.css", theme="zenafey/prodia-web") as demo:
+     gr.Markdown("""
+     # 🥏 DreamDrop ```V2.0```
+     """)
+     with gr.Tabs() as tabs:
+         with gr.Tab("Text-to-Image", id='t2i'):
+             with gr.Row():
+                 with gr.Column(scale=6, min_width=600):
+                     prompt = gr.Textbox(label="Prompt", placeholder="a cute cat, 8k", lines=2)
+                     negative_prompt = gr.Textbox(label="Negative Prompt", value="text, blurry, fuzziness", lines=1)
+                     text_button = gr.Button("Generate", variant='primary')
+
+             with gr.Row():
+                 with gr.Column(scale=5):
+                     images_output = gr.Gallery(label="Result Image(s)", num_rows=1, num_cols=5, scale=1, allow_preview=True, preview=True)
+             with gr.Row():
+                 with gr.Accordion("⚙️ Settings", open=False):
+                     with gr.Column(scale=1):
+                         model = gr.Dropdown(interactive=True, value="absolutereality_v181.safetensors [3d9d4d2b]",
+                                             show_label=True, label="Model",
+                                             choices=prodia_client.list_models())
+                     with gr.Column(scale=1):
+                         sampler = gr.Dropdown(label="Sampler", choices=prodia_client.list_samplers(), value="DPM++ SDE", interactive=True)
+                         steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=25, interactive=True)
+                         cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, interactive=True)
+                         width = gr.Slider(label="↔️ Width", maximum=1024, value=768, step=8)
+                         height = gr.Slider(label="↕️ Height", maximum=1024, value=768, step=8)
+                         num_images = gr.Slider(minimum=1, maximum=10, value=1, step=1, label="Image Count", interactive=True)
+
+             text_button.click(txt2img, inputs=[prompt, negative_prompt, model, sampler, steps, cfg_scale, width, height, num_images], outputs=images_output)
+
+         with gr.Tab("Image-to-Image", id='i2i'):
+             with gr.Row():
+                 with gr.Column(scale=6):
+                     with gr.Column(scale=1):
+                         i2i_image_input = gr.Image(label="Input Image", type="pil", interactive=True)
+                     with gr.Column(scale=6, min_width=600):
+                         i2i_prompt = gr.Textbox(label="Prompt", placeholder="a cute cat, 8k", lines=2)
+                         i2i_negative_prompt = gr.Textbox(label="Negative Prompt", lines=1, value="text, blurry, fuzziness")
+                         with gr.Column():
+                             i2i_text_button = gr.Button("Generate", variant='primary', elem_id="generate")
+
+                 with gr.Column(scale=1):
+                     i2i_image_output = gr.Image(label="Result Image(s)")
+             with gr.Row():
+                 with gr.Accordion("⚙️ Settings", open=False):
+                     with gr.Column(scale=1):
+                         i2i_model = gr.Dropdown(interactive=True,
+                                                 value="absolutereality_v181.safetensors [3d9d4d2b]",
+                                                 show_label=True, label="Model",
+                                                 choices=prodia_client.list_models())
+
+                     with gr.Column(scale=1):
+                         i2i_denoising = gr.Slider(label="Denoising Strength", minimum=0, maximum=1, value=0.7, step=0.1)
+                         i2i_sampler = gr.Dropdown(label="Sampler", choices=prodia_client.list_samplers(), value="DPM++ SDE", interactive=True)
+                         i2i_steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=25, interactive=True)
+                         i2i_cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, value=7, interactive=True)
+                         i2i_width = gr.Slider(label="↔️ Width", maximum=1024, value=768, step=8)
+                         i2i_height = gr.Slider(label="↕️ Height", maximum=1024, value=768, step=8)
+
+             i2i_text_button.click(img2img, inputs=[i2i_image_input, i2i_denoising, i2i_prompt, i2i_negative_prompt, i2i_model, i2i_sampler, i2i_steps, i2i_cfg_scale, i2i_width, i2i_height], outputs=i2i_image_output)
+
+         with gr.Tab("Upscaler"):
+             gr.Markdown("""
+             # Upscaler ```x8```
+             """)
+             radio_input = gr.Radio(label="Upscale Levels", choices=[2, 4, 6, 8], value=2)
+             gr.Interface(fn=upscale_image, inputs=[gr.Image(label="Input Image", interactive=True), radio_input], outputs=gr.Image(label="Upscaled Image"))
+
+         with gr.Tab("PNG-Info"):
+             def plaintext_to_html(text, classname=None):
+                 content = "<br>\n".join(html.escape(x) for x in text.split('\n'))
+
+                 return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"
+
+             def get_exif_data(image):
+                 items = image.info
+
+                 info = ''
+                 for key, text in items.items():
+                     info += f"""
+                     <div>
+                     <p><b>{plaintext_to_html(str(key))}</b></p>
+                     <p>{plaintext_to_html(str(text))}</p>
+                     </div>
+                     """.strip() + "\n"
+
+                 if len(info) == 0:
+                     message = "Nothing found in the image."
+                     info = f"<div><p>{message}</p></div>"
+
+                 return info
+
+             with gr.Row():
+                 gr.Markdown("""
+                 # PNG-Info
+                 """)
+                 with gr.Column():
+                     image_input = gr.Image(type="pil", label="Input Image", interactive=True)
+
+                 with gr.Column():
+                     exif_output = gr.HTML(label="EXIF Data")
+
+             image_input.upload(get_exif_data, inputs=[image_input], outputs=exif_output)
+
+         with gr.Tab("MagicPrompt"):
+             gr.Markdown("""
+             # MagicPrompt
+             """)
+             gr.Interface(fn=MagicPromptSD, inputs=[gr.Radio(label="Prompt Model", choices=["Gustavosta/MagicPrompt-Stable-Diffusion", "Gustavosta/MagicPrompt-Dalle"], value="Gustavosta/MagicPrompt-Stable-Diffusion"), gr.Textbox(label="Enter your idea")], outputs=gr.Textbox(label="Output Prompt", interactive=False), allow_flagging='never')
+
+ demo.launch(show_api=False)
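A hedged sketch of running the app locally, not part of the commit: API_X_KEY is the environment variable the code reads; the key value here is a placeholder, and the sketch assumes app.txt is saved as app.py next to the other modules.

import os

os.environ["API_X_KEY"] = "<your-prodia-api-key>"  # placeholder; obtain a key via the Prodia docs linked above

import app  # the Prodia client is created and demo.launch(show_api=False) runs at import time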
ideas (1).txt ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ numpy
+ gradio
+ requests
+ pillow
+ pyexif
+ jinja2==3.1.2
+ transformers==4.22.2
+ sentencepiece
+ torch
+ opencv-python
+ rembg
style.txt ADDED
@@ -0,0 +1,16 @@
+ h1 {
+     text-align: center;
+ }
+
+ #duplicate-button {
+     margin: auto;
+     color: white;
+     background: #1565c0;
+     border-radius: 100vh;
+ }
+
+ #component-0 {
+     max-width: 900px;
+     margin: auto;
+     padding-top: 1.5rem;
+ }