zenafey commited on
Commit
3756e16
1 Parent(s): 3a7cb85

Upload 4 files

Browse files
Files changed (4) hide show
  1. css.py +87 -0
  2. grutils.py +60 -0
  3. inference.py +96 -0
  4. utils.py +87 -0
css.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Custom stylesheet injected into the Gradio app (passed as `css=` to gr.Blocks).
# Flattens block borders/backgrounds, tightens dropdown/slider/checkbox spacing,
# removes the container max-width, and sizes the `#model_dd` dropdown.
css = """
:root, .dark{
--checkbox-label-gap: 0.25em 0.1em;
--section-header-text-size: 12pt;
--block-background-fill: transparent;
}
.block.padded:not(.gradio-accordion) {
padding: 0 !important;
}
div.gradio-container{
max-width: unset !important;
}
.compact{
background: transparent !important;
padding: 0 !important;
}
div.form{
border-width: 0;
box-shadow: none;
background: transparent;
overflow: visible;
gap: 0.5em;
}
.block.gradio-dropdown,
.block.gradio-slider,
.block.gradio-checkbox,
.block.gradio-textbox,
.block.gradio-radio,
.block.gradio-checkboxgroup,
.block.gradio-number,
.block.gradio-colorpicker {
border-width: 0 !important;
box-shadow: none !important;
}
.gradio-dropdown label span:not(.has-info),
.gradio-textbox label span:not(.has-info),
.gradio-number label span:not(.has-info)
{
margin-bottom: 0;
}
.gradio-dropdown ul.options{
z-index: 3000;
min-width: fit-content;
max-width: inherit;
white-space: nowrap;
}
.gradio-dropdown ul.options li.item {
padding: 0.05em 0;
}
.gradio-dropdown ul.options li.item.selected {
background-color: var(--neutral-100);
}
.dark .gradio-dropdown ul.options li.item.selected {
background-color: var(--neutral-900);
}
.gradio-dropdown div.wrap.wrap.wrap.wrap{
box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);
}
.gradio-dropdown:not(.multiselect) .wrap-inner.wrap-inner.wrap-inner{
flex-wrap: unset;
}
.gradio-dropdown .single-select{
white-space: nowrap;
overflow: hidden;
}
.gradio-dropdown .token-remove.remove-all.remove-all{
display: none;
}
.gradio-dropdown.multiselect .token-remove.remove-all.remove-all{
display: flex;
}
.gradio-slider input[type="number"]{
width: 6em;
}
.block.gradio-checkbox {
margin: 0.75em 1.5em 0 0;
}
.gradio-html div.wrap{
height: 100%;
}
div.gradio-html.min{
min-height: 0;
}
#model_dd {
width: 16%;
}
"""
grutils.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from utils import extract_data, remove_id_and_ext
from prodiapy import Custom
import os

# Prodia API client, authenticated via the PRODIA_API_KEY environment variable.
# NOTE(review): os.getenv returns None if the variable is unset — presumably
# Custom then fails on the first request; confirm against prodiapy docs.
pipe = Custom(os.getenv('PRODIA_API_KEY'))


# Checkpoint names available on the API, fetched once at import time.
model_list = pipe.constant("/sd/models")
# Maps "clean" display name (trailing "[id]" tag and extension stripped) ->
# full model filename, used to resolve names parsed from PNG metadata back
# to the identifiers the API expects.
model_names = {}

for model_name in model_list:
    name_without_ext = remove_id_and_ext(model_name)
    model_names[name_without_ext] = model_name
17
def update_btn_start():
    """Return updates for a pair of components when a job starts.

    The first output is hidden and the second shown — presumably swapping a
    "generate" button for an in-progress/stop control (wired up by the caller).
    """
    hidden = gr.update(visible=False)
    shown = gr.update(visible=True)
    return [hidden, shown]
23
+
24
def update_btn_end():
    """Return updates for a pair of components when a job finishes.

    Mirror image of update_btn_start: the first output is shown again and the
    second hidden.
    """
    shown = gr.update(visible=True)
    hidden = gr.update(visible=False)
    return [shown, hidden]
30
+
31
def switch_to_t2i():
    """Return a Tabs update selecting the tab with id "t2i" (text-to-image)."""
    tab_update = gr.Tabs.update(selected="t2i")
    return tab_update
34
+
35
def send_to_txt2img(image):
    """Populate the txt2img form from a PNG's embedded generation parameters.

    Reads the AUTOMATIC1111-style "parameters" text from ``image.info``,
    parses it with extract_data, and returns a list of 9 gr.update objects
    for: prompt, negative_prompt, steps, seed, cfg_scale, width, height,
    sampler, model (in that order — must match the outputs wired by the
    caller). Fields missing from the metadata become no-op gr.update().
    On any failure (no metadata, unparsable text) the error is printed and
    None is returned.
    """
    try:
        text = image.info['parameters']
        data = extract_data(text)

        # Only select a model if the parsed name maps to a known checkpoint;
        # otherwise leave the dropdown unchanged.
        if data['model'] in model_names:
            model = gr.update(value=model_names[data['model']])
        else:
            model = gr.update()

        result = [
            # prompt always matches (pattern '(.*)'), so no None guard here.
            gr.update(value=data['prompt']),
            gr.update(value=data['negative_prompt']) if data['negative_prompt'] is not None else gr.update(),
            # extract_data returns strings; convert to the numeric types the widgets expect.
            gr.update(value=int(data['steps'])) if data['steps'] is not None else gr.update(),
            gr.update(value=int(data['seed'])) if data['seed'] is not None else gr.update(),
            gr.update(value=float(data['cfg_scale'])) if data['cfg_scale'] is not None else gr.update(),
            gr.update(value=int(data['w'])) if data['w'] is not None else gr.update(),
            gr.update(value=int(data['h'])) if data['h'] is not None else gr.update(),
            gr.update(value=data['sampler']) if data['sampler'] is not None else gr.update(),
            model
        ]
        return result

    # Deliberate best-effort: bad/missing metadata should not crash the UI.
    except Exception as e:
        print(e)
        return
inference.py ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from threading import Thread

from PIL import Image
import gradio as gr
import gradio_user_history as gr_user_history

from prodiapy import Custom
from prodiapy.util import load

from utils import image_to_base64

# Prodia API client, authenticated via the PRODIA_API_KEY environment variable.
# Fix: `os` was used here without being imported, so this module raised
# NameError at import time.
pipe = Custom(os.getenv("PRODIA_API_KEY"))
12
def txt2img(prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed, batch_count, profile: gr.OAuthProfile | None):
    """Generate `batch_count` text-to-image results via the Prodia API.

    Submits one /sd/generate job per image, each in its own thread so the
    jobs run concurrently, waits for all of them, records every result in
    the user's gradio history, and returns a gallery update with the image
    URLs (preview mode off).
    """
    # URLs of finished images; worker threads append here (list.append is
    # atomic under the GIL, so no explicit lock is needed). Completion order
    # is nondeterministic, so the gallery order may vary between runs.
    total_images = []
    threads = []

    def generate_one_image():
        # Submit one generation job and block until the API reports it done.
        result = pipe.create(
            "/sd/generate",
            prompt=prompt,
            negative_prompt=negative_prompt,
            model=model,
            steps=steps,
            cfg_scale=cfg_scale,
            sampler=sampler,
            width=width,
            height=height,
            seed=seed
        )
        job = pipe.wait_for(result)
        total_images.append(job['imageUrl'])

    # Fan out: one thread per requested image.
    for x in range(batch_count):
        t = Thread(target=generate_one_image)
        threads.append(t)
        t.start()

    for t in threads:
        t.join()

    # Persist each result (downloaded via load()) to the user's history.
    for image in total_images:
        gr_user_history.save_image(label=prompt, image=Image.open(load(image)), profile=profile)

    return gr.update(value=total_images, preview=False)
+
45
+
46
def img2img(input_image, denoising, prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed,
            batch_count):
    """Generate `batch_count` image-to-image results via the Prodia API.

    Same fan-out pattern as txt2img but against /sd/transform, with the
    source image sent base64-encoded and a denoising strength. Returns a
    gallery update with the image URLs, or None when no input image was
    provided. NOTE(review): unlike txt2img/upscale this takes no `profile`
    and does not save to user history — confirm that is intentional.
    """
    if input_image is None:
        return

    # URLs of finished images; threads append concurrently (append is atomic
    # under the GIL), so result order is nondeterministic.
    total_images = []
    threads = []

    def generate_one_image():
        # Submit one transform job and block until it completes.
        result = pipe.create(
            "/sd/transform",
            imageData=image_to_base64(input_image),
            denoising_strength=denoising,
            prompt=prompt,
            negative_prompt=negative_prompt,
            model=model,
            steps=steps,
            cfg_scale=cfg_scale,
            sampler=sampler,
            width=width,
            height=height,
            seed=seed
        )
        job = pipe.wait_for(result)
        total_images.append(job['imageUrl'])

    # Fan out: one thread per requested image.
    for x in range(batch_count):
        t = Thread(target=generate_one_image)
        threads.append(t)
        t.start()

    for t in threads:
        t.join()

    return gr.update(value=total_images, preview=False)
+
83
+
84
def upscale(image, scale, profile: gr.OAuthProfile | None):
    """Upscale `image` by `scale` via the Prodia /upscale endpoint.

    Saves the result to the user's gradio history and returns its URL.
    Returns None when no image was provided.
    """
    if image is None:
        return

    # Submit the job with the image base64-encoded, then block until done.
    job = pipe.create(
        '/upscale',
        imageData=image_to_base64(image),
        resize=scale
    )
    result_url = pipe.wait_for(job)['imageUrl']

    # Record the upscaled image in the user's history before returning.
    history_image = Image.open(load(result_url))
    gr_user_history.save_image(label=f'upscale by {scale}', image=history_image, profile=profile)

    return result_url
utils.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import base64
3
+ import html
4
+ from io import BytesIO
5
+
6
+
7
def image_to_base64(image):
    """Serialize a PIL image to a base64-encoded PNG, returned as an ASCII str."""
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue())
    return encoded.decode('utf-8')
+
17
+
18
def remove_id_and_ext(text):
    """Strip a trailing "[id]" tag and a known model-file extension from a model name.

    e.g. "model.safetensors [abc123]" -> "model",
         "model.ckpt [abc123]"        -> "model",
         anything else is returned with only the "[id]" tag removed.

    Fix: the original compared ``text[-12:].strip()`` against "safetensors" /
    "ckpt". That slice only equals "safetensors" when a trailing space happens
    to survive the tag removal, and can never equal "ckpt" (the 12-char slice
    always carries extra leading characters), so ".ckpt" names were never
    stripped — and ``text[:-4]`` would have left the dot behind anyway.
    """
    # Drop a trailing "[...]" id tag, then any whitespace left around the name.
    text = re.sub(r'\[.*\]$', '', text).strip()
    for ext in (".safetensors", ".ckpt"):
        if text.endswith(ext):
            return text[:-len(ext)]
    return text
+
27
+
28
def extract_data(text):
    """Parse AUTOMATIC1111-style generation-parameter text into a dict.

    Returns keys: prompt, negative_prompt, steps, seed, sampler, model,
    cfg_scale, size — plus 'w'/'h' split out of size. Every captured value
    is a string; fields missing from `text` are None.

    Fix: iterate the patterns dict directly instead of a duplicated,
    drift-prone hard-coded key list.
    """
    patterns = {
        'prompt': r'(.*)',                       # first line of the block
        'negative_prompt': r'Negative prompt: (.*)',
        'steps': r'Steps: (\d+),',
        'seed': r'Seed: (\d+),',
        'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
        'model': r'Model:\s*([^\s,]+)',
        'cfg_scale': r'CFG scale:\s*([\d\.]+)',
        'size': r'Size:\s*([0-9]+x[0-9]+)'
    }

    results = {}
    for key, pattern in patterns.items():
        match = re.search(pattern, text)
        results[key] = match.group(1) if match else None

    # Split "WxH" into the separate width/height strings the UI expects.
    if results['size'] is not None:
        w, h = results['size'].split("x")
        results['w'] = w
        results['h'] = h
    else:
        results['w'] = None
        results['h'] = None
    return results
+
55
+
56
def place_lora(current_prompt, lora_name):
    """Toggle a "<lora:NAME:1>" tag in the prompt (generator, for Gradio wiring).

    Yields the prompt with the tag removed when one is present, otherwise
    the prompt with " <lora:NAME:1> " appended.

    Fix: `lora_name` is user/model data interpolated into a regex — escape it
    so names containing metacharacters ('.', '+', '(', ...) cannot break the
    pattern or over-match.
    """
    pattern = r"<lora:" + re.escape(lora_name) + r":.*?>"

    if re.search(pattern, current_prompt):
        yield re.sub(pattern, "", current_prompt)
    else:
        yield current_prompt + " <lora:" + lora_name + ":1> "
+
64
+
65
def plaintext_to_html(text, classname=None):
    """HTML-escape `text` and wrap it in a <p>, joining its lines with <br>.

    When `classname` is truthy it is emitted as the paragraph's class.
    """
    escaped_lines = (html.escape(line) for line in text.split('\n'))
    content = "<br>\n".join(escaped_lines)
    if classname:
        return f"<p class='{classname}'>{content}</p>"
    return f"<p>{content}</p>"
+
70
+
71
def get_exif_data(image):
    """Render a PIL image's `info` metadata as HTML key/value <div> blocks.

    Returns a placeholder paragraph when the image carries no metadata.
    Fix: the placeholder had an unclosed paragraph tag ("<p>{message}<p>");
    also builds the output with a join instead of quadratic string +=.
    """
    items = image.info

    chunks = []
    for key, text in items.items():
        chunks.append(f"""
<div>
<p><b>{plaintext_to_html(str(key))}</b></p>
<p>{plaintext_to_html(str(text))}</p>
</div>
""".strip() + "\n")
    info = ''.join(chunks)

    if len(info) == 0:
        message = "Nothing found in the image."
        info = f"<div><p>{message}</p></div>"

    return info