Spaces:
Runtime error
Runtime error
TheStinger
committed on
Commit
·
d692ff9
1
Parent(s):
5611356
initial commit
Browse files- .gitattributes +1 -0
- gradio_gui.py +43 -0
- image2.png +3 -0
- requirements.txt +9 -0
- upscaler.py +128 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
image2.png filter=lfs diff=lfs merge=lfs -text
|
gradio_gui.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import upscaler
|
3 |
+
import os
|
4 |
+
from PIL import Image
|
5 |
+
def upscale_image(prompt,negative_prompt, rows=3,seed=0, image=None,enable_custom_sliders=False,guidance=7,iterations=50,xformers_input=False,cpu_offload_input=False,attention_slicing_input=False):
    """Gradio callback: tile-upscale *image* via ``upscaler.upscale_image``.

    Parameter order must stay in sync with the ``inputs=[...]`` list handed
    to ``gr.Interface`` in this module.

    Args:
        prompt / negative_prompt: conditioning text for the upscaler.
        rows: tile grid dimension (the grid is always square, v x v).
        seed: RNG seed; -1 means pick a random one downstream.
        image: input image (numpy array from the Gradio Image component).
        enable_custom_sliders: apply `guidance`/`iterations` when True.
        xformers_input / cpu_offload_input / attention_slicing_input:
            memory-saving toggles forwarded to the pipeline.

    Returns:
        The upscaled image as a PIL.Image.
    """
    # The tile grid is always square (v x v), so columns mirror rows.
    cols = rows
    output_image = upscaler.upscale_image(image, int(rows), int(cols),int(seed), prompt,negative_prompt,xformers_input,cpu_offload_input,attention_slicing_input,enable_custom_sliders,guidance,iterations)
    # Also keep a copy on disk so the result can be retrieved manually.
    output_image_path = "result.png"
    output_image.save(output_image_path)
    # BUG FIX: the output component is declared gr.outputs.Image(type='pil'),
    # so the callback must return the PIL image itself, not the file-path
    # string — returning the path makes Gradio's postprocessing fail at
    # runtime.
    return output_image
# ---- Input components -------------------------------------------------------
# Construction order here is cosmetic; the wiring order that must match the
# upscale_image(...) signature is the `inputs=[...]` list below.
image_input = gr.inputs.Image(label="Input Image")
prompt_input = gr.inputs.Textbox(default="8k, photography, cgi, unreal engine, octane render, best quality",label="Prompt")
negative_prompt_input = gr.inputs.Textbox(default="jpeg artifacts, lowres, bad quality",label="Negative prompt")
# -1 is the "random seed" sentinel understood by the upscaler.
seed_input = gr.inputs.Number(default=-1, label="Seed")
row_input = gr.inputs.Number(default=1, label="Tile grid dimension amount (number of rows and columns) - v x v")
xformers_input = gr.inputs.Checkbox(default=True,label="Enable Xformers memory efficient attention")
# Gates whether the guidance/iterations sliders below are honored at all.
enable_custom_sliders = gr.inputs.Checkbox(default=False,label="(NOT RECOMMENDED) Click to enable the sliders below; if unchecked, it will ignore them and use the default settings")
cpu_offload_input = gr.inputs.Checkbox(default=True,label="Enable sequential CPU offload")
attention_slicing_input = gr.inputs.Checkbox(default=True,label="Enable attention slicing")
# ---- Output component -------------------------------------------------------
# type='pil' means the callback is expected to return a PIL image.
output_image = gr.outputs.Image(label="Output Image",type='pil')
# Sliders only take effect when enable_custom_sliders is checked.
guidance = gr.Slider(2, 15, 7, step=1, label='Guidance Scale: How much the AI influences the Upscaling.')
iterations = gr.Slider(10, 75, 50, step=1, label='Number of Iterations')
#save_png_button, save_png_halfsize_button ; I don't know how to implement them
# NOTE(review): these two buttons are never wired into the Interface, and
# gr.Button in gradio 3.x takes `value=` for its caption, not `label=` —
# confirm before relying on them; they are currently dead UI elements.
save_png_button = gr.Button(label="Save as a PNG image") # Added this button with the save_png function
save_png_halfsize_button = gr.Button(label="Save as a PNG image (half size)") # Added this button with the save_png_halfsize function

# The inputs list is positional: its order must match the parameters of
# upscale_image (prompt, negative_prompt, rows, seed, image,
# enable_custom_sliders, guidance, iterations, xformers, cpu_offload,
# attention_slicing).
gr.Interface(fn=upscale_image,
             inputs=[prompt_input,negative_prompt_input,row_input,
                     seed_input,
                     image_input,
                     enable_custom_sliders,
                     guidance,
                     iterations,
                     xformers_input,
                     cpu_offload_input,
                     attention_slicing_input],
             outputs=[output_image],
             title="Stable Diffusion x4 Upscaler - Web GUI",
             allow_flagging=False).launch()
image2.png
ADDED
Git LFS Details
|
requirements.txt
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Automatically generated by https://github.com/damnever/pigar.
|
2 |
+
|
3 |
+
Pillow == 9.4.0
|
4 |
+
diffusers
|
5 |
+
gradio == 3.15.0
|
6 |
+
split_image == 2.0.1
|
7 |
+
torch == 1.13.1
|
8 |
+
transformers
|
9 |
+
git+https://github.com/facebookresearch/xformers.git@main#egg=xformers
|
upscaler.py
ADDED
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from PIL import Image
|
2 |
+
from diffusers import StableDiffusionUpscalePipeline
|
3 |
+
import torch
|
4 |
+
from split_image import split
|
5 |
+
import os
|
6 |
+
import random
|
7 |
+
|
8 |
+
|
def split_image(im, rows, cols, should_square, should_quiet=False):
    """Split a PIL image into a rows x cols grid of tiles.

    Args:
        im: source PIL image.
        rows, cols: grid dimensions.
        should_square: when True, first pad the image to a centered square
            on a background color sampled from the image, so every tile has
            the same size.
        should_quiet: suppress progress prints when True.

    Returns:
        List of the cropped tile images, in row-major order.
    """
    im_width, im_height = im.size
    row_width = int(im_width / cols)
    row_height = int(im_height / rows)
    # Only used to label the progress prints; nothing is written to disk.
    name = "image"
    ext = ".png"
    images = []
    if should_square:
        min_dimension = min(im_width, im_height)
        max_dimension = max(im_width, im_height)
        if not should_quiet:
            print("Resizing image to a square...")
            print("Determining background color...")
        bg_color = split.determine_bg_color(im)
        if not should_quiet:
            print("Background color is... " + str(bg_color))
        # NOTE(review): ext is ".png" (dot included), so this comparison is
        # always False and the canvas is always "RGB". RGBA was presumably
        # intended for png output; left as-is because the caller converts
        # every tile to RGB anyway — confirm before "fixing".
        im_r = Image.new("RGBA" if ext == "png" else "RGB",
                         (max_dimension, max_dimension), bg_color)
        # Center the original on the square canvas.
        offset = int((max_dimension - min_dimension) / 2)
        if im_width > im_height:
            im_r.paste(im, (0, offset))
        else:
            im_r.paste(im, (offset, 0))
        im = im_r
        row_width = int(max_dimension / cols)
        row_height = int(max_dimension / rows)
    n = 0
    for i in range(0, rows):
        for j in range(0, cols):
            box = (j * row_width, i * row_height, j * row_width +
                   row_width, i * row_height + row_height)
            outp = im.crop(box)
            outp_path = name + "_" + str(n) + ext
            if not should_quiet:
                print("Exporting image tile: " + outp_path)
            images.append(outp)
            n += 1
    # Return the list directly; the original built a pointless element-wise
    # copy with a comprehension (and ran a no-op os.path.basename on "image").
    return images
+
|
49 |
+
def upscale_image(img, rows, cols,seed,prompt,negative_prompt,xformers,cpu_offload,attention_slicing,enable_custom_sliders=False,guidance=7,iterations=50):
|
50 |
+
model_id = "stabilityai/stable-diffusion-x4-upscaler"
|
51 |
+
try:
|
52 |
+
pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
|
53 |
+
except:
|
54 |
+
pipeline = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16, local_files_only=True)
|
55 |
+
pipeline = pipeline.to("cuda")
|
56 |
+
if xformers:
|
57 |
+
pipeline.enable_xformers_memory_efficient_attention()
|
58 |
+
else:
|
59 |
+
pipeline.disable_xformers_memory_efficient_attention()
|
60 |
+
if cpu_offload:
|
61 |
+
try:
|
62 |
+
pipeline.enable_sequential_cpu_offload()
|
63 |
+
except:
|
64 |
+
pass
|
65 |
+
if attention_slicing:
|
66 |
+
pipeline.enable_attention_slicing()
|
67 |
+
else:
|
68 |
+
pipeline.disable_attention_slicing()
|
69 |
+
img = Image.fromarray(img)
|
70 |
+
# load model and scheduler
|
71 |
+
if seed==-1:
|
72 |
+
generator = torch.manual_seed(random.randint(0, 9999999))
|
73 |
+
else:
|
74 |
+
generator = torch.manual_seed(seed)
|
75 |
+
|
76 |
+
original_width, original_height = img.size
|
77 |
+
max_dimension = max(original_width, original_height)
|
78 |
+
tiles = split_image(img, rows, cols, True, False)
|
79 |
+
ups_tiles = []
|
80 |
+
i = 0
|
81 |
+
for x in tiles:
|
82 |
+
i=i+1
|
83 |
+
if enable_custom_sliders:
|
84 |
+
ups_tile = pipeline(prompt=prompt,negative_prompt=negative_prompt,guidance_scale=guidance, num_inference_steps=iterations, image=x.convert("RGB"),generator=generator).images[0]
|
85 |
+
else:
|
86 |
+
ups_tile = pipeline(prompt=prompt,negative_prompt=negative_prompt, image=x.convert("RGB"),generator=generator).images[0]
|
87 |
+
ups_tiles.append(ups_tile)
|
88 |
+
|
89 |
+
# Determine the size of the merged upscaled image
|
90 |
+
total_width = 0
|
91 |
+
total_height = 0
|
92 |
+
side = 0
|
93 |
+
for ups_tile in ups_tiles:
|
94 |
+
side = ups_tile.width
|
95 |
+
break
|
96 |
+
for x in tiles:
|
97 |
+
tsize = x.width
|
98 |
+
break
|
99 |
+
|
100 |
+
ups_times = abs(side/tsize)
|
101 |
+
new_size = (max_dimension * ups_times, max_dimension * ups_times)
|
102 |
+
total_width = cols*side
|
103 |
+
total_height = rows*side
|
104 |
+
|
105 |
+
# Create a blank image with the calculated size
|
106 |
+
merged_image = Image.new("RGB", (total_width, total_height))
|
107 |
+
|
108 |
+
# Paste each upscaled tile into the blank image
|
109 |
+
current_width = 0
|
110 |
+
current_height = 0
|
111 |
+
maximum_width = cols*side
|
112 |
+
for ups_tile in ups_tiles:
|
113 |
+
merged_image.paste(ups_tile, (current_width, current_height))
|
114 |
+
current_width += ups_tile.width
|
115 |
+
if current_width>=maximum_width:
|
116 |
+
current_width = 0
|
117 |
+
current_height = current_height+side
|
118 |
+
|
119 |
+
# Using the center of the image as pivot, crop the image to the original dimension times four
|
120 |
+
crop_left = (new_size[0] - original_width * ups_times) // 2
|
121 |
+
crop_upper = (new_size[1] - original_height * ups_times) // 2
|
122 |
+
crop_right = crop_left + original_width * ups_times
|
123 |
+
crop_lower = crop_upper + original_height * ups_times
|
124 |
+
final_img = merged_image.crop((crop_left, crop_upper, crop_right, crop_lower))
|
125 |
+
|
126 |
+
# The resulting image should be identical to the original image in proportions / aspect ratio, with no loss of elements.
|
127 |
+
# Save the merged image
|
128 |
+
return final_img
|