toto10 committed
Commit d925d12
1 Parent(s): 2462654
scripts/__pycache__/prompts_from_file.cpython-310.pyc ADDED
Binary file (4.98 kB).

scripts/__pycache__/sd_upscale.cpython-310.pyc ADDED
Binary file (3.58 kB).

scripts/__pycache__/xyz_grid.cpython-310.pyc ADDED
Binary file (25.6 kB).
 
scripts/custom_code.py ADDED
@@ -0,0 +1,90 @@
+ import modules.scripts as scripts
+ import gradio as gr
+ import ast
+ import copy
+
+ from modules.processing import Processed
+ from modules.shared import cmd_opts
+
+
+ def convertExpr2Expression(expr):
+     expr.lineno = 0
+     expr.col_offset = 0
+     result = ast.Expression(expr.value, lineno=0, col_offset=0)
+
+     return result
+
+
+ def exec_with_return(code, module):
+     """
+     like exec() but can return values
+     https://stackoverflow.com/a/52361938/5862977
+     """
+     code_ast = ast.parse(code)
+
+     init_ast = copy.deepcopy(code_ast)
+     init_ast.body = code_ast.body[:-1]
+
+     last_ast = copy.deepcopy(code_ast)
+     last_ast.body = code_ast.body[-1:]
+
+     exec(compile(init_ast, "<ast>", "exec"), module.__dict__)
+     if type(last_ast.body[0]) == ast.Expr:
+         return eval(compile(convertExpr2Expression(last_ast.body[0]), "<ast>", "eval"), module.__dict__)
+     else:
+         exec(compile(last_ast, "<ast>", "exec"), module.__dict__)
+
+
+ class Script(scripts.Script):
+
+     def title(self):
+         return "Custom code"
+
+     def show(self, is_img2img):
+         return cmd_opts.allow_code
+
+     def ui(self, is_img2img):
+         example = """from modules.processing import process_images
+
+ p.width = 768
+ p.height = 768
+ p.batch_size = 2
+ p.steps = 10
+
+ return process_images(p)
+ """
+
+         code = gr.Code(value=example, language="python", label="Python code", elem_id=self.elem_id("code"))
+         indent_level = gr.Number(label='Indent level', value=2, precision=0, elem_id=self.elem_id("indent_level"))
+
+         return [code, indent_level]
+
+     def run(self, p, code, indent_level):
+         assert cmd_opts.allow_code, '--allow-code option must be enabled'
+
+         display_result_data = [[], -1, ""]
+
+         def display(imgs, s=display_result_data[1], i=display_result_data[2]):
+             display_result_data[0] = imgs
+             display_result_data[1] = s
+             display_result_data[2] = i
+
+         from types import ModuleType
+         module = ModuleType("testmodule")
+         module.__dict__.update(globals())
+         module.p = p
+         module.display = display
+
+         indent = " " * indent_level
+         indented = code.replace('\n', f"\n{indent}")
+         body = f"""def __webuitemp__():
+ {indent}{indented}
+ __webuitemp__()"""
+
+         result = exec_with_return(body, module)
+
+         if isinstance(result, Processed):
+             return result
+
+         return Processed(p, *display_result_data)
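
Note: the key trick in custom_code.py is `exec_with_return`: the pasted code is wrapped in a function, everything but the last statement is exec'd, and the last statement is eval'd if it is a bare expression so its value can be returned. A minimal standalone sketch of the same pattern (standard library only; an illustration, not the webui code verbatim):

    import ast

    def exec_with_return(code, namespace):
        code_ast = ast.parse(code)
        # Run everything except the last statement as ordinary module code.
        init = ast.Module(body=code_ast.body[:-1], type_ignores=[])
        exec(compile(init, "<ast>", "exec"), namespace)
        last = code_ast.body[-1]
        if isinstance(last, ast.Expr):
            # Last statement is a bare expression: evaluate it and return its value.
            expr = ast.fix_missing_locations(ast.Expression(body=last.value))
            return eval(compile(expr, "<ast>", "eval"), namespace)
        exec(compile(ast.Module(body=[last], type_ignores=[]), "<ast>", "exec"), namespace)

    print(exec_with_return("a = 2\nb = 3\na * b", {}))  # prints 6
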
scripts/img2imgalt.py ADDED
@@ -0,0 +1,218 @@
+ from collections import namedtuple
+
+ import numpy as np
+ from tqdm import trange
+
+ import modules.scripts as scripts
+ import gradio as gr
+
+ from modules import processing, shared, sd_samplers, sd_samplers_common
+
+ import torch
+ import k_diffusion as K
+
+ def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
+     x = p.init_latent
+
+     s_in = x.new_ones([x.shape[0]])
+     if shared.sd_model.parameterization == "v":
+         dnw = K.external.CompVisVDenoiser(shared.sd_model)
+         skip = 1
+     else:
+         dnw = K.external.CompVisDenoiser(shared.sd_model)
+         skip = 0
+     sigmas = dnw.get_sigmas(steps).flip(0)
+
+     shared.state.sampling_steps = steps
+
+     for i in trange(1, len(sigmas)):
+         shared.state.sampling_step += 1
+
+         x_in = torch.cat([x] * 2)
+         sigma_in = torch.cat([sigmas[i] * s_in] * 2)
+         cond_in = torch.cat([uncond, cond])
+
+         image_conditioning = torch.cat([p.image_conditioning] * 2)
+         cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]}
+
+         c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)[skip:]]
+         t = dnw.sigma_to_t(sigma_in)
+
+         eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
+         denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
+
+         denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
+
+         d = (x - denoised) / sigmas[i]
+         dt = sigmas[i] - sigmas[i - 1]
+
+         x = x + d * dt
+
+         sd_samplers_common.store_latent(x)
+
+         # This shouldn't be necessary, but solved some VRAM issues
+         del x_in, sigma_in, cond_in, c_out, c_in, t
+         del eps, denoised_uncond, denoised_cond, denoised, d, dt
+
+     shared.state.nextjob()
+
+     return x / x.std()
+
+
+ Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt", "sigma_adjustment"])
+
+
+ # Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736
+ def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):
+     x = p.init_latent
+
+     s_in = x.new_ones([x.shape[0]])
+     if shared.sd_model.parameterization == "v":
+         dnw = K.external.CompVisVDenoiser(shared.sd_model)
+         skip = 1
+     else:
+         dnw = K.external.CompVisDenoiser(shared.sd_model)
+         skip = 0
+     sigmas = dnw.get_sigmas(steps).flip(0)
+
+     shared.state.sampling_steps = steps
+
+     for i in trange(1, len(sigmas)):
+         shared.state.sampling_step += 1
+
+         x_in = torch.cat([x] * 2)
+         sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)
+         cond_in = torch.cat([uncond, cond])
+
+         image_conditioning = torch.cat([p.image_conditioning] * 2)
+         cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]}
+
+         c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)[skip:]]
+
+         if i == 1:
+             t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))
+         else:
+             t = dnw.sigma_to_t(sigma_in)
+
+         eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
+         denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)
+
+         denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale
+
+         if i == 1:
+             d = (x - denoised) / (2 * sigmas[i])
+         else:
+             d = (x - denoised) / sigmas[i - 1]
+
+         dt = sigmas[i] - sigmas[i - 1]
+         x = x + d * dt
+
+         sd_samplers_common.store_latent(x)
+
+         # This shouldn't be necessary, but solved some VRAM issues
+         del x_in, sigma_in, cond_in, c_out, c_in, t
+         del eps, denoised_uncond, denoised_cond, denoised, d, dt
+
+     shared.state.nextjob()
+
+     return x / sigmas[-1]
+
+
+ class Script(scripts.Script):
+     def __init__(self):
+         self.cache = None
+
+     def title(self):
+         return "img2img alternative test"
+
+     def show(self, is_img2img):
+         return is_img2img
+
+     def ui(self, is_img2img):
+         info = gr.Markdown('''
+         * `CFG Scale` should be 2 or lower.
+         ''')
+
+         override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True, elem_id=self.elem_id("override_sampler"))
+
+         override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True, elem_id=self.elem_id("override_prompt"))
+         original_prompt = gr.Textbox(label="Original prompt", lines=1, elem_id=self.elem_id("original_prompt"))
+         original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1, elem_id=self.elem_id("original_negative_prompt"))
+
+         override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True, elem_id=self.elem_id("override_steps"))
+         st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50, elem_id=self.elem_id("st"))
+
+         override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True, elem_id=self.elem_id("override_strength"))
+
+         cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0, elem_id=self.elem_id("cfg"))
+         randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0, elem_id=self.elem_id("randomness"))
+         sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=self.elem_id("sigma_adjustment"))
+
+         return [
+             info,
+             override_sampler,
+             override_prompt, original_prompt, original_negative_prompt,
+             override_steps, st,
+             override_strength,
+             cfg, randomness, sigma_adjustment,
+         ]
+
+     def run(self, p, _, override_sampler, override_prompt, original_prompt, original_negative_prompt, override_steps, st, override_strength, cfg, randomness, sigma_adjustment):
+         # Override
+         if override_sampler:
+             p.sampler_name = "Euler"
+         if override_prompt:
+             p.prompt = original_prompt
+             p.negative_prompt = original_negative_prompt
+         if override_steps:
+             p.steps = st
+         if override_strength:
+             p.denoising_strength = 1.0
+
+         def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
+             lat = (p.init_latent.cpu().numpy() * 10).astype(int)
+
+             same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \
+                 and self.cache.original_prompt == original_prompt \
+                 and self.cache.original_negative_prompt == original_negative_prompt \
+                 and self.cache.sigma_adjustment == sigma_adjustment
+             same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100
+
+             if same_everything:
+                 rec_noise = self.cache.noise
+             else:
+                 shared.state.job_count += 1
+                 cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt])
+                 uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt])
+                 if sigma_adjustment:
+                     rec_noise = find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg, st)
+                 else:
+                     rec_noise = find_noise_for_image(p, cond, uncond, cfg, st)
+                 self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment)
+
+             rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p)
+
+             combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)
+
+             sampler = sd_samplers.create_sampler(p.sampler_name, p.sd_model)
+
+             sigmas = sampler.model_wrap.get_sigmas(p.steps)
+
+             noise_dt = combined_noise - (p.init_latent / sigmas[0])
+
+             p.seed = p.seed + 1
+
+             return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning)
+
+         p.sample = sample_extra
+
+         p.extra_generation_params["Decode prompt"] = original_prompt
+         p.extra_generation_params["Decode negative prompt"] = original_negative_prompt
+         p.extra_generation_params["Decode CFG scale"] = cfg
+         p.extra_generation_params["Decode steps"] = st
+         p.extra_generation_params["Randomness"] = randomness
+         p.extra_generation_params["Sigma Adjustment"] = sigma_adjustment
+
+         processed = processing.process_images(p)
+
+         return processed
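
Note: the `combined_noise` line in `sample_extra` is a variance-preserving blend: for independent, roughly unit-variance noise tensors, Var((1-r)·a + r·b) = (1-r)² + r², so dividing by sqrt(r² + (1-r)²) keeps the mix at about unit variance for any Randomness value r. A quick numpy check (illustration only):

    import numpy as np

    rng = np.random.default_rng(0)
    rec_noise = rng.standard_normal(1_000_000)   # stands in for the recovered noise
    rand_noise = rng.standard_normal(1_000_000)  # stands in for the fresh noise

    for r in (0.0, 0.25, 0.5, 0.75, 1.0):
        mixed = ((1 - r) * rec_noise + r * rand_noise) / ((r**2 + (1 - r)**2) ** 0.5)
        print(r, round(float(mixed.std()), 3))  # std stays ~1.0 for every r
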
scripts/loopback.py ADDED
@@ -0,0 +1,140 @@
+ import math
+
+ import gradio as gr
+ import modules.scripts as scripts
+ from modules import deepbooru, images, processing, shared
+ from modules.processing import Processed
+ from modules.shared import opts, state
+
+
+ class Script(scripts.Script):
+     def title(self):
+         return "Loopback"
+
+     def show(self, is_img2img):
+         return is_img2img
+
+     def ui(self, is_img2img):
+         loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops"))
+         final_denoising_strength = gr.Slider(minimum=0, maximum=1, step=0.01, label='Final denoising strength', value=0.5, elem_id=self.elem_id("final_denoising_strength"))
+         denoising_curve = gr.Dropdown(label="Denoising strength curve", choices=["Aggressive", "Linear", "Lazy"], value="Linear")
+         append_interrogation = gr.Dropdown(label="Append interrogated prompt at each iteration", choices=["None", "CLIP", "DeepBooru"], value="None")
+
+         return [loops, final_denoising_strength, denoising_curve, append_interrogation]
+
+     def run(self, p, loops, final_denoising_strength, denoising_curve, append_interrogation):
+         processing.fix_seed(p)
+         batch_count = p.n_iter
+         p.extra_generation_params = {
+             "Final denoising strength": final_denoising_strength,
+             "Denoising curve": denoising_curve
+         }
+
+         p.batch_size = 1
+         p.n_iter = 1
+
+         info = None
+         initial_seed = None
+         initial_info = None
+         initial_denoising_strength = p.denoising_strength
+
+         grids = []
+         all_images = []
+         original_init_image = p.init_images
+         original_prompt = p.prompt
+         original_inpainting_fill = p.inpainting_fill
+         state.job_count = loops * batch_count
+
+         initial_color_corrections = [processing.setup_color_correction(p.init_images[0])]
+
+         def calculate_denoising_strength(loop):
+             strength = initial_denoising_strength
+
+             if loops == 1:
+                 return strength
+
+             progress = loop / (loops - 1)
+             if denoising_curve == "Aggressive":
+                 strength = math.sin((progress) * math.pi * 0.5)
+             elif denoising_curve == "Lazy":
+                 strength = 1 - math.cos((progress) * math.pi * 0.5)
+             else:
+                 strength = progress
+
+             change = (final_denoising_strength - initial_denoising_strength) * strength
+             return initial_denoising_strength + change
+
+         history = []
+
+         for n in range(batch_count):
+             # Reset to original init image at the start of each batch
+             p.init_images = original_init_image
+
+             # Reset to original denoising strength
+             p.denoising_strength = initial_denoising_strength
+
+             last_image = None
+
+             for i in range(loops):
+                 p.n_iter = 1
+                 p.batch_size = 1
+                 p.do_not_save_grid = True
+
+                 if opts.img2img_color_correction:
+                     p.color_corrections = initial_color_corrections
+
+                 if append_interrogation != "None":
+                     p.prompt = f"{original_prompt}, " if original_prompt else ""
+                     if append_interrogation == "CLIP":
+                         p.prompt += shared.interrogator.interrogate(p.init_images[0])
+                     elif append_interrogation == "DeepBooru":
+                         p.prompt += deepbooru.model.tag(p.init_images[0])
+
+                 state.job = f"Iteration {i + 1}/{loops}, batch {n + 1}/{batch_count}"
+
+                 processed = processing.process_images(p)
+
+                 # Generation cancelled.
+                 if state.interrupted:
+                     break
+
+                 if initial_seed is None:
+                     initial_seed = processed.seed
+                     initial_info = processed.info
+
+                 p.seed = processed.seed + 1
+                 p.denoising_strength = calculate_denoising_strength(i + 1)
+
+                 if state.skipped:
+                     break
+
+                 last_image = processed.images[0]
+                 p.init_images = [last_image]
+                 p.inpainting_fill = 1  # Set "masked content" to "original" for next loop.
+
+                 if batch_count == 1:
+                     history.append(last_image)
+                     all_images.append(last_image)
+
+             if batch_count > 1 and not state.skipped and not state.interrupted:
+                 history.append(last_image)
+                 all_images.append(last_image)
+
+             p.inpainting_fill = original_inpainting_fill
+
+             if state.interrupted:
+                 break
+
+         if len(history) > 1:
+             grid = images.image_grid(history, rows=1)
+             if opts.grid_save:
+                 images.save_image(grid, p.outpath_grids, "grid", initial_seed, p.prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename, grid=True, p=p)
+
+             if opts.return_grid:
+                 grids.append(grid)
+
+         all_images = grids + all_images
+
+         processed = Processed(p, all_images, initial_seed, initial_info)
+
+         return processed
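
Note: the three denoising curves only differ in how progress is warped before interpolating between the initial and final strength: "Aggressive" (sin) front-loads the change, "Lazy" (1 - cos) back-loads it, and "Linear" leaves it alone. A standalone sketch of the same math as `calculate_denoising_strength` (illustration only, lifted out of the class):

    import math

    def strength_at(loop, loops, initial, final, curve):
        if loops == 1:
            return initial
        progress = loop / (loops - 1)
        if curve == "Aggressive":
            s = math.sin(progress * math.pi * 0.5)
        elif curve == "Lazy":
            s = 1 - math.cos(progress * math.pi * 0.5)
        else:  # "Linear"
            s = progress
        return initial + (final - initial) * s

    for curve in ("Aggressive", "Linear", "Lazy"):
        print(curve, [round(strength_at(i + 1, 5, 0.9, 0.3, curve), 3) for i in range(4)])
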
scripts/outpainting_mk_2.py ADDED
@@ -0,0 +1,295 @@
+ import math
+
+ import numpy as np
+ import skimage
+
+ import modules.scripts as scripts
+ import gradio as gr
+ from PIL import Image, ImageDraw
+
+ from modules import images
+ from modules.processing import Processed, process_images
+ from modules.shared import opts, state
+
+
+ # this function is taken from https://github.com/parlance-zz/g-diffuser-bot
+ def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.05):
+     # helper fft routines that keep ortho normalization and auto-shift before and after fft
+     def _fft2(data):
+         if data.ndim > 2:  # has channels
+             out_fft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
+             for c in range(data.shape[2]):
+                 c_data = data[:, :, c]
+                 out_fft[:, :, c] = np.fft.fft2(np.fft.fftshift(c_data), norm="ortho")
+                 out_fft[:, :, c] = np.fft.ifftshift(out_fft[:, :, c])
+         else:  # one channel
+             out_fft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
+             out_fft[:, :] = np.fft.fft2(np.fft.fftshift(data), norm="ortho")
+             out_fft[:, :] = np.fft.ifftshift(out_fft[:, :])
+
+         return out_fft
+
+     def _ifft2(data):
+         if data.ndim > 2:  # has channels
+             out_ifft = np.zeros((data.shape[0], data.shape[1], data.shape[2]), dtype=np.complex128)
+             for c in range(data.shape[2]):
+                 c_data = data[:, :, c]
+                 out_ifft[:, :, c] = np.fft.ifft2(np.fft.fftshift(c_data), norm="ortho")
+                 out_ifft[:, :, c] = np.fft.ifftshift(out_ifft[:, :, c])
+         else:  # one channel
+             out_ifft = np.zeros((data.shape[0], data.shape[1]), dtype=np.complex128)
+             out_ifft[:, :] = np.fft.ifft2(np.fft.fftshift(data), norm="ortho")
+             out_ifft[:, :] = np.fft.ifftshift(out_ifft[:, :])
+
+         return out_ifft
+
+     def _get_gaussian_window(width, height, std=3.14, mode=0):
+         window_scale_x = float(width / min(width, height))
+         window_scale_y = float(height / min(width, height))
+
+         window = np.zeros((width, height))
+         x = (np.arange(width) / width * 2. - 1.) * window_scale_x
+         for y in range(height):
+             fy = (y / height * 2. - 1.) * window_scale_y
+             if mode == 0:
+                 window[:, y] = np.exp(-(x ** 2 + fy ** 2) * std)
+             else:
+                 window[:, y] = (1 / ((x ** 2 + 1.) * (fy ** 2 + 1.))) ** (std / 3.14)  # hey wait a minute that's not gaussian
+
+         return window
+
+     def _get_masked_window_rgb(np_mask_grey, hardness=1.):
+         np_mask_rgb = np.zeros((np_mask_grey.shape[0], np_mask_grey.shape[1], 3))
+         if hardness != 1.:
+             hardened = np_mask_grey[:] ** hardness
+         else:
+             hardened = np_mask_grey[:]
+         for c in range(3):
+             np_mask_rgb[:, :, c] = hardened[:]
+         return np_mask_rgb
+
+     width = _np_src_image.shape[0]
+     height = _np_src_image.shape[1]
+     num_channels = _np_src_image.shape[2]
+
+     _np_src_image[:] * (1. - np_mask_rgb)
+     np_mask_grey = (np.sum(np_mask_rgb, axis=2) / 3.)
+     img_mask = np_mask_grey > 1e-6
+     ref_mask = np_mask_grey < 1e-3
+
+     windowed_image = _np_src_image * (1. - _get_masked_window_rgb(np_mask_grey))
+     windowed_image /= np.max(windowed_image)
+     windowed_image += np.average(_np_src_image) * np_mask_rgb  # rather than leave the masked area black, we get better results from fft by filling the average unmasked color
+
+     src_fft = _fft2(windowed_image)  # get feature statistics from masked src img
+     src_dist = np.absolute(src_fft)
+     src_phase = src_fft / src_dist
+
+     # create a generator with a static seed to make outpainting deterministic / only follow global seed
+     rng = np.random.default_rng(0)
+
+     noise_window = _get_gaussian_window(width, height, mode=1)  # start with simple gaussian noise
+     noise_rgb = rng.random((width, height, num_channels))
+     noise_grey = (np.sum(noise_rgb, axis=2) / 3.)
+     noise_rgb *= color_variation  # the colorfulness of the starting noise is blended to greyscale with a parameter
+     for c in range(num_channels):
+         noise_rgb[:, :, c] += (1. - color_variation) * noise_grey
+
+     noise_fft = _fft2(noise_rgb)
+     for c in range(num_channels):
+         noise_fft[:, :, c] *= noise_window
+     noise_rgb = np.real(_ifft2(noise_fft))
+     shaped_noise_fft = _fft2(noise_rgb)
+     shaped_noise_fft[:, :, :] = np.absolute(shaped_noise_fft[:, :, :]) ** 2 * (src_dist ** noise_q) * src_phase  # perform the actual shaping
+
+     brightness_variation = 0.  # color_variation  # todo: temporarily tieing brightness variation to color variation for now
+     contrast_adjusted_np_src = _np_src_image[:] * (brightness_variation + 1.) - brightness_variation * 2.
+
+     # scikit-image is used for histogram matching, very convenient!
+     shaped_noise = np.real(_ifft2(shaped_noise_fft))
+     shaped_noise -= np.min(shaped_noise)
+     shaped_noise /= np.max(shaped_noise)
+     shaped_noise[img_mask, :] = skimage.exposure.match_histograms(shaped_noise[img_mask, :] ** 1., contrast_adjusted_np_src[ref_mask, :], channel_axis=1)
+     shaped_noise = _np_src_image[:] * (1. - np_mask_rgb) + shaped_noise * np_mask_rgb
+
+     matched_noise = shaped_noise[:]
+
+     return np.clip(matched_noise, 0., 1.)
+
+
+ class Script(scripts.Script):
+     def title(self):
+         return "Outpainting mk2"
+
+     def show(self, is_img2img):
+         return is_img2img
+
+     def ui(self, is_img2img):
+         if not is_img2img:
+             return None
+
+         info = gr.HTML("<p style=\"margin-bottom:0.75em\">Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8</p>")
+
+         pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels"))
+         mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, elem_id=self.elem_id("mask_blur"))
+         direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction"))
+         noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0, elem_id=self.elem_id("noise_q"))
+         color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05, elem_id=self.elem_id("color_variation"))
+
+         return [info, pixels, mask_blur, direction, noise_q, color_variation]
+
+     def run(self, p, _, pixels, mask_blur, direction, noise_q, color_variation):
+         initial_seed_and_info = [None, None]
+
+         process_width = p.width
+         process_height = p.height
+
+         p.inpaint_full_res = False
+         p.inpainting_fill = 1
+         p.do_not_save_samples = True
+         p.do_not_save_grid = True
+
+         left = pixels if "left" in direction else 0
+         right = pixels if "right" in direction else 0
+         up = pixels if "up" in direction else 0
+         down = pixels if "down" in direction else 0
+
+         if left > 0 or right > 0:
+             mask_blur_x = mask_blur
+         else:
+             mask_blur_x = 0
+
+         if up > 0 or down > 0:
+             mask_blur_y = mask_blur
+         else:
+             mask_blur_y = 0
+
+         p.mask_blur_x = mask_blur_x*4
+         p.mask_blur_y = mask_blur_y*4
+
+         init_img = p.init_images[0]
+         target_w = math.ceil((init_img.width + left + right) / 64) * 64
+         target_h = math.ceil((init_img.height + up + down) / 64) * 64
+
+         if left > 0:
+             left = left * (target_w - init_img.width) // (left + right)
+
+         if right > 0:
+             right = target_w - init_img.width - left
+
+         if up > 0:
+             up = up * (target_h - init_img.height) // (up + down)
+
+         if down > 0:
+             down = target_h - init_img.height - up
+
+         def expand(init, count, expand_pixels, is_left=False, is_right=False, is_top=False, is_bottom=False):
+             is_horiz = is_left or is_right
+             is_vert = is_top or is_bottom
+             pixels_horiz = expand_pixels if is_horiz else 0
+             pixels_vert = expand_pixels if is_vert else 0
+
+             images_to_process = []
+             output_images = []
+             for n in range(count):
+                 res_w = init[n].width + pixels_horiz
+                 res_h = init[n].height + pixels_vert
+                 process_res_w = math.ceil(res_w / 64) * 64
+                 process_res_h = math.ceil(res_h / 64) * 64
+
+                 img = Image.new("RGB", (process_res_w, process_res_h))
+                 img.paste(init[n], (pixels_horiz if is_left else 0, pixels_vert if is_top else 0))
+                 mask = Image.new("RGB", (process_res_w, process_res_h), "white")
+                 draw = ImageDraw.Draw(mask)
+                 draw.rectangle((
+                     expand_pixels + mask_blur_x if is_left else 0,
+                     expand_pixels + mask_blur_y if is_top else 0,
+                     mask.width - expand_pixels - mask_blur_x if is_right else res_w,
+                     mask.height - expand_pixels - mask_blur_y if is_bottom else res_h,
+                 ), fill="black")
+
+                 np_image = (np.asarray(img) / 255.0).astype(np.float64)
+                 np_mask = (np.asarray(mask) / 255.0).astype(np.float64)
+                 noised = get_matched_noise(np_image, np_mask, noise_q, color_variation)
+                 output_images.append(Image.fromarray(np.clip(noised * 255., 0., 255.).astype(np.uint8), mode="RGB"))
+
+                 target_width = min(process_width, init[n].width + pixels_horiz) if is_horiz else img.width
+                 target_height = min(process_height, init[n].height + pixels_vert) if is_vert else img.height
+                 p.width = target_width if is_horiz else img.width
+                 p.height = target_height if is_vert else img.height
+
+                 crop_region = (
+                     0 if is_left else output_images[n].width - target_width,
+                     0 if is_top else output_images[n].height - target_height,
+                     target_width if is_left else output_images[n].width,
+                     target_height if is_top else output_images[n].height,
+                 )
+                 mask = mask.crop(crop_region)
+                 p.image_mask = mask
+
+                 image_to_process = output_images[n].crop(crop_region)
+                 images_to_process.append(image_to_process)
+
+             p.init_images = images_to_process
+
+             latent_mask = Image.new("RGB", (p.width, p.height), "white")
+             draw = ImageDraw.Draw(latent_mask)
+             draw.rectangle((
+                 expand_pixels + mask_blur_x * 2 if is_left else 0,
+                 expand_pixels + mask_blur_y * 2 if is_top else 0,
+                 mask.width - expand_pixels - mask_blur_x * 2 if is_right else res_w,
+                 mask.height - expand_pixels - mask_blur_y * 2 if is_bottom else res_h,
+             ), fill="black")
+             p.latent_mask = latent_mask
+
+             proc = process_images(p)
+
+             if initial_seed_and_info[0] is None:
+                 initial_seed_and_info[0] = proc.seed
+                 initial_seed_and_info[1] = proc.info
+
+             for n in range(count):
+                 output_images[n].paste(proc.images[n], (0 if is_left else output_images[n].width - proc.images[n].width, 0 if is_top else output_images[n].height - proc.images[n].height))
+                 output_images[n] = output_images[n].crop((0, 0, res_w, res_h))
+
+             return output_images
+
+         batch_count = p.n_iter
+         batch_size = p.batch_size
+         p.n_iter = 1
+         state.job_count = batch_count * ((1 if left > 0 else 0) + (1 if right > 0 else 0) + (1 if up > 0 else 0) + (1 if down > 0 else 0))
+         all_processed_images = []
+
+         for i in range(batch_count):
+             imgs = [init_img] * batch_size
+             state.job = f"Batch {i + 1} out of {batch_count}"
+
+             if left > 0:
+                 imgs = expand(imgs, batch_size, left, is_left=True)
+             if right > 0:
+                 imgs = expand(imgs, batch_size, right, is_right=True)
+             if up > 0:
+                 imgs = expand(imgs, batch_size, up, is_top=True)
+             if down > 0:
+                 imgs = expand(imgs, batch_size, down, is_bottom=True)
+
+             all_processed_images += imgs
+
+         all_images = all_processed_images
+
+         combined_grid_image = images.image_grid(all_processed_images)
+         unwanted_grid_because_of_img_count = len(all_processed_images) < 2 and opts.grid_only_if_multiple
+         if opts.return_grid and not unwanted_grid_because_of_img_count:
+             all_images = [combined_grid_image] + all_processed_images
+
+         res = Processed(p, all_images, initial_seed_and_info[0], initial_seed_and_info[1])
+
+         if opts.samples_save:
+             for img in all_processed_images:
+                 images.save_image(img, p.outpath_samples, "", res.seed, p.prompt, opts.samples_format, info=res.info, p=p)
+
+         if opts.grid_save and not unwanted_grid_because_of_img_count:
+             images.save_image(combined_grid_image, p.outpath_grids, "grid", res.seed, p.prompt, opts.grid_format, info=res.info, short_filename=not opts.grid_extended_filename, grid=True, p=p)
+
+         return res
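
Note: both outpainting scripts round the expanded canvas up to a multiple of 64 (the SD latent granularity) and then redistribute the rounding slack proportionally between the two sides of each axis. A standalone sketch of that arithmetic (illustration only):

    import math

    def expanded(width, height, left, right, up, down):
        target_w = math.ceil((width + left + right) / 64) * 64
        target_h = math.ceil((height + up + down) / 64) * 64
        if left > 0:
            left = left * (target_w - width) // (left + right)
        if right > 0:
            right = target_w - width - left
        if up > 0:
            up = up * (target_h - height) // (up + down)
        if down > 0:
            down = target_h - height - up
        return target_w, target_h, left, right, up, down

    print(expanded(512, 512, 128, 128, 0, 0))  # (768, 512, 128, 128, 0, 0)
    print(expanded(500, 500, 100, 0, 0, 0))    # (640, 512, 140, 0, 0, 0) -- width slack goes to the open side
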
scripts/poor_mans_outpainting.py ADDED
@@ -0,0 +1,146 @@
+ import math
+
+ import modules.scripts as scripts
+ import gradio as gr
+ from PIL import Image, ImageDraw
+
+ from modules import images, devices
+ from modules.processing import Processed, process_images
+ from modules.shared import opts, state
+
+
+ class Script(scripts.Script):
+     def title(self):
+         return "Poor man's outpainting"
+
+     def show(self, is_img2img):
+         return is_img2img
+
+     def ui(self, is_img2img):
+         if not is_img2img:
+             return None
+
+         pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels"))
+         mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=self.elem_id("mask_blur"))
+         inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=self.elem_id("inpainting_fill"))
+         direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction"))
+
+         return [pixels, mask_blur, inpainting_fill, direction]
+
+     def run(self, p, pixels, mask_blur, inpainting_fill, direction):
+         initial_seed = None
+         initial_info = None
+
+         p.mask_blur = mask_blur * 2
+         p.inpainting_fill = inpainting_fill
+         p.inpaint_full_res = False
+
+         left = pixels if "left" in direction else 0
+         right = pixels if "right" in direction else 0
+         up = pixels if "up" in direction else 0
+         down = pixels if "down" in direction else 0
+
+         init_img = p.init_images[0]
+         target_w = math.ceil((init_img.width + left + right) / 64) * 64
+         target_h = math.ceil((init_img.height + up + down) / 64) * 64
+
+         if left > 0:
+             left = left * (target_w - init_img.width) // (left + right)
+         if right > 0:
+             right = target_w - init_img.width - left
+
+         if up > 0:
+             up = up * (target_h - init_img.height) // (up + down)
+
+         if down > 0:
+             down = target_h - init_img.height - up
+
+         img = Image.new("RGB", (target_w, target_h))
+         img.paste(init_img, (left, up))
+
+         mask = Image.new("L", (img.width, img.height), "white")
+         draw = ImageDraw.Draw(mask)
+         draw.rectangle((
+             left + (mask_blur * 2 if left > 0 else 0),
+             up + (mask_blur * 2 if up > 0 else 0),
+             mask.width - right - (mask_blur * 2 if right > 0 else 0),
+             mask.height - down - (mask_blur * 2 if down > 0 else 0)
+         ), fill="black")
+
+         latent_mask = Image.new("L", (img.width, img.height), "white")
+         latent_draw = ImageDraw.Draw(latent_mask)
+         latent_draw.rectangle((
+             left + (mask_blur//2 if left > 0 else 0),
+             up + (mask_blur//2 if up > 0 else 0),
+             mask.width - right - (mask_blur//2 if right > 0 else 0),
+             mask.height - down - (mask_blur//2 if down > 0 else 0)
+         ), fill="black")
+
+         devices.torch_gc()
+
+         grid = images.split_grid(img, tile_w=p.width, tile_h=p.height, overlap=pixels)
+         grid_mask = images.split_grid(mask, tile_w=p.width, tile_h=p.height, overlap=pixels)
+         grid_latent_mask = images.split_grid(latent_mask, tile_w=p.width, tile_h=p.height, overlap=pixels)
+
+         p.n_iter = 1
+         p.batch_size = 1
+         p.do_not_save_grid = True
+         p.do_not_save_samples = True
+
+         work = []
+         work_mask = []
+         work_latent_mask = []
+         work_results = []
+
+         for (y, h, row), (_, _, row_mask), (_, _, row_latent_mask) in zip(grid.tiles, grid_mask.tiles, grid_latent_mask.tiles):
+             for tiledata, tiledata_mask, tiledata_latent_mask in zip(row, row_mask, row_latent_mask):
+                 x, w = tiledata[0:2]
+
+                 if x >= left and x+w <= img.width - right and y >= up and y+h <= img.height - down:
+                     continue
+
+                 work.append(tiledata[2])
+                 work_mask.append(tiledata_mask[2])
+                 work_latent_mask.append(tiledata_latent_mask[2])
+
+         batch_count = len(work)
+         print(f"Poor man's outpainting will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)}.")
+
+         state.job_count = batch_count
+
+         for i in range(batch_count):
+             p.init_images = [work[i]]
+             p.image_mask = work_mask[i]
+             p.latent_mask = work_latent_mask[i]
+
+             state.job = f"Batch {i + 1} out of {batch_count}"
+             processed = process_images(p)
+
+             if initial_seed is None:
+                 initial_seed = processed.seed
+                 initial_info = processed.info
+
+             p.seed = processed.seed + 1
+             work_results += processed.images
+
+         image_index = 0
+         for y, h, row in grid.tiles:
+             for tiledata in row:
+                 x, w = tiledata[0:2]
+
+                 if x >= left and x+w <= img.width - right and y >= up and y+h <= img.height - down:
+                     continue
+
+                 tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
+                 image_index += 1
+
+         combined_image = images.combine_grid(grid)
+
+         if opts.samples_save:
+             images.save_image(combined_image, p.outpath_samples, "", initial_seed, p.prompt, opts.samples_format, info=initial_info, p=p)
+
+         processed = Processed(p, [combined_image], initial_seed, initial_info)
+
+         return processed
@@ -0,0 +1,36 @@
 
+ from PIL import Image
+ import numpy as np
+
+ from modules import scripts_postprocessing, codeformer_model
+ import gradio as gr
+
+ from modules.ui_components import FormRow
+
+
+ class ScriptPostprocessingCodeFormer(scripts_postprocessing.ScriptPostprocessing):
+     name = "CodeFormer"
+     order = 3000
+
+     def ui(self):
+         with FormRow():
+             codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, elem_id="extras_codeformer_visibility")
+             codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, elem_id="extras_codeformer_weight")
+
+         return {
+             "codeformer_visibility": codeformer_visibility,
+             "codeformer_weight": codeformer_weight,
+         }
+
+     def process(self, pp: scripts_postprocessing.PostprocessedImage, codeformer_visibility, codeformer_weight):
+         if codeformer_visibility == 0:
+             return
+
+         restored_img = codeformer_model.codeformer.restore(np.array(pp.image, dtype=np.uint8), w=codeformer_weight)
+         res = Image.fromarray(restored_img)
+
+         if codeformer_visibility < 1.0:
+             res = Image.blend(pp.image, res, codeformer_visibility)
+
+         pp.image = res
+         pp.info["CodeFormer visibility"] = round(codeformer_visibility, 3)
+         pp.info["CodeFormer weight"] = round(codeformer_weight, 3)
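
Note: in this and the GFPGAN postprocessor below, the visibility slider is a plain linear blend between the original and the restored image; PIL's `Image.blend(a, b, alpha)` computes a·(1-alpha) + b·alpha per pixel. A tiny check (illustration only):

    from PIL import Image

    original = Image.new("RGB", (8, 8), (200, 0, 0))
    restored = Image.new("RGB", (8, 8), (0, 0, 200))
    print(Image.blend(original, restored, 0.5).getpixel((0, 0)))  # (100, 0, 100)
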
scripts/postprocessing_gfpgan.py ADDED
@@ -0,0 +1,33 @@
+ from PIL import Image
+ import numpy as np
+
+ from modules import scripts_postprocessing, gfpgan_model
+ import gradio as gr
+
+ from modules.ui_components import FormRow
+
+
+ class ScriptPostprocessingGfpGan(scripts_postprocessing.ScriptPostprocessing):
+     name = "GFPGAN"
+     order = 2000
+
+     def ui(self):
+         with FormRow():
+             gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, elem_id="extras_gfpgan_visibility")
+
+         return {
+             "gfpgan_visibility": gfpgan_visibility,
+         }
+
+     def process(self, pp: scripts_postprocessing.PostprocessedImage, gfpgan_visibility):
+         if gfpgan_visibility == 0:
+             return
+
+         restored_img = gfpgan_model.gfpgan_fix_faces(np.array(pp.image, dtype=np.uint8))
+         res = Image.fromarray(restored_img)
+
+         if gfpgan_visibility < 1.0:
+             res = Image.blend(pp.image, res, gfpgan_visibility)
+
+         pp.image = res
+         pp.info["GFPGAN visibility"] = round(gfpgan_visibility, 3)
scripts/postprocessing_upscale.py ADDED
@@ -0,0 +1,133 @@
+ from PIL import Image
+ import numpy as np
+
+ from modules import scripts_postprocessing, shared
+ import gradio as gr
+
+ from modules.ui_components import FormRow, ToolButton
+ from modules.ui import switch_values_symbol
+
+ upscale_cache = {}
+
+
+ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
+     name = "Upscale"
+     order = 1000
+
+     def ui(self):
+         selected_tab = gr.State(value=0)
+
+         with gr.Column():
+             with FormRow():
+                 with gr.Tabs(elem_id="extras_resize_mode"):
+                     with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by:
+                         upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize")
+                     with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to:
+                         with gr.Row(elem_id="upscaling_column_size"):
+                             upscaling_resize_w = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="extras_upscaling_resize_w")
+                             upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn")
+                             upscaling_resize_h = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="extras_upscaling_resize_h")
+                         with gr.Row(elem_id="upscaling_dimensions_row", elem_classes="dimensions-tools"):
+                             upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")
+             with FormRow():
+                 extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
+             with FormRow():
+                 extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
+                 extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility")
+
+         upscaling_res_switch_btn.click(lambda w, h: (h, w), inputs=[upscaling_resize_w, upscaling_resize_h], outputs=[upscaling_resize_w, upscaling_resize_h], show_progress=False)
+         tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab])
+         tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab])
+
+         return {
+             "upscale_mode": selected_tab,
+             "upscale_by": upscaling_resize,
+             "upscale_to_width": upscaling_resize_w,
+             "upscale_to_height": upscaling_resize_h,
+             "upscale_crop": upscaling_crop,
+             "upscaler_1_name": extras_upscaler_1,
+             "upscaler_2_name": extras_upscaler_2,
+             "upscaler_2_visibility": extras_upscaler_2_visibility,
+         }
+
+     def upscale(self, image, info, upscaler, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop):
+         if upscale_mode == 1:
+             upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height)
+             info["Postprocess upscale to"] = f"{upscale_to_width}x{upscale_to_height}"
+         else:
+             info["Postprocess upscale by"] = upscale_by
+
+         cache_key = (hash(np.array(image.getdata()).tobytes()), upscaler.name, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
+         cached_image = upscale_cache.pop(cache_key, None)
+
+         if cached_image is not None:
+             image = cached_image
+         else:
+             image = upscaler.scaler.upscale(image, upscale_by, upscaler.data_path)
+
+         upscale_cache[cache_key] = image
+         if len(upscale_cache) > shared.opts.upscaling_max_images_in_cache:
+             upscale_cache.pop(next(iter(upscale_cache), None), None)
+
+         if upscale_mode == 1 and upscale_crop:
+             cropped = Image.new("RGB", (upscale_to_width, upscale_to_height))
+             cropped.paste(image, box=(upscale_to_width // 2 - image.width // 2, upscale_to_height // 2 - image.height // 2))
+             image = cropped
+             info["Postprocess crop to"] = f"{image.width}x{image.height}"
+
+         return image
+
+     def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0):
+         if upscaler_1_name == "None":
+             upscaler_1_name = None
+
+         upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_1_name]), None)
+         assert upscaler1 or (upscaler_1_name is None), f'could not find upscaler named {upscaler_1_name}'
+
+         if not upscaler1:
+             return
+
+         if upscaler_2_name == "None":
+             upscaler_2_name = None
+
+         upscaler2 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_2_name and x.name != "None"]), None)
+         assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}'
+
+         upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
+         pp.info["Postprocess upscaler"] = upscaler1.name
+
+         if upscaler2 and upscaler_2_visibility > 0:
+             second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
+             upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility)
+
+             pp.info["Postprocess upscaler 2"] = upscaler2.name
+
+         pp.image = upscaled_image
+
+     def image_changed(self):
+         upscale_cache.clear()
+
+
+ class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
+     name = "Simple Upscale"
+     order = 900
+
+     def ui(self):
+         with FormRow():
+             upscaler_name = gr.Dropdown(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
+             upscale_by = gr.Slider(minimum=0.05, maximum=8.0, step=0.05, label="Upscale by", value=2)
+
+         return {
+             "upscale_by": upscale_by,
+             "upscaler_name": upscaler_name,
+         }
+
+     def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None):
+         if upscaler_name is None or upscaler_name == "None":
+             return
+
+         upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_name]), None)
+         assert upscaler1, f'could not find upscaler named {upscaler_name}'
+
+         pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False)
+         pp.info["Postprocess upscaler"] = upscaler1.name
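
Note: in "Scale to" mode the factor is chosen as the larger of the two per-axis ratios, so both target dimensions are covered; "Crop to fit" then trims the overshoot with a centered paste. A sketch of that sizing math (illustration only):

    def scale_to(width, height, target_w, target_h):
        by = max(target_w / width, target_h / height)
        return round(width * by), round(height * by)

    # A 512x768 image scaled to 1024x1024 first becomes 1024x1536,
    # then crop-to-fit trims 256px from the top and bottom.
    print(scale_to(512, 768, 1024, 1024))  # (1024, 1536)
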
scripts/prompt_matrix.py ADDED
@@ -0,0 +1,108 @@
+ import math
+
+ import modules.scripts as scripts
+ import gradio as gr
+
+ from modules import images
+ from modules.processing import process_images
+ from modules.shared import opts, state
+ import modules.sd_samplers
+
+
+ def draw_xy_grid(xs, ys, x_label, y_label, cell):
+     res = []
+
+     ver_texts = [[images.GridAnnotation(y_label(y))] for y in ys]
+     hor_texts = [[images.GridAnnotation(x_label(x))] for x in xs]
+
+     first_processed = None
+
+     state.job_count = len(xs) * len(ys)
+
+     for iy, y in enumerate(ys):
+         for ix, x in enumerate(xs):
+             state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
+
+             processed = cell(x, y)
+             if first_processed is None:
+                 first_processed = processed
+
+             res.append(processed.images[0])
+
+     grid = images.image_grid(res, rows=len(ys))
+     grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
+
+     first_processed.images = [grid]
+
+     return first_processed
+
+
+ class Script(scripts.Script):
+     def title(self):
+         return "Prompt matrix"
+
+     def ui(self, is_img2img):
+         gr.HTML('<br />')
+         with gr.Row():
+             with gr.Column():
+                 put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start"))
+                 different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds"))
+             with gr.Column():
+                 prompt_type = gr.Radio(["positive", "negative"], label="Select prompt", elem_id=self.elem_id("prompt_type"), value="positive")
+                 variations_delimiter = gr.Radio(["comma", "space"], label="Select joining char", elem_id=self.elem_id("variations_delimiter"), value="comma")
+             with gr.Column():
+                 margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size"))
+
+         return [put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size]
+
+     def run(self, p, put_at_start, different_seeds, prompt_type, variations_delimiter, margin_size):
+         modules.processing.fix_seed(p)
+         # Raise error if prompt type is not positive or negative
+         if prompt_type not in ["positive", "negative"]:
+             raise ValueError(f"Unknown prompt type {prompt_type}")
+         # Raise error if variations delimiter is not comma or space
+         if variations_delimiter not in ["comma", "space"]:
+             raise ValueError(f"Unknown variations delimiter {variations_delimiter}")
+
+         prompt = p.prompt if prompt_type == "positive" else p.negative_prompt
+         original_prompt = prompt[0] if type(prompt) == list else prompt
+         positive_prompt = p.prompt[0] if type(p.prompt) == list else p.prompt
+
+         delimiter = ", " if variations_delimiter == "comma" else " "
+
+         all_prompts = []
+         prompt_matrix_parts = original_prompt.split("|")
+         combination_count = 2 ** (len(prompt_matrix_parts) - 1)
+         for combination_num in range(combination_count):
+             selected_prompts = [text.strip().strip(',') for n, text in enumerate(prompt_matrix_parts[1:]) if combination_num & (1 << n)]
+
+             if put_at_start:
+                 selected_prompts = selected_prompts + [prompt_matrix_parts[0]]
+             else:
+                 selected_prompts = [prompt_matrix_parts[0]] + selected_prompts
+
+             all_prompts.append(delimiter.join(selected_prompts))
+
+         p.n_iter = math.ceil(len(all_prompts) / p.batch_size)
+         p.do_not_save_grid = True
+
+         print(f"Prompt matrix will create {len(all_prompts)} images using a total of {p.n_iter} batches.")
+
+         if prompt_type == "positive":
+             p.prompt = all_prompts
+         else:
+             p.negative_prompt = all_prompts
+         p.seed = [p.seed + (i if different_seeds else 0) for i in range(len(all_prompts))]
+         p.prompt_for_display = positive_prompt
+         processed = process_images(p)
+
+         grid = images.image_grid(processed.images, p.batch_size, rows=1 << ((len(prompt_matrix_parts) - 1) // 2))
+         grid = images.draw_prompt_matrix(grid, processed.images[0].width, processed.images[0].height, prompt_matrix_parts, margin_size)
+         processed.images.insert(0, grid)
+         processed.index_of_first_image = 1
+         processed.infotexts.insert(0, processed.infotexts[0])
+
+         if opts.grid_save:
+             images.save_image(processed.images[0], p.outpath_grids, "prompt_matrix", extension=opts.grid_format, prompt=original_prompt, seed=processed.seed, grid=True, p=p)
+
+         return processed
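
Note: the matrix enumeration is a bitmask walk: each optional part after the first "|" corresponds to one bit of `combination_num`, giving 2^(n-1) prompts for n parts. A standalone sketch (illustration only):

    prompt = "a city street|at night|in the rain"
    parts = [s.strip() for s in prompt.split("|")]

    for combination_num in range(2 ** (len(parts) - 1)):
        selected = [parts[0]] + [t for n, t in enumerate(parts[1:]) if combination_num & (1 << n)]
        print(", ".join(selected))
    # a city street
    # a city street, at night
    # a city street, in the rain
    # a city street, at night, in the rain
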
scripts/prompts_from_file.py ADDED
@@ -0,0 +1,169 @@
+ import copy
+ import random
+ import shlex
+
+ import modules.scripts as scripts
+ import gradio as gr
+
+ from modules import sd_samplers, errors
+ from modules.processing import Processed, process_images
+ from modules.shared import state
+
+
+ def process_string_tag(tag):
+     return tag
+
+
+ def process_int_tag(tag):
+     return int(tag)
+
+
+ def process_float_tag(tag):
+     return float(tag)
+
+
+ def process_boolean_tag(tag):
+     return True if (tag == "true") else False
+
+
+ prompt_tags = {
+     "sd_model": None,
+     "outpath_samples": process_string_tag,
+     "outpath_grids": process_string_tag,
+     "prompt_for_display": process_string_tag,
+     "prompt": process_string_tag,
+     "negative_prompt": process_string_tag,
+     "styles": process_string_tag,
+     "seed": process_int_tag,
+     "subseed_strength": process_float_tag,
+     "subseed": process_int_tag,
+     "seed_resize_from_h": process_int_tag,
+     "seed_resize_from_w": process_int_tag,
+     "sampler_index": process_int_tag,
+     "sampler_name": process_string_tag,
+     "batch_size": process_int_tag,
+     "n_iter": process_int_tag,
+     "steps": process_int_tag,
+     "cfg_scale": process_float_tag,
+     "width": process_int_tag,
+     "height": process_int_tag,
+     "restore_faces": process_boolean_tag,
+     "tiling": process_boolean_tag,
+     "do_not_save_samples": process_boolean_tag,
+     "do_not_save_grid": process_boolean_tag
+ }
+
+
+ def cmdargs(line):
+     args = shlex.split(line)
+     pos = 0
+     res = {}
+
+     while pos < len(args):
+         arg = args[pos]
+
+         assert arg.startswith("--"), f'must start with "--": {arg}'
+         assert pos+1 < len(args), f'missing argument for command line option {arg}'
+
+         tag = arg[2:]
+
+         if tag == "prompt" or tag == "negative_prompt":
+             pos += 1
+             prompt = args[pos]
+             pos += 1
+             while pos < len(args) and not args[pos].startswith("--"):
+                 prompt += " "
+                 prompt += args[pos]
+                 pos += 1
+             res[tag] = prompt
+             continue
+
+         func = prompt_tags.get(tag, None)
+         assert func, f'unknown commandline option: {arg}'
+
+         val = args[pos+1]
+         if tag == "sampler_name":
+             val = sd_samplers.samplers_map.get(val.lower(), None)
+
+         res[tag] = func(val)
+
+         pos += 2
+
+     return res
+
+
+ def load_prompt_file(file):
+     if file is None:
+         return None, gr.update(), gr.update(lines=7)
+     else:
+         lines = [x.strip() for x in file.decode('utf8', errors='ignore').split("\n")]
+         return None, "\n".join(lines), gr.update(lines=7)
+
+
+ class Script(scripts.Script):
+     def title(self):
+         return "Prompts from file or textbox"
+
+     def ui(self, is_img2img):
+         checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate"))
+         checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch"))
+
+         prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=self.elem_id("prompt_txt"))
+         file = gr.File(label="Upload prompt inputs", type='binary', elem_id=self.elem_id("file"))
+
+         file.change(fn=load_prompt_file, inputs=[file], outputs=[file, prompt_txt, prompt_txt], show_progress=False)
+
+         # We start at one line. When the text changes, we jump to seven lines, or two lines if no \n.
+         # We don't shrink back to 1, because that causes the control to ignore [enter], and it may
+         # be unclear to the user that shift-enter is needed.
+         prompt_txt.change(lambda tb: gr.update(lines=7) if ("\n" in tb) else gr.update(lines=2), inputs=[prompt_txt], outputs=[prompt_txt], show_progress=False)
+         return [checkbox_iterate, checkbox_iterate_batch, prompt_txt]
+
+     def run(self, p, checkbox_iterate, checkbox_iterate_batch, prompt_txt: str):
+         lines = [x for x in (x.strip() for x in prompt_txt.splitlines()) if x]
+
+         p.do_not_save_grid = True
+
+         job_count = 0
+         jobs = []
+
+         for line in lines:
+             if "--" in line:
+                 try:
+                     args = cmdargs(line)
+                 except Exception:
+                     errors.report(f"Error parsing line {line} as commandline", exc_info=True)
+                     args = {"prompt": line}
+             else:
+                 args = {"prompt": line}
+
+             job_count += args.get("n_iter", p.n_iter)
+
+             jobs.append(args)
+
+         print(f"Will process {len(lines)} lines in {job_count} jobs.")
+         if (checkbox_iterate or checkbox_iterate_batch) and p.seed == -1:
+             p.seed = int(random.randrange(4294967294))
+
+         state.job_count = job_count
+
+         images = []
+         all_prompts = []
+         infotexts = []
+         for args in jobs:
+             state.job = f"{state.job_no + 1} out of {state.job_count}"
+
+             copy_p = copy.copy(p)
+             for k, v in args.items():
+                 setattr(copy_p, k, v)
+
+             proc = process_images(copy_p)
+             images += proc.images
+
+             if checkbox_iterate:
+                 p.seed = p.seed + (p.batch_size * p.n_iter)
+             all_prompts += proc.all_prompts
+             infotexts += proc.infotexts
+
+         return Processed(p, images, p.seed, "", all_prompts=all_prompts, infotexts=infotexts)
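
Note: a line in the prompt file can carry per-job overrides as `--key value` options; `shlex` handles the quoting, and everything between `--prompt` and the next `--` option is glued back into one string. The hypothetical line below would parse as shown (illustration only):

    import shlex

    line = '--prompt "a lighthouse at dusk" --steps 20 --cfg_scale 7.5'
    print(shlex.split(line))
    # ['--prompt', 'a lighthouse at dusk', '--steps', '20', '--cfg_scale', '7.5']
    # cmdargs(line) would then produce:
    # {'prompt': 'a lighthouse at dusk', 'steps': 20, 'cfg_scale': 7.5}
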
scripts/sd_upscale.py ADDED
@@ -0,0 +1,101 @@
+ import math
+
+ import modules.scripts as scripts
+ import gradio as gr
+ from PIL import Image
+
+ from modules import processing, shared, images, devices
+ from modules.processing import Processed
+ from modules.shared import opts, state
+
+
+ class Script(scripts.Script):
+     def title(self):
+         return "SD upscale"
+
+     def show(self, is_img2img):
+         return is_img2img
+
+     def ui(self, is_img2img):
+         info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image by the selected scale factor; use width and height sliders to set tile size</p>")
+         overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=self.elem_id("overlap"))
+         scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=self.elem_id("scale_factor"))
+         upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", elem_id=self.elem_id("upscaler_index"))
+
+         return [info, overlap, upscaler_index, scale_factor]
+
+     def run(self, p, _, overlap, upscaler_index, scale_factor):
+         if isinstance(upscaler_index, str):
+             upscaler_index = [x.name.lower() for x in shared.sd_upscalers].index(upscaler_index.lower())
+         processing.fix_seed(p)
+         upscaler = shared.sd_upscalers[upscaler_index]
+
+         p.extra_generation_params["SD upscale overlap"] = overlap
+         p.extra_generation_params["SD upscale upscaler"] = upscaler.name
+
+         initial_info = None
+         seed = p.seed
+
+         init_img = p.init_images[0]
+         init_img = images.flatten(init_img, opts.img2img_background_color)
+
+         if upscaler.name != "None":
+             img = upscaler.scaler.upscale(init_img, scale_factor, upscaler.data_path)
+         else:
+             img = init_img
+
+         devices.torch_gc()
+
+         grid = images.split_grid(img, tile_w=p.width, tile_h=p.height, overlap=overlap)
+
+         batch_size = p.batch_size
+         upscale_count = p.n_iter
+         p.n_iter = 1
+         p.do_not_save_grid = True
+         p.do_not_save_samples = True
+
+         work = []
+
+         for _y, _h, row in grid.tiles:
+             for tiledata in row:
+                 work.append(tiledata[2])
+
+         batch_count = math.ceil(len(work) / batch_size)
+         state.job_count = batch_count * upscale_count
+
+         print(f"SD upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} per upscale in a total of {state.job_count} batches.")
+
+         result_images = []
+         for n in range(upscale_count):
+             start_seed = seed + n
+             p.seed = start_seed
+
+             work_results = []
+             for i in range(batch_count):
+                 p.batch_size = batch_size
+                 p.init_images = work[i * batch_size:(i + 1) * batch_size]
+
+                 state.job = f"Batch {i + 1 + n * batch_count} out of {state.job_count}"
+                 processed = processing.process_images(p)
+
+                 if initial_info is None:
+                     initial_info = processed.info
+
+                 p.seed = processed.seed + 1
+                 work_results += processed.images
+
+             image_index = 0
+             for _y, _h, row in grid.tiles:
+                 for tiledata in row:
+                     tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
+                     image_index += 1
+
+             combined_image = images.combine_grid(grid)
+             result_images.append(combined_image)
+
+             if opts.samples_save:
+                 images.save_image(combined_image, p.outpath_samples, "", start_seed, p.prompt, opts.samples_format, info=initial_info, p=p)
+
+         processed = Processed(p, result_images, seed, initial_info)
+
+         return processed
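To make the tiling math above concrete, here is a minimal sketch of how the tile and batch counts come out for a typical run. It assumes images.split_grid advances by tile minus overlap pixels per step; the numbers are illustrative, not taken from this diff:

import math

# Illustrative: a 512x512 img2img at Scale Factor 2.0 yields a 1024x1024
# upscaled image, re-diffused in 512x512 tiles with 64 px overlap
# (the script's default overlap).
upscaled_w = upscaled_h = 1024
tile_w = tile_h = 512
overlap = 64
batch_size = 1

def tiles_per_axis(size, tile, overlap):
    # Assumed split_grid stride: each new tile starts tile - overlap
    # pixels after the previous one.
    return math.ceil(max(size - overlap, 1) / (tile - overlap))

cols = tiles_per_axis(upscaled_w, tile_w, overlap)   # 3
rows = tiles_per_axis(upscaled_h, tile_h, overlap)   # 3
work = cols * rows                                   # 9 tiles to re-diffuse
batch_count = math.ceil(work / batch_size)           # 9 batches per upscale
print(f"{work} tiles -> {batch_count} batches")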
scripts/xyz_grid.py ADDED
@@ -0,0 +1,734 @@
+ from collections import namedtuple
+ from copy import copy
+ from itertools import permutations, chain
+ import random
+ import csv
+ from io import StringIO
+ from PIL import Image
+ import numpy as np
+
+ import modules.scripts as scripts
+ import gradio as gr
+
+ from modules import images, sd_samplers, processing, sd_models, sd_vae, sd_samplers_kdiffusion
+ from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img
+ from modules.shared import opts, state
+ import modules.shared as shared
+ import modules.sd_samplers
+ import modules.sd_models
+ import modules.sd_vae
+ import re
+
+ from modules.ui_components import ToolButton
+
+ fill_values_symbol = "\U0001f4d2"  # 📒
+
+ AxisInfo = namedtuple('AxisInfo', ['axis', 'values'])
+
+
+ def apply_field(field):
+     def fun(p, x, xs):
+         setattr(p, field, x)
+
+     return fun
+
+
+ def apply_prompt(p, x, xs):
+     if xs[0] not in p.prompt and xs[0] not in p.negative_prompt:
+         raise RuntimeError(f"Prompt S/R did not find {xs[0]} in prompt or negative prompt.")
+
+     p.prompt = p.prompt.replace(xs[0], x)
+     p.negative_prompt = p.negative_prompt.replace(xs[0], x)
+
+
+ def apply_order(p, x, xs):
+     token_order = []
+
+     # Initially grab the tokens from the prompt, so they can be replaced in order of earliest seen
+     for token in x:
+         token_order.append((p.prompt.find(token), token))
+
+     token_order.sort(key=lambda t: t[0])
+
+     prompt_parts = []
+
+     # Split the prompt up, taking out the tokens
+     for _, token in token_order:
+         n = p.prompt.find(token)
+         prompt_parts.append(p.prompt[0:n])
+         p.prompt = p.prompt[n + len(token):]
+
+     # Rebuild the prompt with the tokens in the order we want
+     prompt_tmp = ""
+     for idx, part in enumerate(prompt_parts):
+         prompt_tmp += part
+         prompt_tmp += x[idx]
+     p.prompt = prompt_tmp + p.prompt
+
+
+ def apply_sampler(p, x, xs):
+     sampler_name = sd_samplers.samplers_map.get(x.lower(), None)
+     if sampler_name is None:
+         raise RuntimeError(f"Unknown sampler: {x}")
+
+     p.sampler_name = sampler_name
+
+
+ def confirm_samplers(p, xs):
+     for x in xs:
+         if x.lower() not in sd_samplers.samplers_map:
+             raise RuntimeError(f"Unknown sampler: {x}")
+
+
+ def apply_checkpoint(p, x, xs):
+     info = modules.sd_models.get_closet_checkpoint_match(x)
+     if info is None:
+         raise RuntimeError(f"Unknown checkpoint: {x}")
+     p.override_settings['sd_model_checkpoint'] = info.name
+
+
+ def confirm_checkpoints(p, xs):
+     for x in xs:
+         if modules.sd_models.get_closet_checkpoint_match(x) is None:
+             raise RuntimeError(f"Unknown checkpoint: {x}")
+
+
+ def apply_clip_skip(p, x, xs):
+     opts.data["CLIP_stop_at_last_layers"] = x
+
+
+ def apply_upscale_latent_space(p, x, xs):
+     if x.lower().strip() != '0':
+         opts.data["use_scale_latent_for_hires_fix"] = True
+     else:
+         opts.data["use_scale_latent_for_hires_fix"] = False
+
+
+ def find_vae(name: str):
+     if name.lower() in ['auto', 'automatic']:
+         return modules.sd_vae.unspecified
+     if name.lower() == 'none':
+         return None
+     else:
+         choices = [x for x in sorted(modules.sd_vae.vae_dict, key=lambda x: len(x)) if name.lower().strip() in x.lower()]
+         if len(choices) == 0:
+             print(f"No VAE found for {name}; using automatic")
+             return modules.sd_vae.unspecified
+         else:
+             return modules.sd_vae.vae_dict[choices[0]]
+
+
+ def apply_vae(p, x, xs):
+     modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=find_vae(x))
+
+
+ def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _):
+     p.styles.extend(x.split(','))
+
+
+ def apply_uni_pc_order(p, x, xs):
+     opts.data["uni_pc_order"] = min(x, p.steps - 1)
+
+
+ def apply_face_restore(p, opt, x):
+     opt = opt.lower()
+     if opt == 'codeformer':
+         is_active = True
+         p.face_restoration_model = 'CodeFormer'
+     elif opt == 'gfpgan':
+         is_active = True
+         p.face_restoration_model = 'GFPGAN'
+     else:
+         is_active = opt in ('true', 'yes', 'y', '1')
+
+     p.restore_faces = is_active
+
+
+ def apply_override(field, boolean: bool = False):
+     def fun(p, x, xs):
+         if boolean:
+             x = True if x.lower() == "true" else False
+         p.override_settings[field] = x
+     return fun
+
+
+ def boolean_choice(reverse: bool = False):
+     def choice():
+         return ["False", "True"] if reverse else ["True", "False"]
+     return choice
+
+
+ def format_value_add_label(p, opt, x):
+     if type(x) == float:
+         x = round(x, 8)
+
+     return f"{opt.label}: {x}"
+
+
+ def format_value(p, opt, x):
+     if type(x) == float:
+         x = round(x, 8)
+     return x
+
+
+ def format_value_join_list(p, opt, x):
+     return ", ".join(x)
+
+
+ def do_nothing(p, x, xs):
+     pass
+
+
+ def format_nothing(p, opt, x):
+     return ""
+
+
+ def str_permutations(x):
+     """dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
+     return x
+
+
+ class AxisOption:
+     def __init__(self, label, type, apply, format_value=format_value_add_label, confirm=None, cost=0.0, choices=None):
+         self.label = label
+         self.type = type
+         self.apply = apply
+         self.format_value = format_value
+         self.confirm = confirm
+         self.cost = cost
+         self.choices = choices
+
+
+ class AxisOptionImg2Img(AxisOption):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.is_img2img = True
+
+ class AxisOptionTxt2Img(AxisOption):
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.is_img2img = False
+
+
+ axis_options = [
+     AxisOption("Nothing", str, do_nothing, format_value=format_nothing),
+     AxisOption("Seed", int, apply_field("seed")),
+     AxisOption("Var. seed", int, apply_field("subseed")),
+     AxisOption("Var. strength", float, apply_field("subseed_strength")),
+     AxisOption("Steps", int, apply_field("steps")),
+     AxisOptionTxt2Img("Hires steps", int, apply_field("hr_second_pass_steps")),
+     AxisOption("CFG Scale", float, apply_field("cfg_scale")),
+     AxisOptionImg2Img("Image CFG Scale", float, apply_field("image_cfg_scale")),
+     AxisOption("Prompt S/R", str, apply_prompt, format_value=format_value),
+     AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list),
+     AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]),
+     AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]),
+     AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)),
+     AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")),
+     AxisOption("Sigma Churn", float, apply_field("s_churn")),
+     AxisOption("Sigma min", float, apply_field("s_tmin")),
+     AxisOption("Sigma max", float, apply_field("s_tmax")),
+     AxisOption("Sigma noise", float, apply_field("s_noise")),
+     AxisOption("Schedule type", str, apply_override("k_sched_type"), choices=lambda: list(sd_samplers_kdiffusion.k_diffusion_scheduler)),
+     AxisOption("Schedule min sigma", float, apply_override("sigma_min")),
+     AxisOption("Schedule max sigma", float, apply_override("sigma_max")),
+     AxisOption("Schedule rho", float, apply_override("rho")),
+     AxisOption("Eta", float, apply_field("eta")),
+     AxisOption("Clip skip", int, apply_clip_skip),
+     AxisOption("Denoising", float, apply_field("denoising_strength")),
+     AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
+     AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
+     AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: ['None'] + list(sd_vae.vae_dict)),
+     AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),
+     AxisOption("UniPC Order", int, apply_uni_pc_order, cost=0.5),
+     AxisOption("Face restore", str, apply_face_restore, format_value=format_value),
+     AxisOption("Token merging ratio", float, apply_override('token_merging_ratio')),
+     AxisOption("Token merging ratio high-res", float, apply_override('token_merging_ratio_hr')),
+     AxisOption("Always discard next-to-last sigma", str, apply_override('always_discard_next_to_last_sigma', boolean=True), choices=boolean_choice(reverse=True)),
+ ]
+
+
+ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend, include_lone_images, include_sub_grids, first_axes_processed, second_axes_processed, margin_size):
+     hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
+     ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
+     title_texts = [[images.GridAnnotation(z)] for z in z_labels]
+
+     list_size = (len(xs) * len(ys) * len(zs))
+
+     processed_result = None
+
+     state.job_count = list_size * p.n_iter
+
+     def process_cell(x, y, z, ix, iy, iz):
+         nonlocal processed_result
+
+         def index(ix, iy, iz):
+             return ix + iy * len(xs) + iz * len(xs) * len(ys)
+
+         state.job = f"{index(ix, iy, iz) + 1} out of {list_size}"
+
+         processed: Processed = cell(x, y, z, ix, iy, iz)
+
+         if processed_result is None:
+             # Use our first processed result object as a template container to hold our full results
+             processed_result = copy(processed)
+             processed_result.images = [None] * list_size
+             processed_result.all_prompts = [None] * list_size
+             processed_result.all_seeds = [None] * list_size
+             processed_result.infotexts = [None] * list_size
+             processed_result.index_of_first_image = 1
+
+         idx = index(ix, iy, iz)
+         if processed.images:
+             # Non-empty list indicates some degree of success.
+             processed_result.images[idx] = processed.images[0]
+             processed_result.all_prompts[idx] = processed.prompt
+             processed_result.all_seeds[idx] = processed.seed
+             processed_result.infotexts[idx] = processed.infotexts[0]
+         else:
+             cell_mode = "P"
+             cell_size = (processed_result.width, processed_result.height)
+             if processed_result.images[0] is not None:
+                 cell_mode = processed_result.images[0].mode
+                 # This corrects size in case of batches:
+                 cell_size = processed_result.images[0].size
+             processed_result.images[idx] = Image.new(cell_mode, cell_size)
+
+     if first_axes_processed == 'x':
+         for ix, x in enumerate(xs):
+             if second_axes_processed == 'y':
+                 for iy, y in enumerate(ys):
+                     for iz, z in enumerate(zs):
+                         process_cell(x, y, z, ix, iy, iz)
+             else:
+                 for iz, z in enumerate(zs):
+                     for iy, y in enumerate(ys):
+                         process_cell(x, y, z, ix, iy, iz)
+     elif first_axes_processed == 'y':
+         for iy, y in enumerate(ys):
+             if second_axes_processed == 'x':
+                 for ix, x in enumerate(xs):
+                     for iz, z in enumerate(zs):
+                         process_cell(x, y, z, ix, iy, iz)
+             else:
+                 for iz, z in enumerate(zs):
+                     for ix, x in enumerate(xs):
+                         process_cell(x, y, z, ix, iy, iz)
+     elif first_axes_processed == 'z':
+         for iz, z in enumerate(zs):
+             if second_axes_processed == 'x':
+                 for ix, x in enumerate(xs):
+                     for iy, y in enumerate(ys):
+                         process_cell(x, y, z, ix, iy, iz)
+             else:
+                 for iy, y in enumerate(ys):
+                     for ix, x in enumerate(xs):
+                         process_cell(x, y, z, ix, iy, iz)
+
+     if not processed_result:
+         # Should never happen, I've only seen it on one of four open tabs and it needed to refresh.
+         print("Unexpected error: Processing could not begin, you may need to refresh the tab or restart the service.")
+         return Processed(p, [])
+     elif not any(processed_result.images):
+         print("Unexpected error: draw_xyz_grid failed to return even a single processed image")
+         return Processed(p, [])
+
+     z_count = len(zs)
+
+     for i in range(z_count):
+         start_index = (i * len(xs) * len(ys)) + i
+         end_index = start_index + len(xs) * len(ys)
+         grid = images.image_grid(processed_result.images[start_index:end_index], rows=len(ys))
+         if draw_legend:
+             grid = images.draw_grid_annotations(grid, processed_result.images[start_index].size[0], processed_result.images[start_index].size[1], hor_texts, ver_texts, margin_size)
+         processed_result.images.insert(i, grid)
+         processed_result.all_prompts.insert(i, processed_result.all_prompts[start_index])
+         processed_result.all_seeds.insert(i, processed_result.all_seeds[start_index])
+         processed_result.infotexts.insert(i, processed_result.infotexts[start_index])
+
+     sub_grid_size = processed_result.images[0].size
+     z_grid = images.image_grid(processed_result.images[:z_count], rows=1)
+     if draw_legend:
+         z_grid = images.draw_grid_annotations(z_grid, sub_grid_size[0], sub_grid_size[1], title_texts, [[images.GridAnnotation()]])
+     processed_result.images.insert(0, z_grid)
+     # TODO: Deeper aspects of the program rely on grid info being misaligned between metadata arrays, which is not ideal.
+     # processed_result.all_prompts.insert(0, processed_result.all_prompts[0])
+     # processed_result.all_seeds.insert(0, processed_result.all_seeds[0])
+     processed_result.infotexts.insert(0, processed_result.infotexts[0])
+
+     return processed_result
+
+
+ class SharedSettingsStackHelper(object):
+     def __enter__(self):
+         self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
+         self.vae = opts.sd_vae
+         self.uni_pc_order = opts.uni_pc_order
+
+     def __exit__(self, exc_type, exc_value, tb):
+         opts.data["sd_vae"] = self.vae
+         opts.data["uni_pc_order"] = self.uni_pc_order
+         modules.sd_models.reload_model_weights()
+         modules.sd_vae.reload_vae_weights()
+
+         opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers
+
+
+ re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
+ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
+
+ re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
+ re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
+
+
+ class Script(scripts.Script):
+     def title(self):
+         return "X/Y/Z plot"
+
+     def ui(self, is_img2img):
+         self.current_axis_options = [x for x in axis_options if type(x) == AxisOption or x.is_img2img == is_img2img]
+
+         with gr.Row():
+             with gr.Column(scale=19):
+                 with gr.Row():
+                     x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type"))
+                     x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values"))
+                     x_values_dropdown = gr.Dropdown(label="X values", visible=False, multiselect=True, interactive=True)
+                     fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_x_tool_button", visible=False)
+
+                 with gr.Row():
+                     y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type"))
+                     y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values"))
+                     y_values_dropdown = gr.Dropdown(label="Y values", visible=False, multiselect=True, interactive=True)
+                     fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_y_tool_button", visible=False)
+
+                 with gr.Row():
+                     z_type = gr.Dropdown(label="Z type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("z_type"))
+                     z_values = gr.Textbox(label="Z values", lines=1, elem_id=self.elem_id("z_values"))
+                     z_values_dropdown = gr.Dropdown(label="Z values", visible=False, multiselect=True, interactive=True)
+                     fill_z_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_z_tool_button", visible=False)
+
+         with gr.Row(variant="compact", elem_id="axis_options"):
+             with gr.Column():
+                 draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=self.elem_id("draw_legend"))
+                 no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=self.elem_id("no_fixed_seeds"))
+             with gr.Column():
+                 include_lone_images = gr.Checkbox(label='Include Sub Images', value=False, elem_id=self.elem_id("include_lone_images"))
+                 include_sub_grids = gr.Checkbox(label='Include Sub Grids', value=False, elem_id=self.elem_id("include_sub_grids"))
+             with gr.Column():
+                 margin_size = gr.Slider(label="Grid margins (px)", minimum=0, maximum=500, value=0, step=2, elem_id=self.elem_id("margin_size"))
+
+         with gr.Row(variant="compact", elem_id="swap_axes"):
+             swap_xy_axes_button = gr.Button(value="Swap X/Y axes", elem_id="xy_grid_swap_axes_button")
+             swap_yz_axes_button = gr.Button(value="Swap Y/Z axes", elem_id="yz_grid_swap_axes_button")
+             swap_xz_axes_button = gr.Button(value="Swap X/Z axes", elem_id="xz_grid_swap_axes_button")
+
+         def swap_axes(axis1_type, axis1_values, axis1_values_dropdown, axis2_type, axis2_values, axis2_values_dropdown):
+             return self.current_axis_options[axis2_type].label, axis2_values, axis2_values_dropdown, self.current_axis_options[axis1_type].label, axis1_values, axis1_values_dropdown
+
+         xy_swap_args = [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown]
+         swap_xy_axes_button.click(swap_axes, inputs=xy_swap_args, outputs=xy_swap_args)
+         yz_swap_args = [y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown]
+         swap_yz_axes_button.click(swap_axes, inputs=yz_swap_args, outputs=yz_swap_args)
+         xz_swap_args = [x_type, x_values, x_values_dropdown, z_type, z_values, z_values_dropdown]
+         swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args)
+
+         def fill(x_type):
+             axis = self.current_axis_options[x_type]
+             return axis.choices() if axis.choices else gr.update()
+
+         fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values_dropdown])
+         fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values_dropdown])
+         fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values_dropdown])
+
+         def select_axis(axis_type, axis_values_dropdown):
+             choices = self.current_axis_options[axis_type].choices
+             has_choices = choices is not None
+             current_values = axis_values_dropdown
+             if has_choices:
+                 choices = choices()
+                 if isinstance(current_values, str):
+                     current_values = current_values.split(",")
+                 current_values = list(filter(lambda x: x in choices, current_values))
+             return gr.Button.update(visible=has_choices), gr.Textbox.update(visible=not has_choices), gr.update(choices=choices if has_choices else None, visible=has_choices, value=current_values)
+
+         x_type.change(fn=select_axis, inputs=[x_type, x_values_dropdown], outputs=[fill_x_button, x_values, x_values_dropdown])
+         y_type.change(fn=select_axis, inputs=[y_type, y_values_dropdown], outputs=[fill_y_button, y_values, y_values_dropdown])
+         z_type.change(fn=select_axis, inputs=[z_type, z_values_dropdown], outputs=[fill_z_button, z_values, z_values_dropdown])
+
+         def get_dropdown_update_from_params(axis, params):
+             val_key = f"{axis} Values"
+             vals = params.get(val_key, "")
+             valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x]
+             return gr.update(value=valslist)
+
+         self.infotext_fields = (
+             (x_type, "X Type"),
+             (x_values, "X Values"),
+             (x_values_dropdown, lambda params: get_dropdown_update_from_params("X", params)),
+             (y_type, "Y Type"),
+             (y_values, "Y Values"),
+             (y_values_dropdown, lambda params: get_dropdown_update_from_params("Y", params)),
+             (z_type, "Z Type"),
+             (z_values, "Z Values"),
+             (z_values_dropdown, lambda params: get_dropdown_update_from_params("Z", params)),
+         )
+
+         return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size]
+
+     def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size):
+         if not no_fixed_seeds:
+             modules.processing.fix_seed(p)
+
+         if not opts.return_grid:
+             p.batch_size = 1
+
+         def process_axis(opt, vals, vals_dropdown):
+             if opt.label == 'Nothing':
+                 return [0]
+
+             if opt.choices is not None:
+                 valslist = vals_dropdown
+             else:
+                 valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x]
+
+             if opt.type == int:
+                 valslist_ext = []
+
+                 for val in valslist:
+                     m = re_range.fullmatch(val)
+                     mc = re_range_count.fullmatch(val)
+                     if m is not None:
+                         start = int(m.group(1))
+                         end = int(m.group(2)) + 1
+                         step = int(m.group(3)) if m.group(3) is not None else 1
+
+                         valslist_ext += list(range(start, end, step))
+                     elif mc is not None:
+                         start = int(mc.group(1))
+                         end = int(mc.group(2))
+                         num = int(mc.group(3)) if mc.group(3) is not None else 1
+
+                         valslist_ext += [int(x) for x in np.linspace(start=start, stop=end, num=num).tolist()]
+                     else:
+                         valslist_ext.append(val)
+
+                 valslist = valslist_ext
+             elif opt.type == float:
+                 valslist_ext = []
+
+                 for val in valslist:
+                     m = re_range_float.fullmatch(val)
+                     mc = re_range_count_float.fullmatch(val)
+                     if m is not None:
+                         start = float(m.group(1))
+                         end = float(m.group(2))
+                         step = float(m.group(3)) if m.group(3) is not None else 1
+
+                         valslist_ext += np.arange(start, end + step, step).tolist()
+                     elif mc is not None:
+                         start = float(mc.group(1))
+                         end = float(mc.group(2))
+                         num = int(mc.group(3)) if mc.group(3) is not None else 1
+
+                         valslist_ext += np.linspace(start=start, stop=end, num=num).tolist()
+                     else:
+                         valslist_ext.append(val)
+
+                 valslist = valslist_ext
+             elif opt.type == str_permutations:
+                 valslist = list(permutations(valslist))
+
+             valslist = [opt.type(x) for x in valslist]
+
+             # Confirm options are valid before starting
+             if opt.confirm:
+                 opt.confirm(p, valslist)
+
+             return valslist
+
+         x_opt = self.current_axis_options[x_type]
+         if x_opt.choices is not None:
+             x_values = ",".join(x_values_dropdown)
+         xs = process_axis(x_opt, x_values, x_values_dropdown)
+
+         y_opt = self.current_axis_options[y_type]
+         if y_opt.choices is not None:
+             y_values = ",".join(y_values_dropdown)
+         ys = process_axis(y_opt, y_values, y_values_dropdown)
+
+         z_opt = self.current_axis_options[z_type]
+         if z_opt.choices is not None:
+             z_values = ",".join(z_values_dropdown)
+         zs = process_axis(z_opt, z_values, z_values_dropdown)
+
+         # this could be moved to common code, but unlikely to be ever triggered anywhere else
+         Image.MAX_IMAGE_PIXELS = None  # disable check in Pillow and rely on check below to allow large custom image sizes
+         grid_mp = round(len(xs) * len(ys) * len(zs) * p.width * p.height / 1000000)
+         assert grid_mp < opts.img_max_size_mp, f'Error: Resulting grid would be too large ({grid_mp} MPixels) (max configured size is {opts.img_max_size_mp} MPixels)'
+
+         def fix_axis_seeds(axis_opt, axis_list):
+             if axis_opt.label in ['Seed', 'Var. seed']:
+                 return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list]
+             else:
+                 return axis_list
+
+         if not no_fixed_seeds:
+             xs = fix_axis_seeds(x_opt, xs)
+             ys = fix_axis_seeds(y_opt, ys)
+             zs = fix_axis_seeds(z_opt, zs)
+
+         if x_opt.label == 'Steps':
+             total_steps = sum(xs) * len(ys) * len(zs)
+         elif y_opt.label == 'Steps':
+             total_steps = sum(ys) * len(xs) * len(zs)
+         elif z_opt.label == 'Steps':
+             total_steps = sum(zs) * len(xs) * len(ys)
+         else:
+             total_steps = p.steps * len(xs) * len(ys) * len(zs)
+
+         if isinstance(p, StableDiffusionProcessingTxt2Img) and p.enable_hr:
+             if x_opt.label == "Hires steps":
+                 total_steps += sum(xs) * len(ys) * len(zs)
+             elif y_opt.label == "Hires steps":
+                 total_steps += sum(ys) * len(xs) * len(zs)
+             elif z_opt.label == "Hires steps":
+                 total_steps += sum(zs) * len(xs) * len(ys)
+             elif p.hr_second_pass_steps:
+                 total_steps += p.hr_second_pass_steps * len(xs) * len(ys) * len(zs)
+             else:
+                 total_steps *= 2
+
+         total_steps *= p.n_iter
+
+         image_cell_count = p.n_iter * p.batch_size
+         cell_console_text = f"; {image_cell_count} images per cell" if image_cell_count > 1 else ""
+         plural_s = 's' if len(zs) > 1 else ''
+         print(f"X/Y/Z plot will create {len(xs) * len(ys) * len(zs) * image_cell_count} images on {len(zs)} {len(xs)}x{len(ys)} grid{plural_s}{cell_console_text}. (Total steps to process: {total_steps})")
+         shared.total_tqdm.updateTotal(total_steps)
+
+         state.xyz_plot_x = AxisInfo(x_opt, xs)
+         state.xyz_plot_y = AxisInfo(y_opt, ys)
+         state.xyz_plot_z = AxisInfo(z_opt, zs)
+
+         # If one of the axes is very slow to change between (like SD model
+         # checkpoint), then make sure it is in the outer iteration of the nested
+         # `for` loop.
+         first_axes_processed = 'z'
+         second_axes_processed = 'y'
+         if x_opt.cost > y_opt.cost and x_opt.cost > z_opt.cost:
+             first_axes_processed = 'x'
+             if y_opt.cost > z_opt.cost:
+                 second_axes_processed = 'y'
+             else:
+                 second_axes_processed = 'z'
+         elif y_opt.cost > x_opt.cost and y_opt.cost > z_opt.cost:
+             first_axes_processed = 'y'
+             if x_opt.cost > z_opt.cost:
+                 second_axes_processed = 'x'
+             else:
+                 second_axes_processed = 'z'
+         elif z_opt.cost > x_opt.cost and z_opt.cost > y_opt.cost:
+             first_axes_processed = 'z'
+             if x_opt.cost > y_opt.cost:
+                 second_axes_processed = 'x'
+             else:
+                 second_axes_processed = 'y'
+
+         grid_infotext = [None] * (1 + len(zs))
+
+         def cell(x, y, z, ix, iy, iz):
+             if shared.state.interrupted:
+                 return Processed(p, [], p.seed, "")
+
+             pc = copy(p)
+             pc.styles = pc.styles[:]
+             x_opt.apply(pc, x, xs)
+             y_opt.apply(pc, y, ys)
+             z_opt.apply(pc, z, zs)
+
+             res = process_images(pc)
+
+             # Sets subgrid infotexts
+             subgrid_index = 1 + iz
+             if grid_infotext[subgrid_index] is None and ix == 0 and iy == 0:
+                 pc.extra_generation_params = copy(pc.extra_generation_params)
+                 pc.extra_generation_params['Script'] = self.title()
+
+                 if x_opt.label != 'Nothing':
+                     pc.extra_generation_params["X Type"] = x_opt.label
+                     pc.extra_generation_params["X Values"] = x_values
+                     if x_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
+                         pc.extra_generation_params["Fixed X Values"] = ", ".join([str(x) for x in xs])
+
+                 if y_opt.label != 'Nothing':
+                     pc.extra_generation_params["Y Type"] = y_opt.label
+                     pc.extra_generation_params["Y Values"] = y_values
+                     if y_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
+                         pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys])
+
+                 grid_infotext[subgrid_index] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds)
+
+             # Sets main grid infotext
+             if grid_infotext[0] is None and ix == 0 and iy == 0 and iz == 0:
+                 pc.extra_generation_params = copy(pc.extra_generation_params)
+
+                 if z_opt.label != 'Nothing':
+                     pc.extra_generation_params["Z Type"] = z_opt.label
+                     pc.extra_generation_params["Z Values"] = z_values
+                     if z_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds:
+                         pc.extra_generation_params["Fixed Z Values"] = ", ".join([str(z) for z in zs])
+
+                 grid_infotext[0] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds)
+
+             return res
+
+         with SharedSettingsStackHelper():
+             processed = draw_xyz_grid(
+                 p,
+                 xs=xs,
+                 ys=ys,
+                 zs=zs,
+                 x_labels=[x_opt.format_value(p, x_opt, x) for x in xs],
+                 y_labels=[y_opt.format_value(p, y_opt, y) for y in ys],
+                 z_labels=[z_opt.format_value(p, z_opt, z) for z in zs],
+                 cell=cell,
+                 draw_legend=draw_legend,
+                 include_lone_images=include_lone_images,
+                 include_sub_grids=include_sub_grids,
+                 first_axes_processed=first_axes_processed,
+                 second_axes_processed=second_axes_processed,
+                 margin_size=margin_size
+             )
+
+         if not processed.images:
+             # It broke, no further handling needed.
+             return processed
+
+         z_count = len(zs)
+
+         # Set the grid infotexts to the real ones with extra_generation_params (1 main grid + z_count sub-grids)
+         processed.infotexts[:1 + z_count] = grid_infotext[:1 + z_count]
+
+         if not include_lone_images:
+             # Don't need sub-images anymore, drop from list:
+             processed.images = processed.images[:z_count + 1]
+
+         if opts.grid_save:
+             # Auto-save main and sub-grids:
+             grid_count = z_count + 1 if z_count > 1 else 1
+             for g in range(grid_count):
+                 # TODO: See previous comment about intentional data misalignment.
+                 adj_g = g - 1 if g > 0 else g
+                 images.save_image(processed.images[g], p.outpath_grids, "xyz_grid", info=processed.infotexts[g], extension=opts.grid_format, prompt=processed.all_prompts[adj_g], seed=processed.all_seeds[adj_g], grid=True, p=processed)
+
+         if not include_sub_grids:
+             # Done with sub-grids, drop all related information:
+             for _ in range(z_count):
+                 del processed.images[1]
+                 del processed.all_prompts[1]
+                 del processed.all_seeds[1]
+                 del processed.infotexts[1]
+
+         return processed
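As a quick reference for the range grammar that process_axis and the re_range* patterns above accept, a short sketch follows; the specific values are illustrative:

# Illustrative inputs for an integer axis such as Steps, per the
# re_range / re_range_count patterns defined above:
#
#   "10-15"        -> 10, 11, 12, 13, 14, 15   (plain inclusive range)
#   "10-20 (+2)"   -> 10, 12, 14, 16, 18, 20   (range with an increment)
#   "10-20 [3]"    -> 10, 15, 20               (range with a value count, via np.linspace)
#
# Float axes (e.g. CFG Scale) use the same shapes through re_range_float /
# re_range_count_float, e.g. "1.0-2.0 [5]" -> 1.0, 1.25, 1.5, 1.75, 2.0.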
style.css ADDED
The diff for this file is too large to render. See raw diff
 
test/__init__.py ADDED
File without changes
test/conftest.py ADDED
@@ -0,0 +1,17 @@
+ import os
+
+ import pytest
+ from PIL import Image
+ from gradio.processing_utils import encode_pil_to_base64
+
+ test_files_path = os.path.dirname(__file__) + "/test_files"
+
+
+ @pytest.fixture(scope="session")  # session so we don't read this over and over
+ def img2img_basic_image_base64() -> str:
+     return encode_pil_to_base64(Image.open(os.path.join(test_files_path, "img2img_basic.png")))
+
+
+ @pytest.fixture(scope="session")  # session so we don't read this over and over
+ def mask_basic_image_base64() -> str:
+     return encode_pil_to_base64(Image.open(os.path.join(test_files_path, "mask_basic.png")))
test/test_extras.py ADDED
@@ -0,0 +1,35 @@
+ import requests
+
+
+ def test_simple_upscaling_performed(base_url, img2img_basic_image_base64):
+     payload = {
+         "resize_mode": 0,
+         "show_extras_results": True,
+         "gfpgan_visibility": 0,
+         "codeformer_visibility": 0,
+         "codeformer_weight": 0,
+         "upscaling_resize": 2,
+         "upscaling_resize_w": 128,
+         "upscaling_resize_h": 128,
+         "upscaling_crop": True,
+         "upscaler_1": "Lanczos",
+         "upscaler_2": "None",
+         "extras_upscaler_2_visibility": 0,
+         "image": img2img_basic_image_base64,
+     }
+     assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200
+
+
+ def test_png_info_performed(base_url, img2img_basic_image_base64):
+     payload = {
+         "image": img2img_basic_image_base64,
+     }
+     assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200
+
+
+ def test_interrogate_performed(base_url, img2img_basic_image_base64):
+     payload = {
+         "image": img2img_basic_image_base64,
+         "model": "clip",
+     }
+     assert requests.post(f"{base_url}/sdapi/v1/extra-single-image", json=payload).status_code == 200
test/test_files/empty.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d030ad8db708280fcae77d87e973102039acd23a11bdecc3db8eb6c0ac940ee1
+ size 431
test/test_files/img2img_basic.png ADDED
test/test_files/mask_basic.png ADDED
test/test_img2img.py ADDED
@@ -0,0 +1,68 @@
+
+ import pytest
+ import requests
+
+
+ @pytest.fixture()
+ def url_img2img(base_url):
+     return f"{base_url}/sdapi/v1/img2img"
+
+
+ @pytest.fixture()
+ def simple_img2img_request(img2img_basic_image_base64):
+     return {
+         "batch_size": 1,
+         "cfg_scale": 7,
+         "denoising_strength": 0.75,
+         "eta": 0,
+         "height": 64,
+         "include_init_images": False,
+         "init_images": [img2img_basic_image_base64],
+         "inpaint_full_res": False,
+         "inpaint_full_res_padding": 0,
+         "inpainting_fill": 0,
+         "inpainting_mask_invert": False,
+         "mask": None,
+         "mask_blur": 4,
+         "n_iter": 1,
+         "negative_prompt": "",
+         "override_settings": {},
+         "prompt": "example prompt",
+         "resize_mode": 0,
+         "restore_faces": False,
+         "s_churn": 0,
+         "s_noise": 1,
+         "s_tmax": 0,
+         "s_tmin": 0,
+         "sampler_index": "Euler a",
+         "seed": -1,
+         "seed_resize_from_h": -1,
+         "seed_resize_from_w": -1,
+         "steps": 3,
+         "styles": [],
+         "subseed": -1,
+         "subseed_strength": 0,
+         "tiling": False,
+         "width": 64,
+     }
+
+
+ def test_img2img_simple_performed(url_img2img, simple_img2img_request):
+     assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200
+
+
+ def test_inpainting_masked_performed(url_img2img, simple_img2img_request, mask_basic_image_base64):
+     simple_img2img_request["mask"] = mask_basic_image_base64
+     assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200
+
+
+ def test_inpainting_with_inverted_masked_performed(url_img2img, simple_img2img_request, mask_basic_image_base64):
+     simple_img2img_request["mask"] = mask_basic_image_base64
+     simple_img2img_request["inpainting_mask_invert"] = True
+     assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200
+
+
+ def test_img2img_sd_upscale_performed(url_img2img, simple_img2img_request):
+     simple_img2img_request["script_name"] = "sd upscale"
+     simple_img2img_request["script_args"] = ["", 8, "Lanczos", 2.0]
+     assert requests.post(url_img2img, json=simple_img2img_request).status_code == 200
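The script_args list in the last test lines up positionally with the components returned by the SD upscale script's ui method earlier in this commit: an empty placeholder for the info HTML element, an overlap of 8, the upscaler given by name ("Lanczos", which run() resolves to an index via its isinstance check), and a scale factor of 2.0.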
test/test_txt2img.py ADDED
@@ -0,0 +1,90 @@
+
+ import pytest
+ import requests
+
+
+ @pytest.fixture()
+ def url_txt2img(base_url):
+     return f"{base_url}/sdapi/v1/txt2img"
+
+
+ @pytest.fixture()
+ def simple_txt2img_request():
+     return {
+         "batch_size": 1,
+         "cfg_scale": 7,
+         "denoising_strength": 0,
+         "enable_hr": False,
+         "eta": 0,
+         "firstphase_height": 0,
+         "firstphase_width": 0,
+         "height": 64,
+         "n_iter": 1,
+         "negative_prompt": "",
+         "prompt": "example prompt",
+         "restore_faces": False,
+         "s_churn": 0,
+         "s_noise": 1,
+         "s_tmax": 0,
+         "s_tmin": 0,
+         "sampler_index": "Euler a",
+         "seed": -1,
+         "seed_resize_from_h": -1,
+         "seed_resize_from_w": -1,
+         "steps": 3,
+         "styles": [],
+         "subseed": -1,
+         "subseed_strength": 0,
+         "tiling": False,
+         "width": 64,
+     }
+
+
+ def test_txt2img_simple_performed(url_txt2img, simple_txt2img_request):
+     assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200
+
+
+ def test_txt2img_with_negative_prompt_performed(url_txt2img, simple_txt2img_request):
+     simple_txt2img_request["negative_prompt"] = "example negative prompt"
+     assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200
+
+
+ def test_txt2img_with_complex_prompt_performed(url_txt2img, simple_txt2img_request):
+     simple_txt2img_request["prompt"] = "((emphasis)), (emphasis1:1.1), [to:1], [from::2], [from:to:0.3], [alt|alt1]"
+     assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200
+
+
+ def test_txt2img_not_square_image_performed(url_txt2img, simple_txt2img_request):
+     simple_txt2img_request["height"] = 128
+     assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200
+
+
+ def test_txt2img_with_hrfix_performed(url_txt2img, simple_txt2img_request):
+     simple_txt2img_request["enable_hr"] = True
+     assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200
+
+
+ def test_txt2img_with_tiling_performed(url_txt2img, simple_txt2img_request):
+     simple_txt2img_request["tiling"] = True
+     assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200
+
+
+ def test_txt2img_with_restore_faces_performed(url_txt2img, simple_txt2img_request):
+     simple_txt2img_request["restore_faces"] = True
+     assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200
+
+
+ @pytest.mark.parametrize("sampler", ["PLMS", "DDIM", "UniPC"])
+ def test_txt2img_with_vanilla_sampler_performed(url_txt2img, simple_txt2img_request, sampler):
+     simple_txt2img_request["sampler_index"] = sampler
+     assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200
+
+
+ def test_txt2img_multiple_batches_performed(url_txt2img, simple_txt2img_request):
+     simple_txt2img_request["n_iter"] = 2
+     assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200
+
+
+ def test_txt2img_batch_performed(url_txt2img, simple_txt2img_request):
+     simple_txt2img_request["batch_size"] = 2
+     assert requests.post(url_txt2img, json=simple_txt2img_request).status_code == 200
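Outside of pytest, the same endpoint can be exercised directly. A minimal sketch follows, assuming a locally running server with the API enabled; the URL and the base64 "images" response field are assumptions based on common webui API usage, not shown in this diff (the test suite gets base_url from a fixture defined outside this excerpt):

import base64
import requests

base_url = "http://127.0.0.1:7860"  # assumed local server address

payload = {
    "prompt": "example prompt",
    "steps": 3,
    "width": 64,
    "height": 64,
    "sampler_index": "Euler a",
}

r = requests.post(f"{base_url}/sdapi/v1/txt2img", json=payload)
r.raise_for_status()

# The response is assumed to carry generated images as base64-encoded strings.
for i, img_b64 in enumerate(r.json()["images"]):
    with open(f"out_{i}.png", "wb") as f:
        f.write(base64.b64decode(img_b64))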
test/test_utils.py ADDED
@@ -0,0 +1,33 @@
+ import pytest
+ import requests
+
+
+ def test_options_write(base_url):
+     url_options = f"{base_url}/sdapi/v1/options"
+     response = requests.get(url_options)
+     assert response.status_code == 200
+
+     pre_value = response.json()["send_seed"]
+
+     assert requests.post(url_options, json={'send_seed': (not pre_value)}).status_code == 200
+
+     response = requests.get(url_options)
+     assert response.status_code == 200
+     assert response.json()['send_seed'] == (not pre_value)
+
+     requests.post(url_options, json={"send_seed": pre_value})
+
+
+ @pytest.mark.parametrize("url", [
+     "sdapi/v1/cmd-flags",
+     "sdapi/v1/samplers",
+     "sdapi/v1/upscalers",
+     "sdapi/v1/sd-models",
+     "sdapi/v1/hypernetworks",
+     "sdapi/v1/face-restorers",
+     "sdapi/v1/realesrgan-models",
+     "sdapi/v1/prompt-styles",
+     "sdapi/v1/embeddings",
+ ])
+ def test_get_api_url(base_url, url):
+     assert requests.get(f"{base_url}/{url}").status_code == 200
textual_inversion_templates/hypernetwork.txt ADDED
@@ -0,0 +1,27 @@
+ a photo of a [filewords]
+ a rendering of a [filewords]
+ a cropped photo of the [filewords]
+ the photo of a [filewords]
+ a photo of a clean [filewords]
+ a photo of a dirty [filewords]
+ a dark photo of the [filewords]
+ a photo of my [filewords]
+ a photo of the cool [filewords]
+ a close-up photo of a [filewords]
+ a bright photo of the [filewords]
+ a cropped photo of a [filewords]
+ a photo of the [filewords]
+ a good photo of the [filewords]
+ a photo of one [filewords]
+ a close-up photo of the [filewords]
+ a rendition of the [filewords]
+ a photo of the clean [filewords]
+ a rendition of a [filewords]
+ a photo of a nice [filewords]
+ a good photo of a [filewords]
+ a photo of the nice [filewords]
+ a photo of the small [filewords]
+ a photo of the weird [filewords]
+ a photo of the large [filewords]
+ a photo of a cool [filewords]
+ a photo of a small [filewords]
textual_inversion_templates/none.txt ADDED
@@ -0,0 +1 @@
+ picture
textual_inversion_templates/style.txt ADDED
@@ -0,0 +1,19 @@
+ a painting, art by [name]
+ a rendering, art by [name]
+ a cropped painting, art by [name]
+ the painting, art by [name]
+ a clean painting, art by [name]
+ a dirty painting, art by [name]
+ a dark painting, art by [name]
+ a picture, art by [name]
+ a cool painting, art by [name]
+ a close-up painting, art by [name]
+ a bright painting, art by [name]
+ a cropped painting, art by [name]
+ a good painting, art by [name]
+ a close-up painting, art by [name]
+ a rendition, art by [name]
+ a nice painting, art by [name]
+ a small painting, art by [name]
+ a weird painting, art by [name]
+ a large painting, art by [name]
textual_inversion_templates/style_filewords.txt ADDED
@@ -0,0 +1,19 @@
+ a painting of [filewords], art by [name]
+ a rendering of [filewords], art by [name]
+ a cropped painting of [filewords], art by [name]
+ the painting of [filewords], art by [name]
+ a clean painting of [filewords], art by [name]
+ a dirty painting of [filewords], art by [name]
+ a dark painting of [filewords], art by [name]
+ a picture of [filewords], art by [name]
+ a cool painting of [filewords], art by [name]
+ a close-up painting of [filewords], art by [name]
+ a bright painting of [filewords], art by [name]
+ a cropped painting of [filewords], art by [name]
+ a good painting of [filewords], art by [name]
+ a close-up painting of [filewords], art by [name]
+ a rendition of [filewords], art by [name]
+ a nice painting of [filewords], art by [name]
+ a small painting of [filewords], art by [name]
+ a weird painting of [filewords], art by [name]
+ a large painting of [filewords], art by [name]
textual_inversion_templates/subject.txt ADDED
@@ -0,0 +1,27 @@
+ a photo of a [name]
+ a rendering of a [name]
+ a cropped photo of the [name]
+ the photo of a [name]
+ a photo of a clean [name]
+ a photo of a dirty [name]
+ a dark photo of the [name]
+ a photo of my [name]
+ a photo of the cool [name]
+ a close-up photo of a [name]
+ a bright photo of the [name]
+ a cropped photo of a [name]
+ a photo of the [name]
+ a good photo of the [name]
+ a photo of one [name]
+ a close-up photo of the [name]
+ a rendition of the [name]
+ a photo of the clean [name]
+ a rendition of a [name]
+ a photo of a nice [name]
+ a good photo of a [name]
+ a photo of the nice [name]
+ a photo of the small [name]
+ a photo of the weird [name]
+ a photo of the large [name]
+ a photo of a cool [name]
+ a photo of a small [name]
textual_inversion_templates/subject_filewords.txt ADDED
@@ -0,0 +1,27 @@
+ a photo of a [name], [filewords]
+ a rendering of a [name], [filewords]
+ a cropped photo of the [name], [filewords]
+ the photo of a [name], [filewords]
+ a photo of a clean [name], [filewords]
+ a photo of a dirty [name], [filewords]
+ a dark photo of the [name], [filewords]
+ a photo of my [name], [filewords]
+ a photo of the cool [name], [filewords]
+ a close-up photo of a [name], [filewords]
+ a bright photo of the [name], [filewords]
+ a cropped photo of a [name], [filewords]
+ a photo of the [name], [filewords]
+ a good photo of the [name], [filewords]
+ a photo of one [name], [filewords]
+ a close-up photo of the [name], [filewords]
+ a rendition of the [name], [filewords]
+ a photo of the clean [name], [filewords]
+ a rendition of a [name], [filewords]
+ a photo of a nice [name], [filewords]
+ a good photo of a [name], [filewords]
+ a photo of the nice [name], [filewords]
+ a photo of the small [name], [filewords]
+ a photo of the weird [name], [filewords]
+ a photo of the large [name], [filewords]
+ a photo of a cool [name], [filewords]
+ a photo of a small [name], [filewords]
tmp/tagAutocompletePath.txt ADDED
@@ -0,0 +1 @@
+ extensions/a1111-microsoftexcel-tagcomplete/tags
ui-config.json ADDED
The diff for this file is too large to render. See raw diff
 
webui-macos-env.sh ADDED
@@ -0,0 +1,19 @@
+ #!/bin/bash
+ ####################################################################
+ # macOS defaults                                                   #
+ # Please modify webui-user.sh to change these instead of this file #
+ ####################################################################
+
+ if [[ -x "$(command -v python3.10)" ]]
+ then
+     python_cmd="python3.10"
+ fi
+
+ export install_dir="$HOME"
+ export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate"
+ export TORCH_COMMAND="pip install torch==2.0.1 torchvision==0.15.2"
+ export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git"
+ export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71"
+ export PYTORCH_ENABLE_MPS_FALLBACK=1
+
+ ####################################################################
webui-user.bat ADDED
@@ -0,0 +1,8 @@
+ @echo off
+
+ set PYTHON=
+ set GIT=
+ set VENV_DIR=
+ set COMMANDLINE_ARGS=
+
+ call webui.bat
webui-user.sh ADDED
@@ -0,0 +1,48 @@
+ #!/bin/bash
+ #########################################################
+ # Uncomment and change the variables below to your need:#
+ #########################################################
+
+ # Install directory without trailing slash
+ #install_dir="/home/$(whoami)"
+
+ # Name of the subdirectory
+ #clone_dir="stable-diffusion-webui"
+
+ # Commandline arguments for webui.py, for example: export COMMANDLINE_ARGS="--medvram --opt-split-attention"
+ #export COMMANDLINE_ARGS=""
+
+ # python3 executable
+ #python_cmd="python3"
+
+ # git executable
+ #export GIT="git"
+
+ # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv)
+ #venv_dir="venv"
+
+ # script to launch to start the app
+ #export LAUNCH_SCRIPT="launch.py"
+
+ # install command for torch
+ #export TORCH_COMMAND="pip install torch==1.12.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113"
+
+ # Requirements file to use for stable-diffusion-webui
+ #export REQS_FILE="requirements_versions.txt"
+
+ # Fixed git repos
+ #export K_DIFFUSION_PACKAGE=""
+ #export GFPGAN_PACKAGE=""
+
+ # Fixed git commits
+ #export STABLE_DIFFUSION_COMMIT_HASH=""
+ #export CODEFORMER_COMMIT_HASH=""
+ #export BLIP_COMMIT_HASH=""
+
+ # Uncomment to enable accelerated launch
+ #export ACCELERATE="True"
+
+ # Uncomment to disable TCMalloc
+ #export NO_TCMALLOC="True"
+
+ ###########################################
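As a usage sketch, a customized webui-user.sh would uncomment a few of these variables; the values below are illustrative only, reusing the flags the file's own comments mention:

# Illustrative webui-user.sh overrides (examples, not recommendations):
install_dir="/home/$(whoami)"
clone_dir="stable-diffusion-webui"
export COMMANDLINE_ARGS="--medvram --opt-split-attention"
python_cmd="python3"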
webui.bat ADDED
@@ -0,0 +1,87 @@
+ @echo off
+
+ if not defined PYTHON (set PYTHON=python)
+ if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv")
+
+ set SD_WEBUI_RESTART=tmp/restart
+ set ERROR_REPORTING=FALSE
+
+ mkdir tmp 2>NUL
+
+ %PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :check_pip
+ echo Couldn't launch python
+ goto :show_stdout_stderr
+
+ :check_pip
+ %PYTHON% -mpip --help >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :start_venv
+ if "%PIP_INSTALLER_LOCATION%" == "" goto :show_stdout_stderr
+ %PYTHON% "%PIP_INSTALLER_LOCATION%" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :start_venv
+ echo Couldn't install pip
+ goto :show_stdout_stderr
+
+ :start_venv
+ if ["%VENV_DIR%"] == ["-"] goto :skip_venv
+ if ["%SKIP_VENV%"] == ["1"] goto :skip_venv
+
+ dir "%VENV_DIR%\Scripts\Python.exe" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :activate_venv
+
+ for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i"
+ echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME%
+ %PYTHON_FULLNAME% -m venv "%VENV_DIR%" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :activate_venv
+ echo Unable to create venv in directory "%VENV_DIR%"
+ goto :show_stdout_stderr
+
+ :activate_venv
+ set PYTHON="%VENV_DIR%\Scripts\Python.exe"
+ echo venv %PYTHON%
+
+ :skip_venv
+ if [%ACCELERATE%] == ["True"] goto :accelerate
+ goto :launch
+
+ :accelerate
+ echo Checking for accelerate
+ set ACCELERATE="%VENV_DIR%\Scripts\accelerate.exe"
+ if EXIST %ACCELERATE% goto :accelerate_launch
+
+ :launch
+ %PYTHON% launch.py %*
+ if EXIST tmp/restart goto :skip_venv
+ pause
+ exit /b
+
+ :accelerate_launch
+ echo Accelerating
+ %ACCELERATE% launch --num_cpu_threads_per_process=6 launch.py
+ if EXIST tmp/restart goto :skip_venv
+ pause
+ exit /b
+
+ :show_stdout_stderr
+
+ echo.
+ echo exit code: %errorlevel%
+
+ for /f %%i in ("tmp\stdout.txt") do set size=%%~zi
+ if %size% equ 0 goto :show_stderr
+ echo.
+ echo stdout:
+ type tmp\stdout.txt
+
+ :show_stderr
+ for /f %%i in ("tmp\stderr.txt") do set size=%%~zi
+ if %size% equ 0 goto :endofscript
+ echo.
+ echo stderr:
+ type tmp\stderr.txt
+
+ :endofscript
+
+ echo.
+ echo Launch unsuccessful. Exiting.
+ pause
@@ -0,0 +1,485 @@
 
+ from __future__ import annotations
+
+ import os
+ import sys
+ import time
+ import importlib
+ import signal
+ import re
+ import warnings
+ import json
+ from threading import Thread
+ from typing import Iterable
+
+ from fastapi import FastAPI
+ from fastapi.middleware.cors import CORSMiddleware
+ from fastapi.middleware.gzip import GZipMiddleware
+ from packaging import version
+
+ import logging
+
+ # We can't use cmd_opts for this because it will not have been initialized at this point.
+ log_level = os.environ.get("SD_WEBUI_LOG_LEVEL")
+ if log_level:
+     log_level = getattr(logging, log_level.upper(), None) or logging.INFO
+     logging.basicConfig(
+         level=log_level,
+         format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
+         datefmt='%Y-%m-%d %H:%M:%S',
+     )
+
+ logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
+ logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
+
+ from modules import timer
+ startup_timer = timer.startup_timer
+ startup_timer.record("launcher")
+
+ import torch
+ import pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
+ warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
+ startup_timer.record("import torch")
+
+ import gradio # noqa: F401
+ startup_timer.record("import gradio")
+
+ from modules import paths, timer, import_hook, errors, devices # noqa: F401
+ startup_timer.record("setup paths")
+
+ import ldm.modules.encoders.modules # noqa: F401
+ startup_timer.record("import ldm")
+
+ from modules import extra_networks
+ from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock # noqa: F401
+
+ # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
+ if ".dev" in torch.__version__ or "+git" in torch.__version__:
+     torch.__long_version__ = torch.__version__
+     torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
+
+ from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
+ import modules.codeformer_model as codeformer
+ import modules.face_restoration
+ import modules.gfpgan_model as gfpgan
+ import modules.img2img
+
+ import modules.lowvram
+ import modules.scripts
+ import modules.sd_hijack
+ import modules.sd_hijack_optimizations
+ import modules.sd_models
+ import modules.sd_vae
+ import modules.sd_unet
+ import modules.txt2img
+ import modules.script_callbacks
+ import modules.textual_inversion.textual_inversion
+ import modules.progress
+
+ import modules.ui
+ from modules import modelloader
+ from modules.shared import cmd_opts
+ import modules.hypernetworks.hypernetwork
+
+ startup_timer.record("other imports")
+
+
+ if cmd_opts.server_name:
+     server_name = cmd_opts.server_name
+ else:
+     server_name = "0.0.0.0" if cmd_opts.listen else None
+
+
+ def fix_asyncio_event_loop_policy():
+     """
+         The default `asyncio` event loop policy only automatically creates
+         event loops in the main threads. Other threads must create event
+         loops explicitly or `asyncio.get_event_loop` (and therefore
+         `.IOLoop.current`) will fail. Installing this policy allows event
+         loops to be created automatically on any thread, matching the
+         behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
+     """
+
+     import asyncio
+
+     if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
+         # "Any thread" and "selector" should be orthogonal, but there's not a clean
+         # interface for composing policies so pick the right base.
+         _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore
+     else:
+         _BasePolicy = asyncio.DefaultEventLoopPolicy
+
+     class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore
+         """Event loop policy that allows loop creation on any thread.
+         Usage::
+
+             asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
+         """
+
+         def get_event_loop(self) -> asyncio.AbstractEventLoop:
+             try:
+                 return super().get_event_loop()
+             except (RuntimeError, AssertionError):
+                 # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
+                 # and changed to a RuntimeError in 3.4.3.
+                 # "There is no current event loop in thread %r"
+                 loop = self.new_event_loop()
+                 self.set_event_loop(loop)
+                 return loop
+
+     asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
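+
+ # Editor's note (illustrative addition, not part of the original file): with the policy
+ # installed above, asyncio.get_event_loop() also works from worker threads, which would
+ # otherwise raise "There is no current event loop in thread ...", e.g.:
+ #
+ #     def worker():
+ #         loop = asyncio.get_event_loop()  # created on demand by AnyThreadEventLoopPolicy
+ #         loop.run_until_complete(asyncio.sleep(0))
+ #
+ #     Thread(target=worker).start()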
+
+
+ def check_versions():
+     if shared.cmd_opts.skip_version_check:
+         return
+
+     expected_torch_version = "2.0.0"
+
+     if version.parse(torch.__version__) < version.parse(expected_torch_version):
+         errors.print_error_explanation(f"""
+ You are running torch {torch.__version__}.
+ The program is tested to work with torch {expected_torch_version}.
+ To reinstall the desired version, run with commandline flag --reinstall-torch.
+ Beware that this will cause a lot of large files to be downloaded, and that there
+ are reports of issues with the training tab on the latest version.
+
+ Use --skip-version-check commandline argument to disable this check.
+ """.strip())
+
+     expected_xformers_version = "0.0.20"
+     if shared.xformers_available:
+         import xformers
+
+         if version.parse(xformers.__version__) < version.parse(expected_xformers_version):
+             errors.print_error_explanation(f"""
+ You are running xformers {xformers.__version__}.
+ The program is tested to work with xformers {expected_xformers_version}.
+ To reinstall the desired version, run with commandline flag --reinstall-xformers.
+
+ Use --skip-version-check commandline argument to disable this check.
+ """.strip())
+
+
+ def restore_config_state_file():
+     config_state_file = shared.opts.restore_config_state_file
+     if config_state_file == "":
+         return
+
+     shared.opts.restore_config_state_file = ""
+     shared.opts.save(shared.config_filename)
+
+     if os.path.isfile(config_state_file):
+         print(f"*** About to restore extension state from file: {config_state_file}")
+         with open(config_state_file, "r", encoding="utf-8") as f:
+             config_state = json.load(f)
+             config_states.restore_extension_config(config_state)
+         startup_timer.record("restore extension config")
+     elif config_state_file:
+         print(f"!!! Config state backup not found: {config_state_file}")
+
+
+ def validate_tls_options():
+     if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):
+         return
+
+     try:
+         if not os.path.exists(cmd_opts.tls_keyfile):
+             print("Invalid path to TLS keyfile given")
+         if not os.path.exists(cmd_opts.tls_certfile):
+             print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
+     except TypeError:
+         cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
+         print("TLS setup invalid, running webui without TLS")
+     else:
+         print("Running with TLS")
+     startup_timer.record("TLS")
+
+
+ def get_gradio_auth_creds() -> Iterable[tuple[str, ...]]:
+     """
+     Convert the gradio_auth and gradio_auth_path commandline arguments into
+     an iterable of (username, password) tuples.
+     """
+     def process_credential_line(s) -> tuple[str, ...] | None:
+         s = s.strip()
+         if not s:
+             return None
+         return tuple(s.split(':', 1))
+
+     if cmd_opts.gradio_auth:
+         for cred in cmd_opts.gradio_auth.split(','):
+             cred = process_credential_line(cred)
+             if cred:
+                 yield cred
+
+     if cmd_opts.gradio_auth_path:
+         with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file:
+             for line in file.readlines():
+                 for cred in line.strip().split(','):
+                     cred = process_credential_line(cred)
+                     if cred:
+                         yield cred
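+
+ # Editor's note (illustrative, not part of the original file): what the parsing yields:
+ #   --gradio-auth "alice:secret,bob:hunter2"  ->  ("alice", "secret"), ("bob", "hunter2")
+ #   a line "carol:pa:ss" via --gradio-auth-path  ->  ("carol", "pa:ss"), since split(':', 1)
+ #   only splits on the first colon, so passwords may contain colons.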
+
+
+ def configure_sigint_handler():
+     # make the program just exit at ctrl+c without waiting for anything
+     def sigint_handler(sig, frame):
+         print(f'Interrupted with signal {sig} in {frame}')
+         os._exit(0)
+
+     if not os.environ.get("COVERAGE_RUN"):
+         # Don't install the immediate-quit handler when running under coverage,
+         # as then the coverage report won't be generated.
+         signal.signal(signal.SIGINT, sigint_handler)
+
+
+ def configure_opts_onchange():
+     shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()), call=False)
+     shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
+     shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
+     shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
+     shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
+     shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: modules.sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
+     startup_timer.record("opts onchange")
+
+
+ def initialize():
+     fix_asyncio_event_loop_policy()
+     validate_tls_options()
+     configure_sigint_handler()
+     check_versions()
+     modelloader.cleanup_models()
+     configure_opts_onchange()
+
+     modules.sd_models.setup_model()
+     startup_timer.record("setup SD model")
+
+     codeformer.setup_model(cmd_opts.codeformer_models_path)
+     startup_timer.record("setup codeformer")
+
+     gfpgan.setup_model(cmd_opts.gfpgan_models_path)
+     startup_timer.record("setup gfpgan")
+
+     initialize_rest(reload_script_modules=False)
+
+
+ def initialize_rest(*, reload_script_modules=False):
+     """
+     Called both from initialize() and when reloading the webui.
+     """
+     sd_samplers.set_samplers()
+     extensions.list_extensions()
+     startup_timer.record("list extensions")
+
+     restore_config_state_file()
+
+     if cmd_opts.ui_debug_mode:
+         shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
+         modules.scripts.load_scripts()
+         return
+
+     modules.sd_models.list_models()
+     startup_timer.record("list SD models")
+
+     localization.list_localizations(cmd_opts.localizations_dir)
+
+     with startup_timer.subcategory("load scripts"):
+         modules.scripts.load_scripts()
+
+     if reload_script_modules:
+         for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
+             importlib.reload(module)
+         startup_timer.record("reload script modules")
+
+     modelloader.load_upscalers()
+     startup_timer.record("load upscalers")
+
+     modules.sd_vae.refresh_vae_list()
+     startup_timer.record("refresh VAE")
+     modules.textual_inversion.textual_inversion.list_textual_inversion_templates()
+     startup_timer.record("refresh textual inversion templates")
+
+     modules.script_callbacks.on_list_optimizers(modules.sd_hijack_optimizations.list_optimizers)
+     modules.sd_hijack.list_optimizers()
+     startup_timer.record("scripts list_optimizers")
+
+     modules.sd_unet.list_unets()
+     startup_timer.record("scripts list_unets")
+
+     def load_model():
+         """
+         Accesses shared.sd_model property to load model.
+         After it's available, if it has been loaded before this access by some extension,
+         its optimization may be None because the list of optimizers has not been filled
+         by that time, so we apply optimization again.
+         """
+
+         shared.sd_model # noqa: B018
+
+         if modules.sd_hijack.current_optimizer is None:
+             modules.sd_hijack.apply_optimizations()
+
+     Thread(target=load_model).start()
+
+     Thread(target=devices.first_time_calculation).start()
+
+     shared.reload_hypernetworks()
+     startup_timer.record("reload hypernetworks")
+
+     ui_extra_networks.initialize()
+     ui_extra_networks.register_default_pages()
+
+     extra_networks.initialize()
+     extra_networks.register_default_extra_networks()
+     startup_timer.record("initialize extra networks")
+
+
+ def setup_middleware(app):
+     app.middleware_stack = None # reset current middleware to allow modifying user provided list
+     app.add_middleware(GZipMiddleware, minimum_size=1000)
+     configure_cors_middleware(app)
+     app.build_middleware_stack() # rebuild middleware stack on-the-fly
+
+
+ def configure_cors_middleware(app):
+     cors_options = {
+         "allow_methods": ["*"],
+         "allow_headers": ["*"],
+         "allow_credentials": True,
+     }
+     if cmd_opts.cors_allow_origins:
+         cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',')
+     if cmd_opts.cors_allow_origins_regex:
+         cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex
+     app.add_middleware(CORSMiddleware, **cors_options)
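+
+ # Editor's note (hedged example; the flag spellings are inferred from the cmd_opts
+ # attribute names above and should be treated as assumptions):
+ #   python launch.py --cors-allow-origins "http://localhost:3000,https://example.com"
+ #   python launch.py --cors-allow-origins-regex "https://.*\.example\.com"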
+
+
+ def create_api(app):
+     from modules.api.api import Api
+     api = Api(app, queue_lock)
+     return api
+
+
+ def api_only():
+     initialize()
+
+     app = FastAPI()
+     setup_middleware(app)
+     api = create_api(app)
+
+     modules.script_callbacks.app_started_callback(None, app)
+
+     print(f"Startup time: {startup_timer.summary()}.")
+     api.launch(
+         server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1",
+         port=cmd_opts.port if cmd_opts.port else 7861,
+         root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else ""
+     )
+
+
+ def webui():
+     launch_api = cmd_opts.api
+     initialize()
+
+     while 1:
+         if shared.opts.clean_temp_dir_at_start:
+             ui_tempdir.cleanup_tmpdr()
+             startup_timer.record("cleanup temp dir")
+
+         modules.script_callbacks.before_ui_callback()
+         startup_timer.record("scripts before_ui_callback")
+
+         shared.demo = modules.ui.create_ui()
+         startup_timer.record("create ui")
+
+         if not cmd_opts.no_gradio_queue:
+             shared.demo.queue(concurrency_count=16)
+         else:
+             print('Server queues disabled')
+             shared.demo.progress_tracking = False
+
+         gradio_auth_creds = list(get_gradio_auth_creds()) or None
+
+         app, local_url, share_url = shared.demo.launch(
+             share=cmd_opts.share,
+             server_name=server_name,
+             server_port=cmd_opts.port,
+             ssl_keyfile=cmd_opts.tls_keyfile,
+             ssl_certfile=cmd_opts.tls_certfile,
+             ssl_verify=cmd_opts.disable_tls_verify,
+             debug=cmd_opts.gradio_debug,
+             auth=gradio_auth_creds,
+             inbrowser=cmd_opts.autolaunch and os.getenv('SD_WEBUI_RESTARTING') != '1',
+             prevent_thread_lock=True,
+             allowed_paths=cmd_opts.gradio_allowed_path,
+             app_kwargs={
+                 "docs_url": "/docs",
+                 "redoc_url": "/redoc",
+             },
+             root_path=f"/{cmd_opts.subpath}" if cmd_opts.subpath else "",
+         )
+
+         # after initial launch, disable --autolaunch for subsequent restarts
+         cmd_opts.autolaunch = False
+
+         startup_timer.record("gradio launch")
+
+         # gradio uses a very open CORS policy via app.user_middleware, which makes it possible for
+         # an attacker to trick the user into opening a malicious HTML page, which makes a request to the
+         # running web ui and does whatever the attacker wants, including installing an extension and
+         # running its code. We disable this here. Suggested by RyotaK.
+         app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
+
+         setup_middleware(app)
+
+         modules.progress.setup_progress_api(app)
+         modules.ui.setup_ui_api(app)
+
+         if launch_api:
+             create_api(app)
+
+         ui_extra_networks.add_pages_to_demo(app)
+
+         startup_timer.record("add APIs")
+
+         with startup_timer.subcategory("app_started_callback"):
+             modules.script_callbacks.app_started_callback(shared.demo, app)
+
+         timer.startup_record = startup_timer.dump()
+         print(f"Startup time: {startup_timer.summary()}.")
+
+         try:
+             while True:
+                 server_command = shared.state.wait_for_server_command(timeout=5)
+                 if server_command:
+                     if server_command in ("stop", "restart"):
+                         break
+                     else:
+                         print(f"Unknown server command: {server_command}")
+         except KeyboardInterrupt:
+             print('Caught KeyboardInterrupt, stopping...')
+             server_command = "stop"
+
+         if server_command == "stop":
+             print("Stopping server...")
+             # If we catch a keyboard interrupt, we want to stop the server and exit.
+             shared.demo.close()
+             break
+
+         print('Restarting UI...')
+         shared.demo.close()
+         time.sleep(0.5)
+         startup_timer.reset()
+         modules.script_callbacks.app_reload_callback()
+         startup_timer.record("app reload callback")
+         modules.script_callbacks.script_unloaded_callback()
+         startup_timer.record("scripts unloaded callback")
+         initialize_rest(reload_script_modules=True)
+
+
+ if __name__ == "__main__":
+     if cmd_opts.nowebui:
+         api_only()
+     else:
+         webui()
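Usage note for the entry point above (the `--nowebui` spelling is inferred from `cmd_opts.nowebui`, so treat it as an assumption):

    python webui.py             # full gradio UI via webui()
    python webui.py --nowebui   # API-only FastAPI server via api_only(), port 7861 unless --port is set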
webui.sh ADDED
@@ -0,0 +1,254 @@
+ #!/usr/bin/env bash
+ #################################################
+ # Please do not make any changes to this file, #
+ # change the variables in webui-user.sh instead #
+ #################################################
+
+
+ use_venv=1
+ if [[ $venv_dir == "-" ]]; then
+     use_venv=0
+ fi
+
+ SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+
+
+ # If run from macOS, load defaults from webui-macos-env.sh
+ if [[ "$OSTYPE" == "darwin"* ]]; then
+     if [[ -f "$SCRIPT_DIR"/webui-macos-env.sh ]]
+     then
+         source "$SCRIPT_DIR"/webui-macos-env.sh
+     fi
+ fi
+
+ # Read variables from webui-user.sh
+ # shellcheck source=/dev/null
+ if [[ -f "$SCRIPT_DIR"/webui-user.sh ]]
+ then
+     source "$SCRIPT_DIR"/webui-user.sh
+ fi
+
+ # Set defaults
+ # Install directory without trailing slash
+ if [[ -z "${install_dir}" ]]
+ then
+     install_dir="$SCRIPT_DIR"
+ fi
+
+ # Name of the subdirectory (defaults to stable-diffusion-webui-ux)
+ if [[ -z "${clone_dir}" ]]
+ then
+     clone_dir="stable-diffusion-webui-ux"
+ fi
+
+ # python3 executable
+ if [[ -z "${python_cmd}" ]]
+ then
+     python_cmd="python3"
+ fi
+
+ # git executable
+ if [[ -z "${GIT}" ]]
+ then
+     export GIT="git"
+ fi
+
+ # python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv)
+ if [[ -z "${venv_dir}" ]] && [[ $use_venv -eq 1 ]]
+ then
+     venv_dir="venv"
+ fi
+
+ if [[ -z "${LAUNCH_SCRIPT}" ]]
+ then
+     LAUNCH_SCRIPT="launch.py"
+ fi
+
+ # this script cannot be run as root by default
+ can_run_as_root=0
+
+ # read any command line flags to the webui.sh script
+ while getopts "f" flag > /dev/null 2>&1
+ do
+     case ${flag} in
+         f) can_run_as_root=1;;
+         *) break;;
+     esac
+ done
+
+ # Disable sentry logging
+ export ERROR_REPORTING=FALSE
+
+ # Do not reinstall existing pip packages on Debian/Ubuntu
+ export PIP_IGNORE_INSTALLED=0
+
+ # Pretty print
+ delimiter="################################################################"
+
+ printf "\n%s\n" "${delimiter}"
+ printf "\e[1m\e[32mInstall script for stable-diffusion + Web UI\n"
+ printf "\e[1m\e[34mTested on Debian 11 (Bullseye)\e[0m"
+ printf "\n%s\n" "${delimiter}"
+
+ # Do not run as root
+ if [[ $(id -u) -eq 0 && can_run_as_root -eq 0 ]]
+ then
+     printf "\n%s\n" "${delimiter}"
+     printf "\e[1m\e[31mERROR: This script must not be launched as root, aborting...\e[0m"
+     printf "\n%s\n" "${delimiter}"
+     exit 1
+ else
+     printf "\n%s\n" "${delimiter}"
+     printf "Running as \e[1m\e[32m%s\e[0m user" "$(whoami)"
+     printf "\n%s\n" "${delimiter}"
+ fi
+
+ if [[ $(getconf LONG_BIT) = 32 ]]
+ then
+     printf "\n%s\n" "${delimiter}"
+     printf "\e[1m\e[31mERROR: Unsupported: running on a 32-bit OS\e[0m"
+     printf "\n%s\n" "${delimiter}"
+     exit 1
+ fi
+
+ if [[ -d .git ]]
+ then
+     printf "\n%s\n" "${delimiter}"
+     printf "Repo already cloned, using it as install directory"
+     printf "\n%s\n" "${delimiter}"
+     install_dir="${PWD}/../"
+     clone_dir="${PWD##*/}"
+ fi
+
+ # Check prerequisites
+ gpu_info=$(lspci 2>/dev/null | grep -E "VGA|Display")
+ case "$gpu_info" in
+     *"Navi 1"*)
+         export HSA_OVERRIDE_GFX_VERSION=10.3.0
+         if [[ -z "${TORCH_COMMAND}" ]]
+         then
+             pyv="$(${python_cmd} -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))')"
+             if [[ $(bc <<< "$pyv <= 3.10") -eq 1 ]]
+             then
+                 # Navi users will still use torch 1.13 because 2.0 does not seem to work.
+                 export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --index-url https://download.pytorch.org/whl/rocm5.2"
+             else
+                 printf "\e[1m\e[31mERROR: RX 5000 series GPUs must be using at max python 3.10, aborting...\e[0m"
+                 exit 1
+             fi
+         fi
+         ;;
+     *"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0
+         ;;
+     *"Navi 3"*) [[ -z "${TORCH_COMMAND}" ]] && \
+         export TORCH_COMMAND="pip install --pre torch==2.1.0.dev-20230614+rocm5.5 torchvision==0.16.0.dev-20230614+rocm5.5 --index-url https://download.pytorch.org/whl/nightly/rocm5.5"
+         # Navi 3 needs at least 5.5 which is only on the nightly chain
+         ;;
+     *"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0
+         printf "\n%s\n" "${delimiter}"
+         printf "Experimental support for Renoir: make sure to have at least 4GB of VRAM and 10GB of RAM or enable cpu mode: --use-cpu all --no-half"
+         printf "\n%s\n" "${delimiter}"
+         ;;
+     *)
+         ;;
+ esac
+ if ! echo "$gpu_info" | grep -q "NVIDIA";
+ then
+     if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]]
+     then
+         export TORCH_COMMAND="pip install torch==2.0.1+rocm5.4.2 torchvision==0.15.2+rocm5.4.2 --index-url https://download.pytorch.org/whl/rocm5.4.2"
+     fi
+ fi
+
+ for preq in "${GIT}" "${python_cmd}"
+ do
+     if ! hash "${preq}" &>/dev/null
+     then
+         printf "\n%s\n" "${delimiter}"
+         printf "\e[1m\e[31mERROR: %s is not installed, aborting...\e[0m" "${preq}"
+         printf "\n%s\n" "${delimiter}"
+         exit 1
+     fi
+ done
+
+ if [[ $use_venv -eq 1 ]] && ! "${python_cmd}" -c "import venv" &>/dev/null
+ then
+     printf "\n%s\n" "${delimiter}"
+     printf "\e[1m\e[31mERROR: python3-venv is not installed, aborting...\e[0m"
+     printf "\n%s\n" "${delimiter}"
+     exit 1
+ fi
+
+ cd "${install_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/, aborting...\e[0m" "${install_dir}"; exit 1; }
+ if [[ -d "${clone_dir}" ]]
+ then
+     cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
+ else
+     printf "\n%s\n" "${delimiter}"
+     printf "Clone stable-diffusion-webui"
+     printf "\n%s\n" "${delimiter}"
+     "${GIT}" clone https://github.com/anapnoe/stable-diffusion-webui-ux.git "${clone_dir}"
+     cd "${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
+ fi
+
+ if [[ $use_venv -eq 1 ]] && [[ -z "${VIRTUAL_ENV}" ]];
+ then
+     printf "\n%s\n" "${delimiter}"
+     printf "Create and activate python venv"
+     printf "\n%s\n" "${delimiter}"
+     cd "${install_dir}"/"${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
+     if [[ ! -d "${venv_dir}" ]]
+     then
+         "${python_cmd}" -m venv "${venv_dir}"
+         first_launch=1
+     fi
+     # shellcheck source=/dev/null
+     if [[ -f "${venv_dir}"/bin/activate ]]
+     then
+         source "${venv_dir}"/bin/activate
+     else
+         printf "\n%s\n" "${delimiter}"
+         printf "\e[1m\e[31mERROR: Cannot activate python venv, aborting...\e[0m"
+         printf "\n%s\n" "${delimiter}"
+         exit 1
+     fi
+ else
+     printf "\n%s\n" "${delimiter}"
+     printf "python venv already activated or running without venv: %s" "${VIRTUAL_ENV}"
+     printf "\n%s\n" "${delimiter}"
+ fi
+
+ # Try using TCMalloc on Linux
+ prepare_tcmalloc() {
+     if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then
+         TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -Po "libtcmalloc(_minimal|)\.so\.\d" | head -n 1)"
+         if [[ ! -z "${TCMALLOC}" ]]; then
+             echo "Using TCMalloc: ${TCMALLOC}"
+             export LD_PRELOAD="${TCMALLOC}"
+         else
+             printf "\e[1m\e[31mCannot locate TCMalloc (improves CPU memory usage)\e[0m\n"
+         fi
+     fi
+ }
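+
+ # Editor's note (added commentary, not in the original commit): prepare_tcmalloc runs
+ # only on Linux, is skipped when NO_TCMALLOC is set (see webui-user.sh above) or when
+ # LD_PRELOAD is already populated, and otherwise preloads the first libtcmalloc*.so
+ # that ldconfig reports.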
+
+ KEEP_GOING=1
+ export SD_WEBUI_RESTART=tmp/restart
+ while [[ "$KEEP_GOING" -eq "1" ]]; do
+     if [[ ! -z "${ACCELERATE}" ]] && [[ "${ACCELERATE}" == "True" ]] && [ -x "$(command -v accelerate)" ]; then
+         printf "\n%s\n" "${delimiter}"
+         printf "Accelerating launch.py..."
+         printf "\n%s\n" "${delimiter}"
+         prepare_tcmalloc
+         accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@"
+     else
+         printf "\n%s\n" "${delimiter}"
+         printf "Launching launch.py..."
+         printf "\n%s\n" "${delimiter}"
+         prepare_tcmalloc
+         "${python_cmd}" "${LAUNCH_SCRIPT}" "$@"
+     fi
+
+     if [[ ! -f tmp/restart ]]; then
+         KEEP_GOING=0
+     fi
+ done