DmitrMakeev committed on
Commit
fee99f1
1 Parent(s): cafa4f8

Upload 3 files

script/ContorlNet_I2I_sequence_toyxyz_V2.py ADDED
@@ -0,0 +1,367 @@
+ import copy
+ import os
+
+ import cv2
+ import gradio as gr
+ import numpy as np
+ import modules.scripts as scripts
+
+ from modules import images, processing
+ from modules.processing import process_images, Processed
+ from modules.shared import opts
+ from PIL import Image, ImageOps
+
+
+ # Returns a list of images located in the input path. For ControlNet images.
+ def get_all_frames_from_path(path):
+     if not os.path.isdir(path):
+         return None
+     frame_list = []
+     for filename in sorted(os.listdir(path)):
+         if filename.endswith(".jpg") or filename.endswith(".png"):
+             img_path = os.path.join(path, filename)
+             img = cv2.imread(img_path)
+             if img is not None:
+                 frame_list.append(img)
+     # Duplicate the first frame: frame 0 acts as a priming pass in run() and is not saved.
+     if frame_list:
+         frame_list.insert(0, frame_list[0])
+     return frame_list
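+
+ # Example (illustrative): a folder containing 0001.png, 0002.png, 0003.png
+ # yields [img1, img1, img2, img3] -- one duplicated head frame, three real frames.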
+
+
+ # Returns a list of images located in the input path. For color images.
+ def get_images_from_path(path):
+     if not os.path.isdir(path):
+         return None
+     image_list = []
+     # sorted() keeps the frames in filename order (os.listdir alone does not).
+     for filename in sorted(os.listdir(path)):
+         if filename.endswith('.jpg') or filename.endswith('.png'):
+             img_path = os.path.join(path, filename)
+             img = Image.open(img_path)
+             image_list.append(img)
+     # Pad the sequence: repeat the last frame and duplicate the first,
+     # so reading index frame + 1 stays valid throughout the generation loop.
+     if image_list:
+         image_list.append(image_list[-1])
+         image_list.insert(0, image_list[0])
+     return image_list
+
+
+ # Returns the frame count of the shortest sequence in the list. For ControlNet.
+ def get_min_frame_num(video_list):
+     min_frame_num = -1
+     for video in video_list:
+         if video is None:
+             continue
+         frame_num = len(video)
+         print(frame_num)
+         if min_frame_num < 0 or frame_num < min_frame_num:
+             min_frame_num = frame_num
+     return min_frame_num
+
+
+ # Blend methods
+
+ def basic(target, blend, opacity):
+     return target * opacity + blend * (1 - opacity)
+
+
+ def blender(func):
+     # Wraps a raw blend formula: apply it, mix the result back over the
+     # blend layer at the given opacity, then clamp to [0, 1].
+     def blend(target, blend, opacity=1, *args):
+         res = func(target, blend, *args)
+         res = basic(res, blend, opacity)
+         return np.clip(res, 0, 1)
+     return blend
+
+
+ class Blend:
+     @classmethod
+     def method(cls, name):
+         return getattr(cls, name)
+
+     normal = basic
+
+     @staticmethod
+     @blender
+     def darken(target, blend, *args):
+         return np.minimum(target, blend)
+
+     @staticmethod
+     @blender
+     def multiply(target, blend, *args):
+         return target * blend
+
+     @staticmethod
+     @blender
+     def color_burn(target, blend, *args):
+         return 1 - (1 - target) / blend
+
+     @staticmethod
+     @blender
+     def linear_burn(target, blend, *args):
+         return target + blend - 1
+
+     @staticmethod
+     @blender
+     def lighten(target, blend, *args):
+         return np.maximum(target, blend)
+
+     @staticmethod
+     @blender
+     def screen(target, blend, *args):
+         return 1 - (1 - target) * (1 - blend)
+
+     @staticmethod
+     @blender
+     def color_dodge(target, blend, *args):
+         return target / (1 - blend)
+
+     @staticmethod
+     @blender
+     def linear_dodge(target, blend, *args):
+         return target + blend
+
+     @staticmethod
+     @blender
+     def overlay(target, blend, *args):
+         return (target > 0.5) * (1 - (2 - 2 * target) * (1 - blend)) + \
+             (target <= 0.5) * (2 * target * blend)
+
+     @staticmethod
+     @blender
+     def soft_light(target, blend, *args):
+         return (blend > 0.5) * (1 - (1 - target) * (1 - (blend - 0.5))) + \
+             (blend <= 0.5) * (target * (blend + 0.5))
+
+     @staticmethod
+     @blender
+     def hard_light(target, blend, *args):
+         return (blend > 0.5) * (1 - (1 - target) * (2 - 2 * blend)) + \
+             (blend <= 0.5) * (2 * target * blend)
+
+     @staticmethod
+     @blender
+     def vivid_light(target, blend, *args):
+         return (blend > 0.5) * (1 - (1 - target) / (2 * blend - 1)) + \
+             (blend <= 0.5) * (target / (1 - 2 * blend))
+
+     @staticmethod
+     @blender
+     def linear_light(target, blend, *args):
+         return (blend > 0.5) * (target + 2 * (blend - 0.5)) + \
+             (blend <= 0.5) * (target + 2 * blend)
+
+     @staticmethod
+     @blender
+     def pin_light(target, blend, *args):
+         return (blend > 0.5) * np.maximum(target, 2 * (blend - 0.5)) + \
+             (blend <= 0.5) * np.minimum(target, 2 * blend)
+
+     @staticmethod
+     @blender
+     def difference(target, blend, *args):
+         return np.abs(target - blend)
+
+     @staticmethod
+     @blender
+     def exclusion(target, blend, *args):
+         return 0.5 - 2 * (target - 0.5) * (blend - 0.5)
+
+
+ blend_methods = [i for i in Blend.__dict__.keys() if i[0] != '_' and i != 'method']
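+
+ # Dispatch sketch (values assumed): the blend functions expect float arrays
+ # in [0, 1], e.g.
+ #     a = np.full((4, 4, 3), 0.5)
+ #     out = Blend.method("multiply")(a, a, 1)   # 0.25 everywhere
+ # Blend.method("normal") is `basic`, a plain opacity mix of the two layers.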
+
+
+ def blend_images(base_img, blend_img, blend_method, blend_opacity, do_invert):
+     # Base image as float RGB in [0, 1].
+     img_base = np.array(base_img.convert("RGB")).astype(np.float64) / 255
+
+     if do_invert:
+         img_to_blend = ImageOps.invert(blend_img.convert('RGB'))
+     else:
+         img_to_blend = blend_img
+
+     # Match the blend image to the base image's size before mixing.
+     img_to_blend = img_to_blend.resize((base_img.width, base_img.height))
+     img_to_blend = np.array(img_to_blend.convert("RGB")).astype(np.float64) / 255
+
+     img_blended = Blend.method(blend_method)(img_to_blend, img_base, blend_opacity)
+     img_blended *= 255
+
+     return Image.fromarray(img_blended.astype(np.uint8), mode='RGB')
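+
+ # Usage sketch (arguments assumed): base is the previous frame or the initial
+ # img2img image, blend is the per-frame color image, e.g.
+ #     out = blend_images(prev_frame, color_frame, "screen", 0.3, False)
+ # i.e. 30% of the screen-blended color over 70% of the untouched base.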
+
+
+ # Define UI and script properties.
+ class Script(scripts.Script):
+
+     def title(self):
+         return "controlnet I2I sequence_toyxyz_v2"
+
+     def show(self, is_img2img):
+         return is_img2img
+
+     def ui(self, is_img2img):
+         ctrls_group = ()
+         max_models = opts.data.get("control_net_max_models_num", 1)
+
+         input_list = []
+
+         with gr.Group():
+             with gr.Accordion("ControlNet-I2I-sequence-toyxyz", open=True):
+                 with gr.Column():
+                     feed_prev_frame = gr.Checkbox(value=False, label="Feed previous frame / Reduce flickering by feeding back the previous frame generated by Img2Img")
+                     use_init_img = gr.Checkbox(value=False, label="Blend color image / Blend the color image sequence with the initial Img2Img image or the previous frame")
+                     use_TemporalNet = gr.Checkbox(value=False, label="Use TemporalNet / Use TemporalNet to reduce flicker between frames. Add TemporalNet in addition to the ControlNet units you need; it must be the last unit in the ControlNet list.")
+                     blendmode = gr.Dropdown(blend_methods, value='normal', label='Blend mode / How to blend the color image with the previous frame or the initial Img2Img image')
+                     opacityvalue = gr.Slider(0, 1, value=0, label="Opacity / Previous frame or initial Img2Img image + (color image * opacity)", info="Choose between 0 and 1")
+
+                     for i in range(max_models):
+                         input_path = gr.Textbox(label=f"ControlNet-{i}", placeholder="image sequence path")
+                         input_list.append(input_path)
+
+                     tone_image_path = gr.Textbox(label="Color_Image / Color images to be used for Img2Img, in sequence", placeholder="image sequence path")
+                     output_path = gr.Textbox(label="Output_path / Existing contents at this path are deleted; the path is created if it does not exist", placeholder="Output path")
+
+         ctrls_group += tuple(input_list) + (use_TemporalNet, use_init_img, opacityvalue, blendmode, feed_prev_frame, tone_image_path, output_path)
+
+         return ctrls_group
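+
+ # ctrls_group packs the ControlNet path boxes first, so run() below recovers
+ # the trailing widgets by negative index: args[-1] output_path,
+ # args[-2] tone_image_path, args[-3] feed_prev_frame, args[-4] blendmode,
+ # args[-5] opacityvalue, args[-6] use_init_img, args[-7] use_TemporalNet.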
+
+
+     # Image generation
+     def run(self, p, *args):
+
+         output_path = args[-1]  # the last argument is the output path
+         feedprev = args[-3]
+         blendm = args[-4]
+         opacityval = args[-5]
+         useinit = args[-6]
+         usetempo = args[-7]
+
+         # If the output path exists, empty it; otherwise create it.
+         if os.path.isdir(output_path):
+             for file in os.scandir(output_path):
+                 os.remove(file.path)
+         else:
+             os.mkdir(output_path)
+
+         # Number of ControlNet models.
+         video_num = opts.data.get("control_net_max_models_num", 1)
+
+         # ControlNet image sequence lists, one per model.
+         image_list = [get_all_frames_from_path(image) for image in args[:video_num]]
+
+         # Color image sequence list.
+         color_image_list = get_images_from_path(args[-2])
+
+         # Get the first frame.
+         previmg = p.init_images
+         tempoimg = p.init_images[0]
+
+         # If img2img color correction is enabled in the webui settings,
+         # correction is performed against the first frame.
+         initial_color_corrections = [processing.setup_color_correction(p.init_images[0])]
+
+         # Keep the initial img2img image.
+         initial_image = p.init_images[0]
+
+         # Total number of frames.
+         frame_num = get_min_frame_num(image_list)
+
+         # Image processing
+         if frame_num > 0:
+             output_image_list = []
+
+             for frame in range(frame_num):
+                 copy_p = copy.copy(p)
+                 copy_p.control_net_input_image = []
+                 for video in image_list:
+                     if video is None:
+                         continue
+                     copy_p.control_net_input_image.append(video[frame])
+
+                 if usetempo:
+                     copy_p.control_net_input_image.append(tempoimg)
+
+                 if color_image_list and not feedprev:
+                     # Guard frame + 1: the color list is indexed one ahead.
+                     if frame + 1 < len(color_image_list):
+                         tone_image = color_image_list[frame + 1]
+                         if useinit:
+                             tone_image = blend_images(initial_image, tone_image, blendm, opacityval, False)
+                         p.init_images = [tone_image.convert("RGB")]
+
+                 proc = process_images(copy_p)
+
+                 if feedprev and not useinit:
+                     if previmg is None:
+                         continue
+                     previmg = proc.images[0]
+                     if frame == 0:
+                         previmg = initial_image
+                     p.init_images = [previmg]
+                     if opts.img2img_color_correction:
+                         p.color_corrections = initial_color_corrections
+
+                 if feedprev and color_image_list and useinit:
+                     if previmg is None:
+                         continue
+                     previmg = proc.images[0]
+                     if frame == 0:
+                         previmg = initial_image
+                     previmg = blend_images(previmg, color_image_list[frame + 1], blendm, opacityval, False)
+                     p.init_images = [previmg]
+                     if opts.img2img_color_correction:
+                         p.color_corrections = initial_color_corrections
+
+                 img = proc.images[0]
+
+                 if usetempo and frame > 0:
+                     tempoimg = proc.images[0]
+
+                 # Save the image; frame 0 is the duplicated priming frame and is skipped.
+                 if frame > 0:
+                     images.save_image(img, output_path, f"Frame_{frame}")
+                 copy_p.close()
+
+         else:
+             proc = process_images(p)
+
+         return proc
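
A note on frame alignment in the script above: both loaders prepend a duplicate first image, run() saves only frames with frame > 0, and the color sequence is read at frame + 1, so N ControlNet input frames yield N saved images, Frame_1 through Frame_N. A sketch with three assumed inputs:

    control = [c1, c1, c2, c3]        # get_all_frames_from_path
    color   = [k1, k1, k2, k3, k3]    # get_images_from_path (extra tail frame)
    # frame runs 0..3; frame 0 is a throwaway priming pass, frames 1..3 are saved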
script/loopback_wave.py ADDED
@@ -0,0 +1,345 @@
+ import os
+ import platform
+ import math
+ import re
+ import subprocess as sp
+
+ import modules.scripts as scripts
+ import gradio as gr
+
+ from modules import processing, shared, sd_samplers, images
+ from modules.processing import Processed
+ from modules.shared import opts, cmd_opts, state
+
+
+ # Match @wave_completed(a, b) / @wave_remaining(a, b) weight directives in prompts.
+ wave_completed_regex = r'@wave_completed\(([\-]?[0-9]*\.?[0-9]+), ?([\-]?[0-9]*\.?[0-9]+)\)'
+ wave_remaining_regex = r'@wave_remaining\(([\-]?[0-9]*\.?[0-9]+), ?([\-]?[0-9]*\.?[0-9]+)\)'
+
+
+ def run_cmd(cmd):
+     cmd = [str(arg) for arg in cmd]
+     print("Executing %s" % " ".join(cmd))
+     popen_params = {"stdout": sp.DEVNULL, "stderr": sp.PIPE, "stdin": sp.DEVNULL}
+
+     # CREATE_NO_WINDOW: keep ffmpeg from opening a console window on Windows.
+     if os.name == "nt":
+         popen_params["creationflags"] = 0x08000000
+
+     proc = sp.Popen(cmd, **popen_params)
+     out, err = proc.communicate()
+     proc.stderr.close()
+
+     if proc.returncode:
+         raise IOError(err.decode("utf8"))
+
+     del proc
+
+
+ def encode_video(input_pattern, starting_number, output_dir, fps, quality, encoding, create_segments, segment_duration, ffmpeg_path):
+     two_pass = (encoding == "VP9 (webm)")
+     alpha_channel = ("webm" in encoding)
+     suffix = "webm" if "webm" in encoding else "mp4"
+     output_location = output_dir + f".{suffix}"
+
+     encoding_lib = {
+         "VP9 (webm)": "libvpx-vp9",
+         "VP8 (webm)": "libvpx",
+         "H.264 (mp4)": "libx264",
+         "H.265 (mp4)": "libx265",
+     }[encoding]
+
+     args = [
+         "-framerate", fps,
+         "-start_number", int(starting_number),
+         "-i", input_pattern,
+         "-c:v", encoding_lib,
+         "-b:v", "0",
+         "-crf", quality,
+     ]
+
+     if encoding_lib == "libvpx-vp9":
+         args += ["-pix_fmt", "yuva420p"]
+
+     if ffmpeg_path == "":
+         ffmpeg_path = "ffmpeg"
+         if platform.system() == "Windows":
+             ffmpeg_path += ".exe"
+
+     print("\n\n")
+     if two_pass:
+         first_pass_args = args + [
+             "-pass", "1",
+             "-an",
+             "-f", "null",
+             os.devnull
+         ]
+
+         second_pass_args = args + [
+             "-pass", "2",
+             output_location
+         ]
+
+         print("Running first pass ffmpeg encoding")
+         run_cmd([ffmpeg_path] + first_pass_args)
+         print("Running second pass ffmpeg encoding. This could take a while...")
+         run_cmd([ffmpeg_path] + second_pass_args)
+     else:
+         print("Running ffmpeg encoding. This could take a while...")
+         run_cmd([ffmpeg_path] + args + [output_location])
+
+     if create_segments:
+         print("Segmenting video")
+         run_cmd([ffmpeg_path] + [
+             "-i", output_location,
+             "-f", "segment",
+             "-segment_time", segment_duration,
+             "-vcodec", "copy",
+             "-acodec", "copy",
+             f"{output_dir}.%d.{suffix}"
+         ])
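+
+ # For VP9 the assembled command looks roughly like (illustrative paths):
+ #   ffmpeg -framerate 10 -start_number 0 -i frames/%d.png -c:v libvpx-vp9 \
+ #          -b:v 0 -crf 40 -pix_fmt yuva420p -pass 1 -an -f null <os.devnull>
+ #   ffmpeg ... -pass 2 frames.webm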
+
+
+ def set_weights(match_obj, wave_progress):
+     # Interpolate linearly between the two captured weights by wave_progress.
+     weight_0 = 0
+     weight_1 = 0
+     if match_obj.group(1) is not None:
+         weight_0 = float(match_obj.group(1))
+     if match_obj.group(2) is not None:
+         weight_1 = float(match_obj.group(2))
+
+     max_weight = max(weight_0, weight_1)
+     min_weight = min(weight_0, weight_1)
+
+     weight_range = max_weight - min_weight
+     weight = min_weight + weight_range * wave_progress
+     return str(weight)
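+
+ # Example: with wave_progress = 0.25, the prompt fragment
+ #     (flowers:@wave_completed(0.2, 1.0))
+ # is rewritten to (flowers:0.4), since 0.2 + (1.0 - 0.2) * 0.25 = 0.4;
+ # @wave_remaining uses 1 - wave_progress instead.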
+
+
+ class Script(scripts.Script):
+     def title(self):
+         return "Loopback Wave V1.4.1"
+
+     def show(self, is_img2img):
+         return is_img2img
+
+     def ui(self, is_img2img):
+         frames = gr.Slider(minimum=1, maximum=2048, step=1, label='Frames', value=100)
+         frames_per_wave = gr.Slider(minimum=0, maximum=120, step=1, label='Frames Per Wave', value=20)
+         denoising_strength_change_amplitude = gr.Slider(minimum=0, maximum=1, step=0.01, label='Max additional denoise', value=0.6)
+         denoising_strength_change_offset = gr.Number(minimum=0, maximum=180, step=1, label='Wave offset (ignore this if you don\'t know what it means)', value=0)
+         initial_image_number = gr.Number(minimum=0, label='Initial generated image number', value=0)
+
+         save_prompts = gr.Checkbox(label='Save prompts as text file', value=True)
+         prompts = gr.Textbox(label="Prompt Changes", lines=5, value="")
+
+         save_video = gr.Checkbox(label='Save results as video', value=True)
+         output_dir = gr.Textbox(label="Video Name", lines=1, value="")
+         video_fps = gr.Slider(minimum=1, maximum=120, step=1, label='Frames per second', value=10)
+         video_quality = gr.Slider(minimum=0, maximum=60, step=1, label='Video Quality (crf)', value=40)
+         video_encoding = gr.Dropdown(label='Video encoding', value="VP9 (webm)", choices=["VP9 (webm)", "VP8 (webm)", "H.265 (mp4)", "H.264 (mp4)"])
+         ffmpeg_path = gr.Textbox(label="ffmpeg binary. Only set this if it fails otherwise.", lines=1, value="")
+
+         segment_video = gr.Checkbox(label='Cut video into segments', value=True)
+         video_segment_duration = gr.Slider(minimum=10, maximum=60, step=1, label='Video Segment Duration (seconds)', value=20)
+
+         return [frames, denoising_strength_change_amplitude, frames_per_wave, denoising_strength_change_offset, initial_image_number, prompts, save_prompts, save_video, output_dir, video_fps, video_quality, video_encoding, ffmpeg_path, segment_video, video_segment_duration]
+
+     def run(self, p, frames, denoising_strength_change_amplitude, frames_per_wave, denoising_strength_change_offset, initial_image_number, prompts: str, save_prompts, save_video, output_dir, video_fps, video_quality, video_encoding, ffmpeg_path, segment_video, video_segment_duration):
+         processing.fix_seed(p)
+         batch_count = p.n_iter
+         p.extra_generation_params = {
+             "Max Additional Denoise": denoising_strength_change_amplitude,
+             "Frames per wave": frames_per_wave,
+             "Wave Offset": denoising_strength_change_offset,
+         }
+
+         # Frames are saved manually so ffmpeg can pick them up by number.
+         p.do_not_save_samples = True
+
+         changes_dict = {}
+
+         p.batch_size = 1
+         p.n_iter = 1
+
+         output_images, info = None, None
+         initial_seed = None
+         initial_info = None
+
+         grids = []
+         all_images = []
+         original_init_image = p.init_images
+         state.job_count = frames * batch_count
+
+         initial_color_corrections = [processing.setup_color_correction(p.init_images[0])]
+         initial_denoising_strength = p.denoising_strength
+
+         if output_dir == "":
+             output_dir = str(p.seed)
+         else:
+             output_dir = output_dir + "-" + str(p.seed)
+
+         loopback_wave_path = os.path.join(p.outpath_samples, "loopback-wave")
+         loopback_wave_images_path = os.path.join(loopback_wave_path, output_dir)
+
+         os.makedirs(loopback_wave_images_path, exist_ok=True)
+
+         p.outpath_samples = loopback_wave_images_path
+
+         prompts = prompts.strip()
+
+         if save_prompts:
+             with open(loopback_wave_images_path + "-prompts.txt", "w") as f:
+                 generation_settings = [
+                     "Generation Settings",
+                     f"Total Frames: {frames}",
+                     f"Frames Per Wave: {frames_per_wave}",
+                     f"Wave Offset: {denoising_strength_change_offset}",
+                     f"Base Denoising Strength: {initial_denoising_strength}",
+                     f"Max Additional Denoise: {denoising_strength_change_amplitude}",
+                     f"Initial Image Number: {initial_image_number}",
+                     "",
+                     "Video Encoding Settings",
+                     f"Save Video: {save_video}"
+                 ]
+
+                 if save_video:
+                     generation_settings = generation_settings + [
+                         f"Framerate: {video_fps}",
+                         f"Quality: {video_quality}",
+                         f"Encoding: {video_encoding}",
+                         f"Create Segmented Video: {segment_video}"
+                     ]
+
+                     if segment_video:
+                         generation_settings = generation_settings + [f"Segment Duration: {video_segment_duration}"]
+
+                 generation_settings = generation_settings + [
+                     "",
+                     "Prompt Details",
+                     "Initial Prompt: " + p.prompt,
+                     "",
+                     "Negative Prompt: " + p.negative_prompt,
+                     "",
+                     "Frame change prompts:",
+                     prompts
+                 ]
+
+                 f.write('\n'.join(generation_settings))
+
+         # Parse the per-frame prompt changes: "frame::prompt" or "frame::seed::prompt".
+         if prompts:
+             lines = prompts.split("\n")
+             for prompt_line in lines:
+                 params = prompt_line.split("::")
+                 if len(params) == 2:
+                     changes_dict[params[0]] = {"prompt": params[1]}
+                 elif len(params) == 3:
+                     changes_dict[params[0]] = {"seed": params[1], "prompt": params[2]}
+                 else:
+                     raise IOError(f"Invalid input in prompt line: {prompt_line}")
+
+         raw_prompt = p.prompt
+
+         for n in range(batch_count):
+             history = []
+
+             # Reset to the original init image at the start of each batch.
+             p.init_images = original_init_image
+
+             seed_state = "adding"
+             current_seed = p.seed
+
+             for i in range(frames):
+                 current_seed = p.seed
+                 state.job = ""
+
+                 if str(i) in changes_dict:
+                     raw_prompt = changes_dict[str(i)]["prompt"]
+                     state.job = "New prompt: %s\n" % raw_prompt
+
+                     if "seed" in changes_dict[str(i)]:
+                         current_seed = changes_dict[str(i)]["seed"]
+
+                         # A "+"/"-" prefix keeps incrementing/decrementing the
+                         # seed every frame; a bare number holds it constant.
+                         if current_seed.startswith("+"):
+                             seed_state = "adding"
+                             current_seed = current_seed.strip("+")
+                         elif current_seed.startswith("-"):
+                             seed_state = "subtracting"
+                             current_seed = current_seed.strip("-")
+                         else:
+                             seed_state = "constant"
+
+                         current_seed = int(current_seed)
+                         p.seed = current_seed
+
+                 p.n_iter = 1
+                 p.batch_size = 1
+                 p.do_not_save_grid = True
+
+                 if opts.img2img_color_correction:
+                     p.color_corrections = initial_color_corrections
+
+                 # Position within the current wave, in [0, 1], shifted by the
+                 # offset (given in degrees of a 180-degree half cycle).
+                 wave_progress = ((i % frames_per_wave) + denoising_strength_change_offset / 180) / (frames_per_wave - 1)
+                 print(wave_progress)
+                 new_prompt = re.sub(wave_completed_regex, lambda x: set_weights(x, wave_progress), raw_prompt)
+                 new_prompt = re.sub(wave_remaining_regex, lambda x: set_weights(x, 1 - wave_progress), new_prompt)
+                 p.prompt = new_prompt
+
+                 print(new_prompt)
+
+                 # Denoising strength follows |cos|: lowest at the start of each
+                 # wave, peaking at base + amplitude mid-wave.
+                 denoising_strength_change_rate = 180 / frames_per_wave
+                 cos = abs(math.cos(math.radians(i * denoising_strength_change_rate + denoising_strength_change_offset)))
+                 p.denoising_strength = initial_denoising_strength + denoising_strength_change_amplitude - (cos * denoising_strength_change_amplitude)
+
+                 state.job += f"Iteration {i + 1}/{frames}, batch {n + 1}/{batch_count}. Denoising Strength: {p.denoising_strength}"
+
+                 processed = processing.process_images(p)
+
+                 if initial_seed is None:
+                     initial_seed = processed.seed
+                     initial_info = processed.info
+
+                 init_img = processed.images[0]
+
+                 # Loop the output back in as the next frame's init image.
+                 p.init_images = [init_img]
+
+                 if seed_state == "adding":
+                     p.seed = processed.seed + 1
+                 elif seed_state == "subtracting":
+                     p.seed = processed.seed - 1
+
+                 image_number = int(initial_image_number + i)
+                 images.save_image(init_img, p.outpath_samples, "", processed.seed, processed.prompt, forced_filename=str(image_number))
+
+                 history.append(init_img)
+
+             grid = images.image_grid(history, rows=1)
+             if opts.grid_save:
+                 images.save_image(grid, p.outpath_grids, "grid", initial_seed, p.prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename, grid=True, p=p)
+
+             grids.append(grid)
+             all_images += history
+
+         if opts.return_grid:
+             all_images = grids + all_images
+
+         if save_video:
+             input_pattern = os.path.join(loopback_wave_images_path, "%d.png")
+             encode_video(input_pattern, initial_image_number, loopback_wave_images_path, video_fps, video_quality, video_encoding, segment_video, video_segment_duration, ffmpeg_path)
+
+         processed = Processed(p, all_images, initial_seed, initial_info)
+
+         return processed
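
The "Prompt Changes" box accepts one change per line, in the form frame::prompt or frame::seed::prompt (parsed by the split("::") loop above). An illustrative input, values assumed:

    10::a winter forest @wave_completed(0.4, 1.0)
    25::+3::a spring forest

At frame 10 the prompt is swapped; at frame 25 the seed is also reset to 3, and the leading "+" makes it increment by one on every following frame.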
script/run_n_times.py ADDED
@@ -0,0 +1,24 @@
+ import modules.scripts as scripts
+ import gradio as gr
+
+ from modules.processing import Processed, process_images
+
+
+ class Script(scripts.Script):
+     def title(self):
+         return "Run n times"
+
+     def ui(self, is_img2img):
+         n = gr.Textbox(label="n")
+         return [n]
+
+     def run(self, p, n):
+         # Re-roll the seed each pass and collect the results of every run,
+         # not just the last one.
+         all_images = []
+         for x in range(int(n)):
+             p.seed = -1
+             proc = process_images(p)
+             all_images += proc.images
+         return Processed(p, all_images, p.seed, proc.info)
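
A small robustness sketch (not in the original): n arrives as free text from the gr.Textbox, so int(n) raises ValueError on empty input; a guard such as

    n = int(n) if str(n).strip() else 1

would default to a single run. Setting p.seed = -1 before each pass asks the webui to draw a fresh random seed, so the n runs are independent variations.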