Spaces: Running on A10G
Update app.py
app.py CHANGED
@@ -26,16 +26,11 @@ def pix2pix(
     input_image: Image.Image,
     instruction: str,
     steps: int,
-    randomize_seed: bool,
     seed: int,
-    randomize_cfg: bool,
     text_cfg_scale: float,
     image_cfg_scale: float,
 ):
-
-    text_cfg_scale = round(random.uniform(6.0, 9.0), ndigits=2) if randomize_cfg else text_cfg_scale
-    image_cfg_scale = round(random.uniform(1.2, 1.8), ndigits=2) if randomize_cfg else image_cfg_scale
-
+
     width, height = input_image.size
     factor = 512 / max(width, height)
     factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
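The headline change in this hunk is that pix2pix no longer accepts randomize_seed or randomize_cfg, and the in-function CFG randomization is gone, so a caller that still wants randomized guidance scales has to draw them itself before calling. A minimal sketch of such a call site, reusing the removed expressions (the frame and prompt values here are hypothetical, not taken from this Space):

import random

# Hypothetical caller-side randomization, reusing the expressions removed above.
text_cfg_scale = round(random.uniform(6.0, 9.0), ndigits=2)
image_cfg_scale = round(random.uniform(1.2, 1.8), ndigits=2)

result = pix2pix(
    input_image=frame,                   # a PIL.Image.Image (hypothetical variable)
    instruction="make it a watercolor",  # illustrative prompt
    steps=50,
    seed=1371,                           # fixed seed chosen by the caller
    text_cfg_scale=text_cfg_scale,
    image_cfg_scale=image_cfg_scale,
)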
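The unchanged tail of the hunk computes the resize factor: the first line scales the longer side toward 512, and the second re-derives the factor so the shorter side lands exactly on a multiple of 64. A quick trace with an illustrative 1280x720 frame:

import math

width, height = 1280, 720          # illustrative frame size
factor = 512 / max(width, height)  # 512 / 1280 = 0.4
factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
# math.ceil(720 * 0.4 / 64) * 64 / 720 = 5 * 64 / 720 ≈ 0.4444
# height * factor == 320 (a multiple of 64); width * factor ≈ 568.9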
@@ -119,7 +114,8 @@ def infer(prompt,video_in, seed_in, trim_value):
 
     for i in frames_list[0:int(n_frame)]:
         pil_i = Image.open(i)
-
+        print(pil_i.Image)
+        pix2pix_img = pix2pix(pil_i.Image, prompt, 50, seed_in, 7.5, 1.5)
         print(pix2pix_img)
         image = Image.open(pix2pix_img)
         rgb_im = image.convert("RGB")
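In the new infer loop, note that pil_i is already a PIL.Image.Image, so pil_i.Image would raise AttributeError on a standard Pillow image; the updated signature above expects the image object itself. A hedged sketch of what the loop appears to intend, assuming (as the surrounding code already does) that pix2pix returns something Image.open can read, such as a file path:

for i in frames_list[0:int(n_frame)]:
    pil_i = Image.open(i)                                         # frame as a PIL image
    pix2pix_img = pix2pix(pil_i, prompt, 50, seed_in, 7.5, 1.5)   # pass the image directly
    image = Image.open(pix2pix_img)                               # assumes a path-like return value
    rgb_im = image.convert("RGB")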