import torch


class InstructPixToPixConditioning:
    """ComfyUI node that prepares conditioning for InstructPix2Pix-style editing.

    The reference image is VAE-encoded and attached to every positive and
    negative conditioning entry under the key "concat_latent_image"; an
    all-zero latent of the same shape is returned as the sampling start point.
    """

    @classmethod
    def INPUT_TYPES(s):
        # BUGFIX: @classmethod was missing. ComfyUI calls INPUT_TYPES() on the
        # class itself, which would raise TypeError without the decorator.
        return {"required": {"positive": ("CONDITIONING", ),
                             "negative": ("CONDITIONING", ),
                             "vae": ("VAE", ),
                             "pixels": ("IMAGE", ),
                             }}

    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
    RETURN_NAMES = ("positive", "negative", "latent")
    FUNCTION = "encode"

    CATEGORY = "conditioning/instructpix2pix"

    def encode(self, positive, negative, pixels, vae):
        """Crop, encode, and attach the image latent to both conditionings.

        Args:
            positive/negative: lists of [tensor, dict] conditioning entries.
            pixels: image tensor indexed as (batch, height, width, channels)
                — assumed from the slicing pattern below; TODO confirm.
            vae: object exposing encode(pixels) -> latent tensor.

        Returns:
            (positive, negative, latent) where each conditioning entry's dict
            is a copy carrying "concat_latent_image", and latent["samples"]
            is torch.zeros_like the encoded image.
        """
        # The VAE downsamples by a factor of 8; crop height/width (centered)
        # to the nearest multiple of 8 so encoding divides evenly.
        x = (pixels.shape[1] // 8) * 8
        y = (pixels.shape[2] // 8) * 8

        if pixels.shape[1] != x or pixels.shape[2] != y:
            x_offset = (pixels.shape[1] % 8) // 2
            y_offset = (pixels.shape[2] % 8) // 2
            pixels = pixels[:, x_offset:x + x_offset, y_offset:y + y_offset, :]

        concat_latent = vae.encode(pixels)

        # Sampling starts from an empty latent matching the encoded shape.
        out_latent = {}
        out_latent["samples"] = torch.zeros_like(concat_latent)

        out = []
        for conditioning in [positive, negative]:
            c = []
            for t in conditioning:
                # Copy the dict so the caller's conditioning is not mutated.
                d = t[1].copy()
                d["concat_latent_image"] = concat_latent
                c.append([t[0], d])
            out.append(c)
        return (out[0], out[1], out_latent)
# Registry consumed by ComfyUI: maps the node's string identifier to its class.
NODE_CLASS_MAPPINGS = {
    "InstructPixToPixConditioning": InstructPixToPixConditioning,
}