custum_3d_diffusion/custum_pipeline/unifield_pipeline_img2mvimg.py
CHANGED
@@ -214,8 +214,9 @@ class StableDiffusionImage2MVCustomPipeline(
         # 3. Encode input image
         emb_image = image
 
-        image_embeddings = self._encode_image(emb_image, device, num_images_per_prompt, do_classifier_free_guidance).to(device=
+        image_embeddings = self._encode_image(emb_image, device, num_images_per_prompt, do_classifier_free_guidance).to(device=self.unet.device, dtype=self.unet.dtype)
         print("DEBUG: image_embeddings", image_embeddings.dtype, image_embeddings.device)
+        print("DEBUG: version v111")
         cond_latents = self.encode_latents(image, image_embeddings.device, image_embeddings.dtype, height_cond, width_cond)
         cond_latents = torch.cat([torch.zeros_like(cond_latents), cond_latents]) if do_classifier_free_guidance else cond_latents
         image_pixels = self.feature_extractor(images=emb_image, return_tensors="pt").pixel_values
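For context, the substance of this change is that the embeddings returned by _encode_image may not share the UNet's device or dtype, so they are cast explicitly before being used to build the conditioning latents. The snippet below is a minimal, self-contained sketch of that pattern, not code from this repository: toy_unet, the float16 dtype, and the tensor shapes are assumptions chosen purely for illustration.

import torch

# Stand-in for the pipeline's UNet (assumption: fp16 weights). The real
# diffusers model exposes .device and .dtype directly; here we read them
# off a parameter instead.
toy_unet = torch.nn.Linear(4, 4).to(dtype=torch.float16)

# Embeddings often come back as float32 (e.g. from a CLIP image encoder),
# which would mismatch an fp16 UNet.
image_embeddings = torch.randn(2, 4)  # float32 by default

# Align device and dtype with the UNet before any downstream use,
# mirroring the .to(device=..., dtype=...) call in the diff above.
weight = toy_unet.weight
image_embeddings = image_embeddings.to(device=weight.device, dtype=weight.dtype)

print("DEBUG: image_embeddings", image_embeddings.dtype, image_embeddings.device)

Doing the cast at the point of use keeps downstream ops from failing on device or dtype mismatches, regardless of where the encoder happened to place its output.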