karimbenharrak committed on
Commit
9d02d34
·
verified ·
1 Parent(s): 6e00ac0

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +15 -4
handler.py CHANGED
@@ -1,9 +1,10 @@
1
  from typing import Dict, List, Any
2
  import torch
3
- from diffusers import StableDiffusionXLImg2ImgPipeline
4
  from PIL import Image
5
  import base64
6
  from io import BytesIO
 
7
 
8
 
9
  # set device
@@ -31,12 +32,22 @@ class EndpointHandler():
31
 
32
  prompt = data.pop("prompt", "")
33
 
34
-
35
  if encoded_image is not None:
36
  image = self.decode_base64_image(encoded_image)
37
- print(image.shape)
 
 
 
 
 
 
 
 
 
 
 
38
  self.smooth_pipe.enable_xformers_memory_efficient_attention()
39
- out = self.smooth_pipe(prompt, image=image).images[0]
40
 
41
  return out
42
 
 
1
  from typing import Dict, List, Any
2
  import torch
3
+ from diffusers import StableDiffusionXLImg2ImgPipeline, DiffusionPipeline, AutoencoderKL
4
  from PIL import Image
5
  import base64
6
  from io import BytesIO
7
+ from diffusers.image_processor import VaeImageProcessor
8
 
9
 
10
  # set device
 
32
 
33
  prompt = data.pop("prompt", "")
34
 
 
35
  if encoded_image is not None:
36
  image = self.decode_base64_image(encoded_image)
37
+
38
+ image_processor = VaeImageProcessor();
39
+ latents = image_processor.preprocess(image)
40
+ latents = latents.to(device="cuda")
41
+
42
+ vae = AutoencoderKL.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0",
43
+ subfolder="vae", use_safetensors=True,
44
+ ).to("cuda")
45
+
46
+ with torch.no_grad():
47
+ latents_dist = vae.encode(latents).latent_dist.sample() * vae.config.scaling_factor
48
+
49
  self.smooth_pipe.enable_xformers_memory_efficient_attention()
50
+ out = self.smooth_pipe(prompt, image=latents_dist).images[0]
51
 
52
  return out
53