karimbenharrak commited on
Commit
4e2d172
1 Parent(s): 5f4e1b1

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +3 -3
handler.py CHANGED
@@ -45,7 +45,7 @@ class EndpointHandler():
45
  torch_dtype=torch.float16,
46
  )
47
  # use DPMSolverMultistepScheduler
48
- self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)
49
 
50
  self.pipe.enable_model_cpu_offload()
51
 
@@ -130,7 +130,7 @@ class EndpointHandler():
130
  image=image,
131
  mask_image=mask_image,
132
  guidance_scale=guidance_scale, #8.0
133
- num_inference_steps=num_inference_steps, #100
134
  strength=strength, #0.2
135
  output_type="latent", # let's keep in latent to save some VRAM
136
  ).images[0]
@@ -141,7 +141,7 @@ class EndpointHandler():
141
  prompt=prompt,
142
  image=image,
143
  guidance_scale=guidance_scale, #8.0
144
- num_inference_steps=num_inference_steps, #100
145
  strength=strength, #0.2
146
  ).images[0]
147
 
 
45
  torch_dtype=torch.float16,
46
  )
47
  # use EulerAncestralDiscreteScheduler
48
+ self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
49
 
50
  self.pipe.enable_model_cpu_offload()
51
 
 
130
  image=image,
131
  mask_image=mask_image,
132
  guidance_scale=guidance_scale, #8.0
133
+ num_inference_steps=int(num_inference_steps/10), # one tenth of the requested steps (e.g. 100 -> 10)
134
  strength=strength, #0.2
135
  output_type="latent", # let's keep in latent to save some VRAM
136
  ).images[0]
 
141
  prompt=prompt,
142
  image=image,
143
  guidance_scale=guidance_scale, #8.0
144
+ num_inference_steps=int(num_inference_steps/10), # one tenth of the requested steps (e.g. 100 -> 10)
145
  strength=strength, #0.2
146
  ).images[0]
147