Update inference_manager.py
inference_manager.py CHANGED (+4 -4)
@@ -205,12 +205,12 @@ class InferenceManager:
         print(ckpt_dir)
         pipe = DiffusionPipeline.from_pretrained(
             ckpt_dir,
-
+            vae=vae,
             #unet=unet,
             torch_dtype=torch.bfloat16,
             use_safetensors=True,
             #variant="fp16",
-
+            custom_pipeline = "lpw_stable_diffusion_xl",
         )
         #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
         clip_skip = cfg.get("clip_skip", 1)
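Note on this hunk: the pipeline now receives an explicit vae and the lpw_stable_diffusion_xl community pipeline, which adds long-prompt weighting to SDXL (prompts beyond CLIP's 77-token limit plus (word:1.2)-style emphasis). A minimal sketch of the same construction, assuming vae is an AutoencoderKL loaded upstream; the checkpoint names below are illustrative stand-ins, not this repo's actual values:

import torch
from diffusers import AutoencoderKL, DiffusionPipeline

# Assumption: the vae injected above is loaded separately, e.g. the
# fp16-fix VAE, a common pick for SDXL in reduced precision.
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.bfloat16
)

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # stand-in for ckpt_dir
    vae=vae,
    torch_dtype=torch.bfloat16,
    use_safetensors=True,
    # community pipeline fetched by name; enables long-prompt weighting
    custom_pipeline="lpw_stable_diffusion_xl",
)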
@@ -501,7 +501,7 @@ class ModelManager:
         if not images:
             raise Exception(f"face images not provided")
         start = time.time()
-
+        ip_model.pipe.to("cuda")
         if not self.app:
             self.app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider'])#, 'CPUExecutionProvider'
             self.app.prepare(ctx_id=0, det_size=(512, 512))
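Note on this hunk: ip_model.pipe.to("cuda") moves the diffusers pipeline wrapped by the IP-Adapter object onto the GPU before embedding and generation. A guarded variant, as a sketch, assuming ip_model.pipe is a standard DiffusionPipeline whose .device property reports current placement:

import torch

# Assumption: ip_model.pipe is a diffusers DiffusionPipeline; only move it
# when CUDA is actually available and the pipeline is not already there.
if torch.cuda.is_available() and ip_model.pipe.device.type != "cuda":
    ip_model.pipe.to("cuda")

The guard lets the code degrade gracefully on CPU-only hosts instead of raising at the transfer.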
@@ -524,7 +524,7 @@ class ModelManager:
         prompt_str = cfg.get("prompt", "{prompt}").replace("{prompt}", p)
         #generator = torch.Generator(model.base_model_pipeline.device).manual_seed(seed)
         print(f"generate: p={p}, np={negative_prompt}, steps={steps}, guidance_scale={guidance_scale}, size={width},{height}, seed={seed}")
-
+        print(f"device: embedding={average_embedding.device}, generator={generator.device}, ip_model={ip_model.pipe}, pipe={model.base_model_pipeline.device}")
         images = ip_model.generate(
             prompt=prompt_str,
             negative_prompt=negative_prompt,
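Note on this hunk: the added print is a device-placement diagnostic for chasing CPU/GPU mismatch errors before the generate call. One caveat: ip_model={ip_model.pipe} interpolates the full pipeline repr rather than a device name; if only the placement is wanted, a variant like the following (same objects as in the hunk above) keeps the log to one device per field:

# Sketch: log devices only, assuming ip_model.pipe exposes the standard
# DiffusionPipeline .device property.
print(
    f"device: embedding={average_embedding.device}, "
    f"generator={generator.device}, "
    f"ip_model={ip_model.pipe.device}, "
    f"pipe={model.base_model_pipeline.device}"
)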