Spaces:
Running
on
A10G
Running
on
A10G
rynmurdock
committed on
Commit
•
f8cc74c
1
Parent(s):
225937a
app.py
CHANGED
@@ -43,7 +43,7 @@ unet = UNet2DConditionModel.from_config(model_id, subfolder="unet", low_cpu_mem_
|
|
43 |
unet.load_state_dict(load_file(hf_hub_download(sdxl_lightening, ckpt)))
|
44 |
|
45 |
image_encoder = CLIPVisionModelWithProjection.from_pretrained("h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map=DEVICE)
|
46 |
-
pipe = AutoPipelineForText2Image.from_pretrained(model_id, unet=unet, torch_dtype=torch.float16, variant="fp16", image_encoder=image_encoder, low_cpu_mem_usage=True
|
47 |
pipe.unet._load_ip_adapter_weights(torch.load(hf_hub_download('h94/IP-Adapter', 'sdxl_models/ip-adapter_sdxl_vit-h.bin')))
|
48 |
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl_vit-h.bin")
|
49 |
pipe.register_modules(image_encoder = image_encoder)
|
|
|
43 |
unet.load_state_dict(load_file(hf_hub_download(sdxl_lightening, ckpt)))
|
44 |
|
45 |
image_encoder = CLIPVisionModelWithProjection.from_pretrained("h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map=DEVICE)
|
46 |
+
pipe = AutoPipelineForText2Image.from_pretrained(model_id, unet=unet, torch_dtype=torch.float16, variant="fp16", image_encoder=image_encoder, low_cpu_mem_usage=True)
|
47 |
pipe.unet._load_ip_adapter_weights(torch.load(hf_hub_download('h94/IP-Adapter', 'sdxl_models/ip-adapter_sdxl_vit-h.bin')))
|
48 |
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl_vit-h.bin")
|
49 |
pipe.register_modules(image_encoder = image_encoder)
|