rynmurdock committed
Commit 1913873
1 Parent(s): f5c8f7e
Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -39,17 +39,17 @@ start_time = time.time()
 model_id = "stabilityai/stable-diffusion-xl-base-1.0"
 sdxl_lightening = "ByteDance/SDXL-Lightning"
 ckpt = "sdxl_lightning_2step_unet.safetensors"
-unet = UNet2DConditionModel.from_config(model_id, subfolder="unet").to(DEVICE, torch.float16)
-unet.load_state_dict(load_file(hf_hub_download(sdxl_lightening, ckpt), device=DEVICE))
+unet = UNet2DConditionModel.from_config(model_id, subfolder="unet", low_cpu_mem_usage=True).to(torch.float16)
+unet.load_state_dict(load_file(hf_hub_download(sdxl_lightening, ckpt)))
 
-image_encoder = CLIPVisionModelWithProjection.from_pretrained("h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16,).to(DEVICE)
-pipe = AutoPipelineForText2Image.from_pretrained(model_id, unet=unet, torch_dtype=torch.float16, variant="fp16", image_encoder=image_encoder).to(DEVICE)
+image_encoder = CLIPVisionModelWithProjection.from_pretrained("h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16, low_cpu_mem_usage=True)
+pipe = AutoPipelineForText2Image.from_pretrained(model_id, unet=unet, torch_dtype=torch.float16, variant="fp16", image_encoder=image_encoder, low_cpu_mem_usage=True)
 pipe.unet._load_ip_adapter_weights(torch.load(hf_hub_download('h94/IP-Adapter', 'sdxl_models/ip-adapter_sdxl_vit-h.bin')))
 pipe.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl_vit-h.bin")
 pipe.register_modules(image_encoder = image_encoder)
 pipe.set_ip_adapter_scale(0.8)
 
-pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", torch_dtype=torch.float16)
+pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", torch_dtype=torch.float16, low_cpu_mem_usage=True)
 pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
 
 pipe.to(device=DEVICE)
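
The net effect of the commit is to stop moving each component to DEVICE as it is loaded and instead materialize everything on CPU (with low_cpu_mem_usage=True) before the single pipe.to(device=DEVICE) call at the end. Below is a minimal, self-contained sketch of that pattern; the imports and the DEVICE value are assumptions added here, since app.py defines them elsewhere in the file.

import torch
from diffusers import AutoPipelineForText2Image, UNet2DConditionModel
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

DEVICE = "cuda"  # assumption: app.py defines DEVICE earlier in the file

model_id = "stabilityai/stable-diffusion-xl-base-1.0"

# The UNet is built on CPU in fp16; the SDXL-Lightning 2-step weights are loaded
# without a device argument, so they also stay on CPU for now.
unet = UNet2DConditionModel.from_config(model_id, subfolder="unet", low_cpu_mem_usage=True).to(torch.float16)
unet.load_state_dict(load_file(hf_hub_download("ByteDance/SDXL-Lightning", "sdxl_lightning_2step_unet.safetensors")))

# The pipeline is likewise assembled on CPU with low_cpu_mem_usage=True ...
pipe = AutoPipelineForText2Image.from_pretrained(model_id, unet=unet, torch_dtype=torch.float16, variant="fp16", low_cpu_mem_usage=True)

# ... and only this final call transfers the assembled pipeline to the accelerator.
pipe.to(device=DEVICE)

Deferring device placement this way avoids holding partially built fp16 copies on the accelerator while the remaining components are still downloading, which is the usual motivation for the low_cpu_mem_usage pattern.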