lsb committed on
Commit
4ba5fc1
1 Parent(s): b6f29e3
Files changed (1) hide show
  1. app.py +16 -6
app.py CHANGED
@@ -4,6 +4,7 @@ import torch
4
  from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
5
  from quanto import qfloat8, quantize, freeze
6
  from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
 
7
  from diffusers.utils import make_image_grid
8
 
9
  atkbold = ImageFont.truetype("Atkinson-Hyperlegible-Bold-102.otf",50)
@@ -45,12 +46,12 @@ def mask_image_factory(mask_text="ASK FOR\nA SNACK", width=default_width, height
45
 
46
  preferred_device = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")
47
  # preferred_device = "cpu"
48
- preferred_dtype = torch.float32
49
 
50
  controlnet = ControlNetModel.from_pretrained(
51
- "monster-labs/control_v1p_sd15_qrcode_monster",
52
- # "monster-labs/control_v1p_sdxl_qrcode_monster",
53
- subfolder="v2",
54
  torch_dtype=preferred_dtype,
55
  #torch_dtype=unet_preferred_dtype
56
  ).to(preferred_device)
@@ -58,13 +59,22 @@ controlnet = ControlNetModel.from_pretrained(
58
  #quantize(controlnet, weights=qfloat8)
59
  #freeze(controlnet)
60
 
61
- ctlpipe = StableDiffusionControlNetPipeline.from_pretrained(
62
- "SimianLuo/LCM_Dreamshaper_v7",
 
 
 
 
 
 
 
63
  controlnet=controlnet,
64
  torch_dtype=preferred_dtype,
65
  safety_checker=None,
66
  ).to(preferred_device)
67
 
 
 
68
  #quantize(ctlpipe.unet, weights=qfloat8)
69
  #freeze(ctlpipe.unet)
70
  #quantize(ctlpipe.text_encoder, weights=qfloat8)
 
4
  from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageEnhance
5
  from quanto import qfloat8, quantize, freeze
6
  from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
7
+ from diffusers import StableDiffusionXLControlNetPipeline, UNet2DConditionModel, DiffusionPipeline, LCMScheduler
8
  from diffusers.utils import make_image_grid
9
 
10
  # Bold Atkinson Hyperlegible face at 50 px, loaded once at module import.
  atkbold = ImageFont.truetype("Atkinson-Hyperlegible-Bold-102.otf",50)
 
# Pick the fastest available accelerator, falling back to CPU.
preferred_device = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")
# preferred_device = "cpu"
# Half precision halves memory/bandwidth on accelerators; CPU runs in fp32.
preferred_dtype = torch.float32 if preferred_device == "cpu" else torch.float16

# QR-code-pattern ControlNet for SDXL (the SD1.5 variant and its "v2"
# subfolder are kept commented out for easy switching back).
controlnet = ControlNetModel.from_pretrained(
    # "monster-labs/control_v1p_sd15_qrcode_monster",
    "monster-labs/control_v1p_sdxl_qrcode_monster",
    # subfolder="v2",
    torch_dtype=preferred_dtype,
    # torch_dtype=unet_preferred_dtype
).to(preferred_device)
 
# quantize(controlnet, weights=qfloat8)
# freeze(controlnet)

# LCM-distilled SDXL UNet — swapped into the base pipeline so that the
# LCMScheduler below can sample in very few steps.
unet = UNet2DConditionModel.from_pretrained(
    "latent-consistency/lcm-sdxl",
    torch_dtype=preferred_dtype,
    # NOTE(review): the fp16 weight files are requested even when
    # preferred_dtype is float32 (CPU path) — confirm this is intended.
    variant="fp16",
).to(preferred_device)

ctlpipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    unet=unet,
    controlnet=controlnet,
    torch_dtype=preferred_dtype,
    # NOTE(review): SDXL pipelines define no safety checker; diffusers
    # ignores unexpected from_pretrained kwargs with a warning — confirm
    # this argument can simply be dropped.
    safety_checker=None,
).to(preferred_device)

# LCM sampling requires its own scheduler; rebuild it from the pipeline's
# existing scheduler config.
ctlpipe.scheduler = LCMScheduler.from_config(ctlpipe.scheduler.config)

# quantize(ctlpipe.unet, weights=qfloat8)
# freeze(ctlpipe.unet)
# quantize(ctlpipe.text_encoder, weights=qfloat8)