wanghuging committed
Commit d12db4f · 1 Parent(s): 4d65704

Update app.py

Files changed (1)
  1. app.py +6 -3
app.py CHANGED
@@ -48,7 +48,8 @@ if SAFETY_CHECKER == "True":
     t2i_pipe = AutoPipelineForText2Image.from_pretrained(
         #"stabilityai/sdxl-turbo",
         # "wanghuging/demo_model",
-        "stabilityai/stable-diffusion-xl-base-1.0",
+        #"stabilityai/stable-diffusion-xl-base-1.0",
+        "stabilityai/stable-diffusion-2-1",
         torch_dtype=torch_dtype,
         variant="fp16" #if torch_dtype == torch.float16 else "fp32",
     )
@@ -62,19 +63,21 @@ else:
     t2i_pipe = AutoPipelineForText2Image.from_pretrained(
         #"stabilityai/sdxl-turbo",
         # "wanghuging/demo_model",
-        "stabilityai/stable-diffusion-xl-base-1.0",
+        # "stabilityai/stable-diffusion-xl-base-1.0",
+        "stabilityai/stable-diffusion-2-1",
         safety_checker=None,
         torch_dtype=torch_dtype,
         variant="fp16" #if torch_dtype == torch.float16 else "fp32",
     )
 
-
+t2i_pipe.load_lora_weights("wanghuging/skin_demo", weight_name="skin_demo.safetensors")
 t2i_pipe.to(device=torch_device, dtype=torch_dtype).to(device)
 t2i_pipe.set_progress_bar_config(disable=True)
 i2i_pipe.to(device=torch_device, dtype=torch_dtype).to(device)
 i2i_pipe.set_progress_bar_config(disable=True)
 
 
+
 def resize_crop(image, size=512):
     image = image.convert("RGB")
     w, h = image.size
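
For context, a minimal standalone sketch of the pattern this commit lands on: loading stabilityai/stable-diffusion-2-1 as the text-to-image base and attaching the wanghuging/skin_demo LoRA before generating. This is not the full app.py; the prompt, step count, and guidance scale below are illustrative placeholders, and the device/dtype setup is a reasonable assumption rather than the app's exact values.

import torch
from diffusers import AutoPipelineForText2Image

# Assumed device/dtype selection; app.py derives torch_device/torch_dtype elsewhere.
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch_device == "cuda" else torch.float32

# Base model switched by this commit: SDXL base 1.0 -> Stable Diffusion 2.1.
t2i_pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-2-1",
    torch_dtype=torch_dtype,
    variant="fp16",  # mirrors the hard-coded variant in app.py
)

# LoRA adapter added by this commit.
t2i_pipe.load_lora_weights("wanghuging/skin_demo", weight_name="skin_demo.safetensors")
t2i_pipe.to(torch_device)
t2i_pipe.set_progress_bar_config(disable=True)

# Placeholder prompt and sampler settings, for illustration only.
image = t2i_pipe(
    prompt="a close-up photo of healthy skin",
    num_inference_steps=25,
    guidance_scale=7.5,
).images[0]
image.save("out.png")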