Commit 2951b6b by radames
Parent(s): 74f6ce1

use taesd for all models

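The change repeated across every pipeline below swaps the full VAE for madebyollin's Tiny AutoEncoder (TAESD) whenever args.use_taesd is set, and moves the swapped-in VAE onto the target device. A minimal sketch of the pattern, assuming pipe, device, and torch_dtype are already defined as they are in these files:

from diffusers import AutoencoderTiny

# Swap the pipeline's full VAE for the tiny autoencoder: much faster latent
# decoding at a small cost in decode quality.
pipe.vae = AutoencoderTiny.from_pretrained(
    "madebyollin/taesd", torch_dtype=torch_dtype, use_safetensors=True
).to(device)  # the .to(device) is the fix; without it the new VAE stays on CPU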

pipelines/controlnet.py CHANGED
@@ -16,6 +16,7 @@ import psutil
 from config import Args
 from pydantic import BaseModel, Field
 from PIL import Image
+import math

 base_model = "SimianLuo/LCM_Dreamshaper_v7"
 taesd_model = "madebyollin/taesd"
@@ -68,13 +69,13 @@ class Pipeline:
             2159232, min=0, title="Seed", field="seed", hide=True, id="seed"
         )
         steps: int = Field(
-            4, min=2, max=15, title="Steps", field="range", hide=True, id="steps"
+            4, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
         )
         width: int = Field(
-            512, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
+            768, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
         )
         height: int = Field(
-            512, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
+            768, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
         )
         guidance_scale: float = Field(
             0.2,
@@ -171,7 +172,7 @@ class Pipeline:
         if args.use_taesd:
             self.pipe.vae = AutoencoderTiny.from_pretrained(
                 taesd_model, torch_dtype=torch_dtype, use_safetensors=True
-            )
+            ).to(device)
         self.canny_torch = SobelOperator(device=device)
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
@@ -208,14 +209,18 @@ class Pipeline:
         control_image = self.canny_torch(
             params.image, params.canny_low_threshold, params.canny_high_threshold
         )
+        steps = params.steps
+        strength = params.strength
+        if int(steps * strength) < 1:
+            steps = math.ceil(1 / max(0.10, strength))

         results = self.pipe(
             image=params.image,
             control_image=control_image,
             prompt_embeds=prompt_embeds,
             generator=generator,
-            strength=params.strength,
-            num_inference_steps=params.steps,
+            strength=strength,
+            num_inference_steps=steps,
             guidance_scale=params.guidance_scale,
             width=params.width,
             height=params.height,
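
The steps/strength guard added to predict() above deserves a note: diffusers img2img pipelines skip the start of the schedule, so only about int(num_inference_steps * strength) denoising steps actually run, and a small strength can round that down to zero. The guard raises the step count just enough to guarantee at least one effective step; the 0.10 floor is this commit's own choice (it caps the bump at 10 steps), not a library constant. A quick check of the arithmetic:

import math

# steps=4, strength=0.2 would give int(4 * 0.2) == 0 effective steps.
steps, strength = 4, 0.2
if int(steps * strength) < 1:
    steps = math.ceil(1 / max(0.10, strength))  # ceil(1 / 0.2) == 5

print(int(steps * strength))  # 1 effective denoising step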

pipelines/controlnetLoraSD15.py CHANGED
@@ -2,6 +2,7 @@ from diffusers import (
     StableDiffusionControlNetImg2ImgPipeline,
     ControlNetModel,
     LCMScheduler,
+    AutoencoderTiny,
 )
 from compel import Compel
 import torch
@@ -16,6 +17,7 @@ import psutil
 from config import Args
 from pydantic import BaseModel, Field
 from PIL import Image
+import math

 taesd_model = "madebyollin/taesd"
 controlnet_model = "lllyasviel/control_v11p_sd15_canny"
@@ -79,13 +81,13 @@ class Pipeline:
             2159232, min=0, title="Seed", field="seed", hide=True, id="seed"
         )
         steps: int = Field(
-            4, min=2, max=15, title="Steps", field="range", hide=True, id="steps"
+            4, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
         )
         width: int = Field(
-            512, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
+            768, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
         )
         height: int = Field(
-            512, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
+            768, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
         )
         guidance_scale: float = Field(
             0.2,
@@ -200,6 +202,11 @@ class Pipeline:
             if psutil.virtual_memory().total < 64 * 1024**3:
                 pipe.enable_attention_slicing()

+            if args.use_taesd:
+                pipe.vae = AutoencoderTiny.from_pretrained(
+                    taesd_model, torch_dtype=torch_dtype, use_safetensors=True
+                ).to(device)
+
             # Load LCM LoRA
             pipe.load_lora_weights(lcm_lora_id, adapter_name="lcm")
             pipe.compel_proc = Compel(
@@ -222,7 +229,6 @@ class Pipeline:

     def predict(self, params: "Pipeline.InputParams") -> Image.Image:
         generator = torch.manual_seed(params.seed)
-        print(f"Using model: {params.base_model_id}")
         pipe = self.pipes[params.base_model_id]

         activation_token = base_models[params.base_model_id]
@@ -231,14 +237,18 @@ class Pipeline:
         control_image = self.canny_torch(
             params.image, params.canny_low_threshold, params.canny_high_threshold
         )
+        steps = params.steps
+        strength = params.strength
+        if int(steps * strength) < 1:
+            steps = math.ceil(1 / max(0.10, strength))

         results = pipe(
             image=params.image,
             control_image=control_image,
             prompt_embeds=prompt_embeds,
             generator=generator,
-            strength=params.strength,
-            num_inference_steps=params.steps,
+            strength=strength,
+            num_inference_steps=steps,
             guidance_scale=params.guidance_scale,
             width=params.width,
             height=params.height,

pipelines/controlnetLoraSDXL.py CHANGED
@@ -3,6 +3,7 @@ from diffusers import (
     ControlNetModel,
     LCMScheduler,
     AutoencoderKL,
+    AutoencoderTiny,
 )
 from compel import Compel, ReturnedEmbeddingsType
 import torch
@@ -17,10 +18,12 @@ import psutil
 from config import Args
 from pydantic import BaseModel, Field
 from PIL import Image
+import math

 controlnet_model = "diffusers/controlnet-canny-sdxl-1.0"
 model_id = "stabilityai/stable-diffusion-xl-base-1.0"
 lcm_lora_id = "latent-consistency/lcm-lora-sdxl"
+taesd_model = "madebyollin/taesdxl"


 default_prompt = "Portrait of The Terminator with , glare pose, detailed, intricate, full of colour, cinematic lighting, trending on artstation, 8k, hyperrealistic, focused, extreme details, unreal engine 5 cinematic, masterpiece"
@@ -77,7 +80,7 @@ class Pipeline:
             2159232, min=0, title="Seed", field="seed", hide=True, id="seed"
         )
         steps: int = Field(
-            4, min=2, max=15, title="Steps", field="range", hide=True, id="steps"
+            2, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
         )
         width: int = Field(
             1024, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
@@ -96,10 +99,10 @@ class Pipeline:
             id="guidance_scale",
         )
         strength: float = Field(
-            0.5,
+            1,
             min=0.25,
             max=1.0,
-            step=0.001,
+            step=0.0001,
             title="Strength",
             field="range",
             hide=True,
@@ -208,6 +211,10 @@ class Pipeline:
             returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
             requires_pooled=[False, True],
         )
+        if args.use_taesd:
+            self.pipe.vae = AutoencoderTiny.from_pretrained(
+                taesd_model, torch_dtype=torch_dtype, use_safetensors=True
+            ).to(device)

         if args.torch_compile:
             self.pipe.unet = torch.compile(
@@ -231,6 +238,10 @@ class Pipeline:
         control_image = self.canny_torch(
             params.image, params.canny_low_threshold, params.canny_high_threshold
         )
+        steps = params.steps
+        strength = params.strength
+        if int(steps * strength) < 1:
+            steps = math.ceil(1 / max(0.10, strength))

         results = self.pipe(
             image=params.image,
@@ -240,8 +251,8 @@ class Pipeline:
             negative_prompt_embeds=prompt_embeds[1:2],
             negative_pooled_prompt_embeds=pooled_prompt_embeds[1:2],
             generator=generator,
-            strength=params.strength,
-            num_inference_steps=params.steps,
+            strength=strength,
+            num_inference_steps=steps,
             guidance_scale=params.guidance_scale,
             width=params.width,
             height=params.height,
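
Note that the SDXL-family pipelines load madebyollin/taesdxl rather than madebyollin/taesd: the two tiny autoencoders are trained for different latent spaces (SD 1.5 vs. SDXL) and are not interchangeable. A hypothetical lookup, purely illustrative, that makes the pairing explicit:

# Hypothetical mapping (not part of this repo): tiny VAE per base-model family.
TAESD_FOR_FAMILY = {
    "sd15": "madebyollin/taesd",    # e.g. SimianLuo/LCM_Dreamshaper_v7
    "sdxl": "madebyollin/taesdxl",  # e.g. stabilityai/sdxl-turbo, SDXL base
}

The img2imgSDXLTurbo.py hunk further down corrects exactly this mismatch.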

pipelines/controlnetSDXLTurbo.py CHANGED
@@ -2,6 +2,7 @@ from diffusers import (
     StableDiffusionXLControlNetImg2ImgPipeline,
     ControlNetModel,
     AutoencoderKL,
+    AutoencoderTiny,
 )
 from compel import Compel, ReturnedEmbeddingsType
 import torch
@@ -20,6 +21,7 @@ import math

 controlnet_model = "diffusers/controlnet-canny-sdxl-1.0"
 model_id = "stabilityai/sdxl-turbo"
+taesd_model = "madebyollin/taesdxl"

 default_prompt = "Portrait of The Terminator with , glare pose, detailed, intricate, full of colour, cinematic lighting, trending on artstation, 8k, hyperrealistic, focused, extreme details, unreal engine 5 cinematic, masterpiece"
 default_negative_prompt = "blurry, low quality, render, 3D, oversaturated"
@@ -75,18 +77,18 @@ class Pipeline:
             2159232, min=0, title="Seed", field="seed", hide=True, id="seed"
         )
         steps: int = Field(
-            4, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
+            2, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
         )
         width: int = Field(
-            512, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
+            1024, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
         )
         height: int = Field(
-            512, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
+            1024, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
         )
         guidance_scale: float = Field(
             1.0,
             min=0,
-            max=20,
+            max=10,
             step=0.001,
             title="Guidance Scale",
             field="range",
@@ -197,6 +199,10 @@ class Pipeline:
             returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
             requires_pooled=[False, True],
         )
+        if args.use_taesd:
+            self.pipe.vae = AutoencoderTiny.from_pretrained(
+                taesd_model, torch_dtype=torch_dtype, use_safetensors=True
+            ).to(device)

         if args.torch_compile:
             self.pipe.unet = torch.compile(

pipelines/img2img.py CHANGED
@@ -14,6 +14,7 @@ import psutil
 from config import Args
 from pydantic import BaseModel, Field
 from PIL import Image
+import math

 base_model = "SimianLuo/LCM_Dreamshaper_v7"
 taesd_model = "madebyollin/taesd"
@@ -64,13 +65,13 @@ class Pipeline:
             2159232, min=0, title="Seed", field="seed", hide=True, id="seed"
         )
         steps: int = Field(
-            4, min=2, max=15, title="Steps", field="range", hide=True, id="steps"
+            4, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
         )
         width: int = Field(
-            512, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
+            768, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
         )
         height: int = Field(
-            512, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
+            768, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
         )
         guidance_scale: float = Field(
             0.2,
@@ -104,7 +105,7 @@ class Pipeline:
         if args.use_taesd:
             self.pipe.vae = AutoencoderTiny.from_pretrained(
                 taesd_model, torch_dtype=torch_dtype, use_safetensors=True
-            )
+            ).to(device)

         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
@@ -138,12 +139,18 @@ class Pipeline:
     def predict(self, params: "Pipeline.InputParams") -> Image.Image:
         generator = torch.manual_seed(params.seed)
         prompt_embeds = self.compel_proc(params.prompt)
+
+        steps = params.steps
+        strength = params.strength
+        if int(steps * strength) < 1:
+            steps = math.ceil(1 / max(0.10, strength))
+
         results = self.pipe(
             image=params.image,
             prompt_embeds=prompt_embeds,
             generator=generator,
-            strength=params.strength,
-            num_inference_steps=params.steps,
+            strength=strength,
+            num_inference_steps=steps,
             guidance_scale=params.guidance_scale,
             width=params.width,
             height=params.height,

pipelines/img2imgSDXLTurbo.py CHANGED
@@ -17,7 +17,7 @@ from PIL import Image
 import math

 base_model = "stabilityai/sdxl-turbo"
-taesd_model = "madebyollin/taesd"
+taesd_model = "madebyollin/taesdxl"

 default_prompt = "close-up photography of old man standing in the rain at night, in a street lit by lamps, leica 35mm summilux"
 default_negative_prompt = "blurry, low quality, render, 3D, oversaturated"
@@ -113,7 +113,7 @@ class Pipeline:
         if args.use_taesd:
             self.pipe.vae = AutoencoderTiny.from_pretrained(
                 taesd_model, torch_dtype=torch_dtype, use_safetensors=True
-            )
+            ).to(device)

         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)
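
Beyond the .to(device) fix applied everywhere, this hunk corrects a real mismatch: img2imgSDXLTurbo.py had been pairing an SDXL-family base model with the SD 1.5 tiny VAE. The constants now agree (values as in the hunk above):

base_model = "stabilityai/sdxl-turbo"  # SDXL-family base
taesd_model = "madebyollin/taesdxl"    # matching tiny VAE; was "madebyollin/taesd"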

pipelines/txt2img.py CHANGED
@@ -62,10 +62,10 @@ class Pipeline:
             4, min=2, max=15, title="Steps", field="range", hide=True, id="steps"
         )
         width: int = Field(
-            512, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
+            768, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
         )
         height: int = Field(
-            512, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
+            768, min=2, max=15, title="Height", disabled=True, hide=True, id="height"
         )
         guidance_scale: float = Field(
             8.0,
@@ -88,7 +88,7 @@ class Pipeline:
         if args.use_taesd:
             self.pipe.vae = AutoencoderTiny.from_pretrained(
                 taesd_model, torch_dtype=torch_dtype, use_safetensors=True
-            )
+            ).to(device)

         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)

pipelines/txt2imgLora.py CHANGED
@@ -95,7 +95,7 @@ class Pipeline:
         if args.use_taesd:
             self.pipe.vae = AutoencoderTiny.from_pretrained(
                 taesd_model, torch_dtype=torch_dtype, use_safetensors=True
-            )
+            ).to(device)
         self.pipe.scheduler = LCMScheduler.from_config(self.pipe.scheduler.config)
         self.pipe.set_progress_bar_config(disable=True)
         self.pipe.to(device=device, dtype=torch_dtype)

pipelines/txt2imgLoraSDXL.py CHANGED
@@ -1,8 +1,4 @@
-from diffusers import (
-    DiffusionPipeline,
-    LCMScheduler,
-    AutoencoderKL,
-)
+from diffusers import DiffusionPipeline, LCMScheduler, AutoencoderKL, AutoencoderTiny
 from compel import Compel, ReturnedEmbeddingsType
 import torch

@@ -16,9 +12,9 @@ from config import Args
 from pydantic import BaseModel, Field
 from PIL import Image

-controlnet_model = "diffusers/controlnet-canny-sdxl-1.0"
 model_id = "stabilityai/stable-diffusion-xl-base-1.0"
 lcm_lora_id = "latent-consistency/lcm-lora-sdxl"
+taesd_model = "madebyollin/taesdxl"


 default_prompt = "close-up photography of old man standing in the rain at night, in a street lit by lamps, leica 35mm summilux"
@@ -76,7 +72,7 @@ class Pipeline:
             2159232, min=0, title="Seed", field="seed", hide=True, id="seed"
         )
         steps: int = Field(
-            4, min=2, max=15, title="Steps", field="range", hide=True, id="steps"
+            4, min=1, max=15, title="Steps", field="range", hide=True, id="steps"
         )
         width: int = Field(
             1024, min=2, max=15, title="Width", disabled=True, hide=True, id="width"
@@ -127,6 +123,10 @@ class Pipeline:
             returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
             requires_pooled=[False, True],
         )
+        if args.use_taesd:
+            self.pipe.vae = AutoencoderTiny.from_pretrained(
+                taesd_model, torch_dtype=torch_dtype, use_safetensors=True
+            ).to(device)

         if args.torch_compile:
             self.pipe.unet = torch.compile(