AlekseyCalvin committed
Commit: c1b2604
Parent(s): 0945218

Update pipeline.py

Files changed (1)
  1. pipeline.py +68 -1
pipeline.py CHANGED
@@ -56,6 +56,40 @@ def prepare_timesteps(
 
 # FLUX pipeline function
 class FluxWithCFGPipeline(StableDiffusion3Pipeline):
+    def __init__(
+        self,
+        transformer: FluxTransformer2DModel,
+        scheduler: FlowMatchEulerDiscreteScheduler,
+        vae: AutoencoderKL,
+        text_encoder: CLIPTextModelWithProjection,
+        tokenizer: CLIPTokenizer,
+        tokenizer_2: T5TokenizerFast,
+        text_encoder_2: T5EncoderModel,
+        tokenizer_3: None,
+    ):
+        super().__init__()
+
+        # Register the FLUX components; the third text-encoder/tokenizer slots
+        # used by the StableDiffusion3Pipeline base are registered as None.
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            text_encoder_2=text_encoder_2,
+            text_encoder_3=None,
+            tokenizer=tokenizer,
+            tokenizer_2=tokenizer_2,
+            tokenizer_3=None,
+            transformer=transformer,
+            scheduler=scheduler,
+        )
+        # Pixel-to-latent downsampling factor derived from the VAE config,
+        # with a fallback of 16 when no VAE is registered.
+        self.vae_scale_factor = (
+            2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 16
+        )
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+        self.tokenizer_max_length = (
+            self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
+        )
+        self.default_sample_size = 64
+
     def __call__(
         self,
         prompt: Union[str, List[str]] = None,
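Aside on the `vae_scale_factor` line added above: a minimal, self-contained sketch of the same arithmetic, assuming an illustrative four-entry `block_out_channels` config rather than values read from a real checkpoint. It shows how the factor maps a pixel resolution onto the latent grid; the `else 16` branch in the constructor is only a fallback for when no VAE is registered.

    # Hypothetical VAE config used only to illustrate the arithmetic in __init__.
    block_out_channels = [128, 256, 512, 512]
    vae_scale_factor = 2 ** (len(block_out_channels) - 1)
    print(vae_scale_factor)                                       # 8

    # One latent cell then corresponds to an 8x8 pixel patch:
    height, width = 1024, 1024
    print(height // vae_scale_factor, width // vae_scale_factor)  # 128 128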
 
@@ -208,7 +242,40 @@ class FluxWithCFGPipeline(StableDiffusion3Pipeline):
         return self.image_processor.postprocess(image, output_type=output_type)[0]
 
 class FluxWithCFGPipeline(StableDiffusion3Pipeline):
-    @torch.inference_mode()
+    def __init__(
+        self,
+        transformer: FluxTransformer2DModel,
+        scheduler: FlowMatchEulerDiscreteScheduler,
+        vae: AutoencoderKL,
+        text_encoder: CLIPTextModelWithProjection,
+        tokenizer: CLIPTokenizer,
+        tokenizer_2: T5TokenizerFast,
+        text_encoder_2: T5EncoderModel,
+        tokenizer_3: None,
+    ):
+        super().__init__()
+
+        self.register_modules(
+            vae=vae,
+            text_encoder=text_encoder,
+            text_encoder_2=text_encoder_2,
+            text_encoder_3=None,
+            tokenizer=tokenizer,
+            tokenizer_2=tokenizer_2,
+            tokenizer_3=None,
+            transformer=transformer,
+            scheduler=scheduler,
+        )
+        self.vae_scale_factor = (
+            2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 16
+        )
+        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+        self.tokenizer_max_length = (
+            self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
+        )
+        self.default_sample_size = 64
+
+    @torch.inference_mode()
     def generate_image(
         self,
         prompt: Union[str, List[str]] = None,
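The net effect of the second hunk is that `@torch.inference_mode()` now decorates `generate_image` directly below the inserted `__init__` instead of sitting on its own under the class header. As a standalone illustration of what that decorator does during sampling (the function below is a stand-in, not part of the pipeline):

    import torch

    @torch.inference_mode()
    def sample_step(x: torch.Tensor) -> torch.Tensor:
        # Any work done here builds no autograd graph.
        return x * 2.0

    out = sample_step(torch.ones(3, requires_grad=True))
    print(out.requires_grad)        # False: gradients are not tracked
    print(torch.is_inference(out))  # True: tensor was created under inference mode

Unlike `torch.no_grad()`, inference mode also skips view and version-counter bookkeeping, which is why it is the usual wrapper for pure sampling paths such as `generate_image`.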