dome272 committed
Commit 7c92d6d
1 Parent(s): bd44803

add compel support

Files changed (2)
  1. app.py +12 -6
  2. requirements.txt +2 -1
app.py CHANGED
@@ -7,8 +7,9 @@ import torch
 from typing import List
 from diffusers.utils import numpy_to_pil
 from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline
-from diffusers.pipelines.wuerstchen import WuerstchenPrior, default_stage_c_timesteps
+from diffusers.pipelines.wuerstchen import WuerstchenPrior, DEFAULT_STAGE_C_TIMESTEPS
 from previewer.modules import Previewer
+from compel import Compel
 os.environ['TOKENIZERS_PARALLELISM'] = 'false'
 
 DESCRIPTION = "# Würstchen"
@@ -19,7 +20,7 @@ if not torch.cuda.is_available():
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
-USE_TORCH_COMPILE = True
+USE_TORCH_COMPILE = False
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 PREVIEW_IMAGES = True
 
@@ -51,6 +52,7 @@ if torch.cuda.is_available():
     else:
         previewer = None
         callback_prior = None
+    compel_proc = Compel(tokenizer=prior_pipeline.tokenizer, text_encoder=prior_pipeline.text_encoder)
 else:
     prior_pipeline = None
     decoder_pipeline = None
@@ -78,12 +80,16 @@ def generate(
 ) -> PIL.Image.Image:
     generator = torch.Generator().manual_seed(seed)
 
+    print("Running compel")
+    prompt_embeds = compel_proc(prompt)
+    negative_prompt_embeds = compel_proc(negative_prompt)
+
     prior_output = prior_pipeline(
-        prompt=prompt,
+        prompt_embeds=prompt_embeds,
         height=height,
         width=width,
-        timesteps=default_stage_c_timesteps,
-        negative_prompt=negative_prompt,
+        timesteps=DEFAULT_STAGE_C_TIMESTEPS,
+        negative_prompt_embeds=negative_prompt_embeds,
         guidance_scale=prior_guidance_scale,
         num_images_per_prompt=num_images_per_prompt,
         generator=generator,
@@ -91,7 +97,7 @@ def generate(
     )
 
     if PREVIEW_IMAGES:
-        for _ in range(len(default_stage_c_timesteps)):
+        for _ in range(len(DEFAULT_STAGE_C_TIMESTEPS)):
            r = next(prior_output)
            if isinstance(r, list):
                yield r
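
For context, a minimal standalone sketch of the pattern this commit wires in: Compel builds embedding tensors from weighted prompt strings using the pipeline's own tokenizer and text encoder, and those tensors go to the prior via prompt_embeds / negative_prompt_embeds instead of raw strings. The checkpoint id, dtype, prompts, and generation settings below are illustrative assumptions, not part of the commit.

    # Sketch of the Compel flow, assuming "warp-ai/wuerstchen-prior"
    # as the prior checkpoint and a CUDA device (drop float16 and
    # .to("cuda") to run on CPU).
    import torch
    from compel import Compel
    from diffusers import WuerstchenPriorPipeline
    from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS

    prior_pipeline = WuerstchenPriorPipeline.from_pretrained(
        "warp-ai/wuerstchen-prior", torch_dtype=torch.float16
    ).to("cuda")

    # Compel reuses the pipeline's tokenizer and text encoder, so the
    # embeddings it produces match what the pipeline would compute itself.
    compel_proc = Compel(
        tokenizer=prior_pipeline.tokenizer,
        text_encoder=prior_pipeline.text_encoder,
    )

    # Compel weighting syntax: "word++" up-weights a token, "word--"
    # down-weights it.
    prompt_embeds = compel_proc("an astronaut riding a horse++, photorealistic")
    negative_prompt_embeds = compel_proc("blurry--, low quality")

    prior_output = prior_pipeline(
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        timesteps=DEFAULT_STAGE_C_TIMESTEPS,
        height=1024,
        width=1024,
    )

Note that because the prior now consumes precomputed embeddings, the diff swaps the prompt= and negative_prompt= keywords for prompt_embeds= and negative_prompt_embeds= wholesale rather than adding new arguments alongside them.
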
requirements.txt CHANGED
@@ -4,4 +4,5 @@ gradio==3.42.0
 invisible-watermark==0.2.0
 Pillow==10.0.0
 torch==2.0.1
-transformers==4.32.1
+transformers==4.32.1
+compel