multimodalart HF staff committed on
Commit
d5a21dc
1 Parent(s): cc54eed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -21
app.py CHANGED
@@ -9,7 +9,7 @@ from diffusers.utils import numpy_to_pil
9
  from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline
10
  from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS
11
 
12
- import user_history
13
 
14
  os.environ['TOKENIZERS_PARALLELISM'] = 'false'
15
 
@@ -113,22 +113,22 @@ def generate(
113
  ).images
114
 
115
  # Save images
116
- for image in decoder_output:
117
- user_history.save_image(
118
- profile=profile,
119
- image=image,
120
- label=prompt,
121
- metadata={
122
- "negative_prompt": negative_prompt,
123
- "seed": seed,
124
- "width": width,
125
- "height": height,
126
- "prior_guidance_scale": prior_guidance_scale,
127
- "decoder_num_inference_steps": decoder_num_inference_steps,
128
- "decoder_guidance_scale": decoder_guidance_scale,
129
- "num_images_per_prompt": num_images_per_prompt,
130
- },
131
- )
132
 
133
  yield decoder_output
134
 
@@ -261,10 +261,10 @@ with gr.Blocks() as demo:
261
  )
262
 
263
  with gr.Blocks(css="style.css") as demo_with_history:
264
- with gr.Tab("App"):
265
- demo.render()
266
- with gr.Tab("Past generations"):
267
- user_history.render()
268
 
269
  if __name__ == "__main__":
270
  demo_with_history.queue(max_size=20).launch()
 
9
  from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline
10
  from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS
11
 
12
+ #import user_history
13
 
14
  os.environ['TOKENIZERS_PARALLELISM'] = 'false'
15
 
 
113
  ).images
114
 
115
  # Save images
116
+ #for image in decoder_output:
117
+ # user_history.save_image(
118
+ # profile=profile,
119
+ # image=image,
120
+ # label=prompt,
121
+ # metadata={
122
+ # "negative_prompt": negative_prompt,
123
+ # "seed": seed,
124
+ # "width": width,
125
+ # "height": height,
126
+ # "prior_guidance_scale": prior_guidance_scale,
127
+ # "decoder_num_inference_steps": decoder_num_inference_steps,
128
+ # "decoder_guidance_scale": decoder_guidance_scale,
129
+ # "num_images_per_prompt": num_images_per_prompt,
130
+ # },
131
+ # )
132
 
133
  yield decoder_output
134
 
 
261
  )
262
 
263
  with gr.Blocks(css="style.css") as demo_with_history:
264
+ #with gr.Tab("App"):
265
+ demo.render()
266
+ #with gr.Tab("Past generations"):
267
+ # user_history.render()
268
 
269
  if __name__ == "__main__":
270
  demo_with_history.queue(max_size=20).launch()