multimodalart (HF staff) committed
Commit b725492
1 Parent(s): 0abf9df

Update with latest text and UI

Files changed (1)
1. app.py +10 -5
app.py CHANGED
@@ -1,4 +1,3 @@
-# !pip install diffusers
 import torch
 from diffusers import DDIMPipeline, DDPMPipeline, PNDMPipeline
 from diffusers import DDIMScheduler, DDPMScheduler, PNDMScheduler
@@ -20,6 +19,7 @@ ddim_pipeline = DDIMPipeline(unet=model, scheduler=ddim_scheduler)
 
 pndm_scheduler = PNDMScheduler.from_config(model_id, subfolder="scheduler")
 pndm_pipeline = PNDMPipeline(unet=model, scheduler=pndm_scheduler)
+
 # run pipeline in inference (sample random noise and denoise)
 def predict(steps=100, seed=42,scheduler="ddim"):
     torch.cuda.empty_cache()
@@ -29,6 +29,8 @@ def predict(steps=100, seed=42,scheduler="ddim"):
     elif(scheduler == "ddpm"):
         image = ddpm_pipeline(generator=generator)["sample"]
     elif(scheduler == "pndm"):
+        if(steps > 100):
+            steps = 100
         image = pndm_pipeline(generator=generator, num_inference_steps=steps)["sample"]
 
     image_processed = image.cpu().permute(0, 2, 3, 1)
@@ -46,9 +48,12 @@ random_seed = random.randint(0, 2147483647)
 gr.Interface(
     predict,
     inputs=[
-        gr.inputs.Slider(1, 100, label='Inference Steps', default=20, step=1),
-        gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed),
-        gr.inputs.Radio(["ddim", "ddpm", "pndm"], default="ddpm",label="Diffusion scheduler")
+        gr.inputs.Slider(1, 1000, label='Inference Steps (ignored for the ddpm scheduler, that diffuses for 1000 steps - limited to 100 steps max for pndm)', default=20, step=1),
+        gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1),
+        gr.inputs.Radio(["ddpm", "ddim", "pndm"], default="ddpm",label="Diffusion scheduler")
     ],
-    outputs=gr.Image(shape=[256,256], type="pil"),
+    outputs=gr.Image(shape=[256,256], type="pil", elem_id="output_image"),
+    css="#output_image{width: 256px}",
+    title="ddpm-celebahq-256 diffusion - 🧨 diffusers library",
+    description="This Spaces contains an unconditional diffusion process for the <a href=\"https://huggingface.co/google/ddpm-celebahq-256\">ddpm-celebahq-256</a> face generator model by <a href=\"https://huggingface.co/google\">Google</a> using the <a href=\"https://github.com/huggingface/diffusers\">diffusers library</a>. You can try the diffusion process not only with the default <code>ddpm</code> scheduler but also with <code>ddim</code> and <code>pndm</code>, showcasing the modularity of the library. <a href=\"https://github.com/huggingface/diffusers/tree/main/src/diffusers/schedulers\">Learn more about schedulers here.</a>",
 ).launch()
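
For readers piecing the changed hunks back together, below is a minimal sketch of the flow the diff touches: one UNet shared by three scheduler/pipeline pairs, the new 100-step clamp for pndm, and the tensor post-processing that feeds the Gradio image output. It only mirrors calls visible in the diff against the older diffusers API this Space targets (dict-style ["sample"] outputs, Scheduler.from_config(model_id, subfolder="scheduler")); the model_id value and the UNet2DModel.from_pretrained line are assumptions, since the model-loading code sits outside the changed hunks, and newer diffusers releases expose a different interface.

# Sketch only: mirrors the calls shown in the diff; newer diffusers
# versions differ (e.g. pipelines no longer return a ["sample"] dict).
import torch
from diffusers import UNet2DModel, DDIMPipeline, DDPMPipeline, PNDMPipeline
from diffusers import DDIMScheduler, DDPMScheduler, PNDMScheduler

model_id = "google/ddpm-celebahq-256"          # model named in the Space description
model = UNet2DModel.from_pretrained(model_id)  # assumption: how `model` is loaded outside the hunks

# One pipeline per scheduler, all sharing the same UNet weights.
ddpm_pipeline = DDPMPipeline(unet=model, scheduler=DDPMScheduler.from_config(model_id, subfolder="scheduler"))
ddim_pipeline = DDIMPipeline(unet=model, scheduler=DDIMScheduler.from_config(model_id, subfolder="scheduler"))
pndm_pipeline = PNDMPipeline(unet=model, scheduler=PNDMScheduler.from_config(model_id, subfolder="scheduler"))

generator = torch.manual_seed(42)

# pndm gets the user-chosen step count, clamped to 100 as in the added hunk;
# ddpm always runs its fixed 1000-step chain, so it takes no step argument.
steps = min(20, 100)  # UI default of 20, capped at pndm's 100-step ceiling
image = pndm_pipeline(generator=generator, num_inference_steps=steps)["sample"]

# Same post-processing as predict(): batch of CHW tensors -> HWC for display.
image_processed = image.cpu().permute(0, 2, 3, 1)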