AP123 committed on
Commit
ab79cec
1 Parent(s): 4d346de

Update app.py

Files changed (1)
  1. app.py +17 -18
app.py CHANGED
@@ -2,39 +2,38 @@ import gradio as gr
  import torch
  from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler
  from huggingface_hub import hf_hub_download
- from PIL import Image
- import io
  import spaces

- @spaces.GPU
- def generate_image(prompt):
-     base = "stabilityai/stable-diffusion-xl-base-1.0"
-     repo = "ByteDance/SDXL-Lightning"
-     ckpt = "sdxl_lightning_4step_unet.pth"
+ # Constants
+ base = "stabilityai/stable-diffusion-xl-base-1.0"
+ repo = "ByteDance/SDXL-Lightning"
+ ckpt = "sdxl_lightning_4step_unet.pth"

-     # Load model
-     pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16").to("cuda")
-     pipe.unet.load_state_dict(torch.load(hf_hub_download(repo, ckpt), map_location="cuda"))
-     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
+ # Model Initialization
+ pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16").to("cuda")
+ pipe.unet.load_state_dict(torch.load(hf_hub_download(repo, ckpt), map_location="cuda"))
+ pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")

-     # Generate image
+ # Function
+ @spaces.GPU
+ def generate_image(prompt):
+     # Generate image using the preloaded model
      image = pipe(prompt, num_inference_steps=4, guidance_scale=0).images[0]
-
      return image

- #gradio
+ # Gradio Interface
  description = """
  This demo utilizes the SDXL-Lightning model by ByteDance, which is a fast text-to-image generative model capable of producing high-quality images in 4 steps.
  As a community effort, this demo was put together by AngryPenguin. Link to model: https://huggingface.co/ByteDance/SDXL-Lightning
  """

  demo = gr.Interface(
-     fn=generate_image,
-     inputs="text",
-     outputs="image",
+     fn=generate_image,
+     inputs="text",
+     outputs="image",
      title="Text-to-Image with SDXL Lightning ⚡",
      description=description
  )

  demo.queue(max_size=20)
- demo.launch()
+ demo.launch()
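The net effect of the refactor is that the SDXL-Lightning pipeline is now built once at module import, when the Space starts, and the @spaces.GPU decorator only wraps the per-request generation call instead of reloading the model for every prompt. As a rough illustration, here is a minimal local smoke test of the updated generate_image function; this is a sketch, assuming a CUDA GPU and the same dependencies, and the file name test_generate.py and the example prompt are made up for illustration (outside a ZeroGPU Space the spaces.GPU decorator should act as a simple pass-through).

# test_generate.py (hypothetical helper, not part of the commit):
# importing app builds the SDXL-Lightning pipeline once on CUDA,
# exactly as the Space does at startup.
from app import generate_image

if __name__ == "__main__":
    # Any text prompt works; 4 inference steps with guidance_scale=0 is the
    # SDXL-Lightning 4-step configuration already hard-coded in app.py.
    image = generate_image("a lighthouse on a cliff at sunrise, photorealistic")
    image.save("sample.png")  # pipe(...).images[0] is a PIL.Image
    print("saved sample.png", image.size)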