sandeepmajumdar committed
Commit 744998e
Parent: abdc685

Update app.py

Files changed (1):
  app.py  +7 -15
app.py CHANGED
@@ -4,28 +4,20 @@ from torch import autocast
 from diffusers import StableDiffusionPipeline
 
 model_id = "CompVis/stable-diffusion-v1-4"
-pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token='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', torch_dtype=torch.float32, low_cpu_mem_usage=True)
-has_cuda = torch.cuda.is_available()
-device = torch.device('cpu' if not has_cuda else 'cuda')
+pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token='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', low_cpu_mem_usage=True)
+device = 'cpu'
 pipe = pipe.to(device)
 
 def convert(prompt):
     samples = 4
-    generator = torch.Generator(device=device)
-    torch.cuda.empty_cache()
-    with autocast("cuda"):
-        images_list = pipe(
-            [prompt] * samples,
-            height=256, width=384,
-            num_inference_steps=50,
-        )
-    images = []
-    for i, image in enumerate(images_list["sample"]):
-        images.append(image)
+    images_list = pipe([prompt] * samples, height=256, width=384, num_inference_steps=50)
+    images = []
+    for i, image in enumerate(images_list["sample"]):
+        images.append(image)
     return images
 
 
 gr.Interface(convert,
     inputs = [gr.inputs.Textbox(label="Enter text")],
-    outputs = [gr.outputs.Image(label="Generated Image")],
+    outputs = [gr.outputs.Gallery(label="Images").style(grid=4)],
     title="Text to Image Generation").launch()
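
For reference, the commit drops the CUDA/autocast path and runs the pipeline on CPU, returning the generated images as a list for a gallery output. The sketch below reconstructs app.py as it stands after this commit; the first three import lines are not shown in the diff and are assumptions (inferred from the hunk context `from torch import autocast` and the use of `gr.Interface`), and the access token is replaced with a placeholder.

# Sketch of app.py after commit 744998e. The imports and the placeholder token
# are assumptions; everything else is taken directly from the diff above.
import gradio as gr
import torch
from torch import autocast  # left over from the removed CUDA path (assumed, unchanged lines 1-3)
from diffusers import StableDiffusionPipeline

model_id = "CompVis/stable-diffusion-v1-4"
# Load the pipeline for CPU inference; no torch_dtype override, so weights stay float32.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    use_auth_token="hf_...",  # placeholder -- supply your own Hugging Face token
    low_cpu_mem_usage=True,
)
device = "cpu"
pipe = pipe.to(device)

def convert(prompt):
    samples = 4
    # Generate four 384x256 images for the same prompt; the diffusers release used
    # here returns a dict whose "sample" key holds the list of PIL images.
    images_list = pipe([prompt] * samples, height=256, width=384, num_inference_steps=50)
    images = []
    for i, image in enumerate(images_list["sample"]):
        images.append(image)
    return images

gr.Interface(convert,
             inputs=[gr.inputs.Textbox(label="Enter text")],
             outputs=[gr.outputs.Gallery(label="Images").style(grid=4)],
             title="Text to Image Generation").launch()

The returned list of PIL images maps one-to-one onto the Gallery component, which is why the output was switched from a single Image to a Gallery with a 4-wide grid.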