Manjushri committed
Commit b369717
1 Parent(s): fdcb92d

Update app.py

Files changed (1): app.py (+2 -19)
app.py CHANGED
@@ -5,43 +5,26 @@ import numpy as np
 from diffusers import DiffusionPipeline
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
+
 if torch.cuda.is_available():
-    #PYTORCH_CUDA_ALLOC_CONF={'max_split_size_mb': 6000}
     torch.cuda.max_memory_allocated(device=device)
     torch.cuda.empty_cache()
     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
     pipe.enable_xformers_memory_efficient_attention()
     pipe = pipe.to(device)
-    #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
     torch.cuda.empty_cache()
-    #refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16")
-    #refiner.enable_xformers_memory_efficient_attention()
-    #refiner.enable_sequential_cpu_offload()
-    #refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
 else:
     pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
     pipe = pipe.to(device)
-    #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
-    #refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True)
-    #refiner = refiner.to(device)
-    #refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
-
+
 def genie (prompt, steps, seed):
     generator = np.random.seed(0) if seed == 0 else torch.manual_seed(seed)
     int_image = pipe(prompt=prompt, generator=generator, num_inference_steps=steps, guidance_scale=0.0).images[0]
-    #image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image, denoising_start=high_noise_frac).images[0]
     return int_image
 
 gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
-    #gr.Textbox(label='What you Do Not want the AI to generate.'),
-    #gr.Slider(512, 1024, 768, step=128, label='Height'),
-    #gr.Slider(512, 1024, 768, step=128, label='Width'),
-    #gr.Slider(1, 15, 10, label='Guidance Scale'),
     gr.Slider(1, maximum=5, value=2, step=1, label='Number of Iterations'),
     gr.Slider(minimum=0, step=1, maximum=999999999999999999, randomize=True),
-    #gr.Textbox(label='Embedded Prompt'),
-    #gr.Textbox(label='Embedded Negative Prompt'),
-    #gr.Slider(minimum=.7, maximum=.99, value=.95, step=.01, label='Refiner Denoise Start %')
     ],
     outputs='image',
     title="Stable Diffusion Turbo CPU or GPU",
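
A side note on the unchanged genie() body above: np.random.seed(0) returns None, so when seed == 0 the pipeline is handed generator=None and silently falls back to torch's global RNG state. A minimal sketch of a deterministic alternative, assuming the same device variable as in the diff (make_generator is a hypothetical helper, not part of this commit):

import torch

def make_generator(seed: int, device: str) -> torch.Generator:
    # Hypothetical helper, not part of this commit: treat seed == 0 as
    # "pick a fresh random seed" rather than passing generator=None.
    if seed == 0:
        seed = torch.seed()  # chooses a non-deterministic seed and returns it
    return torch.Generator(device=device).manual_seed(seed)

# Inside genie(), under the same assumptions:
#   generator = make_generator(seed, device)
#   int_image = pipe(prompt=prompt, generator=generator,
#                    num_inference_steps=steps, guidance_scale=0.0).images[0]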
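
For context on the guidance_scale=0.0 that the commit keeps: SDXL-Turbo is distilled to run without classifier-free guidance, so its model card recommends a guidance scale of 0.0 and only a few denoising steps, which matches the 1-5 iteration slider above. A minimal standalone sketch of the GPU branch (the prompt text and output path are illustrative, and xformers is assumed to be installed):

import torch
from diffusers import DiffusionPipeline

# Sketch mirroring the committed GPU branch; assumes a CUDA device.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/sdxl-turbo",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
).to("cuda")
pipe.enable_xformers_memory_efficient_attention()

# SDXL-Turbo skips classifier-free guidance, so guidance_scale stays 0.0
# and one to four steps already give a usable image.
image = pipe(
    prompt="a watercolor fox in a snowy forest",  # illustrative prompt
    num_inference_steps=2,
    guidance_scale=0.0,
).images[0]
image.save("turbo_sample.png")  # illustrative output path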