Krebzonide committed
Commit 4358e59
Parent: ec78f45

Update app.py

Files changed (1):
  1. app.py +26 -12
app.py CHANGED
@@ -1,7 +1,8 @@
-from diffusers import AutoPipelineForText2Image
-import torch
-import random
+from diffusers import AutoPipelineForText2Image, StableDiffusionImg2ImgPipeline
+import torchvision.transforms.functional as fn
 import gradio as gr
+import random
+import torch
 
 css = """
 .btn-green {
@@ -17,23 +18,36 @@ css = """
 def generate(prompt, samp_steps, batch_size, seed, progress=gr.Progress(track_tqdm=True)):
     if seed < 0:
         seed = random.randint(1,999999)
-    images = pipe(
+    images = txt2img(
         prompt,
-        num_inference_steps=samp_steps,
+        num_inference_steps=1,
         num_images_per_prompt=batch_size,
         guidance_scale=0.0,
         generator=torch.manual_seed(seed),
     ).images
-    return gr.update(value = [(img, f"Image {i+1}") for i, img in enumerate(images)]), seed
+    upscaled_images = fn.resize(images, 1024, InterpolationMode.NEAREST_EXACT)
+    final_images = img2img(
+        prompt,
+        num_inference_steps=samp_steps,
+        guidance_scale=5,
+        generator=torch.manual_seed(seed),
+    ).images
+    return gr.update(value = [(img, f"Image {i+1}") for i, img in enumerate(final_images)]), seed
 
-def set_base_model():
-    pipe = AutoPipelineForText2Image.from_pretrained(
+def set_base_models():
+    txt2img = AutoPipelineForText2Image.from_pretrained(
         "stabilityai/sdxl-turbo",
         torch_dtype = torch.float16,
         variant = "fp16"
     )
-    pipe.to("cuda")
-    return pipe
+    txt2img.to("cuda")
+    img2img = StableDiffusionImg2ImgPipeline.from_pretrained(
+        "Lykon/dreamshaper-8",
+        torch_dtype = torch.float16,
+        variant = "fp16"
+    )
+    img2img.to("cuda")
+    return txt2img, img2img
 
 with gr.Blocks(css=css) as demo:
     with gr.Column():
@@ -41,7 +55,7 @@ with gr.Blocks(css=css) as demo:
         submit_btn = gr.Button("Generate", elem_classes="btn-green")
 
         with gr.Row():
-            sampling_steps = gr.Slider(1, 4, value=1, step=1, label="Sampling steps")
+            sampling_steps = gr.Slider(1, 20, value=5, step=1, label="Sampling steps")
             batch_size = gr.Slider(1, 6, value=1, step=1, label="Batch size")
             seed = gr.Number(label="Seed", value=-1, minimum=-1, precision=0)
             lastSeed = gr.Number(label="Last Seed", value=-1, interactive=False)
@@ -50,5 +64,5 @@ with gr.Blocks(css=css) as demo:
 
     submit_btn.click(generate, [prompt, sampling_steps, batch_size, seed], [gallery, lastSeed], queue=True)
 
-pipe = set_base_model()
+txt2img, img2img = set_base_models()
 demo.launch(debug=True)
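
Note on the committed generate(): it references InterpolationMode without importing it, applies fn.resize to the whole list returned by .images rather than to individual images, and never passes upscaled_images into the img2img pipeline, so the second stage runs without an init image. Below is a minimal sketch, not the committed code, of how the two stages could be wired, assuming the intent is to feed the nearest-neighbor-upscaled SDXL-Turbo outputs into the Dreamshaper img2img pass; the image= wiring, strength=0.5, and prompt replication are illustrative assumptions, not part of this commit.

# Sketch only, not the committed code. Assumes the upscaled Turbo outputs are
# meant to seed the Dreamshaper img2img pass; strength=0.5 is a placeholder.
from torchvision.transforms import InterpolationMode
import torchvision.transforms.functional as fn

def generate(prompt, samp_steps, batch_size, seed, progress=gr.Progress(track_tqdm=True)):
    if seed < 0:
        seed = random.randint(1,999999)
    # Stage 1: single-step SDXL-Turbo text-to-image.
    images = txt2img(
        prompt,
        num_inference_steps=1,
        num_images_per_prompt=batch_size,
        guidance_scale=0.0,
        generator=torch.manual_seed(seed),
    ).images
    # fn.resize works on one PIL image (or tensor) at a time, so upscale each
    # output to 1024 px with nearest-exact interpolation.
    upscaled_images = [
        fn.resize(img, 1024, interpolation=InterpolationMode.NEAREST_EXACT)
        for img in images
    ]
    # Stage 2: refine the upscaled images with the img2img pipeline; the prompt
    # is repeated so the prompt batch matches the image batch.
    final_images = img2img(
        [prompt] * len(upscaled_images),
        image=upscaled_images,
        strength=0.5,  # assumed value; the commit does not set one
        num_inference_steps=samp_steps,
        guidance_scale=5,
        generator=torch.manual_seed(seed),
    ).images
    return gr.update(value=[(img, f"Image {i+1}") for i, img in enumerate(final_images)]), seed

StableDiffusionImg2ImgPipeline requires an init image, so without something like the image= argument above the second stage as committed would raise at call time.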