anzorq committed on
Commit
c6a131d
β€’
1 Parent(s): f0e19af

Update template/app.py

Browse files
Files changed (1) hide show
  1. template/app.py +13 -38
template/app.py CHANGED
@@ -19,29 +19,19 @@ scheduler = DPMSolverMultistepScheduler(
19
  lower_order_final=True,
20
  )
21
 
22
- last_mode = "txt2img"
 
 
 
23
 
24
- def get_pipe(img2img=False):
 
 
 
25
 
26
- if img2img:
27
- pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
28
- model_name,
29
- torch_dtype=torch.float16 if torch.cuda.is_available() else 'auto',
30
- scheduler=scheduler)
31
- else:
32
- pipe = StableDiffusionPipeline.from_pretrained(
33
- model_name,
34
- torch_dtype=torch.float16 if torch.cuda.is_available() else 'auto',
35
- scheduler=scheduler)
36
-
37
- if torch.cuda.is_available():
38
- pipe = pipe.to("cuda")
39
-
40
- return pipe
41
-
42
- pipe = get_pipe()
43
-
44
- device = "GPU πŸ”₯" if torch.cuda.is_available() else "CPU πŸ₯Ά"
45
 
46
  def error_str(error, title="Error"):
47
  return f"""#### {title}
@@ -62,14 +52,6 @@ def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None,
62
 
63
  def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
64
 
65
- global last_mode
66
- global pipe
67
- if last_mode != "txt2img":
68
-
69
- pipe = get_pipe()
70
- last_mode = "txt2img"
71
-
72
- prompt = prompt
73
  result = pipe(
74
  prompt,
75
  negative_prompt = neg_prompt,
@@ -83,17 +65,9 @@ def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
83
 
84
  def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
85
 
86
- global last_mode
87
- global pipe
88
- if last_mode != "img2img":
89
-
90
- pipe = get_pipe(img2img=True)
91
- last_mode = "img2img"
92
-
93
- prompt = prompt
94
  ratio = min(height / img.height, width / img.width)
95
  img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
96
- result = pipe(
97
  prompt,
98
  negative_prompt = neg_prompt,
99
  init_image = img,
@@ -126,6 +100,7 @@ with gr.Blocks(css=css) as demo:
126
  Demo for the <a href="https://huggingface.co/$model_name">$model_name</a> Stable Diffusion model.<br>
127
  Add the following tokens to your prompts for the effect: <b>$prefix</b>.
128
  </p>
 
129
  </div>
130
  """
131
  )
 
19
  lower_order_final=True,
20
  )
21
 
22
+ pipe = StableDiffusionPipeline.from_pretrained(
23
+ model_name,
24
+ torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
25
+ scheduler=scheduler)
26
 
27
+ pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
28
+ model_name,
29
+ torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
30
+ scheduler=scheduler)
31
 
32
+ if torch.cuda.is_available():
33
+ pipe = pipe.to("cuda")
34
+ pipe_i2i = pipe_i2i.to("cuda")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
 
36
  def error_str(error, title="Error"):
37
  return f"""#### {title}
 
52
 
53
  def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
54
 
 
 
 
 
 
 
 
 
55
  result = pipe(
56
  prompt,
57
  negative_prompt = neg_prompt,
 
65
 
66
  def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
67
 
 
 
 
 
 
 
 
 
68
  ratio = min(height / img.height, width / img.width)
69
  img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
70
+ result = pipe_i2i(
71
  prompt,
72
  negative_prompt = neg_prompt,
73
  init_image = img,
 
100
  Demo for the <a href="https://huggingface.co/$model_name">$model_name</a> Stable Diffusion model.<br>
101
  Add the following tokens to your prompts for the effect: <b>$prefix</b>.
102
  </p>
103
+ Running on <b>{"GPU πŸ”₯" if torch.cuda.is_available() else "CPU πŸ₯Ά"}</b>
104
  </div>
105
  """
106
  )