NikeZoldyck committed
Commit
4650aad
1 Parent(s): 337e469

SD diffuser

Files changed (1)
  1. app.py +15 -14
app.py CHANGED
@@ -25,17 +25,19 @@ model_id = "CompVis/stable-diffusion-v1-4"
 device = "cuda" if torch.cuda.is_available() else "cpu"
 context = autocast if device == "cuda" else nullcontext
 
-pipe = StableDiffusionPipeline.from_pretrained(model_id,use_auth_token=token).to(device)
+pipe = StableDiffusionPipeline.from_pretrained(model_id,torch_dtype=torch.float16,use_auth_token=token).to(device)
 
 
-def infer_original(prompt,samples):
+def infer(prompt,samples):
+    images=[]
     with context(device):
-        images = pipe(samples*[prompt], guidance_scale=7.5).images
+        for _ in range(samples):
+            image = pipe([prompt],num_inference_steps=50, guidance_scale=7.5, height=400, width=400).images[0]
+            images.append(image)
     return images
 
 
 
-
 # Apply the transformations needed
 
 
@@ -48,14 +50,13 @@ def select_input(input_img,webcm_img):
     return img
 
 
-def infer(prompt,samples):
-    images= []
-    selections = ["Img_{}".format(str(i+1).zfill(2)) for i in range(samples)]
-    with context(device):
-        for _ in range(samples):
-            back_img = st.stableDiffusionAPICall(prompt)
-            images.append(back_img)
-    return images
+# def infer(prompt,samples):
+#     images= []
+#     with context(device):
+#         for _ in range(samples):
+#             back_img = st.stableDiffusionAPICall(prompt)
+#             images.append(back_img)
+#     return images
 
 
 # def newstyleimage(choice):
@@ -151,8 +152,8 @@ with demo:
 
     gallery = gr.Gallery(label="Generated images", show_label=True).style(grid=(1, 3), height="auto")
    # image_options = gr.Radio(label="Pick", interactive=True, choices=None, type="value")
-    text.submit(infer_original, inputs=[text, samples], outputs=gallery)
-    btn.click(infer_original, inputs=[text, samples], outputs=gallery, show_progress=True, status_tracker=None)
+    text.submit(infer, inputs=[text, samples], outputs=gallery)
+    btn.click(infer, inputs=[text, samples], outputs=gallery, show_progress=True, status_tracker=None)
 
 
     # Second Row - Backgrounds
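
For context, below is a minimal, self-contained sketch of the generation path that app.py has after this commit: the diffusers StableDiffusionPipeline is loaded in float16 and infer runs one pipeline call per requested sample at 400x400. Reading the access token from an HF_TOKEN environment variable and the __main__ usage at the bottom are assumptions for illustration only; in app.py the token and the Gradio wiring are defined elsewhere.

# Sketch of the post-commit generation path (not the full app.py).
# Assumption: the Hugging Face access token comes from the HF_TOKEN
# environment variable; app.py defines `token` elsewhere.
import os
from contextlib import nullcontext

import torch
from torch import autocast
from diffusers import StableDiffusionPipeline

model_id = "CompVis/stable-diffusion-v1-4"
device = "cuda" if torch.cuda.is_available() else "cpu"
# autocast only applies on CUDA; fall back to a no-op context on CPU.
context = autocast if device == "cuda" else nullcontext

# float16 weights assume a CUDA device; on CPU you would drop torch_dtype.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    use_auth_token=os.environ["HF_TOKEN"],
).to(device)


def infer(prompt, samples):
    """Generate `samples` images, one pipeline call per image."""
    images = []
    with context(device):
        for _ in range(samples):
            # Each call returns an output whose .images is a list of PIL
            # images; a single-prompt call yields one image per call.
            image = pipe(
                [prompt],
                num_inference_steps=50,
                guidance_scale=7.5,
                height=400,
                width=400,
            ).images[0]
            images.append(image)
    return images


if __name__ == "__main__":
    # Example usage: save two samples for a prompt.
    for i, img in enumerate(infer("an astronaut riding a horse", 2)):
        img.save(f"sample_{i}.png")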