r3gm committed on
Commit
e454cf0
verified
1 Parent(s): b7fef72

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -9
app.py CHANGED
@@ -2,13 +2,18 @@ import gradio as gr
2
  import spaces
3
  import torch
4
  from diffusers import AuraFlowPipeline
 
5
 
6
  pipe = AuraFlowPipeline.from_pretrained("purplesmartai/pony-v7-base", torch_dtype=torch.float16)
7
  pipe = pipe.to("cuda")
8
 
9
  @spaces.GPU()
10
  def generate_image(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, seed):
11
- generator = torch.Generator("cuda").manual_seed(seed)
 
 
 
 
12
  image = pipe(
13
  prompt=prompt,
14
  negative_prompt=negative_prompt,
@@ -16,22 +21,25 @@ def generate_image(prompt, negative_prompt, height, width, num_inference_steps,
16
  width=int(width),
17
  num_inference_steps=int(num_inference_steps),
18
  guidance_scale=guidance_scale,
19
- generator=generator
20
  ).images[0]
21
- return image
22
 
23
  iface = gr.Interface(
24
  fn=generate_image,
25
  inputs=[
26
- gr.Textbox(label="Prompt", value="A cat holding a sign that says hello world"),
27
- gr.Textbox(label="Negative Prompt", placeholder="Enter prompts to exclude"),
28
  gr.Slider(label="Height", minimum=256, maximum=2048, step=64, value=1024),
29
  gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024),
30
- gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=50),
31
- gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, step=0.1, value=5.0),
32
- gr.Number(label="Seed", value=42)
 
 
 
 
33
  ],
34
- outputs=gr.Image(label="Generated Image", format="png"),
35
  title="purplesmartai/pony-v7-base",
36
  description="Generate images from text prompts using the AuraFlow model."
37
  )
 
2
  import spaces
3
  import torch
4
  from diffusers import AuraFlowPipeline
5
+ import random
6
 
# Load the Pony v7 AuraFlow checkpoint in half precision and place it on the GPU.
pipe = AuraFlowPipeline.from_pretrained(
    "purplesmartai/pony-v7-base",
    torch_dtype=torch.float16,
).to("cuda")
 
10
  @spaces.GPU()
11
  def generate_image(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, seed):
12
+ if seed < 0:
13
+ seed = random.randint(0, 2**32 - 1)
14
+
15
+ generator = torch.Generator("cuda").manual_seed(int(seed))
16
+
17
  image = pipe(
18
  prompt=prompt,
19
  negative_prompt=negative_prompt,
 
21
  width=int(width),
22
  num_inference_steps=int(num_inference_steps),
23
  guidance_scale=guidance_scale,
24
+ generator=generator,
25
  ).images[0]
26
+ return image, seed
27
 
28
  iface = gr.Interface(
29
  fn=generate_image,
30
  inputs=[
31
+ gr.Textbox(label="Prompt", value="Score_9, "),
32
+ gr.Textbox(label="Negative Prompt", value="score_6, score_5, score_4, worst quality, low quality, text, deformed, bad hand, blurry, (watermark), extra hands, long ears, ugly, deformed joints, deformed hands, empty background, big ears, narrow face, glowing eyes,", placeholder="Enter prompts to exclude"),
33
  gr.Slider(label="Height", minimum=256, maximum=2048, step=64, value=1024),
34
  gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024),
35
+ gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=30),
36
+ gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, step=0.1, value=3.5),
37
+ gr.Number(label="Seed (set to -1 for random)", value=-1, minimum=-1)
38
+ ],
39
+ outputs=[
40
+ gr.Image(label="Generated Image", format="png"),
41
+ gr.Number(label="Used Seed")
42
  ],
 
43
  title="purplesmartai/pony-v7-base",
44
  description="Generate images from text prompts using the AuraFlow model."
45
  )