r3gm committed on
Commit
82a49a3
verified
1 Parent(s): 56e46e1

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -0
app.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from diffusers import AuraFlowPipeline
4
+
5
# Load the AuraFlow text-to-image pipeline once at startup, in half precision,
# and place it on the GPU (the chained .to() avoids the redundant reassignment).
pipe = AuraFlowPipeline.from_pretrained(
    "purplesmartai/pony-v7-base",
    torch_dtype=torch.float16,
).to("cuda")
8
+
9
def generate_image(prompt, negative_prompt, height, width, num_inference_steps, guidance_scale, seed):
    """Generate one image with the module-level AuraFlow pipeline.

    Args:
        prompt: Text description of the desired image.
        negative_prompt: Concepts to steer the generation away from.
        height: Output height in pixels (coerced to int; slider may pass float).
        width: Output width in pixels (coerced to int).
        num_inference_steps: Number of denoising steps (coerced to int).
        guidance_scale: Classifier-free guidance strength.
        seed: RNG seed for reproducible output.

    Returns:
        The first generated PIL image from the pipeline output.
    """
    # gr.Number delivers a float; torch.Generator.manual_seed requires an
    # integer, so cast explicitly to avoid a TypeError at generation time.
    generator = torch.Generator("cuda").manual_seed(int(seed))
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=guidance_scale,
        generator=generator
    ).images[0]
    return image
24
+
25
# Build the Gradio UI: the input widgets mirror generate_image's parameters
# in order, and the single output slot receives the generated image.
input_widgets = [
    gr.Textbox(label="Prompt", value="A cat holding a sign that says hello world"),
    gr.Textbox(label="Negative Prompt", placeholder="Enter prompts to exclude"),
    gr.Slider(label="Height", minimum=256, maximum=2048, step=64, value=1024),
    gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024),
    gr.Slider(label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=50),
    gr.Slider(label="Guidance Scale", minimum=1.0, maximum=20.0, step=0.1, value=5.0),
    gr.Number(label="Seed", value=42),
]

iface = gr.Interface(
    fn=generate_image,
    inputs=input_widgets,
    outputs=gr.Image(label="Generated Image"),
    title="AuraFlow Text-to-Image Generation",
    description="Generate images from text prompts using the AuraFlow model.",
)

# Start the web app (blocks until the server is stopped).
iface.launch()