AiCoderv2 committed on
Commit
ed4033a
·
verified ·
1 Parent(s): 205149a

Deploy Gradio app with multiple files

Browse files
Files changed (5) hide show
  1. app.py +31 -0
  2. config.py +7 -0
  3. models.py +49 -0
  4. requirements.txt +10 -0
  5. utils.py +8 -0
app.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from models import load_model, generate_image

# Load the model once at import time so every request reuses the same
# pipeline instead of paying the model-load cost per generation.
pipe = load_model()
6
+
7
def text_to_image(prompt):
    """Gradio callback: generate an image for *prompt* with the global pipeline.

    Args:
        prompt: user-entered text prompt from the UI textbox.

    Returns:
        The generated image (as produced by ``generate_image``).

    Raises:
        gr.Error: when the prompt is empty or whitespace-only, so the user
            gets immediate UI feedback instead of wasting a GPU call.
    """
    # Guard against blank input before touching the GPU.
    if not prompt or not prompt.strip():
        raise gr.Error("Prompt cannot be empty")
    return generate_image(pipe, prompt)
9
+
10
# Build the UI declaratively: two headers, a prompt box, a button, and the
# output image, with the button wired to the generation callback.
with gr.Blocks(title="AI Text-to-Image Generator") as demo:
    gr.Markdown("# AI Text-to-Image Generator\nBuilt with [anycoder](https://huggingface.co/spaces/akhaliq/anycoder)")
    gr.Markdown("Generate images from text prompts using FLUX.1-dev model.")

    # Input widget: multi-line prompt entry.
    prompt_input = gr.Textbox(label="Enter your prompt", placeholder="A beautiful landscape with mountains and a lake...", lines=3)

    # Trigger and output widgets.
    generate_btn = gr.Button("Generate Image")
    output_image = gr.Image(label="Generated Image")

    # Clicking the button runs the text-to-image callback.
    generate_btn.click(fn=text_to_image, inputs=[prompt_input], outputs=[output_image])

if __name__ == "__main__":
    demo.launch()
config.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
# Configuration constants for the text-to-image app.

# Hugging Face model repository id.
MODEL_ID = 'black-forest-labs/FLUX.1-dev'

# Diffusion sampling defaults.
DEFAULT_NUM_STEPS = 20
DEFAULT_GUIDANCE_SCALE = 3.5

# Output image dimensions in pixels.
DEFAULT_HEIGHT = 512
DEFAULT_WIDTH = 512

# Upper bound on accepted prompt length, in characters.
MAX_PROMPT_LENGTH = 500
models.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import spaces
import torch
from diffusers import DiffusionPipeline

# NOTE(review): this duplicates MODEL_ID in config.py — keep the two in sync
# (or import it from config) if the model is ever changed.
MODEL_ID = 'black-forest-labs/FLUX.1-dev'
6
+
7
+ # Compile the model ahead-of-time for optimal performance
8
# Compile the model ahead-of-time for optimal performance
@spaces.GPU(duration=1500)  # Maximum duration for compilation
def compile_transformer():
    """Ahead-of-time compile the pipeline's transformer on GPU.

    Loads the pipeline, runs one example prompt while capturing the
    transformer's call arguments, exports the transformer with
    ``torch.export``, and returns the AoT-compiled artifact produced by
    ``spaces.aoti_compile``.

    Returns:
        The compiled transformer artifact, suitable for
        ``spaces.aoti_apply``.
    """
    pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
    pipe.to('cuda')

    # Capture example inputs for AoT compilation: running the pipeline once
    # records the exact args/kwargs the transformer is called with.
    with spaces.aoti_capture(pipe.transformer) as call:
        pipe("arbitrary example prompt")

    # Export the model with the captured call signature.
    exported = torch.export.export(
        pipe.transformer,
        args=call.args,
        kwargs=call.kwargs,
    )

    # Compile the exported model
    return spaces.aoti_compile(exported)
26
+
27
+ # Load and compile the model
28
def load_model():
    """Build the inference pipeline with the AoT-compiled transformer applied.

    Returns:
        A ``DiffusionPipeline`` on CUDA whose transformer has been replaced
        by the ahead-of-time compiled version from ``compile_transformer``.
    """
    pipeline = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16)
    pipeline.to('cuda')

    # Swap the eager transformer for the AoT-compiled one.
    spaces.aoti_apply(compile_transformer(), pipeline.transformer)

    return pipeline
37
+
38
@spaces.GPU  # GPU inference for fast generation
def generate_image(pipe, prompt, num_inference_steps=20, guidance_scale=3.5,
                   height=512, width=512):
    """Generate a single image from a text prompt.

    The sampling settings were previously hard-coded; they are now
    backward-compatible keyword parameters whose defaults match the
    DEFAULT_* constants in config.py.

    Args:
        pipe: a loaded ``DiffusionPipeline`` (see ``load_model``).
        prompt: text prompt to render.
        num_inference_steps: denoising steps; fewer steps trade quality
            for speed (default 20).
        guidance_scale: classifier-free guidance strength (default 3.5).
        height: output image height in pixels (default 512).
        width: output image width in pixels (default 512).

    Returns:
        The first generated image from the pipeline output.
    """
    image = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        height=height,
        width=width,
    ).images[0]

    return image
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ git+https://github.com/huggingface/diffusers
3
+ torch
4
+ accelerate
5
+ transformers
6
+ spaces
7
+ safetensors
8
+ numpy
9
+ Pillow
10
+ huggingface_hub
utils.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# Utility functions for the text-to-image generator
def validate_prompt(prompt, max_length=500):
    """Validate a user-supplied prompt.

    The length limit was previously hard-coded at 500; it is now a
    backward-compatible parameter (default matches MAX_PROMPT_LENGTH
    in config.py).

    Args:
        prompt: the raw prompt string (may be None or empty).
        max_length: maximum accepted length in characters (default 500).

    Returns:
        A ``(ok, message)`` tuple: ``ok`` is True when the prompt is valid,
        otherwise False with ``message`` explaining why.
    """
    # Reject None, empty, and whitespace-only prompts.
    if not prompt or not prompt.strip():
        return False, "Prompt cannot be empty"
    if len(prompt) > max_length:
        return False, f"Prompt is too long (max {max_length} characters)"
    return True, ""