Amanpandey04 committed
Commit de4f0cf · verified · 1 parent: 84f6bda

Create app.py

Files changed (1):
  app.py +174 -0
app.py ADDED
@@ -0,0 +1,174 @@
import streamlit as st
import torch
import numpy as np
from diffusers import AutoPipelineForText2Image
from huggingface_hub import login
from PIL import Image

# Set page config
st.set_page_config(
    page_title="FLUX.1 Image Generator",
    page_icon="🎨",
    layout="wide"
)

# Custom CSS to improve the app's appearance
st.markdown("""
<style>
.stProgress > div > div > div > div {
    background-color: #1f77b4;
}
</style>
""", unsafe_allow_html=True)

class StreamlitProgressCallback:
    """Streamlit progress reporter for the diffusers step-end callback hook."""

    def __init__(self, progress_bar, total_steps):
        self.progress_bar = progress_bar
        self.total_steps = total_steps

    def __call__(self, pipe, step, timestep, callback_kwargs):
        # Called once per denoising step; diffusers expects the kwargs dict back.
        self.progress_bar.progress((step + 1) / self.total_steps)
        return callback_kwargs

@st.cache_resource
def setup_flux(hf_token):
    """
    Set up FLUX.1 with proper authentication and GPU optimization.
    """
    if not torch.cuda.is_available():
        st.warning("⚠️ No GPU detected. Processing will be slow on CPU.")
        return None

    # Login to Hugging Face
    login(token=hf_token)

    try:
        model_id = "black-forest-labs/FLUX.1-dev"
        with st.spinner(f"Loading {model_id}..."):
            pipe = AutoPipelineForText2Image.from_pretrained(
                model_id,
                torch_dtype=torch.bfloat16,  # fp32 weights will not fit on most single GPUs
                token=hf_token
            )

        # Move to GPU and reduce peak memory
        pipe = pipe.to("cuda")
        pipe.enable_attention_slicing()
        pipe.enable_vae_slicing()

        try:
            pipe.enable_xformers_memory_efficient_attention()
            st.success("✅ Model loaded successfully with xformers optimization!")
        except Exception:
            st.info("ℹ️ xformers not available. Install with: pip install xformers")

        return pipe

    except Exception as e:
        st.error(f"Error loading FLUX.1: {str(e)}")
        return None

def process_image(image):
    """Normalize a generated image to an 8-bit PIL image."""
    img_array = np.array(image)
    if np.issubdtype(img_array.dtype, np.floating):
        # Float outputs are in [0, 1]; scale them to 8-bit.
        img_array = (np.clip(img_array, 0, 1) * 255).round().astype(np.uint8)
    # uint8 arrays (the usual PIL case) pass through unchanged; clipping them
    # to [0, 1] first, as the original draft did, would destroy the image.
    return Image.fromarray(img_array)

def generate_image(pipe, prompt, num_inference_steps=30):
    """Generate an image using the initialized pipeline."""
    if pipe is None:
        st.error("Error: Pipeline not properly initialized")
        return None

    # Clear CUDA cache before generation
    if torch.cuda.is_available():
        torch.cuda.empty_cache()

    progress_bar = st.progress(0)
    status_text = st.empty()

    # Wire the progress reporter into the denoising loop via the step-end
    # callback hook; assigning to pipe.callback is never read by the pipeline.
    progress_callback = StreamlitProgressCallback(progress_bar, num_inference_steps)

    try:
        status_text.text("🎨 Generating image...")
        # Weights are already bfloat16, so torch.autocast is unnecessary here.
        image = pipe(
            prompt=prompt,
            num_inference_steps=num_inference_steps,
            guidance_scale=3.5,  # FLUX.1-dev default; 7.5 is a Stable Diffusion value
            callback_on_step_end=progress_callback,
        ).images[0]

        # Process the image
        image = process_image(image)
        status_text.text("✨ Generation complete!")
        progress_bar.progress(1.0)
        return image

    except Exception as e:
        st.error(f"Error during image generation: {str(e)}")
        return None
    finally:
        # Clean up GPU memory
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

def main():
    st.title("🎨 FLUX.1 Image Generator")
    st.markdown("Generate amazing images using the FLUX.1 model from Black Forest Labs!")

    # Sidebar for configuration
    with st.sidebar:
        st.header("Configuration")
        hf_token = st.text_input("HuggingFace Token", type="password",
                                 help="Enter your HuggingFace token to access the model")

        num_steps = st.slider("Number of inference steps",
                              min_value=10, max_value=50, value=30,
                              help="More steps generally means better quality but slower generation")

        st.markdown("---")
        st.markdown("### Tips for better prompts:")
        st.markdown("""
        - Be specific and descriptive
        - Include details about style, lighting, and mood
        - Mention artistic mediums or techniques
        """)

    # Main content area
    prompt = st.text_area("Enter your prompt",
                          "A serene landscape with mountains and a lake at sunset",
                          help="Describe the image you want to generate")

    col1, col2 = st.columns([1, 4])
    with col1:
        generate_button = st.button("Generate Image", type="primary")

    # Initialize session state for storing generated images
    if 'generated_images' not in st.session_state:
        st.session_state.generated_images = []

    if generate_button and not hf_token:
        st.warning("Please enter your HuggingFace token in the sidebar first.")

    if generate_button and hf_token:
        pipe = setup_flux(hf_token)
        if pipe is not None:
            image = generate_image(pipe, prompt, num_steps)
            if image is not None:
                # Store the image alongside its prompt
                st.session_state.generated_images.append({
                    'image': image,
                    'prompt': prompt
                })

    # Display generated images, newest first
    if st.session_state.generated_images:
        st.markdown("---")
        st.header("Generated Images")
        for item in reversed(st.session_state.generated_images):
            st.markdown(f"**Prompt:** {item['prompt']}")
            st.image(item['image'], use_container_width=True)
            st.markdown("---")

if __name__ == "__main__":
    main()
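
For quick verification of the progress hook outside Streamlit, here is a minimal sketch of the same callback_on_step_end contract. It assumes a recent diffusers release with FLUX support, a CUDA GPU, and an environment already authenticated for the gated FLUX.1-dev repo (e.g. via huggingface-cli login); the prompt, step count, and output file name are placeholders, and pipe.num_timesteps is assumed to be set by the pipeline at call time.

# Minimal sketch: exercising the diffusers step-end callback without Streamlit.
import torch
from diffusers import AutoPipelineForText2Image

def print_progress(pipe, step, timestep, callback_kwargs):
    # Called once per denoising step; diffusers expects the kwargs dict back.
    print(f"step {step + 1}/{pipe.num_timesteps}")  # num_timesteps: assumed pipeline attribute
    return callback_kwargs

pipe = AutoPipelineForText2Image.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

image = pipe(
    "A serene landscape with mountains and a lake at sunset",  # placeholder prompt
    num_inference_steps=10,
    guidance_scale=3.5,
    callback_on_step_end=print_progress,
).images[0]
image.save("landscape.png")  # placeholder output path

This hook replaces the older callback=/callback_steps= arguments, which diffusers has deprecated; assigning a callable to pipe.callback, as the original draft did, is never read by the pipeline.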