Ryukijano committed on
Commit
f866cb0
1 Parent(s): b7a692e

Upload 4 files

Files changed (4)
  1. app.py +171 -0
  2. custom_pipeline.py +180 -0
  3. readme.md +10 -0
  4. requirements.txt +10 -0
app.py ADDED
@@ -0,0 +1,171 @@
+ import gradio as gr
+ import numpy as np
+ import random
+ import torch
+ import time
+ from diffusers import AutoencoderTiny
+ from custom_pipeline import FluxWithCFGPipeline
+
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.benchmark = True
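+ # TF32 trades a little matmul precision for a large speedup on Ampere+ GPUs;
+ # cudnn.benchmark autotunes kernels for fixed input shapes.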
+
+ # Constants
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 2048
+ DEFAULT_WIDTH = 1024
+ DEFAULT_HEIGHT = 1024
+ DEFAULT_INFERENCE_STEPS = 1
+
+ # Device and model setup
+ dtype = torch.bfloat16
+ pipe = FluxWithCFGPipeline.from_pretrained(
+     "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
+ )
+ # Swap in the tiny VAE (taef1) for much faster latent decoding
+ pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
+ pipe.to("cuda")
+ # Load a realism LoRA, fuse it into the base weights, then drop the adapter state
+ pipe.load_lora_weights('hugovntr/flux-schnell-realism', weight_name='schnell-realism_v2.3.safetensors', adapter_name="better")
+ pipe.set_adapters(["better"], adapter_weights=[1.0])
+ pipe.fuse_lora(adapter_names=["better"], lora_scale=1.0)
+ pipe.unload_lora_weights()
+ # FLUX is transformer-based (there is no pipe.unet); PyTorch 2 SDPA is used by
+ # default, and the generic xformers processor is incompatible with FLUX's joint
+ # attention, so it is not enabled here
+ pipe.transformer.to(memory_format=torch.channels_last)
+ pipe.vae.to(memory_format=torch.channels_last)
+
+ pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead")
+ pipe.text_encoder = torch.compile(pipe.text_encoder, mode="reduce-overhead")
+
+ torch.cuda.empty_cache()
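+ # Note: with mode="reduce-overhead", torch.compile performs compilation and CUDA
+ # graph capture on the first call, so the first generation is noticeably slower
+ # than subsequent ones.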
+
+ # Inference function
+ def generate_image(prompt, seed=24, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, randomize_seed=False, num_inference_steps=2, progress=gr.Progress(track_tqdm=True)):
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     generator = torch.Generator().manual_seed(int(float(seed)))
+
+     start_time = time.time()
+
+     # Only the final image in the sequence is generated
+     img = pipe.generate_images(
+         prompt=prompt,
+         width=width,
+         height=height,
+         num_inference_steps=num_inference_steps,
+         generator=generator
+     )
+     latency = f"Latency: {(time.time()-start_time):.2f} seconds"
+     return img, seed, latency
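+ # Illustrative usage (hypothetical prompt and values):
+ #   img, seed, latency = generate_image("a red fox in the snow", seed=42)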
+
+ # Example prompts
+ examples = [
+     "a tiny astronaut hatching from an egg on the moon",
+     "a cute white cat holding a sign that says hello world",
+     "an anime illustration of Steve Jobs",
+     "Create image of Modern house in minecraft style",
+     "photo of a woman on the beach, shot from above. She is facing the sea, while wearing a white dress. She has long blonde hair",
+     "Selfie photo of a wizard with long beard and purple robes, he is apparently in the middle of Tokyo. Probably taken from a phone.",
+     "Photo of a young woman with long, wavy brown hair tied in a bun and glasses. She has a fair complexion and is wearing subtle makeup, emphasizing her eyes and lips. She is dressed in a black top. The background appears to be an urban setting with a building facade, and the sunlight casts a warm glow on her face.",
+ ]
+
+ # --- Gradio UI ---
+ with gr.Blocks() as demo:
+     with gr.Column(elem_id="app-container"):
+         gr.Markdown("# 🎨 Realtime FLUX Image Generator")
+         gr.Markdown("Generate stunning images in real time with a modified Flux.Schnell pipeline.")
+         gr.Markdown("<span style='color: red;'>Note: Generation occasionally stalls or stops for unknown reasons. If that happens, refresh the page.</span>")
+
+         with gr.Row():
+             with gr.Column(scale=2.5):
+                 result = gr.Image(label="Generated Image", show_label=False, interactive=False)
+             with gr.Column(scale=1):
+                 prompt = gr.Textbox(
+                     label="Prompt",
+                     placeholder="Describe the image you want to generate...",
+                     lines=3,
+                     show_label=False,
+                     container=False,
+                 )
+                 generateBtn = gr.Button("🖼️ Generate Image")
+                 enhanceBtn = gr.Button("🚀 Enhance Image")
+
+                 # gr.Column does not take a title argument; an Accordion gives the section its label
+                 with gr.Accordion("Advanced Options", open=True):
+                     with gr.Row():
+                         realtime = gr.Checkbox(label="Realtime Toggler", info="If enabled, uses more GPU but generates images in real time.", value=False)
+                         latency = gr.Textbox(label="Latency")
+                     with gr.Row():
+                         seed = gr.Number(label="Seed", value=42)
+                         randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
+                     with gr.Row():
+                         width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
+                         height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
+                         num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=4, step=1, value=DEFAULT_INFERENCE_STEPS)
+
+         with gr.Row():
+             gr.Markdown("### 🌟 Inspiration Gallery")
+         with gr.Row():
+             gr.Examples(
+                 examples=examples,
+                 fn=generate_image,
+                 inputs=[prompt],
+                 outputs=[result, seed, latency],
+                 cache_examples="lazy"
+             )
+
+     enhanceBtn.click(
+         fn=generate_image,
+         inputs=[prompt, seed, width, height],
+         outputs=[result, seed, latency],
+         show_progress="full",
+         queue=False,
+         concurrency_limit=None
+     )
+
+     generateBtn.click(
+         fn=generate_image,
+         inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
+         outputs=[result, seed, latency],
+         show_progress="full",
+         api_name="RealtimeFlux",
+         queue=False
+     )
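+     # queue=False runs these click events outside Gradio's request queue,
+     # lowering per-event latency at the cost of queue-based scheduling.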
+
+     def update_ui(realtime_enabled):
+         return {
+             prompt: gr.update(interactive=True),
+             generateBtn: gr.update(visible=not realtime_enabled)
+         }
+
+     realtime.change(
+         fn=update_ui,
+         inputs=[realtime],
+         outputs=[prompt, generateBtn],
+         queue=False,
+         concurrency_limit=None
+     )
+
+     def realtime_generation(*args):
+         if args[0]:  # If realtime is enabled
+             # generate_image returns a plain tuple, not a generator, so call it directly
+             return generate_image(*args[1:])
+
+     prompt.submit(
+         fn=generate_image,
+         inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
+         outputs=[result, seed, latency],
+         show_progress="full",
+         queue=False,
+         concurrency_limit=None
+     )
+
+     for component in [prompt, width, height, num_inference_steps]:
+         component.input(
+             fn=realtime_generation,
+             inputs=[realtime, prompt, seed, width, height, randomize_seed, num_inference_steps],
+             outputs=[result, seed, latency],
+             show_progress="hidden",
+             trigger_mode="always_last",
+             queue=False,
+             concurrency_limit=None
+         )
+
+ # Launch the app
+ demo.launch()
custom_pipeline.py ADDED
@@ -0,0 +1,180 @@
+ import torch
+ import numpy as np
+ from diffusers import FluxPipeline, FlowMatchEulerDiscreteScheduler
+ from typing import Any, Dict, List, Optional, Tuple, Union
+
+ # Enable TF32 and cuDNN autotuning
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.allow_tf32 = True
+ torch.backends.cudnn.benchmark = True
+
+ # Constants with optimized values
+ BASE_SEQ_LEN = 256
+ MAX_SEQ_LEN = 4096
+ BASE_SHIFT = 0.5
+ MAX_SHIFT = 1.2
+ BATCH_SIZE = 4  # Optimal batch size for A100
+
+ @torch.jit.script
+ def calculate_timestep_shift(image_seq_len: int) -> float:
+     """Optimized timestep shift calculation using TorchScript"""
+     m = (MAX_SHIFT - BASE_SHIFT) / (MAX_SEQ_LEN - BASE_SEQ_LEN)
+     b = BASE_SHIFT - m * BASE_SEQ_LEN
+     return image_seq_len * m + b
+
27
+ def prepare_timesteps(
28
+ scheduler: FlowMatchEulerDiscreteScheduler,
29
+ num_inference_steps: Optional[int] = None,
30
+ device: Optional[Union[str, torch.device]] = None,
31
+ timesteps: Optional[List[int]] = None,
32
+ sigmas: Optional[List[float]] = None,
33
+ mu: Optional[float] = None,
34
+ ) -> (torch.Tensor, int):
35
+ """Optimized timestep preparation with CUDA graphs support"""
36
+ if device is None:
37
+ device = torch.device("cuda")
38
+
39
+ # Pre-calculate timesteps using CUDA graph
40
+ static_input = torch.tensor([], device=device)
41
+ g = torch.cuda.CUDAGraph()
42
+
43
+ with torch.cuda.graph(g):
44
+ if timesteps is not None:
45
+ scheduler.set_timesteps(timesteps=timesteps, device=device)
46
+ elif sigmas is not None:
47
+ scheduler.set_timesteps(sigmas=sigmas, device=device)
48
+ else:
49
+ scheduler.set_timesteps(num_inference_steps, device=device, mu=mu)
50
+
51
+ timesteps = scheduler.timesteps.to(memory_format=torch.channels_last)
52
+ num_inference_steps = len(timesteps)
53
+
54
+ return timesteps, num_inference_steps
55
+
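+ # Illustrative call (values hypothetical):
+ #   timesteps, steps = prepare_timesteps(pipe.scheduler, num_inference_steps=4,
+ #                                        device="cuda", mu=1.2)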
+ # FLUX pipeline class
+ class FluxWithCFGPipeline(FluxPipeline):
+     """
+     Extends FluxPipeline with a streamlined generate_images method that runs the
+     denoising loop and returns the final decoded image.
+     """
+     @torch.inference_mode()
+     def generate_images(
+         self,
+         prompt: Union[str, List[str]] = None,
+         prompt_2: Optional[Union[str, List[str]]] = None,
+         height: Optional[int] = None,
+         width: Optional[int] = None,
+         num_inference_steps: int = 4,
+         timesteps: List[int] = None,
+         guidance_scale: float = 3.5,
+         num_images_per_prompt: Optional[int] = 1,
+         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         prompt_embeds: Optional[torch.FloatTensor] = None,
+         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+         max_sequence_length: int = 300,
+     ):
+         """Runs the denoising loop and returns the final decoded image."""
+         height = height or self.default_sample_size * self.vae_scale_factor
+         width = width or self.default_sample_size * self.vae_scale_factor
+
+         # 1. Check inputs
+         self.check_inputs(
+             prompt,
+             prompt_2,
+             height,
+             width,
+             prompt_embeds=prompt_embeds,
+             pooled_prompt_embeds=pooled_prompt_embeds,
+             max_sequence_length=max_sequence_length,
+         )
+
+         self._guidance_scale = guidance_scale
+         self._joint_attention_kwargs = joint_attention_kwargs
+         self._interrupt = False
+
+         # 2. Define call parameters
+         batch_size = 1 if isinstance(prompt, str) else len(prompt)
+         device = self._execution_device
+
+         # 3. Encode prompt
+         lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
+         prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
+             prompt=prompt,
+             prompt_2=prompt_2,
+             prompt_embeds=prompt_embeds,
+             pooled_prompt_embeds=pooled_prompt_embeds,
+             device=device,
+             num_images_per_prompt=num_images_per_prompt,
+             max_sequence_length=max_sequence_length,
+             lora_scale=lora_scale,
+         )
+         # 4. Prepare latent variables
+         num_channels_latents = self.transformer.config.in_channels // 4
+         latents, latent_image_ids = self.prepare_latents(
+             batch_size * num_images_per_prompt,
+             num_channels_latents,
+             height,
+             width,
+             prompt_embeds.dtype,
+             device,
+             generator,
+             latents,
+         )
+         # 5. Prepare timesteps
+         sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
+         image_seq_len = latents.shape[1]
+         mu = calculate_timestep_shift(image_seq_len)
+         timesteps, num_inference_steps = prepare_timesteps(
+             self.scheduler,
+             num_inference_steps,
+             device,
+             timesteps,
+             sigmas,
+             mu=mu,
+         )
+         self._num_timesteps = len(timesteps)
+
+         # Handle guidance (float32, matching diffusers; float16 would lose precision)
+         guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
+
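+         # Note: FLUX.1-schnell is step-distilled and configures guidance_embeds=False,
+         # so guidance is None here; the embedded-guidance branch only fires for
+         # guidance-distilled checkpoints such as FLUX.1-dev.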
+         # 6. Denoising loop
+         for i, t in enumerate(timesteps):
+             if self.interrupt:
+                 continue
+
+             # The transformer expects timesteps scaled to [0, 1], hence t / 1000
+             timestep = t.expand(latents.shape[0]).to(latents.dtype)
+
+             noise_pred = self.transformer(
+                 hidden_states=latents,
+                 timestep=timestep / 1000,
+                 guidance=guidance,
+                 pooled_projections=pooled_prompt_embeds,
+                 encoder_hidden_states=prompt_embeds,
+                 txt_ids=text_ids,
+                 img_ids=latent_image_ids,
+                 joint_attention_kwargs=self.joint_attention_kwargs,
+                 return_dict=False,
+             )[0]
+
+             # Advance the latents one scheduler step (no intermediate images are yielded)
+             latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+         # Final image: decode, then release hooks and cached memory before
+         # returning (cleanup placed after a return statement is unreachable)
+         image = self._decode_latents_to_image(latents, height, width, output_type)
+         self.maybe_free_model_hooks()
+         torch.cuda.empty_cache()
+         return image
+
+     def _decode_latents_to_image(self, latents, height, width, output_type, vae=None):
+         """Decodes the given latents into an image."""
+         vae = vae or self.vae
+         latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+         latents = (latents / vae.config.scaling_factor) + vae.config.shift_factor
+         image = vae.decode(latents, return_dict=False)[0]
+         return self.image_processor.postprocess(image, output_type=output_type)[0]
readme.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: FLUX Realtime
+ emoji: ⚡
+ colorFrom: yellow
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 5.8.0
+ app_file: app.py
+ pinned: true
+ license: mit
+ short_description: High quality Images in Realtime
+ ---
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ accelerate
+ git+https://github.com/huggingface/diffusers.git@main
+ torch>=2.0
+ gradio==5.8.0
+ transformers
+ xformers
+ sentencepiece
+ peft
+ numpy
+ pillow