AI-Anchorite committed
Commit abe2421 · 1 Parent(s): c178cab

Update gradio_app.py

Files changed (1): gradio_app.py (+26 -151)
gradio_app.py CHANGED
@@ -4,21 +4,27 @@ import os
 import gradio as gr
 import subprocess
 from subprocess import getoutput
+
 from diffusers.schedulers import EulerAncestralDiscreteScheduler
 from transformers import T5EncoderModel, T5Tokenizer
 from allegro.pipelines.pipeline_allegro import AllegroPipeline
 from allegro.models.vae.vae_allegro import AllegroAutoencoderKL3D
 from allegro.models.transformers.transformer_3d_allegro import AllegroTransformer3DModel
+# from allegro.models.transformers.block import AttnProcessor2_0
 
 from huggingface_hub import snapshot_download
 
+# # Override attention processor initialization
+# AttnProcessor2_0.__init__ = lambda self, *args, **kwargs: super(AttnProcessor2_0, self).__init__()
+
 weights_dir = './allegro_weights'
 os.makedirs(weights_dir, exist_ok=True)
 
-is_shared_ui = True if "fffiloni/allegro-text2video" in os.environ['SPACE_ID'] else False
+is_shared_ui = False
 is_gpu_associated = torch.cuda.is_available()
 
-if not is_shared_ui:
+# Download weights if not present
+if not os.path.exists(weights_dir):
     snapshot_download(
         repo_id='rhymes-ai/Allegro',
         allow_patterns=[
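Note: two behaviors change in this hunk. The old `os.environ['SPACE_ID']` lookup raises `KeyError` whenever the app runs outside a Hugging Face Space, which the hard-coded `is_shared_ui = False` sidesteps. Also, `os.makedirs(weights_dir, exist_ok=True)` executes before the new `if not os.path.exists(weights_dir):` guard, so the directory always exists and `snapshot_download` is never reached; the weights must already be present locally. A minimal sketch of a guard that handles both points (the `config.json` marker file is an assumption, not part of this commit):

```python
import os
from huggingface_hub import snapshot_download

weights_dir = './allegro_weights'
os.makedirs(weights_dir, exist_ok=True)

# Read SPACE_ID defensively: .get() returns a default instead of raising
# KeyError when the app runs outside a Hugging Face Space.
is_shared_ui = "fffiloni/allegro-text2video" in os.environ.get('SPACE_ID', '')

# Key the download check to a file the snapshot should contain, not to the
# directory makedirs just created ('config.json' is a hypothetical marker).
if not os.path.exists(os.path.join(weights_dir, 'config.json')):
    snapshot_download(repo_id='rhymes-ai/Allegro', local_dir=weights_dir)
```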
@@ -31,11 +37,8 @@ if not is_shared_ui:
         local_dir=weights_dir,
     )
 
-if is_gpu_associated:
-    gpu_info = getoutput('nvidia-smi')
-
 def single_inference(user_prompt, save_path, guidance_scale, num_sampling_steps, seed, enable_cpu_offload):
-    dtype = torch.bfloat16
+    dtype = torch.float16  # Changed from torch.bfloat16
 
     # Load models
     vae = AllegroAutoencoderKL3D.from_pretrained(
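Note: the dtype switch trades bfloat16's wider exponent range for float16's broader hardware support on pre-Ampere GPUs. A sketch, not part of this commit, of picking the half-precision dtype at runtime instead of hard-coding it:

```python
import torch

# Prefer bfloat16 where the GPU supports it natively (Ampere and newer);
# fall back to float16 on older hardware.
if torch.cuda.is_available() and torch.cuda.is_bf16_supported():
    dtype = torch.bfloat16
else:
    dtype = torch.float16
```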
@@ -80,6 +83,9 @@ def single_inference(user_prompt, save_path, guidance_scale, num_sampling_steps,
     if enable_cpu_offload:
         allegro_pipeline.enable_sequential_cpu_offload()
 
+    # Clear memory before generation
+    # torch.cuda.empty_cache()
+
     out_video = allegro_pipeline(
         user_prompt,
         negative_prompt=negative_prompt,
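Note: `enable_sequential_cpu_offload()` is the most aggressive of diffusers' offload modes; submodules live on the CPU and visit the GPU one at a time, which is what the removed UI text's "about 9.3G, compared to 27.5G" figures refer to, at a substantial cost in speed. A generic sketch of the call pattern (the model id is a placeholder, not from this commit):

```python
import torch
from diffusers import DiffusionPipeline

# Placeholder model id; any diffusers pipeline exposes the same offload hooks.
pipe = DiffusionPipeline.from_pretrained("org/model-id", torch_dtype=torch.float16)

# Lowest VRAM, slowest inference. enable_model_cpu_offload() is the faster,
# coarser-grained alternative that moves whole components instead.
pipe.enable_sequential_cpu_offload()
```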
@@ -105,152 +111,22 @@ def run_inference(user_prompt, guidance_scale, num_sampling_steps, seed, enable_
     result_path = single_inference(user_prompt, save_path, guidance_scale, num_sampling_steps, seed, enable_cpu_offload)
     return result_path
 
-css="""
-div#col-container{
-    margin: 0 auto;
-    max-width: 800px;
-}
-div#warning-ready {
-    background-color: #ecfdf5;
-    padding: 0 16px 16px;
-    margin: 20px 0;
-    color: #030303!important;
-}
-div#warning-ready > .gr-prose > h2, div#warning-ready > .gr-prose > p {
-    color: #057857!important;
-}
-div#warning-duplicate {
-    background-color: #ebf5ff;
-    padding: 0 16px 16px;
-    margin: 20px 0;
-    color: #030303!important;
-}
-div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
-    color: #0f4592!important;
-}
-div#warning-duplicate strong {
-    color: #0f4592;
-}
-p.actions {
-    display: flex;
-    align-items: center;
-    margin: 20px 0;
-}
-div#warning-duplicate .actions a {
-    display: inline-block;
-    margin-right: 10px;
-}
-div#warning-setgpu {
-    background-color: #fff4eb;
-    padding: 0 16px 16px;
-    margin: 20px 0;
-    color: #030303!important;
-}
-div#warning-setgpu > .gr-prose > h2, div#warning-setgpu > .gr-prose > p {
-    color: #92220f!important;
-}
-div#warning-setgpu a, div#warning-setgpu b {
-    color: #91230f;
-}
-div#warning-setgpu p.actions > a {
-    display: inline-block;
-    background: #1f1f23;
-    border-radius: 40px;
-    padding: 6px 24px;
-    color: antiquewhite;
-    text-decoration: none;
-    font-weight: 600;
-    font-size: 1.2em;
-}
-div#warning-setsleeptime {
-    background-color: #fff4eb;
-    padding: 10px 10px;
-    margin: 0!important;
-    color: #030303!important;
-}
-.custom-color {
-    color: #030303 !important;
-}
-"""
-
 # Create Gradio interface
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
+with gr.Blocks() as demo:
+    with gr.Column():
         gr.Markdown("# Allegro Video Generation")
         gr.Markdown("Generate a video based on a text prompt using the Allegro pipeline.")
-        gr.HTML("""
-        <div style="display:flex;column-gap:4px;">
-            <a href='https://huggingface.co/rhymes-ai/Allegro'>
-                <img src='https://img.shields.io/badge/HuggingFace-Model-orange'>
-            </a>
-            <a href='https://github.com/rhymes-ai/Allegro/tree/main'>
-                <img src='https://img.shields.io/badge/GitHub-Repo-blue'>
-            </a>
-            <a href='https://arxiv.org/abs/2410.15458'>
-                <img src='https://img.shields.io/badge/ArXivPaper-red'>
-            </a>
-        </div>
-        """)
-        user_prompt=gr.Textbox(label="User Prompt")
+
+        user_prompt = gr.Textbox(label="User Prompt")
         with gr.Row():
-            guidance_scale=gr.Slider(minimum=0, maximum=20, step=0.1, label="Guidance Scale", value=7.5)
-            num_sampling_steps=gr.Slider(minimum=10, maximum=100, step=1, label="Number of Sampling Steps", value=20)
+            guidance_scale = gr.Slider(minimum=0, maximum=20, step=0.1, label="Guidance Scale", value=7.5)
+            num_sampling_steps = gr.Slider(minimum=10, maximum=100, step=1, label="Number of Sampling Steps", value=20)
         with gr.Row():
-            seed=gr.Slider(minimum=0, maximum=10000, step=1, label="Random Seed", value=42)
-            enable_cpu_offload=gr.Checkbox(label="Enable CPU Offload", value=False, scale=1)
-        if is_shared_ui:
-            top_description = gr.HTML(f'''
-                <div class="gr-prose">
-                    <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-                    Attention: this Space need to be duplicated to work</h2>
-                    <p class="main-message custom-color">
-                        To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU.<br />
-                        You'll be able to offload the model into CPU for less GPU memory cost (about 9.3G, compared to 27.5G if CPU offload is not enabled), but the inference time will increase significantly.
-                    </p>
-                    <p class="actions custom-color">
-                        <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
-                            <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
-                        </a>
-                    </p>
-                </div>
-            ''', elem_id="warning-duplicate")
-            submit_btn = gr.Button("Generate Video", visible=False)
-        else:
-            if(is_gpu_associated):
-                submit_btn = gr.Button("Generate Video", visible=True)
-                top_description = gr.HTML(f'''
-                    <div class="gr-prose">
-                        <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-                        You have successfully associated a GPU to this Space 🎉</h2>
-                        <p class="custom-color">
-                            You can now generate a video! You will be billed by the minute from when you activated the GPU until when it is turned off.
-                            You can offload the model into CPU for less GPU memory cost (about 9.3G, compared to 27.5G if CPU offload is not enabled), but the inference time will increase significantly.
-                        </p>
-                    </div>
-                ''', elem_id="warning-ready")
-            else:
-                top_description = gr.HTML(f'''
-                    <div class="gr-prose">
-                        <h2 class="custom-color"><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;"fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
-                        You have successfully duplicated the Allegro Video Generation Space 🎉</h2>
-                        <p class="custom-color">There's only one step left before you can generate a video: we recommend to <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">attribute a L40S GPU</b> to it (via the Settings tab)</a>.
-                        You will be billed by the minute from when you activate the GPU until when it is turned off.</p>
-                        <p class="actions custom-color">
-                            <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings">🔥 &nbsp; Set recommended GPU</a>
-                        </p>
-                    </div>
-                ''', elem_id="warning-setgpu")
-                submit_btn = gr.Button("Generate Video", visible=False)
-
-        video_output=gr.Video(label="Generated Video")
-
-        def load_allegro_examples(prompt):
-            if prompt == "A Monkey is playing bass guitar.":
-                return "https://rhymes.ai/allegroVideos/30_demo_w_watermark_prompt_1018/11.mp4"
-            elif prompt == "An astronaut riding a horse.":
-                return "https://rhymes.ai/allegroVideos/30_demo_w_watermark_prompt_1018/15.mp4"
-            elif prompt == "A tiny finch on a branch with spring flowers on background.":
-                return "https://rhymes.ai/allegroVideos/30_demo_w_watermark_prompt_1018/22.mp4"
+
+        submit_btn = gr.Button("Generate Video")
+        video_output = gr.Video(label="Generated Video")
 
         gr.Examples(
             examples=[
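Note: this hunk deletes the shared-UI machinery wholesale: the custom CSS, the badge bar, and the three HTML banners that gated the button on `is_shared_ui` and `is_gpu_associated`. It also flips the CPU-offload checkbox default from `False` to `True`. A distilled sketch of the removed gating pattern, reduced to the two flags it actually branched on:

```python
import torch
import gradio as gr

# Distilled from the removed branches: the button was only clickable when a
# GPU was attached and the Space was not the shared public demo.
is_shared_ui = False  # the commit now hard-codes this
is_gpu_associated = torch.cuda.is_available()

with gr.Blocks() as demo:
    can_run = (not is_shared_ui) and is_gpu_associated
    submit_btn = gr.Button("Generate Video", visible=can_run)
```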
@@ -258,11 +134,10 @@ with gr.Blocks(css=css) as demo:
                 ["An astronaut riding a horse."],
                 ["A tiny finch on a branch with spring flowers on background."]
             ],
-            fn=load_allegro_examples,
             inputs=[user_prompt],
             outputs=video_output,
-            run_on_click=True,
-
+            fn=lambda x: None,
+            cache_examples=False
         )
 
     submit_btn.click(
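Note: the old wiring used `run_on_click=True` with `load_allegro_examples` to return pre-rendered demo videos; with `fn=lambda x: None` and `cache_examples=False`, clicking an example now only fills the prompt box. A minimal self-contained sketch of the new configuration:

```python
import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="User Prompt")
    video = gr.Video(label="Generated Video")
    gr.Examples(
        examples=[["An astronaut riding a horse."]],
        inputs=[prompt],
        outputs=video,
        fn=lambda x: None,     # an example click no longer produces a video
        cache_examples=False,  # and nothing is pre-computed at startup
    )
```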
@@ -272,4 +147,4 @@ with gr.Blocks(css=css) as demo:
     )
 
 # Launch the interface
-demo.launch(show_error=True, show_api=False)
+demo.launch(show_error=True)
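Note: the `submit_btn.click(...)` arguments sit in context lines this diff does not show, and dropping `show_api=False` re-enables gradio's auto-generated API page. A self-contained sketch of the wiring the simplified UI presumably uses; the click argument list is an assumption based on `run_inference`'s signature, not shown by the commit:

```python
import gradio as gr

def run_inference(user_prompt, guidance_scale, num_sampling_steps, seed, enable_cpu_offload):
    return None  # stand-in for the real pipeline call, which returns a video path

with gr.Blocks() as demo:
    user_prompt = gr.Textbox(label="User Prompt")
    guidance_scale = gr.Slider(0, 20, value=7.5, label="Guidance Scale")
    num_sampling_steps = gr.Slider(10, 100, value=20, label="Number of Sampling Steps")
    seed = gr.Slider(0, 10000, value=42, label="Random Seed")
    enable_cpu_offload = gr.Checkbox(label="Enable CPU Offload", value=True)
    submit_btn = gr.Button("Generate Video")
    video_output = gr.Video(label="Generated Video")

    submit_btn.click(
        fn=run_inference,
        inputs=[user_prompt, guidance_scale, num_sampling_steps, seed, enable_cpu_offload],
        outputs=video_output,
    )

# show_error=True surfaces Python exceptions in the browser UI.
demo.launch(show_error=True)
```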
 
4
  import gradio as gr
5
  import subprocess
6
  from subprocess import getoutput
7
+
8
  from diffusers.schedulers import EulerAncestralDiscreteScheduler
9
  from transformers import T5EncoderModel, T5Tokenizer
10
  from allegro.pipelines.pipeline_allegro import AllegroPipeline
11
  from allegro.models.vae.vae_allegro import AllegroAutoencoderKL3D
12
  from allegro.models.transformers.transformer_3d_allegro import AllegroTransformer3DModel
13
+ # from allegro.models.transformers.block import AttnProcessor2_0
14
 
15
  from huggingface_hub import snapshot_download
16
 
17
+ # # Override attention processor initialization
18
+ # AttnProcessor2_0.__init__ = lambda self, *args, **kwargs: super(AttnProcessor2_0, self).__init__()
19
+
20
  weights_dir = './allegro_weights'
21
  os.makedirs(weights_dir, exist_ok=True)
22
 
23
+ is_shared_ui = False
24
  is_gpu_associated = torch.cuda.is_available()
25
 
26
+ # Download weights if not present
27
+ if not os.path.exists(weights_dir):
28
  snapshot_download(
29
  repo_id='rhymes-ai/Allegro',
30
  allow_patterns=[
 
37
  local_dir=weights_dir,
38
  )
39
 
 
 
 
40
  def single_inference(user_prompt, save_path, guidance_scale, num_sampling_steps, seed, enable_cpu_offload):
41
+ dtype = torch.float16 # Changed from torch.bfloat16
42
 
43
  # Load models
44
  vae = AllegroAutoencoderKL3D.from_pretrained(
 
83
  if enable_cpu_offload:
84
  allegro_pipeline.enable_sequential_cpu_offload()
85
 
86
+ # Clear memory before generation
87
+ # torch.cuda.empty_cache()
88
+
89
  out_video = allegro_pipeline(
90
  user_prompt,
91
  negative_prompt=negative_prompt,
 
111
  result_path = single_inference(user_prompt, save_path, guidance_scale, num_sampling_steps, seed, enable_cpu_offload)
112
  return result_path
113
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
  # Create Gradio interface
115
+ with gr.Blocks() as demo:
116
+ with gr.Column():
117
  gr.Markdown("# Allegro Video Generation")
118
  gr.Markdown("Generate a video based on a text prompt using the Allegro pipeline.")
119
+
120
+ user_prompt = gr.Textbox(label="User Prompt")
 
 
 
 
 
 
 
 
 
 
 
 
121
  with gr.Row():
122
+ guidance_scale = gr.Slider(minimum=0, maximum=20, step=0.1, label="Guidance Scale", value=7.5)
123
+ num_sampling_steps = gr.Slider(minimum=10, maximum=100, step=1, label="Number of Sampling Steps", value=20)
124
  with gr.Row():
125
+ seed = gr.Slider(minimum=0, maximum=10000, step=1, label="Random Seed", value=42)
126
+ enable_cpu_offload = gr.Checkbox(label="Enable CPU Offload", value=True, scale=1)
127
+
128
+ submit_btn = gr.Button("Generate Video")
129
+ video_output = gr.Video(label="Generated Video")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
 
131
  gr.Examples(
132
  examples=[
 
134
  ["An astronaut riding a horse."],
135
  ["A tiny finch on a branch with spring flowers on background."]
136
  ],
 
137
  inputs=[user_prompt],
138
  outputs=video_output,
139
+ fn=lambda x: None,
140
+ cache_examples=False
141
  )
142
 
143
  submit_btn.click(
 
147
  )
148
 
149
  # Launch the interface
150
+ demo.launch(show_error=True)