HeliosZhao committed
Commit bd0ad83
1 Parent(s): c57315b
Files changed (2)
  1. app.py +21 -14
  2. inference.py +1 -0
app.py CHANGED
@@ -106,6 +106,13 @@ with gr.Blocks(css='style.css') as demo:
         </div>
         """)
 
+    gr.HTML("""
+    <p>We provide a <a href="https://github.com/Make-A-Protagonist/Make-A-Protagonist/blob/main/docs/demo_guidance.md"> Demo Guidance </a> to help users to choose hyperparameters when editing videos.
+    <p>You may duplicate the space and upgrade GPU for better performance and faster inference without waiting in the queue.
+    <p>Alternatively, try our GitHub <a href=https://github.com/Make-A-Protagonist/Make-A-Protagonist> code </a> on your GPU.
+    </p>""")
+
+
 
     with gr.Row():
         with gr.Column():
@@ -136,14 +143,14 @@ with gr.Blocks(css='style.css') as demo:
                                 placeholder='Example: "A panda is surfing"')
             video_length = gr.Slider(label='Video length',
                                      minimum=4,
-                                     maximum=8,
+                                     maximum=6,
                                      step=1,
-                                     value=8)
+                                     value=6)
             fps = gr.Slider(label='FPS',
                             minimum=1,
-                            maximum=8,
+                            maximum=6,
                             step=1,
-                            value=4)
+                            value=3)
             seed = gr.Slider(label='Seed',
                              minimum=0,
                              maximum=100000,
@@ -207,8 +214,8 @@ with gr.Blocks(css='style.css') as demo:
         [
             'Make-A-Protagonist/ikun',
             'A man is playing basketball on the beach, anime style.',
-            8,
-            4,
+            6,
+            3,
             33,
             50,
             12.5,
@@ -224,8 +231,8 @@ with gr.Blocks(css='style.css') as demo:
         [
             'Make-A-Protagonist/huaqiang',
             'Elon Musk walking down the street.',
-            8,
-            4,
+            6,
+            3,
             33,
             50,
             12.5,
@@ -241,8 +248,8 @@ with gr.Blocks(css='style.css') as demo:
         [
             'Make-A-Protagonist/yanzi',
             'A panda walking down the snowy street.',
-            8,
-            4,
+            6,
+            3,
             33,
             50,
             12.5,
@@ -258,8 +265,8 @@ with gr.Blocks(css='style.css') as demo:
         [
             'Make-A-Protagonist/car-turn',
             'A car moving in the desert.',
-            8,
-            4,
+            6,
+            3,
             33,
             50,
             12.5,
@@ -275,8 +282,8 @@ with gr.Blocks(css='style.css') as demo:
         [
             'Make-A-Protagonist/car-turn',
             'A Suzuki Jimny driving down a mountain road in the rain.',
-            8,
-            4,
+            6,
+            3,
             33,
             50,
             12.5,
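For reference, the updated controls can be tried in isolation. The sketch below is a minimal, assumption-laden demo (a standalone gr.Blocks with none of the Make-A-Protagonist wiring) that only reproduces the new slider ranges and defaults:

import gradio as gr

# Minimal sketch: only the sliders touched by this commit, with the new
# bounds and defaults (video length capped at 6 frames, FPS capped at 6).
with gr.Blocks() as demo:
    video_length = gr.Slider(label='Video length', minimum=4, maximum=6, step=1, value=6)
    fps = gr.Slider(label='FPS', minimum=1, maximum=6, step=1, value=3)
    seed = gr.Slider(label='Seed', minimum=0, maximum=100000, step=1, value=33)

if __name__ == '__main__':
    demo.launch()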
inference.py CHANGED
@@ -242,6 +242,7 @@ class InferencePipeline:
         sample_indices = self.sample_indices
         image_embed = [self.train_dataset.img_embeddings[idx] for idx in sample_indices]
         image_embed = torch.stack(image_embed, dim=0).to(device=ddim_inv_latent.device, dtype=ddim_inv_latent.dtype) # F, 768 for UnCLIP-small # F,C
+        image_embed = image_embed[:video_length]
         ref_image = None
 
         # ipdb.set_trace()
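The added line presumably keeps the stacked per-frame embeddings in step with the shorter clips now allowed by the UI. A minimal sketch of the effect, using made-up shapes suggested by the `# F, 768 for UnCLIP-small` comment (the frame count and variable names here are assumptions, not values from the repository):

import torch

# Hypothetical setup: 8 per-frame embeddings are available, but the user
# requests a 6-frame video (the new default in app.py).
num_source_frames = 8
video_length = 6

image_embed = torch.randn(num_source_frames, 768)  # stand-in for the stacked embeddings
image_embed = image_embed[:video_length]           # same truncation as the committed line

print(image_embed.shape)  # torch.Size([6, 768]) -- one embedding per generated frame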