learnmlf committed on
Commit
280372d
·
1 Parent(s): 71a3ef4

feat: refactor

Browse files
Files changed (1) hide show
  1. app.py +91 -45
app.py CHANGED
@@ -256,72 +256,104 @@ def generate_video(input_img, should_crop_face, expand_x, expand_y, offset_x, of
256
 
257
  with gr.Blocks() as demo:
258
  gr.Markdown("# FollowYourEmoji Webui")
259
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
260
  with gr.Row():
 
261
  with gr.Column(scale=1):
262
- input_img = gr.Image(label="Upload reference image", type="filepath", height=500)
263
-
264
- crop_face_checkbox = gr.Checkbox(label="Crop face according to video",info="If your picture is too far away or the face doesn't fit you can use cropping, you can see a preview in the tab below", value=False)
265
- with gr.Accordion("Face Cropping", open=False):
 
266
  expand_x = gr.Slider(label="Expand X", minimum=0.5, maximum=5.0, value=1.2, step=0.01)
267
  expand_y = gr.Slider(label="Expand Y", minimum=0.5, maximum=5.0, value=1.2, step=0.01)
268
  offset_x = gr.Slider(label="Offset X", minimum=-1, maximum=1, value=0.0, step=0.01)
269
  offset_y = gr.Slider(label="Offset Y", minimum=-1, maximum=1, value=0.0, step=0.01)
270
-
271
- preview_crop_btn = gr.Button(value="Preview Crop")
272
  with gr.Row():
273
- crop_preview = gr.Image(label="Crop Preview", height=300)
274
- crop_preview_motion = gr.Image(label="Preview motion Crop", height=300)
275
-
276
- with gr.Accordion("Input Video", open=True):
277
- input_video_type = gr.Radio(label="Input reference video type",info="You can either upload the video through the interface or use an already compiled npy file", choices=["video","npy"], value="video")
278
-
279
- with gr.Group() as video_group:
280
- input_video = gr.Video(label="Upload reference video", height=500)
281
- input_video_save = gr.Checkbox(label="Save video to processed video folder", value=True)
282
-
283
- with gr.Group(visible=False) as npy_group:
284
- input_npy_select = gr.Dropdown(label="Select from processed video folder", choices=["None"], value="None")
285
- input_npy_refresh = gr.Button(value="Update NPY list")
286
- input_npy = gr.File(file_types=[".npy"], label="Upload preprocessed video in .npy")
287
- with gr.Accordion("Animation Preview",open=False):
288
- show_gif_btn = gr.Button(value="Show Animation preview")
 
 
 
289
  with gr.Row():
290
- gif_output = gr.Image(label="GIF Preview", height=300)
291
- gif_output_align = gr.Image(label="Aligned GIF Preview", height=300)
292
-
293
- with gr.Accordion("Animation Settings", open=True):
 
 
 
 
 
 
 
 
 
 
 
 
294
  input_video_frames = gr.Slider(label="Video frames", minimum=1, maximum=30, value=30, step=1)
295
  settings_steps = gr.Slider(label="Steps", minimum=1, maximum=200, value=30)
296
  settings_cfg_scale = gr.Slider(label="CFG scale", minimum=0.1, maximum=20, value=3.5, step=0.1)
297
  settings_seed = gr.Slider(minimum=0, maximum=1000, value=42, step=1, label="Seed")
298
- intropolate_factor = gr.Slider(label="Intropolate Factor Frames",info="This is the number of frames to interpolate between the frames", minimum=1, maximum=50, value=1, step=1)
299
-
300
- use_custom_fps = gr.Checkbox(label="Use custom FPS",info="By default the FPS is set to 7", value=True)
301
- with gr.Row():
302
- output_fps = gr.Slider(label="Output FPS",info="if you upload video fps slider updates to video fps", minimum=1, maximum=240, value=15, step=1)
303
- output_fps_info = gr.Label(value="This will be the FPS information of the video you uploaded")
304
-
305
- with gr.Accordion("Generation Settings", open=True):
306
  context_frames = gr.Slider(label="Context Frames", minimum=1, maximum=50, value=24, step=1)
307
  context_stride = gr.Slider(label="Context Stride", minimum=1, maximum=10, value=1, step=1)
308
  context_overlap = gr.Slider(label="Context Overlap", minimum=0, maximum=10, value=4, step=1)
309
  context_batch_size = gr.Slider(label="Context Batch Size", minimum=1, maximum=10, value=1, step=1)
310
  callback_steps = gr.Slider(label="Callback Steps", minimum=1, maximum=50, value=1, step=1)
311
-
312
- with gr.Accordion("Advanced Settings", open=False):
 
313
  resolution_w = gr.Slider(label="Resolution Width", minimum=64, maximum=1024, value=config['resolution_w'], step=64)
314
  resolution_h = gr.Slider(label="Resolution Height", minimum=64, maximum=1024, value=config['resolution_h'], step=64)
315
  model_step = gr.Slider(label="Model Step", value=0, minimum=0, maximum=100)
316
  custom_output_path = gr.Textbox(label="Custom Output Path", placeholder="Leave empty for default")
317
- anomaly_action = gr.Radio(label="Anomaly Action",info="Sometimes a bad frame can slip through and this function will detect it and do what you specify", choices=["none", "remove"], value="none")
318
-
319
- with gr.Column(scale=1):
320
- result_status = gr.Label(value="Status")
321
- result_video = gr.Video(label="Result Video (oo)", interactive=False, height=500)
322
- result_video_2 = gr.Video(label="Result Video (all)", interactive=False, height=500)
323
- result_btn = gr.Button(value="Generate Video")
324
- frames_output = gr.File(label="Frames Archive ( You'll get an archive with all the frames )")
325
 
326
  input_video_type.change(
327
  fn=lambda x: (gr.update(visible=(x=="video")), gr.update(visible=(x=="npy"))),
@@ -363,6 +395,20 @@ with gr.Blocks() as demo:
363
  model_step, custom_output_path, use_custom_fps, output_fps, callback_steps, context_frames, context_stride, context_overlap, context_batch_size, anomaly_action,intropolate_factor],
364
  outputs=[result_status, result_video, result_video_2, frames_output]
365
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
366
 
367
  if __name__ == "__main__":
368
  import argparse
 
256
 
257
  with gr.Blocks() as demo:
258
  gr.Markdown("# FollowYourEmoji Webui")
259
+
260
+ gr.Markdown("""
261
+ ## 📖 How to Use This Demo
262
+
263
+ Follow these simple steps to create your animated emoji video:
264
+
265
+ 1. **📸 Upload Reference Image**: Upload a portrait photo in the left panel
266
+ 2. **✂️ Crop Face (Optional)**: Enable face cropping to automatically fit the image to match the video motion
267
+ 3. **🎬 Upload Reference Video**: Upload a video or select a preprocessed .npy file in the middle panel
268
+ 4. **👁️ Preview Animation**: Click "Show Animation preview" to see how the motion will look
269
+ 5. **⚙️ Adjust Settings**: Fine-tune generation parameters at the bottom (steps, CFG scale, FPS, etc.)
270
+ 6. **🎨 Generate Video**: Click "Generate Video" to create your animated result
271
+
272
+ 💡 **Tips**:
273
+ - Use face cropping if your portrait is too far away or doesn't align well
274
+ - Preview the animation before generating to ensure the motion looks good
275
+ - Try the examples below to get started quickly!
276
+ """)
277
+
278
+ # Main Layout: 3 columns - Image, Video, Results
279
  with gr.Row():
280
+ # Left Column: Image Upload and Crop Face
281
  with gr.Column(scale=1):
282
+ gr.Markdown("### 📸 Reference Image")
283
+ input_img = gr.Image(label="Upload reference image", type="filepath", height=400)
284
+
285
+ crop_face_checkbox = gr.Checkbox(label="Crop face according to video",info="If your picture is too far away or the face doesn't fit you can use cropping", value=False)
286
+ with gr.Accordion("Face Cropping Settings", open=False):
287
  expand_x = gr.Slider(label="Expand X", minimum=0.5, maximum=5.0, value=1.2, step=0.01)
288
  expand_y = gr.Slider(label="Expand Y", minimum=0.5, maximum=5.0, value=1.2, step=0.01)
289
  offset_x = gr.Slider(label="Offset X", minimum=-1, maximum=1, value=0.0, step=0.01)
290
  offset_y = gr.Slider(label="Offset Y", minimum=-1, maximum=1, value=0.0, step=0.01)
291
+
292
+ preview_crop_btn = gr.Button(value="Preview Crop", variant="secondary")
293
  with gr.Row():
294
+ crop_preview = gr.Image(label="Crop Preview", height=200)
295
+ crop_preview_motion = gr.Image(label="Motion Preview", height=200)
296
+
297
+ # Middle Column: Video Input and Animation Preview
298
+ with gr.Column(scale=1):
299
+ gr.Markdown("### 🎬 Reference Video")
300
+ input_video_type = gr.Radio(label="Input type", choices=["video","npy"], value="video")
301
+
302
+ with gr.Group() as video_group:
303
+ input_video = gr.Video(label="Upload reference video", height=400)
304
+ input_video_save = gr.Checkbox(label="Save to processed folder", value=True)
305
+
306
+ with gr.Group(visible=False) as npy_group:
307
+ input_npy_select = gr.Dropdown(label="Select preprocessed NPY", choices=["None"], value="None")
308
+ input_npy_refresh = gr.Button(value="Refresh NPY List")
309
+ input_npy = gr.File(file_types=[".npy"], label="Upload .npy file")
310
+
311
+ with gr.Accordion("Animation Preview", open=False):
312
+ show_gif_btn = gr.Button(value="Show Animation Preview", variant="secondary")
313
  with gr.Row():
314
+ gif_output = gr.Image(label="Motion Preview", height=200)
315
+ gif_output_align = gr.Image(label="Aligned Preview", height=200)
316
+
317
+ # Right Column: Results
318
+ with gr.Column(scale=1):
319
+ gr.Markdown("### 🎨 Generated Results")
320
+ result_status = gr.Label(value="Ready to generate")
321
+ result_video = gr.Video(label="Result Video (Main)", interactive=False, height=400)
322
+ result_video_2 = gr.Video(label="Result Video (Full)", interactive=False, height=400)
323
+ frames_output = gr.File(label="Download Frames Archive")
324
+
325
+ # Bottom Section: Settings and Generate Button
326
+ with gr.Accordion("⚙️ Generation Settings", open=True):
327
+ with gr.Row():
328
+ with gr.Column(scale=1):
329
+ gr.Markdown("#### Animation Settings")
330
  input_video_frames = gr.Slider(label="Video frames", minimum=1, maximum=30, value=30, step=1)
331
  settings_steps = gr.Slider(label="Steps", minimum=1, maximum=200, value=30)
332
  settings_cfg_scale = gr.Slider(label="CFG scale", minimum=0.1, maximum=20, value=3.5, step=0.1)
333
  settings_seed = gr.Slider(minimum=0, maximum=1000, value=42, step=1, label="Seed")
334
+ intropolate_factor = gr.Slider(label="Interpolate Factor",info="Number of frames to interpolate between frames", minimum=1, maximum=50, value=1, step=1)
335
+
336
+ use_custom_fps = gr.Checkbox(label="Use custom FPS",info="By default FPS is 7", value=True)
337
+ output_fps = gr.Slider(label="Output FPS",info="Automatically updates from uploaded video", minimum=1, maximum=240, value=15, step=1)
338
+ output_fps_info = gr.Label(value="FPS info will appear here")
339
+
340
+ with gr.Column(scale=1):
341
+ gr.Markdown("#### Context Settings")
342
  context_frames = gr.Slider(label="Context Frames", minimum=1, maximum=50, value=24, step=1)
343
  context_stride = gr.Slider(label="Context Stride", minimum=1, maximum=10, value=1, step=1)
344
  context_overlap = gr.Slider(label="Context Overlap", minimum=0, maximum=10, value=4, step=1)
345
  context_batch_size = gr.Slider(label="Context Batch Size", minimum=1, maximum=10, value=1, step=1)
346
  callback_steps = gr.Slider(label="Callback Steps", minimum=1, maximum=50, value=1, step=1)
347
+
348
+ with gr.Column(scale=1):
349
+ gr.Markdown("#### Advanced Settings")
350
  resolution_w = gr.Slider(label="Resolution Width", minimum=64, maximum=1024, value=config['resolution_w'], step=64)
351
  resolution_h = gr.Slider(label="Resolution Height", minimum=64, maximum=1024, value=config['resolution_h'], step=64)
352
  model_step = gr.Slider(label="Model Step", value=0, minimum=0, maximum=100)
353
  custom_output_path = gr.Textbox(label="Custom Output Path", placeholder="Leave empty for default")
354
+ anomaly_action = gr.Radio(label="Anomaly Detection",info="Detect and handle bad frames", choices=["none", "remove"], value="none")
355
+
356
+ result_btn = gr.Button(value="🎨 Generate Video", variant="primary", size="lg")
 
 
 
 
 
357
 
358
  input_video_type.change(
359
  fn=lambda x: (gr.update(visible=(x=="video")), gr.update(visible=(x=="npy"))),
 
395
  model_step, custom_output_path, use_custom_fps, output_fps, callback_steps, context_frames, context_stride, context_overlap, context_batch_size, anomaly_action,intropolate_factor],
396
  outputs=[result_status, result_video, result_video_2, frames_output]
397
  )
398
+
399
+ # Examples Section
400
+ gr.Markdown("---")
401
+ gr.Markdown("## 🎯 Examples")
402
+ gr.Markdown("Click on an example below to quickly get started:")
403
+
404
+ gr.Examples(
405
+ examples=[
406
+ ["example/s1.jpg", "example/temple_video.mp4"],
407
+ ["example/123.png", "example/test.mp4"],
408
+ ],
409
+ inputs=[input_img, input_video],
410
+ label="Try these examples"
411
+ )
412
 
413
  if __name__ == "__main__":
414
  import argparse