svjack committed · verified
Commit 9b51da9 · 1 Parent(s): 737793b

Update README.md
Files changed (1):
  1. README.md +61 -8
README.md CHANGED
@@ -120,6 +120,7 @@ name_dict = {
 To use this model, you need to install the following dependencies:
 
 ```bash
+ sudo apt-get update && sudo apt-get install git-lfs ffmpeg cbm
 pip install -U diffusers transformers sentencepiece peft controlnet-aux
 ```
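As a quick sanity check after installing (an illustrative sketch, not part of this commit), the pip dependencies can be imported and their versions printed:

```python
# Verify the pip dependencies from the block above import cleanly
import diffusers
import transformers
import sentencepiece
import peft
import controlnet_aux

print("diffusers:", diffusers.__version__)
print("transformers:", transformers.__version__)
```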
 
 
@@ -340,12 +341,66 @@ from IPython import display
 display.Video("zhongli_animation.mp4", width=512, height=512)
 ```
 
+ Use `AutoPipelineForImage2Image` to run image-to-image enhancement over every frame of the animation:
+ 
+ ```python
+ from moviepy.editor import VideoFileClip
+ from PIL import Image
+ 
+ # Decode the AnimateDiff output into a list of PIL frames
+ clip = VideoFileClip("zhongli_animation.mp4")
+ frames = list(map(Image.fromarray, clip.iter_frames()))
+ 
+ from diffusers import AutoPipelineForText2Image, AutoPipelineForImage2Image
+ import torch
+ 
+ pipeline_text2image = AutoPipelineForText2Image.from_pretrained(
+     "svjack/GenshinImpact_XL_Base",
+     torch_dtype=torch.float16
+ )
+ 
+ # Use from_pipe to avoid consuming additional memory when loading a checkpoint
+ pipeline = AutoPipelineForImage2Image.from_pipe(pipeline_text2image).to("cuda")
+ 
+ from tqdm import tqdm
+ 
+ # Re-render each frame with img2img: strength=0.8 keeps only a loose
+ # resemblance to the source frame, guidance_scale=10.5 follows the prompt closely
+ req = []
+ for init_image in tqdm(frames):
+     prompt = "solo,ZHONGLI\(genshin impact\),1boy,portrait,upper_body,highres, keep eyes forward."
+     image = pipeline(prompt, image=init_image, strength=0.8, guidance_scale=10.5).images[0]
+     req.append(image)
+ 
+ from diffusers.utils import export_to_video
+ export_to_video(req, "zhongli_animation_im2im.mp4")
+ from IPython import display
+ display.Video("zhongli_animation_im2im.mp4", width=512, height=512)
+ ```
+ 
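Running the pipeline independently on every frame tends to flicker. One common mitigation, sketched here as an assumption rather than something this commit does, is to pass a fixed-seed `torch.Generator` into each call (reusing `pipeline`, `prompt`, and `init_image` from the loop above):

```python
import torch

# A fixed seed makes the per-frame noise deterministic, which can reduce
# frame-to-frame flicker (assumption, not part of the commit)
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipeline(prompt, image=init_image, strength=0.8,
                 guidance_scale=10.5, generator=generator).images[0]
```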
 ##### Enhancing Animation with RIFE
 To enhance the animation using RIFE (Real-Time Intermediate Flow Estimation):
 
 ```bash
 git clone https://github.com/svjack/Practical-RIFE && cd Practical-RIFE && pip install -r requirements.txt
- python inference_video.py --multi=128 --video=../zhongli_animation.mp4
+ python inference_video.py --multi=128 --video=zhongli_animation_im2im.mp4
+ ```
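To confirm the interpolation, the source and interpolated clips can be compared with moviepy (an illustrative sketch; it assumes both files sit in the current directory, and the output name follows the `_128X_1280fps` naming used in the block below):

```python
from moviepy.editor import VideoFileClip

# Compare the RIFE input against its 128x-interpolated output
src = VideoFileClip("zhongli_animation_im2im.mp4")
out = VideoFileClip("zhongli_animation_im2im_128X_1280fps.mp4")
print(f"source: {src.fps} fps, {src.duration:.2f}s")
print(f"interpolated: {out.fps} fps, {out.duration:.2f}s")
```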
+ 
+ ```python
+ from moviepy.editor import VideoFileClip
+ clip = VideoFileClip("zhongli_animation_im2im_128X_1280fps.mp4")
+ 
+ def speed_change_video(video_clip, speed_factor, output_path):
+     if speed_factor == 1:
+         # If the speed factor is 1, write the original video unchanged
+         video_clip.write_videofile(output_path, codec="libx264")
+     else:
+         # Otherwise, rescale playback speed by the given factor
+         sped_up_clip = video_clip.speedx(speed_factor)
+         sped_up_clip.write_videofile(output_path, codec="libx264")
+ 
+ # 0.05 slows the 128x-interpolated clip down to 1/20 speed
+ speed_change_video(clip, 0.05, "zhongli_animation_im2im_128X_1280fps_wrt.mp4")
+ 
+ # Keep only the first 10 seconds as a preview
+ VideoFileClip("zhongli_animation_im2im_128X_1280fps_wrt.mp4").set_duration(10).write_videofile("zhongli_animation_im2im_128X_1280fps_wrt_10s.mp4", codec="libx264")
+ from IPython import display
+ display.Video("zhongli_animation_im2im_128X_1280fps_wrt_10s.mp4", width=512, height=512)
 ```
 
 ##### Merging Videos Horizontally
@@ -378,22 +433,20 @@ def merge_videos_horizontally(video_path1, video_path2, output_video_path):
     print(f"Merged video saved to {output_video_path}")
 
 # Example usage
- video_path1 = "zhongli_animation.mp4"
- video_path2 = "zhongli_animation_128X_1280fps_wrt.mp4"
- output_video_path = "zhongli_inter_video_compare.mp4"
+ video_path1 = "zhongli_animation.mp4"  # path to the first video file
+ video_path2 = "zhongli_animation_im2im_128X_1280fps_wrt_10s.mp4"  # path to the second video file
+ output_video_path = "zhongli_inter_video_im2im_compare.mp4"  # path for the output video
 merge_videos_horizontally(video_path1, video_path2, output_video_path)
 ```
 
 
 <div>
- <b><h3 style="text-align: center;">Left is zhongli_animation.mp4, Right is zhongli_animation_128X_1280fps_wrt.mp4</h3></b>
+ <b><h3 style="text-align: center;">Left is zhongli_animation.mp4 (By AnimateDiffSDXLPipeline), Right is zhongli_animation_im2im_128X_1280fps_wrt_10s.mp4 (By AutoPipelineForImage2Image + Practical-RIFE)</h3></b>
  <div style="display: flex; flex-direction: column; align-items: center;">
    <div style="margin-bottom: 10px;">
-      <video controls autoplay src="https://cdn-uploads.huggingface.co/production/uploads/634dffc49b777beec3bc6448/AgdsshSX-Dt5ObeAkjmby.mp4"></video>
+      <video controls autoplay src="https://cdn-uploads.huggingface.co/production/uploads/634dffc49b777beec3bc6448/gBaodBk8z3aI69LiT36w2.mp4"></video>
       <p style="text-align: center;">钟离</p>
    </div>
  </div>
 </div>
- 
- 
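The body of `merge_videos_horizontally` is not shown in this hunk; a minimal sketch of such a helper using moviepy's `clips_array` (an assumed implementation for reference, not the README's actual code):

```python
from moviepy.editor import VideoFileClip, clips_array

def merge_videos_horizontally(video_path1, video_path2, output_video_path):
    # Load both clips and match their heights before placing them side by side
    clip1 = VideoFileClip(video_path1)
    clip2 = VideoFileClip(video_path2).resize(height=clip1.h)
    merged = clips_array([[clip1, clip2]])  # one row, two columns
    merged.write_videofile(output_video_path, codec="libx264")
    print(f"Merged video saved to {output_video_path}")
```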