mayuema committed on
Commit
f4c8778
1 Parent(s): df73191

add mmpose

Browse files
FollowYourPose/__pycache__/test_followyourpose.cpython-38.pyc CHANGED
Binary files a/FollowYourPose/__pycache__/test_followyourpose.cpython-38.pyc and b/FollowYourPose/__pycache__/test_followyourpose.cpython-38.pyc differ
 
FollowYourPose/followyourpose/__pycache__/util.cpython-38.pyc CHANGED
Binary files a/FollowYourPose/followyourpose/__pycache__/util.cpython-38.pyc and b/FollowYourPose/followyourpose/__pycache__/util.cpython-38.pyc differ
 
FollowYourPose/followyourpose/models/__pycache__/attention.cpython-38.pyc CHANGED
Binary files a/FollowYourPose/followyourpose/models/__pycache__/attention.cpython-38.pyc and b/FollowYourPose/followyourpose/models/__pycache__/attention.cpython-38.pyc differ
 
FollowYourPose/followyourpose/models/__pycache__/resnet.cpython-38.pyc CHANGED
Binary files a/FollowYourPose/followyourpose/models/__pycache__/resnet.cpython-38.pyc and b/FollowYourPose/followyourpose/models/__pycache__/resnet.cpython-38.pyc differ
 
FollowYourPose/followyourpose/models/__pycache__/unet.cpython-38.pyc CHANGED
Binary files a/FollowYourPose/followyourpose/models/__pycache__/unet.cpython-38.pyc and b/FollowYourPose/followyourpose/models/__pycache__/unet.cpython-38.pyc differ
 
FollowYourPose/followyourpose/models/__pycache__/unet_blocks.cpython-38.pyc CHANGED
Binary files a/FollowYourPose/followyourpose/models/__pycache__/unet_blocks.cpython-38.pyc and b/FollowYourPose/followyourpose/models/__pycache__/unet_blocks.cpython-38.pyc differ
 
FollowYourPose/followyourpose/pipelines/__pycache__/pipeline_followyourpose.cpython-38.pyc CHANGED
Binary files a/FollowYourPose/followyourpose/pipelines/__pycache__/pipeline_followyourpose.cpython-38.pyc and b/FollowYourPose/followyourpose/pipelines/__pycache__/pipeline_followyourpose.cpython-38.pyc differ
 
__pycache__/example.cpython-38.pyc CHANGED
Binary files a/__pycache__/example.cpython-38.pyc and b/__pycache__/example.cpython-38.pyc differ
 
__pycache__/inference_followyourpose.cpython-38.pyc CHANGED
Binary files a/__pycache__/inference_followyourpose.cpython-38.pyc and b/__pycache__/inference_followyourpose.cpython-38.pyc differ
 
__pycache__/inference_mmpose.cpython-38.pyc ADDED
Binary file (2.65 kB). View file
 
app.py CHANGED
@@ -171,4 +171,4 @@ with gr.Blocks(css='style.css') as demo:
171
  run_button.click(fn=pipe.run, inputs=inputs, outputs=result)
172
 
173
  demo.queue().launch()
174
- # demo.queue().launch(share=False, server_name='0.0.0.0', server_port=7890)
 
171
  run_button.click(fn=pipe.run, inputs=inputs, outputs=result)
172
 
173
  demo.queue().launch()
174
+ # demo.queue().launch(share=False, server_name='0.0.0.0', server_port=80)
inference_followyourpose.py CHANGED
@@ -5,6 +5,7 @@ import copy
5
  import gradio as gr
6
  from transformers import AutoTokenizer, CLIPTextModel
7
  from huggingface_hub import snapshot_download
 
8
  import sys
9
  sys.path.append('FollowYourPose')
10
 
@@ -21,10 +22,11 @@ class merge_config_then_run():
21
  self.vae = None
22
  self.unet = None
23
  self.download_model()
 
24
 
25
  def download_model(self):
26
  REPO_ID = 'YueMafighting/FollowYourPose_v1'
27
- snapshot_download(repo_id=REPO_ID, local_dir='./FollowYourPose/checkpoints/', local_dir_use_symlinks=False)
28
 
29
 
30
  def run(
@@ -42,7 +44,7 @@ class merge_config_then_run():
42
  top_crop=0,
43
  bottom_crop=0,
44
  ):
45
-
46
  default_edit_config='./FollowYourPose/configs/pose_sample.yaml'
47
  Omegadict_default_edit_config = OmegaConf.load(default_edit_config)
48
 
@@ -73,7 +75,7 @@ class merge_config_then_run():
73
  # ddim config
74
  config_now['validation_data']['guidance_scale'] = guidance_scale
75
  config_now['validation_data']['num_inference_steps'] = num_steps
76
- config_now['skeleton_path'] = data_path
77
 
78
  save_path = test(**config_now)
79
  mp4_path = save_path.replace('_0.gif', '_0_0_0.mp4')
 
5
  import gradio as gr
6
  from transformers import AutoTokenizer, CLIPTextModel
7
  from huggingface_hub import snapshot_download
8
+ from inference_mmpose import *
9
  import sys
10
  sys.path.append('FollowYourPose')
11
 
 
22
  self.vae = None
23
  self.unet = None
24
  self.download_model()
25
+ self.mmpose = gr.Interface.load(name="spaces/fffiloni/mmpose-estimation")
26
 
27
  def download_model(self):
28
  REPO_ID = 'YueMafighting/FollowYourPose_v1'
29
+ snapshot_download(repo_id=REPO_ID, local_dir='./FollowYourPose/checkpoints/', local_dir_use_symlinks=False)
30
 
31
 
32
  def run(
 
44
  top_crop=0,
45
  bottom_crop=0,
46
  ):
47
+ infer_skeleton(self.mmpose,data_path)
48
  default_edit_config='./FollowYourPose/configs/pose_sample.yaml'
49
  Omegadict_default_edit_config = OmegaConf.load(default_edit_config)
50
 
 
75
  # ddim config
76
  config_now['validation_data']['guidance_scale'] = guidance_scale
77
  config_now['validation_data']['num_inference_steps'] = num_steps
78
+ config_now['skeleton_path'] = './mmpose_result.mp4'
79
 
80
  save_path = test(**config_now)
81
  mp4_path = save_path.replace('_0.gif', '_0_0_0.mp4')
inference_mmpose.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ import os
4
+ import cv2
5
+ import numpy as np
6
+ from PIL import Image
7
+ from moviepy.editor import *
8
+
9
+ import sys
10
+ sys.path.append('FollowYourPose')
11
+
12
def get_frames(video_in):
    """Resize *video_in* to 512 px height, trim it to frames 0-50, and dump
    every frame of the result to ./raw_frames/ as JPEGs.

    Returns:
        (frame_paths, fps): list of written JPEG paths, and the frame rate
        reported by OpenCV for the resized intermediate video.
    """
    frames = []
    clip = VideoFileClip(video_in)
    start_frame = 0  # first frame to keep
    end_frame = 50   # last frame to keep

    # Cap the playback rate at 30 fps so downstream processing stays bounded.
    if clip.fps > 30:
        print("video rate is over 30, resetting to 30")
        clip_resized = clip.resize(height=512)
        clip_resized = clip_resized.subclip(start_frame / clip_resized.fps,
                                            end_frame / clip_resized.fps)
        clip_resized.write_videofile("./video_resized.mp4", fps=30)
    else:
        print("video rate is OK")
        clip_resized = clip.resize(height=512)
        clip_resized = clip_resized.subclip(start_frame / clip.fps,
                                            end_frame / clip.fps)
        clip_resized.write_videofile("./video_resized.mp4", fps=clip.fps)

    print("video resized to 512 height")

    # Re-open the resized file with OpenCV to split it into frames.
    cap = cv2.VideoCapture("./video_resized.mp4")

    fps = cap.get(cv2.CAP_PROP_FPS)
    print("video fps: " + str(fps))

    # Fix: cv2.imwrite does not create directories; without this the frames
    # were silently dropped when ./raw_frames/ was missing.
    os.makedirs("./raw_frames", exist_ok=True)

    i = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Build the path once instead of duplicating the expression.
        frame_path = './raw_frames/kang' + str(i) + '.jpg'
        cv2.imwrite(frame_path, frame)
        frames.append(frame_path)
        i += 1

    cap.release()
    cv2.destroyAllWindows()
    print("broke the video into frames")

    return frames, fps
53
+
54
def get_mmpose_filter(mmpose, i):
    """Run the hosted mmpose Space on the frame at path *i* and save the
    returned skeleton image as a JPEG under ./mmpose_frames/.

    Args:
        mmpose: a loaded gr.Interface for the mmpose-estimation Space;
            calling it with fn_index=0 returns a tuple whose second element
            is the rendered pose image path.
        i: filesystem path of one input frame (e.g. './raw_frames/kang3.jpg').

    Returns:
        Path of the saved skeleton JPEG.
    """
    # Index 1 of the Space's output tuple is the rendered pose image.
    image = mmpose(i, fn_index=0)[1]
    image = Image.open(image)

    # Fix: ensure the output directory exists before saving — PIL's save()
    # raises if the directory is missing.
    os.makedirs("./mmpose_frames", exist_ok=True)

    # Build the output path once instead of duplicating the expression;
    # [:-4] strips the input's '.jpg' extension.
    out_path = "./mmpose_frames/mmpose_frame_" + str(i).split('/')[-1][:-4] + ".jpeg"
    image.save(out_path)
    return out_path
63
+
64
def create_video(frames, fps, type):
    """Assemble *frames* (a list of image paths) into an MP4 at *fps*.

    The output filename is prefixed with *type* (e.g. 'mmpose' ->
    'mmpose_result.mp4'). Returns the output path.
    """
    print("building video result")
    out_name = type + "_result.mp4"
    sequence = ImageSequenceClip(frames, fps=fps)
    sequence.write_videofile(out_name, fps=fps)
    return out_name
70
+
71
def convertG2V(imported_gif):
    """Convert an uploaded GIF (a file object exposing .name) to an MP4.

    Returns the path of the written MP4.
    """
    out_name = "my_gif_video.mp4"
    VideoFileClip(imported_gif.name).write_videofile(out_name)
    return out_name
75
+
76
def infer_skeleton(mmpose, video_in):
    """Produce a pose-skeleton video for *video_in* via the mmpose Space.

    Breaks the input into frames, runs pose estimation on each frame
    through the remote Space, and reassembles the results into
    'mmpose_result.mp4'.

    Returns:
        (final_vid, files): the output MP4 path, and a one-element list
        containing that same path (Gradio file-output convention).
    """
    # 1. break video into frames and get FPS
    frames_list, fps = get_frames(video_in)
    n_frame = len(frames_list)
    # Fix: the original compared n_frame >= len(frames_list) immediately
    # after setting n_frame = len(frames_list), so it unconditionally
    # printed a misleading "video is shorter than the cut value" message.
    # Trim support was already commented out, so every frame is processed.

    # 2. run pose estimation on each frame
    result_frames = []
    print("set stop frames to: " + str(n_frame))

    for frame_path in frames_list[:n_frame]:
        mmpose_frame = get_mmpose_filter(mmpose, frame_path)
        result_frames.append(mmpose_frame)
        print("frame " + frame_path + "/" + str(n_frame) + ": done;")

    # 3. stitch the skeleton frames back into a video
    final_vid = create_video(result_frames, fps, "mmpose")
    files = [final_vid]

    return final_vid, files
requirements.txt CHANGED
@@ -110,3 +110,6 @@ Werkzeug==2.2.3
110
  xformers==0.0.16
111
  yarl==1.8.2
112
  zipp==3.14.0
 
 
 
 
110
  xformers==0.0.16
111
  yarl==1.8.2
112
  zipp==3.14.0
113
+ opencv-python
114
+ moviepy
115
+ controlnet_aux