bill-jiang committed

Commit b625c80 • Parent(s): a8de91e

Update render

Files changed (2):
  1. app.py +20 -25
  2. mGPT/render/pyrender/smpl_render.py +69 -56
app.py CHANGED
@@ -6,6 +6,7 @@ import cv2
 import os
 import numpy as np
 import OpenGL.GL as gl
+import imageio
 import pytorch_lightning as pl
 import moviepy.editor as mp
 from pathlib import Path
@@ -118,33 +119,26 @@ def render_motion(data, feats, method='fast'):
         shape = [768, 768]
         render = SMPLRender(cfg.RENDER.SMPL_MODEL_PATH)
 
-        if not os.environ.get("PYOPENGL_PLATFORM"):
-            os.environ["DISPLAY"] = ":0.0"
-            os.environ["PYOPENGL_PLATFORM"] = "egl"
-
-        size = (shape[1], shape[0])
-        fps = 20.0
-        fourcc = cv2.VideoWriter_fourcc('M', 'P', '4', 'V')
-        videoWriter = cv2.VideoWriter(output_mp4_path, fourcc, fps, size)
         r = RRR.from_rotvec(np.array([np.pi, 0.0, 0.0]))
         pose[:, 0] = np.matmul(r.as_matrix().reshape(1, 3, 3), pose[:, 0])
+        vid = []
+        aroot = data[[0], 0]
+        aroot[:, 1] = -aroot[:, 1]
+        params = dict(pred_shape=np.zeros([1, 10]),
+                      pred_root=aroot,
+                      pred_pose=pose)
+        render.init_renderer([shape[0], shape[1], 3], params)
         for i in range(data.shape[0]):
-            img = np.zeros([shape[0], shape[1], 3])
-            aroot = data[[i], 0] + np.array([[0.0, 0.0, 30.0]])
-            aroot[:, 1] = -aroot[:, 1]
-            params = dict(pred_shape=np.zeros([1, 10]),
-                          pred_root=aroot,
-                          pred_pose=pose[[i]])
-            renderImg = render.render(img.copy(), params)
-            renderImg = (renderImg * 255).astype(np.uint8)
-            videoWriter.write(renderImg)
-        videoWriter.release()
-        output_video_h264_name = output_mp4_path[:-4] + '_h264.mp4'
-        command = 'ffmpeg -y -i {} -vcodec h264 {}'.format(
-            output_mp4_path, output_video_h264_name)
-        os.system(command)
-        output_mp4_path = output_video_h264_name
-        video_fname = video_fname[:-4] + '_h264.mp4'
+            renderImg = render.render(i)
+            vid.append(renderImg)
+
+        out = np.stack(vid, axis=0)
+        output_gif_path = output_mp4_path[:-4] + '.gif'
+        imageio.mimwrite(output_gif_path, out, duration=50)
+        out_video = mp.VideoFileClip(output_gif_path)
+        out_video.write_videofile(output_mp4_path)
+        del out, render
     elif method == 'fast':
         output_gif_path = output_mp4_path[:-4] + '.gif'
         if len(data.shape) == 3:
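Note: the slow path no longer re-fits and re-renders the SMPL body per frame; init_renderer runs one SMPL forward pass and one scene setup for the whole sequence, and render(i) only rasterizes frame i. A minimal sketch of the new call sequence (the model directory, frame count, and rest-pose data are placeholder assumptions; it only runs with the SMPL model files on disk):

    import numpy as np
    from mGPT.render.pyrender.smpl_render import SMPLRender

    render = SMPLRender('deps/smpl')  # hypothetical SMPL model directory
    T = 60                            # assumed number of frames
    # Per-frame, per-joint rotation matrices (T, 24, 3, 3); identity = rest pose.
    pose = np.tile(np.eye(3, dtype=np.float32), (T, 24, 1, 1))
    params = dict(pred_shape=np.zeros([1, 10]),                   # neutral body shape
                  pred_root=np.zeros([1, 3], dtype=np.float32),   # root translation
                  pred_pose=pose)
    render.init_renderer([768, 768, 3], params)   # one-time SMPL forward + scene setup
    vid = [render.render(i) for i in range(T)]    # per-frame images, as in the loop above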
@@ -154,6 +148,7 @@ def render_motion(data, feats, method='fast'):
         pose_vis = plot_3d.draw_to_batch(data, [''], [output_gif_path])
         out_video = mp.VideoFileClip(output_gif_path)
         out_video.write_videofile(output_mp4_path)
+        del pose_vis
 
     return output_mp4_path, video_fname, output_npy_path, feats_fname
@@ -544,7 +539,7 @@ with gr.Blocks(css=customCSS) as demo:
                              label="Visulization method",
                              interactive=True,
                              elem_id="method",
-                             value="fast")
+                             value="slow")
 
         language = gr.Dropdown(["English", "中文"],
                                label="Speech language",
mGPT/render/pyrender/smpl_render.py CHANGED
@@ -55,7 +55,7 @@ class Renderer:
                  faces=None):
         self.renderer = pyrender.OffscreenRenderer(viewport_width=img_res[0],
                                                    viewport_height=img_res[1],
-                                                   point_size=1.0)
+                                                   point_size=2.0)
         self.focal_length = focal_length
         self.camera_center = [img_res[0] // 2, img_res[1] // 2]
         self.faces = faces
@@ -65,58 +65,62 @@ class Renderer:
         else:
             self.device = torch.device("cpu")
 
-        vertices = np.concatenate(vertices)
-        # Center the first root to the first frame
-        vertices -= vertices[[0], [0], :]
-        # Remove the floor
-        vertices[..., 2] -= vertices[..., 2].min()
-        data = vertices[..., [2, 0, 1]]
-        minx, miny, _ = data.min(axis=(0, 1))
-        maxx, maxy, _ = data.max(axis=(0, 1))
-        minz, maxz = -0.5, 0.5
-        minx = minx - 0.5
-        maxx = maxx + 0.5
-        miny = miny - 0.5
-        maxy = maxy + 0.5
-
-        polygon = geometry.Polygon([[minx, minz], [minx, maxz], [maxx, maxz],
-                                    [maxx, minz]])
-        self.polygon_mesh = trimesh.creation.extrude_polygon(polygon, 1e-5)
-        self.polygon_mesh.visual.face_colors = [0, 0, 0, 0.21]
         self.rot = trimesh.transformations.rotation_matrix(
             np.radians(180), [1, 0, 0])
-        # self.polygon_mesh.apply_transform(self.rot)
+
+        minx, miny, minz = vertices.min(axis=(0, 1))
+        maxx, maxy, maxz = vertices.max(axis=(0, 1))
+        minx = minx - 0.5
+        maxx = maxx + 0.5
+        minz = minz - 0.5
+        maxz = maxz + 0.5
+
+        floor = geometry.Polygon([[minx, minz], [minx, maxz], [maxx, maxz],
+                                  [maxx, minz]])
+        self.floor = trimesh.creation.extrude_polygon(floor, 1e-5)
+        self.floor.visual.face_colors = [0, 0, 0, 0.2]
+        self.floor.apply_transform(self.rot)
+        self.floor_pose = np.array(
+            [[1, 0, 0, 0], [0, np.cos(np.pi / 2), -np.sin(np.pi / 2), miny],
+             [0, np.sin(np.pi / 2), np.cos(np.pi / 2), 0], [0, 0, 0, 1]])
+
+        c = -np.pi / 6
+        self.camera_pose = [[1, 0, 0, (minx + maxx) / 2],
+                            [0, np.cos(c), -np.sin(c), 1.5],
+                            [
+                                0,
+                                np.sin(c),
+                                np.cos(c),
+                                max(4, minz + (1.5 - miny) * 2, (maxx - minx))
+                            ], [0, 0, 0, 1]]
 
     def __call__(self, vertices, camera_translation):
-        scene = pyrender.Scene(bg_color=(1., 1., 1., 0.8),
-                               ambient_light=(0.4, 0.4, 0.4))
+
+        floor_render = pyrender.Mesh.from_trimesh(self.floor, smooth=False)
 
         material = pyrender.MetallicRoughnessMaterial(
-            metallicFactor=0.4,
+            metallicFactor=0.1,
             alphaMode='OPAQUE',
             baseColorFactor=(0.658, 0.214, 0.0114, 0.2))
         mesh = trimesh.Trimesh(vertices, self.faces)
         mesh.apply_transform(self.rot)
         mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
+
+        camera = pyrender.PerspectiveCamera(yfov=(np.pi / 3.0), znear=0.5)
+
+        light = pyrender.DirectionalLight(color=[1, 1, 1], intensity=350)
+        spot_l = pyrender.SpotLight(color=np.ones(3),
+                                    intensity=300.0,
+                                    innerConeAngle=np.pi / 16,
+                                    outerConeAngle=np.pi / 6)
+        point_l = pyrender.PointLight(color=np.ones(3), intensity=300.0)
+
+        scene = pyrender.Scene(bg_color=(1., 1., 1., 0.8),
+                               ambient_light=(0.4, 0.4, 0.4))
+        scene.add(floor_render, pose=self.floor_pose)
         scene.add(mesh, 'mesh')
 
-        polygon_render = pyrender.Mesh.from_trimesh(self.polygon_mesh,
-                                                    smooth=False)
-        c = np.pi / 2
-        scene.add(polygon_render)
-
-        camera_pose = np.eye(4)
-        camera_translation[0] *= -1.
-        camera_pose[:3, 3] = camera_translation
-        camera = pyrender.IntrinsicsCamera(fx=self.focal_length,
-                                           fy=self.focal_length,
-                                           cx=self.camera_center[0],
-                                           cy=self.camera_center[1])
-        scene.add(camera, pose=camera_pose)
-
-        light = pyrender.DirectionalLight(color=[1, 1, 1], intensity=300)
         light_pose = np.eye(4)
-
         light_pose[:3, 3] = np.array([0, -1, 1])
         scene.add(light, pose=light_pose)
@@ -126,8 +130,10 @@ class Renderer:
         light_pose[:3, 3] = np.array([1, 1, 2])
         scene.add(light, pose=light_pose)
 
-        color, rend_depth = self.renderer.render(
-            scene, flags=pyrender.RenderFlags.RGBA)
+        scene.add(camera, pose=self.camera_pose)
+
+        flags = pyrender.RenderFlags.RGBA | pyrender.RenderFlags.SHADOWS_DIRECTIONAL
+        color, rend_depth = self.renderer.render(scene, flags=flags)
 
         return color
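Note: the hand-written camera_pose above is just a rigid transform: a tilt of -pi/6 about x (looking slightly down) composed with a translation that centers the camera on the motion's x extent and backs it off far enough to frame it. A small check of that decomposition (the scene bounds are made-up example values):

    import numpy as np
    import trimesh

    c = -np.pi / 6                                  # camera tilt, as in this commit
    minx, maxx, miny, minz = -1.0, 1.0, 0.0, 0.0    # example bounds (assumed)
    t = np.array([(minx + maxx) / 2, 1.5,
                  max(4, minz + (1.5 - miny) * 2, (maxx - minx))])

    pose = trimesh.transformations.rotation_matrix(c, [1, 0, 0])
    pose[:3, 3] = t                                 # rotate about x, then translate by t

    expected = np.array([[1, 0, 0, t[0]],
                         [0, np.cos(c), -np.sin(c), t[1]],
                         [0, np.sin(c), np.cos(c), t[2]],
                         [0, 0, 0, 1]])
    assert np.allclose(pose, expected)              # matches the matrix written out above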
 
@@ -141,27 +147,35 @@ class SMPLRender():
         self.device = torch.device("cpu")
         self.smpl = SMPL(SMPL_MODEL_DIR, batch_size=1,
                          create_transl=False).to(self.device)
-        self.vertices = []
+
         self.pred_camera_t = []
-        self.focal_length = 5000
-
-    def fit(self, smpl_param, is_headroot=False):
-        pose = smpl_param['pred_pose']
-        if pose.size == 72:
-            pose = pose.reshape(-1, 3)
-            pose = RRR.from_rotvec(pose).as_matrix()
-            pose = pose.reshape(1, 24, 3, 3)
+        self.focal_length = 110
+
+    def init_renderer(self, res, smpl_param, is_headroot=False):
+        poses = smpl_param['pred_pose']
+        pred_rotmats = []
+        for pose in poses:
+            if pose.size == 72:
+                pose = pose.reshape(-1, 3)
+                pose = RRR.from_rotvec(pose).as_matrix()
+                pose = pose.reshape(1, 24, 3, 3)
+            pred_rotmats.append(
+                torch.from_numpy(pose.astype(np.float32)[None]).to(
+                    self.device))
+
+        pred_rotmat = torch.cat(pred_rotmats, dim=0)
+
         pred_betas = torch.from_numpy(smpl_param['pred_shape'].reshape(
             1, 10).astype(np.float32)).to(self.device)
-        pred_rotmat = torch.from_numpy(pose.astype(np.float32)).to(self.device)
         pred_camera_t = smpl_param['pred_root'].reshape(1,
                                                         3).astype(np.float32)
+
         smpl_output = self.smpl(betas=pred_betas,
                                 body_pose=pred_rotmat[:, 1:],
                                 global_orient=pred_rotmat[:, 0].unsqueeze(1),
                                 pose2rot=False)
-        vertices = smpl_output.vertices[0].detach().cpu().numpy()
-        self.vertices.append(vertices[None])
+
+        self.vertices = smpl_output.vertices.detach().cpu().numpy()
 
         pred_camera_t = pred_camera_t[0]
 
@@ -171,13 +185,12 @@ class SMPLRender():
 
         self.pred_camera_t.append(pred_camera_t)
 
-    def init_renderer(self, res):
         self.renderer = Renderer(vertices=self.vertices,
                                  focal_length=self.focal_length,
                                  img_res=(res[1], res[0]),
                                  faces=self.smpl.faces)
 
     def render(self, index):
-        renderImg = self.renderer(self.vertices[index][0],
-                                  self.pred_camera_t[index].copy())
+        renderImg = self.renderer(self.vertices[index, ...],
+                                  self.pred_camera_t)
         return renderImg
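Note: init_renderer now converts every frame's pose up front so a single batched SMPL forward pass produces all vertices; frames arriving as 72 axis-angle values are turned into 24 rotation matrices with scipy, exactly as in the per-frame branch above. The conversion step in isolation (an all-zero pose is used as a stand-in frame):

    import numpy as np
    from scipy.spatial.transform import Rotation as RRR

    pose = np.zeros(72, dtype=np.float32)            # one frame: 24 joints x 3 axis-angle values
    rotvecs = pose.reshape(-1, 3)                    # (24, 3)
    rotmats = RRR.from_rotvec(rotvecs).as_matrix()   # (24, 3, 3)
    assert rotmats.shape == (24, 3, 3)
    assert np.allclose(rotmats, np.eye(3))           # zero axis-angle -> identity rotations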
 
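Note: the removed slow-path code in app.py set DISPLAY and PYOPENGL_PLATFORM=egl inline. For pyrender's OffscreenRenderer to keep working on a headless host, the platform still has to be selected before pyrender (or OpenGL) is imported; a minimal sketch, independent of this repo:

    import os
    # Must be set before importing pyrender/OpenGL on a display-less machine.
    os.environ.setdefault('PYOPENGL_PLATFORM', 'egl')   # or 'osmesa'

    import numpy as np
    import pyrender

    scene = pyrender.Scene(ambient_light=(0.4, 0.4, 0.4))
    camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0)
    scene.add(camera, pose=np.eye(4))                   # pyrender requires a camera to render

    renderer = pyrender.OffscreenRenderer(viewport_width=768, viewport_height=768)
    color, depth = renderer.render(scene)               # 768x768 render of the empty scene
    renderer.delete()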