vgvm committed on
Commit
863686e
1 Parent(s): a8c9a4d

completely rework the ui

Browse files
Files changed (1) hide show
  1. app.py +230 -222
app.py CHANGED
@@ -1,7 +1,10 @@
1
  ########################################################################################
2
- import gradio as gr
3
 
 
4
  import cv2
 
 
5
  import matplotlib
6
  import matplotlib.cm
7
  import mediapipe as mp
@@ -16,246 +19,265 @@ from mediapipe.python.solutions.drawing_utils import _normalized_to_pixel_coordi
16
  from PIL import Image
17
  from quads import QUADS
18
  from typing import List, Mapping, Optional, Tuple, Union
 
19
  from utils import colorize, get_most_recent_subdirectory
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
- class face_image_to_face_mesh:
22
  def __init__(self):
23
- self.zoe_me = True
24
- self.uvwrap = not True
25
 
26
  def demo(self):
27
- if self.zoe_me:
28
- DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
29
- self.zoe = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).to(DEVICE).eval()
30
-
31
- demo = gr.Blocks(css=self.css(), cache_examples=True)
32
  with demo:
33
  gr.Markdown(self.header())
34
 
 
 
35
  with gr.Row():
36
- with gr.Column():
37
- upload_image = gr.Image(label="Input image", type="numpy", source="upload")
38
- self.temp_dir = get_most_recent_subdirectory( upload_image.DEFAULT_TEMP_DIR )
39
- print( f'The temp_dir is {self.temp_dir}' )
40
-
41
- gr.Examples( examples=[
42
- 'examples/blonde-00081-399357008.png',
43
- 'examples/dude-00110-1227390728.png',
44
- 'examples/granny-00056-1867315302.png',
45
- 'examples/tuffie-00039-499759385.png',
46
- 'examples/character.png',
47
- ], inputs=[upload_image] )
48
- upload_image_btn = gr.Button(value="Detect faces")
49
- if self.zoe_me:
50
- with gr.Group():
51
- zoe_scale = gr.Slider(label="Mix the ZoeDepth with the MediaPipe Depth", value=1, minimum=0, maximum=1, step=.01)
52
- flat_scale = gr.Slider(label="Depth scale, smaller is flatter and possibly more flattering", value=1, minimum=0, maximum=1, step=.01)
53
- min_detection_confidence = gr.Slider(label="Mininum face detection confidence", value=.5, minimum=0, maximum=1.0, step=0.01)
54
- else:
55
- use_zoe = False
56
- zoe_scale = 0
57
  with gr.Group():
58
- gr.Markdown(self.footer())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
 
60
- with gr.Column():
61
- with gr.Group():
62
- output_mesh = gr.Model3D(clear_color=3*[0], label="3D Model",elem_id='mesh-display-output')
63
- output_image = gr.Image(label="Output image",elem_id='img-display-output')
64
- depth_image = gr.Image(label="Depth image",elem_id='img-display-output')
65
- num_faces_detected = gr.Number(label="Number of faces detected", value=0)
66
-
67
- upload_image_btn.click(
68
- fn=self.detect,
69
- inputs=[upload_image, min_detection_confidence,zoe_scale,flat_scale],
70
- outputs=[output_mesh, output_image, depth_image, num_faces_detected]
71
- )
72
  demo.launch()
73
 
 
 
74
 
75
- def detect(self, image, min_detection_confidence, zoe_scale, flat_scale):
76
- width = image.shape[1]
77
- height = image.shape[0]
78
- ratio = width / height
79
 
80
- mp_drawing = mp.solutions.drawing_utils
81
- mp_drawing_styles = mp.solutions.drawing_styles
82
- mp_face_mesh = mp.solutions.face_mesh
83
-
84
- mesh = "examples/converted/in-granny.obj"
85
-
86
- if self.zoe_me and 0 < zoe_scale:
87
- depth = self.zoe.infer_pil(image)
88
- idepth = colorize(depth, cmap='gray_r')
89
- else:
90
- depth = None
91
- idepth = image
92
-
93
- drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
94
- with mp_face_mesh.FaceMesh(
95
- static_image_mode=True,
96
- max_num_faces=1,
97
- min_detection_confidence=min_detection_confidence) as face_mesh:
98
- results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
99
- if not results.multi_face_landmarks:
100
- return mesh, image, idepth, 0
101
-
102
- annotated_image = image.copy()
103
- for face_landmarks in results.multi_face_landmarks:
104
- (mesh,mtl,png) = self.toObj(image=image, width=width, height=height, ratio=ratio, landmark_list=face_landmarks, depth=depth, zoe_scale=zoe_scale, flat_scale=flat_scale)
105
-
106
- mp_drawing.draw_landmarks(
107
- image=annotated_image,
108
- landmark_list=face_landmarks,
109
- connections=mp_face_mesh.FACEMESH_TESSELATION,
110
- landmark_drawing_spec=None,
111
- connection_drawing_spec=mp_drawing_styles
112
- .get_default_face_mesh_tesselation_style())
113
- mp_drawing.draw_landmarks(
114
- image=annotated_image,
115
- landmark_list=face_landmarks,
116
- connections=mp_face_mesh.FACEMESH_CONTOURS,
117
- landmark_drawing_spec=None,
118
- connection_drawing_spec=mp_drawing_styles
119
- .get_default_face_mesh_contours_style())
120
-
121
- return mesh, annotated_image, idepth, 1
122
-
123
- def toObj( self, image: np.ndarray, width:int, height:int, ratio: float, landmark_list: landmark_pb2.NormalizedLandmarkList, depth: np.ndarray, zoe_scale: float, flat_scale: float):
124
- print( f'you have such pretty hair', self.temp_dir )
125
-
126
- hf_hack = True
127
- if hf_hack:
128
- obj_file = tempfile.NamedTemporaryFile(suffix='.obj', delete=False)
129
- mtl_file = tempfile.NamedTemporaryFile(suffix='.mtl', delete=False)
130
- png_file = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
131
- else:
132
- obj_file = tempfile.NamedTemporaryFile(suffix='.obj', dir=self.temp_dir, delete=False)
133
- mtl_file = tempfile.NamedTemporaryFile(suffix='.mtl', dir=self.temp_dir, delete=False)
134
- png_file = tempfile.NamedTemporaryFile(suffix='.png', dir=self.temp_dir, delete=False)
135
-
136
- ############################################
137
- (points,coordinates,colors) = self.landmarksToPoints( image, width, height, ratio, landmark_list, depth, zoe_scale, flat_scale )
138
- ############################################
139
-
140
- lines = []
141
-
142
- lines.append( f'o MyMesh' )
143
-
144
- if hf_hack:
145
- # the 'file=' is a gradio hack
146
- lines.append( f'#mtllib file={mtl_file.name}' )
147
- else:
148
- # the 'file=' is a gradio hack
149
- lines.append( f'mtllib file={mtl_file.name}' )
150
-
151
- for index, point in enumerate(points):
152
- color = colors[index]
153
- scaled_color = [value / 255 for value in color] # Scale colors down to 0-1 range
154
- flipped = [-value for value in point]
155
- flipped[ 0 ] = -flipped[ 0 ]
156
- lines.append( "v " + " ".join(map(str, flipped + color)) )
157
-
158
- for coordinate in coordinates:
159
- lines.append( "vt " + " ".join([str(value) for value in coordinate]) )
160
-
161
- for quad in QUADS:
162
- #quad = list(reversed(quad))
163
- normal = self.totallyNormal( points[ quad[ 0 ] -1 ], points[ quad[ 1 ] -1 ], points[ quad[ 2 ] -1 ] )
164
- lines.append( "vn " + " ".join([str(value) for value in normal]) )
165
-
166
- lines.append( 'usemtl MyMaterial' )
167
-
168
- quadIndex = 0
169
- for quad in QUADS:
170
- quadIndex = 1 + quadIndex
171
- face_uv = "f " + " ".join([f'{vertex}/{vertex}/{quadIndex}' for vertex in quad])
172
- face_un = "f " + " ".join([str(vertex) for vertex in quad])
173
- if self.uvwrap:
174
- lines.append( face_uv )
175
- else:
176
- lines.append( f'#{face_uv}' )
177
- lines.append( f'{face_un}' )
178
- #"f " + " ".join([str(vertex) for vertex in quad]) )
179
-
180
- out = open( obj_file.name, 'w' )
181
- out.write( '\n'.join( lines ) + '\n' )
182
- out.close()
183
 
184
- ############################################
 
 
185
 
186
- lines = []
187
- lines.append( 'newmtl MyMaterial' )
188
- lines.append( f'map_Kd file={png_file.name}' ) # the 'file=' is a gradio hack
189
 
190
- out = open( mtl_file.name, 'w' )
191
- out.write( '\n'.join( lines ) + '\n' )
192
- out.close()
 
 
 
 
193
 
194
- ############################################
 
 
 
195
 
196
- cv2.imwrite(png_file.name, cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
 
 
 
197
 
198
- ############################################
 
 
 
199
 
200
- print( f'I know it is special to you so I saved it to {obj_file.name} since we are friends' )
201
- return (obj_file.name,mtl_file.name,png_file.name)
202
 
203
- def landmarksToPoints( self, image:np.ndarray, width: int, height: int, ratio: float, landmark_list: landmark_pb2.NormalizedLandmarkList, depth: np.ndarray, zoe_scale: float, flat_scale: float ):
204
- points = [] # 3d vertices
205
- coordinates = [] # 2d texture coordinates
206
- colors = [] # 3d rgb info
207
 
208
- mins = [+np.inf] * 3
209
- maxs = [-np.inf] * 3
210
 
211
- mp_scale = 1 - zoe_scale
212
- print( f'zoe_scale:{zoe_scale}, mp_scale:{mp_scale}' )
213
 
214
- for idx, landmark in enumerate(landmark_list.landmark):
215
- x, y = _normalized_to_pixel_coordinates(landmark.x,landmark.y,width,height)
216
- color = image[y,x]
217
- colors.append( [value / 255 for value in color ] )
218
- coordinates.append( [x/width,1-y/height] )
219
 
220
- if depth is not None:
221
- landmark.z = depth[y, x] * zoe_scale + mp_scale * landmark.z
 
 
222
 
223
- landmark.z = landmark.z * flat_scale
 
 
224
 
225
- point = [landmark.x * ratio, landmark.y, landmark.z];
226
- for pidx,value in enumerate( point ):
227
- mins[pidx] = min(mins[pidx],value)
228
- maxs[pidx] = max(maxs[pidx],value)
229
- points.append( point )
230
 
231
- mids = [(min_val + max_val) / 2 for min_val, max_val in zip(mins, maxs)]
232
- for idx,point in enumerate( points ):
233
- points[idx] = [(val-mid) for val, mid in zip(point,mids)]
234
 
235
- print( f'mins: {mins}' )
236
- print( f'mids: {mids}' )
237
- print( f'maxs: {maxs}' )
238
- return (points,coordinates,colors)
239
 
 
240
 
241
- def totallyNormal(self, p0, p1, p2):
242
- v1 = np.array(p1) - np.array(p0)
243
- v2 = np.array(p2) - np.array(p0)
244
- normal = np.cross(v1, v2)
245
- normal = normal / np.linalg.norm(normal)
246
- return normal.tolist()
247
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
 
249
  def header(self):
250
  return ("""
251
- # Image to Quad Mesh
 
 
252
 
253
- Uses MediaPipe to detect a face in an image and convert it to a quad mesh.
254
- Saves to OBJ since gltf does not support quad faces. The 3d viewer has Y pointing the opposite direction from Blender, so ya hafta spin it.
 
 
 
255
 
256
- The face depth with Zoe can be a bit much and without it is a bit generic. In blender you can fix this just by snapping to the high poly model. For photos turning it down to .4 helps, but may still need cleanup...
 
257
 
258
- Highly recommend running it locally. The 3D model has uv values in the faces, but you will have to either use the script or do some manually tomfoolery.
 
259
 
260
  Quick import result in examples/converted/movie-gallery.mp4 under files
261
  """)
@@ -286,6 +308,8 @@ class face_image_to_face_mesh:
286
 
287
  This is all a work around for a weird hf+gradios+babylonjs bug which seems to be related to the version
288
  of babylonjs being used... It works fine in a local babylonjs sandbox...
 
 
289
 
290
  # Suggested Workflows
291
 
@@ -309,31 +333,15 @@ class face_image_to_face_mesh:
309
  1. generate a face in sd
310
  2. generate the mesh
311
  3. repose it and use it for further generation
 
 
312
 
313
- May need to expanded the generated mesh to cover more, maybe with
314
  <a href="https://github.com/shunsukesaito/PIFu" target="_blank">PIFu model</a>.
315
-
316
  """)
 
317
 
318
-
319
- def css(self):
320
- return ("""
321
- #mesh-display-output {
322
- max-height: 44vh;
323
- max-width: 44vh;
324
- width:auto;
325
- height:auto
326
- }
327
- #img-display-output {
328
- max-height: 28vh;
329
- max-width: 28vh;
330
- width:auto;
331
- height:auto
332
- }
333
- """)
334
-
335
-
336
- face_image_to_face_mesh().demo()
337
 
338
  # EOF
339
  ########################################################################################
 
1
  ########################################################################################
2
+ #
3
 
4
+ import gradio as gr
5
  import cv2
6
+ import glob
7
+ import json
8
  import matplotlib
9
  import matplotlib.cm
10
  import mediapipe as mp
 
19
  from PIL import Image
20
  from quads import QUADS
21
  from typing import List, Mapping, Optional, Tuple, Union
22
+
23
  from utils import colorize, get_most_recent_subdirectory
24
+ from MediaMesh import *
25
+
26
+ class FaceMeshWorkflow:
27
+ LOG = logging.getLogger(__name__)
28
+
29
+ IMAGE = 'image'
30
+ LABEL = 'label'
31
+ MESH = 'mesh'
32
+ LO = 'lo'
33
+ HI = 'hi'
34
+ TO_LO = 'toLo'
35
+ TO_HI = 'toHi'
36
+ WEIGHT = 'weight'
37
+ BUTTON = 'button'
38
 
 
39
  def __init__(self):
40
+ self.mm = mediaMesh = MediaMesh().demoSetup()
 
41
 
42
  def demo(self):
43
+ demo = gr.Blocks()
44
+ sources = {source:{} for source in 'mediapipe zoe midas'.split()}
45
+ flat_inn = []
46
+ flat_out = []
 
47
  with demo:
48
  gr.Markdown(self.header())
49
 
50
+ # image input and annotated output
51
+
52
  with gr.Row():
53
+ upload_image = gr.Image(label="Input image", type="numpy", source="upload")
54
+ flat_inn.append( upload_image )
55
+ examples = gr.Examples( examples=self.examples(), inputs=[upload_image] )
56
+ detect_button = gr.Button(value="Detect Faces")
57
+ faced_image = self.img('faced image')
58
+ flat_out.append( faced_image )
59
+
60
+ # per source widget sets
61
+
62
+ for name, source in sources.items():
63
+ with gr.Row():
64
+ source[ FaceMeshWorkflow.LABEL ] = gr.Label(label=name, value=name)
65
+ with gr.Row():
66
+ source[ FaceMeshWorkflow.IMAGE ] = self.img(f'{name} depth')
 
 
 
 
 
 
 
67
  with gr.Group():
68
+ source[ FaceMeshWorkflow.LO ] = gr.Label( label=f'{name}:Min', value=+33)
69
+ source[ FaceMeshWorkflow.HI ] = gr.Label( label=f'{name}:Max', value=-33)
70
+ source[ FaceMeshWorkflow.TO_LO ] = gr.Slider(label=f'{name}:Target Min', value=-.11, minimum=-3.3, maximum=3.3, step=0.01)
71
+ source[ FaceMeshWorkflow.TO_HI ] = gr.Slider(label=f'{name}:Target Max', value=+.11, minimum=-3.3, maximum=3.3, step=0.01)
72
+ source[ FaceMeshWorkflow.BUTTON ] = gr.Button(value='Update Mesh')
73
+ source[ FaceMeshWorkflow.MESH ] = self.m3d(name)
74
+
75
+ # the combined mesh with controls
76
+
77
+ weights = []
78
+ with gr.Row():
79
+ with gr.Row():
80
+ with gr.Column():
81
+ for name, source in sources.items():
82
+ source[ FaceMeshWorkflow.WEIGHT ] = gr.Slider(label=f'{name}:Source Weight', value=1, minimum=-1, maximum=1, step=0.01)
83
+ weights.append( source[ FaceMeshWorkflow.WEIGHT ] )
84
+ combine_button = gr.Button(value="Combined Meshes")
85
+ with gr.Column():
86
+ combined_mesh = self.m3d( 'combined' )
87
+ flat_out.append( combined_mesh )
88
+
89
+ # setup the button clicks
90
+
91
+ outties = {k:True for k in [ FaceMeshWorkflow.MESH, FaceMeshWorkflow.IMAGE, FaceMeshWorkflow.LO, FaceMeshWorkflow.HI]}
92
+ for name, source in sources.items():
93
+ update_inputs = []
94
+ update_outputs = [combined_mesh, source[FaceMeshWorkflow.MESH]]
95
+ for key, control in source.items():
96
+ if key is FaceMeshWorkflow.BUTTON:
97
+ continue
98
+ if key in outties:
99
+ flat_out.append( control )
100
+ else:
101
+ if not key is FaceMeshWorkflow.LABEL:
102
+ flat_inn.append( control )
103
+ update_inputs.append( control )
104
+ source[FaceMeshWorkflow.BUTTON].click( fn=self.remesh, inputs=update_inputs, outputs=update_outputs )
105
+
106
+ detect_button.click( fn=self.detect, inputs=flat_inn, outputs=flat_out )
107
+ combine_button.click( fn=self.combine, inputs=weights, outputs=[combined_mesh] )
108
 
 
 
 
 
 
 
 
 
 
 
 
 
109
  demo.launch()
110
 
111
+ def detect(self, image:np.ndarray, mp_lo, mp_hi, mp_wt, zoe_lo, zoe_hi, zoe_wt, midas_lo, midas_hi, midas_wt):
112
+ self.mm.detect(image)
113
 
114
+ self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].weight = mp_wt
115
+ self.mm.weightMap.values[ ZoeDepthSource.NAME ].weight = zoe_wt
116
+ self.mm.weightMap.values[ MidasDepthSource.NAME ].weight = midas_wt
 
117
 
118
+ self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].toLo = mp_lo
119
+ self.mm.weightMap.values[ ZoeDepthSource.NAME ].toLo = zoe_lo
120
+ self.mm.weightMap.values[ MidasDepthSource.NAME ].toLo = midas_lo
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
 
122
+ self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].toHi = mp_hi
123
+ self.mm.weightMap.values[ ZoeDepthSource.NAME ].toHi = zoe_hi
124
+ self.mm.weightMap.values[ MidasDepthSource.NAME ].toHi = midas_hi
125
 
126
+ meshes = self.mm.meshmerizing()
 
 
127
 
128
+ z = self.mm.depthSources[0]
129
+ m = self.mm.depthSources[1]
130
+
131
+ ##################################################################
132
+
133
+ annotated = self.mm.annotated
134
+ combined_mesh = meshes[MediaMesh.COMBINED][0]
135
 
136
+ mp_gray = self.mm.gray
137
+ mp_lo = str(self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].lo)
138
+ mp_hi = str(self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].hi)
139
+ mp_mesh = meshes[DepthMap.MEDIA_PIPE][0]
140
 
141
+ zoe_gray = z.gray
142
+ zoe_lo = str(self.mm.weightMap.values[z.name].lo)
143
+ zoe_hi = str(self.mm.weightMap.values[z.name].hi)
144
+ zoe_mesh = meshes[z.name][0]
145
 
146
+ midas_gray = m.gray
147
+ midas_lo = str(self.mm.weightMap.values[m.name].lo)
148
+ midas_hi = str(self.mm.weightMap.values[m.name].hi)
149
+ midas_mesh = meshes[m.name][0]
150
 
151
+ ##################################################################
152
+ # gotta write 'em to disk for some reason
153
 
154
+ combined_mesh = self.writeMesh( MediaMesh.COMBINED, meshes[MediaMesh.COMBINED][0] )
155
+ mp_mesh = self.writeMesh( DepthMap.MEDIA_PIPE, meshes[DepthMap.MEDIA_PIPE][0] )
156
+ zoe_mesh = self.writeMesh( z.name, meshes[z.name][0] )
157
+ midas_mesh = self.writeMesh( m.name, meshes[m.name][0] )
158
 
159
+ ##################################################################
160
+ # [image, model3d, (image, label, label, model3d), (image, label, label, model3d), (image, label, label, model3d)]
161
 
162
+ return annotated, combined_mesh, mp_gray, mp_lo, mp_hi, mp_mesh, zoe_gray, zoe_lo, zoe_hi, zoe_mesh, midas_gray, midas_lo, midas_hi, midas_mesh
 
163
 
164
+ def combine(self, mp_wt, zoe_wt, midas_wt ):
165
+ self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].weight = mp_wt
166
+ self.mm.weightMap.values[ ZoeDepthSource.NAME ].weight = zoe_wt
167
+ self.mm.weightMap.values[ MidasDepthSource.NAME ].weight = midas_wt
168
+ return self.writeMesh(MediaMesh.COMBINED, self.mm.toObj(MediaMesh.COMBINED)[0])
169
 
170
+ def kombine(self, image:np.ndarray, mp_lo, mp_hi, mp_wt, zoe_lo, zoe_hi, zoe_wt, midas_lo, midas_hi, midas_wt):
171
+ self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].weight = mp_wt
172
+ self.mm.weightMap.values[ ZoeDepthSource.NAME ].weight = zoe_wt
173
+ self.mm.weightMap.values[ MidasDepthSource.NAME ].weight = midas_wt
174
 
175
+ self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].toLo = mp_lo
176
+ self.mm.weightMap.values[ ZoeDepthSource.NAME ].toLo = zoe_lo
177
+ self.mm.weightMap.values[ MidasDepthSource.NAME ].toLo = midas_lo
178
 
179
+ self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].toHi = mp_hi
180
+ self.mm.weightMap.values[ ZoeDepthSource.NAME ].toHi = zoe_hi
181
+ self.mm.weightMap.values[ MidasDepthSource.NAME ].toHi = midas_hi
 
 
182
 
183
+ meshes = self.mm.meshmerizing()
 
 
184
 
185
+ z = self.mm.depthSources[0]
186
+ m = self.mm.depthSources[1]
 
 
187
 
188
+ ##################################################################
189
 
190
+ annotated = self.mm.annotated
191
+ combined_mesh = meshes[MediaMesh.COMBINED][0]
 
 
 
 
192
 
193
+ mp_gray = self.mm.gray
194
+ mp_lo = str(self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].lo)
195
+ mp_hi = str(self.mm.weightMap.values[ DepthMap.MEDIA_PIPE ].hi)
196
+ mp_mesh = meshes[DepthMap.MEDIA_PIPE][0]
197
+
198
+ zoe_gray = z.gray
199
+ zoe_lo = str(self.mm.weightMap.values[z.name].lo)
200
+ zoe_hi = str(self.mm.weightMap.values[z.name].hi)
201
+ zoe_mesh = meshes[z.name][0]
202
+
203
+ midas_gray = m.gray
204
+ midas_lo = str(self.mm.weightMap.values[m.name].lo)
205
+ midas_hi = str(self.mm.weightMap.values[m.name].hi)
206
+ midas_mesh = meshes[m.name][0]
207
+
208
+ ##################################################################
209
+ # gotta write 'em to disk for some reason
210
+
211
+ combined_mesh = self.writeMesh( MediaMesh.COMBINED, meshes[MediaMesh.COMBINED][0] )
212
+ mp_mesh = self.writeMesh( DepthMap.MEDIA_PIPE, meshes[DepthMap.MEDIA_PIPE][0] )
213
+ zoe_mesh = self.writeMesh( z.name, meshes[z.name][0] )
214
+ midas_mesh = self.writeMesh( m.name, meshes[m.name][0] )
215
+
216
+ ##################################################################
217
+ # [image, model3d, (image, label, label, model3d), (image, label, label, model3d), (image, label, label, model3d)]
218
+
219
+ return annotated, combined_mesh, mp_gray, mp_lo, mp_hi, mp_mesh, zoe_gray, zoe_lo, zoe_hi, zoe_mesh, midas_gray, midas_lo, midas_hi, midas_mesh
220
+
221
+ def remesh(self, label:Dict[str,str], lo:float, hi:float, wt:float):
222
+ name = label[ 'label' ] # hax
223
+ FaceMeshWorkflow.LOG.info( f'remesh {name} with lo:{lo}, hi:{hi} and wt:{wt}' )
224
+
225
+ self.mm.weightMap.values[ name ].toLo = lo
226
+ self.mm.weightMap.values[ name ].toHi = hi
227
+ self.mm.weightMap.values[ name ].weight = wt
228
+
229
+ mesh = self.writeMesh(name, self.mm.singleSourceMesh(name)[0])
230
+ combined = self.writeMesh(MediaMesh.COMBINED, self.mm.toObj(MediaMesh.COMBINED)[0])
231
+
232
+ return mesh, combined
233
+
234
+ def writeMesh(self, name:str, mesh:List[str])->str:
235
+ file = tempfile.NamedTemporaryFile(suffix='.obj', delete=False).name
236
+ out = open( file, 'w' )
237
+ out.write( '\n'.join( mesh ) + '\n' )
238
+ out.close()
239
+ return file
240
+
241
+ def detective(self, *args):
242
+ for arg in args:
243
+ wat = 'TMI' if isinstance(arg, np.ndarray) else arg
244
+ #c = '#' if hf_hack else ''
245
+ print( f'hi there {type(arg)} ur a nice {wat} to have ' )
246
+ return None
247
+
248
+ def m3d(self, name:str):
249
+ return gr.Model3D(clear_color=3*[0], label=f"{name} mesh", elem_id='mesh-display-output')
250
+
251
+ def img(self, name:str, src:str='upload'):
252
+ return gr.Image(label=name,elem_id='img-display-output',source=src)
253
+
254
+ def examples(self) -> List[str]:
255
+ return glob.glob('examples/*png')
256
+ return [
257
+ 'examples/blonde-00081-399357008.png',
258
+ 'examples/dude-00110-1227390728.png',
259
+ 'examples/granny-00056-1867315302.png',
260
+ 'examples/tuffie-00039-499759385.png',
261
+ 'examples/character.png',
262
+ ]
263
 
264
  def header(self):
265
  return ("""
266
+ # FaceMeshWorkflow
267
+
268
+ The process goes like this:
269
 
270
+ 1. select an input image
271
+ 2. click "Detect Faces"
272
+ 3. fine tune the different depth sources
273
+ 4. fine tune the combinations of the depth sources
274
+ 5. download the obj and have fun
275
 
276
+ The primary motivation was that all the MediaPipe faces all looked the same.
277
+ Usually ZoeDepth is usually better, but can be extreme. Midas works sometimes :-P
278
 
279
+ The depth analysis is a bit slow. Especially on the hf site, so I recommend running it locally.
280
+ Since the tuning is a post-process to the analysis you can go nuts!
281
 
282
  Quick import result in examples/converted/movie-gallery.mp4 under files
283
  """)
 
308
 
309
  This is all a work around for a weird hf+gradios+babylonjs bug which seems to be related to the version
310
  of babylonjs being used... It works fine in a local babylonjs sandbox...
311
+
312
+ If you forget, the .obj has notes on how to mangle it.
313
 
314
  # Suggested Workflows
315
 
 
333
  1. generate a face in sd
334
  2. generate the mesh
335
  3. repose it and use it for further generation
336
+
337
+ An extension would be hoopy
338
 
339
+ May want to expand the generated mesh to cover more, maybe with
340
  <a href="https://github.com/shunsukesaito/PIFu" target="_blank">PIFu model</a>.
 
341
  """)
342
+
343
 
344
+ FaceMeshWorkflow().demo()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
345
 
346
  # EOF
347
  ########################################################################################