vgvm committed
Commit 5bc01b9
Parent: 9df023e

decouple the depth detection from the tuning phase and refactor a lot

Files changed (1): MediaMesh.py +612 -0
MediaMesh.py ADDED
@@ -0,0 +1,612 @@
#!/usr/bin/env python
#############################################################################
#
# This is the bulk of the logic for the gradio demo. You can use it for
# whatever you want. Credit would be nice, but whatever.
#
# You can also run it on an image from the CLI.
#
# TODO:
#
# 1. rework the classes that just wrap Dict and List to extend them
# 2. clean up all the to_dict madness
# 3. convert the print calls to use logging
# 4. add a proper Creative Commons license
# 5. clean up string constants
# 6. replace custom code with libraries (e.g. for OBJ export)
#
#############################################################################
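#
# Example CLI run (a sketch based on main() at the bottom; the input
# filename is only an illustration):
#
#   python MediaMesh.py face.png
#
# This writes an .obj/.mtl pair for the combined mesh and for each depth
# source, the annotated mesh.png, grayscale depth renders, and mesh.json.
#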

import cv2
import json
import logging
import mediapipe as mp
import numpy as np
import os
import sys
import torch

from mediapipe.framework.formats import landmark_pb2
from mediapipe.python.solutions.drawing_utils import _normalized_to_pixel_coordinates
from PIL import Image, ImageDraw
from transformers import DPTFeatureExtractor, DPTForDepthEstimation
from typing import List, Mapping, Optional, Tuple, Union, Dict, Type

from utils import colorize
from quads import QUADS

mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles

NumpyImage = np.ndarray  # type alias for image arrays

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

HF_HACK = True


class Point3:
    def __init__(self, values:List[float]=None):
        # fresh list per instance (avoids a shared mutable default)
        self.values = [0.0, 0.0, 0.0] if values is None else values

    @property
    def x(self):
        return self.values[0]

    @property
    def y(self):
        return self.values[1]

    @property
    def z(self):
        return self.values[2]

    def to_dict(self):
        return {'x': self.x, 'y': self.y, 'z': self.z}


class TextureCoordinate:
    def __init__(self, values:List[float]=None):
        self.values = [0.0, 0.0] if values is None else values

    @property
    def u(self):
        return self.values[0]

    @property
    def v(self):
        return self.values[1]

    def to_dict(self):
        return {'u': self.u, 'v': self.v}


class PixelCoordinate:
    def __init__(self, values:List[int]=None):
        self.values = [0, 0] if values is None else values

    @property
    def x(self):
        return self.values[0]

    @property
    def y(self):
        return self.values[1]

    def to_dict(self):
        return {'x': self.x, 'y': self.y}


class DepthMap:
    MEDIA_PIPE = 'mediapipe'

    def __init__(self, values:Dict[str,float]=None):
        self.values = {'og': 0.0} if values is None else values

    def to_dict(self):
        return self.values

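# DepthMapping linearly remaps a tracked input range [lo, hi] onto
# [toLo, toHi] and scales the result by weight. A quick worked example
# (values chosen purely for illustration): with lo=0, hi=10, toLo=0,
# toHi=1 and weight=0.5, translate(5) -> ((5 - 0) / 10) * 1 * 0.5 = 0.25.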
class DepthMapping:
    def __init__(self, weight:float=1, lo:float=+np.inf, hi:float=-np.inf, toLo:float=0, toHi:float=1):
        self.weight = weight
        self.lo = lo
        self.hi = hi
        self.toLo = toLo
        self.toHi = toHi
        self.diff = 1
        self.toDiff = 1
        self.update()

    def reset(self):
        self.lo = +np.inf
        self.hi = -np.inf

    def track(self, value):
        self.lo = min(self.lo, value)
        self.hi = max(self.hi, value)

    def update(self):
        self.diff = self.hi - self.lo
        self.toDiff = self.toHi - self.toLo
        return self

    def translate(self, value):
        if self.diff != 0:
            value = (value - self.lo) / self.diff
        value = self.toLo + value * self.toDiff
        value = value * self.weight
        return value

    def to_dict(self):
        return {
            'weight' : self.weight,
            'lo'     : self.lo,
            'hi'     : self.hi,
            'toLo'   : self.toLo,
            'toHi'   : self.toHi,
            'diff'   : self.diff,
            'toDiff' : self.toDiff,
        }


class WeightMap:
    def __init__(self, values:Dict[str,DepthMapping]=None):
        if values is None:
            self.values = {DepthMap.MEDIA_PIPE: DepthMapping()}
        else:
            self.values = values

    def set(self, key:str, depthMapping:DepthMapping):
        self.values[key] = depthMapping

    def totally(self, name:str):
        if name not in self.values:
            raise Exception(f'no weight for {name} in {self.to_dict()}')
        for depthMapping in self.values.values():
            depthMapping.weight = 0
        self.values[name].weight = 1

    def saveWeights(self) -> Dict[str,float]:
        return {k: v.weight for k, v in self.values.items()}

    def loadWeights(self, weights:Dict[str,float]):
        for k, weight in weights.items():
            if k in self.values:
                self.values[k].weight = weight
            else:
                raise Exception(f'no weight for {k} in {self.to_dict()}')

    def to_dict(self):
        return {k: dm.to_dict() for k, dm in self.values.items()}


class MeshPoint:
    def __init__(self,
                 position:Point3 = None,
                 color:Point3 = None,
                 textureCoordinate:TextureCoordinate = None,
                 pixelCoordinate:PixelCoordinate = None,
                 depthMap:DepthMap = None,
                 ):
        # fresh default instances rather than shared default objects
        self.position = Point3() if position is None else position
        self.color = Point3() if color is None else color
        self.textureCoordinate = TextureCoordinate() if textureCoordinate is None else textureCoordinate
        self.pixelCoordinate = PixelCoordinate() if pixelCoordinate is None else pixelCoordinate

        if depthMap is None:
            self.depthMap = DepthMap({DepthMap.MEDIA_PIPE: self.position.values[2]})
        else:
            self.depthMap = depthMap

    def to_dict(self):
        derp = {
            'position'          : self.position.to_dict(),
            'color'             : self.color.to_dict(),
            'textureCoordinate' : self.textureCoordinate.to_dict(),
            'pixelCoordinate'   : self.pixelCoordinate.to_dict(),
        }
        if self.depthMap is not None:
            derp['depthMap'] = self.depthMap.to_dict()
        return derp

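    # weighDepth() combines the per-source depths into a single z value:
    # it sums DepthMapping.translate(depth) over every source in the
    # WeightMap (translate() already folds in that source's weight) and
    # divides by the total weight.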
    def weighDepth(self, weightMap:WeightMap=None):
        weightMap = WeightMap() if weightMap is None else weightMap
        total_sum = sum([dm.weight for dm in weightMap.values.values()])
        tmp = 0
        for key, depthMapping in weightMap.values.items():
            if key in self.depthMap.values:
                tmp = tmp + depthMapping.translate(self.depthMap.values[key])
            else:
                raise Exception(f'{key} from weightMap not in depthMap')
        tmp = tmp / total_sum
        #print( f'depthMap: {json.dumps(self.depthMap.to_dict())} -> {tmp}') # spam!!!
        self.position.values[2] = tmp

    def mapLandMark(self, mediaMesh:'MediaMesh', landmark: landmark_pb2.NormalizedLandmark) -> 'MeshPoint':
        # note: _normalized_to_pixel_coordinates returns None for landmarks
        # outside the image, which would make this unpacking fail
        x, y = _normalized_to_pixel_coordinates(landmark.x, landmark.y, mediaMesh.width, mediaMesh.height)

        position = [v * mediaMesh.scale[i] for i, v in enumerate([landmark.x, landmark.y, landmark.z])]

        self.position = Point3(position)
        self.color = Point3([value / 255 for value in mediaMesh.image[y, x]])
        self.textureCoordinate = TextureCoordinate([x / mediaMesh.width, 1 - y / mediaMesh.height])
        self.pixelCoordinate = PixelCoordinate([x, y])
        self.depthMap = DepthMap({DepthMap.MEDIA_PIPE: self.position.z})
        return self

    def toObj(self, lines:List[str], hf_hack:bool=HF_HACK):
        lines.append("v " + " ".join(map(str, self.position.values + self.color.values)))
        lines.append("vt " + " ".join(map(str, self.textureCoordinate.values)))

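    # For illustration, each point contributes two OBJ lines shaped roughly
    # like this (the numbers are made up):
    #
    #   v 0.12 -0.34 0.05 0.8 0.7 0.6
    #   vt 0.45 0.67
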
# IMPORTANT! MeshFace uses 1-based indices, not 0-based!
class MeshFace:
    def __init__(self, indices:List[int]=None, normal:Point3=None):
        self.indices = indices
        self.normal = Point3() if normal is None else normal

    def calculateNormal(self, meshPoints:List[MeshPoint]):
        if self.indices is None:
            raise Exception('indices is junk')
        if meshPoints is None:
            raise Exception('meshPoints is junk')
        if len(self.indices) < 3:
            raise Exception('need at least 3 points')

        points = [meshPoints[index - 1] for index in self.indices[:3]]
        npz = [np.array(point.position.values) for point in points]

        v1 = npz[1] - npz[0]
        v2 = npz[2] - npz[0]
        normal = np.cross(v1, v2)
        normal = normal / np.linalg.norm(normal)
        self.normal = Point3(normal.tolist())

    def toObj(self, lines:List[str], index:int, hf_hack:bool=HF_HACK):
        lines.append("vn " + " ".join([str(value) for value in self.normal.values]))
        face_uv = "f " + " ".join([f'{vertex}/{vertex}/{index}' for vertex in self.indices])
        face_un = "f " + " ".join([str(vertex) for vertex in self.indices])
        if hf_hack:
            lines.append(f'#{face_uv}')
            lines.append(f'{face_un}')
        else:
            lines.append(face_uv)

class DepthSource:
    def __init__(self, name:str=None):
        self.name = name
        self.mediaMesh = None
        self.depth:NumpyImage = None
        self.gray:NumpyImage = None

    # overridden by subclasses; the base class contributes no depth
    def mapDepth(self, mediaMesh:'MediaMesh', depthMapping:DepthMapping=None) -> 'DepthSource':
        return self

    def _addDepth(self, mediaMesh:'MediaMesh', depthMapping:DepthMapping=None) -> 'DepthSource':
        self.gray = colorize(self.depth, cmap='gray_r')
        self.mediaMesh = mediaMesh

        for meshPoint in mediaMesh.points:
            depth = self.depth[meshPoint.pixelCoordinate.y, meshPoint.pixelCoordinate.x]
            #depth = -depth # lazy conversion from depth to position
            meshPoint.depthMap.values[self.name] = float(depth)

        mediaMesh.weightMap.set(self.name, self.createDepthMapping(depthMapping))

        self.gray = mediaMesh.drawGrayMesh(self.name, True)
        return self

    # note: if depthMapping is passed in, its hi and lo will be reset
    def createDepthMapping(self, depthMapping:DepthMapping=None) -> DepthMapping:
        if depthMapping is None:
            depthMapping = DepthMapping()
        depthMapping.reset()
        if self.depth is not None:
            for meshPoint in self.mediaMesh.points:
                depth = self.depth[meshPoint.pixelCoordinate.y, meshPoint.pixelCoordinate.x]
                depthMapping.track(float(depth))
        return depthMapping.update()


class ZoeDepthSource(DepthSource):
    NAME = 'zoe'

    def __init__(self):
        super().__init__(ZoeDepthSource.NAME)
        self.model = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).to(DEVICE).eval()

    def mapDepth(self, mediaMesh:'MediaMesh', depthMapping:DepthMapping=None) -> 'DepthSource':
        self.depth = 1. - self.model.infer_pil(mediaMesh.image)
        return self._addDepth(mediaMesh, depthMapping)


class MidasDepthSource(DepthSource):
    NAME = 'midas'

    def __init__(self):
        super().__init__(MidasDepthSource.NAME)
        self.feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
        self.model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")

    def mapDepth(self, mediaMesh:'MediaMesh', depthMapping:DepthMapping=None) -> 'DepthSource':
        img = Image.fromarray(mediaMesh.image)

        encoding = self.feature_extractor(img, return_tensors="pt")
        with torch.no_grad():
            outputs = self.model(**encoding)
            predicted_depth = outputs.predicted_depth

        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1),
            size=img.size[::-1],
            mode="bicubic",
            align_corners=False,
        ).squeeze()
        self.depth = prediction.cpu().numpy()
        return self._addDepth(mediaMesh, depthMapping)


#############################################################################
#
# A MediaMesh has:
#
# 1. an input image
# 2. the landmarks of the first face found
# 3. a MeshPoint for each landmark
#
#############################################################################
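#
# Typical usage, mirroring main() below (the image path is just an example):
#
#   mediaMesh = MediaMesh().demoSetup()
#   mediaMesh.detect(cv2.imread('face.png'))
#   meshes = mediaMesh.meshmerizing()  # {'combined': (obj, mtl), ...}
#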
class MediaMesh:
    LOG = logging.getLogger(__name__)
    COMBINED = 'combined'

    def __init__(self, scale:List[float]=None, weightMap:WeightMap=None, image:NumpyImage=None, annotated:NumpyImage=None, points:List[MeshPoint]=None):
        # fresh scale list per instance (avoids a shared mutable default)
        self.scale = [-1.0, -1.0, -1.0] if scale is None else scale
        if weightMap is None:
            self.weightMap = WeightMap()
        else:
            self.weightMap = weightMap
        self.image = image
        self.annotated = annotated
        self.points = points
        self.meshes = {}
        self.depthSources = []

    # after this call, the instance variables for image, annotated and points are set
    def detect(self, image:NumpyImage, min_detection_confidence:float=.5) -> 'MediaMesh':
        self.image = image
        self.annotated = image.copy()
        self.points = None

        self.width = image.shape[1]
        self.height = image.shape[0]
        self.ratio = self.width / self.height

        self.scale[0] = self.ratio

        first = True # just do the first face for now

        with mp_face_mesh.FaceMesh(
                static_image_mode=True,
                max_num_faces=1,
                min_detection_confidence=min_detection_confidence) as face_mesh:

            results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
            if not results.multi_face_landmarks:
                raise Exception('no faces found')

            for landmarks in results.multi_face_landmarks:
                if first:
                    self.points = self.mapLandMarks(landmarks)
                    first = False
                self.drawLandMarks(self.annotated, landmarks)

        self.gray = self.drawGrayMesh()
        self.weightMap.set(DepthMap.MEDIA_PIPE, self.createDepthMapping())

        return self

    def drawLandMarks(self, image:NumpyImage, landmarks: landmark_pb2.NormalizedLandmarkList):
        drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)

        mp_drawing.draw_landmarks(
            image=image,
            landmark_list=landmarks,
            connections=mp_face_mesh.FACEMESH_TESSELATION,
            landmark_drawing_spec=None,
            connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_tesselation_style())
        mp_drawing.draw_landmarks(
            image=image,
            landmark_list=landmarks,
            connections=mp_face_mesh.FACEMESH_CONTOURS,
            landmark_drawing_spec=None,
            connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_contours_style())

    def mapLandMarks(self, landmarks: landmark_pb2.NormalizedLandmarkList) -> List[MeshPoint]:
        points = []
        for landmark in landmarks.landmark:
            point = MeshPoint().mapLandMark(self, landmark)
            points.append(point)
        return self.centerPoints(points)

    def centerPoints(self, points:List[MeshPoint]=None) -> List[MeshPoint]:
        if points is None:
            points = self.points

        mins = [+np.inf] * 3
        maxs = [-np.inf] * 3

        for point in points:
            for dimension, value in enumerate(point.position.values):
                mins[dimension] = min(mins[dimension], value)
                maxs[dimension] = max(maxs[dimension], value)

        mids = [(min_val + max_val) / 2 for min_val, max_val in zip(mins, maxs)]
        for point in points:
            point.position.values = [(val - mid) for val, mid in zip(point.position.values, mids)]

        print(f'mins: {mins}')
        print(f'mids: {mids}')
        print(f'maxs: {maxs}')

        return points

    def createDepthMapping(self, depthMapping:DepthMapping=None) -> DepthMapping:
        if depthMapping is None:
            depthMapping = DepthMapping()
        for point in self.points:
            depthMapping.track(point.position.z)
        return depthMapping.update()

    def drawGrayMesh(self, source:str=DepthMap.MEDIA_PIPE, invert:bool=False):
        image = Image.new("RGB", (self.width, self.height), (88, 13, 33))
        draw = ImageDraw.Draw(image)

        minZ = np.inf
        maxZ = -np.inf

        depths = []

        for point in self.points:
            depth = point.depthMap.values[source]
            depths.append(depth)
            minZ = min(minZ, depth)
            maxZ = max(maxZ, depth)

        difZ = maxZ - minZ
        if difZ == 0:
            difZ = 1

        depths = [(depth - minZ) / difZ for depth in depths]

        for quad in QUADS:
            points = [tuple(self.points[index - 1].pixelCoordinate.values) for index in quad]
            colors = [tuple(3 * [int(255 * depths[index - 1])]) for index in quad]
            color = int(np.average(colors))
            if invert:
                color = 255 - color
            draw.polygon(points, fill=tuple(3 * [color]))
            #draw.polygon(points, fill=colors) # sadly this does not work

        return np.asarray(image)

    # the obj is based on the current weightMap
    def toObj(self, name:str='sweet', hf_hack:bool=HF_HACK):
        print('-----------------------------------------------------------------------------')

        obj = [f'o {name}Mesh']
        mtl = f'newmtl {name}Material\nmap_Kd {name}.png\n'

        c = '#' if hf_hack else ''
        obj.append(f'{c}mtllib {name}.mtl')

        obj.append('##################################################################')
        obj.append('# to bring into blender with uvs:')
        obj.append(f'# put the following 2 lines into {name}.mtl uncommented')
        obj.append(f'#newmtl {name}Material')
        obj.append(f'#map_Kd {name}.png')
        obj.append('# remove lines from this file starting with "f "')
        obj.append('# uncomment the lines that start with "#f "')
        obj.append('##################################################################')

        for key, depthMapping in self.weightMap.values.items():
            depthMapping.update()
            print(f'{name}.{key} -> {depthMapping.to_dict()}')

        for point in self.points:
            point.weighDepth(self.weightMap)

        self.centerPoints()

        for point in self.points:
            point.toObj(obj, hf_hack)

        obj.append(f'usemtl {name}Material')

        index = 0
        for quad in QUADS:
            index = 1 + index
            face = MeshFace(quad)
            face.calculateNormal(self.points)
            face.toObj(obj, index, hf_hack)

        obj.append('##################################################################')
        obj.append('# EOF')
        obj.append('##################################################################')

        print('-----------------------------------------------------------------------------')

        return obj, mtl

    def to_dict(self):
        return {
            'width'     : self.width,
            'height'    : self.height,
            'ratio'     : self.ratio,
            'weightMap' : {key: value.to_dict() for key, value in self.weightMap.values.items()},
            'points'    : [point.to_dict() for point in self.points]
        }

    # should be called after demoSetup and detect
    def singleSourceMesh(self, name:str, hf_hack:bool=HF_HACK):
        before = self.weightMap.saveWeights() # push
        self.weightMap.totally(name)
        obj, mtl = self.toObj(name, hf_hack)
        self.weightMap.loadWeights(before) # pop
        return obj, mtl

    # should be called after demoSetup and detect
    def meshmerizing(self, hf_hack:bool=HF_HACK):
        for depthSource in self.depthSources:
            depthSource.mapDepth(self, self.weightMap.values[depthSource.name])

        obj, mtl = self.toObj(MediaMesh.COMBINED, hf_hack)
        self.meshes = {MediaMesh.COMBINED: (obj, mtl)}

        for source in self.depthSources:
            self.meshes[source.name] = self.singleSourceMesh(source.name, hf_hack)

        self.meshes[DepthMap.MEDIA_PIPE] = self.singleSourceMesh(DepthMap.MEDIA_PIPE, hf_hack)

        return self.meshes

    def demoSetup(self) -> 'MediaMesh':
        self.depthSources = [ZoeDepthSource(), MidasDepthSource()]

        for depthSource in self.depthSources:
            self.weightMap.set(depthSource.name, depthSource.createDepthMapping())

        # tuned observationally
        self.weightMap.values[ZoeDepthSource.NAME].toHi = 1.77
        self.weightMap.values[MidasDepthSource.NAME].toHi = 2.55
        self.weightMap.values[ZoeDepthSource.NAME].weight = 1.00
        self.weightMap.values[MidasDepthSource.NAME].weight = 0.22

        return self

    def main(self):
        if len(sys.argv) != 2:
            raise Exception('usage: MediaMesh.py <image filename>')
        mediaMesh = MediaMesh().demoSetup()

        # cv2.imread returns None rather than raising on a bad path
        image = cv2.imread(sys.argv[1])
        if image is None:
            raise Exception(f'could not read image: {sys.argv[1]}')
        mediaMesh.detect(image)

        for name, mesh in mediaMesh.meshmerizing().items():
            obj = mesh[0]
            mtl = mesh[1]
            with open(f"{name}.obj", "w") as file:
                file.write('\n'.join(obj))
            with open(f"{name}.mtl", "w") as file:
                file.write(mtl)

        cv2.imwrite('mesh.png', mediaMesh.annotated)
        cv2.imwrite('mpg.png', mediaMesh.gray)
        for source in mediaMesh.depthSources:
            cv2.imwrite(f'{source.name}.png', source.gray)

        with open("mesh.json", "w") as file:
            json.dump(mediaMesh.to_dict(), file, indent=4)


if __name__ == "__main__":
    MediaMesh().main()

# EOF
#############################################################################