vgvm committed · Commit 3d5f8b6 · 1 Parent(s): 222641c

tweaks to make mtl work

Files changed:
- app.py (+99 −44)
- requirements.txt (+18 −1)
- utils.py (+40 −0)
app.py CHANGED

@@ -7,7 +7,6 @@ import matplotlib.cm
 import mediapipe as mp
 import numpy as np
 import os
-import shutil
 import struct
 import tempfile
 import torch
@@ -17,21 +16,22 @@ from mediapipe.python.solutions.drawing_utils import _normalized_to_pixel_coordinates
 from PIL import Image
 from quads import QUADS
 from typing import List, Mapping, Optional, Tuple, Union
-from utils import colorize
+from utils import colorize, get_most_recent_subdirectory
 
 class face_image_to_face_mesh:
     def __init__(self):
         self.zoe_me = True
+        self.uvwrap = not True
         self.css = ("""
-            #…
-                max-height: …
-
-
-
+            #mesh-display-output {
+                max-height: 44vh;
+                max-width: 44vh;
+                width:auto;
+                height:auto
             }
             #img-display-output {
-                max-height: …
-                max-width: …
+                max-height: 28vh;
+                max-width: 28vh;
                 width:auto;
                 height:auto
             }
@@ -42,25 +42,29 @@ class face_image_to_face_mesh:
         DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
         self.zoe = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).to(DEVICE).eval()
 
-        demo = gr.Blocks(css=self.css)
+        demo = gr.Blocks(css=self.css, cache_examples=True)
         with demo:
             gr.Markdown("""
            # Face Image to Face Quad Mesh

            Uses MediaPipe to detect a face in an image and convert it to a quad mesh.
-           Saves to OBJ since gltf does not support quad faces.
+           Saves to OBJ since gltf does not support quad faces. The 3d viewer has Y pointing the opposite direction from Blender, so ya hafta spin it.

            The face depth with Zoe can be a bit much and without it is a bit generic. In blender you can fix this just by snapping to the high poly model.

-           Highly recommend running it locally. The 3D model has …
+           Highly recommend running it locally. The 3D model has uv values in the faces, but you will have to make the mtl file manually at this point.
+
+           Quick import result in examples/converted/movie-gallery.mp4 under files
            """)
 
             with gr.Row():
                 with gr.Column():
                     upload_image = gr.Image(label="Input image", type="numpy", source="upload")
+                    self.temp_dir = get_most_recent_subdirectory( upload_image.DEFAULT_TEMP_DIR )
+                    print( f'The temp_dir is {self.temp_dir}' )
 
                     gr.Examples( examples=[
-                        'examples/blonde-…
+                        'examples/blonde-00081-399357008.png',
                         'examples/dude-00110-1227390728.png',
                         'examples/granny-00056-1867315302.png',
                         'examples/tuffie-00039-499759385.png',
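A note on the `temp_dir` lines above: the OBJ and MTL written later reference their companion files with a `file=` prefix, which presumably only resolves if the artifacts sit somewhere the Gradio frontend can fetch them, hence fishing the newest subdirectory out of the upload component's temp dir. A minimal sketch of the idea, assuming this commit's `utils.get_most_recent_subdirectory` and a Gradio build that exposes `DEFAULT_TEMP_DIR` (`upload_image` is the component from the hunk above):

```python
# Sketch only: place generated artifacts where the Gradio frontend can fetch them.
import tempfile
from utils import get_most_recent_subdirectory

serving_dir = get_most_recent_subdirectory(upload_image.DEFAULT_TEMP_DIR)
# A file created here can then be referenced as 'mtllib file=<path>' or
# 'map_Kd file=<path>' from the OBJ/MTL that the Model3D viewer loads.
obj_file = tempfile.NamedTemporaryFile(suffix='.obj', dir=serving_dir, delete=False)
```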
@@ -82,7 +86,35 @@ class face_image_to_face_mesh:
                     with gr.Group():
                         gr.Markdown(
                             """
-                            …
+                            # Using the Textured Mesh in Blender
+
+                            There are a couple of annoying steps atm after you download the obj from the 3d viewer.
+
+                            You can use the script meshin-around.sh in the files section to do the conversion or manually:
+
+                            1. edit the file and change the mtllib line to use fun.mtl
+                            2. replace / delete all lines that start with 'f', eg :%s,^f.*,,
+                            3. uncomment all the lines that start with '#f', eg: :%s,^#f,f,
+                            4. save and exit
+                            5. create fun.mtl to point to the texture like:
+
+                            ```
+                            newmtl MyMaterial
+                            map_Kd fun.png
+                            ```
+
+                            Make sure the obj, mtl and png are all in the same directory
+
+                            Now the import will have the texture data: File -> Import -> Wavefront (obj) -> fun.obj
+
+                            This is all a workaround for a weird hf+gradio+babylonjs bug which seems to be related to the version
+                            of babylonjs being used... It works fine in a local babylonjs sandbox...
+
+                            # Suggested Workflows
+
+                            Here are some workflow ideas.
+
+                            ## retopologize high poly face mesh
 
                             1. sculpt high poly mesh in blender
                             2. snapshot the face
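meshin-around.sh itself is not included in this diff, but the five manual steps above are mechanical enough to script. A hypothetical Python equivalent (the function name is made up; fun.obj, fun.mtl and fun.png are the file names from the instructions):

```python
# Hypothetical stand-in for meshin-around.sh, following the manual steps above:
# point mtllib at fun.mtl, drop the plain 'f' lines, uncomment the '#f' lines,
# and write a minimal fun.mtl next to the texture.
def meshin_around(obj_in='fun.obj.orig', obj_out='fun.obj', mtl='fun.mtl', png='fun.png'):
    fixed = []
    for line in open(obj_in):
        line = line.rstrip('\n')
        if line.startswith('mtllib'):
            fixed.append(f'mtllib {mtl}')   # step 1: use the local mtl
        elif line.startswith('f '):
            continue                        # step 2: drop the un-textured faces
        elif line.startswith('#f '):
            fixed.append(line[1:])          # step 3: restore the v/vt/vn faces
        else:
            fixed.append(line)
    open(obj_out, 'w').write('\n'.join(fixed) + '\n')
    open(mtl, 'w').write(f'newmtl MyMaterial\nmap_Kd {png}\n')  # step 5
```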
@@ -94,14 +126,24 @@ class face_image_to_face_mesh:
                             8. it's just that easy 😛
 
                             Ideally it would be a plugin...
+
+                            ## stable diffusion integration
+
+                            1. generate a face in sd
+                            2. generate the mesh
+                            3. repose it and use it for further generation
+
+                            May need to expand the generated mesh to cover more, maybe with
+                            <a href="https://github.com/shunsukesaito/PIFu" target="_blank">PIFu model</a>.
+
                             """)
 
                 with gr.Column():
                     with gr.Group():
-                        …
+                        output_mesh = gr.Model3D(clear_color=3*[0], label="3D Model",elem_id='mesh-display-output')
                         output_image = gr.Image(label="Output image",elem_id='img-display-output')
-                        output_mesh = gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label="3D Model",elem_id='img-display-output')
                         depth_image = gr.Image(label="Depth image",elem_id='img-display-output')
+                        num_faces_detected = gr.Number(label="Number of faces detected", value=0)
 
                 upload_image_btn.click(
                     fn=self.detect,
@@ -120,11 +162,10 @@ class face_image_to_face_mesh:
         mp_drawing_styles = mp.solutions.drawing_styles
         mp_face_mesh = mp.solutions.face_mesh
 
-        mesh = "examples/…
+        mesh = "examples/converted/in-granny.obj"
 
         if self.zoe_me and use_zoe:
             depth = self.zoe.infer_pil(image)
-            print( f'type of depth is {type(depth)}' )
             idepth = colorize(depth, cmap='gray_r')
         else:
             depth = None
@@ -141,7 +182,7 @@ class face_image_to_face_mesh:
 
         annotated_image = image.copy()
         for face_landmarks in results.multi_face_landmarks:
-            mesh = self.toObj(image=image, width=width, height=height, ratio=ratio, landmark_list=face_landmarks, depth=depth, zoe_scale=zoe_scale)
+            (mesh,mtl,png) = self.toObj(image=image, width=width, height=height, ratio=ratio, landmark_list=face_landmarks, depth=depth, zoe_scale=zoe_scale)
 
             mp_drawing.draw_landmarks(
                 image=annotated_image,
@@ -161,25 +202,35 @@ class face_image_to_face_mesh:
         return mesh, annotated_image, idepth, 1
 
     def toObj( self, image: np.ndarray, width:int, height:int, ratio: float, landmark_list: landmark_pb2.NormalizedLandmarkList, depth: np.ndarray, zoe_scale: float):
-        print( f'you have such pretty hair' )
+        print( f'you have such pretty hair', self.temp_dir )
 
-        obj_file = tempfile.NamedTemporaryFile(suffix='.obj', delete=False)
-        mtl_file = tempfile.NamedTemporaryFile(suffix='.mtl', delete=False)
-        png_file = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
+        obj_file = tempfile.NamedTemporaryFile(suffix='.obj', dir=self.temp_dir, delete=False)
+        mtl_file = tempfile.NamedTemporaryFile(suffix='.mtl', dir=self.temp_dir, delete=False)
+        png_file = tempfile.NamedTemporaryFile(suffix='.png', dir=self.temp_dir, delete=False)
 
+        ############################################
+        (points,coordinates,colors) = self.landmarksToPoints( image, width, height, ratio, landmark_list, depth, zoe_scale )
         ############################################
 
         lines = []
-        lines.append( f'mtllib {os.path.basename(mtl_file.name)}' )
 
-        …
-        …
-        …
+        lines.append( f'o MyMesh' )
+
+        # the 'file=' is a gradio hack
+        lines.append( f'mtllib file={mtl_file.name}' )
+
+        for index, point in enumerate(points):
+            color = colors[index]
+            scaled_color = [value / 255 for value in color] # Scale colors down to 0-1 range
+            flipped = [-value for value in point]
+            flipped[ 0 ] = -flipped[ 0 ]
+            lines.append( "v " + " ".join(map(str, flipped + color)) )
 
         for coordinate in coordinates:
             lines.append( "vt " + " ".join([str(value) for value in coordinate]) )
 
         for quad in QUADS:
+            #quad = list(reversed(quad))
             normal = self.totallyNormal( points[ quad[ 0 ] -1 ], points[ quad[ 1 ] -1 ], points[ quad[ 2 ] -1 ] )
             lines.append( "vn " + " ".join([str(value) for value in normal]) )
 
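The `v` lines emitted above use the common, if unofficial, OBJ vertex-color extension: three 0-1 color components appended after the position. Since `landmarksToPoints` already scales the sampled pixels to 0-1, the `scaled_color` computed in the loop looks like an unused leftover; `color` is what actually gets written. A toy illustration (values invented) of the three record types the geometry section ends up containing:

```python
# Toy illustration of the OBJ lines toObj emits (all values invented):
point = [0.41, 0.52, 0.07]          # from landmarksToPoints
color = [0.8, 0.64, 0.55]           # sampled pixel, already scaled to 0-1
flipped = [-v for v in point]
flipped[0] = -flipped[0]            # net effect: negate y and z only
print("v " + " ".join(map(str, flipped + color)))
# -> v 0.41 -0.52 -0.07 0.8 0.64 0.55
print("vt 0.41 0.48")               # 2d texture coordinate, v flipped as 1-y/height
print("vn 0.0 0.0 1.0")             # one per-quad normal from totallyNormal
```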
@@ -188,59 +239,63 @@ class face_image_to_face_mesh:
         quadIndex = 0
         for quad in QUADS:
             quadIndex = 1 + quadIndex
-            …
-            …
-            …
+            face_uv = "f " + " ".join([f'{vertex}/{vertex}/{quadIndex}' for vertex in quad])
+            face_un = "f " + " ".join([str(vertex) for vertex in quad])
+            if self.uvwrap:
+                lines.append( face_uv )
             else:
-                lines.append( …
-                …
+                lines.append( f'#{face_uv}' )
+                lines.append( f'{face_un}' )
+                #"f " + " ".join([str(vertex) for vertex in quad]) )
 
         out = open( obj_file.name, 'w' )
         out.write( '\n'.join( lines ) + '\n' )
         out.close()
-        shutil.copy(obj_file.name, "/tmp/lol.obj")
 
         ############################################
 
         lines = []
         lines.append( 'newmtl MyMaterial' )
-        lines.append( …
-        lines.append( f'…
-        lines.append( f'Ks 0.000 0.000 0.000 # black (off)' )
-        lines.append( f'map_Ka {os.path.basename(png_file.name)}' )
-        lines.append( f'map_Kd {os.path.basename(png_file.name)}' )
+        lines.append( 'illum 2' )
+        lines.append( f'map_Kd file={png_file.name}' ) # the 'file=' is a gradio hack
 
         out = open( mtl_file.name, 'w' )
         out.write( '\n'.join( lines ) + '\n' )
         out.close()
-        shutil.copy(mtl_file.name, "/tmp/lol.mtl")
 
         ############################################
 
         cv2.imwrite(png_file.name, cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
-        shutil.copy(png_file.name, "/tmp/lol.png")
 
         ############################################
 
         print( f'I know it is special to you so I saved it to {obj_file.name} since we are friends' )
-        return obj_file.name
+        return (obj_file.name,mtl_file.name,png_file.name)
 
-    def landmarksToPoints( self, width: int, height: int, ratio: float, landmark_list: landmark_pb2.NormalizedLandmarkList, depth: np.ndarray, zoe_scale: float ):
+    def landmarksToPoints( self, image:np.ndarray, width: int, height: int, ratio: float, landmark_list: landmark_pb2.NormalizedLandmarkList, depth: np.ndarray, zoe_scale: float ):
         points = [] # 3d vertices
         coordinates = [] # 2d texture coordinates
+        colors = [] # 3d rgb info
+
         mins = [+np.inf] * 3
         maxs = [-np.inf] * 3
+
         for idx, landmark in enumerate(landmark_list.landmark):
             if ((landmark.HasField('visibility') and
                 landmark.visibility < _VISIBILITY_THRESHOLD) or
                 (landmark.HasField('presence') and
                 landmark.presence < _PRESENCE_THRESHOLD)):
                 idk_what_to_do_for_this = True
+
             x, y = _normalized_to_pixel_coordinates(landmark.x,landmark.y,width,height)
+            color = image[y,x]
+            colors.append( [value / 255 for value in color ] )
             coordinates.append( [x/width,1-y/height] )
+
             if depth is not None:
                 landmark.z = depth[y, x] * zoe_scale
-            point = [landmark.x * ratio, -landmark.y, -landmark.z];
+            #point = [landmark.x * ratio, -landmark.y, -landmark.z];
+            point = [landmark.x * ratio, landmark.y, landmark.z];
             for pidx,value in enumerate( point ):
                 mins[pidx] = min(mins[pidx],value)
                 maxs[pidx] = max(maxs[pidx],value)
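With `self.uvwrap` left off, the textured `f v/vt/vn` faces above are written commented out (`#f ...`) while plain `f v1 v2 v3 v4` faces are written live; the Blender steps earlier on this page then delete the plain faces and uncomment the textured ones. A toy sketch of the two encodings for one quad (OBJ indices are 1-based):

```python
# Toy sketch of the two face encodings toObj produces for a single quad:
quad, quadIndex = [1, 2, 3, 4], 1
face_uv = "f " + " ".join([f'{v}/{v}/{quadIndex}' for v in quad])
face_un = "f " + " ".join([str(v) for v in quad])
print(face_un)        # f 1 2 3 4                 <- what the viewer gets
print('#' + face_uv)  # #f 1/1/1 2/2/1 3/3/1 4/4/1 <- revived by the Blender steps
```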
@@ -253,7 +308,7 @@ class face_image_to_face_mesh:
         print( f'mins: {mins}' )
         print( f'mids: {mids}' )
         print( f'maxs: {maxs}' )
-        return (points,coordinates)
+        return (points,coordinates,colors)
 
     def totallyNormal(self, p0, p1, p2):
         v1 = np.array(p1) - np.array(p0)
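`totallyNormal` is cut off by the hunk boundary; given its first line and its use to emit one `vn` per quad, it is presumably the standard cross-product normal, along these lines (a guess, not the committed code):

```python
# Presumed remainder of totallyNormal; only the v1 line appears in the diff.
import numpy as np

def totallyNormal(p0, p1, p2):
    v1 = np.array(p1) - np.array(p0)
    v2 = np.array(p2) - np.array(p0)
    normal = np.cross(v1, v2)
    return normal / np.linalg.norm(normal)  # unit normal for the 'vn' record
```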
requirements.txt CHANGED

@@ -1,8 +1,25 @@
+########################################################################################
+# :-D
+
+############################################
+# mediapipe up on this!
+
 opencv-python>=4.7.0.72
 mediapipe>=0.10.1
-…
+
+#
+############################################
+
+############################################
+# zoe depth requirements
 
 #--extra-index-url https://download.pytorch.org/whl/cu113
 torch
 torchvision>=0.11.2
 timm==0.6.11
+
+#
+############################################
+
+# EOF
+########################################################################################
utils.py CHANGED

@@ -86,3 +86,43 @@ def colorize(value, vmin=None, vmax=None, cmap='magma_r', invalid_val=-99, inval…
     img = img.astype(np.uint8)
     return img
 
+
+import os
+
+# bard...
+def find_most_recently_created_directory(temp_dir):
+    """Finds the most recently created directory in a directory.
+
+    Args:
+        temp_dir: The directory to search.
+
+    Returns:
+        The path to the most recently created directory.
+    """
+
+    directories = os.listdir(temp_dir)
+    most_recently_created_directory = None
+    for directory in directories:
+        path = os.path.join(temp_dir, directory)
+        st = os.stat(path)
+        if most_recently_created_directory is None or st.st_mtime > os.stat(most_recently_created_directory).st_mtime:
+            most_recently_created_directory = path
+
+    if most_recently_created_directory is None:
+        most_recently_created_directory = temp_dir
+
+    return most_recently_created_directory
+
+
+#chatgpt
+def get_most_recent_subdirectory(path):
+    if not os.path.isdir(path):
+        return path
+
+    subdirectories = [f for f in os.listdir(path) if os.path.isdir(os.path.join(path, f))]
+    if not subdirectories:
+        return path
+
+    most_recent_subdirectory = max(subdirectories, key=lambda d: os.path.getctime(os.path.join(path, d)))
+    return os.path.join(path, most_recent_subdirectory)
+
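Of the two helpers added here, only `get_most_recent_subdirectory` is imported by app.py; note it picks by ctime while the bard variant uses mtime, which can rank directories differently on some filesystems. A quick usage sketch (the path is hypothetical):

```python
# Usage sketch for get_most_recent_subdirectory; '/tmp/gradio' is a made-up path.
from utils import get_most_recent_subdirectory

staging = get_most_recent_subdirectory('/tmp/gradio')
print(staging)  # newest subdirectory by ctime, or '/tmp/gradio' itself if it has none
```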