Spaces:
Running on Zero
Running on Zero
Bypass TexturePipeline: use CameraProjection directly for UV baking
Browse files
TexturePipeline imports mesh_process which requires open3d + pymeshlab
at module level — both unavailable in the Space environment.
Replace with direct CameraProjection + replace_mesh_texture_and_save
calls which have no such dependency:
- Split 6-view grid back into individual PIL images
- Match cameras to MV-Adapter generation (same azimuths)
- Project directly onto mesh UV space at 1024px
- Save textured GLB via gltflib backend
app.py
CHANGED
|
@@ -1025,25 +1025,54 @@ def apply_texture(glb_path, input_image, remove_background, variant, tex_seed,
|
|
| 1025 |
print(f"[apply_texture] face enhance failed: {_fe}")
|
| 1026 |
|
| 1027 |
# ── Bake textures onto mesh ─────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
| 1028 |
progress(0.85, desc="Baking UV texture onto mesh...")
|
| 1029 |
-
from mvadapter.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1030 |
|
| 1031 |
-
|
| 1032 |
-
|
| 1033 |
-
|
|
|
|
|
|
|
|
|
|
| 1034 |
device=DEVICE,
|
| 1035 |
)
|
| 1036 |
-
|
| 1037 |
-
|
| 1038 |
-
|
| 1039 |
-
|
| 1040 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1041 |
uv_size=1024,
|
| 1042 |
-
|
| 1043 |
-
|
| 1044 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1045 |
)
|
| 1046 |
-
out_glb = tex_out.shaded_model_save_path
|
| 1047 |
|
| 1048 |
final_path = "/tmp/triposg_textured.glb"
|
| 1049 |
shutil.copy(out_glb, final_path)
|
|
|
|
| 1025 |
print(f"[apply_texture] face enhance failed: {_fe}")
|
| 1026 |
|
| 1027 |
# ── Bake textures onto mesh ─────────────────────────────────────
|
| 1028 |
+
# Use CameraProjection + replace_mesh_texture_and_save directly.
|
| 1029 |
+
# TexturePipeline imports mesh_process which requires open3d+pymeshlab
|
| 1030 |
+
# (not available); the UV projection itself has no such dependency.
|
| 1031 |
progress(0.85, desc="Baking UV texture onto mesh...")
|
| 1032 |
+
from mvadapter.utils.mesh_utils import (
|
| 1033 |
+
load_mesh, replace_mesh_texture_and_save,
|
| 1034 |
+
)
|
| 1035 |
+
from mvadapter.utils.mesh_utils.projection import CameraProjection
|
| 1036 |
+
from mvadapter.utils import image_to_tensor, tensor_to_image
|
| 1037 |
+
|
| 1038 |
+
# Split the saved horizontal 6-view grid back into individual images
|
| 1039 |
+
mv_img = Image.open(mv_path)
|
| 1040 |
+
mv_np = np.array(mv_img)
|
| 1041 |
+
mv_views = [Image.fromarray(v) for v in np.array_split(mv_np, 6, axis=1)]
|
| 1042 |
|
| 1043 |
+
# Cameras must match those used during MV-Adapter generation
|
| 1044 |
+
tex_cameras = get_orthogonal_camera(
|
| 1045 |
+
elevation_deg=[0, 0, 0, 0, 0, 0],
|
| 1046 |
+
distance=[1.8] * 6,
|
| 1047 |
+
left=-0.55, right=0.55, bottom=-0.55, top=0.55,
|
| 1048 |
+
azimuth_deg=[x - 90 for x in [0, 45, 90, 180, 270, 315]],
|
| 1049 |
device=DEVICE,
|
| 1050 |
)
|
| 1051 |
+
mesh_obj = load_mesh(glb_path, rescale=True, device=DEVICE, default_uv_size=1024)
|
| 1052 |
+
cam_proj = CameraProjection(pb_backend="torch-cuda", bg_remover=None, device=DEVICE)
|
| 1053 |
+
mod_tensor = image_to_tensor(mv_views, device=DEVICE)
|
| 1054 |
+
|
| 1055 |
+
cam_out = cam_proj(
|
| 1056 |
+
mod_tensor, mesh_obj, tex_cameras,
|
| 1057 |
+
from_scratch=True,
|
| 1058 |
+
poisson_blending=False,
|
| 1059 |
+
depth_grad_dilation=5,
|
| 1060 |
+
depth_grad_threshold=0.1,
|
| 1061 |
+
uv_exp_blend_alpha=3,
|
| 1062 |
+
uv_exp_blend_view_weight=torch.as_tensor([1, 1, 1, 1, 1, 1]),
|
| 1063 |
+
aoi_cos_valid_threshold=0.2,
|
| 1064 |
uv_size=1024,
|
| 1065 |
+
uv_padding=True,
|
| 1066 |
+
return_dict=True,
|
| 1067 |
+
)
|
| 1068 |
+
|
| 1069 |
+
out_glb = os.path.join(out_dir, "textured_shaded.glb")
|
| 1070 |
+
replace_mesh_texture_and_save(
|
| 1071 |
+
glb_path, out_glb,
|
| 1072 |
+
texture=tensor_to_image(cam_out.uv_proj),
|
| 1073 |
+
backend="gltflib",
|
| 1074 |
+
task_id="textured",
|
| 1075 |
)
|
|
|
|
| 1076 |
|
| 1077 |
final_path = "/tmp/triposg_textured.glb"
|
| 1078 |
shutil.copy(out_glb, final_path)
|