Commit 7493f60 by xinjie.wang
Parent(s): 4023f8f
update

embodied_gen/data/convex_decomposer.py
CHANGED
@@ -19,6 +19,7 @@ import multiprocessing as mp
 import os
 
 import coacd
+import numpy as np
 import trimesh
 
 logger = logging.getLogger(__name__)
@@ -31,7 +32,11 @@ __all__ = [
 
 
 def decompose_convex_coacd(
-    filename: str, outfile: str, params: dict, verbose: bool = False
+    filename: str,
+    outfile: str,
+    params: dict,
+    verbose: bool = False,
+    auto_scale: bool = True,
 ) -> None:
     coacd.set_log_level("info" if verbose else "warn")
 
@@ -40,6 +45,14 @@ def decompose_convex_coacd(
 
     result = coacd.run_coacd(mesh, **params)
     combined = sum([trimesh.Trimesh(*m) for m in result])
+
+    # Compute collision_scale because convex decomposition usually makes the mesh larger.
+    if auto_scale:
+        convex_mesh_shape = np.ptp(combined.vertices, axis=0)
+        visual_mesh_shape = np.ptp(mesh.vertices, axis=0)
+        rescale = visual_mesh_shape / convex_mesh_shape
+        combined.vertices *= rescale
+
     combined.export(outfile)
 
 
@@ -57,6 +70,7 @@ def decompose_convex_mesh(
     pca: bool = False,
     merge: bool = True,
     seed: int = 0,
+    auto_scale: bool = True,
     verbose: bool = False,
 ) -> str:
     """Decompose a mesh into convex parts using the CoACD algorithm."""
@@ -81,7 +95,7 @@ def decompose_convex_mesh(
     )
 
     try:
-        decompose_convex_coacd(filename, outfile, params, verbose)
+        decompose_convex_coacd(filename, outfile, params, verbose, auto_scale)
         if os.path.exists(outfile):
            return outfile
     except Exception as e:
@@ -91,7 +105,9 @@ def decompose_convex_mesh(
     if preprocess_mode != "on":
         try:
             params["preprocess_mode"] = "on"
-            decompose_convex_coacd(filename, outfile, params, verbose)
+            decompose_convex_coacd(
+                filename, outfile, params, verbose, auto_scale
+            )
             if os.path.exists(outfile):
                 return outfile
         except Exception as e:
@@ -118,6 +134,7 @@ def decompose_convex_mp(
     merge: bool = True,
     seed: int = 0,
     verbose: bool = False,
+    auto_scale: bool = True,
 ) -> str:
     """Decompose a mesh into convex parts using the CoACD algorithm in a separate process.
 
@@ -140,7 +157,7 @@ def decompose_convex_mp(
     ctx = mp.get_context("spawn")
     p = ctx.Process(
         target=decompose_convex_coacd,
-        args=(filename, outfile, params, verbose),
+        args=(filename, outfile, params, verbose, auto_scale),
     )
     p.start()
     p.join()
@@ -151,7 +168,7 @@ def decompose_convex_mp(
         params["preprocess_mode"] = "on"
        p = ctx.Process(
             target=decompose_convex_coacd,
-            args=(filename, outfile, params, verbose),
+            args=(filename, outfile, params, verbose, auto_scale),
         )
         p.start()
         p.join()
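The new auto_scale branch rescales the merged convex parts so their per-axis bounding-box extents match the input mesh again: np.ptp (max minus min along each axis) measures both meshes, and the ratio is applied to the collision vertices, since CoACD hulls typically bulge slightly outward. A minimal standalone sketch of the same idea, with a toy box standing in for a real mesh:

```python
import numpy as np
import trimesh

# Toy stand-ins: a unit box as the "visual" mesh, and an artificially
# inflated copy of its hull as the "convex decomposition" result.
visual = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
convex = visual.convex_hull.copy()
convex.vertices *= 1.05  # simulate the outward bulge

# Per-axis extents (max - min) of each mesh.
visual_shape = np.ptp(visual.vertices, axis=0)
convex_shape = np.ptp(convex.vertices, axis=0)

# Rescale the collision mesh so its bounding box matches the visual mesh.
convex.vertices *= visual_shape / convex_shape
assert np.allclose(np.ptp(convex.vertices, axis=0), visual_shape)
```

Note that, as in the diff, the rescale is applied about the coordinate origin rather than the mesh centroid.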
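decompose_convex_mp runs the same decomposition in a spawned child process, so a crash or hang inside CoACD's native code cannot take down the caller, and success is detected by the output file appearing rather than by a return value. A reduced sketch of that pattern (the worker below is a placeholder, not the real decomposer):

```python
import multiprocessing as mp
import os

def _worker(outfile: str) -> None:
    # Placeholder for decompose_convex_coacd: native code runs here, so a
    # hard crash (e.g. a segfault) only kills this child process.
    with open(outfile, "w") as f:
        f.write("ok")

if __name__ == "__main__":
    ctx = mp.get_context("spawn")  # fresh interpreter, no inherited GPU/GL state
    p = ctx.Process(target=_worker, args=("/tmp/decomp_result.txt",))
    p.start()
    p.join()
    # As in the diff, the parent checks for the artifact instead of a return value.
    print("success" if os.path.exists("/tmp/decomp_result.txt") else "failed")
```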
embodied_gen/models/text_model.py
CHANGED
@@ -136,7 +136,7 @@ def build_text2img_ip_pipeline(
     pipe = pipe.to(device)
     pipe.image_encoder = pipe.image_encoder.to(device)
     # pipe.enable_model_cpu_offload()
-    # pipe.enable_xformers_memory_efficient_attention()
+    # # pipe.enable_xformers_memory_efficient_attention()
     # pipe.enable_vae_slicing()
 
     return pipe
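For context, the commented-out calls around the changed line are standard diffusers memory toggles on a loaded pipeline. A sketch of how they are enabled in general (the model id is only an example, not the checkpoint this repo loads):

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # move submodules to GPU only while they run
pipe.enable_vae_slicing()        # decode latents in slices to lower peak VRAM
```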
embodied_gen/scripts/gen_layout.py
CHANGED
@@ -137,7 +137,7 @@ def entrypoint() -> None:
     sim_cli(
         layout_path=layout_path,
         output_dir=output_root,
-
+        insert_robot=args.insert_robot,
     )
 
     torch.cuda.empty_cache()
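insert_robot=args.insert_robot forwards what is presumably a boolean CLI flag into the simulation entrypoint. A minimal sketch of such a flag (the flag name matches the diff; the surrounding parser is assumed):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--insert_robot",
    action="store_true",
    help="Also place the robot in the simulated layout.",
)
args = parser.parse_args(["--insert_robot"])  # as if passed on the command line
assert args.insert_robot is True
```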
embodied_gen/scripts/simulate_sapien.py
CHANGED
@@ -156,14 +156,15 @@ def entrypoint(**kwargs):
         bg_images[camera.name] = result.rgb[..., ::-1]
 
     video_frames = []
-    for camera in scene_manager.cameras:
+    for idx, camera in enumerate(scene_manager.cameras):
         # Scene rendering
-        for step in range(image_cnt):
-            rgba = alpha_blend_rgba(
-                frames[camera.name][step]["Foreground"],
-                bg_images[camera.name],
-            )
-            video_frames.append(np.array(rgba))
+        if idx == 0:
+            for step in range(image_cnt):
+                rgba = alpha_blend_rgba(
+                    frames[camera.name][step]["Foreground"],
+                    bg_images[camera.name],
+                )
+                video_frames.append(np.array(rgba))
 
         # Grasp rendering
         for node in actions:
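alpha_blend_rgba composites each frame's rendered foreground over the cached background image for that camera. Its real implementation lives in embodied_gen's utilities; the standalone version below is an assumed equivalent for uint8 arrays of shape (H, W, 4) over (H, W, 3):

```python
import numpy as np

def alpha_blend_rgba_sketch(fg_rgba: np.ndarray, bg_rgb: np.ndarray) -> np.ndarray:
    """Composite an RGBA foreground over an RGB background (both uint8)."""
    alpha = fg_rgba[..., 3:4].astype(np.float32) / 255.0  # (H, W, 1) in [0, 1]
    blended = fg_rgba[..., :3] * alpha + bg_rgb * (1.0 - alpha)
    return blended.astype(np.uint8)
```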
embodied_gen/utils/simulation.py
CHANGED
@@ -73,8 +73,17 @@ def load_actor_from_urdf(
     root = tree.getroot()
     node_name = root.get("name")
     file_dir = os.path.dirname(file_path)
-    visual_file = root.find('.//visual/geometry/mesh').get("filename")
-    collision_file = root.find('.//collision/geometry/mesh').get("filename")
+
+    visual_mesh = root.find('.//visual/geometry/mesh')
+    visual_file = visual_mesh.get("filename")
+    visual_scale = visual_mesh.get("scale", "1.0 1.0 1.0")
+    visual_scale = np.array([float(x) for x in visual_scale.split()])
+
+    collision_mesh = root.find('.//collision/geometry/mesh')
+    collision_file = collision_mesh.get("filename")
+    collision_scale = collision_mesh.get("scale", "1.0 1.0 1.0")
+    collision_scale = np.array([float(x) for x in collision_scale.split()])
+
     visual_file = os.path.join(file_dir, visual_file)
     collision_file = os.path.join(file_dir, collision_file)
     static_fric = root.find('.//collision/gazebo/mu1').text
@@ -90,15 +99,16 @@ def load_actor_from_urdf(
     body_type = "static" if use_static else "dynamic"
     builder.set_physx_body_type(body_type)
     builder.add_multiple_convex_collisions_from_file(
-        collision_file
+        collision_file,
         material=material,
+        scale=collision_scale,
         # decomposition="coacd",
         # decomposition_params=dict(
         #     threshold=0.05, max_convex_hull=64, verbose=False
         # ),
     )
 
-    builder.add_visual_from_file(visual_file)
+    builder.add_visual_from_file(visual_file, scale=visual_scale)
     builder.set_initial_pose(pose)
     if isinstance(scene, ManiSkillScene) and env_idx is not None:
         builder.set_scene_idxs([env_idx])
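The updated loader reads each mesh element's optional scale attribute, falling back to unit scale, and forwards it to the builder so visual and collision geometry stay consistent with the URDF. A standalone sketch of just the parsing step against a toy URDF string (the URDF content is illustrative):

```python
import numpy as np
import xml.etree.ElementTree as ET

urdf = """
<robot name="demo">
  <link name="base">
    <visual><geometry><mesh filename="mesh/visual.obj" scale="0.5 0.5 0.5"/></geometry></visual>
    <collision><geometry><mesh filename="mesh/collision.ply"/></geometry></collision>
  </link>
</robot>
"""
root = ET.fromstring(urdf)

for tag in ("visual", "collision"):
    mesh = root.find(f".//{tag}/geometry/mesh")
    scale = mesh.get("scale", "1.0 1.0 1.0")  # unit scale when the attribute is absent
    scale = np.array([float(x) for x in scale.split()])
    print(tag, mesh.get("filename"), scale)
```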
embodied_gen/validators/urdf_convertor.py
CHANGED
@@ -134,8 +134,9 @@ class URDFGenerator(object):
     Estimate the vertical projection of their real length based on its pose.
     For example:
     - A pen standing upright in the first image (aligned with the image's vertical axis)
-      vertical height ≈ 0.14-0.20 m
-    - A pen lying flat in the first image
+      full body visible in the first image: → vertical height ≈ 0.14-0.20 m
+    - A pen lying flat in the first image or either the tip or the tail is facing the image
+      (showing thickness or as a circle) → vertical height ≈ 0.018-0.025 m
     - Tilted pen in the first image (e.g., ~45° angle): vertical height ≈ 0.07-0.12 m
     - Use the rest views to help determine the object's 3D pose and orientation.
     Assume the object is in real-world scale and estimate the approximate vertical height