Add files using upload-large-folder tool
Browse files- README.md +43 -0
- docs/BUNDLE_CONTENTS.txt +10 -0
- docs/ENVIRONMENT_NOTES.md +47 -0
- models/anybimanual/peract_lf/config.yaml +139 -0
- models/anybimanual/peract_lf/eval_data.csv +4 -0
- reports/3dfa_peract2_take_tray_out_of_oven_full100/eval.json +6 -0
- reports/3dfa_peract2_take_tray_out_of_oven_full100/run.log +242 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval.json +6 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval_after_official_ttm.json +6 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/run.log +11 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_minimalegl.log +11 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_offscreen.log +26 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_offscreen2.log +31 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_offscreen3.log +27 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_xvfb.log +12 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_xvfb2.log +29 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_xvfb3.log +46 -0
- reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_xvfb_official_ttm.log +46 -0
- reports/anybimanual_eval_logs/bimanual_sweep_to_dustpan_ep1_demo.log +178 -0
- reports/anybimanual_eval_logs/bimanual_sweep_to_dustpan_ep5.log +205 -0
- reports/anybimanual_eval_logs/bimanual_sweep_to_dustpan_ep5_clean.log +242 -0
- reports/anybimanual_eval_logs/bimanual_sweep_to_dustpan_ep5_live.log +226 -0
- reports/anybimanual_eval_logs/coordinated_lift_tray_ep1_live.log +181 -0
- reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_fold_tops__trunk_only_ft_seed17.json +65 -0
- reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_hang_coat__trunk_only_ft_seed17.json +65 -0
- reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_hybrid_smoke_summary.json +18 -0
- reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_store_tops__trunk_only_ft_seed17.json +66 -0
- reports/public_hybrid_benchmark_smoke_v1/hybrid_public_benchmark_smoke_summary.json +85 -0
- reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_lift_tray__trunk_only_ft_seed17.json +61 -0
- reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_put_bottle_in_fridge__trunk_only_ft_seed17.json +61 -0
- reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_straighten_rope__trunk_only_ft_seed17.json +61 -0
- reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_sweep_to_dustpan__trunk_only_ft_seed17.json +61 -0
- reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_take_out_tray__trunk_only_ft_seed17.json +61 -0
- reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench_hybrid_smoke_summary.json +22 -0
- scripts/run_anybimanual_task_eval.sh +41 -0
- third_party/DexGarmentLab/Data/Fold_Tops_Validation_HALO/final_state_pic/img_0.png +0 -0
- third_party/DexGarmentLab/Data/Fold_Tops_Validation_HALO/final_state_pic/img_1.png +0 -0
- third_party/DexGarmentLab/Data/Fold_Tops_Validation_HALO/validation_log.txt +2 -0
- third_party/DexGarmentLab/Data/Hang_Coat_Validation_HALO/validation_log.txt +2 -0
- third_party/DexGarmentLab/Data/Store_Tops_Validation_HALO/final_state_pic/img_0.png +0 -0
- third_party/DexGarmentLab/Data/Store_Tops_Validation_HALO/validation_log.txt +1 -0
- third_party/DexGarmentLab/Env_StandAlone/Fold_Tops_Env.py +440 -0
- third_party/DexGarmentLab/Env_StandAlone/Hang_Coat_Env.py +417 -0
- third_party/DexGarmentLab/Env_StandAlone/Store_Tops_Env.py +511 -0
- third_party/DexGarmentLab/Env_Validation/Hang_Tops_HALO.py +405 -0
- third_party/DexGarmentLab/Env_Validation/Hang_Trousers_HALO.py +409 -0
- third_party/DexGarmentLab/Env_Validation/Store_Tops_HALO.py +547 -0
- third_party/DexGarmentLab/Env_Validation/Wear_Baseballcap_HALO.py +403 -0
- third_party/DexGarmentLab/Env_Validation/Wear_Bowlhat_HALO.py +410 -0
- third_party/DexGarmentLab/Env_Validation/Wear_Scarf_HALO.py +470 -0
README.md
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# VLAarchTests Bench
|
| 2 |
+
|
| 3 |
+
This repository is a bundled benchmark workspace for future VLAarchTests model
|
| 4 |
+
iterations, with emphasis on RLBench2 `take tray out of oven`.
|
| 5 |
+
|
| 6 |
+
## Included
|
| 7 |
+
|
| 8 |
+
- VLAarchTests benchmark code and generated public benchmark manifest
|
| 9 |
+
- Patched AnyBimanual RLBench runtime used to execute the public oven benchmark
|
| 10 |
+
- Official `katefgroup/3d_flowmatch_actor` PerAct2 checkpoint and public test data
|
| 11 |
+
- Public AnyBimanual LF baseline weights and comparison logs
|
| 12 |
+
- Verified benchmark reports:
|
| 13 |
+
- oven subset run: `9/10`
|
| 14 |
+
- oven full official run: `95/100 = 0.95`
|
| 15 |
+
- hybrid public benchmark smoke outputs
|
| 16 |
+
- DexGarmentLab benchmark-related validation scripts and validation logs
|
| 17 |
+
|
| 18 |
+
## Key Result
|
| 19 |
+
|
| 20 |
+
The strongest public out-of-box checkpoint validated here is:
|
| 21 |
+
|
| 22 |
+
- `models/3dfa_peract2/3dfa_peract2.pth`
|
| 23 |
+
|
| 24 |
+
Official oven result artifacts:
|
| 25 |
+
|
| 26 |
+
- `reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval_after_official_ttm.json`
|
| 27 |
+
- `reports/3dfa_peract2_take_tray_out_of_oven_full100/eval.json`
|
| 28 |
+
|
| 29 |
+
## Important Code Paths
|
| 30 |
+
|
| 31 |
+
- `code/VLAarchtests4/code/VLAarchtests2_code/VLAarchtests/code/reveal_vla_bimanual/eval/public_benchmark_package.py`
|
| 32 |
+
- `code/VLAarchtests4/code/VLAarchtests2_code/VLAarchtests/code/reveal_vla_bimanual/eval/run_rlbench_hybrid_smoke.py`
|
| 33 |
+
- `third_party/AnyBimanual/third_party/RLBench/rlbench/bimanual_tasks/bimanual_take_tray_out_of_oven.py`
|
| 34 |
+
- `third_party/AnyBimanual/third_party/RLBench/rlbench/task_ttms/bimanual_take_tray_out_of_oven.ttm`
|
| 35 |
+
|
| 36 |
+
## External Dependencies Not Mirrored Here
|
| 37 |
+
|
| 38 |
+
- CoppeliaSim v4.1.0 binary runtime
|
| 39 |
+
- Local Python environments under `/workspace/envs`
|
| 40 |
+
- Full IsaacSim installation
|
| 41 |
+
- Full DexGarmentLab simulator assets beyond the benchmark-related scripts and logs
|
| 42 |
+
|
| 43 |
+
See `docs/ENVIRONMENT_NOTES.md` for the runtime notes used in this workspace.
|
docs/BUNDLE_CONTENTS.txt
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Bundle Contents
|
| 2 |
+
|
| 3 |
+
2.0K README.md
|
| 4 |
+
19M code
|
| 5 |
+
979K docs
|
| 6 |
+
979K scripts
|
| 7 |
+
1.9G third_party
|
| 8 |
+
1.6G models
|
| 9 |
+
3.2G data
|
| 10 |
+
6.9M reports
|
docs/ENVIRONMENT_NOTES.md
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Environment Notes
|
| 2 |
+
|
| 3 |
+
This benchmark bundle was validated on a Linux GPU machine with:
|
| 4 |
+
|
| 5 |
+
- CoppeliaSim v4.1.0 available at `COPPELIASIM_ROOT`
|
| 6 |
+
- `xvfb` and `xauth` installed
|
| 7 |
+
- Qt xcb runtime libraries installed:
|
| 8 |
+
- `libxrender1`
|
| 9 |
+
- `libxkbcommon0`
|
| 10 |
+
- `libxkbcommon-x11-0`
|
| 11 |
+
- `libxcb-icccm4`
|
| 12 |
+
- `libxcb-image0`
|
| 13 |
+
- `libxcb-keysyms1`
|
| 14 |
+
- `libxcb-randr0`
|
| 15 |
+
- `libxcb-render-util0`
|
| 16 |
+
- `libxcb-shape0`
|
| 17 |
+
- `libxcb-shm0`
|
| 18 |
+
- `libxcb-sync1`
|
| 19 |
+
- `libxcb-xfixes0`
|
| 20 |
+
- `libxcb-xinerama0`
|
| 21 |
+
- `libxcb-xkb1`
|
| 22 |
+
|
| 23 |
+
The successful RLBench2 oven evaluation was run with:
|
| 24 |
+
|
| 25 |
+
```bash
|
| 26 |
+
xvfb-run -a -s "-screen 0 1400x900x24" python \
|
| 27 |
+
online_evaluation_rlbench/evaluate_policy.py \
|
| 28 |
+
--checkpoint models/3dfa_peract2/3dfa_peract2.pth \
|
| 29 |
+
--task bimanual_take_tray_out_of_oven \
|
| 30 |
+
--data_dir data/3dfa/peract2_test \
|
| 31 |
+
--dataset Peract2_3dfront_3dwrist \
|
| 32 |
+
--image_size 256,256 \
|
| 33 |
+
--model_type denoise3d \
|
| 34 |
+
--bimanual true \
|
| 35 |
+
--prediction_len 1 \
|
| 36 |
+
--backbone clip \
|
| 37 |
+
--fps_subsampling_factor 4 \
|
| 38 |
+
--embedding_dim 120 \
|
| 39 |
+
--num_attn_heads 8 \
|
| 40 |
+
--num_vis_instr_attn_layers 3 \
|
| 41 |
+
--num_history 3 \
|
| 42 |
+
--num_shared_attn_layers 4 \
|
| 43 |
+
--relative_action false \
|
| 44 |
+
--rotation_format quat_xyzw \
|
| 45 |
+
--denoise_timesteps 5 \
|
| 46 |
+
--denoise_model rectified_flow
|
| 47 |
+
```
|
models/anybimanual/peract_lf/config.yaml
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
ddp:
|
| 2 |
+
master_addr: localhost
|
| 3 |
+
master_port: 13985
|
| 4 |
+
num_devices: 2
|
| 5 |
+
rlbench:
|
| 6 |
+
task_name: perlf+ab
|
| 7 |
+
tasks:
|
| 8 |
+
- bimanual_pick_laptop
|
| 9 |
+
- bimanual_pick_plate
|
| 10 |
+
- bimanual_straighten_rope
|
| 11 |
+
- coordinated_lift_ball
|
| 12 |
+
- coordinated_lift_tray
|
| 13 |
+
- coordinated_push_box
|
| 14 |
+
- coordinated_put_bottle_in_fridge
|
| 15 |
+
- dual_push_buttons
|
| 16 |
+
- handover_item
|
| 17 |
+
- bimanual_sweep_to_dustpan
|
| 18 |
+
- coordinated_take_tray_out_of_oven
|
| 19 |
+
- handover_item_easy
|
| 20 |
+
demos: 100
|
| 21 |
+
demo_path: /mnt/disk_1/tengbo/bimanual_data/train
|
| 22 |
+
episode_length: 25
|
| 23 |
+
cameras:
|
| 24 |
+
- over_shoulder_right
|
| 25 |
+
- wrist_right
|
| 26 |
+
- front
|
| 27 |
+
- overhead
|
| 28 |
+
- over_shoulder_left
|
| 29 |
+
- wrist_left
|
| 30 |
+
camera_resolution:
|
| 31 |
+
- 256
|
| 32 |
+
- 256
|
| 33 |
+
scene_bounds:
|
| 34 |
+
- -0.3
|
| 35 |
+
- -0.5
|
| 36 |
+
- 0.6
|
| 37 |
+
- 0.7
|
| 38 |
+
- 0.5
|
| 39 |
+
- 1.6
|
| 40 |
+
include_lang_goal_in_obs: true
|
| 41 |
+
instructions: ''
|
| 42 |
+
replay:
|
| 43 |
+
batch_size: 2
|
| 44 |
+
timesteps: 1
|
| 45 |
+
prioritisation: false
|
| 46 |
+
task_uniform: true
|
| 47 |
+
use_disk: true
|
| 48 |
+
path: /mnt/disk_2/tengbo/replay/
|
| 49 |
+
max_parallel_processes: 32
|
| 50 |
+
task_folder: multi
|
| 51 |
+
framework:
|
| 52 |
+
log_freq: 1000
|
| 53 |
+
save_freq: 10000
|
| 54 |
+
train_envs: 1
|
| 55 |
+
replay_ratio: ${replay.batch_size}
|
| 56 |
+
transitions_before_train: 200
|
| 57 |
+
tensorboard_logging: true
|
| 58 |
+
csv_logging: true
|
| 59 |
+
training_iterations: 100001
|
| 60 |
+
gpu: 0
|
| 61 |
+
env_gpu: 0
|
| 62 |
+
logdir: /mnt/disk_1/tengbo/peract_bimanual/log
|
| 63 |
+
logging_level: 20
|
| 64 |
+
seeds: 1
|
| 65 |
+
start_seed: 0
|
| 66 |
+
load_existing_weights: false
|
| 67 |
+
num_weights_to_keep: 60
|
| 68 |
+
num_workers: 0
|
| 69 |
+
record_every_n: 5
|
| 70 |
+
checkpoint_name_prefix: checkpoint
|
| 71 |
+
use_wandb: true
|
| 72 |
+
wandb_project: ''
|
| 73 |
+
wandb_group: perlf+ab
|
| 74 |
+
seed: 0
|
| 75 |
+
wandb_name: perlf+ab
|
| 76 |
+
use_skill: true
|
| 77 |
+
use_pretrained: false
|
| 78 |
+
use_prefix: false
|
| 79 |
+
frozen: ''
|
| 80 |
+
anybimanual: true
|
| 81 |
+
augmentation_type: ab
|
| 82 |
+
method:
|
| 83 |
+
name: PERACT_BC
|
| 84 |
+
agent_type: leader_follower
|
| 85 |
+
robot_name: bimanual
|
| 86 |
+
image_crop_size: 64
|
| 87 |
+
bounds_offset:
|
| 88 |
+
- 0.15
|
| 89 |
+
voxel_sizes:
|
| 90 |
+
- 100
|
| 91 |
+
include_prev_layer: false
|
| 92 |
+
num_latents: 2048
|
| 93 |
+
latent_dim: 512
|
| 94 |
+
transformer_depth: 6
|
| 95 |
+
transformer_iterations: 1
|
| 96 |
+
cross_heads: 1
|
| 97 |
+
cross_dim_head: 64
|
| 98 |
+
latent_heads: 8
|
| 99 |
+
latent_dim_head: 64
|
| 100 |
+
pos_encoding_with_lang: true
|
| 101 |
+
conv_downsample: true
|
| 102 |
+
lang_fusion_type: seq
|
| 103 |
+
voxel_patch_size: 5
|
| 104 |
+
voxel_patch_stride: 5
|
| 105 |
+
final_dim: 64
|
| 106 |
+
low_dim_size: 4
|
| 107 |
+
input_dropout: 0.1
|
| 108 |
+
attn_dropout: 0.1
|
| 109 |
+
decoder_dropout: 0.0
|
| 110 |
+
lr: 0.0005
|
| 111 |
+
lr_scheduler: false
|
| 112 |
+
num_warmup_steps: 3000
|
| 113 |
+
optimizer: lamb
|
| 114 |
+
lambda_weight_l2: 1.0e-06
|
| 115 |
+
trans_loss_weight: 1.0
|
| 116 |
+
rot_loss_weight: 1.0
|
| 117 |
+
grip_loss_weight: 1.0
|
| 118 |
+
collision_loss_weight: 1.0
|
| 119 |
+
rotation_resolution: 5
|
| 120 |
+
activation: lrelu
|
| 121 |
+
norm: None
|
| 122 |
+
crop_augmentation: true
|
| 123 |
+
transform_augmentation:
|
| 124 |
+
apply_se3: true
|
| 125 |
+
aug_xyz:
|
| 126 |
+
- 0.125
|
| 127 |
+
- 0.125
|
| 128 |
+
- 0.125
|
| 129 |
+
aug_rpy:
|
| 130 |
+
- 0.0
|
| 131 |
+
- 0.0
|
| 132 |
+
- 45.0
|
| 133 |
+
aug_rot_resolution: ${method.rotation_resolution}
|
| 134 |
+
demo_augmentation: true
|
| 135 |
+
demo_augmentation_every_n: 10
|
| 136 |
+
no_skip_connection: false
|
| 137 |
+
no_perceiver: false
|
| 138 |
+
no_language: false
|
| 139 |
+
keypoint_method: heuristic
|
models/anybimanual/peract_lf/eval_data.csv
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
step,eval_envs/return/bimanual_pick_laptop,eval_envs/length/bimanual_pick_laptop,eval_envs/total_transitions/bimanual_pick_laptop,eval_envs/return/bimanual_pick_plate,eval_envs/length/bimanual_pick_plate,eval_envs/total_transitions/bimanual_pick_plate,eval_envs/return/bimanual_straighten_rope,eval_envs/length/bimanual_straighten_rope,eval_envs/total_transitions/bimanual_straighten_rope,eval_envs/return/coordinated_lift_ball,eval_envs/length/coordinated_lift_ball,eval_envs/total_transitions/coordinated_lift_ball,eval_envs/return/coordinated_lift_tray,eval_envs/length/coordinated_lift_tray,eval_envs/total_transitions/coordinated_lift_tray,eval_envs/return/coordinated_push_box,eval_envs/length/coordinated_push_box,eval_envs/total_transitions/coordinated_push_box,eval_envs/return/coordinated_put_bottle_in_fridge,eval_envs/length/coordinated_put_bottle_in_fridge,eval_envs/total_transitions/coordinated_put_bottle_in_fridge,eval_envs/return/dual_push_buttons,eval_envs/length/dual_push_buttons,eval_envs/total_transitions/dual_push_buttons,eval_envs/return/handover_item,eval_envs/length/handover_item,eval_envs/total_transitions/handover_item,eval_envs/return/bimanual_sweep_to_dustpan,eval_envs/length/bimanual_sweep_to_dustpan,eval_envs/total_transitions/bimanual_sweep_to_dustpan,eval_envs/return/coordinated_take_tray_out_of_oven,eval_envs/length/coordinated_take_tray_out_of_oven,eval_envs/total_transitions/coordinated_take_tray_out_of_oven,eval_envs/return/handover_item_easy,eval_envs/length/handover_item_easy,eval_envs/total_transitions/handover_item_easy
|
| 2 |
+
50000,0.0,25.0,625,4.0,24.76,1244,0.0,25.0,1869,0.0,25.0,2494,0.0,25.0,3119,0.0,25.0,3744,0.0,25.0,4369,20.0,21.84,4915,0.0,25.0,5540,0.0,25.0,6165,0.0,25.0,6790,8.0,24.0,7390
|
| 3 |
+
60000,8.0,25.0,625,4.0,24.48,1237,4.0,25.0,1862,8.0,23.24,2443,4.0,25.0,3068,20.0,25.0,3693,4.0,25.0,4318,24.0,21.56,4857,4.0,25.0,5482,24.0,25.0,6107,0.0,25.0,6732,12.0,24.6,7347
|
| 4 |
+
70000,4.0,25.0,625,12.0,23.24,1206,4.0,25.0,1831,0.0,25.0,2456,0.0,25.0,3081,12.0,25.0,3706,0.0,25.0,4331,12.0,23.16,4910,0.0,25.0,5535,16.0,25.0,6228,0.0,25.0,6160,8.0,25.0,6785
|
reports/3dfa_peract2_take_tray_out_of_oven_full100/eval.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bimanual_take_tray_out_of_oven": {
|
| 3 |
+
"0": 95,
|
| 4 |
+
"mean": 0.95
|
| 5 |
+
}
|
| 6 |
+
}
|
reports/3dfa_peract2_take_tray_out_of_oven_full100/run.log
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 0 |
0%| | 0/1 [00:00<?, ?it/s]WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Arguments:
|
| 2 |
+
Namespace(checkpoint='/workspace/models/3dfa_peract2/3dfa_peract2.pth', task='bimanual_take_tray_out_of_oven', max_tries=2, max_steps=25, headless=True, collision_checking=False, seed=0, data_dir=PosixPath('/workspace/data/3dfa/peract2_test'), dataset='Peract2_3dfront_3dwrist', image_size='256,256', output_file=PosixPath('/workspace/reports/3dfa_peract2_take_tray_out_of_oven_full100/eval.json'), model_type='denoise3d', bimanual=True, prediction_len=1, backbone='clip', fps_subsampling_factor=4, embedding_dim=120, num_attn_heads=8, num_vis_instr_attn_layers=3, num_history=3, num_shared_attn_layers=4, relative_action=False, rotation_format='quat_xyzw', denoise_timesteps=5, denoise_model='rectified_flow')
|
| 3 |
+
----------------------------------------------------------------------------------------------------
|
| 4 |
+
Loading model from /workspace/models/3dfa_peract2/3dfa_peract2.pth
|
| 5 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/huggingface_hub/file_download.py:1142: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 6 |
+
warnings.warn(
|
| 7 |
+
WARNING:root:not sure how _robot_shapes are used is used.
|
| 8 |
+
|
| 9 |
0%| | 0/1 [00:00<?, ?it/s]WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 10 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 11 |
+
ERROR:root:robot is in collision
|
| 12 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 13 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 14 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 15 |
+
ERROR:root:robot is in collision
|
| 16 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 17 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 18 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 19 |
+
ERROR:root:robot is in collision
|
| 20 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 21 |
+
ERROR:root:robot is in collision
|
| 22 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 23 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 24 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 25 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 26 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 27 |
+
ERROR:root:robot is in collision
|
| 28 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 29 |
+
ERROR:root:robot is in collision
|
| 30 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 31 |
+
ERROR:root:robot is in collision
|
| 32 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 33 |
+
ERROR:root:robot is in collision
|
| 34 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 35 |
+
ERROR:root:robot is in collision
|
| 36 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 37 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 38 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 39 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 40 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 41 |
+
ERROR:root:robot is in collision
|
| 42 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 43 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 44 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 45 |
+
ERROR:root:robot is in collision
|
| 46 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 47 |
+
ERROR:root:robot is in collision
|
| 48 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 49 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 50 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 51 |
+
ERROR:root:robot is in collision
|
| 52 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 53 |
+
ERROR:root:robot is in collision
|
| 54 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 55 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 56 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 57 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 58 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 59 |
+
ERROR:root:robot is in collision
|
| 60 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 61 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 62 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 63 |
+
ERROR:root:robot is in collision
|
| 64 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 65 |
+
ERROR:root:robot is in collision
|
| 66 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 67 |
+
ERROR:root:robot is in collision
|
| 68 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 69 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 70 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 71 |
+
ERROR:root:robot is in collision
|
| 72 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 73 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 74 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 75 |
+
ERROR:root:robot is in collision
|
| 76 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 77 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 78 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 79 |
+
ERROR:root:robot is in collision
|
| 80 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 81 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 82 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 83 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 84 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 85 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 86 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 87 |
+
ERROR:root:robot is in collision
|
| 88 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 89 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 90 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 91 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 92 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 93 |
+
ERROR:root:robot is in collision
|
| 94 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 95 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 96 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 97 |
+
ERROR:root:robot is in collision
|
| 98 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 99 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 100 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 101 |
+
ERROR:root:robot is in collision
|
| 102 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 103 |
+
ERROR:root:robot is in collision
|
| 104 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 105 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 106 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 107 |
+
ERROR:root:robot is in collision
|
| 108 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 109 |
+
ERROR:root:robot is in collision
|
| 110 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 111 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 112 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 113 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 114 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 115 |
+
ERROR:root:robot is in collision
|
| 116 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 117 |
+
ERROR:root:robot is in collision
|
| 118 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 119 |
+
ERROR:root:robot is in collision
|
| 120 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 121 |
+
ERROR:root:robot is in collision
|
| 122 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 123 |
+
ERROR:root:robot is in collision
|
| 124 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 125 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 126 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 127 |
+
ERROR:root:robot is in collision
|
| 128 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 129 |
+
ERROR:root:robot is in collision
|
| 130 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 131 |
+
ERROR:root:robot is in collision
|
| 132 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 133 |
+
ERROR:root:robot is in collision
|
| 134 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 135 |
+
ERROR:root:robot is in collision
|
| 136 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 137 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 138 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 139 |
+
|
| 140 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 0 Reward 1.00 max_reward 1.00 SR: 1/1 SR: 1.00/1 # valid demos 1
|
| 141 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 1 Reward 1.00 max_reward 1.00 SR: 2/2 SR: 2.00/2 # valid demos 2
|
| 142 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 2 Reward 1.00 max_reward 1.00 SR: 3/3 SR: 3.00/3 # valid demos 3
|
| 143 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 3 Reward 1.00 max_reward 1.00 SR: 4/4 SR: 4.00/4 # valid demos 4
|
| 144 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 4 Reward 1.00 max_reward 1.00 SR: 5/5 SR: 5.00/5 # valid demos 5
|
| 145 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 5 Reward 1.00 max_reward 1.00 SR: 6/6 SR: 6.00/6 # valid demos 6
|
| 146 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 6 Reward 0.00 max_reward 0.00 SR: 6/7 SR: 6.00/7 # valid demos 7
|
| 147 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 7 Reward 1.00 max_reward 1.00 SR: 7/8 SR: 7.00/8 # valid demos 8
|
| 148 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 8 Reward 1.00 max_reward 1.00 SR: 8/9 SR: 8.00/9 # valid demos 9
|
| 149 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 9 Reward 1.00 max_reward 1.00 SR: 9/10 SR: 9.00/10 # valid demos 10
|
| 150 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 10 Reward 1.00 max_reward 1.00 SR: 10/11 SR: 10.00/11 # valid demos 11
|
| 151 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 11 Reward 1.00 max_reward 1.00 SR: 11/12 SR: 11.00/12 # valid demos 12
|
| 152 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 12 Reward 1.00 max_reward 1.00 SR: 12/13 SR: 12.00/13 # valid demos 13
|
| 153 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 13 Reward 1.00 max_reward 1.00 SR: 13/14 SR: 13.00/14 # valid demos 14
|
| 154 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 14 Reward 1.00 max_reward 1.00 SR: 14/15 SR: 14.00/15 # valid demos 15
|
| 155 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 15 Reward 1.00 max_reward 1.00 SR: 15/16 SR: 15.00/16 # valid demos 16
|
| 156 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 16 Reward 1.00 max_reward 1.00 SR: 16/17 SR: 16.00/17 # valid demos 17
|
| 157 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 17 Reward 0.00 max_reward 0.00 SR: 16/18 SR: 16.00/18 # valid demos 18
|
| 158 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 18 Reward 1.00 max_reward 1.00 SR: 17/19 SR: 17.00/19 # valid demos 19
|
| 159 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 19 Reward 1.00 max_reward 1.00 SR: 18/20 SR: 18.00/20 # valid demos 20
|
| 160 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 20 Reward 1.00 max_reward 1.00 SR: 19/21 SR: 19.00/21 # valid demos 21
|
| 161 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 21 Reward 1.00 max_reward 1.00 SR: 20/22 SR: 20.00/22 # valid demos 22
|
| 162 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 22 Reward 1.00 max_reward 1.00 SR: 21/23 SR: 21.00/23 # valid demos 23
|
| 163 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 23 Reward 1.00 max_reward 1.00 SR: 22/24 SR: 22.00/24 # valid demos 24
|
| 164 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 24 Reward 1.00 max_reward 1.00 SR: 23/25 SR: 23.00/25 # valid demos 25
|
| 165 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 25 Reward 1.00 max_reward 1.00 SR: 24/26 SR: 24.00/26 # valid demos 26
|
| 166 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 26 Reward 0.00 max_reward 0.00 SR: 24/27 SR: 24.00/27 # valid demos 27
|
| 167 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 27 Reward 1.00 max_reward 1.00 SR: 25/28 SR: 25.00/28 # valid demos 28
|
| 168 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 28 Reward 1.00 max_reward 1.00 SR: 26/29 SR: 26.00/29 # valid demos 29
|
| 169 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 29 Reward 1.00 max_reward 1.00 SR: 27/30 SR: 27.00/30 # valid demos 30
|
| 170 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 30 Reward 1.00 max_reward 1.00 SR: 28/31 SR: 28.00/31 # valid demos 31
|
| 171 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 31 Reward 1.00 max_reward 1.00 SR: 29/32 SR: 29.00/32 # valid demos 32
|
| 172 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 32 Reward 1.00 max_reward 1.00 SR: 30/33 SR: 30.00/33 # valid demos 33
|
| 173 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 33 Reward 1.00 max_reward 1.00 SR: 31/34 SR: 31.00/34 # valid demos 34
|
| 174 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 34 Reward 1.00 max_reward 1.00 SR: 32/35 SR: 32.00/35 # valid demos 35
|
| 175 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 35 Reward 1.00 max_reward 1.00 SR: 33/36 SR: 33.00/36 # valid demos 36
|
| 176 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 36 Reward 1.00 max_reward 1.00 SR: 34/37 SR: 34.00/37 # valid demos 37
|
| 177 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 37 Reward 1.00 max_reward 1.00 SR: 35/38 SR: 35.00/38 # valid demos 38
|
| 178 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 38 Reward 1.00 max_reward 1.00 SR: 36/39 SR: 36.00/39 # valid demos 39
|
| 179 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 39 Reward 1.00 max_reward 1.00 SR: 37/40 SR: 37.00/40 # valid demos 40
|
| 180 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 40 Reward 1.00 max_reward 1.00 SR: 38/41 SR: 38.00/41 # valid demos 41
|
| 181 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 41 Reward 1.00 max_reward 1.00 SR: 39/42 SR: 39.00/42 # valid demos 42
|
| 182 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 42 Reward 1.00 max_reward 1.00 SR: 40/43 SR: 40.00/43 # valid demos 43
|
| 183 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 43 Reward 1.00 max_reward 1.00 SR: 41/44 SR: 41.00/44 # valid demos 44
|
| 184 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 44 Reward 1.00 max_reward 1.00 SR: 42/45 SR: 42.00/45 # valid demos 45
|
| 185 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 45 Reward 1.00 max_reward 1.00 SR: 43/46 SR: 43.00/46 # valid demos 46
|
| 186 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 46 Reward 1.00 max_reward 1.00 SR: 44/47 SR: 44.00/47 # valid demos 47
|
| 187 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 47 Reward 1.00 max_reward 1.00 SR: 45/48 SR: 45.00/48 # valid demos 48
|
| 188 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 48 Reward 1.00 max_reward 1.00 SR: 46/49 SR: 46.00/49 # valid demos 49
|
| 189 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 49 Reward 1.00 max_reward 1.00 SR: 47/50 SR: 47.00/50 # valid demos 50
|
| 190 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 50 Reward 1.00 max_reward 1.00 SR: 48/51 SR: 48.00/51 # valid demos 51
|
| 191 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 51 Reward 1.00 max_reward 1.00 SR: 49/52 SR: 49.00/52 # valid demos 52
|
| 192 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 52 Reward 1.00 max_reward 1.00 SR: 50/53 SR: 50.00/53 # valid demos 53
|
| 193 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 53 Reward 1.00 max_reward 1.00 SR: 51/54 SR: 51.00/54 # valid demos 54
|
| 194 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 54 Reward 1.00 max_reward 1.00 SR: 52/55 SR: 52.00/55 # valid demos 55
|
| 195 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 55 Reward 1.00 max_reward 1.00 SR: 53/56 SR: 53.00/56 # valid demos 56
|
| 196 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 56 Reward 1.00 max_reward 1.00 SR: 54/57 SR: 54.00/57 # valid demos 57
|
| 197 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 57 Reward 1.00 max_reward 1.00 SR: 55/58 SR: 55.00/58 # valid demos 58
|
| 198 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 58 Reward 1.00 max_reward 1.00 SR: 56/59 SR: 56.00/59 # valid demos 59
|
| 199 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 59 Reward 1.00 max_reward 1.00 SR: 57/60 SR: 57.00/60 # valid demos 60
|
| 200 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 60 Reward 1.00 max_reward 1.00 SR: 58/61 SR: 58.00/61 # valid demos 61
|
| 201 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 61 Reward 1.00 max_reward 1.00 SR: 59/62 SR: 59.00/62 # valid demos 62
|
| 202 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 62 Reward 1.00 max_reward 1.00 SR: 60/63 SR: 60.00/63 # valid demos 63
|
| 203 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 63 Reward 1.00 max_reward 1.00 SR: 61/64 SR: 61.00/64 # valid demos 64
|
| 204 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 64 Reward 1.00 max_reward 1.00 SR: 62/65 SR: 62.00/65 # valid demos 65
|
| 205 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 65 Reward 1.00 max_reward 1.00 SR: 63/66 SR: 63.00/66 # valid demos 66
|
| 206 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 66 Reward 1.00 max_reward 1.00 SR: 64/67 SR: 64.00/67 # valid demos 67
|
| 207 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 67 Reward 1.00 max_reward 1.00 SR: 65/68 SR: 65.00/68 # valid demos 68
|
| 208 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 68 Reward 1.00 max_reward 1.00 SR: 66/69 SR: 66.00/69 # valid demos 69
|
| 209 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 69 Reward 1.00 max_reward 1.00 SR: 67/70 SR: 67.00/70 # valid demos 70
|
| 210 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 70 Reward 1.00 max_reward 1.00 SR: 68/71 SR: 68.00/71 # valid demos 71
|
| 211 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 71 Reward 1.00 max_reward 1.00 SR: 69/72 SR: 69.00/72 # valid demos 72
|
| 212 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 72 Reward 1.00 max_reward 1.00 SR: 70/73 SR: 70.00/73 # valid demos 73
|
| 213 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 73 Reward 1.00 max_reward 1.00 SR: 71/74 SR: 71.00/74 # valid demos 74
|
| 214 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 74 Reward 1.00 max_reward 1.00 SR: 72/75 SR: 72.00/75 # valid demos 75
|
| 215 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 75 Reward 1.00 max_reward 1.00 SR: 73/76 SR: 73.00/76 # valid demos 76
|
| 216 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 76 Reward 1.00 max_reward 1.00 SR: 74/77 SR: 74.00/77 # valid demos 77
|
| 217 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 77 Reward 1.00 max_reward 1.00 SR: 75/78 SR: 75.00/78 # valid demos 78
|
| 218 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 78 Reward 1.00 max_reward 1.00 SR: 76/79 SR: 76.00/79 # valid demos 79
|
| 219 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 79 Reward 1.00 max_reward 1.00 SR: 77/80 SR: 77.00/80 # valid demos 80
|
| 220 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 80 Reward 1.00 max_reward 1.00 SR: 78/81 SR: 78.00/81 # valid demos 81
|
| 221 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 81 Reward 1.00 max_reward 1.00 SR: 79/82 SR: 79.00/82 # valid demos 82
|
| 222 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 82 Reward 1.00 max_reward 1.00 SR: 80/83 SR: 80.00/83 # valid demos 83
|
| 223 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 83 Reward 1.00 max_reward 1.00 SR: 81/84 SR: 81.00/84 # valid demos 84
|
| 224 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 84 Reward 1.00 max_reward 1.00 SR: 82/85 SR: 82.00/85 # valid demos 85
|
| 225 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 85 Reward 0.00 max_reward 0.00 SR: 82/86 SR: 82.00/86 # valid demos 86
|
| 226 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 86 Reward 1.00 max_reward 1.00 SR: 83/87 SR: 83.00/87 # valid demos 87
|
| 227 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 87 Reward 1.00 max_reward 1.00 SR: 84/88 SR: 84.00/88 # valid demos 88
|
| 228 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 88 Reward 1.00 max_reward 1.00 SR: 85/89 SR: 85.00/89 # valid demos 89
|
| 229 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 89 Reward 0.00 max_reward 0.00 SR: 85/90 SR: 85.00/90 # valid demos 90
|
| 230 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 90 Reward 1.00 max_reward 1.00 SR: 86/91 SR: 86.00/91 # valid demos 91
|
| 231 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 91 Reward 1.00 max_reward 1.00 SR: 87/92 SR: 87.00/92 # valid demos 92
|
| 232 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 92 Reward 1.00 max_reward 1.00 SR: 88/93 SR: 88.00/93 # valid demos 93
|
| 233 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 93 Reward 1.00 max_reward 1.00 SR: 89/94 SR: 89.00/94 # valid demos 94
|
| 234 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 94 Reward 1.00 max_reward 1.00 SR: 90/95 SR: 90.00/95 # valid demos 95
|
| 235 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 95 Reward 1.00 max_reward 1.00 SR: 91/96 SR: 91.00/96 # valid demos 96
|
| 236 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 96 Reward 1.00 max_reward 1.00 SR: 92/97 SR: 92.00/97 # valid demos 97
|
| 237 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 97 Reward 1.00 max_reward 1.00 SR: 93/98 SR: 93.00/98 # valid demos 98
|
| 238 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 98 Reward 1.00 max_reward 1.00 SR: 94/99 SR: 94.00/99 # valid demos 99
|
| 239 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 99 Reward 1.00 max_reward 1.00 SR: 95/100 SR: 95.00/100 # valid demos 100
|
| 240 |
+
|
| 241 |
+
bimanual_take_tray_out_of_oven variation success rates: {0: 95, 'mean': 0.95}
|
| 242 |
+
bimanual_take_tray_out_of_oven mean success rate: 0.95
|
| 243 |
+
[CoppeliaSim:loadinfo] done.
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bimanual_take_tray_out_of_oven": {
|
| 3 |
+
"0": 10,
|
| 4 |
+
"mean": 1.0
|
| 5 |
+
}
|
| 6 |
+
}
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval_after_official_ttm.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"bimanual_take_tray_out_of_oven": {
|
| 3 |
+
"0": 9,
|
| 4 |
+
"mean": 0.9
|
| 5 |
+
}
|
| 6 |
+
}
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/run.log
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Arguments:
|
| 2 |
+
Namespace(checkpoint='/workspace/models/3dfa_peract2/3dfa_peract2.pth', task='bimanual_take_tray_out_of_oven', max_tries=2, max_steps=25, headless=True, collision_checking=False, seed=0, data_dir=PosixPath('/workspace/data/3dfa/peract2_test_10'), dataset='Peract2_3dfront_3dwrist', image_size='256,256', output_file=PosixPath('/workspace/reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval.json'), model_type='denoise3d', bimanual=True, prediction_len=1, backbone='clip', fps_subsampling_factor=4, embedding_dim=120, num_attn_heads=8, num_vis_instr_attn_layers=3, num_history=3, num_shared_attn_layers=4, relative_action=False, rotation_format='quat_xyzw', denoise_timesteps=5, denoise_model='rectified_flow')
|
| 3 |
+
----------------------------------------------------------------------------------------------------
|
| 4 |
+
Loading model from /workspace/models/3dfa_peract2/3dfa_peract2.pth
|
| 5 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/huggingface_hub/file_download.py:1142: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 6 |
+
warnings.warn(
|
| 7 |
+
qt.qpa.plugin: Could not load the Qt platform plugin "xcb" in "/workspace/assets/coppeliasim_v4_1_0" even though it was found.
|
| 8 |
+
This application failed to start because no Qt platform plugin could be initialized. Reinstalling the application may fix this problem.
|
| 9 |
+
|
| 10 |
+
Available platform plugins are: eglfs, linuxfb, minimal, minimalegl, offscreen, vnc, webgl, xcb.
|
| 11 |
+
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_minimalegl.log
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Arguments:
|
| 2 |
+
Namespace(checkpoint='/workspace/models/3dfa_peract2/3dfa_peract2.pth', task='bimanual_take_tray_out_of_oven', max_tries=2, max_steps=25, headless=True, collision_checking=False, seed=0, data_dir=PosixPath('/workspace/data/3dfa/peract2_test_10'), dataset='Peract2_3dfront_3dwrist', image_size='256,256', output_file=PosixPath('/workspace/reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval.json'), model_type='denoise3d', bimanual=True, prediction_len=1, backbone='clip', fps_subsampling_factor=4, embedding_dim=120, num_attn_heads=8, num_vis_instr_attn_layers=3, num_history=3, num_shared_attn_layers=4, relative_action=False, rotation_format='quat_xyzw', denoise_timesteps=5, denoise_model='rectified_flow')
|
| 3 |
+
----------------------------------------------------------------------------------------------------
|
| 4 |
+
Loading model from /workspace/models/3dfa_peract2/3dfa_peract2.pth
|
| 5 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/huggingface_hub/file_download.py:1142: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 6 |
+
warnings.warn(
|
| 7 |
+
qt.qpa.plugin: Could not load the Qt platform plugin "minimalegl" in "/workspace/assets/coppeliasim_v4_1_0" even though it was found.
|
| 8 |
+
This application failed to start because no Qt platform plugin could be initialized. Reinstalling the application may fix this problem.
|
| 9 |
+
|
| 10 |
+
Available platform plugins are: eglfs, linuxfb, minimal, minimalegl, offscreen, vnc, webgl, xcb.
|
| 11 |
+
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_offscreen.log
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Arguments:
|
| 2 |
+
Namespace(checkpoint='/workspace/models/3dfa_peract2/3dfa_peract2.pth', task='bimanual_take_tray_out_of_oven', max_tries=2, max_steps=25, headless=True, collision_checking=False, seed=0, data_dir=PosixPath('/workspace/data/3dfa/peract2_test_10'), dataset='Peract2_3dfront_3dwrist', image_size='256,256', output_file=PosixPath('/workspace/reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval.json'), model_type='denoise3d', bimanual=True, prediction_len=1, backbone='clip', fps_subsampling_factor=4, embedding_dim=120, num_attn_heads=8, num_vis_instr_attn_layers=3, num_history=3, num_shared_attn_layers=4, relative_action=False, rotation_format='quat_xyzw', denoise_timesteps=5, denoise_model='rectified_flow')
|
| 3 |
+
----------------------------------------------------------------------------------------------------
|
| 4 |
+
Loading model from /workspace/models/3dfa_peract2/3dfa_peract2.pth
|
| 5 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/huggingface_hub/file_download.py:1142: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 6 |
+
warnings.warn(
|
| 7 |
+
QStandardPaths: XDG_RUNTIME_DIR not set, defaulting to '/tmp/runtime-root'
|
| 8 |
+
Traceback (most recent call last):
|
| 9 |
+
File "/workspace/third_party/3d_flowmatch_actor/online_evaluation_rlbench/evaluate_policy.py", line 143, in <module>
|
| 10 |
+
var_success_rates = env.evaluate_task_on_multiple_variations(
|
| 11 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 12 |
+
File "/workspace/third_party/3d_flowmatch_actor/online_evaluation_rlbench/utils_with_bimanual_rlbench.py", line 196, in evaluate_task_on_multiple_variations
|
| 13 |
+
task = self.env.get_task(task_type)
|
| 14 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 15 |
+
File "/workspace/third_party/AnyBimanual/third_party/RLBench/rlbench/environment.py", line 178, in get_task
|
| 16 |
+
return TaskEnvironment(
|
| 17 |
+
^^^^^^^^^^^^^^^^
|
| 18 |
+
File "/workspace/third_party/AnyBimanual/third_party/RLBench/rlbench/task_environment.py", line 52, in __init__
|
| 19 |
+
self._scene.load(self._task)
|
| 20 |
+
File "/workspace/third_party/AnyBimanual/third_party/RLBench/rlbench/backend/scene.py", line 112, in load
|
| 21 |
+
task.load() # Load the task in to the scene
|
| 22 |
+
^^^^^^^^^^^
|
| 23 |
+
File "/workspace/third_party/AnyBimanual/third_party/RLBench/rlbench/backend/task.py", line 314, in load
|
| 24 |
+
raise FileNotFoundError(
|
| 25 |
+
FileNotFoundError: The following is not a valid task .ttm file: /workspace/third_party/AnyBimanual/third_party/RLBench/rlbench/backend/../task_ttms/bimanual_take_tray_out_of_oven.ttm
|
| 26 |
+
QMutex: destroying locked mutex
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_offscreen2.log
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Arguments:
|
| 2 |
+
Namespace(checkpoint='/workspace/models/3dfa_peract2/3dfa_peract2.pth', task='bimanual_take_tray_out_of_oven', max_tries=2, max_steps=25, headless=True, collision_checking=False, seed=0, data_dir=PosixPath('/workspace/data/3dfa/peract2_test_10'), dataset='Peract2_3dfront_3dwrist', image_size='256,256', output_file=PosixPath('/workspace/reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval.json'), model_type='denoise3d', bimanual=True, prediction_len=1, backbone='clip', fps_subsampling_factor=4, embedding_dim=120, num_attn_heads=8, num_vis_instr_attn_layers=3, num_history=3, num_shared_attn_layers=4, relative_action=False, rotation_format='quat_xyzw', denoise_timesteps=5, denoise_model='rectified_flow')
|
| 3 |
+
----------------------------------------------------------------------------------------------------
|
| 4 |
+
Loading model from /workspace/models/3dfa_peract2/3dfa_peract2.pth
|
| 5 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/huggingface_hub/file_download.py:1142: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 6 |
+
warnings.warn(
|
| 7 |
+
Traceback (most recent call last):
|
| 8 |
+
File "/workspace/third_party/3d_flowmatch_actor/online_evaluation_rlbench/evaluate_policy.py", line 143, in <module>
|
| 9 |
+
var_success_rates = env.evaluate_task_on_multiple_variations(
|
| 10 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 11 |
+
File "/workspace/third_party/3d_flowmatch_actor/online_evaluation_rlbench/utils_with_bimanual_rlbench.py", line 196, in evaluate_task_on_multiple_variations
|
| 12 |
+
task = self.env.get_task(task_type)
|
| 13 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 14 |
+
File "/workspace/third_party/AnyBimanual/third_party/RLBench/rlbench/environment.py", line 178, in get_task
|
| 15 |
+
return TaskEnvironment(
|
| 16 |
+
^^^^^^^^^^^^^^^^
|
| 17 |
+
File "/workspace/third_party/AnyBimanual/third_party/RLBench/rlbench/task_environment.py", line 52, in __init__
|
| 18 |
+
self._scene.load(self._task)
|
| 19 |
+
File "/workspace/third_party/AnyBimanual/third_party/RLBench/rlbench/backend/scene.py", line 115, in load
|
| 20 |
+
task.get_base().set_position(self._workspace.get_position())
|
| 21 |
+
^^^^^^^^^^^^^^^
|
| 22 |
+
File "/workspace/third_party/AnyBimanual/third_party/RLBench/rlbench/backend/task.py", line 341, in get_base
|
| 23 |
+
self._base_object = Dummy(self.name)
|
| 24 |
+
^^^^^^^^^^^^^^^^
|
| 25 |
+
File "/workspace/third_party/AnyBimanual/third_party/PyRep/pyrep/objects/object.py", line 24, in __init__
|
| 26 |
+
self._handle = sim.simGetObjectHandle(name_or_handle)
|
| 27 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 28 |
+
File "/workspace/third_party/AnyBimanual/third_party/PyRep/pyrep/backend/sim.py", line 100, in simGetObjectHandle
|
| 29 |
+
raise RuntimeError('Handle %s does not exist.' % objectName)
|
| 30 |
+
RuntimeError: Handle bimanual_take_tray_out_of_oven does not exist.
|
| 31 |
+
QMutex: destroying locked mutex
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_offscreen3.log
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 0 |
0%| | 0/1 [00:00<?, ?it/s]WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Arguments:
|
| 2 |
+
Namespace(checkpoint='/workspace/models/3dfa_peract2/3dfa_peract2.pth', task='bimanual_take_tray_out_of_oven', max_tries=2, max_steps=25, headless=True, collision_checking=False, seed=0, data_dir=PosixPath('/workspace/data/3dfa/peract2_test_10'), dataset='Peract2_3dfront_3dwrist', image_size='256,256', output_file=PosixPath('/workspace/reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval.json'), model_type='denoise3d', bimanual=True, prediction_len=1, backbone='clip', fps_subsampling_factor=4, embedding_dim=120, num_attn_heads=8, num_vis_instr_attn_layers=3, num_history=3, num_shared_attn_layers=4, relative_action=False, rotation_format='quat_xyzw', denoise_timesteps=5, denoise_model='rectified_flow')
|
| 3 |
+
----------------------------------------------------------------------------------------------------
|
| 4 |
+
Loading model from /workspace/models/3dfa_peract2/3dfa_peract2.pth
|
| 5 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/huggingface_hub/file_download.py:1142: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 6 |
+
warnings.warn(
|
| 7 |
+
WARNING:root:not sure how _robot_shapes are used is used.
|
| 8 |
+
|
| 9 |
0%| | 0/1 [00:00<?, ?it/s]WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 10 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task coordinated_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 11 |
+
ERROR:root:robot is in collision
|
| 12 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 13 |
+
This plugin does not support createPlatformOpenGLContext!
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
Error: signal 11:
|
| 17 |
+
|
| 18 |
+
/workspace/assets/coppeliasim_v4_1_0/libcoppeliaSim.so.1(_Z11_segHandleri+0x30)[0x73db22b0aae0]
|
| 19 |
+
/lib/x86_64-linux-gnu/libc.so.6(+0x42520)[0x73df84c74520]
|
| 20 |
+
/workspace/assets/coppeliasim_v4_1_0/libQt5Gui.so.5(_ZNK14QOpenGLContext10shareGroupEv+0x0)[0x73db20d91060]
|
| 21 |
+
/workspace/assets/coppeliasim_v4_1_0/libQt5Gui.so.5(_ZN16QOpenGLFunctions25initializeOpenGLFunctionsEv+0x4b)[0x73db2105da4b]
|
| 22 |
+
/workspace/assets/coppeliasim_v4_1_0/libQt5Gui.so.5(_ZN24QOpenGLFramebufferObjectC1EiiNS_10AttachmentEjj+0xc8)[0x73db21061a18]
|
| 23 |
+
/workspace/assets/coppeliasim_v4_1_0/libsimExtOpenGL3Renderer.so(_ZN18CFrameBufferObjectC2Eii+0x5a)[0x73de33e5424a]
|
| 24 |
+
/workspace/assets/coppeliasim_v4_1_0/libsimExtOpenGL3Renderer.so(_ZN16COpenglOffscreenC1EiiiP14QOpenGLContext+0x72)[0x73de33e54602]
|
| 25 |
+
/workspace/assets/coppeliasim_v4_1_0/libsimExtOpenGL3Renderer.so(_Z21executeRenderCommandsbiPv+0x2550)[0x73de33e52b90]
|
| 26 |
+
/workspace/assets/coppeliasim_v4_1_0/libcoppeliaSim.so.1(_ZN16CPluginContainer11extRendererEiPv+0x19)[0x73db22cd4249]
|
| 27 |
+
/workspace/assets/coppeliasim_v4_1_0/libcoppeliaSim.so.1(_ZN13CVisionSensor24_extRenderer_prepareViewEi+0x347)[0x73db229db107]
|
| 28 |
+
QMutex: destroying locked mutex
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_xvfb.log
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Arguments:
|
| 2 |
+
Namespace(checkpoint='/workspace/models/3dfa_peract2/3dfa_peract2.pth', task='bimanual_take_tray_out_of_oven', max_tries=2, max_steps=25, headless=True, collision_checking=False, seed=0, data_dir=PosixPath('/workspace/data/3dfa/peract2_test_10'), dataset='Peract2_3dfront_3dwrist', image_size='256,256', output_file=PosixPath('/workspace/reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval.json'), model_type='denoise3d', bimanual=True, prediction_len=1, backbone='clip', fps_subsampling_factor=4, embedding_dim=120, num_attn_heads=8, num_vis_instr_attn_layers=3, num_history=3, num_shared_attn_layers=4, relative_action=False, rotation_format='quat_xyzw', denoise_timesteps=5, denoise_model='rectified_flow')
|
| 3 |
+
----------------------------------------------------------------------------------------------------
|
| 4 |
+
Loading model from /workspace/models/3dfa_peract2/3dfa_peract2.pth
|
| 5 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/huggingface_hub/file_download.py:1142: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 6 |
+
warnings.warn(
|
| 7 |
+
qt.qpa.plugin: Could not load the Qt platform plugin "xcb" in "/workspace/assets/coppeliasim_v4_1_0" even though it was found.
|
| 8 |
+
This application failed to start because no Qt platform plugin could be initialized. Reinstalling the application may fix this problem.
|
| 9 |
+
|
| 10 |
+
Available platform plugins are: eglfs, linuxfb, minimal, minimalegl, offscreen, vnc, webgl, xcb.
|
| 11 |
+
|
| 12 |
+
Aborted (core dumped)
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_xvfb2.log
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 0 |
0%| | 0/1 [00:00<?, ?it/s]WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
0%| | 0/1 [00:08<?, ?it/s]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Arguments:
|
| 2 |
+
Namespace(checkpoint='/workspace/models/3dfa_peract2/3dfa_peract2.pth', task='bimanual_take_tray_out_of_oven', max_tries=2, max_steps=25, headless=True, collision_checking=False, seed=0, data_dir=PosixPath('/workspace/data/3dfa/peract2_test_10'), dataset='Peract2_3dfront_3dwrist', image_size='256,256', output_file=PosixPath('/workspace/reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval.json'), model_type='denoise3d', bimanual=True, prediction_len=1, backbone='clip', fps_subsampling_factor=4, embedding_dim=120, num_attn_heads=8, num_vis_instr_attn_layers=3, num_history=3, num_shared_attn_layers=4, relative_action=False, rotation_format='quat_xyzw', denoise_timesteps=5, denoise_model='rectified_flow')
|
| 3 |
+
----------------------------------------------------------------------------------------------------
|
| 4 |
+
Loading model from /workspace/models/3dfa_peract2/3dfa_peract2.pth
|
| 5 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/huggingface_hub/file_download.py:1142: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 6 |
+
warnings.warn(
|
| 7 |
+
WARNING:root:not sure how _robot_shapes are used is used.
|
| 8 |
+
|
| 9 |
0%| | 0/1 [00:00<?, ?it/s]WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 10 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task coordinated_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 11 |
+
ERROR:root:robot is in collision
|
| 12 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 13 |
+
|
| 14 |
0%| | 0/1 [00:08<?, ?it/s]
|
| 15 |
+
Traceback (most recent call last):
|
| 16 |
+
File "/workspace/third_party/3d_flowmatch_actor/online_evaluation_rlbench/evaluate_policy.py", line 143, in <module>
|
| 17 |
+
var_success_rates = env.evaluate_task_on_multiple_variations(
|
| 18 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 19 |
+
File "/workspace/third_party/3d_flowmatch_actor/online_evaluation_rlbench/utils_with_bimanual_rlbench.py", line 211, in evaluate_task_on_multiple_variations
|
| 20 |
+
self._evaluate_task_on_one_variation(
|
| 21 |
+
File "/workspace/envs/anybi311/lib/python3.11/site-packages/torch/utils/_contextlib.py", line 116, in decorate_context
|
| 22 |
+
return func(*args, **kwargs)
|
| 23 |
+
^^^^^^^^^^^^^^^^^^^^^
|
| 24 |
+
File "/workspace/third_party/3d_flowmatch_actor/online_evaluation_rlbench/utils_with_bimanual_rlbench.py", line 298, in _evaluate_task_on_one_variation
|
| 25 |
+
obs, reward, _ = move(action, collision_checking=False)
|
| 26 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 27 |
+
File "/workspace/third_party/3d_flowmatch_actor/online_evaluation_rlbench/utils_with_bimanual_rlbench.py", line 59, in __call__
|
| 28 |
+
obs, reward, terminate = self._task.step(action_collision, ret_obs=True)
|
| 29 |
+
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
| 30 |
+
TypeError: TaskEnvironment.step() got an unexpected keyword argument 'ret_obs'
|
| 31 |
+
QMutex: destroying locked mutex
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_xvfb3.log
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 0 |
0%| | 0/1 [00:00<?, ?it/s]WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Arguments:
|
| 2 |
+
Namespace(checkpoint='/workspace/models/3dfa_peract2/3dfa_peract2.pth', task='bimanual_take_tray_out_of_oven', max_tries=2, max_steps=25, headless=True, collision_checking=False, seed=0, data_dir=PosixPath('/workspace/data/3dfa/peract2_test_10'), dataset='Peract2_3dfront_3dwrist', image_size='256,256', output_file=PosixPath('/workspace/reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval.json'), model_type='denoise3d', bimanual=True, prediction_len=1, backbone='clip', fps_subsampling_factor=4, embedding_dim=120, num_attn_heads=8, num_vis_instr_attn_layers=3, num_history=3, num_shared_attn_layers=4, relative_action=False, rotation_format='quat_xyzw', denoise_timesteps=5, denoise_model='rectified_flow')
|
| 3 |
+
----------------------------------------------------------------------------------------------------
|
| 4 |
+
Loading model from /workspace/models/3dfa_peract2/3dfa_peract2.pth
|
| 5 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/huggingface_hub/file_download.py:1142: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 6 |
+
warnings.warn(
|
| 7 |
+
WARNING:root:not sure how _robot_shapes are used is used.
|
| 8 |
+
|
| 9 |
0%| | 0/1 [00:00<?, ?it/s]WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 10 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task coordinated_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 11 |
+
ERROR:root:robot is in collision
|
| 12 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 13 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 14 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task coordinated_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 15 |
+
ERROR:root:robot is in collision
|
| 16 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 17 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 18 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task coordinated_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 19 |
+
ERROR:root:robot is in collision
|
| 20 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 21 |
+
ERROR:root:robot is in collision
|
| 22 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 23 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 24 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task coordinated_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 25 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 26 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task coordinated_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 27 |
+
ERROR:root:robot is in collision
|
| 28 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 29 |
+
ERROR:root:robot is in collision
|
| 30 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 31 |
+
ERROR:root:robot is in collision
|
| 32 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 33 |
+
|
| 34 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 0 Reward 1.00 max_reward 1.00 SR: 1/1 SR: 1.00/1 # valid demos 1
|
| 35 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 1 Reward 1.00 max_reward 1.00 SR: 2/2 SR: 2.00/2 # valid demos 2
|
| 36 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 2 Reward 1.00 max_reward 1.00 SR: 3/3 SR: 3.00/3 # valid demos 3
|
| 37 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 3 Reward 1.00 max_reward 1.00 SR: 4/4 SR: 4.00/4 # valid demos 4
|
| 38 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 4 Reward 1.00 max_reward 1.00 SR: 5/5 SR: 5.00/5 # valid demos 5
|
| 39 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 5 Reward 1.00 max_reward 1.00 SR: 6/6 SR: 6.00/6 # valid demos 6
|
| 40 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 6 Reward 1.00 max_reward 1.00 SR: 7/7 SR: 7.00/7 # valid demos 7
|
| 41 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 7 Reward 1.00 max_reward 1.00 SR: 8/8 SR: 8.00/8 # valid demos 8
|
| 42 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 8 Reward 1.00 max_reward 1.00 SR: 9/9 SR: 9.00/9 # valid demos 9
|
| 43 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 9 Reward 1.00 max_reward 1.00 SR: 10/10 SR: 10.00/10 # valid demos 10
|
| 44 |
+
|
| 45 |
+
bimanual_take_tray_out_of_oven variation success rates: {0: 10, 'mean': 1.0}
|
| 46 |
+
bimanual_take_tray_out_of_oven mean success rate: 1.0
|
| 47 |
+
[CoppeliaSim:loadinfo] done.
|
reports/3dfa_peract2_take_tray_out_of_oven_subset10/run_xvfb_official_ttm.log
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 0 |
0%| | 0/1 [00:00<?, ?it/s]WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Arguments:
|
| 2 |
+
Namespace(checkpoint='/workspace/models/3dfa_peract2/3dfa_peract2.pth', task='bimanual_take_tray_out_of_oven', max_tries=2, max_steps=25, headless=True, collision_checking=False, seed=0, data_dir=PosixPath('/workspace/data/3dfa/peract2_test_10'), dataset='Peract2_3dfront_3dwrist', image_size='256,256', output_file=PosixPath('/workspace/reports/3dfa_peract2_take_tray_out_of_oven_subset10/eval_after_official_ttm.json'), model_type='denoise3d', bimanual=True, prediction_len=1, backbone='clip', fps_subsampling_factor=4, embedding_dim=120, num_attn_heads=8, num_vis_instr_attn_layers=3, num_history=3, num_shared_attn_layers=4, relative_action=False, rotation_format='quat_xyzw', denoise_timesteps=5, denoise_model='rectified_flow')
|
| 3 |
+
----------------------------------------------------------------------------------------------------
|
| 4 |
+
Loading model from /workspace/models/3dfa_peract2/3dfa_peract2.pth
|
| 5 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/huggingface_hub/file_download.py:1142: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
|
| 6 |
+
warnings.warn(
|
| 7 |
+
WARNING:root:not sure how _robot_shapes are used is used.
|
| 8 |
+
|
| 9 |
0%| | 0/1 [00:00<?, ?it/s]WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 10 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 11 |
+
ERROR:root:robot is in collision
|
| 12 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 13 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 14 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 15 |
+
ERROR:root:robot is in collision
|
| 16 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 17 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(True, '')
|
| 18 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (True, '')).
|
| 19 |
+
ERROR:root:robot is in collision
|
| 20 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 21 |
+
ERROR:root:robot is in collision
|
| 22 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 23 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint3 - DualPanda') left=(True, '')
|
| 24 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint3 - DualPanda'), (True, '')).
|
| 25 |
+
WARNING:root:Waypoints are not reachable right=(False, 'waypoint0 - DualPanda') left=(False, 'waypoint5 - DualPanda')
|
| 26 |
+
ERROR:root:Error when checking waypoints. Exception is: Error in task bimanual_take_tray_out_of_oven. Infeasible episode. Can't reach waypoint ((False, 'waypoint0 - DualPanda'), (False, 'waypoint5 - DualPanda')).
|
| 27 |
+
ERROR:root:robot is in collision
|
| 28 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 29 |
+
ERROR:root:robot is in collision
|
| 30 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 31 |
+
ERROR:root:robot is in collision
|
| 32 |
+
ERROR:root:Error when checking waypoints. Exception is:
|
| 33 |
+
|
| 34 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 0 Reward 1.00 max_reward 1.00 SR: 1/1 SR: 1.00/1 # valid demos 1
|
| 35 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 1 Reward 1.00 max_reward 1.00 SR: 2/2 SR: 2.00/2 # valid demos 2
|
| 36 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 2 Reward 1.00 max_reward 1.00 SR: 3/3 SR: 3.00/3 # valid demos 3
|
| 37 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 3 Reward 1.00 max_reward 1.00 SR: 4/4 SR: 4.00/4 # valid demos 4
|
| 38 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 4 Reward 1.00 max_reward 1.00 SR: 5/5 SR: 5.00/5 # valid demos 5
|
| 39 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 5 Reward 1.00 max_reward 1.00 SR: 6/6 SR: 6.00/6 # valid demos 6
|
| 40 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 6 Reward 1.00 max_reward 1.00 SR: 7/7 SR: 7.00/7 # valid demos 7
|
| 41 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 7 Reward 1.00 max_reward 1.00 SR: 8/8 SR: 8.00/8 # valid demos 8
|
| 42 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 8 Reward 1.00 max_reward 1.00 SR: 9/9 SR: 9.00/9 # valid demos 9
|
| 43 |
+
bimanual_take_tray_out_of_oven Variation 0 Demo 9 Reward 0.00 max_reward 0.00 SR: 9/10 SR: 9.00/10 # valid demos 10
|
| 44 |
+
|
| 45 |
+
bimanual_take_tray_out_of_oven variation success rates: {0: 9, 'mean': 0.9}
|
| 46 |
+
bimanual_take_tray_out_of_oven mean success rate: 0.9
|
| 47 |
+
[CoppeliaSim:loadinfo] done.
|
reports/anybimanual_eval_logs/bimanual_sweep_to_dustpan_ep1_demo.log
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 2 |
+
The version_base parameter is not specified.
|
| 3 |
+
Please specify a compatability version level, or None.
|
| 4 |
+
Will assume defaults for version 1.1
|
| 5 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 6 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/_internal/defaults_list.py:251: UserWarning: In 'eval': Defaults list is missing `_self_`. See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/default_composition_order for more information
|
| 7 |
+
warnings.warn(msg, UserWarning)
|
| 8 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/core/default_element.py:124: UserWarning: In 'method/PERACT_BC': Usage of deprecated keyword in package header '# @package _group_'.
|
| 9 |
+
See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/changes_to_package_header for more information
|
| 10 |
+
deprecation_warning(
|
| 11 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/_internal/hydra.py:119: UserWarning: Future Hydra versions will no longer change working directory at job runtime by default.
|
| 12 |
+
See https://hydra.cc/docs/1.2/upgrades/1.1_to_1.2/changes_to_job_working_dir/ for more information.
|
| 13 |
+
ret = run_job(
|
| 14 |
+
[2026-04-02 22:20:49,219][root][INFO] -
|
| 15 |
+
method:
|
| 16 |
+
name: PERACT_BC
|
| 17 |
+
agent_type: leader_follower
|
| 18 |
+
robot_name: bimanual
|
| 19 |
+
image_crop_size: 64
|
| 20 |
+
bounds_offset:
|
| 21 |
+
- 0.15
|
| 22 |
+
voxel_sizes:
|
| 23 |
+
- 100
|
| 24 |
+
include_prev_layer: false
|
| 25 |
+
num_latents: 2048
|
| 26 |
+
latent_dim: 512
|
| 27 |
+
transformer_depth: 6
|
| 28 |
+
transformer_iterations: 1
|
| 29 |
+
cross_heads: 1
|
| 30 |
+
cross_dim_head: 64
|
| 31 |
+
latent_heads: 8
|
| 32 |
+
latent_dim_head: 64
|
| 33 |
+
pos_encoding_with_lang: true
|
| 34 |
+
conv_downsample: true
|
| 35 |
+
lang_fusion_type: seq
|
| 36 |
+
voxel_patch_size: 5
|
| 37 |
+
voxel_patch_stride: 5
|
| 38 |
+
final_dim: 64
|
| 39 |
+
low_dim_size: 4
|
| 40 |
+
input_dropout: 0.1
|
| 41 |
+
attn_dropout: 0.1
|
| 42 |
+
decoder_dropout: 0.0
|
| 43 |
+
lr: 0.0005
|
| 44 |
+
lr_scheduler: false
|
| 45 |
+
num_warmup_steps: 3000
|
| 46 |
+
optimizer: lamb
|
| 47 |
+
lambda_weight_l2: 1.0e-06
|
| 48 |
+
trans_loss_weight: 1.0
|
| 49 |
+
rot_loss_weight: 1.0
|
| 50 |
+
grip_loss_weight: 1.0
|
| 51 |
+
collision_loss_weight: 1.0
|
| 52 |
+
rotation_resolution: 5
|
| 53 |
+
activation: lrelu
|
| 54 |
+
norm: None
|
| 55 |
+
crop_augmentation: true
|
| 56 |
+
transform_augmentation:
|
| 57 |
+
apply_se3: true
|
| 58 |
+
aug_xyz:
|
| 59 |
+
- 0.125
|
| 60 |
+
- 0.125
|
| 61 |
+
- 0.125
|
| 62 |
+
aug_rpy:
|
| 63 |
+
- 0.0
|
| 64 |
+
- 0.0
|
| 65 |
+
- 45.0
|
| 66 |
+
aug_rot_resolution: ${method.rotation_resolution}
|
| 67 |
+
demo_augmentation: true
|
| 68 |
+
demo_augmentation_every_n: 10
|
| 69 |
+
no_skip_connection: false
|
| 70 |
+
no_perceiver: false
|
| 71 |
+
no_language: false
|
| 72 |
+
keypoint_method: heuristic
|
| 73 |
+
rlbench:
|
| 74 |
+
task_name: per2+ab
|
| 75 |
+
tasks:
|
| 76 |
+
- bimanual_sweep_to_dustpan
|
| 77 |
+
demo_path: /workspace/data/rlbench2
|
| 78 |
+
episode_length: 25
|
| 79 |
+
cameras:
|
| 80 |
+
- over_shoulder_left
|
| 81 |
+
- over_shoulder_right
|
| 82 |
+
- overhead
|
| 83 |
+
- wrist_right
|
| 84 |
+
- wrist_left
|
| 85 |
+
- front
|
| 86 |
+
camera_resolution:
|
| 87 |
+
- 256
|
| 88 |
+
- 256
|
| 89 |
+
scene_bounds:
|
| 90 |
+
- -0.3
|
| 91 |
+
- -0.5
|
| 92 |
+
- 0.6
|
| 93 |
+
- 0.7
|
| 94 |
+
- 0.5
|
| 95 |
+
- 1.6
|
| 96 |
+
include_lang_goal_in_obs: true
|
| 97 |
+
time_in_state: true
|
| 98 |
+
headless: true
|
| 99 |
+
gripper_mode: BimanualDiscrete
|
| 100 |
+
arm_action_mode: BimanualEndEffectorPoseViaPlanning
|
| 101 |
+
action_mode: BimanualMoveArmThenGripper
|
| 102 |
+
framework:
|
| 103 |
+
tensorboard_logging: true
|
| 104 |
+
csv_logging: true
|
| 105 |
+
gpu: 0
|
| 106 |
+
logdir: /workspace/runs/anybimanual/task_eval_sweep_demo1
|
| 107 |
+
start_seed: 0
|
| 108 |
+
record_every_n: 5
|
| 109 |
+
eval_envs: 1
|
| 110 |
+
eval_from_eps_number: 0
|
| 111 |
+
eval_episodes: 1
|
| 112 |
+
eval_type: 60000
|
| 113 |
+
eval_save_metrics: true
|
| 114 |
+
cinematic_recorder:
|
| 115 |
+
enabled: false
|
| 116 |
+
camera_resolution:
|
| 117 |
+
- 1280
|
| 118 |
+
- 720
|
| 119 |
+
fps: 30
|
| 120 |
+
rotate_speed: 0.005
|
| 121 |
+
save_path: /tmp/videos/
|
| 122 |
+
|
| 123 |
+
[2026-04-02 22:20:49,227][root][INFO] - Using env device cuda:0.
|
| 124 |
+
[2026-04-02 22:20:49,231][root][INFO] - Evaluating seed 0.
|
| 125 |
+
[2026-04-02 22:20:49,232][root][INFO] - Using method PERACT_BC with type leader_follower
|
| 126 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 127 |
+
The version_base parameter is not specified.
|
| 128 |
+
Please specify a compatability version level, or None.
|
| 129 |
+
Will assume defaults for version 1.1
|
| 130 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 131 |
+
Weight: [60000]
|
| 132 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 133 |
+
The version_base parameter is not specified.
|
| 134 |
+
Please specify a compatability version level, or None.
|
| 135 |
+
Will assume defaults for version 1.1
|
| 136 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 137 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 138 |
+
The version_base parameter is not specified.
|
| 139 |
+
Please specify a compatability version level, or None.
|
| 140 |
+
Will assume defaults for version 1.1
|
| 141 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 142 |
+
[04/02/26 22:21:23] INFO INFO:root:eval_env: _independent_env_runner.py:130
|
| 143 |
+
Launching env.
|
| 144 |
+
INFO INFO:root:Agent _independent_env_runner.py:133
|
| 145 |
+
information:
|
| 146 |
+
INFO INFO:root:<yarr.agen _independent_env_runner.py:134
|
| 147 |
+
ts.agent.LeaderFollo
|
| 148 |
+
werAgent object at
|
| 149 |
+
0x77e49d22f910>
|
| 150 |
+
QStandardPaths: XDG_RUNTIME_DIR not set, defaulting to '/tmp/runtime-root'
|
| 151 |
+
[04/02/26 22:21:24] INFO INFO:root:Using dual panda robot environment.py:119
|
| 152 |
+
INFO INFO:root:Setting control arm_action_modes.py:79
|
| 153 |
+
mode for both robots
|
| 154 |
+
WARNING WARNING:root:not sure how task_environment.py:57
|
| 155 |
+
_robot_shapes are used is
|
| 156 |
+
used.
|
| 157 |
+
INFO INFO:root:Evaluating _independent_env_runner.py:164
|
| 158 |
+
weight 60000
|
| 159 |
+
loaded weights from /workspace/runs/anybimanual/task_eval_sweep_demo1/per2+ab/PERACT_BC/seed0/weights/60000/checkpoint_peract_bc_leader_layer_0.pt
|
| 160 |
+
loaded weights from /workspace/runs/anybimanual/task_eval_sweep_demo1/per2+ab/PERACT_BC/seed0/weights/60000/checkpoint_peract_bc_follower_layer_0.pt
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
Starting episode 0,
|
| 164 |
+
seed 0.
|
| 165 |
+
eval_demo_seed: 0
|
| 166 |
+
[04/02/26 22:22:14] INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 167 |
+
left=5)
|
| 168 |
+
/workspace/third_party/AnyBimanual/third_party/YARR/yarr/utils/rollout_generator.py:94: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at /pytorch/torch/csrc/utils/tensor_new.cpp:254.)
|
| 169 |
+
prepped_data = {k: torch.tensor([v], device=self._env_device) for k, v in obs_history.items()}
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
Evaluating bimanual_sweep_to_dustpan | Episode 0 | Score: 0.0 | Lang Goal: sweep dirt to dustpan
|
| 174 |
+
Finished bimanual_sweep_to_dustpan | Final Score: unknown
|
| 175 |
+
|
| 176 |
+
[04/02/26 22:23:20] INFO INFO:root:Finished _independent_env_runner.py:292
|
| 177 |
+
evaluation.
|
| 178 |
+
[CoppeliaSim:loadinfo] done.
|
reports/anybimanual_eval_logs/bimanual_sweep_to_dustpan_ep5.log
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 2 |
+
The version_base parameter is not specified.
|
| 3 |
+
Please specify a compatability version level, or None.
|
| 4 |
+
Will assume defaults for version 1.1
|
| 5 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 6 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/_internal/defaults_list.py:251: UserWarning: In 'eval': Defaults list is missing `_self_`. See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/default_composition_order for more information
|
| 7 |
+
warnings.warn(msg, UserWarning)
|
| 8 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/core/default_element.py:124: UserWarning: In 'method/PERACT_BC': Usage of deprecated keyword in package header '# @package _group_'.
|
| 9 |
+
See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/changes_to_package_header for more information
|
| 10 |
+
deprecation_warning(
|
| 11 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/_internal/hydra.py:119: UserWarning: Future Hydra versions will no longer change working directory at job runtime by default.
|
| 12 |
+
See https://hydra.cc/docs/1.2/upgrades/1.1_to_1.2/changes_to_job_working_dir/ for more information.
|
| 13 |
+
ret = run_job(
|
| 14 |
+
[2026-04-02 21:00:56,975][root][INFO] -
|
| 15 |
+
method:
|
| 16 |
+
name: PERACT_BC
|
| 17 |
+
agent_type: leader_follower
|
| 18 |
+
robot_name: bimanual
|
| 19 |
+
image_crop_size: 64
|
| 20 |
+
bounds_offset:
|
| 21 |
+
- 0.15
|
| 22 |
+
voxel_sizes:
|
| 23 |
+
- 100
|
| 24 |
+
include_prev_layer: false
|
| 25 |
+
num_latents: 2048
|
| 26 |
+
latent_dim: 512
|
| 27 |
+
transformer_depth: 6
|
| 28 |
+
transformer_iterations: 1
|
| 29 |
+
cross_heads: 1
|
| 30 |
+
cross_dim_head: 64
|
| 31 |
+
latent_heads: 8
|
| 32 |
+
latent_dim_head: 64
|
| 33 |
+
pos_encoding_with_lang: true
|
| 34 |
+
conv_downsample: true
|
| 35 |
+
lang_fusion_type: seq
|
| 36 |
+
voxel_patch_size: 5
|
| 37 |
+
voxel_patch_stride: 5
|
| 38 |
+
final_dim: 64
|
| 39 |
+
low_dim_size: 4
|
| 40 |
+
input_dropout: 0.1
|
| 41 |
+
attn_dropout: 0.1
|
| 42 |
+
decoder_dropout: 0.0
|
| 43 |
+
lr: 0.0005
|
| 44 |
+
lr_scheduler: false
|
| 45 |
+
num_warmup_steps: 3000
|
| 46 |
+
optimizer: lamb
|
| 47 |
+
lambda_weight_l2: 1.0e-06
|
| 48 |
+
trans_loss_weight: 1.0
|
| 49 |
+
rot_loss_weight: 1.0
|
| 50 |
+
grip_loss_weight: 1.0
|
| 51 |
+
collision_loss_weight: 1.0
|
| 52 |
+
rotation_resolution: 5
|
| 53 |
+
activation: lrelu
|
| 54 |
+
norm: None
|
| 55 |
+
crop_augmentation: true
|
| 56 |
+
transform_augmentation:
|
| 57 |
+
apply_se3: true
|
| 58 |
+
aug_xyz:
|
| 59 |
+
- 0.125
|
| 60 |
+
- 0.125
|
| 61 |
+
- 0.125
|
| 62 |
+
aug_rpy:
|
| 63 |
+
- 0.0
|
| 64 |
+
- 0.0
|
| 65 |
+
- 45.0
|
| 66 |
+
aug_rot_resolution: ${method.rotation_resolution}
|
| 67 |
+
demo_augmentation: true
|
| 68 |
+
demo_augmentation_every_n: 10
|
| 69 |
+
no_skip_connection: false
|
| 70 |
+
no_perceiver: false
|
| 71 |
+
no_language: false
|
| 72 |
+
keypoint_method: heuristic
|
| 73 |
+
rlbench:
|
| 74 |
+
task_name: per2+ab
|
| 75 |
+
tasks:
|
| 76 |
+
- bimanual_sweep_to_dustpan
|
| 77 |
+
demo_path: /workspace/nonexistent_demos
|
| 78 |
+
episode_length: 25
|
| 79 |
+
cameras:
|
| 80 |
+
- over_shoulder_left
|
| 81 |
+
- over_shoulder_right
|
| 82 |
+
- overhead
|
| 83 |
+
- wrist_right
|
| 84 |
+
- wrist_left
|
| 85 |
+
- front
|
| 86 |
+
camera_resolution:
|
| 87 |
+
- 256
|
| 88 |
+
- 256
|
| 89 |
+
scene_bounds:
|
| 90 |
+
- -0.3
|
| 91 |
+
- -0.5
|
| 92 |
+
- 0.6
|
| 93 |
+
- 0.7
|
| 94 |
+
- 0.5
|
| 95 |
+
- 1.6
|
| 96 |
+
include_lang_goal_in_obs: true
|
| 97 |
+
time_in_state: true
|
| 98 |
+
headless: true
|
| 99 |
+
gripper_mode: BimanualDiscrete
|
| 100 |
+
arm_action_mode: BimanualEndEffectorPoseViaPlanning
|
| 101 |
+
action_mode: BimanualMoveArmThenGripper
|
| 102 |
+
framework:
|
| 103 |
+
tensorboard_logging: true
|
| 104 |
+
csv_logging: true
|
| 105 |
+
gpu: 0
|
| 106 |
+
logdir: /workspace/runs/anybimanual/peract_lf_eval
|
| 107 |
+
start_seed: 0
|
| 108 |
+
record_every_n: 5
|
| 109 |
+
eval_envs: 1
|
| 110 |
+
eval_from_eps_number: 0
|
| 111 |
+
eval_episodes: 5
|
| 112 |
+
eval_type: 60000
|
| 113 |
+
eval_save_metrics: true
|
| 114 |
+
cinematic_recorder:
|
| 115 |
+
enabled: false
|
| 116 |
+
camera_resolution:
|
| 117 |
+
- 1280
|
| 118 |
+
- 720
|
| 119 |
+
fps: 30
|
| 120 |
+
rotate_speed: 0.005
|
| 121 |
+
save_path: /tmp/videos/
|
| 122 |
+
|
| 123 |
+
[2026-04-02 21:00:56,984][root][INFO] - Using env device cuda:0.
|
| 124 |
+
[2026-04-02 21:00:56,989][root][INFO] - Evaluating seed 0.
|
| 125 |
+
[2026-04-02 21:00:56,989][root][INFO] - Using method PERACT_BC with type leader_follower
|
| 126 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 127 |
+
The version_base parameter is not specified.
|
| 128 |
+
Please specify a compatability version level, or None.
|
| 129 |
+
Will assume defaults for version 1.1
|
| 130 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 131 |
+
Weight: [60000]
|
| 132 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 133 |
+
The version_base parameter is not specified.
|
| 134 |
+
Please specify a compatability version level, or None.
|
| 135 |
+
Will assume defaults for version 1.1
|
| 136 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 137 |
+
[04/02/26 21:01:22] WARNING WARNING:root:Data set root does environment.py:87
|
| 138 |
+
not exist:
|
| 139 |
+
/workspace/nonexistent_demos.
|
| 140 |
+
Continuing without demos.
|
| 141 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 142 |
+
The version_base parameter is not specified.
|
| 143 |
+
Please specify a compatability version level, or None.
|
| 144 |
+
Will assume defaults for version 1.1
|
| 145 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 146 |
+
[04/02/26 21:01:35] INFO INFO:root:eval_env: _independent_env_runner.py:130
|
| 147 |
+
Launching env.
|
| 148 |
+
INFO INFO:root:Agent _independent_env_runner.py:133
|
| 149 |
+
information:
|
| 150 |
+
INFO INFO:root:<yarr.agen _independent_env_runner.py:134
|
| 151 |
+
ts.agent.LeaderFollo
|
| 152 |
+
werAgent object at
|
| 153 |
+
0x7404edc39690>
|
| 154 |
+
QStandardPaths: XDG_RUNTIME_DIR not set, defaulting to '/tmp/runtime-root'
|
| 155 |
+
[04/02/26 21:01:36] INFO INFO:root:Using dual panda robot environment.py:119
|
| 156 |
+
INFO INFO:root:Setting control arm_action_modes.py:79
|
| 157 |
+
mode for both robots
|
| 158 |
+
WARNING WARNING:root:not sure how task_environment.py:57
|
| 159 |
+
_robot_shapes are used is
|
| 160 |
+
used.
|
| 161 |
+
INFO INFO:root:Evaluating _independent_env_runner.py:164
|
| 162 |
+
weight 60000
|
| 163 |
+
loaded weights from /workspace/runs/anybimanual/peract_lf_eval/per2+ab/PERACT_BC/seed0/weights/60000/checkpoint_peract_bc_leader_layer_0.pt
|
| 164 |
+
loaded weights from /workspace/runs/anybimanual/peract_lf_eval/per2+ab/PERACT_BC/seed0/weights/60000/checkpoint_peract_bc_follower_layer_0.pt
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
Starting episode 0,
|
| 168 |
+
seed 0.
|
| 169 |
+
eval_demo_seed: 0
|
| 170 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 171 |
+
missing or unavailable ();
|
| 172 |
+
using env.reset() for eval.
|
| 173 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 174 |
+
left=5)
|
| 175 |
+
/workspace/third_party/AnyBimanual/third_party/YARR/yarr/utils/rollout_generator.py:94: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at /pytorch/torch/csrc/utils/tensor_new.cpp:254.)
|
| 176 |
+
prepped_data = {k: torch.tensor([v], device=self._env_device) for k, v in obs_history.items()}
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
[04/02/26 21:04:09] INFO INFO:root:eval_env: _independent_env_runner.py:191
|
| 180 |
+
Starting episode 1,
|
| 181 |
+
seed 1.
|
| 182 |
+
eval_demo_seed: 1
|
| 183 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 184 |
+
missing or unavailable ();
|
| 185 |
+
using env.reset() for eval.
|
| 186 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 187 |
+
left=5)
|
| 188 |
+
Terminated
|
| 189 |
+
The X11 connection broke (error 1). Did the X11 server die?
|
| 190 |
+
QMutex: destroying locked mutex
|
| 191 |
+
with name Dustpan_5 is
|
| 192 |
+
already grasped by left
|
| 193 |
+
robot
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
[04/02/26 21:03:50] INFO INFO:root:eval_env: _independent_env_runner.py:191
|
| 197 |
+
Starting episode 2,
|
| 198 |
+
seed 2.
|
| 199 |
+
eval_demo_seed: 2
|
| 200 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 201 |
+
missing or unavailable ();
|
| 202 |
+
using env.reset() for eval.
|
| 203 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 204 |
+
left=5)
|
| 205 |
+
[W402 21:04:37.404034653 CudaIPCTypes.cpp:16] Producer process has been terminated before all shared CUDA tensors released. See Note [Sharing CUDA tensors]
|
reports/anybimanual_eval_logs/bimanual_sweep_to_dustpan_ep5_clean.log
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 2 |
+
The version_base parameter is not specified.
|
| 3 |
+
Please specify a compatability version level, or None.
|
| 4 |
+
Will assume defaults for version 1.1
|
| 5 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 6 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/_internal/defaults_list.py:251: UserWarning: In 'eval': Defaults list is missing `_self_`. See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/default_composition_order for more information
|
| 7 |
+
warnings.warn(msg, UserWarning)
|
| 8 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/core/default_element.py:124: UserWarning: In 'method/PERACT_BC': Usage of deprecated keyword in package header '# @package _group_'.
|
| 9 |
+
See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/changes_to_package_header for more information
|
| 10 |
+
deprecation_warning(
|
| 11 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/_internal/hydra.py:119: UserWarning: Future Hydra versions will no longer change working directory at job runtime by default.
|
| 12 |
+
See https://hydra.cc/docs/1.2/upgrades/1.1_to_1.2/changes_to_job_working_dir/ for more information.
|
| 13 |
+
ret = run_job(
|
| 14 |
+
[2026-04-02 21:05:04,333][root][INFO] -
|
| 15 |
+
method:
|
| 16 |
+
name: PERACT_BC
|
| 17 |
+
agent_type: leader_follower
|
| 18 |
+
robot_name: bimanual
|
| 19 |
+
image_crop_size: 64
|
| 20 |
+
bounds_offset:
|
| 21 |
+
- 0.15
|
| 22 |
+
voxel_sizes:
|
| 23 |
+
- 100
|
| 24 |
+
include_prev_layer: false
|
| 25 |
+
num_latents: 2048
|
| 26 |
+
latent_dim: 512
|
| 27 |
+
transformer_depth: 6
|
| 28 |
+
transformer_iterations: 1
|
| 29 |
+
cross_heads: 1
|
| 30 |
+
cross_dim_head: 64
|
| 31 |
+
latent_heads: 8
|
| 32 |
+
latent_dim_head: 64
|
| 33 |
+
pos_encoding_with_lang: true
|
| 34 |
+
conv_downsample: true
|
| 35 |
+
lang_fusion_type: seq
|
| 36 |
+
voxel_patch_size: 5
|
| 37 |
+
voxel_patch_stride: 5
|
| 38 |
+
final_dim: 64
|
| 39 |
+
low_dim_size: 4
|
| 40 |
+
input_dropout: 0.1
|
| 41 |
+
attn_dropout: 0.1
|
| 42 |
+
decoder_dropout: 0.0
|
| 43 |
+
lr: 0.0005
|
| 44 |
+
lr_scheduler: false
|
| 45 |
+
num_warmup_steps: 3000
|
| 46 |
+
optimizer: lamb
|
| 47 |
+
lambda_weight_l2: 1.0e-06
|
| 48 |
+
trans_loss_weight: 1.0
|
| 49 |
+
rot_loss_weight: 1.0
|
| 50 |
+
grip_loss_weight: 1.0
|
| 51 |
+
collision_loss_weight: 1.0
|
| 52 |
+
rotation_resolution: 5
|
| 53 |
+
activation: lrelu
|
| 54 |
+
norm: None
|
| 55 |
+
crop_augmentation: true
|
| 56 |
+
transform_augmentation:
|
| 57 |
+
apply_se3: true
|
| 58 |
+
aug_xyz:
|
| 59 |
+
- 0.125
|
| 60 |
+
- 0.125
|
| 61 |
+
- 0.125
|
| 62 |
+
aug_rpy:
|
| 63 |
+
- 0.0
|
| 64 |
+
- 0.0
|
| 65 |
+
- 45.0
|
| 66 |
+
aug_rot_resolution: ${method.rotation_resolution}
|
| 67 |
+
demo_augmentation: true
|
| 68 |
+
demo_augmentation_every_n: 10
|
| 69 |
+
no_skip_connection: false
|
| 70 |
+
no_perceiver: false
|
| 71 |
+
no_language: false
|
| 72 |
+
keypoint_method: heuristic
|
| 73 |
+
rlbench:
|
| 74 |
+
task_name: per2+ab
|
| 75 |
+
tasks:
|
| 76 |
+
- bimanual_sweep_to_dustpan
|
| 77 |
+
demo_path: /workspace/nonexistent_demos
|
| 78 |
+
episode_length: 25
|
| 79 |
+
cameras:
|
| 80 |
+
- over_shoulder_left
|
| 81 |
+
- over_shoulder_right
|
| 82 |
+
- overhead
|
| 83 |
+
- wrist_right
|
| 84 |
+
- wrist_left
|
| 85 |
+
- front
|
| 86 |
+
camera_resolution:
|
| 87 |
+
- 256
|
| 88 |
+
- 256
|
| 89 |
+
scene_bounds:
|
| 90 |
+
- -0.3
|
| 91 |
+
- -0.5
|
| 92 |
+
- 0.6
|
| 93 |
+
- 0.7
|
| 94 |
+
- 0.5
|
| 95 |
+
- 1.6
|
| 96 |
+
include_lang_goal_in_obs: true
|
| 97 |
+
time_in_state: true
|
| 98 |
+
headless: true
|
| 99 |
+
gripper_mode: BimanualDiscrete
|
| 100 |
+
arm_action_mode: BimanualEndEffectorPoseViaPlanning
|
| 101 |
+
action_mode: BimanualMoveArmThenGripper
|
| 102 |
+
framework:
|
| 103 |
+
tensorboard_logging: true
|
| 104 |
+
csv_logging: true
|
| 105 |
+
gpu: 0
|
| 106 |
+
logdir: /workspace/runs/anybimanual/peract_lf_eval
|
| 107 |
+
start_seed: 0
|
| 108 |
+
record_every_n: 5
|
| 109 |
+
eval_envs: 1
|
| 110 |
+
eval_from_eps_number: 0
|
| 111 |
+
eval_episodes: 5
|
| 112 |
+
eval_type: 60000
|
| 113 |
+
eval_save_metrics: true
|
| 114 |
+
cinematic_recorder:
|
| 115 |
+
enabled: false
|
| 116 |
+
camera_resolution:
|
| 117 |
+
- 1280
|
| 118 |
+
- 720
|
| 119 |
+
fps: 30
|
| 120 |
+
rotate_speed: 0.005
|
| 121 |
+
save_path: /tmp/videos/
|
| 122 |
+
|
| 123 |
+
[2026-04-02 21:05:04,341][root][INFO] - Using env device cuda:0.
|
| 124 |
+
[2026-04-02 21:05:04,347][root][INFO] - Evaluating seed 0.
|
| 125 |
+
[2026-04-02 21:05:04,347][root][INFO] - Using method PERACT_BC with type leader_follower
|
| 126 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 127 |
+
The version_base parameter is not specified.
|
| 128 |
+
Please specify a compatability version level, or None.
|
| 129 |
+
Will assume defaults for version 1.1
|
| 130 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 131 |
+
Weight: [60000]
|
| 132 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 133 |
+
The version_base parameter is not specified.
|
| 134 |
+
Please specify a compatability version level, or None.
|
| 135 |
+
Will assume defaults for version 1.1
|
| 136 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 137 |
+
[04/02/26 21:05:25] WARNING WARNING:root:Data set root does environment.py:87
|
| 138 |
+
not exist:
|
| 139 |
+
/workspace/nonexistent_demos.
|
| 140 |
+
Continuing without demos.
|
| 141 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 142 |
+
The version_base parameter is not specified.
|
| 143 |
+
Please specify a compatability version level, or None.
|
| 144 |
+
Will assume defaults for version 1.1
|
| 145 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 146 |
+
[04/02/26 21:05:36] INFO INFO:root:eval_env: _independent_env_runner.py:130
|
| 147 |
+
Launching env.
|
| 148 |
+
INFO INFO:root:Agent _independent_env_runner.py:133
|
| 149 |
+
information:
|
| 150 |
+
INFO INFO:root:<yarr.agen _independent_env_runner.py:134
|
| 151 |
+
ts.agent.LeaderFollo
|
| 152 |
+
werAgent object at
|
| 153 |
+
0x7393c6a70f50>
|
| 154 |
+
QStandardPaths: XDG_RUNTIME_DIR not set, defaulting to '/tmp/runtime-root'
|
| 155 |
+
[04/02/26 21:05:38] INFO INFO:root:Using dual panda robot environment.py:119
|
| 156 |
+
INFO INFO:root:Setting control arm_action_modes.py:79
|
| 157 |
+
mode for both robots
|
| 158 |
+
WARNING WARNING:root:not sure how task_environment.py:57
|
| 159 |
+
_robot_shapes are used is
|
| 160 |
+
used.
|
| 161 |
+
INFO INFO:root:Evaluating _independent_env_runner.py:164
|
| 162 |
+
weight 60000
|
| 163 |
+
loaded weights from /workspace/runs/anybimanual/peract_lf_eval/per2+ab/PERACT_BC/seed0/weights/60000/checkpoint_peract_bc_leader_layer_0.pt
|
| 164 |
+
loaded weights from /workspace/runs/anybimanual/peract_lf_eval/per2+ab/PERACT_BC/seed0/weights/60000/checkpoint_peract_bc_follower_layer_0.pt
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
Starting episode 0,
|
| 168 |
+
seed 0.
|
| 169 |
+
eval_demo_seed: 0
|
| 170 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 171 |
+
missing or unavailable ();
|
| 172 |
+
using env.reset() for eval.
|
| 173 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 174 |
+
left=5)
|
| 175 |
+
[04/02/26 21:05:54] WARNING WARNING:root:Object gripper_action_modes.py:328
|
| 176 |
+
with name broom is
|
| 177 |
+
already grasped by
|
| 178 |
+
right robot
|
| 179 |
+
[04/02/26 21:06:47] WARNING WARNING:root:Object gripper_action_modes.py:328
|
| 180 |
+
with name broom is
|
| 181 |
+
already grasped by
|
| 182 |
+
right robot
|
| 183 |
+
[04/02/26 21:07:07] WARNING WARNING:root:Object gripper_action_modes.py:328
|
| 184 |
+
with name broom is
|
| 185 |
+
already grasped by
|
| 186 |
+
right robot
|
| 187 |
+
/workspace/third_party/AnyBimanual/third_party/YARR/yarr/utils/rollout_generator.py:94: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at /pytorch/torch/csrc/utils/tensor_new.cpp:254.)
|
| 188 |
+
prepped_data = {k: torch.tensor([v], device=self._env_device) for k, v in obs_history.items()}
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
[04/02/26 21:07:14] INFO INFO:root:eval_env: _independent_env_runner.py:191
|
| 192 |
+
Starting episode 1,
|
| 193 |
+
seed 1.
|
| 194 |
+
eval_demo_seed: 1
|
| 195 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 196 |
+
missing or unavailable ();
|
| 197 |
+
using env.reset() for eval.
|
| 198 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 199 |
+
left=5)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
[04/02/26 21:08:28] INFO INFO:root:eval_env: _independent_env_runner.py:191
|
| 203 |
+
Starting episode 2,
|
| 204 |
+
seed 2.
|
| 205 |
+
eval_demo_seed: 2
|
| 206 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 207 |
+
missing or unavailable ();
|
| 208 |
+
using env.reset() for eval.
|
| 209 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 210 |
+
left=5)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
[04/02/26 21:10:01] INFO INFO:root:eval_env: _independent_env_runner.py:191
|
| 214 |
+
Starting episode 3,
|
| 215 |
+
seed 3.
|
| 216 |
+
eval_demo_seed: 3
|
| 217 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 218 |
+
missing or unavailable ();
|
| 219 |
+
using env.reset() for eval.
|
| 220 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 221 |
+
left=5)
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
[04/02/26 21:11:04] INFO INFO:root:eval_env: _independent_env_runner.py:191
|
| 225 |
+
Starting episode 4,
|
| 226 |
+
seed 4.
|
| 227 |
+
eval_demo_seed: 4
|
| 228 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 229 |
+
missing or unavailable ();
|
| 230 |
+
using env.reset() for eval.
|
| 231 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 232 |
+
left=5)
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
Evaluating bimanual_sweep_to_dustpan | Episode 4 | Score: 0.0 | Lang Goal: sweep dirt to dustpan
|
| 237 |
+
Finished bimanual_sweep_to_dustpan | Final Score: 0.0
|
| 238 |
+
|
| 239 |
+
add_video needs package moviepy
|
| 240 |
+
[04/02/26 21:12:19] INFO INFO:root:Finished _independent_env_runner.py:292
|
| 241 |
+
evaluation.
|
| 242 |
+
[CoppeliaSim:loadinfo] done.
|
reports/anybimanual_eval_logs/bimanual_sweep_to_dustpan_ep5_live.log
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 2 |
+
The version_base parameter is not specified.
|
| 3 |
+
Please specify a compatability version level, or None.
|
| 4 |
+
Will assume defaults for version 1.1
|
| 5 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 6 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/_internal/defaults_list.py:251: UserWarning: In 'eval': Defaults list is missing `_self_`. See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/default_composition_order for more information
|
| 7 |
+
warnings.warn(msg, UserWarning)
|
| 8 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/core/default_element.py:124: UserWarning: In 'method/PERACT_BC': Usage of deprecated keyword in package header '# @package _group_'.
|
| 9 |
+
See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/changes_to_package_header for more information
|
| 10 |
+
deprecation_warning(
|
| 11 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/_internal/hydra.py:119: UserWarning: Future Hydra versions will no longer change working directory at job runtime by default.
|
| 12 |
+
See https://hydra.cc/docs/1.2/upgrades/1.1_to_1.2/changes_to_job_working_dir/ for more information.
|
| 13 |
+
ret = run_job(
|
| 14 |
+
[2026-04-02 22:12:16,880][root][INFO] -
|
| 15 |
+
method:
|
| 16 |
+
name: PERACT_BC
|
| 17 |
+
agent_type: leader_follower
|
| 18 |
+
robot_name: bimanual
|
| 19 |
+
image_crop_size: 64
|
| 20 |
+
bounds_offset:
|
| 21 |
+
- 0.15
|
| 22 |
+
voxel_sizes:
|
| 23 |
+
- 100
|
| 24 |
+
include_prev_layer: false
|
| 25 |
+
num_latents: 2048
|
| 26 |
+
latent_dim: 512
|
| 27 |
+
transformer_depth: 6
|
| 28 |
+
transformer_iterations: 1
|
| 29 |
+
cross_heads: 1
|
| 30 |
+
cross_dim_head: 64
|
| 31 |
+
latent_heads: 8
|
| 32 |
+
latent_dim_head: 64
|
| 33 |
+
pos_encoding_with_lang: true
|
| 34 |
+
conv_downsample: true
|
| 35 |
+
lang_fusion_type: seq
|
| 36 |
+
voxel_patch_size: 5
|
| 37 |
+
voxel_patch_stride: 5
|
| 38 |
+
final_dim: 64
|
| 39 |
+
low_dim_size: 4
|
| 40 |
+
input_dropout: 0.1
|
| 41 |
+
attn_dropout: 0.1
|
| 42 |
+
decoder_dropout: 0.0
|
| 43 |
+
lr: 0.0005
|
| 44 |
+
lr_scheduler: false
|
| 45 |
+
num_warmup_steps: 3000
|
| 46 |
+
optimizer: lamb
|
| 47 |
+
lambda_weight_l2: 1.0e-06
|
| 48 |
+
trans_loss_weight: 1.0
|
| 49 |
+
rot_loss_weight: 1.0
|
| 50 |
+
grip_loss_weight: 1.0
|
| 51 |
+
collision_loss_weight: 1.0
|
| 52 |
+
rotation_resolution: 5
|
| 53 |
+
activation: lrelu
|
| 54 |
+
norm: None
|
| 55 |
+
crop_augmentation: true
|
| 56 |
+
transform_augmentation:
|
| 57 |
+
apply_se3: true
|
| 58 |
+
aug_xyz:
|
| 59 |
+
- 0.125
|
| 60 |
+
- 0.125
|
| 61 |
+
- 0.125
|
| 62 |
+
aug_rpy:
|
| 63 |
+
- 0.0
|
| 64 |
+
- 0.0
|
| 65 |
+
- 45.0
|
| 66 |
+
aug_rot_resolution: ${method.rotation_resolution}
|
| 67 |
+
demo_augmentation: true
|
| 68 |
+
demo_augmentation_every_n: 10
|
| 69 |
+
no_skip_connection: false
|
| 70 |
+
no_perceiver: false
|
| 71 |
+
no_language: false
|
| 72 |
+
keypoint_method: heuristic
|
| 73 |
+
rlbench:
|
| 74 |
+
task_name: per2+ab
|
| 75 |
+
tasks:
|
| 76 |
+
- bimanual_sweep_to_dustpan
|
| 77 |
+
demo_path: /workspace/data/rlbench2
|
| 78 |
+
episode_length: 25
|
| 79 |
+
cameras:
|
| 80 |
+
- over_shoulder_left
|
| 81 |
+
- over_shoulder_right
|
| 82 |
+
- overhead
|
| 83 |
+
- wrist_right
|
| 84 |
+
- wrist_left
|
| 85 |
+
- front
|
| 86 |
+
camera_resolution:
|
| 87 |
+
- 256
|
| 88 |
+
- 256
|
| 89 |
+
scene_bounds:
|
| 90 |
+
- -0.3
|
| 91 |
+
- -0.5
|
| 92 |
+
- 0.6
|
| 93 |
+
- 0.7
|
| 94 |
+
- 0.5
|
| 95 |
+
- 1.6
|
| 96 |
+
include_lang_goal_in_obs: true
|
| 97 |
+
time_in_state: true
|
| 98 |
+
headless: true
|
| 99 |
+
gripper_mode: BimanualDiscrete
|
| 100 |
+
arm_action_mode: BimanualEndEffectorPoseViaPlanning
|
| 101 |
+
action_mode: BimanualMoveArmThenGripper
|
| 102 |
+
framework:
|
| 103 |
+
tensorboard_logging: true
|
| 104 |
+
csv_logging: true
|
| 105 |
+
gpu: 0
|
| 106 |
+
logdir: /workspace/runs/anybimanual/task_eval_sweep
|
| 107 |
+
start_seed: 0
|
| 108 |
+
record_every_n: 5
|
| 109 |
+
eval_envs: 1
|
| 110 |
+
eval_from_eps_number: 0
|
| 111 |
+
eval_episodes: 5
|
| 112 |
+
eval_type: 60000
|
| 113 |
+
eval_save_metrics: true
|
| 114 |
+
cinematic_recorder:
|
| 115 |
+
enabled: false
|
| 116 |
+
camera_resolution:
|
| 117 |
+
- 1280
|
| 118 |
+
- 720
|
| 119 |
+
fps: 30
|
| 120 |
+
rotate_speed: 0.005
|
| 121 |
+
save_path: /tmp/videos/
|
| 122 |
+
|
| 123 |
+
[2026-04-02 22:12:16,889][root][INFO] - Using env device cuda:0.
|
| 124 |
+
[2026-04-02 22:12:16,896][root][INFO] - Evaluating seed 0.
|
| 125 |
+
[2026-04-02 22:12:16,898][root][INFO] - Using method PERACT_BC with type leader_follower
|
| 126 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 127 |
+
The version_base parameter is not specified.
|
| 128 |
+
Please specify a compatability version level, or None.
|
| 129 |
+
Will assume defaults for version 1.1
|
| 130 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 131 |
+
Weight: [60000]
|
| 132 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 133 |
+
The version_base parameter is not specified.
|
| 134 |
+
Please specify a compatability version level, or None.
|
| 135 |
+
Will assume defaults for version 1.1
|
| 136 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 137 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 138 |
+
The version_base parameter is not specified.
|
| 139 |
+
Please specify a compatability version level, or None.
|
| 140 |
+
Will assume defaults for version 1.1
|
| 141 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 142 |
+
[04/02/26 22:12:52] INFO INFO:root:eval_env: _independent_env_runner.py:130
|
| 143 |
+
Launching env.
|
| 144 |
+
INFO INFO:root:Agent _independent_env_runner.py:133
|
| 145 |
+
information:
|
| 146 |
+
INFO INFO:root:<yarr.agen _independent_env_runner.py:134
|
| 147 |
+
ts.agent.LeaderFollo
|
| 148 |
+
werAgent object at
|
| 149 |
+
0x7d94a7c0bc50>
|
| 150 |
+
QStandardPaths: XDG_RUNTIME_DIR not set, defaulting to '/tmp/runtime-root'
|
| 151 |
+
[04/02/26 22:12:53] INFO INFO:root:Using dual panda robot environment.py:119
|
| 152 |
+
INFO INFO:root:Setting control arm_action_modes.py:79
|
| 153 |
+
mode for both robots
|
| 154 |
+
WARNING WARNING:root:not sure how task_environment.py:57
|
| 155 |
+
_robot_shapes are used is
|
| 156 |
+
used.
|
| 157 |
+
INFO INFO:root:Evaluating _independent_env_runner.py:164
|
| 158 |
+
weight 60000
|
| 159 |
+
loaded weights from /workspace/runs/anybimanual/task_eval_sweep/per2+ab/PERACT_BC/seed0/weights/60000/checkpoint_peract_bc_leader_layer_0.pt
|
| 160 |
+
loaded weights from /workspace/runs/anybimanual/task_eval_sweep/per2+ab/PERACT_BC/seed0/weights/60000/checkpoint_peract_bc_follower_layer_0.pt
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
Starting episode 0,
|
| 164 |
+
seed 0.
|
| 165 |
+
eval_demo_seed: 0
|
| 166 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 167 |
+
missing or unavailable ();
|
| 168 |
+
using env.reset() for eval.
|
| 169 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 170 |
+
left=5)
|
| 171 |
+
/workspace/third_party/AnyBimanual/third_party/YARR/yarr/utils/rollout_generator.py:94: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at /pytorch/torch/csrc/utils/tensor_new.cpp:254.)
|
| 172 |
+
prepped_data = {k: torch.tensor([v], device=self._env_device) for k, v in obs_history.items()}
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
[04/02/26 22:14:21] INFO INFO:root:eval_env: _independent_env_runner.py:191
|
| 176 |
+
Starting episode 1,
|
| 177 |
+
seed 1.
|
| 178 |
+
eval_demo_seed: 1
|
| 179 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 180 |
+
missing or unavailable ();
|
| 181 |
+
using env.reset() for eval.
|
| 182 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 183 |
+
left=5)
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
[04/02/26 22:15:45] INFO INFO:root:eval_env: _independent_env_runner.py:191
|
| 187 |
+
Starting episode 2,
|
| 188 |
+
seed 2.
|
| 189 |
+
eval_demo_seed: 2
|
| 190 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 191 |
+
missing or unavailable ();
|
| 192 |
+
using env.reset() for eval.
|
| 193 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 194 |
+
left=5)
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
[04/02/26 22:16:21] INFO INFO:root:eval_env: _independent_env_runner.py:191
|
| 198 |
+
Starting episode 3,
|
| 199 |
+
seed 3.
|
| 200 |
+
eval_demo_seed: 3
|
| 201 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 202 |
+
missing or unavailable ();
|
| 203 |
+
using env.reset() for eval.
|
| 204 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 205 |
+
left=5)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
[04/02/26 22:17:38] INFO INFO:root:eval_env: _independent_env_runner.py:191
|
| 209 |
+
Starting episode 4,
|
| 210 |
+
seed 4.
|
| 211 |
+
eval_demo_seed: 4
|
| 212 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 213 |
+
missing or unavailable ();
|
| 214 |
+
using env.reset() for eval.
|
| 215 |
+
INFO INFO:root:total waypoints 7, (right=2, task.py:370
|
| 216 |
+
left=5)
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
Evaluating bimanual_sweep_to_dustpan | Episode 4 | Score: 0.0 | Lang Goal: sweep dirt to dustpan
|
| 221 |
+
Finished bimanual_sweep_to_dustpan | Final Score: 0.0
|
| 222 |
+
|
| 223 |
+
add_video needs package moviepy
|
| 224 |
+
[04/02/26 22:19:07] INFO INFO:root:Finished _independent_env_runner.py:292
|
| 225 |
+
evaluation.
|
| 226 |
+
[CoppeliaSim:loadinfo] done.
|
reports/anybimanual_eval_logs/coordinated_lift_tray_ep1_live.log
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 2 |
+
The version_base parameter is not specified.
|
| 3 |
+
Please specify a compatability version level, or None.
|
| 4 |
+
Will assume defaults for version 1.1
|
| 5 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 6 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/_internal/defaults_list.py:251: UserWarning: In 'eval': Defaults list is missing `_self_`. See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/default_composition_order for more information
|
| 7 |
+
warnings.warn(msg, UserWarning)
|
| 8 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/core/default_element.py:124: UserWarning: In 'method/PERACT_BC': Usage of deprecated keyword in package header '# @package _group_'.
|
| 9 |
+
See https://hydra.cc/docs/1.2/upgrades/1.0_to_1.1/changes_to_package_header for more information
|
| 10 |
+
deprecation_warning(
|
| 11 |
+
/workspace/envs/anybi311/lib/python3.11/site-packages/hydra/_internal/hydra.py:119: UserWarning: Future Hydra versions will no longer change working directory at job runtime by default.
|
| 12 |
+
See https://hydra.cc/docs/1.2/upgrades/1.1_to_1.2/changes_to_job_working_dir/ for more information.
|
| 13 |
+
ret = run_job(
|
| 14 |
+
[2026-04-02 22:09:13,243][root][INFO] -
|
| 15 |
+
method:
|
| 16 |
+
name: PERACT_BC
|
| 17 |
+
agent_type: leader_follower
|
| 18 |
+
robot_name: bimanual
|
| 19 |
+
image_crop_size: 64
|
| 20 |
+
bounds_offset:
|
| 21 |
+
- 0.15
|
| 22 |
+
voxel_sizes:
|
| 23 |
+
- 100
|
| 24 |
+
include_prev_layer: false
|
| 25 |
+
num_latents: 2048
|
| 26 |
+
latent_dim: 512
|
| 27 |
+
transformer_depth: 6
|
| 28 |
+
transformer_iterations: 1
|
| 29 |
+
cross_heads: 1
|
| 30 |
+
cross_dim_head: 64
|
| 31 |
+
latent_heads: 8
|
| 32 |
+
latent_dim_head: 64
|
| 33 |
+
pos_encoding_with_lang: true
|
| 34 |
+
conv_downsample: true
|
| 35 |
+
lang_fusion_type: seq
|
| 36 |
+
voxel_patch_size: 5
|
| 37 |
+
voxel_patch_stride: 5
|
| 38 |
+
final_dim: 64
|
| 39 |
+
low_dim_size: 4
|
| 40 |
+
input_dropout: 0.1
|
| 41 |
+
attn_dropout: 0.1
|
| 42 |
+
decoder_dropout: 0.0
|
| 43 |
+
lr: 0.0005
|
| 44 |
+
lr_scheduler: false
|
| 45 |
+
num_warmup_steps: 3000
|
| 46 |
+
optimizer: lamb
|
| 47 |
+
lambda_weight_l2: 1.0e-06
|
| 48 |
+
trans_loss_weight: 1.0
|
| 49 |
+
rot_loss_weight: 1.0
|
| 50 |
+
grip_loss_weight: 1.0
|
| 51 |
+
collision_loss_weight: 1.0
|
| 52 |
+
rotation_resolution: 5
|
| 53 |
+
activation: lrelu
|
| 54 |
+
norm: None
|
| 55 |
+
crop_augmentation: true
|
| 56 |
+
transform_augmentation:
|
| 57 |
+
apply_se3: true
|
| 58 |
+
aug_xyz:
|
| 59 |
+
- 0.125
|
| 60 |
+
- 0.125
|
| 61 |
+
- 0.125
|
| 62 |
+
aug_rpy:
|
| 63 |
+
- 0.0
|
| 64 |
+
- 0.0
|
| 65 |
+
- 45.0
|
| 66 |
+
aug_rot_resolution: ${method.rotation_resolution}
|
| 67 |
+
demo_augmentation: true
|
| 68 |
+
demo_augmentation_every_n: 10
|
| 69 |
+
no_skip_connection: false
|
| 70 |
+
no_perceiver: false
|
| 71 |
+
no_language: false
|
| 72 |
+
keypoint_method: heuristic
|
| 73 |
+
rlbench:
|
| 74 |
+
task_name: per2+ab
|
| 75 |
+
tasks:
|
| 76 |
+
- coordinated_lift_tray
|
| 77 |
+
demo_path: /workspace/data/rlbench2
|
| 78 |
+
episode_length: 25
|
| 79 |
+
cameras:
|
| 80 |
+
- over_shoulder_left
|
| 81 |
+
- over_shoulder_right
|
| 82 |
+
- overhead
|
| 83 |
+
- wrist_right
|
| 84 |
+
- wrist_left
|
| 85 |
+
- front
|
| 86 |
+
camera_resolution:
|
| 87 |
+
- 256
|
| 88 |
+
- 256
|
| 89 |
+
scene_bounds:
|
| 90 |
+
- -0.3
|
| 91 |
+
- -0.5
|
| 92 |
+
- 0.6
|
| 93 |
+
- 0.7
|
| 94 |
+
- 0.5
|
| 95 |
+
- 1.6
|
| 96 |
+
include_lang_goal_in_obs: true
|
| 97 |
+
time_in_state: true
|
| 98 |
+
headless: true
|
| 99 |
+
gripper_mode: BimanualDiscrete
|
| 100 |
+
arm_action_mode: BimanualEndEffectorPoseViaPlanning
|
| 101 |
+
action_mode: BimanualMoveArmThenGripper
|
| 102 |
+
framework:
|
| 103 |
+
tensorboard_logging: true
|
| 104 |
+
csv_logging: true
|
| 105 |
+
gpu: 0
|
| 106 |
+
logdir: /workspace/runs/anybimanual/peract_lf_eval
|
| 107 |
+
start_seed: 0
|
| 108 |
+
record_every_n: 5
|
| 109 |
+
eval_envs: 1
|
| 110 |
+
eval_from_eps_number: 0
|
| 111 |
+
eval_episodes: 1
|
| 112 |
+
eval_type: 60000
|
| 113 |
+
eval_save_metrics: true
|
| 114 |
+
cinematic_recorder:
|
| 115 |
+
enabled: false
|
| 116 |
+
camera_resolution:
|
| 117 |
+
- 1280
|
| 118 |
+
- 720
|
| 119 |
+
fps: 30
|
| 120 |
+
rotate_speed: 0.005
|
| 121 |
+
save_path: /tmp/videos/
|
| 122 |
+
|
| 123 |
+
[2026-04-02 22:09:13,251][root][INFO] - Using env device cuda:0.
|
| 124 |
+
[2026-04-02 22:09:13,261][root][INFO] - Evaluating seed 0.
|
| 125 |
+
[2026-04-02 22:09:13,261][root][INFO] - Using method PERACT_BC with type leader_follower
|
| 126 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 127 |
+
The version_base parameter is not specified.
|
| 128 |
+
Please specify a compatability version level, or None.
|
| 129 |
+
Will assume defaults for version 1.1
|
| 130 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 131 |
+
Weight: [60000]
|
| 132 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 133 |
+
The version_base parameter is not specified.
|
| 134 |
+
Please specify a compatability version level, or None.
|
| 135 |
+
Will assume defaults for version 1.1
|
| 136 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 137 |
+
/workspace/third_party/AnyBimanual/eval.py:183: UserWarning:
|
| 138 |
+
The version_base parameter is not specified.
|
| 139 |
+
Please specify a compatability version level, or None.
|
| 140 |
+
Will assume defaults for version 1.1
|
| 141 |
+
@hydra.main(config_name="eval", config_path="conf")
|
| 142 |
+
[04/02/26 22:09:50] INFO INFO:root:eval_env: _independent_env_runner.py:130
|
| 143 |
+
Launching env.
|
| 144 |
+
INFO INFO:root:Agent _independent_env_runner.py:133
|
| 145 |
+
information:
|
| 146 |
+
INFO INFO:root:<yarr.agen _independent_env_runner.py:134
|
| 147 |
+
ts.agent.LeaderFollo
|
| 148 |
+
werAgent object at
|
| 149 |
+
0x7dcccb11fbd0>
|
| 150 |
+
QStandardPaths: XDG_RUNTIME_DIR not set, defaulting to '/tmp/runtime-root'
|
| 151 |
+
[04/02/26 22:09:52] INFO INFO:root:Using dual panda robot environment.py:119
|
| 152 |
+
INFO INFO:root:Setting control arm_action_modes.py:79
|
| 153 |
+
mode for both robots
|
| 154 |
+
WARNING WARNING:root:not sure how task_environment.py:57
|
| 155 |
+
_robot_shapes are used is
|
| 156 |
+
used.
|
| 157 |
+
INFO INFO:root:Evaluating _independent_env_runner.py:164
|
| 158 |
+
weight 60000
|
| 159 |
+
loaded weights from /workspace/runs/anybimanual/peract_lf_eval/per2+ab/PERACT_BC/seed0/weights/60000/checkpoint_peract_bc_leader_layer_0.pt
|
| 160 |
+
loaded weights from /workspace/runs/anybimanual/peract_lf_eval/per2+ab/PERACT_BC/seed0/weights/60000/checkpoint_peract_bc_follower_layer_0.pt
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
Starting episode 0,
|
| 164 |
+
seed 0.
|
| 165 |
+
eval_demo_seed: 0
|
| 166 |
+
INFO INFO:root:Dataset root rollout_generator.py:38
|
| 167 |
+
missing or unavailable ();
|
| 168 |
+
using env.reset() for eval.
|
| 169 |
+
INFO INFO:root:total waypoints 6, (right=3, task.py:370
|
| 170 |
+
left=3)
|
| 171 |
+
/workspace/third_party/AnyBimanual/third_party/YARR/yarr/utils/rollout_generator.py:94: UserWarning: Creating a tensor from a list of numpy.ndarrays is extremely slow. Please consider converting the list to a single numpy.ndarray with numpy.array() before converting to a tensor. (Triggered internally at /pytorch/torch/csrc/utils/tensor_new.cpp:254.)
|
| 172 |
+
prepped_data = {k: torch.tensor([v], device=self._env_device) for k, v in obs_history.items()}
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
Evaluating coordinated_lift_tray | Episode 0 | Score: 0.0 | Lang Goal: Lift the tray
|
| 177 |
+
Finished coordinated_lift_tray | Final Score: unknown
|
| 178 |
+
|
| 179 |
+
[04/02/26 22:10:34] INFO INFO:root:Finished _independent_env_runner.py:292
|
| 180 |
+
evaluation.
|
| 181 |
+
[CoppeliaSim:loadinfo] done.
|
reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_fold_tops__trunk_only_ft_seed17.json
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"adapter_mode": "trunk_only_ft",
|
| 3 |
+
"benchmark_task": "Fold Tops",
|
| 4 |
+
"episodes": 1,
|
| 5 |
+
"eval_protocol": {
|
| 6 |
+
"action_horizon": 8,
|
| 7 |
+
"action_space": "bimanual_ik_hand_state",
|
| 8 |
+
"benchmark_task": "Fold Tops",
|
| 9 |
+
"cameras": [
|
| 10 |
+
"env_camera",
|
| 11 |
+
"garment_camera",
|
| 12 |
+
"object_camera"
|
| 13 |
+
],
|
| 14 |
+
"episodes": 1,
|
| 15 |
+
"eval_mode": "trunk_only_ft",
|
| 16 |
+
"observation_stack": "rgbd_pointcloud_3cam",
|
| 17 |
+
"resolution": 224,
|
| 18 |
+
"role": "target",
|
| 19 |
+
"same_test_episodes": true,
|
| 20 |
+
"seed": 17,
|
| 21 |
+
"suite": "dexgarmentlab",
|
| 22 |
+
"track_id": "dexgarmentlab_fold_tops"
|
| 23 |
+
},
|
| 24 |
+
"nonzero_metric_count": 4,
|
| 25 |
+
"raw_metrics": {
|
| 26 |
+
"affordance_feature_norm": 28.461267471313477,
|
| 27 |
+
"final_point_count": 2048,
|
| 28 |
+
"fold_boundary_area": 0.38797936088666146,
|
| 29 |
+
"garment_point_count": 2048,
|
| 30 |
+
"success": true,
|
| 31 |
+
"task_name": "Fold Tops"
|
| 32 |
+
},
|
| 33 |
+
"runtime_sec": 62.63576006889343,
|
| 34 |
+
"seed": 17,
|
| 35 |
+
"smoke_pass": true,
|
| 36 |
+
"success_rate": 1.0,
|
| 37 |
+
"successes": [
|
| 38 |
+
1
|
| 39 |
+
],
|
| 40 |
+
"suite": "dexgarmentlab",
|
| 41 |
+
"task_success": true,
|
| 42 |
+
"timed_out": false,
|
| 43 |
+
"track_id": "dexgarmentlab_fold_tops",
|
| 44 |
+
"train_spec": {
|
| 45 |
+
"augmentations": "matched_rgbd_aug_v1",
|
| 46 |
+
"batch_size": 32,
|
| 47 |
+
"benchmark_task": "Fold Tops",
|
| 48 |
+
"dataset_split_id": "dexgarmentlab_fold_tops_shared_split_seed17",
|
| 49 |
+
"early_stopping_metric": "val_success",
|
| 50 |
+
"init_checkpoint_group": "shared_public_trunk",
|
| 51 |
+
"learning_rate": 0.0003,
|
| 52 |
+
"lr_schedule": "cosine",
|
| 53 |
+
"max_gradient_steps": 20000,
|
| 54 |
+
"model_variant": "trunk_only_ft",
|
| 55 |
+
"optimizer": "adamw",
|
| 56 |
+
"same_data_policy": true,
|
| 57 |
+
"same_init_policy": true,
|
| 58 |
+
"seed": 17,
|
| 59 |
+
"suite": "dexgarmentlab",
|
| 60 |
+
"track_id": "dexgarmentlab_fold_tops",
|
| 61 |
+
"train_demos": 64,
|
| 62 |
+
"unfreeze_scope": "matched_trunk_scope",
|
| 63 |
+
"val_demos": 16
|
| 64 |
+
}
|
| 65 |
+
}
|
reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_hang_coat__trunk_only_ft_seed17.json
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"adapter_mode": "trunk_only_ft",
|
| 3 |
+
"benchmark_task": "Hang Coat",
|
| 4 |
+
"episodes": 1,
|
| 5 |
+
"eval_protocol": {
|
| 6 |
+
"action_horizon": 8,
|
| 7 |
+
"action_space": "bimanual_ik_hand_state",
|
| 8 |
+
"benchmark_task": "Hang Coat",
|
| 9 |
+
"cameras": [
|
| 10 |
+
"env_camera",
|
| 11 |
+
"garment_camera",
|
| 12 |
+
"object_camera"
|
| 13 |
+
],
|
| 14 |
+
"episodes": 1,
|
| 15 |
+
"eval_mode": "trunk_only_ft",
|
| 16 |
+
"observation_stack": "rgbd_pointcloud_3cam",
|
| 17 |
+
"resolution": 224,
|
| 18 |
+
"role": "target",
|
| 19 |
+
"same_test_episodes": true,
|
| 20 |
+
"seed": 17,
|
| 21 |
+
"suite": "dexgarmentlab",
|
| 22 |
+
"track_id": "dexgarmentlab_hang_coat"
|
| 23 |
+
},
|
| 24 |
+
"nonzero_metric_count": 4,
|
| 25 |
+
"raw_metrics": {
|
| 26 |
+
"affordance_feature_norm": 36.793704986572266,
|
| 27 |
+
"garment_center_height": 0.7110686898231506,
|
| 28 |
+
"garment_point_count": 2048,
|
| 29 |
+
"object_point_count": 2048,
|
| 30 |
+
"success": true,
|
| 31 |
+
"task_name": "Hang Coat"
|
| 32 |
+
},
|
| 33 |
+
"runtime_sec": 50.74257040023804,
|
| 34 |
+
"seed": 17,
|
| 35 |
+
"smoke_pass": true,
|
| 36 |
+
"success_rate": 1.0,
|
| 37 |
+
"successes": [
|
| 38 |
+
1
|
| 39 |
+
],
|
| 40 |
+
"suite": "dexgarmentlab",
|
| 41 |
+
"task_success": true,
|
| 42 |
+
"timed_out": false,
|
| 43 |
+
"track_id": "dexgarmentlab_hang_coat",
|
| 44 |
+
"train_spec": {
|
| 45 |
+
"augmentations": "matched_rgbd_aug_v1",
|
| 46 |
+
"batch_size": 32,
|
| 47 |
+
"benchmark_task": "Hang Coat",
|
| 48 |
+
"dataset_split_id": "dexgarmentlab_hang_coat_shared_split_seed17",
|
| 49 |
+
"early_stopping_metric": "val_success",
|
| 50 |
+
"init_checkpoint_group": "shared_public_trunk",
|
| 51 |
+
"learning_rate": 0.0003,
|
| 52 |
+
"lr_schedule": "cosine",
|
| 53 |
+
"max_gradient_steps": 20000,
|
| 54 |
+
"model_variant": "trunk_only_ft",
|
| 55 |
+
"optimizer": "adamw",
|
| 56 |
+
"same_data_policy": true,
|
| 57 |
+
"same_init_policy": true,
|
| 58 |
+
"seed": 17,
|
| 59 |
+
"suite": "dexgarmentlab",
|
| 60 |
+
"track_id": "dexgarmentlab_hang_coat",
|
| 61 |
+
"train_demos": 64,
|
| 62 |
+
"unfreeze_scope": "matched_trunk_scope",
|
| 63 |
+
"val_demos": 16
|
| 64 |
+
}
|
| 65 |
+
}
|
reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_hybrid_smoke_summary.json
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"adapter_mode": "trunk_only_ft",
|
| 3 |
+
"failures": [],
|
| 4 |
+
"result_files": [
|
| 5 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_store_tops__trunk_only_ft_seed17.json",
|
| 6 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_fold_tops__trunk_only_ft_seed17.json",
|
| 7 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_hang_coat__trunk_only_ft_seed17.json"
|
| 8 |
+
],
|
| 9 |
+
"seed": 17,
|
| 10 |
+
"smoke_pass_count": 3,
|
| 11 |
+
"suite": "dexgarmentlab",
|
| 12 |
+
"task_success_count": 3,
|
| 13 |
+
"tracks": [
|
| 14 |
+
"dexgarmentlab_store_tops",
|
| 15 |
+
"dexgarmentlab_fold_tops",
|
| 16 |
+
"dexgarmentlab_hang_coat"
|
| 17 |
+
]
|
| 18 |
+
}
|
reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_store_tops__trunk_only_ft_seed17.json
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"adapter_mode": "trunk_only_ft",
|
| 3 |
+
"benchmark_task": "Store Tops",
|
| 4 |
+
"episodes": 1,
|
| 5 |
+
"eval_protocol": {
|
| 6 |
+
"action_horizon": 8,
|
| 7 |
+
"action_space": "bimanual_ik_hand_state",
|
| 8 |
+
"benchmark_task": "Store Tops",
|
| 9 |
+
"cameras": [
|
| 10 |
+
"env_camera",
|
| 11 |
+
"garment_camera",
|
| 12 |
+
"object_camera"
|
| 13 |
+
],
|
| 14 |
+
"episodes": 1,
|
| 15 |
+
"eval_mode": "trunk_only_ft",
|
| 16 |
+
"observation_stack": "rgbd_pointcloud_3cam",
|
| 17 |
+
"resolution": 224,
|
| 18 |
+
"role": "target",
|
| 19 |
+
"same_test_episodes": true,
|
| 20 |
+
"seed": 17,
|
| 21 |
+
"suite": "dexgarmentlab",
|
| 22 |
+
"track_id": "dexgarmentlab_store_tops"
|
| 23 |
+
},
|
| 24 |
+
"nonzero_metric_count": 5,
|
| 25 |
+
"raw_metrics": {
|
| 26 |
+
"affordance_feature_norm": 26.555341720581055,
|
| 27 |
+
"distance_to_target_center": 0.0008265018479589052,
|
| 28 |
+
"garment_point_count": 2048,
|
| 29 |
+
"judge_point_count": 2048,
|
| 30 |
+
"object_point_count": 2048,
|
| 31 |
+
"success": true,
|
| 32 |
+
"task_name": "Store Tops"
|
| 33 |
+
},
|
| 34 |
+
"runtime_sec": 68.48123931884766,
|
| 35 |
+
"seed": 17,
|
| 36 |
+
"smoke_pass": true,
|
| 37 |
+
"success_rate": 1.0,
|
| 38 |
+
"successes": [
|
| 39 |
+
1
|
| 40 |
+
],
|
| 41 |
+
"suite": "dexgarmentlab",
|
| 42 |
+
"task_success": true,
|
| 43 |
+
"timed_out": false,
|
| 44 |
+
"track_id": "dexgarmentlab_store_tops",
|
| 45 |
+
"train_spec": {
|
| 46 |
+
"augmentations": "matched_rgbd_aug_v1",
|
| 47 |
+
"batch_size": 32,
|
| 48 |
+
"benchmark_task": "Store Tops",
|
| 49 |
+
"dataset_split_id": "dexgarmentlab_store_tops_shared_split_seed17",
|
| 50 |
+
"early_stopping_metric": "val_success",
|
| 51 |
+
"init_checkpoint_group": "shared_public_trunk",
|
| 52 |
+
"learning_rate": 0.0003,
|
| 53 |
+
"lr_schedule": "cosine",
|
| 54 |
+
"max_gradient_steps": 20000,
|
| 55 |
+
"model_variant": "trunk_only_ft",
|
| 56 |
+
"optimizer": "adamw",
|
| 57 |
+
"same_data_policy": true,
|
| 58 |
+
"same_init_policy": true,
|
| 59 |
+
"seed": 17,
|
| 60 |
+
"suite": "dexgarmentlab",
|
| 61 |
+
"track_id": "dexgarmentlab_store_tops",
|
| 62 |
+
"train_demos": 64,
|
| 63 |
+
"unfreeze_scope": "matched_trunk_scope",
|
| 64 |
+
"val_demos": 16
|
| 65 |
+
}
|
| 66 |
+
}
|
reports/public_hybrid_benchmark_smoke_v1/hybrid_public_benchmark_smoke_summary.json
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"adapter_mode": "trunk_only_ft",
|
| 3 |
+
"failures": [],
|
| 4 |
+
"seed": 17,
|
| 5 |
+
"suite_summaries": {
|
| 6 |
+
"dexgarmentlab": {
|
| 7 |
+
"returncode": 0,
|
| 8 |
+
"stderr_tail": [],
|
| 9 |
+
"stdout_tail": [
|
| 10 |
+
"{",
|
| 11 |
+
" \"summary_path\": \"/workspace/reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_hybrid_smoke_summary.json\",",
|
| 12 |
+
" \"smoke_pass_count\": 3",
|
| 13 |
+
"}"
|
| 14 |
+
],
|
| 15 |
+
"summary": {
|
| 16 |
+
"adapter_mode": "trunk_only_ft",
|
| 17 |
+
"failures": [],
|
| 18 |
+
"result_files": [
|
| 19 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_store_tops__trunk_only_ft_seed17.json",
|
| 20 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_fold_tops__trunk_only_ft_seed17.json",
|
| 21 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_hang_coat__trunk_only_ft_seed17.json"
|
| 22 |
+
],
|
| 23 |
+
"seed": 17,
|
| 24 |
+
"smoke_pass_count": 3,
|
| 25 |
+
"suite": "dexgarmentlab",
|
| 26 |
+
"task_success_count": 3,
|
| 27 |
+
"tracks": [
|
| 28 |
+
"dexgarmentlab_store_tops",
|
| 29 |
+
"dexgarmentlab_fold_tops",
|
| 30 |
+
"dexgarmentlab_hang_coat"
|
| 31 |
+
]
|
| 32 |
+
},
|
| 33 |
+
"summary_path": "/workspace/reports/public_hybrid_benchmark_smoke_v1/dexgarmentlab/dexgarmentlab_hybrid_smoke_summary.json"
|
| 34 |
+
},
|
| 35 |
+
"rlbench2": {
|
| 36 |
+
"returncode": 0,
|
| 37 |
+
"stderr_tail": [],
|
| 38 |
+
"stdout_tail": [
|
| 39 |
+
"WARNING: QApplication was not created in the main() thread.",
|
| 40 |
+
"WARNING:root:not sure how _robot_shapes are used is used.",
|
| 41 |
+
"ERROR:root:robot is in collision",
|
| 42 |
+
"ERROR:root:Error when checking waypoints. Exception is: ",
|
| 43 |
+
"WARNING: QApplication was not created in the main() thread.",
|
| 44 |
+
"WARNING:root:not sure how _robot_shapes are used is used.",
|
| 45 |
+
"WARNING: QApplication was not created in the main() thread.",
|
| 46 |
+
"WARNING:root:not sure how _robot_shapes are used is used.",
|
| 47 |
+
"WARNING: QApplication was not created in the main() thread.",
|
| 48 |
+
"WARNING:root:not sure how _robot_shapes are used is used.",
|
| 49 |
+
"[ 0.05 -0.27500001 0.75199997]",
|
| 50 |
+
"{",
|
| 51 |
+
" \"summary_path\": \"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench_hybrid_smoke_summary.json\",",
|
| 52 |
+
" \"smoke_pass_count\": 5",
|
| 53 |
+
"}",
|
| 54 |
+
"[CoppeliaSim:loadinfo] done.",
|
| 55 |
+
"[CoppeliaSim:loadinfo] done.",
|
| 56 |
+
"[CoppeliaSim:loadinfo] done.",
|
| 57 |
+
"[CoppeliaSim:loadinfo] done.",
|
| 58 |
+
"[CoppeliaSim:loadinfo] done."
|
| 59 |
+
],
|
| 60 |
+
"summary": {
|
| 61 |
+
"adapter_mode": "trunk_only_ft",
|
| 62 |
+
"failures": [],
|
| 63 |
+
"result_files": [
|
| 64 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_put_bottle_in_fridge__trunk_only_ft_seed17.json",
|
| 65 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_take_out_tray__trunk_only_ft_seed17.json",
|
| 66 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_lift_tray__trunk_only_ft_seed17.json",
|
| 67 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_straighten_rope__trunk_only_ft_seed17.json",
|
| 68 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_sweep_to_dustpan__trunk_only_ft_seed17.json"
|
| 69 |
+
],
|
| 70 |
+
"seed": 17,
|
| 71 |
+
"smoke_pass_count": 5,
|
| 72 |
+
"suite": "rlbench2",
|
| 73 |
+
"task_success_count": 0,
|
| 74 |
+
"tracks": [
|
| 75 |
+
"rlbench2_put_bottle_in_fridge",
|
| 76 |
+
"rlbench2_take_out_tray",
|
| 77 |
+
"rlbench2_lift_tray",
|
| 78 |
+
"rlbench2_straighten_rope",
|
| 79 |
+
"rlbench2_sweep_to_dustpan"
|
| 80 |
+
]
|
| 81 |
+
},
|
| 82 |
+
"summary_path": "/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench_hybrid_smoke_summary.json"
|
| 83 |
+
}
|
| 84 |
+
}
|
| 85 |
+
}
|
reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_lift_tray__trunk_only_ft_seed17.json
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"action_translation_norm": 2.146094799041748,
|
| 3 |
+
"adapter_mode": "trunk_only_ft",
|
| 4 |
+
"benchmark_task": "bimanual_lift_tray",
|
| 5 |
+
"description_count": 1,
|
| 6 |
+
"episodes": 1,
|
| 7 |
+
"eval_protocol": {
|
| 8 |
+
"action_horizon": 8,
|
| 9 |
+
"action_space": "bimanual_pose_then_gripper",
|
| 10 |
+
"benchmark_task": "bimanual_lift_tray",
|
| 11 |
+
"cameras": [
|
| 12 |
+
"front",
|
| 13 |
+
"wrist_left",
|
| 14 |
+
"wrist_right"
|
| 15 |
+
],
|
| 16 |
+
"episodes": 1,
|
| 17 |
+
"eval_mode": "trunk_only_ft",
|
| 18 |
+
"observation_stack": "rgbd_3cam",
|
| 19 |
+
"resolution": 256,
|
| 20 |
+
"role": "target",
|
| 21 |
+
"same_test_episodes": true,
|
| 22 |
+
"seed": 17,
|
| 23 |
+
"suite": "rlbench2",
|
| 24 |
+
"track_id": "rlbench2_lift_tray"
|
| 25 |
+
},
|
| 26 |
+
"left_pose_norm": 1.5036476850509644,
|
| 27 |
+
"reward": 0.0,
|
| 28 |
+
"right_pose_norm": 1.5118523836135864,
|
| 29 |
+
"runtime_sec": 1.2487857341766357,
|
| 30 |
+
"seed": 17,
|
| 31 |
+
"smoke_pass": true,
|
| 32 |
+
"success_rate": 1.0,
|
| 33 |
+
"successes": [
|
| 34 |
+
1
|
| 35 |
+
],
|
| 36 |
+
"suite": "rlbench2",
|
| 37 |
+
"task_success": false,
|
| 38 |
+
"terminate": false,
|
| 39 |
+
"track_id": "rlbench2_lift_tray",
|
| 40 |
+
"train_spec": {
|
| 41 |
+
"augmentations": "matched_rgbd_aug_v1",
|
| 42 |
+
"batch_size": 32,
|
| 43 |
+
"benchmark_task": "bimanual_lift_tray",
|
| 44 |
+
"dataset_split_id": "rlbench2_lift_tray_shared_split_seed17",
|
| 45 |
+
"early_stopping_metric": "val_success",
|
| 46 |
+
"init_checkpoint_group": "shared_public_trunk",
|
| 47 |
+
"learning_rate": 0.0003,
|
| 48 |
+
"lr_schedule": "cosine",
|
| 49 |
+
"max_gradient_steps": 20000,
|
| 50 |
+
"model_variant": "trunk_only_ft",
|
| 51 |
+
"optimizer": "adamw",
|
| 52 |
+
"same_data_policy": true,
|
| 53 |
+
"same_init_policy": true,
|
| 54 |
+
"seed": 17,
|
| 55 |
+
"suite": "rlbench2",
|
| 56 |
+
"track_id": "rlbench2_lift_tray",
|
| 57 |
+
"train_demos": 64,
|
| 58 |
+
"unfreeze_scope": "matched_trunk_scope",
|
| 59 |
+
"val_demos": 16
|
| 60 |
+
}
|
| 61 |
+
}
|
reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_put_bottle_in_fridge__trunk_only_ft_seed17.json
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"action_translation_norm": 2.146069288253784,
|
| 3 |
+
"adapter_mode": "trunk_only_ft",
|
| 4 |
+
"benchmark_task": "bimanual_put_bottle_in_fridge",
|
| 5 |
+
"description_count": 4,
|
| 6 |
+
"episodes": 1,
|
| 7 |
+
"eval_protocol": {
|
| 8 |
+
"action_horizon": 8,
|
| 9 |
+
"action_space": "bimanual_pose_then_gripper",
|
| 10 |
+
"benchmark_task": "bimanual_put_bottle_in_fridge",
|
| 11 |
+
"cameras": [
|
| 12 |
+
"front",
|
| 13 |
+
"wrist_left",
|
| 14 |
+
"wrist_right"
|
| 15 |
+
],
|
| 16 |
+
"episodes": 1,
|
| 17 |
+
"eval_mode": "trunk_only_ft",
|
| 18 |
+
"observation_stack": "rgbd_3cam",
|
| 19 |
+
"resolution": 256,
|
| 20 |
+
"role": "target",
|
| 21 |
+
"same_test_episodes": true,
|
| 22 |
+
"seed": 17,
|
| 23 |
+
"suite": "rlbench2",
|
| 24 |
+
"track_id": "rlbench2_put_bottle_in_fridge"
|
| 25 |
+
},
|
| 26 |
+
"left_pose_norm": 1.5036077499389648,
|
| 27 |
+
"reward": 0.0,
|
| 28 |
+
"right_pose_norm": 1.511855959892273,
|
| 29 |
+
"runtime_sec": 2.537095785140991,
|
| 30 |
+
"seed": 17,
|
| 31 |
+
"smoke_pass": true,
|
| 32 |
+
"success_rate": 1.0,
|
| 33 |
+
"successes": [
|
| 34 |
+
1
|
| 35 |
+
],
|
| 36 |
+
"suite": "rlbench2",
|
| 37 |
+
"task_success": false,
|
| 38 |
+
"terminate": false,
|
| 39 |
+
"track_id": "rlbench2_put_bottle_in_fridge",
|
| 40 |
+
"train_spec": {
|
| 41 |
+
"augmentations": "matched_rgbd_aug_v1",
|
| 42 |
+
"batch_size": 32,
|
| 43 |
+
"benchmark_task": "bimanual_put_bottle_in_fridge",
|
| 44 |
+
"dataset_split_id": "rlbench2_put_bottle_in_fridge_shared_split_seed17",
|
| 45 |
+
"early_stopping_metric": "val_success",
|
| 46 |
+
"init_checkpoint_group": "shared_public_trunk",
|
| 47 |
+
"learning_rate": 0.0003,
|
| 48 |
+
"lr_schedule": "cosine",
|
| 49 |
+
"max_gradient_steps": 20000,
|
| 50 |
+
"model_variant": "trunk_only_ft",
|
| 51 |
+
"optimizer": "adamw",
|
| 52 |
+
"same_data_policy": true,
|
| 53 |
+
"same_init_policy": true,
|
| 54 |
+
"seed": 17,
|
| 55 |
+
"suite": "rlbench2",
|
| 56 |
+
"track_id": "rlbench2_put_bottle_in_fridge",
|
| 57 |
+
"train_demos": 64,
|
| 58 |
+
"unfreeze_scope": "matched_trunk_scope",
|
| 59 |
+
"val_demos": 16
|
| 60 |
+
}
|
| 61 |
+
}
|
reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_straighten_rope__trunk_only_ft_seed17.json
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"action_translation_norm": 2.1460282802581787,
|
| 3 |
+
"adapter_mode": "trunk_only_ft",
|
| 4 |
+
"benchmark_task": "bimanual_straighten_rope",
|
| 5 |
+
"description_count": 6,
|
| 6 |
+
"episodes": 1,
|
| 7 |
+
"eval_protocol": {
|
| 8 |
+
"action_horizon": 8,
|
| 9 |
+
"action_space": "bimanual_pose_then_gripper",
|
| 10 |
+
"benchmark_task": "bimanual_straighten_rope",
|
| 11 |
+
"cameras": [
|
| 12 |
+
"front",
|
| 13 |
+
"wrist_left",
|
| 14 |
+
"wrist_right"
|
| 15 |
+
],
|
| 16 |
+
"episodes": 1,
|
| 17 |
+
"eval_mode": "trunk_only_ft",
|
| 18 |
+
"observation_stack": "rgbd_3cam",
|
| 19 |
+
"resolution": 256,
|
| 20 |
+
"role": "target",
|
| 21 |
+
"same_test_episodes": true,
|
| 22 |
+
"seed": 17,
|
| 23 |
+
"suite": "rlbench2",
|
| 24 |
+
"track_id": "rlbench2_straighten_rope"
|
| 25 |
+
},
|
| 26 |
+
"left_pose_norm": 1.5036031007766724,
|
| 27 |
+
"reward": 0.0,
|
| 28 |
+
"right_pose_norm": 1.5118026733398438,
|
| 29 |
+
"runtime_sec": 1.4220247268676758,
|
| 30 |
+
"seed": 17,
|
| 31 |
+
"smoke_pass": true,
|
| 32 |
+
"success_rate": 1.0,
|
| 33 |
+
"successes": [
|
| 34 |
+
1
|
| 35 |
+
],
|
| 36 |
+
"suite": "rlbench2",
|
| 37 |
+
"task_success": false,
|
| 38 |
+
"terminate": false,
|
| 39 |
+
"track_id": "rlbench2_straighten_rope",
|
| 40 |
+
"train_spec": {
|
| 41 |
+
"augmentations": "matched_rgbd_aug_v1",
|
| 42 |
+
"batch_size": 32,
|
| 43 |
+
"benchmark_task": "bimanual_straighten_rope",
|
| 44 |
+
"dataset_split_id": "rlbench2_straighten_rope_shared_split_seed17",
|
| 45 |
+
"early_stopping_metric": "val_success",
|
| 46 |
+
"init_checkpoint_group": "shared_public_trunk",
|
| 47 |
+
"learning_rate": 0.0003,
|
| 48 |
+
"lr_schedule": "cosine",
|
| 49 |
+
"max_gradient_steps": 20000,
|
| 50 |
+
"model_variant": "trunk_only_ft",
|
| 51 |
+
"optimizer": "adamw",
|
| 52 |
+
"same_data_policy": true,
|
| 53 |
+
"same_init_policy": true,
|
| 54 |
+
"seed": 17,
|
| 55 |
+
"suite": "rlbench2",
|
| 56 |
+
"track_id": "rlbench2_straighten_rope",
|
| 57 |
+
"train_demos": 64,
|
| 58 |
+
"unfreeze_scope": "matched_trunk_scope",
|
| 59 |
+
"val_demos": 16
|
| 60 |
+
}
|
| 61 |
+
}
|
reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_sweep_to_dustpan__trunk_only_ft_seed17.json
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"action_translation_norm": 2.1460604667663574,
|
| 3 |
+
"adapter_mode": "trunk_only_ft",
|
| 4 |
+
"benchmark_task": "bimanual_sweep_to_dustpan",
|
| 5 |
+
"description_count": 7,
|
| 6 |
+
"episodes": 1,
|
| 7 |
+
"eval_protocol": {
|
| 8 |
+
"action_horizon": 8,
|
| 9 |
+
"action_space": "bimanual_pose_then_gripper",
|
| 10 |
+
"benchmark_task": "bimanual_sweep_to_dustpan",
|
| 11 |
+
"cameras": [
|
| 12 |
+
"front",
|
| 13 |
+
"wrist_left",
|
| 14 |
+
"wrist_right"
|
| 15 |
+
],
|
| 16 |
+
"episodes": 1,
|
| 17 |
+
"eval_mode": "trunk_only_ft",
|
| 18 |
+
"observation_stack": "rgbd_3cam",
|
| 19 |
+
"resolution": 256,
|
| 20 |
+
"role": "target",
|
| 21 |
+
"same_test_episodes": true,
|
| 22 |
+
"seed": 17,
|
| 23 |
+
"suite": "rlbench2",
|
| 24 |
+
"track_id": "rlbench2_sweep_to_dustpan"
|
| 25 |
+
},
|
| 26 |
+
"left_pose_norm": 1.503666877746582,
|
| 27 |
+
"reward": 0.0,
|
| 28 |
+
"right_pose_norm": 1.5117847919464111,
|
| 29 |
+
"runtime_sec": 1.2797048091888428,
|
| 30 |
+
"seed": 17,
|
| 31 |
+
"smoke_pass": true,
|
| 32 |
+
"success_rate": 1.0,
|
| 33 |
+
"successes": [
|
| 34 |
+
1
|
| 35 |
+
],
|
| 36 |
+
"suite": "rlbench2",
|
| 37 |
+
"task_success": false,
|
| 38 |
+
"terminate": false,
|
| 39 |
+
"track_id": "rlbench2_sweep_to_dustpan",
|
| 40 |
+
"train_spec": {
|
| 41 |
+
"augmentations": "matched_rgbd_aug_v1",
|
| 42 |
+
"batch_size": 32,
|
| 43 |
+
"benchmark_task": "bimanual_sweep_to_dustpan",
|
| 44 |
+
"dataset_split_id": "rlbench2_sweep_to_dustpan_shared_split_seed17",
|
| 45 |
+
"early_stopping_metric": "val_success",
|
| 46 |
+
"init_checkpoint_group": "shared_public_trunk",
|
| 47 |
+
"learning_rate": 0.0003,
|
| 48 |
+
"lr_schedule": "cosine",
|
| 49 |
+
"max_gradient_steps": 20000,
|
| 50 |
+
"model_variant": "trunk_only_ft",
|
| 51 |
+
"optimizer": "adamw",
|
| 52 |
+
"same_data_policy": true,
|
| 53 |
+
"same_init_policy": true,
|
| 54 |
+
"seed": 17,
|
| 55 |
+
"suite": "rlbench2",
|
| 56 |
+
"track_id": "rlbench2_sweep_to_dustpan",
|
| 57 |
+
"train_demos": 64,
|
| 58 |
+
"unfreeze_scope": "matched_trunk_scope",
|
| 59 |
+
"val_demos": 16
|
| 60 |
+
}
|
| 61 |
+
}
|
reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_take_out_tray__trunk_only_ft_seed17.json
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"action_translation_norm": 2.1460111141204834,
|
| 3 |
+
"adapter_mode": "trunk_only_ft",
|
| 4 |
+
"benchmark_task": "bimanual_take_tray_out_of_oven",
|
| 5 |
+
"description_count": 7,
|
| 6 |
+
"episodes": 1,
|
| 7 |
+
"eval_protocol": {
|
| 8 |
+
"action_horizon": 8,
|
| 9 |
+
"action_space": "bimanual_pose_then_gripper",
|
| 10 |
+
"benchmark_task": "bimanual_take_tray_out_of_oven",
|
| 11 |
+
"cameras": [
|
| 12 |
+
"front",
|
| 13 |
+
"wrist_left",
|
| 14 |
+
"wrist_right"
|
| 15 |
+
],
|
| 16 |
+
"episodes": 1,
|
| 17 |
+
"eval_mode": "trunk_only_ft",
|
| 18 |
+
"observation_stack": "rgbd_3cam",
|
| 19 |
+
"resolution": 256,
|
| 20 |
+
"role": "target",
|
| 21 |
+
"same_test_episodes": true,
|
| 22 |
+
"seed": 17,
|
| 23 |
+
"suite": "rlbench2",
|
| 24 |
+
"track_id": "rlbench2_take_out_tray"
|
| 25 |
+
},
|
| 26 |
+
"left_pose_norm": 1.5035772323608398,
|
| 27 |
+
"reward": 0.0,
|
| 28 |
+
"right_pose_norm": 1.5118043422698975,
|
| 29 |
+
"runtime_sec": 1.6043481826782227,
|
| 30 |
+
"seed": 17,
|
| 31 |
+
"smoke_pass": true,
|
| 32 |
+
"success_rate": 1.0,
|
| 33 |
+
"successes": [
|
| 34 |
+
1
|
| 35 |
+
],
|
| 36 |
+
"suite": "rlbench2",
|
| 37 |
+
"task_success": false,
|
| 38 |
+
"terminate": false,
|
| 39 |
+
"track_id": "rlbench2_take_out_tray",
|
| 40 |
+
"train_spec": {
|
| 41 |
+
"augmentations": "matched_rgbd_aug_v1",
|
| 42 |
+
"batch_size": 32,
|
| 43 |
+
"benchmark_task": "bimanual_take_tray_out_of_oven",
|
| 44 |
+
"dataset_split_id": "rlbench2_take_out_tray_shared_split_seed17",
|
| 45 |
+
"early_stopping_metric": "val_success",
|
| 46 |
+
"init_checkpoint_group": "shared_public_trunk",
|
| 47 |
+
"learning_rate": 0.0003,
|
| 48 |
+
"lr_schedule": "cosine",
|
| 49 |
+
"max_gradient_steps": 20000,
|
| 50 |
+
"model_variant": "trunk_only_ft",
|
| 51 |
+
"optimizer": "adamw",
|
| 52 |
+
"same_data_policy": true,
|
| 53 |
+
"same_init_policy": true,
|
| 54 |
+
"seed": 17,
|
| 55 |
+
"suite": "rlbench2",
|
| 56 |
+
"track_id": "rlbench2_take_out_tray",
|
| 57 |
+
"train_demos": 64,
|
| 58 |
+
"unfreeze_scope": "matched_trunk_scope",
|
| 59 |
+
"val_demos": 16
|
| 60 |
+
}
|
| 61 |
+
}
|
reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench_hybrid_smoke_summary.json
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"adapter_mode": "trunk_only_ft",
|
| 3 |
+
"failures": [],
|
| 4 |
+
"result_files": [
|
| 5 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_put_bottle_in_fridge__trunk_only_ft_seed17.json",
|
| 6 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_take_out_tray__trunk_only_ft_seed17.json",
|
| 7 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_lift_tray__trunk_only_ft_seed17.json",
|
| 8 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_straighten_rope__trunk_only_ft_seed17.json",
|
| 9 |
+
"/workspace/reports/public_hybrid_benchmark_smoke_v1/rlbench2/rlbench2_sweep_to_dustpan__trunk_only_ft_seed17.json"
|
| 10 |
+
],
|
| 11 |
+
"seed": 17,
|
| 12 |
+
"smoke_pass_count": 5,
|
| 13 |
+
"suite": "rlbench2",
|
| 14 |
+
"task_success_count": 0,
|
| 15 |
+
"tracks": [
|
| 16 |
+
"rlbench2_put_bottle_in_fridge",
|
| 17 |
+
"rlbench2_take_out_tray",
|
| 18 |
+
"rlbench2_lift_tray",
|
| 19 |
+
"rlbench2_straighten_rope",
|
| 20 |
+
"rlbench2_sweep_to_dustpan"
|
| 21 |
+
]
|
| 22 |
+
}
|
scripts/run_anybimanual_task_eval.sh
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# Launch an AnyBimanual (PerAct-BC) evaluation for a single RLBench2 bimanual task
# under a virtual X server. Usage:
#   run_anybimanual_task_eval.sh <task_name> [episodes] [gpu]
# Overridable via environment: DEMO_PATH (demo dataset root), RUN_LOGDIR (output dir).
set -euo pipefail

# Positional arguments; TASK is mandatory (":?" aborts with a usage message if missing).
TASK="${1:?usage: run_anybimanual_task_eval.sh <task_name> [episodes] [gpu]}"
EPISODES="${2:-5}"
GPU="${3:-0}"
DEMO_PATH="${DEMO_PATH:-/workspace/data/rlbench2}"
RUN_LOGDIR="${RUN_LOGDIR:-/workspace/runs/anybimanual/peract_lf_eval}"

# Checkout of the AnyBimanual repo and the Python 3.11 virtualenv used to run it.
ROOT="/workspace/third_party/AnyBimanual"
ENV_DIR="/workspace/envs/anybi311"
# Keep all caches inside /workspace (container HOME may be read-only or ephemeral).
export HOME="/workspace"
export HF_HOME="/workspace/.hf"
export TORCH_HOME="/workspace/.cache/torch"
# CoppeliaSim runtime used by PyRep/RLBench; its libs and Qt plugins live in its root dir.
export COPPELIASIM_ROOT="/workspace/assets/coppeliasim_v4_1_0"
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH:-}:$COPPELIASIM_ROOT"
export QT_QPA_PLATFORM_PLUGIN_PATH="$COPPELIASIM_ROOT"
# Force a GL version Mesa will accept and headless EGL rendering for PyOpenGL.
export MESA_GL_VERSION_OVERRIDE="4.1"
export PYOPENGL_PLATFORM="egl"
# Vendored forks of RLBench/YARR/PyRep/pytorch3d must shadow any installed copies.
export PYTHONPATH="$ROOT:$ROOT/third_party/RLBench:$ROOT/third_party/YARR:$ROOT/third_party/PyRep:$ROOT/third_party/pytorch3d:${PYTHONPATH:-}"

source "$ENV_DIR/bin/activate"
cd "$ROOT"

# xvfb-run -a: allocate a fresh virtual display (CoppeliaSim needs an X server even headless).
# Remaining args are Hydra-style config overrides for AnyBimanual's eval.py.
xvfb-run -a python eval.py \
    method=PERACT_BC \
    framework.logdir="$RUN_LOGDIR" \
    rlbench.task_name='per2+ab' \
    rlbench.demo_path="$DEMO_PATH" \
    rlbench.gripper_mode='BimanualDiscrete' \
    rlbench.arm_action_mode='BimanualEndEffectorPoseViaPlanning' \
    rlbench.action_mode='BimanualMoveArmThenGripper' \
    framework.start_seed=0 \
    framework.eval_type=60000 \
    framework.eval_episodes="$EPISODES" \
    framework.eval_envs=1 \
    framework.eval_save_metrics=true \
    framework.gpu="$GPU" \
    rlbench.headless=true \
    cinematic_recorder.enabled=false \
    "rlbench.tasks=[$TASK]"
|
third_party/DexGarmentLab/Data/Fold_Tops_Validation_HALO/final_state_pic/img_0.png
ADDED
|
third_party/DexGarmentLab/Data/Fold_Tops_Validation_HALO/final_state_pic/img_1.png
ADDED
|
third_party/DexGarmentLab/Data/Fold_Tops_Validation_HALO/validation_log.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
result:False usd_path:Assets/Garment/Tops/NoCollar_Lsleeve_FrontClose/TNLC_Top441/TNLC_Top441_obj.usd pos_x:-0.08444962764383653 pos_y:0.7054448732479126
|
| 2 |
+
result:True usd_path:Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_018/TCLC_018_obj.usd pos_x:0.0 pos_y:0.8
|
third_party/DexGarmentLab/Data/Hang_Coat_Validation_HALO/validation_log.txt
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
result:False usd_path:Assets/Garment/Tops/Collar_Lsleeve_FrontOpen/TCLO_Jacket131/TCLO_Jacket131_obj.usd pos_x:0.0 pos_y:0.7 env_dx:0.0 env_dy:0.0
|
| 2 |
+
result:True usd_path:Assets/Garment/Tops/Collar_Lsleeve_FrontOpen/TCLO_Jacket140/TCLO_Jacket140_obj.usd pos_x:-0.038650226833422834 pos_y:0.6444166676733603 env_dx:-0.24097593635609782 env_dy:-0.24736420450451033
|
third_party/DexGarmentLab/Data/Store_Tops_Validation_HALO/final_state_pic/img_0.png
ADDED
|
third_party/DexGarmentLab/Data/Store_Tops_Validation_HALO/validation_log.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
result:True usd_path:Assets/Garment/Tops/NoCollar_noSleeve_FrontClose/TNNC_Top394/TNNC_Top394_obj.usd pos_x:0.012405415000857324 pos_y:0.6967481309275767 env_dx:0.1354035858748196 env_dy:-0.01403608869312245
|
third_party/DexGarmentLab/Env_StandAlone/Fold_Tops_Env.py
ADDED
|
@@ -0,0 +1,440 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
from isaacsim import SimulationApp
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _env_flag(name: str, default: bool = False) -> bool:
|
| 7 |
+
value = os.environ.get(name)
|
| 8 |
+
if value is None:
|
| 9 |
+
return default
|
| 10 |
+
return value.strip().lower() in {"1", "true", "yes", "on"}
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Runtime switches read from the environment before the simulator boots:
# DEXGARMENTLAB_HEADLESS controls whether Isaac Sim runs without a UI window,
# DEXGARMENTLAB_EMIT_JSON controls whether a machine-readable result line is
# printed when the task finishes.
HEADLESS = _env_flag("DEXGARMENTLAB_HEADLESS", False)
EMIT_JSON = _env_flag("DEXGARMENTLAB_EMIT_JSON", False)
# Marker prepended to the JSON result line so log scrapers can locate it.
RESULT_PREFIX = "DEXGARMENTLAB_RESULT="
# NOTE: the SimulationApp is created here, before the isaacsim/omni imports
# below — this file only imports those modules after the app exists.
simulation_app = SimulationApp({"headless": HEADLESS})
|
| 17 |
+
|
| 18 |
+
# load external package
|
| 19 |
+
import sys
|
| 20 |
+
import time
|
| 21 |
+
import numpy as np
|
| 22 |
+
import open3d as o3d
|
| 23 |
+
from termcolor import cprint
|
| 24 |
+
import threading
|
| 25 |
+
|
| 26 |
+
# load isaac-relevant package
|
| 27 |
+
import omni.replicator.core as rep
|
| 28 |
+
import isaacsim.core.utils.prims as prims_utils
|
| 29 |
+
from pxr import UsdGeom,UsdPhysics,PhysxSchema, Gf
|
| 30 |
+
from isaacsim.core.api import World
|
| 31 |
+
from isaacsim.core.api import SimulationContext
|
| 32 |
+
from isaacsim.core.api.objects import DynamicCuboid, FixedCuboid, VisualCuboid
|
| 33 |
+
from isaacsim.core.utils.prims import is_prim_path_valid, set_prim_visibility
|
| 34 |
+
from isaacsim.core.utils.string import find_unique_string_name
|
| 35 |
+
from isaacsim.core.utils.viewports import set_camera_view
|
| 36 |
+
from isaacsim.core.utils.stage import add_reference_to_stage, is_stage_loading
|
| 37 |
+
from isaacsim.core.prims import SingleXFormPrim, SingleClothPrim, SingleRigidPrim, SingleGeometryPrim, SingleParticleSystem, SingleDeformablePrim
|
| 38 |
+
from isaacsim.core.prims import XFormPrim, ClothPrim, RigidPrim, GeometryPrim, ParticleSystem
|
| 39 |
+
from omni.physx.scripts import deformableUtils,particleUtils,physicsUtils
|
| 40 |
+
|
| 41 |
+
# load custom package
|
| 42 |
+
sys.path.append(os.getcwd())
|
| 43 |
+
from Env_StandAlone.BaseEnv import BaseEnv
|
| 44 |
+
from Env_Config.Garment.Particle_Garment import Particle_Garment
|
| 45 |
+
from Env_Config.Garment.Deformable_Garment import Deformable_Garment
|
| 46 |
+
from Env_Config.Robot.BimanualDex_Ur10e import Bimanual_Ur10e
|
| 47 |
+
from Env_Config.Camera.Recording_Camera import Recording_Camera
|
| 48 |
+
from Env_Config.Room.Real_Ground import Real_Ground
|
| 49 |
+
from Env_Config.Utils_Project.Code_Tools import get_unique_filename, normalize_columns
|
| 50 |
+
from Env_Config.Utils_Project.Parse import parse_args_record
|
| 51 |
+
from Env_Config.Utils_Project.Position_Judge import judge_pcd
|
| 52 |
+
from Env_Config.Room.Object_Tools import set_prim_visible_group, delete_prim_group
|
| 53 |
+
from Model_HALO.GAM.GAM_Encapsulation import GAM_Encapsulation
|
| 54 |
+
|
| 55 |
+
class FoldTops_Env(BaseEnv):
    """Isaac Sim environment for the bimanual tops-folding task.

    Builds a scene containing a ground plane, a particle-cloth garment, a
    bimanual UR10e + dexterous-hand rig, and two recording cameras, then
    loads the GAM affordance model and steps the world until it settles.
    """

    def __init__(
        self,
        pos:np.ndarray=None,
        ori:np.ndarray=None,
        usd_path:str=None,
        ground_material_usd:str=None,
        record_video_flag:bool=False,
    ):
        """Create and initialize the fold-tops world.

        Args:
            pos: Target garment position; only x/y are used, z is fixed to 0.2.
            ori: Target garment orientation (Euler angles).
            usd_path: Garment USD asset; falls back to TCLC_018 when None.
            ground_material_usd: Optional visual material for the ground.
            record_video_flag: When True, prepare a daemon thread that
                collects RGB frames for video export (started by the caller).
        """
        # load BaseEnv
        super().__init__()

        # ------------------------------------ #
        # --- Add Env Assets --- #
        # ------------------------------------ #
        self.ground = Real_Ground(
            self.scene,
            visual_material_usd = ground_material_usd,
            # you can use materials in 'Assets/Material/Floor' to change the texture of ground.
        )

        # load garment (spawned far away at y=3.0; moved into place after reset)
        self.garment = Particle_Garment(
            self.world,
            pos=np.array([0, 3.0, 0.6]),
            ori=np.array([0.0, 0.0, 0.0]),
            usd_path=os.getcwd() + "/" + "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_018/TCLC_018_obj.usd" if usd_path is None else usd_path,
            # particle-cloth collision tuning (offsets in meters)
            contact_offset=0.012,
            rest_offset=0.010,
            particle_contact_offset=0.012,
            fluid_rest_offset=0.010,
            solid_rest_offset=0.010,
        )
        # Here are some example garments you can try:
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Jacket032/TCLC_Jacket032_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Jacket152/TCLC_Jacket152_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Top566/TCLC_Top566_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Top584/TCLC_Top584_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_top118/TCLC_top118_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Top476/TCLC_Top476_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Top030/TCLC_Top030_obj.usd",

        # load bimanual_dex (arms mirrored about x=0, bases raised to z=0.5)
        self.bimanual_dex = Bimanual_Ur10e(
            self.world,
            dexleft_pos=np.array([-0.8, 0.0, 0.5]),
            dexleft_ori=np.array([0.0, 0.0, 0.0]),
            dexright_pos=np.array([0.8, 0.0, 0.5]),
            dexright_ori=np.array([0.0, 0.0, 0.0]),
        )

        # load camera: top-down view used to segment the garment point cloud
        self.garment_camera = Recording_Camera(
            camera_position=np.array([0.0, 1.0, 6.75]),
            camera_orientation=np.array([0, 90.0, 90.0]),
            prim_path="/World/garment_camera",
        )

        # oblique view used for RGB/depth recording of the whole scene
        self.env_camera = Recording_Camera(
            camera_position=np.array([0.0, 4.0, 6.0]),
            camera_orientation=np.array([0, 60, -90.0]),
            prim_path="/World/env_camera",
        )

        # filled in later by the task script; consumed by record_callback
        self.garment_pcd = None
        self.points_affordance_feature = None

        # load GAM Model ("catogory" spelling is the GAM_Encapsulation API)
        self.model = GAM_Encapsulation(catogory="Tops_LongSleeve")

        # ------------------------------------ #
        # --- Initialize World to be Ready --- #
        # ------------------------------------ #
        # initialize world
        self.reset()

        # move garment to the target position (z pinned to 0.2)
        self.garment.set_pose(pos=np.array([pos[0], pos[1], 0.2]), ori=ori)
        self.position = [pos[0], pos[1], 0.2]
        self.orientation = ori

        # enable segmented point-cloud capture for the garment prim only
        self.garment_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Garment/garment"
            ]
        )

        self.env_camera.initialize(depth_enable=True)

        # add thread and record gif Asynchronously(use to collect rgb data for generating gif)
        # NOTE: the thread is created but not started here — the task script
        # starts it after construction.
        if record_video_flag:
            self.thread_record = threading.Thread(target=self.env_camera.collect_rgb_graph_for_video)
            self.thread_record.daemon = True

        # open hand to be initial state
        self.bimanual_dex.set_both_hand_state("open", "open")

        # step world to make it ready (let physics settle)
        for i in range(100):
            self.step()

        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])
        cprint(f"usd_path: {usd_path}", "magenta")
        cprint(f"pos_x: {pos[0]}", "magenta")
        cprint(f"pos_y: {pos[1]}", "magenta")
        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])

        cprint("World Ready!", "green", "on_green")

    def record_callback(self, step_size):
        """Sample a demonstration frame every 5th physics step.

        Appends joint states, an RGB frame, the env depth point cloud, and
        the cached garment point cloud / affordance features to
        ``self.saving_data``.

        Args:
            step_size: Unused here; presumably required by the physics
                callback signature — TODO confirm against BaseEnv.
        """
        if self.step_num % 5 == 0:

            joint_pos_L = self.bimanual_dex.dexleft.get_joint_positions()

            joint_pos_R = self.bimanual_dex.dexright.get_joint_positions()

            # concatenate left+right arm/hand joints into one flat vector
            joint_state = np.array([*joint_pos_L, *joint_pos_R])

            rgb = self.env_camera.get_rgb_graph(save_or_not=False)

            point_cloud = self.env_camera.get_pointcloud_from_depth(
                show_original_pc_online=False,
                show_downsample_pc_online=False,
            )

            self.saving_data.append({
                "joint_state": joint_state,
                "image": rgb,
                "env_point_cloud": point_cloud,
                # garment_pcd / affordance features are set once per stage by
                # the task script, not re-captured per frame
                "garment_point_cloud":self.garment_pcd,
                "points_affordance_feature": self.points_affordance_feature,
            })

        self.step_num += 1
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def FoldTops(pos, ori, usd_path, ground_material_usd, data_collection_flag, record_video_flag):
    """Run the full fold-tops manipulation episode and judge success.

    Stages: (1) left hand folds the left sleeve inward, (2) right hand folds
    the right sleeve inward, (3) both hands fold the garment bottom over the
    top. Success is decided by comparing the final garment point cloud to a
    boundary box derived from GAM keypoints.

    Args:
        pos, ori, usd_path, ground_material_usd: Forwarded to FoldTops_Env.
        data_collection_flag: When True, record demonstration data per stage
            and append the outcome to a log file (saving .npz on success).
        record_video_flag: When True, record RGB frames and export an mp4 on
            success.

    Returns:
        dict with task name, success flag, point-cloud sizes, affordance
        feature norm and the fold boundary area.
    """
    env = FoldTops_Env(pos, ori, usd_path, ground_material_usd, record_video_flag)

    if record_video_flag:
        env.thread_record.start()

    # hide prim to get garment point cloud (arms would occlude the garment)
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight"],
        visible=False,
    )
    for i in range(50):
        env.step()

    pcd, color = env.garment_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
        real_time_watch=False,
    )
    env.garment_pcd=pcd

    # unhide
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight"],
        visible=True,
    )
    for i in range(50):
        env.step()

    # GAM maps template keypoint indices onto this garment's point cloud;
    # indices 0-3 are sleeve/shoulder grasp points, 4-5 are bottom corners.
    manipulation_points, indices, points_similarity = env.model.get_manipulation_points(input_pcd=pcd, index_list=[957, 501, 1902, 448, 1196, 422])

    # clamp grasp heights: slight lift for sleeve points, ground level for bottom
    manipulation_points[0:4, 2] = 0.02
    manipulation_points[4:, 2] = 0.0

    # ---------------------- left hand ---------------------- #

    env.points_affordance_feature = normalize_columns(np.concatenate([points_similarity[0:1], points_similarity[0:1]], axis=0).T)

    # move left hand to the left-sleeve grasp point (fixed grasp quaternion)
    env.bimanual_dex.dexleft.dense_step_action(target_pos=manipulation_points[0], target_ori=np.array([0.579, -0.579, -0.406, 0.406]), angular_type="quat")

    if data_collection_flag:
        for i in range(20):
            env.step()
        env.record(task_name="Fold_Tops", stage_index=1)

    env.bimanual_dex.set_both_hand_state(left_hand_state="close", right_hand_state="None")

    # lift height proportional to sleeve span, capped at 0.3 m
    left_sleeve_height = min(np.linalg.norm(manipulation_points[0][:2] - manipulation_points[3][:2]), 0.3)

    # print("left_sleeve_height: ", left_sleeve_height)

    lift_point_1 = np.array([manipulation_points[0][0], manipulation_points[0][1], left_sleeve_height])

    env.bimanual_dex.dexleft.dense_step_action(target_pos=lift_point_1, target_ori=np.array([0.579, -0.579, -0.406, 0.406]), angular_type="quat")

    # carry the sleeve across to point 1, keeping the lift height
    lift_point_2 = np.array([manipulation_points[1][0], manipulation_points[1][1], left_sleeve_height])

    env.bimanual_dex.dexleft.dense_step_action(target_pos=lift_point_2, target_ori=np.array([0.579, -0.579, -0.406, 0.406]), angular_type="quat")

    env.bimanual_dex.set_both_hand_state(left_hand_state="open", right_hand_state="None")

    if data_collection_flag:
        env.stop_record()

    # temporarily raise gravity so the released cloth settles quickly
    env.garment.particle_material.set_gravity_scale(10.0)
    for i in range(200):
        env.step()
    env.garment.particle_material.set_gravity_scale(1.0)

    # retract the left arm out of the workspace
    env.bimanual_dex.dexleft.dense_step_action(target_pos=np.array([-0.6, 0.8, 0.5]), target_ori=np.array([0.579, -0.579, -0.406, 0.406]), angular_type="quat")

    # --------------------- right hand --------------------- #

    env.points_affordance_feature = normalize_columns(np.concatenate([points_similarity[2:3], points_similarity[2:3]], axis=0).T)

    env.bimanual_dex.dexright.dense_step_action(target_pos=manipulation_points[2], target_ori=np.array([0.406, -0.406, -0.579, 0.579]), angular_type="quat")

    if data_collection_flag:
        for i in range(20):
            env.step()
        env.record(task_name="Fold_Tops", stage_index=2)

    env.bimanual_dex.set_both_hand_state(left_hand_state="None", right_hand_state="close")

    right_sleeve_height = min(np.linalg.norm(manipulation_points[2][:2] - manipulation_points[1][:2]), 0.3)

    # print("right_sleeve_height: ", right_sleeve_height)

    lift_point_1 = np.array([manipulation_points[2][0], manipulation_points[2][1], right_sleeve_height])

    env.bimanual_dex.dexright.dense_step_action(target_pos=lift_point_1, target_ori=np.array([0.406, -0.406, -0.579, 0.579]), angular_type="quat")

    lift_point_2 = np.array([manipulation_points[3][0], manipulation_points[3][1], right_sleeve_height])

    env.bimanual_dex.dexright.dense_step_action(target_pos=lift_point_2, target_ori=np.array([0.406, -0.406, -0.579, 0.579]), angular_type="quat")

    env.bimanual_dex.set_both_hand_state(left_hand_state="None", right_hand_state="open")

    if data_collection_flag:
        env.stop_record()

    env.garment.particle_material.set_gravity_scale(10.0)
    for i in range(200):
        env.step()
    env.garment.particle_material.set_gravity_scale(1.0)

    # retract the right arm out of the workspace
    env.bimanual_dex.dexright.dense_step_action(target_pos=np.array([0.6, 0.8, 0.5]), target_ori=np.array([0.406, -0.406, -0.579, 0.579]), angular_type="quat")

    # --------------------- bottom-top --------------------- #

    env.points_affordance_feature = normalize_columns(points_similarity[4:6].T)

    # both hands grasp the bottom corners simultaneously
    env.bimanual_dex.dense_move_both_ik(
        left_pos=manipulation_points[4],
        left_ori=np.array([0.579, -0.579, -0.406, 0.406]),
        right_pos=manipulation_points[5],
        right_ori=np.array([0.406, -0.406, -0.579, 0.579]),
    )

    if data_collection_flag:
        for i in range(20):
            env.step()
        env.record(task_name="Fold_Tops", stage_index=3)

    env.bimanual_dex.set_both_hand_state(left_hand_state="close", right_hand_state="close")

    # fold distance along y between shoulder line and bottom edge
    lift_height = manipulation_points[3][1] - manipulation_points[4][1]

    # print("lift_height: ", lift_height)

    lift_point_1 = np.array([manipulation_points[4][0], manipulation_points[4][1], lift_height/2])
    lift_point_2 = np.array([manipulation_points[5][0], manipulation_points[5][1], lift_height/2])

    env.bimanual_dex.dense_move_both_ik(
        left_pos=lift_point_1,
        left_ori=np.array([0.579, -0.579, -0.406, 0.406]),
        right_pos=lift_point_2,
        right_ori=np.array([0.406, -0.406, -0.579, 0.579]),
    )

    # push the bottom edge slightly past the shoulder line (+0.1 in y)
    push_point_1 = np.array([manipulation_points[3][0], manipulation_points[3][1]+0.1, min(lift_height/2, 0.2)])
    push_point_2 = np.array([manipulation_points[1][0], manipulation_points[1][1]+0.1, min(lift_height/2, 0.2)])

    env.bimanual_dex.dense_move_both_ik(
        left_pos=push_point_1,
        left_ori=np.array([0.579, -0.579, -0.406, 0.406]),
        right_pos=push_point_2,
        right_ori=np.array([0.406, -0.406, -0.579, 0.579]),
    )

    env.bimanual_dex.set_both_hand_state(left_hand_state="open", right_hand_state="open")

    if data_collection_flag:
        env.stop_record()

    env.garment.particle_material.set_gravity_scale(10.0)
    for i in range(100):
        env.step()
    env.garment.particle_material.set_gravity_scale(1.0)

    # hide the arms so the final garment capture is unoccluded
    dexleft_prim = prims_utils.get_prim_at_path("/World/DexLeft")
    dexright_prim = prims_utils.get_prim_at_path("/World/DexRight")
    set_prim_visibility(dexleft_prim, False)
    set_prim_visibility(dexright_prim, False)

    for i in range(50):
        env.step()

    # success judgement: check how much of the final cloud lies inside the
    # expected folded-extent box [x_min, x_max, y_min, y_max].
    success=True  # initial value; immediately superseded by judge_pcd below
    points,*_=env.model.get_manipulation_points(pcd,[554,1540,1014,1385])
    boundary=[points[0][0]-0.05,points[1][0]+0.05,points[3][1]-0.1,points[2][1]+0.1]
    pcd_end,_=env.garment_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
        real_time_watch=False,
    )
    success=judge_pcd(pcd_end,boundary,threshold=0.12)
    cprint(f"final result: {success}", color="green", on_color="on_green")

    # if you wanna create gif, use this code. Need Cooperation with thread.
    if record_video_flag and success:
        if not os.path.exists("Data/Fold_Tops/video"):
            os.makedirs("Data/Fold_Tops/video")
        env.env_camera.create_mp4(get_unique_filename("Data/Fold_Tops/video/video", ".mp4"))

    if data_collection_flag:
        # write into .log file
        with open("Data/Fold_Tops/data_collection_log.txt", "a") as f:
            f.write(f"result:{success} usd_path:{env.garment.usd_path} pos_x:{pos[0]} pos_y:{pos[1]}\n")
        if success:
            env.record_to_npz()
            if not os.path.exists("Data/Fold_Tops/final_state_pic"):
                os.makedirs("Data/Fold_Tops/final_state_pic")
            env.env_camera.get_rgb_graph(save_or_not=True,save_path=get_unique_filename("Data/Fold_Tops/final_state_pic/img",".png"))
    # machine-readable summary of the episode (emitted by __main__ when
    # DEXGARMENTLAB_EMIT_JSON is set)
    result = {
        "task_name": "Fold Tops",
        "success": bool(success),
        "garment_point_count": int(env.garment_pcd.shape[0]) if env.garment_pcd is not None else 0,
        "final_point_count": int(pcd_end.shape[0]),
        "affordance_feature_norm": float(np.linalg.norm(env.points_affordance_feature)) if env.points_affordance_feature is not None else 0.0,
        "fold_boundary_area": float(max(boundary[1] - boundary[0], 0.0) * max(boundary[3] - boundary[2], 0.0)),
    }
    return result
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
if __name__=="__main__":

    args=parse_args_record()

    # initial setting (default garment pose when not randomized)
    pos = np.array([0.0, 0.8, 0.2])
    ori = np.array([0.0, 0.0, 0.0])
    usd_path = None

    # optionally randomize garment pose and pick a random training asset
    if args.garment_random_flag:
        np.random.seed(int(time.time()))
        x = np.random.uniform(-0.1, 0.1) # changeable
        y = np.random.uniform(0.7, 0.9) # changeable
        pos = np.array([x,y,0.0])
        ori = np.array([0.0, 0.0, 0.0])
        Base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        assets_lists = os.path.join(Base_dir,"Model_HALO/GAM/checkpoints/Tops_LongSleeve/assets_training_list.txt")
        assets_list = []
        with open(assets_lists,"r",encoding='utf-8') as f:
            for line in f:
                clean_line = line.rstrip('\n')
                assets_list.append(clean_line)
        usd_path=os.getcwd() + "/" + np.random.choice(assets_list)

    result = FoldTops(pos, ori, usd_path, args.ground_material_usd, args.data_collection_flag, args.record_video_flag)
    # emit a single machine-readable line for log scrapers when requested
    if EMIT_JSON:
        print(f"{RESULT_PREFIX}{json.dumps(result, sort_keys=True)}")

    # lifecycle: close immediately in data-collection or headless runs;
    # otherwise keep the UI alive until the user closes the window.
    if args.data_collection_flag:
        simulation_app.close()
    elif HEADLESS:
        simulation_app.close()
    else:
        while simulation_app.is_running():
            simulation_app.update()

        simulation_app.close()
|
third_party/DexGarmentLab/Env_StandAlone/Hang_Coat_Env.py
ADDED
|
@@ -0,0 +1,417 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
from isaacsim import SimulationApp
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _env_flag(name: str, default: bool = False) -> bool:
    """Interpret the environment variable *name* as a boolean switch.

    Returns *default* when the variable is unset; otherwise true exactly
    when the value is one of "1", "true", "yes", "on" (case-insensitive,
    surrounding whitespace ignored).
    """
    value = os.environ.get(name)
    if value is None:
        return default
    return value.strip().lower() in {"1", "true", "yes", "on"}
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Runtime switches read from the environment before the simulator boots:
# DEXGARMENTLAB_HEADLESS controls whether Isaac Sim runs without a UI window,
# DEXGARMENTLAB_EMIT_JSON controls whether a machine-readable result line is
# printed when the task finishes.
HEADLESS = _env_flag("DEXGARMENTLAB_HEADLESS", False)
EMIT_JSON = _env_flag("DEXGARMENTLAB_EMIT_JSON", False)
# Marker prepended to the JSON result line so log scrapers can locate it.
RESULT_PREFIX = "DEXGARMENTLAB_RESULT="
# NOTE: the SimulationApp is created here, before the isaacsim/omni imports
# below — this file only imports those modules after the app exists.
simulation_app = SimulationApp({"headless": HEADLESS})
|
| 17 |
+
|
| 18 |
+
# load external package
|
| 19 |
+
import sys
|
| 20 |
+
import time
|
| 21 |
+
import numpy as np
|
| 22 |
+
import open3d as o3d
|
| 23 |
+
from termcolor import cprint
|
| 24 |
+
import threading
|
| 25 |
+
|
| 26 |
+
# load isaac-relevant package
|
| 27 |
+
import omni.replicator.core as rep
|
| 28 |
+
import isaacsim.core.utils.prims as prims_utils
|
| 29 |
+
from pxr import UsdGeom,UsdPhysics,PhysxSchema, Gf
|
| 30 |
+
from isaacsim.core.api import World
|
| 31 |
+
from isaacsim.core.api import SimulationContext
|
| 32 |
+
from isaacsim.core.api.objects import DynamicCuboid, FixedCuboid, VisualCuboid
|
| 33 |
+
from isaacsim.core.utils.prims import is_prim_path_valid, set_prim_visibility, delete_prim
|
| 34 |
+
from isaacsim.core.utils.string import find_unique_string_name
|
| 35 |
+
from isaacsim.core.utils.viewports import set_camera_view
|
| 36 |
+
from isaacsim.core.utils.stage import add_reference_to_stage, is_stage_loading
|
| 37 |
+
from isaacsim.core.prims import SingleXFormPrim, SingleClothPrim, SingleRigidPrim, SingleGeometryPrim, SingleParticleSystem, SingleDeformablePrim
|
| 38 |
+
from isaacsim.core.prims import XFormPrim, ClothPrim, RigidPrim, GeometryPrim, ParticleSystem
|
| 39 |
+
from isaacsim.core.utils.rotations import euler_angles_to_quat
|
| 40 |
+
from omni.physx.scripts import deformableUtils,particleUtils,physicsUtils
|
| 41 |
+
|
| 42 |
+
# load custom package
|
| 43 |
+
sys.path.append(os.getcwd())
|
| 44 |
+
from Env_StandAlone.BaseEnv import BaseEnv
|
| 45 |
+
from Env_Config.Garment.Particle_Garment import Particle_Garment
|
| 46 |
+
from Env_Config.Garment.Deformable_Garment import Deformable_Garment
|
| 47 |
+
from Env_Config.Robot.BimanualDex_Ur10e import Bimanual_Ur10e
|
| 48 |
+
from Env_Config.Camera.Recording_Camera import Recording_Camera
|
| 49 |
+
from Env_Config.Room.Real_Ground import Real_Ground
|
| 50 |
+
from Env_Config.Room.Object_Tools import pothook_load, set_prim_visible_group, delete_prim_group
|
| 51 |
+
from Env_Config.Utils_Project.Code_Tools import get_unique_filename, normalize_columns, plot_column_distributions
|
| 52 |
+
from Env_Config.Utils_Project.Parse import parse_args_record
|
| 53 |
+
from Env_Config.Utils_Project.Point_Cloud_Manip import rotate_point_cloud
|
| 54 |
+
from Model_HALO.GAM.GAM_Encapsulation import GAM_Encapsulation
|
| 55 |
+
|
| 56 |
+
class HangCoat_Env(BaseEnv):
|
| 57 |
+
def __init__(
|
| 58 |
+
self,
|
| 59 |
+
pos:np.ndarray=None,
|
| 60 |
+
ori:np.ndarray=None,
|
| 61 |
+
usd_path:str=None,
|
| 62 |
+
env_dx:float=0.0,
|
| 63 |
+
env_dy:float=0.0,
|
| 64 |
+
ground_material_usd:str=None,
|
| 65 |
+
record_video_flag:bool=False,
|
| 66 |
+
):
|
| 67 |
+
# load BaseEnv
|
| 68 |
+
super().__init__()
|
| 69 |
+
|
| 70 |
+
# ------------------------------------ #
|
| 71 |
+
# --- Add Env Assets --- #
|
| 72 |
+
# ------------------------------------ #
|
| 73 |
+
self.ground = Real_Ground(
|
| 74 |
+
self.scene,
|
| 75 |
+
visual_material_usd = ground_material_usd,
|
| 76 |
+
# you can use materials in 'Assets/Material/Floor' to change the texture of ground.
|
| 77 |
+
)
|
| 78 |
+
|
| 79 |
+
# load garment
|
| 80 |
+
self.garment = Particle_Garment(
|
| 81 |
+
self.world,
|
| 82 |
+
pos=np.array([0, 3.0, 0.6]),
|
| 83 |
+
ori=np.array([0.0, 0.0, 0.0]),
|
| 84 |
+
usd_path=os.getcwd() + "/" + "Assets/Garment/Tops/Collar_Lsleeve_FrontOpen/TCLO_Jacket131/TCLO_Jacket131_obj.usd" if usd_path is None else usd_path,
|
| 85 |
+
friction=25.0,
|
| 86 |
+
contact_offset=0.015,
|
| 87 |
+
rest_offset=0.012,
|
| 88 |
+
particle_contact_offset=0.015,
|
| 89 |
+
fluid_rest_offset=0.012,
|
| 90 |
+
solid_rest_offset=0.012,
|
| 91 |
+
)
|
| 92 |
+
# Here are some example garments you can try:
|
| 93 |
+
# "Assets/Garment/Tops/Collar_Lsleeve_FrontOpen/TCLO_Shirt025/TCLO_Shirt025_obj.usd",
|
| 94 |
+
# "Assets/Garment/Tops/Collar_Lsleeve_FrontOpen/TCLO_Jacket037/TCLO_Jacket037_obj.usd",
|
| 95 |
+
# "Assets/Garment/Tops/Collar_Lsleeve_FrontOpen/TCLO_Jacket140/TCLO_Jacket140_obj.usd",
|
| 96 |
+
# "Assets/Garment/Tops/Collar_Lsleeve_FrontOpen/TCLO_Jacket131/TCLO_Jacket131_obj.usd"
|
| 97 |
+
# "Assets/Garment/Tops/Hooded_Lsleeve_FrontOpen/THLO_Jacket065/THLO_Jacket065_obj.usd"
|
| 98 |
+
|
| 99 |
+
# load bimanual_dex
|
| 100 |
+
self.bimanual_dex = Bimanual_Ur10e(
|
| 101 |
+
self.world,
|
| 102 |
+
dexleft_pos=np.array([-0.8, 0.0, 0.6]),
|
| 103 |
+
dexleft_ori=np.array([0.0, 0.0, 0.0]),
|
| 104 |
+
dexright_pos=np.array([0.8, 0.0, 0.6]),
|
| 105 |
+
dexright_ori=np.array([0.0, 0.0, 0.0]),
|
| 106 |
+
)
|
| 107 |
+
|
| 108 |
+
# load camera
|
| 109 |
+
self.garment_camera = Recording_Camera(
|
| 110 |
+
camera_position=np.array([0.0, -3.0, 6.75]),
|
| 111 |
+
camera_orientation=np.array([0, 60.0, 90.0]),
|
| 112 |
+
prim_path="/World/garment_camera",
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
self.env_camera = Recording_Camera(
|
| 116 |
+
camera_position=np.array([0.0, 6.65, 4.0]),
|
| 117 |
+
camera_orientation=np.array([0, 30.0, -90.0]),
|
| 118 |
+
prim_path="/World/env_camera",
|
| 119 |
+
)
|
| 120 |
+
|
| 121 |
+
self.object_camera = Recording_Camera(
|
| 122 |
+
camera_position=np.array([0.0, -6.6, 4.9]),
|
| 123 |
+
camera_orientation=np.array([0, 30.0, 90.0]),
|
| 124 |
+
prim_path="/World/object_camera",
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
self.garment_pcd = None
|
| 128 |
+
self.object_pcd = None
|
| 129 |
+
self.points_affordance_feature = None
|
| 130 |
+
|
| 131 |
+
# load GAM Model
|
| 132 |
+
self.model = GAM_Encapsulation(catogory="Tops_FrontOpen")
|
| 133 |
+
|
| 134 |
+
# load hanger
|
| 135 |
+
self.env_dx = env_dx
|
| 136 |
+
self.env_dy = env_dy
|
| 137 |
+
self.pothook_center = pothook_load(self.scene, env_dx, env_dy)
|
| 138 |
+
|
| 139 |
+
# ------------------------------------ #
|
| 140 |
+
# --- Initialize World to be Ready --- #
|
| 141 |
+
# ------------------------------------ #
|
| 142 |
+
# initialize world
|
| 143 |
+
self.reset()
|
| 144 |
+
|
| 145 |
+
# initialize recording camera to obtain point cloud data of garment
|
| 146 |
+
self.garment_camera.initialize(
|
| 147 |
+
segment_pc_enable=True,
|
| 148 |
+
segment_prim_path_list=[
|
| 149 |
+
"/World/Garment/garment",
|
| 150 |
+
]
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
# initialize gif camera to obtain rgb with the aim of creating gif
|
| 154 |
+
self.env_camera.initialize(
|
| 155 |
+
depth_enable=True,
|
| 156 |
+
)
|
| 157 |
+
|
| 158 |
+
self.object_camera.initialize(
|
| 159 |
+
segment_pc_enable=True,
|
| 160 |
+
segment_prim_path_list=[
|
| 161 |
+
"/World/pothook1",
|
| 162 |
+
"/World/pothook2",
|
| 163 |
+
"/World/pothook3",
|
| 164 |
+
]
|
| 165 |
+
)
|
| 166 |
+
|
| 167 |
+
# add thread and record gif Asynchronously(use to collect rgb data for generating gif)
|
| 168 |
+
if record_video_flag:
|
| 169 |
+
self.thread_record = threading.Thread(target=self.env_camera.collect_rgb_graph_for_video)
|
| 170 |
+
self.thread_record.daemon = True
|
| 171 |
+
|
| 172 |
+
# move garment to the target position
|
| 173 |
+
self.garment.set_pose(pos=np.array([pos[0], pos[1], 0.2]), ori=ori)
|
| 174 |
+
self.position = [pos[0], pos[1], 0.2]
|
| 175 |
+
self.orientation = ori
|
| 176 |
+
|
| 177 |
+
# open hand to be initial state
|
| 178 |
+
self.bimanual_dex.set_both_hand_state("open", "open")
|
| 179 |
+
|
| 180 |
+
# step world to make it ready
|
| 181 |
+
for i in range(200):
|
| 182 |
+
self.step()
|
| 183 |
+
|
| 184 |
+
cprint("[CONFIG]----------- World Configuration -----------", color="magenta", attrs=["bold"])
|
| 185 |
+
cprint(f"[CONFIG]usd_path: {usd_path}", "magenta")
|
| 186 |
+
cprint(f"[CONFIG]pos_x: {pos[0]}", "magenta")
|
| 187 |
+
cprint(f"[CONFIG]pos_y: {pos[1]}", "magenta")
|
| 188 |
+
cprint(f"[CONFIG]env_dx: {env_dx}", "magenta")
|
| 189 |
+
cprint(f"[CONFIG]env_dy: {env_dy}", "magenta")
|
| 190 |
+
cprint("[CONFIG]----------- World Configuration -----------", color="magenta", attrs=["bold"])
|
| 191 |
+
|
| 192 |
+
cprint("World Ready!", "green", "on_green")
|
| 193 |
+
|
| 194 |
+
def record_callback(self, step_size):
    """Physics-step callback for demonstration recording.

    Every 5th step, snapshot both arms' joint positions, the env camera's
    RGB frame and depth-derived point cloud, plus the cached garment/object
    clouds and affordance features, and append them to ``self.saving_data``.

    Args:
        step_size: physics step size passed by the simulator callback
            API (unused here; presumably required by the callback
            signature — TODO confirm against the registration site).
    """
    should_sample = (self.step_num % 5 == 0)
    if should_sample:
        left_joints = self.bimanual_dex.dexleft.get_joint_positions()
        right_joints = self.bimanual_dex.dexright.get_joint_positions()
        frame = {
            "joint_state": np.array([*left_joints, *right_joints]),
            "image": self.env_camera.get_rgb_graph(save_or_not=False),
            "env_point_cloud": self.env_camera.get_pointcloud_from_depth(
                show_original_pc_online=False,
                show_downsample_pc_online=False,
            ),
            "garment_point_cloud": self.garment_pcd,
            "object_point_cloud": self.object_pcd,
            "points_affordance_feature": self.points_affordance_feature,
        }
        self.saving_data.append(frame)
    # the step counter advances on every callback, sampled or not
    self.step_num += 1
|
| 221 |
+
|
| 222 |
+
def HangCoat(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, data_collection_flag, record_video_flag):
    """Run one hang-coat episode and return a summary dict.

    Pipeline: build the environment; capture segmented point clouds of the
    pothooks and of the garment (hiding the other prims while each camera
    shoots); query the GAM model for grasp and boundary points; drive the
    bimanual dex-hands to carry the garment over the hooks and release it;
    finally judge success from the garment's resting center height
    (0.50 < z < 2.0).

    Side effects (flag-dependent): demonstration recording, an npz dump,
    a final-state image and a text log under Data/Hang_Coat/ when
    data_collection_flag is set; an mp4 of the episode when
    record_video_flag is set and the episode succeeds.
    """

    env = HangCoat_Env(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, record_video_flag)

    # lighten the garment while perceiving / manipulating
    env.garment.particle_material.set_gravity_scale(0.7)

    # hide robots and garment so the object camera sees only the pothooks
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
        visible=False,
    )
    for i in range(50):
        env.step()

    env.object_pcd, color = env.object_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
        real_time_watch=False,
    )

    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
        visible=True,
    )
    for i in range(50):
        env.step()

    # hide robots and pothooks so the garment camera sees only the garment
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/pothook1", "/World/pothook2", "/World/pothook3"],
        visible=False,
    )
    for i in range(50):
        env.step()

    pcd, color = env.garment_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
        real_time_watch=False,
    )
    env.garment_pcd=pcd

    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/pothook1", "/World/pothook2", "/World/pothook3"],
        visible=True,
    )
    for i in range(50):
        env.step()

    # rotate the cloud 180 deg about z (presumably to match the GAM model's
    # canonical garment frame — TODO confirm against GAM training data)
    pcd_rotate = rotate_point_cloud(pcd, euler_angles=np.array([0, 0, 180]), center_point=env.garment.get_garment_center_pos())

    # query grasp points in the rotated frame, then index back into the
    # original (unrotated) cloud; grasps are flattened to z = 0
    manipulation_points, indices, points_similarity = env.model.get_manipulation_points(input_pcd=pcd_rotate, index_list=[793, 1805])
    manipulation_points=pcd[indices]
    manipulation_points[:, 2] = 0.0

    env.points_affordance_feature = normalize_columns(points_similarity.T)

    # estimate garment length from two boundary points to size the lift height
    garment_boundary_points, boundary_indices, _ = env.model.get_manipulation_points(input_pcd=pcd_rotate, index_list=[561, 1776])
    garment_boundary_points = pcd[boundary_indices]
    garment_length = abs(garment_boundary_points[0][1] - garment_boundary_points[1][1])
    lift_height = garment_length * 0.35 + env.pothook_center[2]

    if record_video_flag:
        env.thread_record.start()

    # reach the two grasp points (index 1 -> left hand, index 0 -> right hand)
    env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[1], left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=manipulation_points[0], right_ori=np.array([0.406, -0.406, -0.579, 0.579]))

    if data_collection_flag:
        for i in range(20):
            env.step()
        env.record(task_name="Hang_Coat", stage_index=1)

    # close hands, then offset the grasp targets (spread in x, shift in y,
    # raise in z) to form the carry pose
    env.bimanual_dex.set_both_hand_state(left_hand_state="close", right_hand_state="close")
    manipulation_points[1][0]-=0.15
    manipulation_points[0][0]+=0.15
    manipulation_points[1][1]+=0.2
    manipulation_points[0][1]+=0.2
    manipulation_points[1][2]+=0.2
    manipulation_points[0][2]+=0.2
    env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[1], left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=manipulation_points[0], right_ori=np.array([0.406, -0.406, -0.579, 0.579]))

    center_pos = env.pothook_center

    # approach in front of the hooks at lift height ...
    left_pos_f = np.array([center_pos[0]-0.2, center_pos[1]-0.3, lift_height])
    right_pos_f = np.array([center_pos[0]+0.2, center_pos[1]-0.3, lift_height])
    env.bimanual_dex.dense_move_both_ik(left_pos=left_pos_f, left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=right_pos_f, right_ori=np.array([0.406, -0.406, -0.579, 0.579]))

    # ... translate past the hook line ...
    left_pos_f = np.array([center_pos[0]-0.2, center_pos[1]+0.15, lift_height])
    right_pos_f = np.array([center_pos[0]+0.2, center_pos[1]+0.15, lift_height])
    env.bimanual_dex.dense_move_both_ik(left_pos=left_pos_f, left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=right_pos_f, right_ori=np.array([0.406, -0.406, -0.579, 0.579]))

    # ... and lower to hook height
    left_pos_f = np.array([center_pos[0]-0.2, center_pos[1]+0.175, center_pos[2]])
    right_pos_f = np.array([center_pos[0]+0.2, center_pos[1]+0.175, center_pos[2]])
    env.bimanual_dex.dense_move_both_ik(left_pos=left_pos_f, left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=right_pos_f, right_ori=np.array([0.406, -0.406, -0.579, 0.579]))


    # release the garment onto the hooks
    env.bimanual_dex.set_both_hand_state(left_hand_state="open", right_hand_state="open")

    if data_collection_flag:
        env.stop_record()

    # temporarily increase gravity so the released garment settles quickly
    env.garment.particle_material.set_gravity_scale(2.0)

    for i in range(100):
        env.step()

    env.garment.particle_material.set_gravity_scale(0.7)

    # hide the hands so they cannot occlude / disturb the judgement view
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight"],
        visible=False,
    )
    for i in range(50):
        env.step()

    success=True

    cprint("[INFO]----------- Judge Begin -----------", "blue", attrs=["bold"])
    cprint(f"[INFO]garment_center_height: {env.garment.get_garment_center_pos()[2]}", "blue")
    cprint("[INFO]----------- Judge End -----------", "blue", attrs=["bold"])
    # success: garment center hangs clear of the ground (> 0.50) but below
    # an implausible height (< 2.0)
    success=env.garment.get_garment_center_pos()[2]>0.50 and env.garment.get_garment_center_pos()[2]<2.0
    cprint(f"[INFO]final result: {success}", color="green", on_color="on_green")

    # if you wanna create gif, use this code. Need Cooperation with thread.
    if record_video_flag and success:
        if not os.path.exists("Data/Hang_Coat/video"):
            os.makedirs("Data/Hang_Coat/video")
        env.env_camera.create_mp4(get_unique_filename("Data/Hang_Coat/video/video", ".mp4"))

    if data_collection_flag:
        # write into .log file
        with open("Data/Hang_Coat/data_collection_log.txt", "a") as f:
            f.write(f"result:{success} usd_path:{env.garment.usd_path} pos_x:{pos[0]} pos_y:{pos[1]} env_dx:{env_dx} env_dy:{env_dy} \n")

    if data_collection_flag and success:
        env.record_to_npz(env_change=True)
        if not os.path.exists("Data/Hang_Coat/final_state_pic"):
            os.makedirs("Data/Hang_Coat/final_state_pic")
        env.env_camera.get_rgb_graph(save_or_not=True,save_path=get_unique_filename("Data/Hang_Coat/final_state_pic/img",".png"))
    # summary dict consumed by the __main__ JSON emitter
    result = {
        "task_name": "Hang Coat",
        "success": bool(success),
        "garment_point_count": int(env.garment_pcd.shape[0]) if env.garment_pcd is not None else 0,
        "object_point_count": int(env.object_pcd.shape[0]) if env.object_pcd is not None else 0,
        "affordance_feature_norm": float(np.linalg.norm(env.points_affordance_feature)) if env.points_affordance_feature is not None else 0.0,
        "garment_center_height": float(env.garment.get_garment_center_pos()[2]),
    }
    return result
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
if __name__=="__main__":

    args = parse_args_record()

    # initial setting (defaults used when no randomization flags are set)
    pos = np.array([0, 0.7, 0.2])
    ori = np.array([0.0, 0.0, 180.0])
    usd_path = None
    env_dx = 0.0
    env_dy = 0.0

    if args.env_random_flag or args.garment_random_flag:
        # time-based seed: every run gets a different randomization
        np.random.seed(int(time.time()))
    if args.env_random_flag:
        # randomize the environment (pothook) placement
        env_dx = np.random.uniform(-0.25, 0.25) # changeable
        env_dy = np.random.uniform(-0.3, -0.05) # changeable
    if args.garment_random_flag:
        # randomize the garment spawn pose and pick a random training asset
        x = np.random.uniform(-0.1, 0.1) # changeable
        y = np.random.uniform(0.5, 0.7) # changeable
        pos = np.array([x,y,0.0])
        ori = np.array([0.0, 0.0, 180.0])
        # asset list lives two directories above this script
        Base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        assets_lists = os.path.join(Base_dir,"Model_HALO/GAM/checkpoints/Tops_FrontOpen/assets_training_list.txt")
        assets_list = []
        with open(assets_lists,"r",encoding='utf-8') as f:
            for line in f:
                clean_line = line.rstrip('\n')
                assets_list.append(clean_line)
        usd_path=os.getcwd() + "/" + np.random.choice(assets_list)
        print(usd_path)

    result = HangCoat(pos, ori, usd_path, env_dx, env_dy, args.ground_material_usd, args.data_collection_flag, args.record_video_flag)
    if EMIT_JSON:
        # machine-readable result line for external harnesses
        print(f"{RESULT_PREFIX}{json.dumps(result, sort_keys=True)}")

    # batch / headless runs exit immediately; interactive runs keep the
    # viewer alive until the user closes it
    if args.data_collection_flag:
        simulation_app.close()
    elif HEADLESS:
        simulation_app.close()
    else:
        while simulation_app.is_running():
            simulation_app.update()


    simulation_app.close()
|
third_party/DexGarmentLab/Env_StandAlone/Store_Tops_Env.py
ADDED
|
@@ -0,0 +1,511 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
from isaacsim import SimulationApp
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _env_flag(name: str, default: bool = False) -> bool:
|
| 7 |
+
value = os.environ.get(name)
|
| 8 |
+
if value is None:
|
| 9 |
+
return default
|
| 10 |
+
return value.strip().lower() in {"1", "true", "yes", "on"}
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# Runtime configuration read from the environment before launch:
#   DEXGARMENTLAB_HEADLESS  -> run Isaac Sim without a viewer window
#   DEXGARMENTLAB_EMIT_JSON -> print a machine-readable result line at exit
HEADLESS = _env_flag("DEXGARMENTLAB_HEADLESS", False)
EMIT_JSON = _env_flag("DEXGARMENTLAB_EMIT_JSON", False)
# Prefix marking the JSON result line in stdout for external parsers.
RESULT_PREFIX = "DEXGARMENTLAB_RESULT="
# SimulationApp is created here, before the isaac-relevant imports below.
simulation_app = SimulationApp({"headless": HEADLESS})
|
| 17 |
+
|
| 18 |
+
# load external package
|
| 19 |
+
import sys
|
| 20 |
+
import time
|
| 21 |
+
import numpy as np
|
| 22 |
+
import open3d as o3d
|
| 23 |
+
from termcolor import cprint
|
| 24 |
+
import threading
|
| 25 |
+
|
| 26 |
+
# load isaac-relevant package
|
| 27 |
+
import omni.replicator.core as rep
|
| 28 |
+
import isaacsim.core.utils.prims as prims_utils
|
| 29 |
+
from pxr import UsdGeom,UsdPhysics,PhysxSchema, Gf
|
| 30 |
+
from isaacsim.core.api import World
|
| 31 |
+
from isaacsim.core.api import SimulationContext
|
| 32 |
+
from isaacsim.core.api.objects import DynamicCuboid, FixedCuboid, VisualCuboid
|
| 33 |
+
from isaacsim.core.utils.prims import is_prim_path_valid, set_prim_visibility, delete_prim
|
| 34 |
+
from isaacsim.core.utils.string import find_unique_string_name
|
| 35 |
+
from isaacsim.core.utils.viewports import set_camera_view
|
| 36 |
+
from isaacsim.core.utils.stage import add_reference_to_stage, is_stage_loading
|
| 37 |
+
from isaacsim.core.prims import SingleXFormPrim, SingleClothPrim, SingleRigidPrim, SingleGeometryPrim, SingleParticleSystem, SingleDeformablePrim
|
| 38 |
+
from isaacsim.core.prims import XFormPrim, ClothPrim, RigidPrim, GeometryPrim, ParticleSystem
|
| 39 |
+
from isaacsim.core.utils.rotations import euler_angles_to_quat
|
| 40 |
+
from omni.physx.scripts import deformableUtils,particleUtils,physicsUtils
|
| 41 |
+
|
| 42 |
+
# load custom package
|
| 43 |
+
sys.path.append(os.getcwd())
|
| 44 |
+
from Env_StandAlone.BaseEnv import BaseEnv
|
| 45 |
+
from Env_Config.Garment.Particle_Garment import Particle_Garment
|
| 46 |
+
from Env_Config.Garment.Deformable_Garment import Deformable_Garment
|
| 47 |
+
from Env_Config.Robot.BimanualDex_Ur10e import Bimanual_Ur10e
|
| 48 |
+
from Env_Config.Camera.Recording_Camera import Recording_Camera
|
| 49 |
+
from Env_Config.Room.Real_Ground import Real_Ground
|
| 50 |
+
from Env_Config.Room.Object_Tools import pusher_loader, set_prim_visible_group, delete_prim_group
|
| 51 |
+
from Env_Config.Utils_Project.Code_Tools import get_unique_filename, normalize_columns
|
| 52 |
+
from Env_Config.Utils_Project.Parse import parse_args_record
|
| 53 |
+
from Env_Config.Utils_Project.Collision_Group import CollisionGroup
|
| 54 |
+
from Env_Config.Utils_Project.Point_Cloud_Manip import rotate_point_cloud
|
| 55 |
+
from Model_HALO.GAM.GAM_Encapsulation import GAM_Encapsulation
|
| 56 |
+
|
| 57 |
+
class StoreTops_Env(BaseEnv):
    """Isaac Sim standalone environment for the bimanual 'Store Tops' task.

    Assembles the ground, a particle-based top garment, two UR10e arms with
    dexterous hands, four recording cameras (garment / env / judge / object),
    a pusher container, the GAM affordance model and a collision group, then
    resets and steps the world until it is ready for manipulation.
    """

    def __init__(
        self,
        pos:np.ndarray=None,
        ori:np.ndarray=None,
        usd_path:str=None,
        env_dx:float=0.0,
        env_dy:float=0.0,
        ground_material_usd:str=None,
        record_video_flag:bool=False,
    ):
        """Build the scene and bring the world to a ready state.

        Args:
            pos: initial garment position (x, y, z); pos[0]/pos[1] also place
                the garment camera, so callers must not pass None.
            ori: initial garment orientation (euler angles).
            usd_path: garment USD asset; falls back to a bundled top asset.
            env_dx, env_dy: planar offsets applied to the judge camera
                (stored for the caller's use as well).
            ground_material_usd: optional visual material for the ground.
            record_video_flag: if True, prepare a daemon thread that collects
                RGB frames from the env camera for later video export.
        """
        # load BaseEnv
        super().__init__()

        # ------------------------------------ #
        # --- Add Env Assets --- #
        # ------------------------------------ #

        # add ground
        self.ground = Real_Ground(
            self.scene,
            visual_material_usd = ground_material_usd,
            # you can use materials in 'Assets/Material/Floor' to change the texture of ground.
        )

        # load garment (spawned far away at y=3.0; moved into place after reset)
        self.garment = Particle_Garment(
            self.world,
            pos=np.array([0, 3.0, 0.6]),
            ori=np.array([0, 0, 0]),
            usd_path=os.getcwd() + "/" + "Assets/Garment/Tops/Collar_noSleeve_FrontClose/TCNC_Top338/TCNC_Top338_obj.usd" if usd_path is None else usd_path,
        )

        # load bimanual_dex (two UR10e arms facing each other across x)
        self.bimanual_dex = Bimanual_Ur10e(
            self.world,
            dexleft_pos=np.array([-0.9, 0.0, 0.5]),
            dexleft_ori=np.array([0.0, 0.0, 0.0]),
            dexright_pos=np.array([0.9, 0.0, 0.5]),
            dexright_ori=np.array([0.0, 0.0, 0.0]),
        )

        # load camera (top-down view over the garment spawn position)
        self.garment_camera = Recording_Camera(
            camera_position=np.array([pos[0], pos[1], 6.75]),
            camera_orientation=np.array([0, 90.0, 90.0]),
            prim_path="/World/garment_camera",
        )

        # oblique overview camera used for RGB / depth episode recording
        self.env_camera = Recording_Camera(
            camera_position=np.array([0.0, 5.22, 8.11]),
            camera_orientation=np.array([0, 60, -90.0]),
            prim_path="/World/env_camera",
        )

        # top-down camera over the (offset) storage area, used for judging
        self.judge_camera = Recording_Camera(
            camera_position=np.array([0.0+env_dx,1.25+env_dy,6.75]),
            camera_orientation=np.array([0, 90, -90.0]),
            prim_path="/World/judge_camera",
        )
        self.env_dx = env_dx
        self.env_dy = env_dy
        self.pusher = pusher_loader(self.scene)

        # load GAM Model (note: 'catogory' spelling is the API's keyword)
        self.model = GAM_Encapsulation(catogory="Tops_NoSleeve")

        # define collision group - helper path
        self.helper_path=['/World/defaultGroundPlane/GroundPlane', '/World/pusher']
        self.collisiongroup = CollisionGroup(
            self.world,
            helper_path=self.helper_path,
            garment=True,
            collide_with_garment=True,
            collide_with_robot=False,
        )

        # camera used to capture the pusher (object) point cloud
        self.object_camera = Recording_Camera(
            camera_position=np.array([0.0, -6.6, 4.9]),
            camera_orientation=np.array([0, 30.0, 90.0]),
            prim_path="/World/object_camera",
        )

        # perception caches filled in later by the task driver
        self.garment_pcd = None
        self.object_pcd = None
        self.points_affordance_feature = None

        # ------------------------------------ #
        # --- Initialize World to be Ready --- #
        # ------------------------------------ #
        # initialize world
        self.reset()

        # move garment to its working pose (set_pose is valid only after reset)
        self.garment.set_pose(pos=np.array([pos[0], pos[1], 0.20]), ori=ori)
        self.position = [pos[0], pos[1], 0.2]
        self.orientation = ori

        # initialize recording camera to obtain point cloud data of garment
        self.garment_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Garment/garment",
            ]
        )

        # initialize gif camera to obtain rgb with the aim of creating gif
        self.env_camera.initialize(depth_enable=True)

        # judge camera segments the garment only
        self.judge_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Garment/garment",
            ]
        )

        # object camera segments the pusher only
        self.object_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/pusher",
            ]
        )

        # add thread and record gif Asynchronously(use to collect rgb data for generating gif)
        if record_video_flag:
            self.thread_record = threading.Thread(target=self.env_camera.collect_rgb_graph_for_video)
            self.thread_record.daemon = True

        # step world to make it ready
        for i in range(100):
            self.step()

        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])
        cprint(f"usd_path: {usd_path}", "magenta")
        cprint(f"pos_x: {pos[0]}", "magenta")
        cprint(f"pos_y: {pos[1]}", "magenta")
        cprint(f"env_dx: {env_dx}", "magenta")
        cprint(f"env_dy: {env_dy}", "magenta")
        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])


        cprint("World Ready!", "green", "on_green")

    def record_callback(self, step_size):
        """Physics-step callback: every 5th step, snapshot both arms' joint
        positions, the env camera's RGB frame and depth point cloud, plus the
        cached garment/object clouds and affordance features, into
        ``self.saving_data``.

        Args:
            step_size: physics step size supplied by the callback API
                (unused here — presumably part of the required signature).
        """

        if self.step_num % 5 == 0:

            joint_pos_L = self.bimanual_dex.dexleft.get_joint_positions()

            joint_pos_R = self.bimanual_dex.dexright.get_joint_positions()

            joint_state = np.array([*joint_pos_L, *joint_pos_R])

            rgb = self.env_camera.get_rgb_graph(save_or_not=False)

            point_cloud = self.env_camera.get_pointcloud_from_depth(
                show_original_pc_online=False,
                show_downsample_pc_online=False,
            )

            self.saving_data.append({
                "joint_state": joint_state,
                "image": rgb,
                "env_point_cloud": point_cloud,
                "garment_point_cloud":self.garment_pcd,
                "object_point_cloud":self.object_pcd,
                "points_affordance_feature": self.points_affordance_feature,
            })

        # counter advances on every callback, sampled or not
        self.step_num += 1
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def StoreTops(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, data_collection_flag, record_video_flag):
|
| 229 |
+
|
| 230 |
+
env = StoreTops_Env(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, record_video_flag)
|
| 231 |
+
|
| 232 |
+
# hide prim to get garment point cloud
|
| 233 |
+
set_prim_visible_group(
|
| 234 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/pusher"],
|
| 235 |
+
visible=False,
|
| 236 |
+
)
|
| 237 |
+
for i in range(50):
|
| 238 |
+
env.step()
|
| 239 |
+
|
| 240 |
+
env.garment_pcd, color = env.garment_camera.get_point_cloud_data_from_segment(
|
| 241 |
+
save_or_not=False,
|
| 242 |
+
save_path=get_unique_filename("data", extension=".ply"),
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
set_prim_visible_group(
|
| 246 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/pusher"],
|
| 247 |
+
visible=True,
|
| 248 |
+
)
|
| 249 |
+
for i in range(50):
|
| 250 |
+
env.step()
|
| 251 |
+
|
| 252 |
+
if record_video_flag:
|
| 253 |
+
env.thread_record.start()
|
| 254 |
+
|
| 255 |
+
manipulation_points, indices, points_similarity = env.model.get_manipulation_points(input_pcd=env.garment_pcd, index_list=[1954, 1832, 528, 587])
|
| 256 |
+
manipulation_points[:, 2] = 0.025 # set z-axis to 0.005 to make sure dexhand can grasp the garment
|
| 257 |
+
|
| 258 |
+
env.points_affordance_feature = normalize_columns(points_similarity[2:4].T)
|
| 259 |
+
|
| 260 |
+
# move both dexhand to the manipulation points
|
| 261 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0], left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=manipulation_points[1], right_ori=np.array([0.406, -0.406, -0.579, 0.579]))
|
| 262 |
+
|
| 263 |
+
env.bimanual_dex.set_both_hand_state(left_hand_state="open", right_hand_state="open")
|
| 264 |
+
|
| 265 |
+
env.bimanual_dex.set_both_hand_state(left_hand_state="close", right_hand_state="close")
|
| 266 |
+
|
| 267 |
+
for i in range(20):
|
| 268 |
+
env.step()
|
| 269 |
+
|
| 270 |
+
left_dis=np.sqrt((manipulation_points[0][0]-manipulation_points[2][0])**2+(manipulation_points[0][1]-manipulation_points[2][1])**2)
|
| 271 |
+
right_dis=np.sqrt((manipulation_points[1][0]-manipulation_points[3][0])**2+(manipulation_points[1][1]-manipulation_points[3][1])**2)
|
| 272 |
+
distance=(left_dis+right_dis)/4
|
| 273 |
+
# get lift points
|
| 274 |
+
y_off=0.01
|
| 275 |
+
z_off=0.005
|
| 276 |
+
left_lift_points,right_lift_points=np.array([manipulation_points[0][0], manipulation_points[0][1]-distance+y_off, distance+z_off]), np.array([manipulation_points[1][0], manipulation_points[1][1]-distance+y_off, distance+z_off])
|
| 277 |
+
|
| 278 |
+
# move both dexhand to the lift points
|
| 279 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=left_lift_points, left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=right_lift_points, right_ori=np.array([0.406, -0.406, -0.579, 0.579]))
|
| 280 |
+
|
| 281 |
+
left_lift_points,right_lift_points=np.array([manipulation_points[0][0], manipulation_points[2][1]+0.06, distance+z_off]), np.array([manipulation_points[1][0], manipulation_points[3][1]+0.06, distance+z_off])
|
| 282 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=left_lift_points, left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=right_lift_points, right_ori=np.array([0.406, -0.406, -0.579, 0.579]))
|
| 283 |
+
|
| 284 |
+
# release the garment
|
| 285 |
+
env.bimanual_dex.set_both_hand_state(left_hand_state="open", right_hand_state="open")
|
| 286 |
+
|
| 287 |
+
env.garment.particle_material.set_gravity_scale(10.0)
|
| 288 |
+
|
| 289 |
+
for i in range(200):
|
| 290 |
+
env.step()
|
| 291 |
+
|
| 292 |
+
env.garment.particle_material.set_gravity_scale(1.0)
|
| 293 |
+
|
| 294 |
+
cprint("Store World Fold Procedure Finish! Store Procedure Begins!", "green", "on_green")
|
| 295 |
+
|
| 296 |
+
pusher_center = np.array([0.0+env_dx, 1.10+env_dy, 0.0])
|
| 297 |
+
env.pusher.set_world_pose(position=pusher_center)
|
| 298 |
+
|
| 299 |
+
# hide prim to get object point cloud
|
| 300 |
+
set_prim_visible_group(
|
| 301 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
|
| 302 |
+
visible=False,
|
| 303 |
+
)
|
| 304 |
+
for i in range(50):
|
| 305 |
+
env.step()
|
| 306 |
+
|
| 307 |
+
env.object_pcd, color = env.object_camera.get_point_cloud_data_from_segment(
|
| 308 |
+
save_or_not=False,
|
| 309 |
+
save_path=get_unique_filename("data", extension=".ply"),
|
| 310 |
+
# real_time_watch=True,
|
| 311 |
+
)
|
| 312 |
+
|
| 313 |
+
set_prim_visible_group(
|
| 314 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
|
| 315 |
+
visible=True,
|
| 316 |
+
)
|
| 317 |
+
for i in range(50):
|
| 318 |
+
env.step()
|
| 319 |
+
|
| 320 |
+
# # hide prim to get garment point cloud
|
| 321 |
+
# set_prim_visible_group(
|
| 322 |
+
# prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/pusher"],
|
| 323 |
+
# visible=False,
|
| 324 |
+
# )
|
| 325 |
+
# for i in range(50):
|
| 326 |
+
# env.step()
|
| 327 |
+
|
| 328 |
+
# env.garment_pcd, color = env.garment_camera.get_point_cloud_data_from_segment(
|
| 329 |
+
# save_or_not=False,
|
| 330 |
+
# save_path=get_unique_filename("data", extension=".ply"),
|
| 331 |
+
# )
|
| 332 |
+
|
| 333 |
+
# set_prim_visible_group(
|
| 334 |
+
# prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/pusher"],
|
| 335 |
+
# visible=True,
|
| 336 |
+
# )
|
| 337 |
+
# for i in range(50):
|
| 338 |
+
# env.step()
|
| 339 |
+
|
| 340 |
+
left_ori=np.array([0.7010574,0.5609855, 0.4304593, 0.092296])
|
| 341 |
+
right_ori=np.array([ 0.4304593, 0.092296, 0.7010574,0.5609855])
|
| 342 |
+
left_lift_points,right_lift_points=np.array([-0.5, 0.6, 0.65]), np.array([0.5, 0.6, 0.65])
|
| 343 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=left_lift_points, left_ori=left_ori, right_pos=right_lift_points, right_ori=right_ori)
|
| 344 |
+
# env.bimanual_dex.set_both_hand_state(left_hand_state="smooth", right_hand_state="smooth")
|
| 345 |
+
|
| 346 |
+
for i in range(50):
|
| 347 |
+
env.step()
|
| 348 |
+
|
| 349 |
+
garment_fold_length = np.max([
|
| 350 |
+
abs(manipulation_points[0][1] - manipulation_points[2][1]),
|
| 351 |
+
abs(manipulation_points[1][1] - manipulation_points[3][1])
|
| 352 |
+
]) / 2
|
| 353 |
+
|
| 354 |
+
garment_fold_width = np.max([
|
| 355 |
+
abs(manipulation_points[0][0] - manipulation_points[1][0]),
|
| 356 |
+
abs(manipulation_points[2][0] - manipulation_points[3][0])
|
| 357 |
+
])
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
manipulation_points=manipulation_points[2:]
|
| 361 |
+
left_off0=np.array([-0.08,-0.0,0.005])
|
| 362 |
+
right_off0=np.array([0.08,-0.0,0.005])
|
| 363 |
+
left_off1=np.array([-0.05,-0.0,0.002])
|
| 364 |
+
right_off1=np.array([0.05,-0.0,0.002])
|
| 365 |
+
left_off2=np.array([0.005,0.05,-0.0])
|
| 366 |
+
right_off2=np.array([-0.005,0.05,-0.0])
|
| 367 |
+
manipulation_points[:,2]=0.00
|
| 368 |
+
|
| 369 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0]+left_off0, left_ori=left_ori, right_pos=manipulation_points[1]+right_off0, right_ori=right_ori)
|
| 370 |
+
for i in range(20):
|
| 371 |
+
env.step()
|
| 372 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0]+left_off1, left_ori=left_ori, right_pos=manipulation_points[1]+right_off1, right_ori=right_ori)
|
| 373 |
+
for i in range(20):
|
| 374 |
+
env.step()
|
| 375 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0]+left_off2, left_ori=left_ori, right_pos=manipulation_points[1]+right_off2, right_ori=right_ori)
|
| 376 |
+
for i in range(20):
|
| 377 |
+
env.step()
|
| 378 |
+
# env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0]+left_off3, left_ori=left_ori, right_pos=manipulation_points[1]+right_off3, right_ori=right_ori)
|
| 379 |
+
# for i in range(20):
|
| 380 |
+
# env.step()
|
| 381 |
+
|
| 382 |
+
if data_collection_flag:
|
| 383 |
+
env.record(task_name="Store_Tops",stage_index=1)
|
| 384 |
+
|
| 385 |
+
env.bimanual_dex.set_both_hand_state(left_hand_state="close", right_hand_state="close")
|
| 386 |
+
|
| 387 |
+
left_point = np.array([pusher_center[0]-garment_fold_width/1.75, manipulation_points[0][1]+0.35, np.max([garment_fold_length+0.2,0.5])])
|
| 388 |
+
right_point = np.array([pusher_center[0]+garment_fold_width/1.75, manipulation_points[0][1]+0.35, np.max([garment_fold_length+0.2,0.5])])
|
| 389 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=left_point, left_ori=left_ori, right_pos=right_point, right_ori=right_ori)
|
| 390 |
+
|
| 391 |
+
left_point = np.array([pusher_center[0]-garment_fold_width/1.75, pusher_center[1], 0.5])
|
| 392 |
+
right_point = np.array([pusher_center[0]+garment_fold_width/1.75, pusher_center[1], 0.5])
|
| 393 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=left_point, left_ori=left_ori, right_pos=right_point, right_ori=right_ori)
|
| 394 |
+
|
| 395 |
+
left_point = np.array([pusher_center[0]-garment_fold_width/1.75, pusher_center[1], pusher_center[2]+0.10])
|
| 396 |
+
right_point = np.array([pusher_center[0]+garment_fold_width/1.75, pusher_center[1], pusher_center[2]+0.10])
|
| 397 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=left_point, left_ori=left_ori, right_pos=right_point, right_ori=right_ori)
|
| 398 |
+
|
| 399 |
+
# left_point = np.array([pusher_center[0]-garment_fold_width/1.35, pusher_center[1]-garment_fold_length/1.2, pusher_center[2]+0.05])
|
| 400 |
+
# right_point = np.array([pusher_center[0]+garment_fold_width/1.35, pusher_center[1]-garment_fold_length/1.2, pusher_center[2]+0.05])
|
| 401 |
+
# env.bimanual_dex.dense_move_both_ik(left_pos=left_point, left_ori=left_ori, right_pos=right_point, right_ori=right_ori)
|
| 402 |
+
|
| 403 |
+
env.bimanual_dex.set_both_hand_state(
|
| 404 |
+
left_hand_state='open',
|
| 405 |
+
right_hand_state='open'
|
| 406 |
+
)
|
| 407 |
+
|
| 408 |
+
if data_collection_flag:
|
| 409 |
+
env.stop_record()
|
| 410 |
+
|
| 411 |
+
delete_prim("/World/DexLeft")
|
| 412 |
+
delete_prim("/World/DexRight")
|
| 413 |
+
for i in range(50):
|
| 414 |
+
env.step()
|
| 415 |
+
|
| 416 |
+
success=True
|
| 417 |
+
store_state, color = env.judge_camera.get_point_cloud_data_from_segment(save_or_not=False)
|
| 418 |
+
# get max_x min_x max_y min_y
|
| 419 |
+
max_x = np.max(store_state[:, 0])
|
| 420 |
+
min_x = np.min(store_state[:, 0])
|
| 421 |
+
max_y = np.max(store_state[:, 1])
|
| 422 |
+
min_y = np.min(store_state[:, 1])
|
| 423 |
+
# get the center of the point cloud
|
| 424 |
+
center_x = (max_x + min_x) / 2
|
| 425 |
+
center_y = (max_y + min_y) / 2
|
| 426 |
+
# get the distance between the center and the pusher
|
| 427 |
+
distance = np.sqrt((center_x - pusher_center[0]) ** 2 + (center_y - pusher_center[1]) ** 2)
|
| 428 |
+
# if the distance is less than 0.05, it is considered as success
|
| 429 |
+
if distance < 0.1:
|
| 430 |
+
success=True
|
| 431 |
+
else:
|
| 432 |
+
success=False
|
| 433 |
+
|
| 434 |
+
cprint("----------- Judge Begin -----------", "blue", attrs=["bold"])
|
| 435 |
+
cprint(f"garment_center: {center_x}, {center_y}", "blue")
|
| 436 |
+
cprint(f"pusher_center: {pusher_center[0]}, {pusher_center[1]}", "blue")
|
| 437 |
+
cprint(f"distance: {distance}", "blue")
|
| 438 |
+
cprint("----------- Judge End -----------", "blue", attrs=["bold"])
|
| 439 |
+
cprint(f"final result: {success}", color="green", on_color="on_green")
|
| 440 |
+
|
| 441 |
+
# if you wanna create gif, use this code. Need Cooperation with thread.
|
| 442 |
+
if record_video_flag and success:
|
| 443 |
+
if not os.path.exists("Data/Store_Tops/video"):
|
| 444 |
+
os.makedirs("Data/Store_Tops/video")
|
| 445 |
+
env.env_camera.create_mp4(get_unique_filename("Data/Store_Tops/video/video", ".mp4"))
|
| 446 |
+
|
| 447 |
+
if data_collection_flag:
|
| 448 |
+
# write into .log file
|
| 449 |
+
with open("Data/Store_Tops/data_collection_log.txt", "a") as f:
|
| 450 |
+
f.write(f"result:{success} usd_path:{env.garment.usd_path} pos_x:{pos[0]} pos_y:{pos[1]} env_dx:{env_dx} env_dy:{env_dy} \n")
|
| 451 |
+
|
| 452 |
+
if data_collection_flag:
|
| 453 |
+
if success:
|
| 454 |
+
env.record_to_npz(env_change=True)
|
| 455 |
+
if not os.path.exists("Data/Store_Tops/final_state_pic"):
|
| 456 |
+
os.makedirs("Data/Store_Tops/final_state_pic")
|
| 457 |
+
env.env_camera.get_rgb_graph(save_or_not=True,save_path=get_unique_filename("Data/Store_Tops/final_state_pic/img",".png"))
|
| 458 |
+
result = {
|
| 459 |
+
"task_name": "Store Tops",
|
| 460 |
+
"success": bool(success),
|
| 461 |
+
"garment_point_count": int(env.garment_pcd.shape[0]) if env.garment_pcd is not None else 0,
|
| 462 |
+
"object_point_count": int(env.object_pcd.shape[0]) if env.object_pcd is not None else 0,
|
| 463 |
+
"judge_point_count": int(store_state.shape[0]),
|
| 464 |
+
"affordance_feature_norm": float(np.linalg.norm(env.points_affordance_feature)) if env.points_affordance_feature is not None else 0.0,
|
| 465 |
+
"distance_to_target_center": float(distance),
|
| 466 |
+
}
|
| 467 |
+
return result
|
| 468 |
+
|
| 469 |
+
|
| 470 |
+
if __name__ == "__main__":

    args = parse_args_record()

    # Default garment placement / environment offsets (overridden by the
    # randomization branches below when the corresponding flags are set).
    pos = np.array([0.0, 0.7, 0.20])
    ori = np.array([0.0, 0.0, 0.0])
    usd_path = None
    env_dx = 0.0
    env_dy = 0.0

    if args.env_random_flag or args.garment_random_flag:
        # Seed from wall-clock time so repeated runs sample different setups.
        np.random.seed(int(time.time()))
        if args.env_random_flag:
            env_dx = np.random.uniform(-0.3, 0.3)  # changeable
            env_dy = np.random.uniform(-0.1, 0.1)  # changeable
        if args.garment_random_flag:
            x = np.random.uniform(-0.05, 0.05)  # changeable
            y = np.random.uniform(0.65, 0.75)  # changeable
            pos = np.array([x, y, 0.0])
            ori = np.array([0.0, 0.0, 0.0])
            Base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            assets_lists = os.path.join(Base_dir, "Model_HALO/GAM/checkpoints/Tops_NoSleeve/assets_training_list.txt")
            # Read the candidate garment assets and pick one at random.
            with open(assets_lists, "r", encoding='utf-8') as f:
                assets_list = [line.rstrip('\n') for line in f]
            usd_path = os.getcwd() + "/" + np.random.choice(assets_list)

    result = StoreTops(pos, ori, usd_path, env_dx, env_dy, args.ground_material_usd, args.data_collection_flag, args.record_video_flag)
    if EMIT_JSON:
        print(f"{RESULT_PREFIX}{json.dumps(result, sort_keys=True)}")

    if args.data_collection_flag:
        simulation_app.close()
    elif HEADLESS:
        simulation_app.close()
    else:
        # Interactive run: keep the viewer alive until the user closes it.
        while simulation_app.is_running():
            simulation_app.update()
        simulation_app.close()
third_party/DexGarmentLab/Env_Validation/Hang_Tops_HALO.py
ADDED
|
@@ -0,0 +1,405 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from isaacsim import SimulationApp
|
| 2 |
+
simulation_app = SimulationApp({"headless": True})
|
| 3 |
+
|
| 4 |
+
# load external package
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
import time
|
| 8 |
+
import numpy as np
|
| 9 |
+
import open3d as o3d
|
| 10 |
+
from termcolor import cprint
|
| 11 |
+
import threading
|
| 12 |
+
|
| 13 |
+
# load isaac-relevant package
|
| 14 |
+
import omni.replicator.core as rep
|
| 15 |
+
import isaacsim.core.utils.prims as prims_utils
|
| 16 |
+
from pxr import UsdGeom,UsdPhysics,PhysxSchema, Gf
|
| 17 |
+
from isaacsim.core.api import World
|
| 18 |
+
from isaacsim.core.api import SimulationContext
|
| 19 |
+
from isaacsim.core.api.objects import DynamicCuboid, FixedCuboid, VisualCuboid
|
| 20 |
+
from isaacsim.core.utils.prims import is_prim_path_valid, set_prim_visibility, delete_prim
|
| 21 |
+
from isaacsim.core.utils.string import find_unique_string_name
|
| 22 |
+
from isaacsim.core.utils.viewports import set_camera_view
|
| 23 |
+
from isaacsim.core.utils.stage import add_reference_to_stage, is_stage_loading
|
| 24 |
+
from isaacsim.core.prims import SingleXFormPrim, SingleClothPrim, SingleRigidPrim, SingleGeometryPrim, SingleParticleSystem, SingleDeformablePrim
|
| 25 |
+
from isaacsim.core.prims import XFormPrim, ClothPrim, RigidPrim, GeometryPrim, ParticleSystem
|
| 26 |
+
from isaacsim.core.utils.rotations import euler_angles_to_quat
|
| 27 |
+
from isaacsim.core.utils.types import ArticulationAction, ArticulationActions
|
| 28 |
+
from omni.physx.scripts import deformableUtils,particleUtils,physicsUtils
|
| 29 |
+
|
| 30 |
+
# load custom package
|
| 31 |
+
sys.path.append(os.getcwd())
|
| 32 |
+
from Env_StandAlone.BaseEnv import BaseEnv
|
| 33 |
+
from Env_Config.Garment.Particle_Garment import Particle_Garment
|
| 34 |
+
from Env_Config.Garment.Deformable_Garment import Deformable_Garment
|
| 35 |
+
from Env_Config.Robot.BimanualDex_Ur10e import Bimanual_Ur10e
|
| 36 |
+
from Env_Config.Camera.Recording_Camera import Recording_Camera
|
| 37 |
+
from Env_Config.Room.Real_Ground import Real_Ground
|
| 38 |
+
from Env_Config.Room.Object_Tools import hanger_load, set_prim_visible_group, delete_prim_group
|
| 39 |
+
from Env_Config.Utils_Project.Code_Tools import get_unique_filename, normalize_columns
|
| 40 |
+
from Env_Config.Utils_Project.Parse import parse_args_val
|
| 41 |
+
from Env_Config.Utils_Project.Point_Cloud_Manip import rotate_point_cloud
|
| 42 |
+
from Model_HALO.GAM.GAM_Encapsulation import GAM_Encapsulation
|
| 43 |
+
from Model_HALO.SADP.SADP import SADP
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class HangTops_Env(BaseEnv):
    """Isaac Sim environment for the HALO 'Hang Tops' validation task.

    On construction it builds the whole scene: ground, a particle-based
    garment, a bimanual UR10e dexterous-hand robot, several recording
    cameras, a hanger, the GAM affordance model and the SADP stage-1
    policy, then steps the world until it settles.
    """
    def __init__(
        self,
        pos:np.ndarray=None,              # target (x, y, z) garment position after settling
        ori:np.ndarray=None,              # garment orientation (euler angles)
        usd_path:str=None,                # garment USD asset; a default top is used when None
        env_dx:float=0.0,                 # hanger x-offset (environment randomization)
        env_dy:float=0.0,                 # hanger y-offset (environment randomization)
        ground_material_usd:str=None,     # optional ground visual material
        record_video_flag:bool=False,     # prepare an async RGB-capture thread for video
        training_data_num:int=100,        # SADP: number of demos the checkpoint was trained on
        stage_1_checkpoint_num:int=1500,  # SADP stage-1 checkpoint id (the only stage loaded here)
        stage_2_checkpoint_num:int=1500,  # accepted for signature parity; not used in this class
        stage_3_checkpoint_num:int=1500,  # accepted for signature parity; not used in this class
    ):
        # load BaseEnv
        super().__init__()

        # ------------------------------------ #
        # ---        Add Env Assets        --- #
        # ------------------------------------ #
        self.ground = Real_Ground(
            self.scene,
            visual_material_usd = ground_material_usd,
            # you can use materials in 'Assets/Material/Floor' to change the texture of ground.
        )

        # load garment (spawned far away at y=3.0; moved to `pos` after init below)
        self.garment = Particle_Garment(
            self.world,
            pos=np.array([0, 3.0, 0.6]),
            ori=np.array([0.0, 0.0, 0.0]),
            usd_path="Assets/Garment/Tops/NoCollar_Lsleeve_FrontClose/TNLC_Top603/TNLC_Top603_obj.usd" if usd_path is None else usd_path,
        )
        # Here are some example garments you can try:
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Jacket032/TCLC_Jacket032_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Jacket152/TCLC_Jacket152_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Top566/TCLC_Top566_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Top584/TCLC_Top584_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_top118/TCLC_top118_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Top476/TCLC_Top476_obj.usd",
        # "Assets/Garment/Tops/Collar_Lsleeve_FrontClose/TCLC_Top030/TCLC_Top030_obj.usd",

        # load bimanual_dex (two UR10e arms flanking the workspace)
        self.bimanual_dex = Bimanual_Ur10e(
            self.world,
            dexleft_pos=np.array([-0.8, 0.0, 0.6]),
            dexleft_ori=np.array([0.0, 0.0, 0.0]),
            dexright_pos=np.array([0.8, 0.0, 0.6]),
            dexright_ori=np.array([0.0, 0.0, 0.0]),
        )

        # load camera (top view of the garment, used for segmented point clouds)
        self.garment_camera = Recording_Camera(
            camera_position=np.array([0.0, -3.0, 6.75]),
            camera_orientation=np.array([0, 60.0, 90.0]),
            prim_path="/World/garment_camera",
        )

        # scene overview camera (RGB + depth, used for video and policy observations)
        self.env_camera = Recording_Camera(
            camera_position=np.array([0.0, 6.65, 4.0]),
            camera_orientation=np.array([0, 30.0, -90.0]),
            prim_path="/World/env_camera",
        )

        # camera dedicated to capturing the hanger (object) point cloud
        self.object_camera = Recording_Camera(
            camera_position=np.array([0.0, -6.6, 4.9]),
            camera_orientation=np.array([0, 30.0, 90.0]),
            prim_path="/World/object_camera",
        )

        # Point clouds / affordance features are filled in later by HangTops().
        self.garment_pcd = None
        self.object_pcd = None
        self.points_affordance_feature = None

        # load GAM Model (affordance/manipulation-point prediction)
        self.model = GAM_Encapsulation(catogory="Tops_LongSleeve")

        # load hanger, displaced by the environment-randomization offsets
        self.env_dx = env_dx
        self.env_dy = env_dy
        self.hanger_center = hanger_load(self.scene, env_dx, env_dy)

        # side-view camera aligned with the hanger; used for the success judgment
        self.judge_camera = Recording_Camera(
            camera_position=np.array([self.hanger_center[0], 6.0, 0.5]),
            camera_orientation=np.array([0, 0.0, -90.0]),
            prim_path="/World/judge_camera",
        )

        # stage-1 policy only; stage_2/stage_3 checkpoint args are currently unused
        self.sadp = SADP(task_name="Hang_Tops_stage_1", data_num=training_data_num, checkpoint_num=stage_1_checkpoint_num)

        # ------------------------------------ #
        # --- Initialize World to be Ready --- #
        # ------------------------------------ #
        # initialize world
        self.reset()

        # initialize recording camera to obtain point cloud data of garment
        self.garment_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Garment/garment",
            ]
        )
        # initialize gif camera to obtain rgb with the aim of creating gif
        self.env_camera.initialize(
            depth_enable=True,
        )

        self.judge_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Garment/garment",
            ]
        )

        self.object_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/hanger1",
                "/World/hanger2",
                "/World/hanger3",
            ]
        )

        # add thread and record gif Asynchronously(use to collect rgb data for generating gif)
        # NOTE: the thread is created here but started later by HangTops().
        if record_video_flag:
            self.thread_record = threading.Thread(target=self.env_camera.collect_rgb_graph_for_video)
            self.thread_record.daemon = True

        # move garment to the target position (z pinned to 0.2 above the ground)
        self.garment.set_pose(pos=np.array([pos[0], pos[1], 0.2]), ori=ori)
        self.position = [pos[0], pos[1], 0.2]
        self.orientation = ori

        # open hand to be initial state
        self.bimanual_dex.set_both_hand_state("open", "open")

        # step world to make it ready (let physics settle)
        for i in range(200):
            self.step()

        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])
        cprint(f"usd_path: {usd_path}", "magenta")
        cprint(f"pos_x: {pos[0]}", "magenta")
        cprint(f"pos_y: {pos[1]}", "magenta")
        cprint(f"env_dx: {env_dx}", "magenta")
        cprint(f"env_dy: {env_dy}", "magenta")
        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])

        cprint("World Ready!", "green", "on_green")
| 199 |
+
# if __name__=="__main__":
|
| 200 |
+
def HangTops(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, validation_flag, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num):
    """Run one Hang-Tops episode: build the env, capture point clouds, roll
    out the SADP stage-1 policy, then judge and optionally log success.

    Success is judged from the side `judge_camera` point cloud: the garment
    must not have too many points near the floor and its center-x must be
    within +/-0.035 of the hanger center-x.
    """

    env = HangTops_Env(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num)

    # reduce garment gravity during manipulation so it drapes less aggressively
    env.garment.particle_material.set_gravity_scale(0.45)

    # hide prim to get object point cloud
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
        visible=False,
    )
    for i in range(50):
        env.step()

    env.object_pcd, color = env.object_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
        # real_time_watch=True,
    )

    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
        visible=True,
    )
    for i in range(50):
        env.step()

    # hide prim to get garment point cloud
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/hanger1", "/World/hanger2", "/World/hanger3"],
        visible=False,
    )
    for i in range(50):
        env.step()

    env.garment_pcd, color = env.garment_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
        # real_time_watch=True,
    )

    # make prim visible
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/hanger1", "/World/hanger2", "/World/hanger3"],
        visible=True,
    )
    for i in range(50):
        env.step()

    # start asynchronous RGB capture (thread was created in __init__)
    if record_video_flag:
        env.thread_record.start()

    # get manipulation points from GAM Model
    manipulation_points, indices, points_similarity = env.model.get_manipulation_points(input_pcd=env.garment_pcd, index_list=[838,179])

    env.points_affordance_feature = normalize_columns(points_similarity.T)

    manipulation_points[:, 2] = 0.00 # flatten z so the dexhands grasp at ground level

    # move both dexhand to the manipulation points
    env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0], left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=manipulation_points[1], right_ori=np.array([0.406, -0.406, -0.579, 0.579]))

    for i in range(20):
        env.step()

    # policy rollout: 9 inference steps, each executing 4 actions of the
    # predicted chunk, with 5 sim steps per action.
    for i in range(9):

        print(f"Stage_1_Step: {i}")

        joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
        joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
        joint_state = np.concatenate([joint_pos_L, joint_pos_R])

        # observation: joint state + environment/garment/object point clouds
        # + GAM affordance features
        obs = dict()
        obs['agent_pos']=joint_state
        obs['environment_point_cloud']=env.env_camera.get_pointcloud_from_depth()
        obs['garment_point_cloud']=env.garment_pcd
        obs['object_point_cloud']=env.object_pcd
        obs['points_affordance_feature']=env.points_affordance_feature

        action=env.sadp.get_action(obs)

        print("action_shape:",action.shape)

        for j in range(4):

            # first 30 dims drive the left arm+hand, the rest the right
            action_L = ArticulationAction(joint_positions=action[j][:30])
            action_R = ArticulationAction(joint_positions=action[j][30:])

            env.bimanual_dex.dexleft.apply_action(action_L)
            env.bimanual_dex.dexright.apply_action(action_R)

            for _ in range(5):
                env.step()

            joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
            joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
            joint_state = np.concatenate([joint_pos_L, joint_pos_R])

            # feed the post-action observation back to the policy's history
            obs = dict()
            obs['agent_pos']=joint_state
            obs['environment_point_cloud']=env.env_camera.get_pointcloud_from_depth()
            obs['garment_point_cloud']=env.garment_pcd
            obs['object_point_cloud']=env.object_pcd
            obs['points_affordance_feature']=env.points_affordance_feature

            env.sadp.update_obs(obs)

    # restore normal-plus gravity so the garment settles onto the hanger
    env.garment.particle_material.set_gravity_scale(2.0)

    for i in range(100):
        env.step()

    env.garment.particle_material.set_gravity_scale(0.45)

    # hide the dexhands so they don't occlude the judge camera
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight"],
        visible=False,
    )
    for i in range(50):
        env.step()

    # if you wanna create gif, use this code. Need Cooperation with thread.
    if record_video_flag:
        if not os.path.exists("Data/Hang_Tops_Validation_HALO/video"):
            os.makedirs("Data/Hang_Tops_Validation_HALO/video")
        env.env_camera.create_mp4(get_unique_filename("Data/Hang_Tops_Validation_HALO/video/video", ".mp4"))



    success=True

    pcd_judge, _ = env.judge_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
        # real_time_watch=True
    )
    z_values = pcd_judge[:, 2]  # assumes pcd_judge has shape (N, 3)
    # garment points near the floor mean it slipped off the hanger
    points_below_threshold = np.sum(z_values < 0.01)
    if points_below_threshold > 15:
        success=False
    elif env.garment.get_garment_center_pos()[0] > env.hanger_center[0]+0.035 or env.garment.get_garment_center_pos()[0] < env.hanger_center[0]-0.035:
        success=False
    else:
        success=True

    cprint("----------- Judge Begin -----------", "blue", attrs=["bold"])
    cprint(f"points_below_threshold: {points_below_threshold}", "blue")
    cprint(f"garment_center_pos_x: {env.garment.get_garment_center_pos()[0]}", "blue")
    cprint(f"hanger_center_x: {env.hanger_center[0]}", "blue")
    cprint("----------- Judge End -----------", "blue", attrs=["bold"])
    cprint(f"final result: {success}", color="green", on_color="on_green")

    if validation_flag:
        if not os.path.exists("Data/Hang_Tops_Validation_HALO"):
            os.makedirs("Data/Hang_Tops_Validation_HALO")
        # write into .log file
        with open("Data/Hang_Tops_Validation_HALO/validation_log.txt", "a") as f:
            f.write(f"result:{success} usd_path:{env.garment.usd_path} pos_x:{pos[0]} pos_y:{pos[1]} env_dx:{env_dx} env_dy:{env_dy} \n")
        if not os.path.exists("Data/Hang_Tops_Validation_HALO/final_state_pic"):
            os.makedirs("Data/Hang_Tops_Validation_HALO/final_state_pic")
        env.env_camera.get_rgb_graph(save_or_not=True,save_path=get_unique_filename("Data/Hang_Tops_Validation_HALO/final_state_pic/img",".png"))
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
|
| 367 |
+
if __name__ == "__main__":

    args = parse_args_val()

    # Default placement / environment offsets; the randomization branches
    # below override them when the corresponding flags are set.
    pos = np.array([0, 0.7, 0.2])
    ori = np.array([0.0, 0.0, 0.0])
    usd_path = None
    env_dx = 0.0
    env_dy = 0.0

    if args.env_random_flag or args.garment_random_flag:
        # Seed from wall-clock time so repeated runs sample different setups.
        np.random.seed(int(time.time()))
        if args.env_random_flag:
            env_dx = np.random.uniform(-0.15, 0.15)  # changeable
            env_dy = np.random.uniform(-0.05, 0.05)  # changeable
        if args.garment_random_flag:
            x = np.random.uniform(-0.1, 0.1)  # changeable
            y = np.random.uniform(0.5, 0.8)  # changeable
            pos = np.array([x, y, 0.0])
            ori = np.array([0.0, 0.0, 0.0])
            Base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
            assets_lists = os.path.join(Base_dir, "Model_HALO/GAM/checkpoints/Tops_LongSleeve/assets_list.txt")
            # Read the candidate garment assets and pick one at random.
            with open(assets_lists, "r", encoding='utf-8') as f:
                assets_list = [line.rstrip('\n') for line in f]
            usd_path = np.random.choice(assets_list)

    HangTops(pos, ori, usd_path, env_dx, env_dy, args.ground_material_usd, args.validation_flag, args.record_video_flag, args.training_data_num, args.stage_1_checkpoint_num, args.stage_2_checkpoint_num, args.stage_3_checkpoint_num)

    if args.validation_flag:
        simulation_app.close()
    else:
        # Interactive run: keep the viewer alive until the user closes it.
        while simulation_app.is_running():
            simulation_app.update()
        simulation_app.close()
|
third_party/DexGarmentLab/Env_Validation/Hang_Trousers_HALO.py
ADDED
|
@@ -0,0 +1,409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from isaacsim import SimulationApp
|
| 2 |
+
simulation_app = SimulationApp({"headless": True})
|
| 3 |
+
|
| 4 |
+
# load external package
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
import time
|
| 8 |
+
import numpy as np
|
| 9 |
+
import open3d as o3d
|
| 10 |
+
from termcolor import cprint
|
| 11 |
+
import threading
|
| 12 |
+
|
| 13 |
+
# load isaac-relevant package
|
| 14 |
+
import omni.replicator.core as rep
|
| 15 |
+
import isaacsim.core.utils.prims as prims_utils
|
| 16 |
+
from pxr import UsdGeom,UsdPhysics,PhysxSchema, Gf
|
| 17 |
+
from isaacsim.core.api import World
|
| 18 |
+
from isaacsim.core.api import SimulationContext
|
| 19 |
+
from isaacsim.core.api.objects import DynamicCuboid, FixedCuboid, VisualCuboid
|
| 20 |
+
from isaacsim.core.utils.prims import is_prim_path_valid, set_prim_visibility, delete_prim
|
| 21 |
+
from isaacsim.core.utils.string import find_unique_string_name
|
| 22 |
+
from isaacsim.core.utils.viewports import set_camera_view
|
| 23 |
+
from isaacsim.core.utils.stage import add_reference_to_stage, is_stage_loading
|
| 24 |
+
from isaacsim.core.prims import SingleXFormPrim, SingleClothPrim, SingleRigidPrim, SingleGeometryPrim, SingleParticleSystem, SingleDeformablePrim
|
| 25 |
+
from isaacsim.core.prims import XFormPrim, ClothPrim, RigidPrim, GeometryPrim, ParticleSystem
|
| 26 |
+
from isaacsim.core.utils.rotations import euler_angles_to_quat
|
| 27 |
+
from isaacsim.core.utils.types import ArticulationAction, ArticulationActions
|
| 28 |
+
from omni.physx.scripts import deformableUtils,particleUtils,physicsUtils
|
| 29 |
+
|
| 30 |
+
# load custom package
|
| 31 |
+
sys.path.append(os.getcwd())
|
| 32 |
+
from Env_StandAlone.BaseEnv import BaseEnv
|
| 33 |
+
from Env_Config.Garment.Particle_Garment import Particle_Garment
|
| 34 |
+
from Env_Config.Garment.Deformable_Garment import Deformable_Garment
|
| 35 |
+
from Env_Config.Robot.BimanualDex_Ur10e import Bimanual_Ur10e
|
| 36 |
+
from Env_Config.Camera.Recording_Camera import Recording_Camera
|
| 37 |
+
from Env_Config.Room.Real_Ground import Real_Ground
|
| 38 |
+
from Env_Config.Room.Object_Tools import hanger_load, set_prim_visible_group, delete_prim_group
|
| 39 |
+
from Env_Config.Utils_Project.Code_Tools import get_unique_filename, normalize_columns
|
| 40 |
+
from Env_Config.Utils_Project.Parse import parse_args_val
|
| 41 |
+
from Env_Config.Utils_Project.Point_Cloud_Manip import rotate_point_cloud
|
| 42 |
+
from Model_HALO.GAM.GAM_Encapsulation import GAM_Encapsulation
|
| 43 |
+
from Model_HALO.SADP.SADP import SADP
|
| 44 |
+
|
| 45 |
+
class HangTrousers_Env(BaseEnv):
    """Standalone Isaac Sim environment for the "hang trousers" task.

    Builds the full scene (ground, particle-cloth trousers, bimanual UR10e
    with dexterous hands, a hanger, and four recording cameras), loads the
    GAM affordance model and the stage-1 SADP policy checkpoint, then steps
    the simulation until the world settles.
    """

    def __init__(
        self,
        pos:np.ndarray=None,
        ori:np.ndarray=None,
        usd_path:str=None,
        env_dx:float=0.0,
        env_dy:float=0.0,
        ground_material_usd:str=None,
        record_video_flag:bool=False,
        training_data_num:int=100,
        stage_1_checkpoint_num:int=1500,
        stage_2_checkpoint_num:int=1500,
        stage_3_checkpoint_num:int=1500,
    ):
        """Create the scene and bring the world to a ready state.

        Args:
            pos: garment drop position; only x/y are used here (z is forced
                to 0.2 below). NOTE(review): a None value crashes at
                ``set_pose`` — callers are expected to always pass an array.
            ori: garment orientation forwarded to ``set_pose``.
            usd_path: garment USD asset; defaults to the PL_074 trousers.
            env_dx, env_dy: hanger placement offsets passed to ``hanger_load``.
            ground_material_usd: optional visual material for the ground.
            record_video_flag: when True, prepare (but do not start) a daemon
                thread collecting env_camera RGB frames for video export.
            training_data_num: dataset-size tag selecting the SADP run.
            stage_1_checkpoint_num: checkpoint id for the stage-1 SADP policy.
            stage_2_checkpoint_num, stage_3_checkpoint_num: accepted for a
                uniform signature across stages; not used in this constructor.
        """
        # load BaseEnv (provides self.world / self.scene and step()/reset())
        super().__init__()

        # ------------------------------------ #
        # ---        Add Env Assets        --- #
        # ------------------------------------ #
        self.ground = Real_Ground(
            self.scene,
            visual_material_usd = ground_material_usd,
            # you can use materials in 'Assets/Material/Floor' to change the texture of ground.
        )

        # load garment (particle-based cloth); spawned far away at y=3.0 and
        # moved to its task position after reset()
        self.garment = Particle_Garment(
            self.world,
            pos=np.array([0, 3.0, 0.6]),
            ori=np.array([0.0, 0.0, 0.0]),
            usd_path="Assets/Garment/Trousers/Long/PL_074/PL_074_obj.usd" if usd_path is None else usd_path,
            # contact_offset=0.012,
            # rest_offset=0.010,
            # particle_contact_offset=0.012,
            # fluid_rest_offset=0.010,
            # solid_rest_offset=0.010,
        )
        # Here are some example garments you can try:
        # "Assets/Garment/Trousers/Long/PL_Pants051/PL_Pants051_obj.usd",
        # "Assets/Garment/Trousers/Long/PL_085/PL_085_obj.usd",
        # "Assets/Garment/Trousers/Long/PL_Pants091/PL_Pants091_obj.usd",
        # "Assets/Garment/Trousers/Long/PL_LongPants024/PL_LongPants024_obj.usd",
        # "Assets/Garment/Trousers/Long/PL_M1_074/PL_M1_074_obj.usd",
        # "Assets/Garment/Trousers/Long/PL_M2_015/PL_M2_015_obj.usd",
        # "Assets/Garment/Trousers/Long/PL_041/PL_041_obj.usd",

        # load bimanual_dex: two UR10e arms with dexterous hands, mirrored
        # about the y-axis
        self.bimanual_dex = Bimanual_Ur10e(
            self.world,
            dexleft_pos=np.array([-0.8, 0.0, 0.6]),
            dexleft_ori=np.array([0.0, 0.0, 0.0]),
            dexright_pos=np.array([0.8, 0.0, 0.6]),
            dexright_ori=np.array([0.0, 0.0, 0.0]),
        )

        # load cameras: garment_camera for garment point clouds
        self.garment_camera = Recording_Camera(
            camera_position=np.array([0.0, -3.0, 6.75]),
            camera_orientation=np.array([0, 60.0, 90.0]),
            prim_path="/World/garment_camera",
        )

        # env_camera: overview RGB/depth view used for videos and policy obs
        self.env_camera = Recording_Camera(
            camera_position=np.array([0.0, 6.65, 4.0]),
            camera_orientation=np.array([0, 30.0, -90.0]),
            prim_path="/World/env_camera",
        )

        # object_camera: view of the hanger prims only
        self.object_camera = Recording_Camera(
            camera_position=np.array([0.0, -6.6, 4.9]),
            camera_orientation=np.array([0, 30.0, 90.0]),
            prim_path="/World/object_camera",
        )

        # point-cloud / affordance buffers, filled later by the task function
        self.garment_pcd = None
        self.object_pcd = None
        self.points_affordance_feature = None

        # load GAM Model ("catogory" sic — parameter name in the GAM API)
        self.model = GAM_Encapsulation(catogory="Trousers")

        # load hanger at the (possibly offset) location; hanger_center is the
        # reference point used by the success judge
        self.env_dx = env_dx
        self.env_dy = env_dy
        self.hanger_center = hanger_load(self.scene, env_dx, env_dy)

        # judge_camera: side view at the hanger used to decide task success
        self.judge_camera = Recording_Camera(
            camera_position=np.array([self.hanger_center[0], 6.0, 0.5]),
            camera_orientation=np.array([0, 0.0, -90.0]),
            prim_path="/World/judge_camera",
        )

        # stage-1 SADP policy (diffusion-style action predictor)
        self.sadp = SADP(task_name="Hang_Trousers_stage_1", data_num=training_data_num, checkpoint_num=stage_1_checkpoint_num)

        # ------------------------------------ #
        # --- Initialize World to be Ready --- #
        # ------------------------------------ #
        # initialize world
        self.reset()

        # initialize recording camera to obtain point cloud data of garment
        self.garment_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Garment/garment",
            ]
        )
        # initialize env camera to obtain rgb (and depth) for video creation
        self.env_camera.initialize(
            depth_enable=True,
        )

        self.judge_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Garment/garment",
            ]
        )

        self.object_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/hanger1",
                "/World/hanger2",
                "/World/hanger3",
            ]
        )

        # add thread to record frames asynchronously (collects rgb data for
        # video generation); started later by the task function
        if record_video_flag:
            self.thread_record = threading.Thread(target=self.env_camera.collect_rgb_graph_for_video)
            self.thread_record.daemon = True

        # move garment to the target position (z pinned to 0.2)
        self.garment.set_pose(pos=np.array([pos[0], pos[1], 0.2]), ori=ori)
        self.position = [pos[0], pos[1], 0.2]
        self.orientation = ori

        # open hands as the initial state
        self.bimanual_dex.set_both_hand_state("open", "open")

        # step world so physics settles before the task begins
        for i in range(200):
            self.step()

        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])
        cprint(f"usd_path: {usd_path}", "magenta")
        cprint(f"pos_x: {pos[0]}", "magenta")
        cprint(f"pos_y: {pos[1]}", "magenta")
        cprint(f"env_dx: {env_dx}", "magenta")
        cprint(f"env_dy: {env_dy}", "magenta")
        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])


        cprint("World Ready!", "green", "on_green")
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def HangTrousers(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, validation_flag, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num):
    """Run one stage-1 hang-trousers rollout and judge/log the outcome.

    Builds the environment, captures segmented point clouds of the hanger and
    garment (hiding the other prims while each is captured), queries the GAM
    model for grasp points, executes the SADP policy for 10 decision steps,
    then judges success from the judge-camera point cloud and the garment's
    final center position. Optionally writes a video and a validation log.
    """

    env = HangTrousers_Env(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num)

    # lighten the cloth so it settles gently during manipulation
    env.garment.particle_material.set_gravity_scale(0.45)

    # hide robot + garment so the object camera sees only the hanger
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
        visible=False,
    )
    for i in range(50):
        env.step()

    env.object_pcd, color = env.object_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
        # real_time_watch=True,
    )

    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
        visible=True,
    )
    for i in range(50):
        env.step()

    # hide robot + hanger so the garment camera sees only the garment
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/hanger1", "/World/hanger2", "/World/hanger3"],
        visible=False,
    )
    for i in range(50):
        env.step()

    env.garment_pcd, color = env.garment_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
        # real_time_watch=True,
    )

    # make all prims visible again
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/hanger1", "/World/hanger2", "/World/hanger3"],
        visible=True,
    )
    for i in range(50):
        env.step()

    if record_video_flag:
        env.thread_record.start()

    # get manipulation (grasp) points from the GAM model
    manipulation_points, indices, points_similarity = env.model.get_manipulation_points(input_pcd=env.garment_pcd, index_list=[1083,219])

    env.points_affordance_feature = normalize_columns(points_similarity.T)

    manipulation_points[:, 2] = 0.00  # pin grasp height to z=0.0 so both dexhands can reach the garment lying on the ground

    # move both dexhands to the manipulation points (fixed wrist orientations)
    env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0], left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=manipulation_points[1], right_ori=np.array([0.406, -0.406, -0.579, 0.579]))

    for i in range(20):
        env.step()

    # stage-1 policy rollout: 10 decision steps, 4 predicted actions per step
    for i in range(10):

        print(f"Stage_1_Step: {i}")

        joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
        joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
        joint_state = np.concatenate([joint_pos_L, joint_pos_R])

        obs = dict()
        obs['agent_pos']=joint_state
        obs['environment_point_cloud']=env.env_camera.get_pointcloud_from_depth()
        obs['garment_point_cloud']=env.garment_pcd
        obs['object_point_cloud']=env.object_pcd
        obs['points_affordance_feature']=env.points_affordance_feature

        action=env.sadp.get_action(obs)

        print("action_shape:",action.shape)

        for j in range(4):

            # first 30 action dims drive the left arm+hand, the rest the right
            action_L = ArticulationAction(joint_positions=action[j][:30])
            action_R = ArticulationAction(joint_positions=action[j][30:])

            env.bimanual_dex.dexleft.apply_action(action_L)
            env.bimanual_dex.dexright.apply_action(action_R)

            for _ in range(5):
                env.step()

            # refresh the policy's observation buffer after each applied action
            # NOTE(review): indentation reconstructed from a mangled dump —
            # presumably update_obs runs once per action (inside this loop);
            # confirm against the upstream DexGarmentLab source.
            joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
            joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
            joint_state = np.concatenate([joint_pos_L, joint_pos_R])

            obs = dict()
            obs['agent_pos']=joint_state
            obs['environment_point_cloud']=env.env_camera.get_pointcloud_from_depth()
            obs['garment_point_cloud']=env.garment_pcd
            obs['object_point_cloud']=env.object_pcd
            obs['points_affordance_feature']=env.points_affordance_feature

            env.sadp.update_obs(obs)

    # temporarily increase gravity so the garment drapes onto the hanger,
    # then restore the light-cloth setting
    env.garment.particle_material.set_gravity_scale(2.0)

    for i in range(200):
        env.step()

    env.garment.particle_material.set_gravity_scale(0.45)

    # hide the robots for the final judgement/recording views
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight"],
        visible=False,
    )
    for i in range(50):
        env.step()

    # if you want to create a video, use this code (cooperates with the record thread)
    if record_video_flag:
        if not os.path.exists("Data/Hang_Trousers_Validation_HALO/video"):
            os.makedirs("Data/Hang_Trousers_Validation_HALO/video")
        env.env_camera.create_mp4(get_unique_filename("Data/Hang_Trousers_Validation_HALO/video/video", ".mp4"))



    success=True

    pcd_judge, _ = env.judge_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
        # real_time_watch=True
    )
    z_values = pcd_judge[:, 2]  # assumes pcd_judge has shape (N, 3)
    points_below_threshold = np.sum(z_values < 0.01)
    # fail when too many garment points lie near the ground (it slipped off),
    # or when the garment center drifted >5 cm from the hanger center along x
    if points_below_threshold > 15:
        success=False
    elif env.garment.get_garment_center_pos()[0] > env.hanger_center[0]+0.05 or env.garment.get_garment_center_pos()[0] < env.hanger_center[0]-0.05:
        success=False
    else:
        success=True

    cprint("----------- Judge Begin -----------", "blue", attrs=["bold"])
    cprint(f"points_below_threshold: {points_below_threshold}", "blue")
    cprint(f"garment_center_pos_x: {env.garment.get_garment_center_pos()[0]}", "blue")
    cprint(f"hanger_center_x: {env.hanger_center[0]}", "blue")
    cprint("----------- Judge End -----------", "blue", attrs=["bold"])
    cprint(f"final result: {success}", color="green", on_color="on_green")

    if validation_flag:
        if not os.path.exists("Data/Hang_Trousers_Validation_HALO"):
            os.makedirs("Data/Hang_Trousers_Validation_HALO")
        # append the run outcome to the validation log
        with open("Data/Hang_Trousers_Validation_HALO/validation_log.txt", "a") as f:
            f.write(f"result:{success} usd_path:{env.garment.usd_path} pos_x:{pos[0]} pos_y:{pos[1]} env_dx:{env_dx} env_dy:{env_dy} \n")
        if not os.path.exists("Data/Hang_Trousers_Validation_HALO/final_state_pic"):
            os.makedirs("Data/Hang_Trousers_Validation_HALO/final_state_pic")
        env.env_camera.get_rgb_graph(save_or_not=True,save_path=get_unique_filename("Data/Hang_Trousers_Validation_HALO/final_state_pic/img",".png"))
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
if __name__=="__main__":

    args = parse_args_val()

    # Default task configuration, used when no randomization flag is given.
    pos = np.array([0, 0.7, 0.2])
    ori = np.array([0.0, 0.0, 0.0])
    usd_path = None
    env_dx = 0.0
    env_dy = 0.0

    if args.env_random_flag or args.garment_random_flag:
        # Re-seed from wall-clock time so repeated validation runs differ.
        np.random.seed(int(time.time()))
    if args.env_random_flag:
        # Randomize the hanger placement offsets.
        env_dx = np.random.uniform(-0.15, 0.15)  # changeable
        env_dy = np.random.uniform(-0.05, 0.05)  # changeable
    if args.garment_random_flag:
        # Randomize the garment drop position and pick a random trousers
        # asset from the GAM checkpoint's asset list.
        x = np.random.uniform(-0.1, 0.1)  # changeable
        y = np.random.uniform(0.5, 0.8)  # changeable
        pos = np.array([x, y, 0.0])
        ori = np.array([0.0, 0.0, 0.0])
        base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        assets_list_path = os.path.join(base_dir, "Model_HALO/GAM/checkpoints/Trousers/assets_list.txt")
        with open(assets_list_path, "r", encoding='utf-8') as f:
            assets_list = [line.rstrip('\n') for line in f]
        usd_path = np.random.choice(assets_list)

    HangTrousers(pos, ori, usd_path, env_dx, env_dy, args.ground_material_usd, args.validation_flag, args.record_video_flag, args.training_data_num, args.stage_1_checkpoint_num, args.stage_2_checkpoint_num, args.stage_3_checkpoint_num)

    if args.validation_flag:
        # Validation runs exit immediately once the rollout is judged.
        simulation_app.close()
    else:
        # Interactive runs keep the app alive until the window is closed.
        while simulation_app.is_running():
            simulation_app.update()

        simulation_app.close()
|
third_party/DexGarmentLab/Env_Validation/Store_Tops_HALO.py
ADDED
|
@@ -0,0 +1,547 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
from isaacsim import SimulationApp
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _env_flag(name: str, default: bool = False) -> bool:
    """Interpret environment variable *name* as a boolean switch.

    Returns *default* when the variable is unset; otherwise True exactly when
    the trimmed, lower-cased value is one of "1", "true", "yes", "on".
    """
    try:
        raw = os.environ[name]
    except KeyError:
        return default
    return raw.strip().lower() in {"1", "true", "yes", "on"}
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _env_float(name: str):
    """Read environment variable *name* as a float.

    Returns None when the variable is unset or an empty string; raises
    ValueError when set to a non-numeric value (same as float()).
    """
    raw = os.environ.get(name, "")
    return float(raw) if raw else None
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _env_text(name: str):
    """Read environment variable *name*, mapping unset/empty to None."""
    return os.environ.get(name) or None
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# Runtime configuration pulled from the environment. SimulationApp must be
# constructed before any isaacsim/omni imports further down the file.
HEADLESS = _env_flag("DEXGARMENTLAB_HEADLESS", True)  # run without a GUI window by default
EMIT_JSON = _env_flag("DEXGARMENTLAB_EMIT_JSON", False)  # NOTE(review): not referenced in this chunk — presumably consumed later in the file
RESULT_PREFIX = "DEXGARMENTLAB_RESULT="  # marker prefix for machine-readable result lines
simulation_app = SimulationApp({"headless": HEADLESS})
|
| 31 |
+
|
| 32 |
+
# load external package
|
| 33 |
+
import sys
|
| 34 |
+
import time
|
| 35 |
+
import numpy as np
|
| 36 |
+
import open3d as o3d
|
| 37 |
+
from termcolor import cprint
|
| 38 |
+
import threading
|
| 39 |
+
|
| 40 |
+
# load isaac-relevant package
|
| 41 |
+
import omni.replicator.core as rep
|
| 42 |
+
import isaacsim.core.utils.prims as prims_utils
|
| 43 |
+
from pxr import UsdGeom,UsdPhysics,PhysxSchema, Gf
|
| 44 |
+
from isaacsim.core.api import World
|
| 45 |
+
from isaacsim.core.api import SimulationContext
|
| 46 |
+
from isaacsim.core.api.objects import DynamicCuboid, FixedCuboid, VisualCuboid
|
| 47 |
+
from isaacsim.core.utils.prims import is_prim_path_valid, set_prim_visibility, delete_prim
|
| 48 |
+
from isaacsim.core.utils.string import find_unique_string_name
|
| 49 |
+
from isaacsim.core.utils.viewports import set_camera_view
|
| 50 |
+
from isaacsim.core.utils.stage import add_reference_to_stage, is_stage_loading
|
| 51 |
+
from isaacsim.core.prims import SingleXFormPrim, SingleClothPrim, SingleRigidPrim, SingleGeometryPrim, SingleParticleSystem, SingleDeformablePrim
|
| 52 |
+
from isaacsim.core.prims import XFormPrim, ClothPrim, RigidPrim, GeometryPrim, ParticleSystem
|
| 53 |
+
from isaacsim.core.utils.rotations import euler_angles_to_quat
|
| 54 |
+
from isaacsim.core.utils.types import ArticulationAction, ArticulationActions
|
| 55 |
+
from omni.physx.scripts import deformableUtils,particleUtils,physicsUtils
|
| 56 |
+
|
| 57 |
+
# load custom package
|
| 58 |
+
sys.path.append(os.getcwd())
|
| 59 |
+
from Env_StandAlone.BaseEnv import BaseEnv
|
| 60 |
+
from Env_Config.Garment.Particle_Garment import Particle_Garment
|
| 61 |
+
from Env_Config.Garment.Deformable_Garment import Deformable_Garment
|
| 62 |
+
from Env_Config.Robot.BimanualDex_Ur10e import Bimanual_Ur10e
|
| 63 |
+
from Env_Config.Camera.Recording_Camera import Recording_Camera
|
| 64 |
+
from Env_Config.Room.Real_Ground import Real_Ground
|
| 65 |
+
from Env_Config.Room.Object_Tools import pusher_loader, set_prim_visible_group, delete_prim_group
|
| 66 |
+
from Env_Config.Utils_Project.Code_Tools import get_unique_filename, normalize_columns
|
| 67 |
+
from Env_Config.Utils_Project.Parse import parse_args_val
|
| 68 |
+
from Env_Config.Utils_Project.Collision_Group import CollisionGroup
|
| 69 |
+
from Env_Config.Utils_Project.Point_Cloud_Manip import rotate_point_cloud
|
| 70 |
+
from Model_HALO.GAM.GAM_Encapsulation import GAM_Encapsulation
|
| 71 |
+
from Model_HALO.SADP.SADP import SADP
|
| 72 |
+
|
| 73 |
+
class StoreTops_Env(BaseEnv):
    """Standalone Isaac Sim environment for the "store tops" task.

    Builds the scene (ground, particle-cloth top, bimanual UR10e with
    dexterous hands, pusher, recording cameras), configures collision groups,
    loads the GAM affordance model and the stage-1 SADP policy, then steps
    the world until it settles.
    """

    def __init__(
        self,
        pos:np.ndarray=None,
        ori:np.ndarray=None,
        usd_path:str=None,
        env_dx:float=0.0,
        env_dy:float=0.0,
        ground_material_usd:str=None,
        record_video_flag:bool=False,
        training_data_num:int=100,
        stage_1_checkpoint_num:int=1500,
        stage_2_checkpoint_num:int=1500,
        stage_3_checkpoint_num:int=1500,
    ):
        """Create the scene and bring the world to a ready state.

        Args:
            pos: garment drop position; x/y also center the garment camera,
                z is forced to 0.20 below. NOTE(review): a None value crashes
                at the camera/set_pose lines — callers must pass an array.
            ori: garment orientation forwarded to ``set_pose``.
            usd_path: garment USD asset; defaults to the TCNC_Top338 top.
            env_dx, env_dy: offsets applied to the judge-camera position.
            ground_material_usd: optional visual material for the ground.
            record_video_flag: when True, prepare (but do not start) a daemon
                thread collecting env_camera RGB frames for video export.
            training_data_num: dataset-size tag selecting the SADP run.
            stage_1_checkpoint_num: checkpoint id for the stage-1 SADP policy.
            stage_2_checkpoint_num, stage_3_checkpoint_num: accepted for a
                uniform signature across stages; not used in this constructor.
        """
        # load BaseEnv (provides self.world / self.scene and step()/reset())
        super().__init__()

        # ------------------------------------ #
        # ---        Add Env Assets        --- #
        # ------------------------------------ #

        # add ground
        self.ground = Real_Ground(
            self.scene,
            visual_material_usd = ground_material_usd,
            # you can use materials in 'Assets/Material/Floor' to change the texture of ground.
        )

        # load garment (particle-based cloth); spawned far away at y=3.0 and
        # moved to its task position after reset()
        self.garment = Particle_Garment(
            self.world,
            pos=np.array([0, 3.0, 0.6]),
            ori=np.array([0, 0, 0]),
            usd_path="Assets/Garment/Tops/Collar_noSleeve_FrontClose/TCNC_Top338/TCNC_Top338_obj.usd" if usd_path is None else usd_path,
        )

        # load bimanual_dex: two UR10e arms with dexterous hands, mirrored
        # about the y-axis
        self.bimanual_dex = Bimanual_Ur10e(
            self.world,
            dexleft_pos=np.array([-0.9, 0.0, 0.5]),
            dexleft_ori=np.array([0.0, 0.0, 0.0]),
            dexright_pos=np.array([0.9, 0.0, 0.5]),
            dexright_ori=np.array([0.0, 0.0, 0.0]),
        )

        # load cameras: garment_camera looks straight down at the drop spot
        self.garment_camera = Recording_Camera(
            camera_position=np.array([pos[0], pos[1], 6.75]),
            camera_orientation=np.array([0, 90.0, 90.0]),
            prim_path="/World/garment_camera",
        )

        # env_camera: overview RGB/depth view used for videos and policy obs
        self.env_camera = Recording_Camera(
            camera_position=np.array([0.0, 5.22, 8.11]),
            camera_orientation=np.array([0, 60, -90.0]),
            prim_path="/World/env_camera",
        )

        # judge_camera: top-down view over the (offset) storage target
        self.judge_camera = Recording_Camera(
            camera_position=np.array([0.0+env_dx,1.25+env_dy,6.75]),
            camera_orientation=np.array([0, 90, -90.0]),
            prim_path="/World/judge_camera",
        )
        self.env_dx = env_dx
        self.env_dy = env_dy
        self.pusher = pusher_loader(self.scene)

        # load GAM Model ("catogory" sic — parameter name in the GAM API)
        self.model = GAM_Encapsulation(catogory="Tops_NoSleeve")

        # define collision group - helpers (ground + pusher) collide with the
        # garment but not with the robot
        self.helper_path=['/World/defaultGroundPlane/GroundPlane', '/World/pusher']
        self.collisiongroup = CollisionGroup(
            self.world,
            helper_path=self.helper_path,
            garment=True,
            collide_with_garment=True,
            collide_with_robot=False,
        )

        # object_camera: view of the pusher only
        self.object_camera = Recording_Camera(
            camera_position=np.array([0.0, -6.6, 4.9]),
            camera_orientation=np.array([0, 30.0, 90.0]),
            prim_path="/World/object_camera",
        )

        # point-cloud / affordance buffers, filled later by the task function
        self.garment_pcd = None
        self.object_pcd = None
        self.points_affordance_feature = None

        # stage-1 SADP policy (diffusion-style action predictor)
        self.sadp = SADP(task_name="Store_Tops_stage_1", data_num=training_data_num, checkpoint_num=stage_1_checkpoint_num)

        # ------------------------------------ #
        # --- Initialize World to be Ready --- #
        # ------------------------------------ #
        # initialize world
        self.reset()

        # move garment to the target position (z pinned to 0.20)
        self.garment.set_pose(pos=np.array([pos[0], pos[1], 0.20]), ori=ori)
        self.position = [pos[0], pos[1], 0.2]
        self.orientation = ori

        # initialize recording camera to obtain point cloud data of garment
        self.garment_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Garment/garment",
            ]
        )

        # initialize env camera to obtain rgb (and depth) for video creation
        self.env_camera.initialize(depth_enable=True)

        self.judge_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Garment/garment",
            ]
        )

        self.object_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/pusher",
            ]
        )

        # add thread to record frames asynchronously (collects rgb data for
        # video generation); started later by the task function
        if record_video_flag:
            self.thread_record = threading.Thread(target=self.env_camera.collect_rgb_graph_for_video)
            self.thread_record.daemon = True

        # step world so physics settles before the task begins
        for i in range(100):
            self.step()

        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])
        cprint(f"usd_path: {usd_path}", "magenta")
        cprint(f"pos_x: {pos[0]}", "magenta")
        cprint(f"pos_y: {pos[1]}", "magenta")
        cprint(f"env_dx: {env_dx}", "magenta")
        cprint(f"env_dy: {env_dy}", "magenta")
        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])


        cprint("World Ready!", "green", "on_green")
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def StoreTops(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, validation_flag, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num):
    """Run one Store-Tops HALO validation episode and return a result summary.

    Builds a ``StoreTops_Env``, captures garment/object point clouds, performs
    a scripted fold followed by a SADP-policy-driven store stage, then judges
    success by the distance between the garment's final bounding-box center and
    the pusher target.

    Args mirror ``StoreTops_Env.__init__`` plus:
        validation_flag: when True, append the outcome to a validation log and
            save a final-state picture under ``Data/Store_Tops_Validation_HALO``.

    Returns:
        JSON-serializable dict with task name, success flag, point-cloud sizes,
        affordance-feature norm and the final distance to the target center.
    """
    env = StoreTops_Env(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num)

    # hide robot/pusher prims so the garment camera segments only the garment
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/pusher"],
        visible=False,
    )
    for i in range(50):
        env.step()

    env.garment_pcd, _color = env.garment_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
    )

    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/pusher"],
        visible=True,
    )
    for i in range(50):
        env.step()

    if record_video_flag:
        env.thread_record.start()

    # query the GAM model for four keypoints: grasp pair [0:2] + fold-target pair [2:4]
    manipulation_points, _indices, points_similarity = env.model.get_manipulation_points(input_pcd=env.garment_pcd, index_list=[1954, 1832, 528, 587])
    manipulation_points[:, 2] = 0.025  # clamp z so the dexhands can grasp the garment

    env.points_affordance_feature = normalize_columns(points_similarity[2:4].T)

    # move both dexhands to the grasp points and close the hands
    env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0], left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=manipulation_points[1], right_ori=np.array([0.406, -0.406, -0.579, 0.579]))

    env.bimanual_dex.set_both_hand_state(left_hand_state="open", right_hand_state="open")

    env.bimanual_dex.set_both_hand_state(left_hand_state="close", right_hand_state="close")

    for i in range(20):
        env.step()

    # quarter of the summed grasp-to-target planar distance drives the lift height/offset
    left_dis = np.sqrt((manipulation_points[0][0] - manipulation_points[2][0]) ** 2 + (manipulation_points[0][1] - manipulation_points[2][1]) ** 2)
    right_dis = np.sqrt((manipulation_points[1][0] - manipulation_points[3][0]) ** 2 + (manipulation_points[1][1] - manipulation_points[3][1]) ** 2)
    distance = (left_dis + right_dis) / 4
    # get lift points
    y_off = 0.01
    z_off = 0.005
    left_lift_points, right_lift_points = np.array([manipulation_points[0][0], manipulation_points[0][1] - distance + y_off, distance + z_off]), np.array([manipulation_points[1][0], manipulation_points[1][1] - distance + y_off, distance + z_off])

    # move both dexhands to the lift points (first half of the fold arc)
    env.bimanual_dex.dense_move_both_ik(left_pos=left_lift_points, left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=right_lift_points, right_ori=np.array([0.406, -0.406, -0.579, 0.579]))

    # second half: carry the grasped edge past the fold-target line
    left_lift_points, right_lift_points = np.array([manipulation_points[0][0], manipulation_points[2][1] + 0.06, distance + z_off]), np.array([manipulation_points[1][0], manipulation_points[3][1] + 0.06, distance + z_off])
    env.bimanual_dex.dense_move_both_ik(left_pos=left_lift_points, left_ori=np.array([0.579, -0.579, -0.406, 0.406]), right_pos=right_lift_points, right_ori=np.array([0.406, -0.406, -0.579, 0.579]))

    # release the garment and let it settle under temporarily boosted gravity
    env.bimanual_dex.set_both_hand_state(left_hand_state="open", right_hand_state="open")

    env.garment.particle_material.set_gravity_scale(10.0)

    for i in range(200):
        env.step()

    env.garment.particle_material.set_gravity_scale(1.0)

    cprint("Store World Fold Procedure Finish! Store Procedure Begins!", "green", "on_green")

    pusher_center = np.array([0.0 + env_dx, 1.10 + env_dy, 0.0])
    env.pusher.set_world_pose(position=pusher_center)

    # hide robot/garment prims so the object camera segments only the pusher
    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
        visible=False,
    )
    for i in range(50):
        env.step()

    env.object_pcd, _color = env.object_camera.get_point_cloud_data_from_segment(
        save_or_not=False,
        save_path=get_unique_filename("data", extension=".ply"),
    )

    set_prim_visible_group(
        prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
        visible=True,
    )
    for i in range(50):
        env.step()

    # pre-store pose: raise both hands above the workspace with push-friendly wrist orientations
    left_ori = np.array([0.7010574, 0.5609855, 0.4304593, 0.092296])
    right_ori = np.array([0.4304593, 0.092296, 0.7010574, 0.5609855])
    left_lift_points, right_lift_points = np.array([-0.5, 0.6, 0.65]), np.array([0.5, 0.6, 0.65])
    env.bimanual_dex.dense_move_both_ik(left_pos=left_lift_points, left_ori=left_ori, right_pos=right_lift_points, right_ori=right_ori)

    for i in range(50):
        env.step()

    # keep only the fold-target pair and approach it in three staged offsets
    manipulation_points = manipulation_points[2:]
    left_off0 = np.array([-0.08, -0.0, 0.005])
    right_off0 = np.array([0.08, -0.0, 0.005])
    left_off1 = np.array([-0.05, -0.0, 0.002])
    right_off1 = np.array([0.05, -0.0, 0.002])
    left_off2 = np.array([0.005, 0.05, -0.0])
    right_off2 = np.array([-0.005, 0.05, -0.0])
    manipulation_points[:, 2] = 0.00

    env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0] + left_off0, left_ori=left_ori, right_pos=manipulation_points[1] + right_off0, right_ori=right_ori)
    for i in range(20):
        env.step()
    env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0] + left_off1, left_ori=left_ori, right_pos=manipulation_points[1] + right_off1, right_ori=right_ori)
    for i in range(20):
        env.step()
    env.bimanual_dex.dense_move_both_ik(left_pos=manipulation_points[0] + left_off2, left_ori=left_ori, right_pos=manipulation_points[1] + right_off2, right_ori=right_ori)
    for i in range(20):
        env.step()

    for i in range(20):
        env.step()

    # closed-loop store stage: 10 policy queries, each executed as 4 action chunks
    for i in range(10):

        print(f"Stage_1_Step: {i}")

        joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
        joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
        joint_state = np.concatenate([joint_pos_L, joint_pos_R])

        obs = dict()
        obs['agent_pos'] = joint_state
        obs['environment_point_cloud'] = env.env_camera.get_pointcloud_from_depth()
        obs['garment_point_cloud'] = env.garment_pcd
        obs['object_point_cloud'] = env.object_pcd
        obs['points_affordance_feature'] = env.points_affordance_feature

        action = env.sadp.get_action(obs)

        print("action_shape:", action.shape)

        for j in range(4):

            # first 30 dims drive the left arm+hand, the remainder the right
            action_L = ArticulationAction(joint_positions=action[j][:30])
            action_R = ArticulationAction(joint_positions=action[j][30:])

            env.bimanual_dex.dexleft.apply_action(action_L)
            env.bimanual_dex.dexright.apply_action(action_R)

            for _ in range(5):
                env.step()

            # refresh the policy's observation after every executed chunk
            joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
            joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
            joint_state = np.concatenate([joint_pos_L, joint_pos_R])

            obs = dict()
            obs['agent_pos'] = joint_state
            obs['environment_point_cloud'] = env.env_camera.get_pointcloud_from_depth()
            obs['garment_point_cloud'] = env.garment_pcd
            obs['object_point_cloud'] = env.object_pcd
            obs['points_affordance_feature'] = env.points_affordance_feature

            env.sadp.update_obs(obs)

    # if you wanna create gif, use this code. Need Cooperation with thread.
    if record_video_flag:
        if not os.path.exists("Data/Store_Tops_Validation_HALO/video"):
            os.makedirs("Data/Store_Tops_Validation_HALO/video")
        env.env_camera.create_mp4(get_unique_filename("Data/Store_Tops_Validation_HALO/video/video", ".mp4"))

    # remove the hands so they cannot pollute the judge camera's segmentation
    delete_prim("/World/DexLeft")
    delete_prim("/World/DexRight")
    for i in range(50):
        env.step()

    store_state, _color = env.judge_camera.get_point_cloud_data_from_segment(save_or_not=False)
    # bounding box of the garment's final point cloud
    max_x = np.max(store_state[:, 0])
    min_x = np.min(store_state[:, 0])
    max_y = np.max(store_state[:, 1])
    min_y = np.min(store_state[:, 1])
    # center of the bounding box
    center_x = (max_x + min_x) / 2
    center_y = (max_y + min_y) / 2
    # planar distance between the garment center and the pusher target
    distance = np.sqrt((center_x - pusher_center[0]) ** 2 + (center_y - pusher_center[1]) ** 2)
    # success iff the garment center lands within 0.1 of the pusher center
    # (replaces a dead `success = True` pre-assignment that was always overwritten)
    success = bool(distance < 0.1)

    cprint("----------- Judge Begin -----------", "blue", attrs=["bold"])
    cprint(f"garment_center: {center_x}, {center_y}", "blue")
    cprint(f"pusher_center: {pusher_center[0]}, {pusher_center[1]}", "blue")
    cprint(f"distance: {distance}", "blue")
    cprint("----------- Judge End -----------", "blue", attrs=["bold"])
    cprint(f"final result: {success}", color="green", on_color="on_green")

    if validation_flag:
        if not os.path.exists("Data/Store_Tops_Validation_HALO"):
            os.makedirs("Data/Store_Tops_Validation_HALO")
        # write into .log file
        with open("Data/Store_Tops_Validation_HALO/validation_log.txt", "a") as f:
            f.write(f"result:{success} usd_path:{env.garment.usd_path} pos_x:{pos[0]} pos_y:{pos[1]} env_dx:{env_dx} env_dy:{env_dy} \n")

        if not os.path.exists("Data/Store_Tops_Validation_HALO/final_state_pic"):
            os.makedirs("Data/Store_Tops_Validation_HALO/final_state_pic")
        env.env_camera.get_rgb_graph(save_or_not=True, save_path=get_unique_filename("Data/Store_Tops_Validation_HALO/final_state_pic/img", ".png"))

    result = {
        "task_name": "Store Tops HALO",
        "success": bool(success),
        "garment_point_count": int(env.garment_pcd.shape[0]) if env.garment_pcd is not None else 0,
        "object_point_count": int(env.object_pcd.shape[0]) if env.object_pcd is not None else 0,
        "judge_point_count": int(store_state.shape[0]),
        "affordance_feature_norm": float(np.linalg.norm(env.points_affordance_feature)) if env.points_affordance_feature is not None else 0.0,
        "distance_to_target_center": float(distance),
    }
    return result
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
|
| 482 |
+
|
| 483 |
+
if __name__=="__main__":

    args = parse_args_val()

    # initial setting: default garment pose and environment offsets
    # (may be randomized and/or overridden via DEXGARMENTLAB_* env vars below)
    pos = np.array([0.0, 0.7, 0.20])
    ori = np.array([0.0, 0.0, 0.0])
    usd_path = None  # None -> the env falls back to its default garment asset
    env_dx = 0.0
    env_dy = 0.0

    if args.env_random_flag or args.garment_random_flag:
        # seed once for all randomization; DEXGARMENTLAB_RNG_SEED makes runs reproducible,
        # otherwise fall back to the wall clock
        rng_seed = _env_text("DEXGARMENTLAB_RNG_SEED")
        np.random.seed(int(rng_seed) if rng_seed is not None else int(time.time()))
    if args.env_random_flag:
        env_dx = np.random.uniform(-0.3, 0.3) # changeable
        env_dy = np.random.uniform(-0.1, 0.1) # changeable
    if args.garment_random_flag:
        x = np.random.uniform(-0.05, 0.05) # changeable
        y = np.random.uniform(0.65, 0.75) # changeable
        pos = np.array([x,y,0.0])
        ori = np.array([0.0, 0.0, 0.0])
        # pick a random garment asset from the GAM checkpoint's asset list
        Base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        assets_lists = os.path.join(Base_dir,"Model_HALO/GAM/checkpoints/Tops_NoSleeve/assets_list.txt")
        assets_list = []
        with open(assets_lists,"r",encoding='utf-8') as f:
            for line in f:
                # strip only the newline: asset paths may legitimately contain spaces
                clean_line = line.rstrip('\n')
                assets_list.append(clean_line)
        usd_path=np.random.choice(assets_list)

    # explicit DEXGARMENTLAB_* environment variables override any randomized value
    forced_usd_path = _env_text("DEXGARMENTLAB_USD_PATH")
    if forced_usd_path is not None:
        usd_path = forced_usd_path
    forced_pos_x = _env_float("DEXGARMENTLAB_POS_X")
    forced_pos_y = _env_float("DEXGARMENTLAB_POS_Y")
    if forced_pos_x is not None or forced_pos_y is not None:
        # override only the axes that were provided, keep the others
        pos = np.array([
            pos[0] if forced_pos_x is None else forced_pos_x,
            pos[1] if forced_pos_y is None else forced_pos_y,
            pos[2],
        ])
    forced_ori_z = _env_float("DEXGARMENTLAB_ORI_Z")
    if forced_ori_z is not None:
        ori = np.array([ori[0], ori[1], forced_ori_z])
    forced_env_dx = _env_float("DEXGARMENTLAB_ENV_DX")
    if forced_env_dx is not None:
        env_dx = forced_env_dx
    forced_env_dy = _env_float("DEXGARMENTLAB_ENV_DY")
    if forced_env_dy is not None:
        env_dy = forced_env_dy

    # run the episode and optionally emit a machine-readable result line
    result = StoreTops(pos, ori, usd_path, env_dx, env_dy, args.ground_material_usd, args.validation_flag, args.record_video_flag, args.training_data_num, args.stage_1_checkpoint_num, args.stage_2_checkpoint_num, args.stage_3_checkpoint_num)
    if EMIT_JSON:
        print(f"{RESULT_PREFIX}{json.dumps(result, sort_keys=True)}")

    # validation/headless runs exit immediately; interactive runs keep the app alive
    if args.validation_flag:
        simulation_app.close()
    elif HEADLESS:
        simulation_app.close()
    else:
        while simulation_app.is_running():
            simulation_app.update()

        simulation_app.close()
|
third_party/DexGarmentLab/Env_Validation/Wear_Baseballcap_HALO.py
ADDED
|
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from isaacsim import SimulationApp
|
| 2 |
+
simulation_app = SimulationApp({"headless": False})
|
| 3 |
+
|
| 4 |
+
# load external package
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
import time
|
| 8 |
+
import numpy as np
|
| 9 |
+
import open3d as o3d
|
| 10 |
+
from termcolor import cprint
|
| 11 |
+
import threading
|
| 12 |
+
|
| 13 |
+
# load isaac-relevant package
|
| 14 |
+
import omni.replicator.core as rep
|
| 15 |
+
import isaacsim.core.utils.prims as prims_utils
|
| 16 |
+
from pxr import UsdGeom,UsdPhysics,PhysxSchema, Gf
|
| 17 |
+
from isaacsim.core.api import World
|
| 18 |
+
from isaacsim.core.api import SimulationContext
|
| 19 |
+
from isaacsim.core.api.objects import DynamicCuboid, FixedCuboid, VisualCuboid
|
| 20 |
+
from isaacsim.core.utils.prims import is_prim_path_valid, set_prim_visibility, delete_prim
|
| 21 |
+
from isaacsim.core.utils.string import find_unique_string_name
|
| 22 |
+
from isaacsim.core.utils.viewports import set_camera_view
|
| 23 |
+
from isaacsim.core.utils.stage import add_reference_to_stage, is_stage_loading
|
| 24 |
+
from isaacsim.core.prims import SingleXFormPrim, SingleClothPrim, SingleRigidPrim, SingleGeometryPrim, SingleParticleSystem, SingleDeformablePrim
|
| 25 |
+
from isaacsim.core.prims import XFormPrim, ClothPrim, RigidPrim, GeometryPrim, ParticleSystem
|
| 26 |
+
from isaacsim.core.utils.rotations import euler_angles_to_quat
|
| 27 |
+
from isaacsim.core.utils.types import ArticulationAction, ArticulationActions
|
| 28 |
+
from omni.physx.scripts import deformableUtils,particleUtils,physicsUtils
|
| 29 |
+
|
| 30 |
+
# load custom package
|
| 31 |
+
sys.path.append(os.getcwd())
|
| 32 |
+
from Env_StandAlone.BaseEnv import BaseEnv
|
| 33 |
+
from Env_Config.Garment.Particle_Garment import Particle_Garment
|
| 34 |
+
from Env_Config.Garment.Deformable_Garment import Deformable_Garment
|
| 35 |
+
from Env_Config.Robot.BimanualDex_Ur10e import Bimanual_Ur10e
|
| 36 |
+
from Env_Config.Camera.Recording_Camera import Recording_Camera
|
| 37 |
+
from Env_Config.Room.Real_Ground import Real_Ground
|
| 38 |
+
from Env_Config.Room.Object_Tools import hat_helper_load, set_prim_visible_group, delete_prim_group
|
| 39 |
+
from Env_Config.Utils_Project.Code_Tools import get_unique_filename, normalize_columns
|
| 40 |
+
from Env_Config.Utils_Project.Parse import parse_args_val
|
| 41 |
+
from Env_Config.Utils_Project.Point_Cloud_Manip import rotate_point_cloud
|
| 42 |
+
from Model_HALO.GAM.GAM_Encapsulation import GAM_Encapsulation
|
| 43 |
+
from Env_Config.Utils_Project.Collision_Group import CollisionGroup
|
| 44 |
+
from Env_Config.Human.Human import Human
|
| 45 |
+
from Env_Config.Utils_Project.Attachment_Block import attach_fixedblock
|
| 46 |
+
from Model_HALO.SADP.SADP import SADP
|
| 47 |
+
|
| 48 |
+
class WearBaseballcap_Env(BaseEnv):
    """Isaac-Sim environment for the Wear-Baseballcap HALO validation task.

    Assembles the scene (ground, human model, deformable baseball cap,
    bimanual UR10e dexhands, hanger/head helpers), three recording cameras
    (garment / env / object), the GAM keypoint model and the stage-1 SADP
    policy, then resets and steps the world until it is ready.
    """

    def __init__(
        self,
        pos:np.ndarray=None,             # initial cap (x, y, z) position on the hanger side
        ori:np.ndarray=None,             # initial cap euler orientation
        usd_path:str=None,               # cap USD asset; None -> default HA_Hat016
        env_dx:float=0.0,                # x offset applied to the human/helpers
        env_dy:float=0.0,                # y offset applied to the human/helpers
        ground_material_usd:str=None,    # optional ground texture material
        record_video_flag:bool=False,    # prepare an async rgb-recording thread
        training_data_num:int=100,       # SADP training-data count (selects checkpoint dir)
        stage_1_checkpoint_num:int=1500, # SADP stage-1 checkpoint id
        stage_2_checkpoint_num:int=1500, # accepted but unused here — TODO confirm later stages
        stage_3_checkpoint_num:int=1500, # accepted but unused here — TODO confirm later stages
    ):
        # load BaseEnv (expected to provide self.world / self.scene used below)
        super().__init__()

        # ------------------------------------ #
        # ---        Add Env Assets        --- #
        # ------------------------------------ #
        self.ground = Real_Ground(
            self.scene,
            visual_material_usd = ground_material_usd,
            # you can use materials in 'Assets/Material/Floor' to change the texture of ground.
        )

        # load human, shifted by the per-episode environment offsets
        self.env_dx = env_dx
        self.env_dy = env_dy
        self.human = Human(
            path="Assets/Human/human_model.usd",
            position=[0.0+env_dx,1.15+env_dy,0.0],
            scale=np.array([0.6, 0.6, 0.6]),
        )

        # load garment: the cap is a deformable body (not a particle cloth);
        # staged far away at y=3.0 and moved into place after reset()
        self.garment=Deformable_Garment(
            self.world,
            pos=np.array([0, 3.0, 0.6]),
            ori=np.array([0.0, 0.0, 0.0]),
            usd_path="Assets/Garment/Hat/HA_Hat016/HA_Hat016_obj.usd" if usd_path is None else usd_path,
            solver_position_iteration_count=8,
            simulation_hexahedral_resolution=16
        )

        # hanger/head helper prims around the human; returns the target put position
        self.target_put_pos = hat_helper_load(self.scene, pos[0], pos[1], self.env_dx, self.env_dy)

        # helper prims grouped for collision filtering below
        self.helper_path=["/World/hanger_helper", "/World/hanger", "/World/Human"]

        self.head_helper_path=["/World/head_helper"]


        # load bimanual_dex: two UR10e arms with dexhands, mirrored about x=0
        self.bimanual_dex = Bimanual_Ur10e(
            self.world,
            dexleft_pos=np.array([-0.6, 0.0, 0.5]),
            dexleft_ori=np.array([0.0, 0.0, 0.0]),
            dexright_pos=np.array([0.6, 0.0, 0.5]),
            dexright_ori=np.array([0.0, 0.0, 0.0]),
        )

        # top-down camera above the cap, used for the garment point cloud
        self.garment_camera = Recording_Camera(
            camera_position=np.array([pos[0], pos[1], 6.75]),
            camera_orientation=np.array([0, 90.0, 90.0]),
            prim_path="/World/garment_camera",
        )


        # oblique scene camera, used for depth point clouds and video frames
        self.env_camera = Recording_Camera(
            camera_position=np.array([0, -3.45, 4.17]),
            camera_orientation=np.array([0, 40, 90]),
            prim_path="/World/env_camera",
        )


        # load UniGarmentManip (GAM) keypoint model for baseball caps
        # NOTE: "catogory" is the upstream API's (misspelled) keyword — do not "fix" it here
        self.model = GAM_Encapsulation(catogory="Baseball_Cap")

        # import Collision Group: helpers collide with the garment but not the robot
        self.collisiongroup=CollisionGroup(
            self.world,
            helper_path=self.helper_path,
            garment=False,
            collide_with_garment=True,
            collide_with_robot=False
        )
        self.collisiongroup.add_collision(group_path="head",target=self.head_helper_path)

        # camera framing the human, used for the object (head) point cloud
        self.object_camera = Recording_Camera(
            camera_position=np.array([0.0, -6.6, 4.9]),
            camera_orientation=np.array([0, 30.0, 90.0]),
            prim_path="/World/object_camera",
        )

        # point clouds / affordance features are populated later by the task script
        self.garment_pcd = None
        self.object_pcd = None
        self.points_affordance_feature = None

        # stage-1 SADP policy; checkpoint selected by data count + checkpoint id
        self.sadp = SADP(task_name="Wear_Baseballcap_stage_1", data_num=training_data_num, checkpoint_num=stage_1_checkpoint_num)

        # ------------------------------------ #
        # --- Initialize World to be Ready --- #
        # ------------------------------------ #
        # initialize world
        self.reset()

        # move the staged cap to its episode start pose
        self.garment.set_garment_pose(pos=np.array([pos[0], pos[1], 0.65]), ori=ori)
        self.position = [pos[0], pos[1], 0.2]
        self.orientation = ori

        # initialize recording camera to obtain point cloud data of garment
        self.garment_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Deformable/deformable",
            ]
        )

        self.env_camera.initialize(
            depth_enable=True,
        )

        self.object_camera.initialize(
            segment_pc_enable=True,
            segment_prim_path_list=[
                "/World/Human",
            ]
        )

        # add thread and record gif Asynchronously (used to collect rgb data for generating gif);
        # daemon so it never blocks process exit
        if record_video_flag:
            self.thread_record = threading.Thread(target=self.env_camera.collect_rgb_graph_for_video)
            self.thread_record.daemon = True

        # step world to make it ready (let physics settle)
        for i in range(100):
            self.step()

        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])
        cprint(f"usd_path: {usd_path}", "magenta")
        cprint(f"pos_x: {pos[0]}", "magenta")
        cprint(f"pos_y: {pos[1]}", "magenta")
        cprint(f"env_dx: {env_dx}", "magenta")
        cprint(f"env_dy: {env_dy}", "magenta")
        cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])

        cprint("World Ready!", "green", "on_green")
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def WearBaseballcap(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, validation_flag, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num):
|
| 200 |
+
|
| 201 |
+
env = WearBaseballcap_Env(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num)
|
| 202 |
+
|
| 203 |
+
# hide prim to get object point cloud
|
| 204 |
+
set_prim_visible_group(
|
| 205 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Deformable/deformable", "/World/hanger"],
|
| 206 |
+
visible=False,
|
| 207 |
+
)
|
| 208 |
+
for i in range(50):
|
| 209 |
+
env.step()
|
| 210 |
+
|
| 211 |
+
env.object_pcd, color = env.object_camera.get_point_cloud_data_from_segment(
|
| 212 |
+
save_or_not=False,
|
| 213 |
+
save_path=get_unique_filename("data", extension=".ply"),
|
| 214 |
+
# real_time_watch=True,
|
| 215 |
+
)
|
| 216 |
+
|
| 217 |
+
set_prim_visible_group(
|
| 218 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Deformable/deformable", "/World/hanger"],
|
| 219 |
+
visible=True,
|
| 220 |
+
)
|
| 221 |
+
for i in range(50):
|
| 222 |
+
env.step()
|
| 223 |
+
|
| 224 |
+
# hide prim to get garment point cloud
|
| 225 |
+
set_prim_visible_group(
|
| 226 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Human", "/World/hanger"],
|
| 227 |
+
visible=False,
|
| 228 |
+
)
|
| 229 |
+
for i in range(50):
|
| 230 |
+
env.step()
|
| 231 |
+
|
| 232 |
+
env.garment_pcd, color = env.garment_camera.get_point_cloud_data_from_segment(
|
| 233 |
+
save_or_not=False,
|
| 234 |
+
save_path=get_unique_filename("data", extension=".ply"),
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
# get garment width
|
| 238 |
+
garment_width = np.max(env.garment_pcd[:, 0]) - np.min(env.garment_pcd[:, 0])
|
| 239 |
+
garment_center_x = (np.max(env.garment_pcd[:, 0]) - np.min(env.garment_pcd[:, 0])) / 2 + np.min(env.garment_pcd[:, 0])
|
| 240 |
+
pick_x = garment_center_x + garment_width * 0.15
|
| 241 |
+
|
| 242 |
+
set_prim_visible_group(
|
| 243 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Human", "/World/hanger"],
|
| 244 |
+
visible=True,
|
| 245 |
+
)
|
| 246 |
+
for i in range(50):
|
| 247 |
+
env.step()
|
| 248 |
+
|
| 249 |
+
if record_video_flag:
|
| 250 |
+
env.thread_record.start()
|
| 251 |
+
|
| 252 |
+
# get manipulation points from UniGarmentManip Model
|
| 253 |
+
manipulation_points, indices, points_similarity = env.model.get_manipulation_points(input_pcd=env.garment_pcd, index_list=[1110])
|
| 254 |
+
|
| 255 |
+
env.points_affordance_feature = normalize_columns(np.concatenate([points_similarity, points_similarity], axis=0).T)
|
| 256 |
+
|
| 257 |
+
hat_length = np.max(env.garment_pcd[:, 1]) - np.min(env.garment_pcd[:, 1])
|
| 258 |
+
|
| 259 |
+
left_lift_points = np.array([pick_x, manipulation_points[0][1]-0.025, 0.4])
|
| 260 |
+
# env.bimanual_dex.dexright.dense_step_action(target_pos=left_lift_points, target_ori=np.array([0.4062,0.4062,0.5774,0.5774]), angular_type="quat")
|
| 261 |
+
env.bimanual_dex.dexright.dense_step_action(target_pos=left_lift_points, target_ori=np.array([0.5,0.5,0.5,0.5]), angular_type="quat")
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
for i in range(50):
|
| 265 |
+
env.step()
|
| 266 |
+
|
| 267 |
+
manipulation_points[:, 2] = 0.59 # set z-axis to 0.025 to make sure dexhand can grasp the garment
|
| 268 |
+
|
| 269 |
+
# move both dexhand to the manipulation points
|
| 270 |
+
# env.bimanual_dex.dexright.dense_step_action(target_pos=np.array([manipulation_points[0][0]+0.02, manipulation_points[0][1]-0.015, manipulation_points[0][2]]), target_ori=np.array([0.4062,0.4062,0.5774,0.5774]), angular_type="quat", dense_sample_scale=0.005)
|
| 271 |
+
env.bimanual_dex.dexright.dense_step_action(target_pos=np.array([pick_x, manipulation_points[0][1]-0.025, manipulation_points[0][2]]), target_ori=np.array([0.5, 0.5, 0.5, 0.5]), angular_type="quat", dense_sample_scale=0.005)
|
| 272 |
+
|
| 273 |
+
env.garment.set_mass(0.02)
|
| 274 |
+
|
| 275 |
+
for i in range(20):
|
| 276 |
+
env.step()
|
| 277 |
+
|
| 278 |
+
for i in range(9):
|
| 279 |
+
|
| 280 |
+
print(f"Stage_1_Step: {i}")
|
| 281 |
+
|
| 282 |
+
joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
|
| 283 |
+
joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
|
| 284 |
+
joint_state = np.concatenate([joint_pos_L, joint_pos_R])
|
| 285 |
+
|
| 286 |
+
obs = dict()
|
| 287 |
+
obs['agent_pos']=joint_state
|
| 288 |
+
obs['environment_point_cloud']=env.env_camera.get_pointcloud_from_depth()
|
| 289 |
+
obs['garment_point_cloud']=env.garment_pcd
|
| 290 |
+
obs['object_point_cloud']=env.object_pcd
|
| 291 |
+
obs['points_affordance_feature']=env.points_affordance_feature
|
| 292 |
+
|
| 293 |
+
action=env.sadp.get_action(obs)
|
| 294 |
+
|
| 295 |
+
print("action_shape:",action.shape)
|
| 296 |
+
|
| 297 |
+
for j in range(4):
|
| 298 |
+
|
| 299 |
+
action_L = ArticulationAction(joint_positions=action[j][:30])
|
| 300 |
+
action_R = ArticulationAction(joint_positions=action[j][30:])
|
| 301 |
+
|
| 302 |
+
env.bimanual_dex.dexleft.apply_action(action_L)
|
| 303 |
+
env.bimanual_dex.dexright.apply_action(action_R)
|
| 304 |
+
|
| 305 |
+
for _ in range(5):
|
| 306 |
+
env.step()
|
| 307 |
+
|
| 308 |
+
joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
|
| 309 |
+
joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
|
| 310 |
+
joint_state = np.concatenate([joint_pos_L, joint_pos_R])
|
| 311 |
+
|
| 312 |
+
obs = dict()
|
| 313 |
+
obs['agent_pos']=joint_state
|
| 314 |
+
obs['environment_point_cloud']=env.env_camera.get_pointcloud_from_depth()
|
| 315 |
+
obs['garment_point_cloud']=env.garment_pcd
|
| 316 |
+
obs['object_point_cloud']=env.object_pcd
|
| 317 |
+
obs['points_affordance_feature']=env.points_affordance_feature
|
| 318 |
+
|
| 319 |
+
env.sadp.update_obs(obs)
|
| 320 |
+
|
| 321 |
+
for i in range(50):
|
| 322 |
+
env.step()
|
| 323 |
+
|
| 324 |
+
attach_fixedblock(env.stage,env.garment.deformable_prim_path+"/mesh/attachment",env.garment.deformable_prim_path,"/World/head_helper")
|
| 325 |
+
|
| 326 |
+
delete_prim_group(["/World/DexLeft", "/World/DexRight"])
|
| 327 |
+
|
| 328 |
+
for i in range(50):
|
| 329 |
+
env.step()
|
| 330 |
+
|
| 331 |
+
# if you wanna create gif, use this code. Need Cooperation with thread.
|
| 332 |
+
if record_video_flag:
|
| 333 |
+
if not os.path.exists("Data/Wear_Baseballcap_Validation_HALO/video"):
|
| 334 |
+
os.makedirs("Data/Wear_Baseballcap_Validation_HALO/video")
|
| 335 |
+
env.env_camera.create_mp4(get_unique_filename("Data/Wear_Baseballcap_Validation_HALO/video/video", ".mp4"))
|
| 336 |
+
|
| 337 |
+
success=True
|
| 338 |
+
cur_pos=env.garment.get_garment_center_pos()
|
| 339 |
+
print(cur_pos)
|
| 340 |
+
distance = np.linalg.norm(cur_pos-np.array([env.target_put_pos[0], env.target_put_pos[1]+0.11, 1.06]))
|
| 341 |
+
if distance<0.10:
|
| 342 |
+
success=True
|
| 343 |
+
else:
|
| 344 |
+
success=False
|
| 345 |
+
|
| 346 |
+
cprint("----------- Judge Begin -----------", "blue", attrs=["bold"])
|
| 347 |
+
cprint(f"hat_cur_pos: {env.garment.get_garment_center_pos()}", "blue")
|
| 348 |
+
cprint(f"judge_pos: {np.array([env.target_put_pos[0],env.target_put_pos[1]+0.07,1.06])}", "blue")
|
| 349 |
+
cprint(f"distance between garment and head: {distance}", "blue")
|
| 350 |
+
cprint("----------- Judge End -----------", "blue", attrs=["bold"])
|
| 351 |
+
cprint(f"final result: {success}", color="green", on_color="on_green")
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
if validation_flag:
|
| 355 |
+
if not os.path.exists("Data/Wear_Baseballcap_Validation_HALO"):
|
| 356 |
+
os.makedirs("Data/Wear_Baseballcap_Validation_HALO")
|
| 357 |
+
# write into .log file
|
| 358 |
+
with open("Data/Wear_Baseballcap_Validation_HALO/validation_log.txt", "a") as f:
|
| 359 |
+
f.write(f"result:{success} usd_path:{env.garment.usd_path} pos_x:{pos[0]} pos_y:{pos[1]} env_dx:{env_dx} env_dy:{env_dy} \n")
|
| 360 |
+
if not os.path.exists("Data/Wear_Baseballcap_Validation_HALO/final_state_pic"):
|
| 361 |
+
os.makedirs("Data/Wear_Baseballcap_Validation_HALO/final_state_pic")
|
| 362 |
+
env.env_camera.get_rgb_graph(save_or_not=True,save_path=get_unique_filename("Data/Wear_Baseballcap_Validation_HALO/final_state_pic/img",".png"))
|
| 363 |
+
|
| 364 |
+
|
| 365 |
+
if __name__=="__main__":
|
| 366 |
+
|
| 367 |
+
args = parse_args_val()
|
| 368 |
+
|
| 369 |
+
# initial setting
|
| 370 |
+
pos = np.array([0.0, 0.8, 0.65])
|
| 371 |
+
ori = np.array([90.0, 0.0, 0.0])
|
| 372 |
+
usd_path = None
|
| 373 |
+
env_dx = 0.0
|
| 374 |
+
env_dy = 0.0
|
| 375 |
+
|
| 376 |
+
if args.env_random_flag or args.garment_random_flag:
|
| 377 |
+
np.random.seed(int(time.time()))
|
| 378 |
+
if args.env_random_flag:
|
| 379 |
+
env_dx = np.random.uniform(-0.15, 0.15) # changeable
|
| 380 |
+
env_dy = np.random.uniform(-0.05, 0.05) # changeable
|
| 381 |
+
if args.garment_random_flag:
|
| 382 |
+
x = np.random.uniform(-0.1, 0.1) # changeable
|
| 383 |
+
y = np.random.uniform(0.7, 0.9) # changeable
|
| 384 |
+
pos = np.array([x,y,0.0])
|
| 385 |
+
ori = np.array([90.0, 0.0, 0.0])
|
| 386 |
+
Base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
| 387 |
+
assets_lists = os.path.join(Base_dir,"Model_HALO/GAM/checkpoints/Baseball_Cap/assets_list.txt")
|
| 388 |
+
assets_list = []
|
| 389 |
+
with open(assets_lists,"r",encoding='utf-8') as f:
|
| 390 |
+
for line in f:
|
| 391 |
+
clean_line = line.rstrip('\n')
|
| 392 |
+
assets_list.append(clean_line)
|
| 393 |
+
usd_path=np.random.choice(assets_list)
|
| 394 |
+
|
| 395 |
+
WearBaseballcap(pos, ori, usd_path, env_dx, env_dy, args.ground_material_usd, args.validation_flag, args.record_video_flag, args.training_data_num, args.stage_1_checkpoint_num, args.stage_2_checkpoint_num, args.stage_3_checkpoint_num)
|
| 396 |
+
|
| 397 |
+
if args.validation_flag:
|
| 398 |
+
simulation_app.close()
|
| 399 |
+
else:
|
| 400 |
+
while simulation_app.is_running():
|
| 401 |
+
simulation_app.update()
|
| 402 |
+
|
| 403 |
+
simulation_app.close()
|
third_party/DexGarmentLab/Env_Validation/Wear_Bowlhat_HALO.py
ADDED
|
@@ -0,0 +1,410 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from isaacsim import SimulationApp
|
| 2 |
+
simulation_app = SimulationApp({"headless": False})
|
| 3 |
+
|
| 4 |
+
# load external package
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
import time
|
| 8 |
+
import numpy as np
|
| 9 |
+
import open3d as o3d
|
| 10 |
+
from termcolor import cprint
|
| 11 |
+
import threading
|
| 12 |
+
|
| 13 |
+
# load isaac-relevant package
|
| 14 |
+
import omni.replicator.core as rep
|
| 15 |
+
import isaacsim.core.utils.prims as prims_utils
|
| 16 |
+
from pxr import UsdGeom,UsdPhysics,PhysxSchema, Gf
|
| 17 |
+
from isaacsim.core.api import World
|
| 18 |
+
from isaacsim.core.api import SimulationContext
|
| 19 |
+
from isaacsim.core.api.objects import DynamicCuboid, FixedCuboid, VisualCuboid
|
| 20 |
+
from isaacsim.core.utils.prims import is_prim_path_valid, set_prim_visibility, delete_prim
|
| 21 |
+
from isaacsim.core.utils.string import find_unique_string_name
|
| 22 |
+
from isaacsim.core.utils.viewports import set_camera_view
|
| 23 |
+
from isaacsim.core.utils.stage import add_reference_to_stage, is_stage_loading
|
| 24 |
+
from isaacsim.core.prims import SingleXFormPrim, SingleClothPrim, SingleRigidPrim, SingleGeometryPrim, SingleParticleSystem, SingleDeformablePrim
|
| 25 |
+
from isaacsim.core.prims import XFormPrim, ClothPrim, RigidPrim, GeometryPrim, ParticleSystem
|
| 26 |
+
from isaacsim.core.utils.rotations import euler_angles_to_quat
|
| 27 |
+
from isaacsim.core.utils.types import ArticulationAction, ArticulationActions
|
| 28 |
+
from omni.physx.scripts import deformableUtils,particleUtils,physicsUtils
|
| 29 |
+
|
| 30 |
+
# load custom package
|
| 31 |
+
sys.path.append(os.getcwd())
|
| 32 |
+
from Env_StandAlone.BaseEnv import BaseEnv
|
| 33 |
+
from Env_Config.Garment.Particle_Garment import Particle_Garment
|
| 34 |
+
from Env_Config.Garment.Deformable_Garment import Deformable_Garment
|
| 35 |
+
from Env_Config.Robot.BimanualDex_Ur10e import Bimanual_Ur10e
|
| 36 |
+
from Env_Config.Camera.Recording_Camera import Recording_Camera
|
| 37 |
+
from Env_Config.Room.Real_Ground import Real_Ground
|
| 38 |
+
from Env_Config.Room.Object_Tools import hat_helper_load, set_prim_visible_group, delete_prim_group
|
| 39 |
+
from Env_Config.Utils_Project.Code_Tools import get_unique_filename, normalize_columns
|
| 40 |
+
from Env_Config.Utils_Project.Parse import parse_args_val
|
| 41 |
+
from Env_Config.Utils_Project.Point_Cloud_Manip import rotate_point_cloud, compute_similarity
|
| 42 |
+
from Model_HALO.GAM.GAM_Encapsulation import GAM_Encapsulation
|
| 43 |
+
from Env_Config.Utils_Project.Collision_Group import CollisionGroup
|
| 44 |
+
from Env_Config.Human.Human import Human
|
| 45 |
+
from Env_Config.Utils_Project.Attachment_Block import attach_fixedblock
|
| 46 |
+
from Model_HALO.SADP.SADP import SADP
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class WearBowlhat_Env(BaseEnv):
|
| 50 |
+
def __init__(
|
| 51 |
+
self,
|
| 52 |
+
pos:np.ndarray=None,
|
| 53 |
+
ori:np.ndarray=None,
|
| 54 |
+
usd_path:str=None,
|
| 55 |
+
env_dx:float=0.0,
|
| 56 |
+
env_dy:float=0.0,
|
| 57 |
+
ground_material_usd:str=None,
|
| 58 |
+
record_video_flag:bool=False,
|
| 59 |
+
training_data_num:int=100,
|
| 60 |
+
stage_1_checkpoint_num:int=1500,
|
| 61 |
+
stage_2_checkpoint_num:int=1500,
|
| 62 |
+
stage_3_checkpoint_num:int=1500,
|
| 63 |
+
):
|
| 64 |
+
# load BaseEnv
|
| 65 |
+
super().__init__()
|
| 66 |
+
|
| 67 |
+
# ------------------------------------ #
|
| 68 |
+
# --- Add Env Assets --- #
|
| 69 |
+
# ------------------------------------ #
|
| 70 |
+
self.ground = Real_Ground(
|
| 71 |
+
self.scene,
|
| 72 |
+
visual_material_usd = ground_material_usd,
|
| 73 |
+
# you can use materials in 'Assets/Material/Floor' to change the texture of ground.
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
# load human
|
| 77 |
+
self.env_dx = env_dx
|
| 78 |
+
self.env_dy = env_dy
|
| 79 |
+
self.human = Human(
|
| 80 |
+
path="Assets/Human/human_model.usd",
|
| 81 |
+
position=[0.0+env_dx,1.15+env_dy,0.0],
|
| 82 |
+
scale=np.array([0.6, 0.6, 0.6]),
|
| 83 |
+
)
|
| 84 |
+
|
| 85 |
+
# load garment
|
| 86 |
+
self.garment=Deformable_Garment(
|
| 87 |
+
self.world,
|
| 88 |
+
pos=np.array([0, 3.0, 0.6]),
|
| 89 |
+
ori=np.array([0.0, 0.0, 0.0]),
|
| 90 |
+
usd_path="Assets/Garment/Hat/HA_Hat007/HA_Hat007_obj.usd" if usd_path is None else usd_path,
|
| 91 |
+
scale=np.array([0.0075,0.0075,0.0075]),
|
| 92 |
+
youngs_modulus=1e5,
|
| 93 |
+
dynamic_friction=25.0
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
self.target_put_pos = hat_helper_load(self.scene, pos[0], pos[1], self.env_dx, self.env_dy)
|
| 97 |
+
|
| 98 |
+
self.helper_path=["/World/hanger_helper", "/World/hanger", "/World/Human"]
|
| 99 |
+
|
| 100 |
+
self.head_helper_path=["/World/head_helper"]
|
| 101 |
+
|
| 102 |
+
# load bimanual_dex
|
| 103 |
+
self.bimanual_dex = Bimanual_Ur10e(
|
| 104 |
+
self.world,
|
| 105 |
+
dexleft_pos=np.array([-0.6, 0.0, 0.5]),
|
| 106 |
+
dexleft_ori=np.array([0.0, 0.0, 0.0]),
|
| 107 |
+
dexright_pos=np.array([0.6, 0.0, 0.5]),
|
| 108 |
+
dexright_ori=np.array([0.0, 0.0, 0.0]),
|
| 109 |
+
)
|
| 110 |
+
|
| 111 |
+
# load camera
|
| 112 |
+
self.garment_camera = Recording_Camera(
|
| 113 |
+
camera_position=np.array([0.0, 0.5, 6.75]),
|
| 114 |
+
camera_orientation=np.array([0, 90.0, 90.0]),
|
| 115 |
+
prim_path="/World/garment_camera",
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
self.env_camera = Recording_Camera(
|
| 119 |
+
camera_position=np.array([0, -3.45, 4.17]),
|
| 120 |
+
camera_orientation=np.array([0, 40, 90]),
|
| 121 |
+
prim_path="/World/env_camera",
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
# import Collision Group
|
| 125 |
+
self.collisiongroup=CollisionGroup(
|
| 126 |
+
self.world,
|
| 127 |
+
helper_path=self.helper_path,
|
| 128 |
+
garment=False,
|
| 129 |
+
collide_with_garment=True,
|
| 130 |
+
collide_with_robot=False
|
| 131 |
+
)
|
| 132 |
+
self.collisiongroup.add_collision(group_path="head",target=self.head_helper_path)
|
| 133 |
+
|
| 134 |
+
self.object_camera = Recording_Camera(
|
| 135 |
+
camera_position=np.array([0.0, -6.6, 4.9]),
|
| 136 |
+
camera_orientation=np.array([0, 30.0, 90.0]),
|
| 137 |
+
prim_path="/World/object_camera",
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
self.garment_pcd = None
|
| 141 |
+
self.object_pcd = None
|
| 142 |
+
self.points_affordance_feature = None
|
| 143 |
+
|
| 144 |
+
self.sadp = SADP(task_name="Wear_Bowlhat_stage_1", data_num=training_data_num, checkpoint_num=stage_1_checkpoint_num)
|
| 145 |
+
|
| 146 |
+
# ------------------------------------ #
|
| 147 |
+
# --- Initialize World to be Ready --- #
|
| 148 |
+
# ------------------------------------ #
|
| 149 |
+
# initialize world
|
| 150 |
+
self.reset()
|
| 151 |
+
|
| 152 |
+
self.garment.set_garment_pose(pos=np.array([pos[0], pos[1], 0.65]), ori=ori)
|
| 153 |
+
self.position = [pos[0], pos[1], 0.2]
|
| 154 |
+
self.orientation = ori
|
| 155 |
+
|
| 156 |
+
# initialize recording camera to obtain point cloud data of garment
|
| 157 |
+
self.garment_camera.initialize(
|
| 158 |
+
segment_pc_enable=True,
|
| 159 |
+
segment_prim_path_list=[
|
| 160 |
+
"/World/Deformable/deformable",
|
| 161 |
+
]
|
| 162 |
+
)
|
| 163 |
+
|
| 164 |
+
self.env_camera.initialize(
|
| 165 |
+
depth_enable=True,
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
self.object_camera.initialize(
|
| 169 |
+
segment_pc_enable=True,
|
| 170 |
+
segment_prim_path_list=[
|
| 171 |
+
"/World/Human",
|
| 172 |
+
]
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
# add thread and record gif Asynchronously(use to collect rgb data for generating gif)
|
| 176 |
+
if record_video_flag:
|
| 177 |
+
self.thread_record = threading.Thread(target=self.env_camera.collect_rgb_graph_for_video)
|
| 178 |
+
self.thread_record.daemon = True
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
# step world to make it ready
|
| 182 |
+
for i in range(100):
|
| 183 |
+
self.step()
|
| 184 |
+
|
| 185 |
+
cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])
|
| 186 |
+
cprint(f"usd_path: {usd_path}", "magenta")
|
| 187 |
+
cprint(f"pos_x: {pos[0]}", "magenta")
|
| 188 |
+
cprint(f"pos_y: {pos[1]}", "magenta")
|
| 189 |
+
cprint(f"env_dx: {env_dx}", "magenta")
|
| 190 |
+
cprint(f"env_dy: {env_dy}", "magenta")
|
| 191 |
+
cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])
|
| 192 |
+
|
| 193 |
+
cprint("World Ready!", "green", "on_green")
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def WearBowlhat(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, validation_flag, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num):
|
| 197 |
+
|
| 198 |
+
env = WearBowlhat_Env(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num)
|
| 199 |
+
|
| 200 |
+
# hide prim to get object point cloud
|
| 201 |
+
set_prim_visible_group(
|
| 202 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Deformable/deformable", "/World/hanger"],
|
| 203 |
+
visible=False,
|
| 204 |
+
)
|
| 205 |
+
for i in range(50):
|
| 206 |
+
env.step()
|
| 207 |
+
|
| 208 |
+
env.object_pcd, color = env.object_camera.get_point_cloud_data_from_segment(
|
| 209 |
+
save_or_not=False,
|
| 210 |
+
save_path=get_unique_filename("data", extension=".ply"),
|
| 211 |
+
# real_time_watch=True,
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
set_prim_visible_group(
|
| 215 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Deformable/deformable", "/World/hanger"],
|
| 216 |
+
visible=True,
|
| 217 |
+
)
|
| 218 |
+
for i in range(50):
|
| 219 |
+
env.step()
|
| 220 |
+
|
| 221 |
+
# hide prim to get garment point cloud
|
| 222 |
+
set_prim_visible_group(
|
| 223 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Human", "/World/hanger"],
|
| 224 |
+
visible=False,
|
| 225 |
+
)
|
| 226 |
+
for i in range(50):
|
| 227 |
+
env.step()
|
| 228 |
+
|
| 229 |
+
env.garment_pcd, color = env.garment_camera.get_point_cloud_data_from_segment(
|
| 230 |
+
save_or_not=False,
|
| 231 |
+
save_path=get_unique_filename("data", extension=".ply"),
|
| 232 |
+
)
|
| 233 |
+
|
| 234 |
+
set_prim_visible_group(
|
| 235 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Human", "/World/hanger"],
|
| 236 |
+
visible=True,
|
| 237 |
+
)
|
| 238 |
+
for i in range(50):
|
| 239 |
+
env.step()
|
| 240 |
+
|
| 241 |
+
if record_video_flag:
|
| 242 |
+
env.thread_record.start()
|
| 243 |
+
|
| 244 |
+
center_y = env.garment.get_garment_center_pos()[1]
|
| 245 |
+
mask = np.abs(env.garment_pcd[:, 1] - center_y) < 0.02
|
| 246 |
+
garment_subset = env.garment_pcd[mask]
|
| 247 |
+
|
| 248 |
+
if len(garment_subset) > 0:
|
| 249 |
+
point_x_max = garment_subset[np.argmax(garment_subset[:, 0])]
|
| 250 |
+
point_x_min = garment_subset[np.argmin(garment_subset[:, 0])]
|
| 251 |
+
cprint(f"left_manipulation_points: {point_x_min}", "green")
|
| 252 |
+
cprint(f"right_manipulation_points: {point_x_max}", "green")
|
| 253 |
+
else:
|
| 254 |
+
cprint("error!", "red", "on_red")
|
| 255 |
+
simulation_app.close()
|
| 256 |
+
|
| 257 |
+
left_similarity = compute_similarity(env.garment_pcd, point_x_min, sigma=0.05)
|
| 258 |
+
right_similarity = compute_similarity(env.garment_pcd, point_x_max, sigma=0.05)
|
| 259 |
+
|
| 260 |
+
env.points_affordance_feature = normalize_columns(np.concatenate([left_similarity, right_similarity], axis=1))
|
| 261 |
+
|
| 262 |
+
left_lift_points = np.array([point_x_min[0]-0.05, point_x_min[1], 0.4])
|
| 263 |
+
right_lift_points = np.array([point_x_max[0]+0.05, point_x_max[1], 0.4])
|
| 264 |
+
|
| 265 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=left_lift_points, left_ori=np.array([0.653,0.653,0.271,0.271]), right_pos=right_lift_points, right_ori=np.array([0.271,0.271,0.653,0.653]), dense_sample_scale=0.005)
|
| 266 |
+
|
| 267 |
+
for i in range(50):
|
| 268 |
+
env.step()
|
| 269 |
+
|
| 270 |
+
left_lift_points = np.array([point_x_min[0], point_x_min[1], 0.56])
|
| 271 |
+
right_lift_points = np.array([point_x_max[0], point_x_max[1], 0.56])
|
| 272 |
+
|
| 273 |
+
env.bimanual_dex.dense_move_both_ik(left_pos=left_lift_points, left_ori=np.array([0.653,0.653,0.271,0.271]), right_pos=right_lift_points, right_ori=np.array([0.271,0.271,0.653,0.653]), dense_sample_scale=0.005)
|
| 274 |
+
|
| 275 |
+
for i in range(50):
|
| 276 |
+
env.step()
|
| 277 |
+
|
| 278 |
+
env.garment.set_mass(0.075)
|
| 279 |
+
|
| 280 |
+
for i in range(20):
|
| 281 |
+
env.step()
|
| 282 |
+
|
| 283 |
+
for i in range(9):
|
| 284 |
+
|
| 285 |
+
print(f"Stage_1_Step: {i}")
|
| 286 |
+
|
| 287 |
+
joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
|
| 288 |
+
joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
|
| 289 |
+
joint_state = np.concatenate([joint_pos_L, joint_pos_R])
|
| 290 |
+
|
| 291 |
+
obs = dict()
|
| 292 |
+
obs['agent_pos']=joint_state
|
| 293 |
+
obs['environment_point_cloud']=env.env_camera.get_pointcloud_from_depth()
|
| 294 |
+
obs['garment_point_cloud']=env.garment_pcd
|
| 295 |
+
obs['object_point_cloud']=env.object_pcd
|
| 296 |
+
obs['points_affordance_feature']=env.points_affordance_feature
|
| 297 |
+
|
| 298 |
+
action=env.sadp.get_action(obs)
|
| 299 |
+
|
| 300 |
+
print("action_shape:",action.shape)
|
| 301 |
+
|
| 302 |
+
for j in range(4):
|
| 303 |
+
|
| 304 |
+
action_L = ArticulationAction(joint_positions=action[j][:30])
|
| 305 |
+
action_R = ArticulationAction(joint_positions=action[j][30:])
|
| 306 |
+
|
| 307 |
+
env.bimanual_dex.dexleft.apply_action(action_L)
|
| 308 |
+
env.bimanual_dex.dexright.apply_action(action_R)
|
| 309 |
+
|
| 310 |
+
for _ in range(5):
|
| 311 |
+
env.step()
|
| 312 |
+
|
| 313 |
+
joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
|
| 314 |
+
joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
|
| 315 |
+
joint_state = np.concatenate([joint_pos_L, joint_pos_R])
|
| 316 |
+
|
| 317 |
+
obs = dict()
|
| 318 |
+
obs['agent_pos']=joint_state
|
| 319 |
+
obs['environment_point_cloud']=env.env_camera.get_pointcloud_from_depth()
|
| 320 |
+
obs['garment_point_cloud']=env.garment_pcd
|
| 321 |
+
obs['object_point_cloud']=env.object_pcd
|
| 322 |
+
obs['points_affordance_feature']=env.points_affordance_feature
|
| 323 |
+
|
| 324 |
+
env.sadp.update_obs(obs)
|
| 325 |
+
|
| 326 |
+
for i in range(50):
|
| 327 |
+
env.step()
|
| 328 |
+
|
| 329 |
+
attach_fixedblock(env.stage,env.garment.deformable_prim_path+"/mesh/attachment",env.garment.deformable_prim_path,"/World/head_helper")
|
| 330 |
+
|
| 331 |
+
delete_prim_group(["/World/DexLeft", "/World/DexRight"])
|
| 332 |
+
|
| 333 |
+
for i in range(50):
|
| 334 |
+
env.step()
|
| 335 |
+
|
| 336 |
+
# if you wanna create gif, use this code. Need Cooperation with thread.
|
| 337 |
+
if record_video_flag:
|
| 338 |
+
if not os.path.exists("Data/Wear_Bowlhat_Validation_HALO/video"):
|
| 339 |
+
os.makedirs("Data/Wear_Bowlhat_Validation_HALO/video")
|
| 340 |
+
env.env_camera.create_mp4(get_unique_filename("Data/Wear_Bowlhat_Validation_HALO/video/video", ".mp4"))
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
success=True
|
| 344 |
+
# judge successful or not
|
| 345 |
+
cur_pos=env.garment.get_garment_center_pos()
|
| 346 |
+
print(cur_pos)
|
| 347 |
+
distance = np.linalg.norm(cur_pos-np.array([env.target_put_pos[0],env.target_put_pos[1]+0.11,1.06]))
|
| 348 |
+
if distance<0.05:
|
| 349 |
+
success=True
|
| 350 |
+
else:
|
| 351 |
+
success=False
|
| 352 |
+
|
| 353 |
+
cprint("----------- Judge Begin -----------", "blue", attrs=["bold"])
|
| 354 |
+
cprint(f"hat_cur_pos: {env.garment.get_garment_center_pos()}", "blue")
|
| 355 |
+
cprint(f"judge_pos: {np.array([env.target_put_pos[0],env.target_put_pos[1]+0.07,1.06])}", "blue")
|
| 356 |
+
cprint(f"distance between garment and head: {distance}", "blue")
|
| 357 |
+
cprint("----------- Judge End -----------", "blue", attrs=["bold"])
|
| 358 |
+
cprint(f"final result: {success}", color="green", on_color="on_green")
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
if validation_flag:
|
| 362 |
+
if not os.path.exists("Data/Wear_Bowlhat_Validation_HALO"):
|
| 363 |
+
os.makedirs("Data/Wear_Bowlhat_Validation_HALO")
|
| 364 |
+
# write into .log file
|
| 365 |
+
with open("Data/Wear_Bowlhat_Validation_HALO/validation_log.txt", "a") as f:
|
| 366 |
+
f.write(f"result:{success} usd_path:{env.garment.usd_path} pos_x:{pos[0]} pos_y:{pos[1]} env_dx:{env_dx} env_dy:{env_dy} \n")
|
| 367 |
+
if not os.path.exists("Data/Wear_Bowlhat_Validation_HALO/final_state_pic"):
|
| 368 |
+
os.makedirs("Data/Wear_Bowlhat_Validation_HALO/final_state_pic")
|
| 369 |
+
env.env_camera.get_rgb_graph(save_or_not=True,save_path=get_unique_filename("Data/Wear_Bowlhat_Validation_HALO/final_state_pic/img",".png"))
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
if __name__=="__main__":
|
| 373 |
+
|
| 374 |
+
args = parse_args_val()
|
| 375 |
+
|
| 376 |
+
# initial setting
|
| 377 |
+
pos = np.array([0.0, 0.8, 0.65])
|
| 378 |
+
ori = np.array([90.0, 0.0, 0.0])
|
| 379 |
+
usd_path = None
|
| 380 |
+
env_dx = 0.0
|
| 381 |
+
env_dy = 0.0
|
| 382 |
+
|
| 383 |
+
if args.env_random_flag or args.garment_random_flag:
|
| 384 |
+
np.random.seed(int(time.time()))
|
| 385 |
+
if args.env_random_flag:
|
| 386 |
+
env_dx = np.random.uniform(-0.15, 0.15) # changeable
|
| 387 |
+
env_dy = np.random.uniform(-0.05, 0.05) # changeable
|
| 388 |
+
if args.garment_random_flag:
|
| 389 |
+
x = np.random.uniform(-0.1, 0.1) # changeable
|
| 390 |
+
y = np.random.uniform(0.7, 0.9) # changeable
|
| 391 |
+
pos = np.array([x,y,0.0])
|
| 392 |
+
ori = np.array([90.0, 0.0, 0.0])
|
| 393 |
+
Base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
| 394 |
+
assets_lists = os.path.join(Base_dir,"Model_HALO/GAM/checkpoints/Bowl_Hat/assets_list.txt")
|
| 395 |
+
assets_list = []
|
| 396 |
+
with open(assets_lists,"r",encoding='utf-8') as f:
|
| 397 |
+
for line in f:
|
| 398 |
+
clean_line = line.rstrip('\n')
|
| 399 |
+
assets_list.append(clean_line)
|
| 400 |
+
usd_path=np.random.choice(assets_list)
|
| 401 |
+
|
| 402 |
+
WearBowlhat(pos, ori, usd_path, env_dx, env_dy, args.ground_material_usd, args.validation_flag, args.record_video_flag, args.training_data_num, args.stage_1_checkpoint_num, args.stage_2_checkpoint_num, args.stage_3_checkpoint_num)
|
| 403 |
+
|
| 404 |
+
if args.validation_flag:
|
| 405 |
+
simulation_app.close()
|
| 406 |
+
else:
|
| 407 |
+
while simulation_app.is_running():
|
| 408 |
+
simulation_app.update()
|
| 409 |
+
|
| 410 |
+
simulation_app.close()
|
third_party/DexGarmentLab/Env_Validation/Wear_Scarf_HALO.py
ADDED
|
@@ -0,0 +1,470 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from isaacsim import SimulationApp
|
| 2 |
+
simulation_app = SimulationApp({"headless": True})
|
| 3 |
+
|
| 4 |
+
# load external package
|
| 5 |
+
import os
|
| 6 |
+
import sys
|
| 7 |
+
import time
|
| 8 |
+
import numpy as np
|
| 9 |
+
import open3d as o3d
|
| 10 |
+
from termcolor import cprint
|
| 11 |
+
import threading
|
| 12 |
+
|
| 13 |
+
# load isaac-relevant package
|
| 14 |
+
import omni.replicator.core as rep
|
| 15 |
+
import isaacsim.core.utils.prims as prims_utils
|
| 16 |
+
from pxr import UsdGeom,UsdPhysics,PhysxSchema, Gf
|
| 17 |
+
from isaacsim.core.api import World
|
| 18 |
+
from isaacsim.core.api import SimulationContext
|
| 19 |
+
from isaacsim.core.api.objects import DynamicCuboid, FixedCuboid, VisualCuboid
|
| 20 |
+
from isaacsim.core.utils.prims import is_prim_path_valid, set_prim_visibility, delete_prim
|
| 21 |
+
from isaacsim.core.utils.string import find_unique_string_name
|
| 22 |
+
from isaacsim.core.utils.viewports import set_camera_view
|
| 23 |
+
from isaacsim.core.utils.stage import add_reference_to_stage, is_stage_loading
|
| 24 |
+
from isaacsim.core.prims import SingleXFormPrim, SingleClothPrim, SingleRigidPrim, SingleGeometryPrim, SingleParticleSystem, SingleDeformablePrim
|
| 25 |
+
from isaacsim.core.prims import XFormPrim, ClothPrim, RigidPrim, GeometryPrim, ParticleSystem
|
| 26 |
+
from isaacsim.core.utils.rotations import euler_angles_to_quat
|
| 27 |
+
from isaacsim.core.utils.types import ArticulationAction, ArticulationActions
|
| 28 |
+
from omni.physx.scripts import deformableUtils,particleUtils,physicsUtils
|
| 29 |
+
|
| 30 |
+
# load custom package
|
| 31 |
+
sys.path.append(os.getcwd())
|
| 32 |
+
from Env_StandAlone.BaseEnv import BaseEnv
|
| 33 |
+
from Env_Config.Garment.Particle_Garment import Particle_Garment
|
| 34 |
+
from Env_Config.Garment.Deformable_Garment import Deformable_Garment
|
| 35 |
+
from Env_Config.Robot.BimanualDex_Ur10e import Bimanual_Ur10e
|
| 36 |
+
from Env_Config.Camera.Recording_Camera import Recording_Camera
|
| 37 |
+
from Env_Config.Room.Real_Ground import Real_Ground
|
| 38 |
+
from Env_Config.Room.Object_Tools import hat_helper_load, set_prim_visible_group, delete_prim_group
|
| 39 |
+
from Env_Config.Utils_Project.Code_Tools import get_unique_filename, normalize_columns
|
| 40 |
+
from Env_Config.Utils_Project.Parse import parse_args_val
|
| 41 |
+
from Env_Config.Utils_Project.Point_Cloud_Manip import rotate_point_cloud
|
| 42 |
+
from Model_HALO.GAM.GAM_Encapsulation import GAM_Encapsulation
|
| 43 |
+
from Env_Config.Utils_Project.Collision_Group import CollisionGroup
|
| 44 |
+
from Env_Config.Human.Human import Human
|
| 45 |
+
from Env_Config.Utils_Project.Attachment_Block import attach_fixedblock
|
| 46 |
+
from Model_HALO.SADP.SADP import SADP
|
| 47 |
+
|
| 48 |
+
class WearScarf_Env(BaseEnv):
|
| 49 |
+
def __init__(
|
| 50 |
+
self,
|
| 51 |
+
pos:np.ndarray=None,
|
| 52 |
+
ori:np.ndarray=None,
|
| 53 |
+
usd_path:str=None,
|
| 54 |
+
env_dx:float=0.0,
|
| 55 |
+
env_dy:float=0.0,
|
| 56 |
+
ground_material_usd:str=None,
|
| 57 |
+
record_video_flag:bool=False,
|
| 58 |
+
training_data_num:int=100,
|
| 59 |
+
stage_1_checkpoint_num:int=1500,
|
| 60 |
+
stage_2_checkpoint_num:int=1500,
|
| 61 |
+
stage_3_checkpoint_num:int=1500,
|
| 62 |
+
):
|
| 63 |
+
# load BaseEnv
|
| 64 |
+
super().__init__()
|
| 65 |
+
|
| 66 |
+
# ------------------------------------ #
|
| 67 |
+
# --- Add Env Assets --- #
|
| 68 |
+
# ------------------------------------ #
|
| 69 |
+
self.ground = Real_Ground(
|
| 70 |
+
self.scene,
|
| 71 |
+
visual_material_usd = ground_material_usd,
|
| 72 |
+
# you can use materials in 'Assets/Material/Floor' to change the texture of ground.
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
# load garment
|
| 76 |
+
self.garment = Particle_Garment(
|
| 77 |
+
self.world,
|
| 78 |
+
# pos=np.array([0.0, 0.25, 0.15]),
|
| 79 |
+
pos=np.array([0.0, 3.0, 0.6]),
|
| 80 |
+
ori=np.array([0.0, 0.0, 0.0]),
|
| 81 |
+
usd_path="Assets/Flatten_Scarf/flatten_scarf_0.4.usd" if usd_path is None else usd_path,
|
| 82 |
+
friction=1.0,
|
| 83 |
+
particle_adhesion_scale=1.0,
|
| 84 |
+
particle_friction_scale=1.0,
|
| 85 |
+
contact_offset=0.015,
|
| 86 |
+
rest_offset=0.01,
|
| 87 |
+
particle_contact_offset=0.015,
|
| 88 |
+
fluid_rest_offset=0.01,
|
| 89 |
+
solid_rest_offset=0.01,
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
# load human
|
| 93 |
+
self.env_dx = env_dx
|
| 94 |
+
self.env_dy = env_dy
|
| 95 |
+
self.human = Human(
|
| 96 |
+
path="Assets/Human/human_model.usd",
|
| 97 |
+
position=[0.0+env_dx, 0.9+env_dy, -0.7],
|
| 98 |
+
orientation=[90.0, 0.0, 180.0]
|
| 99 |
+
)
|
| 100 |
+
self.human_center = np.array([0.0+env_dx, 0.9+env_dy, 0.7])
|
| 101 |
+
|
| 102 |
+
# load bimanual_dex
|
| 103 |
+
self.bimanual_dex = Bimanual_Ur10e(
|
| 104 |
+
self.world,
|
| 105 |
+
dexleft_pos=np.array([-1.05, 1.15, 0.45]),
|
| 106 |
+
dexleft_ori=np.array([0.0, 0.0, -90.0]),
|
| 107 |
+
dexright_pos=np.array([1.25, 1.15, 0.45]),
|
| 108 |
+
dexright_ori=np.array([0.0, 0.0, 90.0]),
|
| 109 |
+
)
|
| 110 |
+
|
| 111 |
+
# load camera
|
| 112 |
+
self.garment_camera = Recording_Camera(
|
| 113 |
+
camera_position=np.array([0.0, 0.75, 8.0]),
|
| 114 |
+
camera_orientation=np.array([0, 90.0, 90.0]),
|
| 115 |
+
prim_path="/World/garment_camera",
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
self.env_camera = Recording_Camera(
|
| 119 |
+
camera_position=np.array([0.0, 5.4, 5.85]),
|
| 120 |
+
camera_orientation=np.array([0, 50.0, -90.0]),
|
| 121 |
+
prim_path="/World/env_camera",
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
self.judge_front_camera = Recording_Camera(
|
| 125 |
+
camera_position=np.array([self.human_center[0], 6.32, 0.42]),
|
| 126 |
+
camera_orientation=np.array([0, 0.0, -90.0]),
|
| 127 |
+
prim_path="/World/judge_front_camera",
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
self.judge_back_camera = Recording_Camera(
|
| 131 |
+
camera_position=np.array([self.human_center[0], -4.2, 0.36]),
|
| 132 |
+
camera_orientation=np.array([0.0, 0.0, 90.0]),
|
| 133 |
+
prim_path="/World/judge_back_camera",
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
# load UniGarmentManip Model
|
| 137 |
+
self.model = GAM_Encapsulation(catogory="Scarf")
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
# helper for seperating scarf
|
| 141 |
+
self.helper_1 = FixedCuboid(
|
| 142 |
+
prim_path = "/World/helper/helper_1",
|
| 143 |
+
color=np.array([0.0, 0.0, 1.0]),
|
| 144 |
+
name = "helper_1",
|
| 145 |
+
position = [0.0, 0.8, 0.0],
|
| 146 |
+
scale=[0.3, 1.0, 0.3],
|
| 147 |
+
orientation=[0.924, 0.0, 0.383, 0.0],
|
| 148 |
+
size=1.0,
|
| 149 |
+
visible = False,
|
| 150 |
+
)
|
| 151 |
+
# self.helper_2 = FixedCuboid(
|
| 152 |
+
# prim_path = "/World/helper/helper_2",
|
| 153 |
+
# color=np.array([0.0, 0.0, 1.0]),
|
| 154 |
+
# name = "helper_2",
|
| 155 |
+
# position = [0.45, 0.8, 0.0],
|
| 156 |
+
# scale=[0.3, 1.0, 0.3],
|
| 157 |
+
# orientation=[0.924, 0.0, 0.383, 0.0],
|
| 158 |
+
# size=1.0,
|
| 159 |
+
# visible = False,
|
| 160 |
+
# )
|
| 161 |
+
# self.helper_3 = FixedCuboid(
|
| 162 |
+
# prim_path = "/World/helper/helper_3",
|
| 163 |
+
# color=np.array([0.0, 0.0, 1.0]),
|
| 164 |
+
# name = "helper_3",
|
| 165 |
+
# position = [-0.45, 0.8, 0.0],
|
| 166 |
+
# scale=[0.3, 1.0, 0.3],
|
| 167 |
+
# orientation=[0.924, 0.0, 0.383, 0.0],
|
| 168 |
+
# size=1.0,
|
| 169 |
+
# visible = False,
|
| 170 |
+
# )
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
# define collision group - helper path
|
| 175 |
+
self.helper_path=['/World/defaultGroundPlane/GroundPlane', '/World/Human']
|
| 176 |
+
self.collisiongroup = CollisionGroup(
|
| 177 |
+
self.world,
|
| 178 |
+
helper_path=self.helper_path,
|
| 179 |
+
garment=True,
|
| 180 |
+
collide_with_garment=True,
|
| 181 |
+
collide_with_robot=False,
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
self.object_camera = Recording_Camera(
|
| 185 |
+
camera_position=np.array([0.0, 5.4, 5.85]),
|
| 186 |
+
camera_orientation=np.array([0, 50.0, -90.0]),
|
| 187 |
+
prim_path="/World/object_camera",
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
self.garment_pcd = None
|
| 191 |
+
self.object_pcd = None
|
| 192 |
+
self.points_affordance_feature = None
|
| 193 |
+
|
| 194 |
+
self.sadp = SADP(task_name="Wear_Scarf_stage_1", data_num=training_data_num, checkpoint_num=stage_1_checkpoint_num)
|
| 195 |
+
|
| 196 |
+
# ------------------------------------ #
|
| 197 |
+
# --- Initialize World to be Ready --- #
|
| 198 |
+
# ------------------------------------ #
|
| 199 |
+
# initialize world
|
| 200 |
+
self.reset()
|
| 201 |
+
|
| 202 |
+
self.garment.set_pose(pos=np.array([pos[0], pos[1], 0.35]), ori=ori)
|
| 203 |
+
self.position = [pos[0], pos[1], 0.2]
|
| 204 |
+
self.orientation = ori
|
| 205 |
+
|
| 206 |
+
# initialize recording camera to obtain point cloud data of garment
|
| 207 |
+
self.garment_camera.initialize(
|
| 208 |
+
segment_pc_enable=True,
|
| 209 |
+
segment_prim_path_list=[
|
| 210 |
+
"/World/Garment/garment",
|
| 211 |
+
]
|
| 212 |
+
)
|
| 213 |
+
# initialize gif camera to obtain rgb with the aim of creating gif
|
| 214 |
+
self.env_camera.initialize(depth_enable=True)
|
| 215 |
+
|
| 216 |
+
self.judge_front_camera.initialize(
|
| 217 |
+
segment_pc_enable=True,
|
| 218 |
+
segment_prim_path_list=[
|
| 219 |
+
"/World/Garment/garment",
|
| 220 |
+
]
|
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
self.judge_back_camera.initialize(
|
| 224 |
+
segment_pc_enable=True,
|
| 225 |
+
segment_prim_path_list=[
|
| 226 |
+
"/World/Garment/garment",
|
| 227 |
+
]
|
| 228 |
+
)
|
| 229 |
+
|
| 230 |
+
self.object_camera.initialize(
|
| 231 |
+
segment_pc_enable=True,
|
| 232 |
+
segment_prim_path_list=[
|
| 233 |
+
"/World/Human",
|
| 234 |
+
]
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
# add thread and record gif Asynchronously(use to collect rgb data for generating gif)
|
| 238 |
+
if record_video_flag:
|
| 239 |
+
self.thread_record = threading.Thread(target=self.env_camera.collect_rgb_graph_for_video)
|
| 240 |
+
self.thread_record.daemon = True
|
| 241 |
+
|
| 242 |
+
# step world to make it ready
|
| 243 |
+
|
| 244 |
+
for i in range(100):
|
| 245 |
+
self.step()
|
| 246 |
+
|
| 247 |
+
delete_prim_group(["/World/helper/helper_1"])
|
| 248 |
+
|
| 249 |
+
cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])
|
| 250 |
+
cprint(f"usd_path: {usd_path}", "magenta")
|
| 251 |
+
cprint(f"pos_x: {pos[0]}", "magenta")
|
| 252 |
+
cprint(f"pos_y: {pos[1]}", "magenta")
|
| 253 |
+
cprint(f"env_dx: {env_dx}", "magenta")
|
| 254 |
+
cprint(f"env_dy: {env_dy}", "magenta")
|
| 255 |
+
cprint("----------- World Configuration -----------", color="magenta", attrs=["bold"])
|
| 256 |
+
|
| 257 |
+
cprint("World Ready!", "green", "on_green")
|
| 258 |
+
|
| 259 |
+
def WearScarf(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, validation_flag, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num):
|
| 260 |
+
|
| 261 |
+
env = WearScarf_Env(pos, ori, usd_path, env_dx, env_dy, ground_material_usd, record_video_flag, training_data_num, stage_1_checkpoint_num, stage_2_checkpoint_num, stage_3_checkpoint_num)
|
| 262 |
+
|
| 263 |
+
# hide prim to get object point cloud
|
| 264 |
+
set_prim_visible_group(
|
| 265 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
|
| 266 |
+
visible=False,
|
| 267 |
+
)
|
| 268 |
+
for i in range(50):
|
| 269 |
+
env.step()
|
| 270 |
+
|
| 271 |
+
env.object_pcd, color = env.object_camera.get_point_cloud_data_from_segment(
|
| 272 |
+
save_or_not=False,
|
| 273 |
+
save_path=get_unique_filename("data", extension=".ply"),
|
| 274 |
+
# real_time_watch=True,
|
| 275 |
+
)
|
| 276 |
+
|
| 277 |
+
set_prim_visible_group(
|
| 278 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Garment/garment"],
|
| 279 |
+
visible=True,
|
| 280 |
+
)
|
| 281 |
+
for i in range(50):
|
| 282 |
+
env.step()
|
| 283 |
+
|
| 284 |
+
# hide prim to get garment point cloud
|
| 285 |
+
set_prim_visible_group(
|
| 286 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Human"],
|
| 287 |
+
visible=False,
|
| 288 |
+
)
|
| 289 |
+
for i in range(50):
|
| 290 |
+
env.step()
|
| 291 |
+
|
| 292 |
+
env.garment_pcd, color = env.garment_camera.get_point_cloud_data_from_segment(
|
| 293 |
+
save_or_not=False,
|
| 294 |
+
save_path=get_unique_filename("data", extension=".ply"),
|
| 295 |
+
)
|
| 296 |
+
|
| 297 |
+
set_prim_visible_group(
|
| 298 |
+
prim_path_list=["/World/DexLeft", "/World/DexRight", "/World/Human"],
|
| 299 |
+
visible=True,
|
| 300 |
+
)
|
| 301 |
+
for i in range(50):
|
| 302 |
+
env.step()
|
| 303 |
+
|
| 304 |
+
if record_video_flag:
|
| 305 |
+
env.thread_record.start()
|
| 306 |
+
|
| 307 |
+
# manipulation_points, indices, max_values = env.model.get_manipulation_points(input_pcd=env.garment_pcd, index_list=[205, 908])
|
| 308 |
+
manipulation_points, indices, points_similarity = env.model.get_manipulation_points(input_pcd=env.garment_pcd, index_list=[205, 1600])
|
| 309 |
+
|
| 310 |
+
env.points_affordance_feature = normalize_columns(points_similarity.T)
|
| 311 |
+
|
| 312 |
+
manipulation_points[:, 2] = -0.02
|
| 313 |
+
manipulation_points[:, 1] -= 0.05
|
| 314 |
+
|
| 315 |
+
# move to initial position
|
| 316 |
+
env.bimanual_dex.dense_move_both_ik(
|
| 317 |
+
left_pos=np.array([-0.5, 0.5, 0.5]),
|
| 318 |
+
left_ori=np.array([0.707, -0.707, 0.0, 0.0]),
|
| 319 |
+
right_pos=np.array([0.5, 0.5, 0.5]),
|
| 320 |
+
right_ori=np.array([0.0, 0.0, -0.707, 0.707]),
|
| 321 |
+
)
|
| 322 |
+
|
| 323 |
+
env.bimanual_dex.set_both_hand_state(
|
| 324 |
+
left_hand_state='open',
|
| 325 |
+
right_hand_state='open'
|
| 326 |
+
)
|
| 327 |
+
|
| 328 |
+
# move to grasp point
|
| 329 |
+
env.bimanual_dex.dense_move_both_ik(
|
| 330 |
+
left_pos=manipulation_points[0],
|
| 331 |
+
left_ori=np.array([0.707, -0.707, 0.0, 0.0]),
|
| 332 |
+
right_pos=manipulation_points[1],
|
| 333 |
+
right_ori=np.array([0.0, 0.0, -0.707, 0.707]),
|
| 334 |
+
)
|
| 335 |
+
|
| 336 |
+
env.garment.particle_material.set_gravity_scale(0.5)
|
| 337 |
+
|
| 338 |
+
for i in range(20):
|
| 339 |
+
env.step()
|
| 340 |
+
|
| 341 |
+
for i in range(24):
|
| 342 |
+
|
| 343 |
+
print(f"Stage_1_Step: {i}")
|
| 344 |
+
|
| 345 |
+
joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
|
| 346 |
+
joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
|
| 347 |
+
joint_state = np.concatenate([joint_pos_L, joint_pos_R])
|
| 348 |
+
|
| 349 |
+
obs = dict()
|
| 350 |
+
obs['agent_pos']=joint_state
|
| 351 |
+
obs['environment_point_cloud']=env.env_camera.get_pointcloud_from_depth()
|
| 352 |
+
obs['garment_point_cloud']=env.garment_pcd
|
| 353 |
+
obs['object_point_cloud']=env.object_pcd
|
| 354 |
+
obs['points_affordance_feature']=env.points_affordance_feature
|
| 355 |
+
|
| 356 |
+
action=env.sadp.get_action(obs)
|
| 357 |
+
|
| 358 |
+
print("action_shape:",action.shape)
|
| 359 |
+
|
| 360 |
+
for j in range(4):
|
| 361 |
+
|
| 362 |
+
action_L = ArticulationAction(joint_positions=action[j][:30])
|
| 363 |
+
action_R = ArticulationAction(joint_positions=action[j][30:])
|
| 364 |
+
|
| 365 |
+
env.bimanual_dex.dexleft.apply_action(action_L)
|
| 366 |
+
env.bimanual_dex.dexright.apply_action(action_R)
|
| 367 |
+
|
| 368 |
+
for _ in range(5):
|
| 369 |
+
env.step()
|
| 370 |
+
|
| 371 |
+
joint_pos_L = env.bimanual_dex.dexleft.get_joint_positions()
|
| 372 |
+
joint_pos_R = env.bimanual_dex.dexright.get_joint_positions()
|
| 373 |
+
joint_state = np.concatenate([joint_pos_L, joint_pos_R])
|
| 374 |
+
|
| 375 |
+
obs = dict()
|
| 376 |
+
obs['agent_pos']=joint_state
|
| 377 |
+
obs['environment_point_cloud']=env.env_camera.get_pointcloud_from_depth()
|
| 378 |
+
obs['garment_point_cloud']=env.garment_pcd
|
| 379 |
+
obs['object_point_cloud']=env.object_pcd
|
| 380 |
+
obs['points_affordance_feature']=env.points_affordance_feature
|
| 381 |
+
|
| 382 |
+
env.sadp.update_obs(obs)
|
| 383 |
+
|
| 384 |
+
for i in range(50):
|
| 385 |
+
env.step()
|
| 386 |
+
|
| 387 |
+
# if you wanna create gif, use this code. Need Cooperation with thread.
|
| 388 |
+
if record_video_flag:
|
| 389 |
+
if not os.path.exists("Data/Wear_Scarf_Validation_HALO/video"):
|
| 390 |
+
os.makedirs("Data/Wear_Scarf_Validation_HALO/video")
|
| 391 |
+
env.env_camera.create_mp4(get_unique_filename("Data/Wear_Scarf_Validation_HALO/video/video", ".mp4"))
|
| 392 |
+
|
| 393 |
+
set_prim_visible_group(["/World/DexLeft", "/World/DexRight", "/World/Human"], visible=False)
|
| 394 |
+
|
| 395 |
+
for i in range(50):
|
| 396 |
+
env.step()
|
| 397 |
+
|
| 398 |
+
success=True
|
| 399 |
+
scarf_front, color = env.judge_front_camera.get_point_cloud_data_from_segment(save_or_not=False)
|
| 400 |
+
scarf_back, color = env.judge_back_camera.get_point_cloud_data_from_segment(save_or_not=False)
|
| 401 |
+
|
| 402 |
+
# calculate sum
|
| 403 |
+
front_points_below_threshold = np.sum(scarf_front[:, 2] < 0.02)
|
| 404 |
+
back_points_below_threshold = np.sum(scarf_back[:, 2] < 0.02)
|
| 405 |
+
|
| 406 |
+
if front_points_below_threshold < 20 and back_points_below_threshold < 20:
|
| 407 |
+
success = True
|
| 408 |
+
else:
|
| 409 |
+
success = False
|
| 410 |
+
|
| 411 |
+
set_prim_visible_group(["/World/Human"], visible=True)
|
| 412 |
+
for i in range(50):
|
| 413 |
+
env.step()
|
| 414 |
+
|
| 415 |
+
cprint("----------- Judge Begin -----------", "blue", attrs=["bold"])
|
| 416 |
+
cprint(f"front_points_below_threshold: {front_points_below_threshold}", "blue")
|
| 417 |
+
cprint(f"back_points_below_threshold: {back_points_below_threshold}", "blue")
|
| 418 |
+
cprint("----------- Judge End -----------", "blue", attrs=["bold"])
|
| 419 |
+
cprint(f"final result: {success}", color="green", on_color="on_green")
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
if validation_flag:
|
| 423 |
+
if not os.path.exists("Data/Wear_Scarf_Validation_HALO"):
|
| 424 |
+
os.makedirs("Data/Wear_Scarf_Validation_HALO")
|
| 425 |
+
# write into .log file
|
| 426 |
+
with open("Data/Wear_Scarf_Validation_HALO/validation_log.txt", "a") as f:
|
| 427 |
+
f.write(f"result:{success} usd_path:{env.garment.usd_path} pos_x:{pos[0]} pos_y:{pos[1]} env_dx:{env_dx} env_dy:{env_dy} \n")
|
| 428 |
+
if not os.path.exists("Data/Wear_Scarf_Validation_HALO/final_state_pic"):
|
| 429 |
+
os.makedirs("Data/Wear_Scarf_Validation_HALO/final_state_pic")
|
| 430 |
+
env.env_camera.get_rgb_graph(save_or_not=True,save_path=get_unique_filename("Data/Wear_Scarf_Validation_HALO/final_state_pic/img",".png"))
|
| 431 |
+
|
| 432 |
+
if __name__=="__main__":
|
| 433 |
+
|
| 434 |
+
args = parse_args_val()
|
| 435 |
+
|
| 436 |
+
# initial setting
|
| 437 |
+
pos = np.array([0.0, 0.30, 0.65])
|
| 438 |
+
ori = np.array([90.0, 0.0, 0.0])
|
| 439 |
+
usd_path = None
|
| 440 |
+
env_dx = 0.0
|
| 441 |
+
env_dy = 0.0
|
| 442 |
+
|
| 443 |
+
if args.env_random_flag or args.garment_random_flag:
|
| 444 |
+
np.random.seed(int(time.time()))
|
| 445 |
+
if args.env_random_flag:
|
| 446 |
+
env_dx = np.random.uniform(-0.05, 0.1) # changeable
|
| 447 |
+
env_dy = np.random.uniform(-0.05, 0.05) # changeable
|
| 448 |
+
if args.garment_random_flag:
|
| 449 |
+
x = np.random.uniform(-0.05, 0.05) # changeable
|
| 450 |
+
y = np.random.uniform(0.30, 0.40) # changeable
|
| 451 |
+
pos = np.array([x,y,0.0])
|
| 452 |
+
ori = np.array([90.0, 0.0, 0.0])
|
| 453 |
+
Base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
| 454 |
+
assets_lists = os.path.join(Base_dir,"Model_HALO/GAM/checkpoints/Scarf/assets_list.txt")
|
| 455 |
+
assets_list = []
|
| 456 |
+
with open(assets_lists,"r",encoding='utf-8') as f:
|
| 457 |
+
for line in f:
|
| 458 |
+
clean_line = line.rstrip('\n')
|
| 459 |
+
assets_list.append(clean_line)
|
| 460 |
+
usd_path=np.random.choice(assets_list)
|
| 461 |
+
|
| 462 |
+
WearScarf(pos, ori, usd_path, env_dx, env_dy, args.ground_material_usd, args.validation_flag, args.record_video_flag, args.training_data_num, args.stage_1_checkpoint_num, args.stage_2_checkpoint_num, args.stage_3_checkpoint_num)
|
| 463 |
+
|
| 464 |
+
if args.validation_flag:
|
| 465 |
+
simulation_app.close()
|
| 466 |
+
else:
|
| 467 |
+
while simulation_app.is_running():
|
| 468 |
+
simulation_app.update()
|
| 469 |
+
|
| 470 |
+
simulation_app.close()
|