diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..ec0e2712e736b6cf413b496379dc62d985918f46 --- /dev/null +++ b/.gitignore @@ -0,0 +1,193 @@ +# Created by .ignore support plugin (hsz.mobi) +### Python template +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +*.npy + +*.zip + +*.ply + + +#### old files #### +exp_runner_arti_* + +exp_runner.py +exp_runner_arti.py +exp_runner_dyn_model.py +exp_runner_sim.py +get-pip.py + +test.py + + +#### old scripts and data&exp folders ### +scripts/ +confs/ +data/ +ckpts/ +exp/ +uni_rep/ + +*/*_local.sh +*/*_local.conf + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +rsc/ +raw_data/ + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# IPython Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# dotenv +.env + +# virtualenv +venv/ +ENV/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject +### VirtualEnv template +# Virtualenv +# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ +.Python +[Bb]in +[Ii]nclude +[Ll]ib +[Ll]ib64 +[Ll]ocal +# [Ss]cripts +pyvenv.cfg +.venv +pip-selfcheck.json +### JetBrains template +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea/workspace.xml +.idea/tasks.xml +.idea/dictionaries +.idea/vcs.xml +.idea/jsLibraryMappings.xml + +# Sensitive or high-churn files: +.idea/dataSources.ids +.idea/dataSources.xml +.idea/dataSources.local.xml +.idea/sqlDataSources.xml +.idea/dynamic.xml +.idea/uiDesigner.xml + +# Gradle: +.idea/gradle.xml +.idea/libraries + +# Mongo Explorer plugin: +.idea/mongoSettings.xml + +.idea/ + +## File-based project format: +*.iws + + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +data +public_data +exp +tmp + +.models/*.npy +.models/*.ply diff --git a/README.md b/README.md index 719e46fa636c6d7aaa1bc6bc8c690ac681c7455b..ed46bd795245fc08c0b7709a4ab1daae20292b46 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ --- title: Quasi Physical Sims -emoji: 📚 -colorFrom: red -colorTo: gray +emoji: 🏃 +colorFrom: gray +colorTo: indigo sdk: gradio sdk_version: 4.25.0 app_file: app.py diff --git a/confs_new/dyn_grab_arti_shadow_dm.conf b/confs_new/dyn_grab_arti_shadow_dm.conf new file mode 100644 index 0000000000000000000000000000000000000000..2bf7a984c3ed6c1362a917304c1b79feffca1e24 --- /dev/null +++ 
b/confs_new/dyn_grab_arti_shadow_dm.conf @@ -0,0 +1,288 @@ +general { + + base_exp_dir = exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_diffhand_model_" + + recording = [ + ./, + ./models + ] +} + + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + # learning_rate = 5e-6 + learning_rate_actions = 5e-6 + learning_rate_alpha = 0.05 + end_iter = 300000 + + # batch_size = 128 # 64 + # batch_size = 4000 + # batch_size = 3096 # 64 + batch_size = 1024 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 # 2500 + val_mesh_freq = 20 # 5000 + report_freq = 10 + ### igr weight ### + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + + load_redmax_robot_actions = "" + penetrating_depth_penalty = 0.0 + train_states = True + + minn_dist_threshold = 0.000 + obj_mass = 100.0 + obj_mass = 30.0 + + # use_mano_hand_for_test = False + use_mano_hand_for_test = True + + # train_residual_friction = False + train_residual_friction = True + + # use_LBFGS = True + use_LBFGS = False + + + use_mano_hand_for_test = False + train_residual_friction = True + + extract_delta_mesh = False + freeze_weights = True + # gt_act_xs_def = True + gt_act_xs_def = False + use_bending_network = True + use_delta_bending = True + + use_passive_nets = True + use_split_network = True + + n_timesteps = 60 + + + # using_delta_glb_trans = True + using_delta_glb_trans = False + # train_multi_seqs = True + + + # optimize_with_intermediates = False + optimize_with_intermediates = True + + + loss_tangential_diff_coef = 1000 + loss_tangential_diff_coef = 0 + + + + optimize_active_object = True + + no_friction_constraint = False + + optimize_glb_transformations = True + + + sim_model_path = "DiffHand/assets/hand_sphere_only_hand_testt.xml" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + mano_mult_const_after_cent = 1.0 + sim_num_steps = 1000000 + + bending_net_type = "active_force_field_v18" + + + ### try to train the residual friction ? 
### + train_residual_friction = True + optimize_rules = True + + load_optimized_init_actions = "" + + + use_optimizable_params = True + + + ### grab train seq 224 ### + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + # penetration_proj_k_to_robot_friction = 40000000.0 + # penetration_proj_k_to_robot_friction = 100000000.0 # as friction coefs here # + use_same_contact_spring_k = False + + + # sim_model_path = "/home/xueyi/diffsim/DiffHand/assets/hand_sphere_only_hand_testt.xml" + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + + + optimize_rules = False + + penetration_determining = "sdf_of_canon" + + optimize_rules = False + + optim_sim_model_params_from_mano = False + optimize_rules = False + + + penetration_proj_k_to_robot_friction = 100000000.0 # as friction coefs here # ## confs ## + penetration_proj_k_to_robot = 40000000.0 # + + + penetrating_depth_penalty = 1 + + minn_dist_threshold_robot_to_obj = 0.0 + + + minn_dist_threshold_robot_to_obj = 0.1 + + optim_sim_model_params_from_mano = False + optimize_rules = False + + optim_sim_model_params_from_mano = True + optimize_rules = True + minn_dist_threshold_robot_to_obj = 0.0 + + optim_sim_model_params_from_mano = False + optimize_rules = False + minn_dist_threshold_robot_to_obj = 0.1 + + + ### kinematics confgs ### + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + # ckpt_fn = "" + # load_optimized_init_transformations = "" + optim_sim_model_params_from_mano = True + optimize_rules = True + minn_dist_threshold_robot_to_obj = 0.0 + + optim_sim_model_params_from_mano = False + optimize_rules = False + optimize_rules = True + + ckpt_fn = "ckpts/grab/102/retargeted_shadow.pth" + load_optimized_init_transformations = "ckpts/grab/102/retargeted_shadow.pth" + optimize_rules = False + + optimize_rules = True + + ## opt roboto ## + opt_robo_glb_trans = True + opt_robo_glb_rot = False # opt rot # ## opt rot ## + opt_robo_states = True + + + use_multi_stages = False + + minn_dist_threshold_robot_to_obj = 0.1 + + penetration_proj_k_to_robot = 40000 + penetration_proj_k_to_robot_friction = 100000 + + drive_robot = "actions" + opt_robo_glb_trans = False + opt_robo_states = True + opt_robo_glb_rot = False + + train_with_forces_to_active = True + + + + load_redmax_robot_actions_fn = "" + + + optimize_rules = False + + + + loss_scale_coef = 1.0 + + + + use_opt_rigid_translations=True + + train_def = True + + + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git 
a/confs_new/dyn_grab_arti_shadow_dm_curriculum.conf b/confs_new/dyn_grab_arti_shadow_dm_curriculum.conf new file mode 100644 index 0000000000000000000000000000000000000000..b0df78f0edfca6269d9994ea0ca5446a8bf60151 --- /dev/null +++ b/confs_new/dyn_grab_arti_shadow_dm_curriculum.conf @@ -0,0 +1,326 @@ +general { + # base_exp_dir = exp/CASE_NAME/wmask + base_exp_dir = /data2/datasets/xueyi/neus/exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_diffhand_model_curriculum_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_actions = 5e-6 + # learning_rate = 5e-6 + # learning_rate = 5e-5 + learning_rate_alpha = 0.05 + end_iter = 300000 + + # batch_size = 128 # 64 + # batch_size = 4000 + # batch_size = 3096 # 64 + batch_size = 1024 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 # 2500 + val_mesh_freq = 20 # 5000 + report_freq = 10 + ### igr weight ### + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + + penetration_proj_k_to_robot = 40 + + penetrating_depth_penalty = 1.0 + penetrating_depth_penalty = 0.0 + train_states = True + penetration_proj_k_to_robot = 4000000000.0 + + + minn_dist_threshold = 0.000 + # minn_dist_threshold = 0.01 + obj_mass = 100.0 + obj_mass = 30.0 + + optimize_rules = True + + use_mano_hand_for_test = False + use_mano_hand_for_test = True + + train_residual_friction = False + train_residual_friction = True + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False + train_residual_friction = True + + extract_delta_mesh = False + freeze_weights = True + # gt_act_xs_def = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + use_delta_bending = True + use_passive_nets = True + # use_passive_nets = False # sv mesh root # + + use_split_network = True + + penetration_determining = "plane_primitives" + + + n_timesteps = 3 # + # n_timesteps = 5 # + n_timesteps = 7 + n_timesteps = 60 + + + + + using_delta_glb_trans = True + using_delta_glb_trans = False + + optimize_with_intermediates = False + optimize_with_intermediates = True + + + loss_tangential_diff_coef = 1000 + loss_tangential_diff_coef = 0 + + + + optimize_active_object = False + optimize_active_object = True + + # optimize_expanded_pts = False + # optimize_expanded_pts = True + + no_friction_constraint = False + + optimize_glb_transformations = True + sim_model_path = "DiffHand/assets/hand_sphere_only_hand_testt.xml" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + mano_mult_const_after_cent = 1.0 + sim_num_steps = 1000000 + + bending_net_type = "active_force_field_v18" + + + ### try to train the residual friction ? 
### + train_residual_friction = True + optimize_rules = True + ### cube ### + load_optimized_init_actions = "" + + optimize_rules = False + + + ## optimize rules ## penetration proj k to robot ## + optimize_rules = True + penetration_proj_k_to_robot = 4000000.0 + use_optimizable_params = True + + penetration_determining = "ball_primitives" # uing ball primitives + optimize_rules = True # + penetration_proj_k_to_robot = 4000000.0 # + use_optimizable_params = True + train_with_forces_to_active = False + + # penetration_determining = "ball_primitives" + ### obj sdf and normals for colllision eteftion and responses ## + ### grab train seq 54; cylinder ### + penetration_determining = "sdf_of_canon" + optimize_rules = True + train_with_forces_to_active = False + + ### grab train seq 1 ### + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + + ### grab train seq 224 ### + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + penetration_proj_k_to_robot_friction = 40000000.0 + penetration_proj_k_to_robot_friction = 100000000.0 + use_same_contact_spring_k = False + sim_model_path = "DiffHand/assets/hand_sphere_only_hand_testt.xml" + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + + + penetration_determining = "sdf_of_canon" + optimize_rules = True + # optimize_rules = True + + optimize_rules = False + + optimize_rules = True + + + optimize_rules = False + + optim_sim_model_params_from_mano = True + optimize_rules = True + optim_sim_model_params_from_mano = False + optimize_rules = False + + penetration_proj_k_to_robot_friction = 100000000.0 + penetration_proj_k_to_robot = 40000000.0 + + + penetrating_depth_penalty = 1 + + minn_dist_threshold_robot_to_obj = 0.0 + + + minn_dist_threshold_robot_to_obj = 0.1 + + optim_sim_model_params_from_mano = True + optimize_rules = True + optim_sim_model_params_from_mano = False + optimize_rules = False + optim_sim_model_params_from_mano = False + optimize_rules = False + + load_optimized_init_transformations = "" + optim_sim_model_params_from_mano = True + optimize_rules = True + minn_dist_threshold_robot_to_obj = 0.0 + + + optim_sim_model_params_from_mano = False + + minn_dist_threshold_robot_to_obj = 0.1 + + + ### kinematics confgs ### + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + # ckpt_fn = "" + load_optimized_init_transformations = "" + optim_sim_model_params_from_mano = True + optimize_rules = True + minn_dist_threshold_robot_to_obj = 0.0 + + optim_sim_model_params_from_mano = False + + optimize_rules = True + + ckpt_fn = "ckpts/grab/102/retargeted_shadow.pth" + ckpt_fn = "/data2/datasets/xueyi/neus/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_retargeted_shadow_hand_states_optrobot__seq_102_optactswreacts_redmaxacts_rules_/checkpoints/ckpt_035459.pth" + load_optimized_init_transformations = "ckpts/grab/102/retargeted_shadow.pth" + + + optimize_rules = True + + ## opt roboto ## + opt_robo_glb_trans = True + opt_robo_glb_rot = False # opt rot # ## opt rot ## + opt_robo_states = True + + + load_redmax_robot_actions_fn = "ckpts/grab/102/diffhand_act.npy" + + + + ckpt_fn = "" + + use_multi_stages = True + train_with_forces_to_active = True + + + # optimize_rules = False + loss_scale_coef = 1.0 ## loss scale coef ## loss scale coef #### + + + + use_opt_rigid_translations=True + + train_def = True + + # 
optimizable_rigid_translations = False # + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/confs_new/dyn_grab_arti_shadow_dm_singlestage.conf b/confs_new/dyn_grab_arti_shadow_dm_singlestage.conf new file mode 100644 index 0000000000000000000000000000000000000000..8be30845982816c449f97e7b91b5cc2553d907f5 --- /dev/null +++ b/confs_new/dyn_grab_arti_shadow_dm_singlestage.conf @@ -0,0 +1,318 @@ +general { + base_exp_dir = exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_diffhand_model_curriculum_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_actions = 5e-6 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + + save_freq = 10000 + val_freq = 20 + val_mesh_freq = 20 + report_freq = 10 + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + + penetration_proj_k_to_robot = 40 + + penetrating_depth_penalty = 1.0 + penetrating_depth_penalty = 0.0 + train_states = True + penetration_proj_k_to_robot = 4000000000.0 + + + minn_dist_threshold = 0.000 + # minn_dist_threshold = 0.01 + obj_mass = 100.0 + obj_mass = 30.0 + + optimize_rules = True + + use_mano_hand_for_test = False + use_mano_hand_for_test = True + + train_residual_friction = False + train_residual_friction = True + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False + train_residual_friction = True + + extract_delta_mesh = False + freeze_weights = True + # gt_act_xs_def = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + use_delta_bending = True + use_passive_nets = True + # use_passive_nets = False # sv mesh root # + + use_split_network = True + + penetration_determining = "plane_primitives" + + + n_timesteps = 3 # + # n_timesteps = 5 # + n_timesteps = 7 + n_timesteps = 60 + + + + + using_delta_glb_trans = True + using_delta_glb_trans = False + + optimize_with_intermediates = False + optimize_with_intermediates = True + + + loss_tangential_diff_coef = 1000 + loss_tangential_diff_coef = 0 + + + + optimize_active_object = False + optimize_active_object = True + + # optimize_expanded_pts = False + # optimize_expanded_pts = True + + no_friction_constraint = False + + optimize_glb_transformations = True + sim_model_path = "DiffHand/assets/hand_sphere_only_hand_testt.xml" + mano_sim_model_path = 
"rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + mano_mult_const_after_cent = 1.0 + sim_num_steps = 1000000 + + bending_net_type = "active_force_field_v18" + + + ### try to train the residual friction ? ### + train_residual_friction = True + optimize_rules = True + ### cube ### + load_optimized_init_actions = "" + + optimize_rules = False + + + ## optimize rules ## penetration proj k to robot ## + optimize_rules = True + penetration_proj_k_to_robot = 4000000.0 + use_optimizable_params = True + + penetration_determining = "ball_primitives" # uing ball primitives + optimize_rules = True # + penetration_proj_k_to_robot = 4000000.0 # + use_optimizable_params = True + train_with_forces_to_active = False + + # penetration_determining = "ball_primitives" + ### obj sdf and normals for colllision eteftion and responses ## + ### grab train seq 54; cylinder ### + penetration_determining = "sdf_of_canon" + optimize_rules = True + train_with_forces_to_active = False + + ### grab train seq 1 ### + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + + ### grab train seq 224 ### + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + penetration_proj_k_to_robot_friction = 40000000.0 + penetration_proj_k_to_robot_friction = 100000000.0 + use_same_contact_spring_k = False + sim_model_path = "DiffHand/assets/hand_sphere_only_hand_testt.xml" + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + + + penetration_determining = "sdf_of_canon" + optimize_rules = True + # optimize_rules = True + + optimize_rules = False + + optimize_rules = True + + + optimize_rules = False + + optim_sim_model_params_from_mano = True + optimize_rules = True + optim_sim_model_params_from_mano = False + optimize_rules = False + + penetration_proj_k_to_robot_friction = 100000000.0 + penetration_proj_k_to_robot = 40000000.0 + + + penetrating_depth_penalty = 1 + + minn_dist_threshold_robot_to_obj = 0.0 + + + minn_dist_threshold_robot_to_obj = 0.1 + + optim_sim_model_params_from_mano = True + optimize_rules = True + optim_sim_model_params_from_mano = False + optimize_rules = False + optim_sim_model_params_from_mano = False + optimize_rules = False + + load_optimized_init_transformations = "" + optim_sim_model_params_from_mano = True + optimize_rules = True + minn_dist_threshold_robot_to_obj = 0.0 + + + optim_sim_model_params_from_mano = False + + minn_dist_threshold_robot_to_obj = 0.1 + + + ### kinematics confgs ### + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + # ckpt_fn = "" + load_optimized_init_transformations = "" + optim_sim_model_params_from_mano = True + optimize_rules = True + minn_dist_threshold_robot_to_obj = 0.0 + + optim_sim_model_params_from_mano = False + + optimize_rules = True + + ckpt_fn = "ckpts/grab/102/retargeted_shadow.pth" + load_optimized_init_transformations = "ckpts/grab/102/retargeted_shadow.pth" + + + optimize_rules = True + + ## opt roboto ## + opt_robo_glb_trans = True + opt_robo_glb_rot = False + opt_robo_states = True + + + load_redmax_robot_actions_fn = "ckpts/grab/102/diffhand_act.npy" + + + + ckpt_fn = "" + + use_multi_stages = False + train_with_forces_to_active = True + + + # optimize_rules = False + loss_scale_coef = 1.0 ## loss scale coef ## loss scale coef #### + + + + use_opt_rigid_translations=True + + train_def = True + + # optimizable_rigid_translations = False # + 
optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/confs_new/dyn_grab_pointset_mano.conf b/confs_new/dyn_grab_pointset_mano.conf new file mode 100644 index 0000000000000000000000000000000000000000..c793f944575ba2a8f6cbb5888243e31fefe0f3bd --- /dev/null +++ b/confs_new/dyn_grab_pointset_mano.conf @@ -0,0 +1,215 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + + # tag = "train_retargeted_shadow_hand_seq_102_mano_sparse_retargeting_" + tag = "train_dyn_mano_acts_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 + val_mesh_freq = 20 + report_freq = 10 + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = 
False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + ###### ###### + # drive_pointset = "states" + fix_obj = True # to track the hand only + optimize_rules = False + train_pointset_acts_via_deltas = False + load_optimized_init_actions = "" + load_optimized_init_transformations = "" + ckpt_fn = "" + retar_only_glb = True + # use_multi_stages = True + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + + use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/confs_new/dyn_grab_pointset_mano_dyn.conf b/confs_new/dyn_grab_pointset_mano_dyn.conf new file mode 100644 index 0000000000000000000000000000000000000000..8108033a765cf1a209bd207f0971326831089654 --- /dev/null +++ b/confs_new/dyn_grab_pointset_mano_dyn.conf @@ -0,0 +1,218 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + + # tag = "train_retargeted_shadow_hand_seq_102_mano_sparse_retargeting_" + # tag = "train_dyn_mano_acts_" + tag = "train_dyn_mano_acts_wreact_optps_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 + val_mesh_freq = 20 + report_freq = 10 + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + 
extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + ###### Stage 1: optimize for the parametes ###### + # drive_pointset = "states" + fix_obj = False + optimize_rules = True + train_pointset_acts_via_deltas = False + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_arti.pth" + load_optimized_init_transformations = "" + ckpt_fn = "ckpts/grab/102/dyn_mano_arti.pth" + # retar_only_glb = True + # use_multi_stages = True + ###### Stage 1: optimize for the parametes ###### + + use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} + + diff --git a/confs_new/dyn_grab_pointset_mano_dyn_optacts.conf b/confs_new/dyn_grab_pointset_mano_dyn_optacts.conf new file mode 100644 index 0000000000000000000000000000000000000000..46e759699c0a5d26e8303c16d631749e109ab6f4 --- /dev/null +++ b/confs_new/dyn_grab_pointset_mano_dyn_optacts.conf @@ -0,0 +1,218 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + + # tag = "train_retargeted_shadow_hand_seq_102_mano_sparse_retargeting_" + # tag = "train_dyn_mano_acts_" + tag = "train_dyn_mano_acts_wreact_optps_optacts_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + 
+train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 + val_mesh_freq = 20 + report_freq = 10 + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + ###### Stage 1: optimize for the parametes ###### + # drive_pointset = "states" + fix_obj = False + optimize_rules = False + train_pointset_acts_via_deltas = False + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_arti.pth" + load_optimized_init_transformations = "" + ckpt_fn = "ckpts/grab/102/dyn_mano_arti.pth" + # retar_only_glb = True + # use_multi_stages = True + ###### Stage 1: optimize for the parametes ###### + + use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + 
n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} + + diff --git a/confs_new/dyn_grab_pointset_points_dyn.conf b/confs_new/dyn_grab_pointset_points_dyn.conf new file mode 100644 index 0000000000000000000000000000000000000000..71b935d7a5875a00c24d6643d15e62ae55d515df --- /dev/null +++ b/confs_new/dyn_grab_pointset_points_dyn.conf @@ -0,0 +1,257 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 # 64 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 # 2500 + val_mesh_freq = 20 # 5000 + report_freq = 10 + ### igr weight ### + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + + + + + + ###### threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = 
"/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d4_/checkpoints/ckpt_002000.pth" + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d2_/checkpoints/ckpt_008000.pth" + ###### threshold, ks settings 1, optimize acts ###### + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + drive_pointset = "states" + fix_obj = True + optimize_rules = False + train_pointset_acts_via_deltas = False + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_arti.pth" + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + + + ###### Stage 2: threshold, ks settings 1, optimize acts ###### + drive_pointset = "actions" + fix_obj = True + optimize_rules = False + train_pointset_acts_via_deltas = True + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_states.pt" + ###### Stage 2: threshold, ks settings 1, optimize acts ###### + + + ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + drive_pointset = "actions" + fix_obj = False + optimize_rules = True + train_pointset_acts_via_deltas = True + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_acts.pt" + ##### model parameters optimized from the MANO hand trajectory ##### + ckpt_fn = "ckpts/grab/102/dyn_mano_opts.pt" + ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + + + ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + drive_pointset = "actions" + fix_obj = False + optimize_rules = False + train_pointset_acts_via_deltas = True ## pointset acts via deltas ### + ##### model parameters optimized from the MANO hand expanded set trajectory ##### + ckpt_fn = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps.pth" + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_optimized_acts.pth" + ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + + + use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git 
a/confs_new/dyn_grab_pointset_points_dyn_retar.conf b/confs_new/dyn_grab_pointset_points_dyn_retar.conf new file mode 100644 index 0000000000000000000000000000000000000000..2c2695f3c85b94f0ca4d269be1b35fb36db8fcbd --- /dev/null +++ b/confs_new/dyn_grab_pointset_points_dyn_retar.conf @@ -0,0 +1,274 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_" + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_retar_to_shadow_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 # 64 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 # 2500 + val_mesh_freq = 20 # 5000 + report_freq = 10 + ### igr weight ### + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + + + + + + ###### threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d4_/checkpoints/ckpt_002000.pth" + # load_optimized_init_actions = 
"/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d2_/checkpoints/ckpt_008000.pth" + ###### threshold, ks settings 1, optimize acts ###### + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + # ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + # drive_pointset = "states" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = False + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_arti.pth" + # ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + + + # ###### Stage 2: threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_states.pt" + # ###### Stage 2: threshold, ks settings 1, optimize acts ###### + + + # ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = True + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_acts.pt" + # ##### model parameters optimized from the MANO hand trajectory ##### + # ckpt_fn = "ckpts/grab/102/dyn_mano_opts.pt" + # ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + + + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = False + # train_pointset_acts_via_deltas = True ## pointset acts via deltas ### + # ##### model parameters optimized from the MANO hand expanded set trajectory ##### + # ckpt_fn = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps.pth" + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_optimized_acts.pth" + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + + + + ###### Retargeting Stage 1 ###### + drive_pointset = "actions" + fix_obj = False + optimize_rules = False + train_pointset_acts_via_deltas = True + ##### model parameters optimized from the MANO hand expanded set trajectory ##### + ckpt_fn = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps_optimized_acts.pth" + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps_optimized_acts.pth" + load_optimized_init_transformations = "ckpts/grab/102/dyn_mano_shadow_arti.pth" + finger_cd_loss = 1.0 + optimize_pointset_motion_only = True + ###### Retargeting Stage 1 ###### + + + + use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + 
squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/confs_new/dyn_grab_pointset_points_dyn_retar_pts.conf b/confs_new/dyn_grab_pointset_points_dyn_retar_pts.conf new file mode 100644 index 0000000000000000000000000000000000000000..dcb9244086b500790a7c979fe38e4fb531500b73 --- /dev/null +++ b/confs_new/dyn_grab_pointset_points_dyn_retar_pts.conf @@ -0,0 +1,281 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_" + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_retar_to_shadow_pointset_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 # 64 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 # 2500 + val_mesh_freq = 20 # 5000 + report_freq = 10 + ### igr weight ### + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + + + + + + ###### threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = 
"/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d4_/checkpoints/ckpt_002000.pth" + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d2_/checkpoints/ckpt_008000.pth" + ###### threshold, ks settings 1, optimize acts ###### + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + # ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + # drive_pointset = "states" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = False + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_arti.pth" + # ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + + + # ###### Stage 2: threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_states.pt" + # ###### Stage 2: threshold, ks settings 1, optimize acts ###### + + + # ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = True + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_acts.pt" + # ##### model parameters optimized from the MANO hand trajectory ##### + # ckpt_fn = "ckpts/grab/102/dyn_mano_opts.pt" + # ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + + + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = False + # train_pointset_acts_via_deltas = True ## pointset acts via deltas ### + # ##### model parameters optimized from the MANO hand expanded set trajectory ##### + # ckpt_fn = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps.pth" + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_optimized_acts.pth" + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + + + + ###### Retargeting Stage 1 ###### + drive_pointset = "actions" + fix_obj = False + optimize_rules = False + train_pointset_acts_via_deltas = True + ##### model parameters optimized from the MANO hand expanded set trajectory ##### + ckpt_fn = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps_optimized_acts.pth" + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps_optimized_acts.pth" + load_optimized_init_transformations = "ckpts/grab/102/dyn_mano_shadow_arti_retar.pth" + finger_cd_loss = 1.0 + optimize_pointset_motion_only = True + ###### Retargeting Stage 1 ###### + + + + ###### Retargeting Stage 2 ###### + load_optimized_init_transformations = "ckpts/grab/102/dyn_mano_shadow_arti_retar_optimized_arti.pth" + ###### Retargeting Stage 2 ###### + + + + + use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires 
= 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/confs_new/dyn_grab_pointset_points_dyn_retar_pts_opts.conf b/confs_new/dyn_grab_pointset_points_dyn_retar_pts_opts.conf new file mode 100644 index 0000000000000000000000000000000000000000..16e6e1af268e6e011b2070107266e20c37544334 --- /dev/null +++ b/confs_new/dyn_grab_pointset_points_dyn_retar_pts_opts.conf @@ -0,0 +1,287 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_" + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_retar_to_shadow_pointset_" + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_retar_to_shadow_pointset_optrules_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 # 64 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + save_freq = 10000 + val_freq = 20 + val_mesh_freq = 20 + report_freq = 10 + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + train_residual_friction = True + 
mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + + + + + + ###### threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d4_/checkpoints/ckpt_002000.pth" + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d2_/checkpoints/ckpt_008000.pth" + ###### threshold, ks settings 1, optimize acts ###### + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + # ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + # drive_pointset = "states" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = False + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_arti.pth" + # ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + + + # ###### Stage 2: threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_states.pt" + # ###### Stage 2: threshold, ks settings 1, optimize acts ###### + + + # ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = True + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_acts.pt" + # ##### model parameters optimized from the MANO hand trajectory ##### + # ckpt_fn = "ckpts/grab/102/dyn_mano_opts.pt" + # ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + + + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = False + # train_pointset_acts_via_deltas = True ## pointset acts via deltas ### + # ##### model parameters optimized from the MANO hand expanded set trajectory ##### + # ckpt_fn = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps.pth" + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_optimized_acts.pth" + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + + + + ###### Retargeting Stage 1 ###### + drive_pointset = "actions" + fix_obj = False + optimize_rules = False + train_pointset_acts_via_deltas = True + ##### model parameters optimized from the MANO hand expanded set trajectory ##### + ckpt_fn = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps_optimized_acts.pth" + load_optimized_init_actions = 
"ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps_optimized_acts.pth" + load_optimized_init_transformations = "ckpts/grab/102/dyn_mano_shadow_arti_retar.pth" + finger_cd_loss = 1.0 + optimize_pointset_motion_only = True + ###### Retargeting Stage 1 ###### + + + ###### Retargeting Stage 2 ###### + load_optimized_init_transformations = "ckpts/grab/102/dyn_mano_shadow_arti_retar_optimized_arti.pth" + ###### Retargeting Stage 2 ###### + + + ###### Retargeting Stage 3 ###### + load_optimized_init_transformations = "ckpts/grab/102/dyn_mano_shadow_arti_retar_optimized_arti_optimized_pts.pth" + optimize_anchored_pts = False + optimize_rules = True + optimize_pointset_motion_only = False + ###### Retargeting Stage 3 ###### + + + + + use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/confs_new/dyn_grab_pointset_points_dyn_s1.conf b/confs_new/dyn_grab_pointset_points_dyn_s1.conf new file mode 100644 index 0000000000000000000000000000000000000000..109ac5cd497c87db2d6de004ed924e6297b482a4 --- /dev/null +++ b/confs_new/dyn_grab_pointset_points_dyn_s1.conf @@ -0,0 +1,256 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_optstates_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 + val_mesh_freq = 20 + report_freq = 10 + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = 
False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + + + + + + ###### threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d4_/checkpoints/ckpt_002000.pth" + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d2_/checkpoints/ckpt_008000.pth" + ###### threshold, ks settings 1, optimize acts ###### + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + drive_pointset = "states" + fix_obj = True + optimize_rules = False + train_pointset_acts_via_deltas = False + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_arti.pth" + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + + + # ###### Stage 2: threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_states.pt" + # ###### Stage 2: threshold, ks settings 1, optimize acts ###### + + + # ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = True + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_acts.pt" + # ##### model parameters optimized from the MANO hand trajectory ##### + # ckpt_fn = "ckpts/grab/102/dyn_mano_opts.pt" + # ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + + + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = False + # train_pointset_acts_via_deltas = True ## pointset acts via deltas ### + # ##### model parameters optimized from the MANO hand expanded set trajectory ##### + # ckpt_fn = 
"ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps.pth" + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_optimized_acts.pth" + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + + + use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/confs_new/dyn_grab_pointset_points_dyn_s2.conf b/confs_new/dyn_grab_pointset_points_dyn_s2.conf new file mode 100644 index 0000000000000000000000000000000000000000..cb23e32233bfbd4ca609729ec144df412d14c26a --- /dev/null +++ b/confs_new/dyn_grab_pointset_points_dyn_s2.conf @@ -0,0 +1,258 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_optstates_optacts_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 # 64 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 # 2500 + val_mesh_freq = 20 # 5000 + report_freq = 10 + ### igr weight ### + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + 
optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + + + + + + ###### threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d4_/checkpoints/ckpt_002000.pth" + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d2_/checkpoints/ckpt_008000.pth" + ###### threshold, ks settings 1, optimize acts ###### + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + drive_pointset = "states" + fix_obj = True + optimize_rules = False + train_pointset_acts_via_deltas = False + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_arti.pth" + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + + + ###### Stage 2: threshold, ks settings 1, optimize acts ###### + drive_pointset = "actions" + fix_obj = True + optimize_rules = False + train_pointset_acts_via_deltas = True + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_states.pt" ## pre-optimized ckpts + load_optimized_init_actions = "exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_retargeted_shadow_hand_seq_102_mano_pointset_acts_optstates_/checkpoints/ckpt_004000.pth" + ###### Stage 2: threshold, ks settings 1, optimize acts ###### + + + # ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = True + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_acts.pt" + # ##### model parameters optimized from the MANO hand trajectory ##### + # ckpt_fn = "ckpts/grab/102/dyn_mano_opts.pt" + # ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + + + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = False + # train_pointset_acts_via_deltas = True ## pointset acts via deltas ### + # ##### model parameters optimized from the MANO hand expanded set trajectory ##### + # ckpt_fn = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps.pth" + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_optimized_acts.pth" + # ###### Stage 4: threshold, ks settings 1, optimize acts 
from optimized params ###### + + + use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/confs_new/dyn_grab_pointset_points_dyn_s3.conf b/confs_new/dyn_grab_pointset_points_dyn_s3.conf new file mode 100644 index 0000000000000000000000000000000000000000..8860408a69cd17c4236ea2687040a74c1bfe77b4 --- /dev/null +++ b/confs_new/dyn_grab_pointset_points_dyn_s3.conf @@ -0,0 +1,259 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_optstates_optacts_optsysps_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 # 64 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 # 2500 + val_mesh_freq = 20 # 5000 + report_freq = 10 + ### igr weight ### + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = 
True # + train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + + + + + + ###### threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d4_/checkpoints/ckpt_002000.pth" + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d2_/checkpoints/ckpt_008000.pth" + ###### threshold, ks settings 1, optimize acts ###### + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + drive_pointset = "states" + fix_obj = True + optimize_rules = False + train_pointset_acts_via_deltas = False + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_arti.pth" + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + + + ###### Stage 2: threshold, ks settings 1, optimize acts ###### + drive_pointset = "actions" + fix_obj = True + optimize_rules = False + train_pointset_acts_via_deltas = True + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_states.pt" + ###### Stage 2: threshold, ks settings 1, optimize acts ###### + + + ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + drive_pointset = "actions" + fix_obj = False + optimize_rules = True + train_pointset_acts_via_deltas = True + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_acts.pt" + load_optimized_init_actions = "exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_retargeted_shadow_hand_seq_102_mano_pointset_acts_optstates_optacts_/checkpoints/ckpt_004000.pth" + ##### model parameters optimized from the MANO hand trajectory ##### + ckpt_fn = "ckpts/grab/102/dyn_mano_opts.pt" + ckpt_fn = "exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_retargeted_shadow_hand_seq_102_mano_pointset_acts_optstates_optacts_/checkpoints/ckpt_004000.pth" + ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + + + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + # drive_pointset = "actions" + # fix_obj = False + # optimize_rules = False + # train_pointset_acts_via_deltas = True ## pointset acts via deltas ### + # ##### model parameters optimized from the MANO hand expanded set trajectory ##### + # ckpt_fn = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps.pth" + # load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_optimized_acts.pth" + # ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + + + 
use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/confs_new/dyn_grab_pointset_points_dyn_s4.conf b/confs_new/dyn_grab_pointset_points_dyn_s4.conf new file mode 100644 index 0000000000000000000000000000000000000000..6fbc613f4caf184124d5b78efea7f551c63bda3c --- /dev/null +++ b/confs_new/dyn_grab_pointset_points_dyn_s4.conf @@ -0,0 +1,259 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + tag = "train_retargeted_shadow_hand_seq_102_mano_pointset_acts_optstates_optacts_optsysps_optacts_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 # 64 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 # 2500 + val_mesh_freq = 20 # 5000 + report_freq = 10 + ### igr weight ### + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + 
train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + + + + + + ###### threshold, ks settings 1, optimize acts ###### + # drive_pointset = "actions" + # fix_obj = True + # optimize_rules = False + # train_pointset_acts_via_deltas = True + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d4_/checkpoints/ckpt_002000.pth" + # load_optimized_init_actions = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_dyn_mano_hand_seq_102_mouse_optdynactions_points_optrobo_offsetdriven_optrules_multk100_wfixobj_optdelta_radius0d2_/checkpoints/ckpt_008000.pth" + ###### threshold, ks settings 1, optimize acts ###### + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + drive_pointset = "states" + fix_obj = True + optimize_rules = False + train_pointset_acts_via_deltas = False + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_arti.pth" + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + + + ###### Stage 2: threshold, ks settings 1, optimize acts ###### + drive_pointset = "actions" + fix_obj = True + optimize_rules = False + train_pointset_acts_via_deltas = True + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_states.pt" + ###### Stage 2: threshold, ks settings 1, optimize acts ###### + + + ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + drive_pointset = "actions" + fix_obj = False + optimize_rules = True + train_pointset_acts_via_deltas = True + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_acts.pt" + ##### model parameters optimized from the MANO hand trajectory ##### + ckpt_fn = "ckpts/grab/102/dyn_mano_opts.pt" + ###### Stage 3: threshold, ks settings 1, optimize params from acts ###### + + + ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + drive_pointset = "actions" + fix_obj = False + optimize_rules = False + train_pointset_acts_via_deltas = True ## pointset acts via deltas ### + ##### model parameters optimized from the MANO hand expanded set trajectory ##### + ckpt_fn = "ckpts/grab/102/dyn_mano_pointset_optimized_acts_optimized_ps.pth" + load_optimized_init_actions = "ckpts/grab/102/dyn_mano_pointset_optimized_acts.pth" + ckpt_fn = "exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_retargeted_shadow_hand_seq_102_mano_pointset_acts_optstates_optacts_optsysps_/checkpoints/ckpt_044000.pth" + load_optimized_init_actions = "exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_retargeted_shadow_hand_seq_102_mano_pointset_acts_optstates_optacts_optsysps_/checkpoints/ckpt_044000.pth" + ###### Stage 4: threshold, ks settings 1, optimize acts from optimized params ###### + + + 
use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/confs_new/dyn_grab_sparse_retar.conf b/confs_new/dyn_grab_sparse_retar.conf new file mode 100644 index 0000000000000000000000000000000000000000..c93585783bb59f5dd4ff64e3b717b4b619b1b479 --- /dev/null +++ b/confs_new/dyn_grab_sparse_retar.conf @@ -0,0 +1,214 @@ +general { + + + base_exp_dir = exp/CASE_NAME/wmask + + + tag = "train_retargeted_shadow_hand_seq_102_mano_sparse_retargeting_" + + recording = [ + ./, + ./models + ] +} + +dataset { + data_dir = public_data/CASE_NAME/ + render_cameras_name = cameras_sphere.npz + object_cameras_name = cameras_sphere.npz + obj_idx = 102 +} + +train { + learning_rate = 5e-4 + learning_rate_alpha = 0.05 + end_iter = 300000 + + batch_size = 1024 + validate_resolution_level = 4 + warm_up_end = 5000 + anneal_end = 0 + use_white_bkgd = False + + # save_freq = 10000 + save_freq = 10000 + val_freq = 20 + val_mesh_freq = 20 + report_freq = 10 + igr_weight = 0.1 + mask_weight = 0.1 +} + +model { + + optimize_dyn_actions = True + + + optimize_robot = True + + use_penalty_based_friction = True + + use_split_params = False + + use_sqr_spring_stiffness = True + + use_pre_proj_frictions = True + + + + use_sqrt_dist = True + contact_maintaining_dist_thres = 0.2 + + robot_actions_diff_coef = 0.001 + + + use_sdf_as_contact_dist = True + + + # + use_contact_dist_as_sdf = False + + use_glb_proj_delta = True + + + +# penetration_proj_k_to_robot = 30 + penetrating_depth_penalty = 1.0 + train_states = True + + + + minn_dist_threshold = 0.000 + obj_mass = 30.0 + + + use_LBFGS = True + use_LBFGS = False + + use_mano_hand_for_test = False # use the dynamic mano model here # + + extract_delta_mesh = False + freeze_weights = True + gt_act_xs_def = False + use_bending_network = True + ### for ts = 3 ### + # use_delta_bending = False + ### for ts = 3 ### + + + + + sim_model_path = "rsc/shadow_hand_description/shadowhand_new.urdf" + mano_sim_model_path = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + + obj_sdf_fn = "data/grab/102/102_obj.npy" + kinematic_mano_gt_sv_fn = "data/grab/102/102_sv_dict.npy" + scaled_obj_mesh_fn = "data/grab/102/102_obj.obj" + + bending_net_type = "active_force_field_v18" + sim_num_steps = 1000000 + n_timesteps = 60 + optim_sim_model_params_from_mano = False + penetration_determining = "sdf_of_canon" + train_with_forces_to_active = False + loss_scale_coef = 1000.0 + use_same_contact_spring_k = False + use_optimizable_params = True # + train_residual_friction = True + mano_mult_const_after_cent = 1.0 + optimize_glb_transformations = True + 
no_friction_constraint = False + optimize_active_object = True + loss_tangential_diff_coef = 0 + optimize_with_intermediates = True + using_delta_glb_trans = False + train_multi_seqs = False + use_split_network = True + use_delta_bending = True + + + + + ##### contact spring model settings #### + minn_dist_threshold_robot_to_obj = 0.1 + penetration_proj_k_to_robot_friction = 10000000.0 + penetration_proj_k_to_robot = 4000000.0 + ##### contact spring model settings #### + + + ###### ###### + drive_pointset = "states" + fix_obj = True + optimize_rules = False + train_pointset_acts_via_deltas = False + load_optimized_init_actions = "" + load_optimized_init_transformations = "" + ckpt_fn = "" + retar_only_glb = True + # use_multi_stages = True + ###### Stage 1: threshold, ks settings 1, optimize offsets ###### + + use_opt_rigid_translations=True + + train_def = True + optimizable_rigid_translations=True + + nerf { + D = 8, + d_in = 4, + d_in_view = 3, + W = 256, + multires = 10, + multires_view = 4, + output_ch = 4, + skips=[4], + use_viewdirs=True + } + + sdf_network { + d_out = 257, + d_in = 3, + d_hidden = 256, + n_layers = 8, + skip_in = [4], + multires = 6, + bias = 0.5, + scale = 1.0, + geometric_init = True, + weight_norm = True, + } + + variance_network { + init_val = 0.3 + } + + rendering_network { + d_feature = 256, + mode = idr, + d_in = 9, + d_out = 3, + d_hidden = 256, + n_layers = 4, + weight_norm = True, + multires_view = 4, + squeeze_out = True, + } + + neus_renderer { + n_samples = 64, + n_importance = 64, + n_outside = 0, + up_sample_steps = 4 , + perturb = 1.0, + } + + bending_network { + multires = 6, + bending_latent_size = 32, + d_in = 3, + rigidity_hidden_dimensions = 64, + rigidity_network_depth = 5, + use_rigidity_network = False, + bending_n_timesteps = 10, + } +} diff --git a/exp_runner_stage_1.py b/exp_runner_stage_1.py new file mode 100644 index 0000000000000000000000000000000000000000..7263b292e4105cdd5dae33b84d47acb997e0bbe5 --- /dev/null +++ b/exp_runner_stage_1.py @@ -0,0 +1,9063 @@ +import os +import time +import logging +import argparse +import numpy as np +# import cv2 as cv +import trimesh +import torch +import torch.nn.functional as F +try: + from torch.utils.tensorboard import SummaryWriter +except: + SummaryWriter = None + pass +from shutil import copyfile +# from icecream import ic +from tqdm import tqdm +from pyhocon import ConfigFactory +from models.fields import RenderingNetwork, SDFNetwork, SingleVarianceNetwork, NeRF, BendingNetwork +import models.data_utils_torch as data_utils +# import models.dyn_model_utils as dyn_utils +import torch.nn as nn +# import models.renderer_def_multi_objs as render_utils +import models.fields as fields + +from torch.distributions.categorical import Categorical + +# try: +# import redmax_py as redmax +# except: +# pass +# import open3d as o3d +import models.dyn_model_act as dyn_model_act +import models.dyn_model_act_v2 as dyn_model_act_mano +from scipy.spatial.transform import Rotation as R +# import traceback + +import pickle as pkl +import tempfile + + +## for the quasi physical sims -> point set and point set dynamics model ## + +class Runner: + def __init__(self, conf_path, data_path, mode='train', case='CASE_NAME', is_continue=False): + self.device = torch.device('cuda') + + + self.conf_path = conf_path + self.data_path = data_path + f = open(self.conf_path) + conf_text = f.read() + conf_text = conf_text.replace('CASE_NAME', case) + f.close() + + self.conf = ConfigFactory.parse_string(conf_text) + + + + + # 
self.base_exp_dir = self.conf['general.base_exp_dir'] + # if not os.path.exists(self.base_exp_dir): + # self.base_exp_dir = "/data/xueyi/NeuS/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask" + + temp_dir = tempfile.gettempdir() + self.base_exp_dir = os.path.join(temp_dir, "quasi_sim") + os.makedirs(self.base_exp_dir, exist_ok=True) + self.base_exp_dir = os.path.join(self.base_exp_dir, "exp") + os.makedirs(self.base_exp_dir, exist_ok=True) + self.base_exp_dir = f"{self.base_exp_dir}/wmask" + + print(f"self.base_exp_dir:", self.base_exp_dir) + self.base_exp_dir = self.base_exp_dir + f"_reverse_value_totviews_tag_" + os.makedirs(self.base_exp_dir, exist_ok=True) + # self.dataset = Dataset(self.conf['dataset']) + + + self.n_timesteps = self.conf['model.n_timesteps'] + + + self.iter_step = 0 + + self.end_iter = self.conf.get_int('train.end_iter') + self.save_freq = self.conf.get_int('train.save_freq') + self.report_freq = self.conf.get_int('train.report_freq') + self.val_freq = self.conf.get_int('train.val_freq') + self.val_mesh_freq = self.conf.get_int('train.val_mesh_freq') + self.batch_size = self.conf.get_int('train.batch_size') + self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level') + self.learning_rate = self.conf.get_float('train.learning_rate') + self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') + self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd') + self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) + self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) + + self.use_bending_network = True + # use_split_network + self.use_selector = True + + + # Weights # + self.igr_weight = self.conf.get_float('train.igr_weight') + self.mask_weight = self.conf.get_float('train.mask_weight') + self.is_continue = is_continue # + self.mode = mode + self.model_list = [] + self.writer = None + + + self.bending_latent_size = self.conf['model.bending_network']['bending_latent_size'] + + + params_to_train = [] + self.nerf_outside = NeRF(**self.conf['model.nerf']).to(self.device) + self.sdf_network = SDFNetwork(**self.conf['model.sdf_network']).to(self.device) + self.deviation_network = SingleVarianceNetwork(**self.conf['model.variance_network']).to(self.device) + self.color_network = RenderingNetwork(**self.conf['model.rendering_network']).to(self.device) + + # self.use_bending_network = self.conf['model.use_bending_network'] + # # bending network size # + # if self.use_bending_network: # add the bendingnetwork # + self.bending_network = BendingNetwork(**self.conf['model.bending_network']).to(self.device) + + + self.use_split_network = self.conf.get_bool('model.use_split_network', False) + if self.use_split_network: + self.bending_network.set_split_bending_network() + self.bending_network.n_timesteps = self.n_timesteps + + + self.extract_delta_mesh = self.conf['model.extract_delta_mesh'] + + + ## optimize for those points; offsets and the dyns ## + # self.use_passive_nets = self.conf['model.use_passive_nets'] + if 'model.bending_net_type' in self.conf: + self.bending_net_type = self.conf['model.bending_net_type'] + else: + self.bending_net_type = "pts_def" + + if 'model.train_multi_seqs' in self.conf and self.conf['model.train_multi_seqs']: + self.rhand_verts, self.hand_faces, self.obj_faces, self.obj_normals, self.ts_to_contact_pts, self.hand_verts = self.load_active_passive_timestep_to_mesh_multi_seqs() + self.train_multi_seqs = True + self.nn_instances = len(self.rhand_verts) + else: 
+ self.train_multi_seqs = False + self.nn_instances = 1 + + if 'model.minn_dist_threshold' in self.conf: + self.minn_dist_threshold = self.conf['model.minn_dist_threshold'] + else: + self.minn_dist_threshold = 0.05 + + if 'model.optimize_with_intermediates' in self.conf: + self.optimize_with_intermediates = self.conf['model.optimize_with_intermediates'] + else: + self.optimize_with_intermediates = False + + if 'model.no_friction_constraint' in self.conf: + self.no_friction_constraint = self.conf['model.no_friction_constraint'] + else: + self.no_friction_constraint = False + + if 'model.optimize_active_object' in self.conf: + self.optimize_active_object = self.conf['model.optimize_active_object'] + else: + self.optimize_active_object = False + + if 'model.optimize_glb_transformations' in self.conf: + self.optimize_glb_transformations = self.conf['model.optimize_glb_transformations'] + else: + self.optimize_glb_transformations = False + + if 'model.with_finger_tracking_loss' in self.conf: + self.with_finger_tracking_loss = self.conf['model.with_finger_tracking_loss'] + else: + self.with_finger_tracking_loss = True + + + if 'model.finger_cd_loss' in self.conf: + self.finger_cd_loss_coef = self.conf['model.finger_cd_loss'] + else: + self.finger_cd_loss_coef = 0. + + if 'model.finger_tracking_loss' in self.conf: + self.finger_tracking_loss_coef = self.conf['model.finger_tracking_loss'] + else: + self.finger_tracking_loss_coef = 0. + + if 'model.tracking_loss' in self.conf: + self.tracking_loss_coef = self.conf['model.tracking_loss'] + else: + self.tracking_loss_coef = 0. + + if 'model.penetrating_depth_penalty' in self.conf: + self.penetrating_depth_penalty_coef = self.conf['model.penetrating_depth_penalty'] + else: + self.penetrating_depth_penalty_coef = 0. + + + if 'model.ragged_dist' in self.conf: + self.ragged_dist_coef = self.conf['model.ragged_dist'] + else: + self.ragged_dist_coef = 1. + + + if 'model.load_only_glb' in self.conf: + self.load_only_glb = self.conf['model.load_only_glb'] + else: + self.load_only_glb = False + + # optimize_rules; optimize_robot # + if 'model.optimize_robot' in self.conf: + self.optimize_robot = self.conf['model.optimize_robot'] + else: + self.optimize_robot = True + + if 'model.optimize_rules' in self.conf: + self.optimize_rules = self.conf['model.optimize_rules'] + else: + self.optimize_rules = False + + + if 'model.optimize_expanded_pts' in self.conf: + self.optimize_expanded_pts = self.conf['model.optimize_expanded_pts'] + else: + self.optimize_expanded_pts = True + + if 'model.optimize_expanded_ragged_pts' in self.conf: + self.optimize_expanded_ragged_pts = self.conf['model.optimize_expanded_ragged_pts'] + else: + self.optimize_expanded_ragged_pts = False + + if 'model.add_delta_state_constraints' in self.conf: + self.add_delta_state_constraints = self.conf['model.add_delta_state_constraints'] + else: + self.add_delta_state_constraints = True + + # + if 'model.train_actions_with_states' in self.conf: + self.train_actions_with_states = self.conf['model.train_actions_with_states'] + else: + self.train_actions_with_states = False + + + if 'model.train_with_forces_to_active' in self.conf: + self.train_with_forces_to_active = self.conf['model.train_with_forces_to_active'] + else: + self.train_with_forces_to_active = False + + + if 'model.loss_weight_diff_states' in self.conf: + self.loss_weight_diff_states = self.conf['model.loss_weight_diff_states'] + else: + self.loss_weight_diff_states = 1.
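+        # NOTE: each if/else block in this section reads an optional key from the parsed
+        # pyhocon config and falls back to a default when the key is absent. ConfigTree also
+        # exposes this pattern directly (see the get_bool('model.use_split_network', False)
+        # call above), so each block is equivalent to a one-liner sketch such as, e.g.:
+        #   self.loss_weight_diff_states = self.conf.get('model.loss_weight_diff_states', 1.)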
+ + if 'model.loss_tangential_diff_coef' in self.conf: + self.loss_tangential_diff_coef = float(self.conf['model.loss_tangential_diff_coef']) + else: + self.loss_tangential_diff_coef = 1. + + if 'model.use_penalty_based_friction' in self.conf: + self.use_penalty_based_friction = self.conf['model.use_penalty_based_friction'] + else: + self.use_penalty_based_friction = False + + if 'model.use_disp_based_friction' in self.conf: + self.use_disp_based_friction = self.conf['model.use_disp_based_friction'] + else: + self.use_disp_based_friction = False + + if 'model.use_sqrt_dist' in self.conf: + self.use_sqrt_dist = self.conf['model.use_sqrt_dist'] + else: + self.use_sqrt_dist = False + + if 'model.reg_loss_coef' in self.conf: + self.reg_loss_coef = float(self.conf['model.reg_loss_coef']) + else: + self.reg_loss_coef = 0. + + if 'model.contact_maintaining_dist_thres' in self.conf: + self.contact_maintaining_dist_thres = float(self.conf['model.contact_maintaining_dist_thres']) + else: + self.contact_maintaining_dist_thres = 0.1 + + if 'model.penetration_proj_k_to_robot' in self.conf: + self.penetration_proj_k_to_robot = float(self.conf['model.penetration_proj_k_to_robot']) + else: + self.penetration_proj_k_to_robot = 0.0 + + if 'model.use_mano_inputs' in self.conf: + self.use_mano_inputs = self.conf['model.use_mano_inputs'] + else: + self.use_mano_inputs = False + + + if 'model.use_split_params' in self.conf: + self.use_split_params = self.conf['model.use_split_params'] + else: + self.use_split_params = False + + + if 'model.use_sqr_spring_stiffness' in self.conf: + self.use_sqr_spring_stiffness = self.conf['model.use_sqr_spring_stiffness'] + else: + self.use_sqr_spring_stiffness = False + + + if 'model.use_pre_proj_frictions' in self.conf: + self.use_pre_proj_frictions = self.conf['model.use_pre_proj_frictions'] + else: + self.use_pre_proj_frictions = False + + if 'model.use_static_mus' in self.conf: + self.use_static_mus = self.conf['model.use_static_mus'] + else: + self.use_static_mus = False + + if 'model.contact_friction_static_mu' in self.conf: + self.contact_friction_static_mu = self.conf['model.contact_friction_static_mu'] + else: + self.contact_friction_static_mu = 1.0 + + if 'model.debug' in self.conf: + self.debug = self.conf['model.debug'] + else: + self.debug = False + + if 'model.robot_actions_diff_coef' in self.conf: + self.robot_actions_diff_coef = self.conf['model.robot_actions_diff_coef'] + else: + self.robot_actions_diff_coef = 0.1 + + if 'model.use_sdf_as_contact_dist' in self.conf: + self.use_sdf_as_contact_dist = self.conf['model.use_sdf_as_contact_dist'] + else: + self.use_sdf_as_contact_dist = False + + if 'model.use_contact_dist_as_sdf' in self.conf: + self.use_contact_dist_as_sdf = self.conf['model.use_contact_dist_as_sdf'] + else: + self.use_contact_dist_as_sdf = False + + if 'model.use_same_contact_spring_k' in self.conf: + self.use_same_contact_spring_k = self.conf['model.use_same_contact_spring_k'] + else: + self.use_same_contact_spring_k = False + + if 'model.minn_dist_threshold_robot_to_obj' in self.conf: + self.minn_dist_threshold_robot_to_obj = float(self.conf['model.minn_dist_threshold_robot_to_obj']) + else: + self.minn_dist_threshold_robot_to_obj = 0.0 + + if 'model.obj_mass' in self.conf: + self.obj_mass = float(self.conf['model.obj_mass']) + else: + self.obj_mass = 100.0 + + if 'model.diff_hand_tracking_coef' 
in self.conf: + self.diff_hand_tracking_coef = float(self.conf['model.diff_hand_tracking_coef']) + else: # + self.diff_hand_tracking_coef = 0.0 + + if 'model.use_mano_hand_for_test' in self.conf: + self.use_mano_hand_for_test = self.conf['model.use_mano_hand_for_test'] + else: + self.use_mano_hand_for_test = False + + if 'model.train_residual_friction' in self.conf: + self.train_residual_friction = self.conf['model.train_residual_friction'] + else: + self.train_residual_friction = False + + if 'model.use_LBFGS' in self.conf: + self.use_LBFGS = self.conf['model.use_LBFGS'] + else: + self.use_LBFGS = False + + if 'model.use_optimizable_params' in self.conf: + self.use_optimizable_params = self.conf['model.use_optimizable_params'] + else: + self.use_optimizable_params = False + + if 'model.penetration_determining' in self.conf: + self.penetration_determining = self.conf['model.penetration_determining'] + else: + self.penetration_determining = "sdf_of_canon" + + if 'model.sdf_sv_fn' in self.conf: + self.sdf_sv_fn = self.conf['model.sdf_sv_fn'] + else: + self.sdf_sv_fn = None + + if 'model.loss_scale_coef' in self.conf: + self.loss_scale_coef = float(self.conf['model.loss_scale_coef']) + else: + self.loss_scale_coef = 1.0 + + if 'model.penetration_proj_k_to_robot_friction' in self.conf: + self.penetration_proj_k_to_robot_friction = float(self.conf['model.penetration_proj_k_to_robot_friction']) + else: + self.penetration_proj_k_to_robot_friction = self.penetration_proj_k_to_robot + + if 'model.retar_only_glb' in self.conf: + self.retar_only_glb = self.conf['model.retar_only_glb'] + else: + self.retar_only_glb = False + + if 'model.optim_sim_model_params_from_mano' in self.conf: + self.optim_sim_model_params_from_mano = self.conf['model.optim_sim_model_params_from_mano'] + else: + self.optim_sim_model_params_from_mano = False + + + # opt_robo_states, opt_robo_glb_trans, opt_robo_glb_rot # + if 'model.opt_robo_states' in self.conf: + self.opt_robo_states = self.conf['model.opt_robo_states'] + else: + self.opt_robo_states = True + + if 'model.opt_robo_glb_trans' in self.conf: + self.opt_robo_glb_trans = self.conf['model.opt_robo_glb_trans'] + else: + self.opt_robo_glb_trans = False + + if 'model.opt_robo_glb_rot' in self.conf: + self.opt_robo_glb_rot = self.conf['model.opt_robo_glb_rot'] + else: + self.opt_robo_glb_rot = False + + # motion_reg_loss_coef + + if 'model.motion_reg_loss_coef' in self.conf: + self.motion_reg_loss_coef = self.conf['model.motion_reg_loss_coef'] + else: + self.motion_reg_loss_coef = 1.0 + + if 'model.drive_robot' in self.conf: + self.drive_robot = self.conf['model.drive_robot'] + else: + self.drive_robot = 'states' + + if 'model.use_scaled_urdf' in self.conf: + self.use_scaled_urdf =self.conf['model.use_scaled_urdf'] + else: + self.use_scaled_urdf = False + + if 'model.window_size' in self.conf: + self.window_size = self.conf['model.window_size'] + else: + self.window_size = 60 + + if 'model.use_taco' in self.conf: + self.use_taco = self.conf['model.use_taco'] + else: + self.use_taco = False + + if 'model.ang_vel_damping' in self.conf: + self.ang_vel_damping = float(self.conf['model.ang_vel_damping']) + else: + self.ang_vel_damping = 0.0 + + if 'model.drive_glb_delta' in self.conf: + self.drive_glb_delta = self.conf['model.drive_glb_delta'] + else: + self.drive_glb_delta = False + + if 'model.fix_obj' in self.conf: + self.fix_obj = self.conf['model.fix_obj'] + else: + self.fix_obj = False + + if 'model.diff_reg_coef' in self.conf: + self.diff_reg_coef = 
self.conf['model.diff_reg_coef'] + else: + self.diff_reg_coef = 0.01 + + if 'model.use_damping_params_vel' in self.conf: + self.use_damping_params_vel = self.conf['model.use_damping_params_vel'] + else: + self.use_damping_params_vel = False + + if 'train.ckpt_sv_freq' in self.conf: + self.ckpt_sv_freq = int(self.conf['train.ckpt_sv_freq']) + else: + self.ckpt_sv_freq = 100 + + + if 'model.optm_alltime_ks' in self.conf: + self.optm_alltime_ks = self.conf['model.optm_alltime_ks'] + else: + self.optm_alltime_ks = False + + if 'model.retar_dense_corres' in self.conf: + self.retar_dense_corres = self.conf['model.retar_dense_corres'] + else: + self.retar_dense_corres = False + + + if 'model.retar_delta_glb_trans' in self.conf: + self.retar_delta_glb_trans = self.conf['model.retar_delta_glb_trans'] + else: + self.retar_delta_glb_trans = False + + if 'model.use_multi_stages' in self.conf: + self.use_multi_stages = self.conf['model.use_multi_stages'] + else: + self.use_multi_stages = False + + if 'model.seq_start_idx' in self.conf: + self.seq_start_idx = self.conf['model.seq_start_idx'] + else: + self.seq_start_idx = 40 + + if 'model.obj_sdf_fn' in self.conf: + self.obj_sdf_fn = self.conf['model.obj_sdf_fn'] + else: + self.obj_sdf_fn = "" + + if 'model.kinematic_mano_gt_sv_fn' in self.conf: + self.kinematic_mano_gt_sv_fn = self.conf['model.kinematic_mano_gt_sv_fn'] + else: + self.kinematic_mano_gt_sv_fn = "" + + if 'model.scaled_obj_mesh_fn' in self.conf: + self.scaled_obj_mesh_fn = self.conf['model.scaled_obj_mesh_fn'] + else: + self.scaled_obj_mesh_fn = "" + + if 'model.ckpt_fn' in self.conf: + self.ckpt_fn = self.conf['model.ckpt_fn'] + else: + self.ckpt_fn = "" + + if 'model.load_optimized_init_transformations' in self.conf: + self.load_optimized_init_transformations = self.conf['model.load_optimized_init_transformations'] + else: + self.load_optimized_init_transformations = "" + + if 'model.optimize_dyn_actions' in self.conf: + self.optimize_dyn_actions = self.conf['model.optimize_dyn_actions'] + else: + self.optimize_dyn_actions = False + + if 'model.load_optimized_obj_transformations' in self.conf: + self.load_optimized_obj_transformations = self.conf['model.load_optimized_obj_transformations'] + else: + self.load_optimized_obj_transformations = None + + if 'model.train_pointset_acts_via_deltas' in self.conf: + self.train_pointset_acts_via_deltas = self.conf['model.train_pointset_acts_via_deltas'] + else: + self.train_pointset_acts_via_deltas = False + + + if 'model.drive_pointset' in self.conf: + self.drive_pointset = self.conf['model.drive_pointset'] + else: + self.drive_pointset = "states" + + if 'model.optimize_anchored_pts' in self.conf: + self.optimize_anchored_pts = self.conf['model.optimize_anchored_pts'] + else: + self.optimize_anchored_pts = True + + if 'model.optimize_pointset_motion_only' in self.conf: + self.optimize_pointset_motion_only = self.conf['model.optimize_pointset_motion_only'] + else: + self.optimize_pointset_motion_only = True + + print(f"optimize_dyn_actions: {self.optimize_dyn_actions}") + + + ### TODO: should create a file where all the information is included ### + # self.obj_sdf_fn, self.kinematic_mano_gt_sv_fn, self.scaled_obj_mesh_fn ## + ### + # if 'dataset.obj_idx' in self.conf: + # print(f"dataset.obj_idx:", self.conf['dataset.obj_idx']) + # self.obj_idx = self.conf['dataset.obj_idx'] + + # # ###### only for the grab dataset only currently ######## + # # GRAB_data_root = "/data1/xueyi/GRAB_extracted_test/train" + # # # 
/data/xueyi/GRAB/GRAB_extracted_test/train/102_obj.npy + # # if not os.path.exists(GRAB_data_root): + # # GRAB_data_root = "/data/xueyi/GRAB/GRAB_extracted_test/train" + + # # self.conf['model.obj_sdf_fn'] = os.path.join(GRAB_data_root, f"{self.obj_idx}_obj.npy") + # # self.conf['model.kinematic_mano_gt_sv_fn'] = os.path.join(GRAB_data_root, f"{self.obj_idx}_sv_dict.npy") + # # self.conf['model.scaled_obj_mesh_fn'] = os.path.join(GRAB_data_root, f"{self.obj_idx}_obj.obj") + # # self.conf['model.ckpt_fn'] = "" + # # self.conf['model.load_optimized_init_transformations'] = "" + + # ## grab data root ## + + # self.obj_sdf_fn = os.path.join(GRAB_data_root, f"{self.obj_idx}_obj.npy") + # self.kinematic_mano_gt_sv_fn = os.path.join(GRAB_data_root, f"{self.obj_idx}_sv_dict.npy") + # self.scaled_obj_mesh_fn = os.path.join(GRAB_data_root, f"{self.obj_idx}_obj.obj") + # # self.ckpt_fn = self.conf['model.ckpt_fn'] + # # self.load_optimized_init_transformations = self.conf['model.load_optimized_init_transformations'] + + # print(f"obj_sdf_fn:", self.obj_sdf_fn) + # print(f"kinematic_mano_gt_sv_fn:", self.kinematic_mano_gt_sv_fn) + # print(f"scaled_obj_mesh_fn:", self.scaled_obj_mesh_fn) + + + ## + + self.minn_init_passive_mesh = None + self.maxx_init_passive_mesh = None + + + self.mano_nn_substeps = 1 + + self.canon_passive_obj_verts = None + self.canon_passive_obj_normals = None + + tot_data = np.load(self.data_path, allow_pickle=True).item() + self.tot_data = tot_data + + if self.bending_net_type == "active_force_field_v18": + self.other_bending_network = fields.BendingNetworkActiveForceFieldForwardLagV18(**self.conf['model.bending_network'], nn_instances=self.nn_instances, minn_dist_threshold=self.minn_dist_threshold).to(self.device) + + + if mode in ["train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_grab", "train_point_set", "train_sparse_retar", "train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab", "train_point_set_retar", "train_point_set_retar_pts", "train_finger_kinematics_retargeting_arctic_twohands", "train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_arctic_twohands", "train_real_robot_actions_from_mano_model_rules_shadowhand", "train_redmax_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab", "train_dyn_mano_model", "train_dyn_mano_model_wreact"]: + + + if mode in ['train_finger_kinematics_retargeting_arctic_twohands', 'train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_arctic_twohands']: + self.timestep_to_passive_mesh, self.timestep_to_active_mesh, self.timestep_to_passive_mesh_normals = self.load_active_passive_timestep_to_mesh_twohands_arctic() + else: + if self.use_taco: + self.timestep_to_passive_mesh, self.timestep_to_active_mesh, self.timestep_to_passive_mesh_normals = self.load_active_passive_timestep_to_mesh_v3_taco() + else: + self.timestep_to_passive_mesh, self.timestep_to_active_mesh, self.timestep_to_passive_mesh_normals = self.load_active_passive_timestep_to_mesh_v3() + + if self.conf['model.penetration_determining'] == "ball_primitives": + self.center_verts, self.ball_r = self.get_ball_primitives() + + + self.other_bending_network.canon_passive_obj_verts = self.obj_verts + self.other_bending_network.canon_passive_obj_normals = self.obj_normals + + self.canon_passive_obj_verts = self.obj_verts + self.canon_passive_obj_normals = self.obj_normals + + + # tot_obj_quat, tot_reversed_obj_rot_mtx # + ''' Load passive object's SDF ''' + # self.obj_sdf_fn = 
self.obj_sdf_fn ## load passive object's sdf + self.other_bending_network.sdf_space_center = self.sdf_space_center + self.other_bending_network.sdf_space_scale = self.sdf_space_scale + # self.obj_sdf = np.load(self.obj_sdf_fn, allow_pickle=True) + self.obj_sdf = self.tot_data['obj_sdf'] + self.sdf_res = self.obj_sdf.shape[0] + self.other_bending_network.obj_sdf = self.obj_sdf + self.other_bending_network.sdf_res = self.sdf_res + + + if self.conf['model.penetration_determining'] == "sdf_of_canon": + print(f"Setting the penetration determining method to sdf_of_canon") + self.other_bending_network.penetration_determining = "sdf_of_canon" + elif self.conf['model.penetration_determining'] == 'plane_primitives': + print(f"setting the penetration determining method to plane_primitives with maxx_xyz: {self.maxx_init_passive_mesh}, minn_xyz: {self.minn_init_passive_mesh}") + self.other_bending_network.penetration_determining = "plane_primitives" # + elif self.conf['model.penetration_determining'] == 'ball_primitives': + print(f"Setting the penetration determining method to ball_primitives with ball_r: {self.ball_r}, center: {self.center_verts}") + self.other_bending_network.penetration_determining = "ball_primitives" # + self.other_bending_network.center_verts = self.center_verts + self.other_bending_network.ball_r = self.ball_r ## get the ball primitives here? ## + else: + raise NotImplementedError(f"penetration determining method {self.conf['model.penetration_determining']} not implemented") + + + elif mode in ["train_dyn_mano_model", "train_dyn_mano_model_wreact"]: + self.load_active_passive_timestep_to_mesh() + self.timestep_to_passive_mesh, self.timestep_to_active_mesh, self.timestep_to_passive_mesh_normals = self.load_active_passive_timestep_to_mesh_v3() + self.obj_sdf_grad = None + else: + + if not self.train_multi_seqs: + self.timestep_to_passive_mesh, self.timestep_to_active_mesh, self.timestep_to_passive_mesh_normals = self.load_active_passive_timestep_to_mesh() + + + self.other_bending_network.sdf_space_center = self.sdf_space_center + self.other_bending_network.sdf_space_scale = self.sdf_space_scale + self.other_bending_network.obj_sdf = self.obj_sdf # + self.other_bending_network.sdf_res = self.sdf_res # + else: + raise ValueError(f"Unrecognized bending net type: {self.bending_net_type}") + + + if self.maxx_init_passive_mesh is None and self.minn_init_passive_mesh is None: + self.calculate_collision_geometry_bounding_boxes() + + ###### initialize the dyn model ###### + for i_time_idx in range(self.n_timesteps): + self.other_bending_network.timestep_to_vel[i_time_idx] = torch.zeros((3,), dtype=torch.float32).cuda() + self.other_bending_network.timestep_to_point_accs[i_time_idx] = torch.zeros((3,), dtype=torch.float32).cuda() + self.other_bending_network.timestep_to_total_def[i_time_idx] = torch.zeros((3,), dtype=torch.float32).cuda() + self.other_bending_network.timestep_to_angular_vel[i_time_idx] = torch.zeros((3,), dtype=torch.float32).cuda() + self.other_bending_network.timestep_to_quaternion[i_time_idx] = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + self.other_bending_network.timestep_to_torque[i_time_idx] = torch.zeros((3,), dtype=torch.float32).cuda() + + + # calculate_collision_geometry_bounding_boxes, self.maxx_init_passive_mesh, self.minn_init_passive_mesh # # the best performed DGrasp-tracking? 
## + + ### set initial transformations ### + if mode in ["train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_grab", "train_point_set", "train_point_set_retar", "train_point_set_retar_pts", "train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab", "train_finger_kinematics_retargeting_arctic_twohands", "train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_arctic_twohands", "train_real_robot_actions_from_mano_model_rules_shadowhand", "train_redmax_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab", "train_dyn_mano_model_wreact"] and self.bending_net_type == "active_force_field_v18": + self.other_bending_network.timestep_to_total_def[0] = self.object_transl[0] + self.other_bending_network.timestep_to_quaternion[0] = self.tot_obj_quat[0] + self.other_bending_network.timestep_to_optimizable_offset[0] = self.object_transl[0].detach() + self.other_bending_network.timestep_to_optimizable_quaternion[0] = self.tot_obj_quat[0].detach() + self.other_bending_network.timestep_to_optimizable_rot_mtx[0] = self.tot_reversed_obj_rot_mtx[0].detach() + self.other_bending_network.timestep_to_optimizable_total_def[0] = self.object_transl[0].detach() + + if self.fix_obj: + print(f"fix_obj = True") + for i_fr in range(self.object_transl.size(0)): + self.other_bending_network.timestep_to_total_def[i_fr] = self.object_transl[i_fr] + self.other_bending_network.timestep_to_quaternion[i_fr] = self.tot_obj_quat[i_fr] + self.other_bending_network.timestep_to_optimizable_offset[i_fr] = self.object_transl[i_fr].detach() + self.other_bending_network.timestep_to_optimizable_quaternion[i_fr] = self.tot_obj_quat[i_fr].detach() + self.other_bending_network.timestep_to_optimizable_rot_mtx[i_fr] = self.tot_reversed_obj_rot_mtx[i_fr].detach() + self.other_bending_network.timestep_to_optimizable_total_def[i_fr] = self.object_transl[i_fr].detach() + + + + self.calculate_obj_inertia() + + self.other_bending_network.use_penalty_based_friction = self.use_penalty_based_friction + self.other_bending_network.use_disp_based_friction = self.use_disp_based_friction + self.other_bending_network.use_sqrt_dist = self.use_sqrt_dist + self.other_bending_network.contact_maintaining_dist_thres = self.contact_maintaining_dist_thres + self.other_bending_network.penetration_proj_k_to_robot = self.penetration_proj_k_to_robot + self.other_bending_network.use_split_params = self.use_split_params + # self.other_bending_network.use_split_params = self.use_split_params + self.other_bending_network.use_sqr_spring_stiffness = self.use_sqr_spring_stiffness + self.other_bending_network.use_pre_proj_frictions = self.use_pre_proj_frictions + self.other_bending_network.use_static_mus = self.use_static_mus + self.other_bending_network.contact_friction_static_mu = self.contact_friction_static_mu + self.other_bending_network.debug = self.debug + self.obj_sdf_grad = None + self.other_bending_network.obj_sdf_grad = self.obj_sdf_grad ## set obj_sdf # + self.other_bending_network.use_sdf_as_contact_dist = self.use_sdf_as_contact_dist + self.other_bending_network.use_contact_dist_as_sdf = self.use_contact_dist_as_sdf + self.other_bending_network.minn_dist_threshold_robot_to_obj = self.minn_dist_threshold_robot_to_obj + self.other_bending_network.use_same_contact_spring_k = self.use_same_contact_spring_k + self.other_bending_network.I_ref = self.I_ref + self.other_bending_network.I_inv_ref = self.I_inv_ref + self.other_bending_network.obj_mass = self.obj_mass + + # 
self.maxx_init_passive_mesh, self.minn_init_passive_mesh + self.other_bending_network.maxx_init_passive_mesh = self.maxx_init_passive_mesh + self.other_bending_network.minn_init_passive_mesh = self.minn_init_passive_mesh # ### init maximum passive meshe # + self.other_bending_network.train_residual_friction = self.train_residual_friction + ### use optimizable params ### + self.other_bending_network.use_optimizable_params = self.use_optimizable_params + self.other_bending_network.penetration_proj_k_to_robot_friction = self.penetration_proj_k_to_robot_friction + self.other_bending_network.ang_vel_damping = self.ang_vel_damping + self.other_bending_network.use_damping_params_vel = self.use_damping_params_vel ## use_damping_params + self.other_bending_network.optm_alltime_ks = self.optm_alltime_ks + + # self.ts_to_mesh_offset = self.load_calcu_timestep_to_passive_mesh_offset() + # self.ts_to_mesh_offset_for_opt = self.load_calcu_timestep_to_passive_mesh_offset() + + + params_to_train += list(self.other_bending_network.parameters()) # + self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate) + + + if len(self.ckpt_fn) > 0: + cur_ckpt_fn = self.ckpt_fn + self.load_checkpoint_via_fn(cur_ckpt_fn) + # if self.train_multi_seqs: + # damping_coefs = self.other_bending_network.damping_constant[0].weight.data + # spring_ks_values = self.other_bending_network.spring_ks_values[0].weight.data + # else: + damping_coefs = self.other_bending_network.damping_constant.weight.data + spring_ks_values = self.other_bending_network.spring_ks_values.weight.data + print(f"loaded ckpt has damping_coefs: {damping_coefs}, and spring ks values: {spring_ks_values}") + try: + friction_spring_ks = self.other_bending_network.spring_friction_ks_values.weight.data + print(f"friction_spring_ks:") + print(friction_spring_ks) + + obj_inertia_val = self.other_bending_network.obj_inertia.weight.data + optimizable_obj_mass = self.other_bending_network.optimizable_obj_mass.weight.data + print(f"obj_inertia_val: {obj_inertia_val ** 2}, optimizable_obj_mass: {optimizable_obj_mass ** 2}") + except: + pass + + time_constant = self.other_bending_network.time_constant.weight.data + print(f"time_constant: {time_constant}") + + ''' set gravity ''' + ### gravity ### # + self.gravity_acc = 9.8 + self.gravity_dir = torch.tensor([0., 0., -1]).float().cuda() + self.passive_obj_mass = 1. 
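+        # The passive object's reference inertia computed below follows the
+        # point-mass formula, assuming self.passive_obj_mass is distributed
+        # uniformly over the mesh vertices:
+        #   I_ref = sum_i m_i * (||r_i||^2 * I_3 - r_i r_i^T),  r_i = v_i - center.
+        # Its inverse (self.inv_I_ref) is cached and handed to the bending network.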
+ + if not self.bending_net_type == "active_force_field_v13": + #### init passive mesh center and I_ref # # + self.init_passive_mesh_center, self.I_ref = self.calculate_passive_mesh_center_intertia() + self.inv_I_ref = torch.linalg.inv(self.I_ref) + self.other_bending_network.passive_obj_inertia = self.I_ref + self.other_bending_network.passive_obj_inertia_inv = self.inv_I_ref + + def get_robohand_type_from_conf_fn(self, conf_model_fn): + if "redmax" in conf_model_fn: + hand_type = "redmax_hand" + elif "shadow" in conf_model_fn: + hand_type = "shadow_hand" + else: + raise ValueError(f"Cannot identify robot hand type from the conf_model file: {conf_model_fn}") + return hand_type + + def calculate_passive_mesh_center_intertia(self, ): # passive + # self.timestep_to_passive_mesh # ## passvie mesh center ### + init_passive_mesh = self.timestep_to_passive_mesh[0] ### nn_mesh_pts x 3 ### + init_passive_mesh_center = torch.mean(init_passive_mesh, dim=0) ### init_center ### + per_vert_mass = self.passive_obj_mass / float(init_passive_mesh.size(0)) + # (center to the vertex) + # assume the mass is uniformly distributed across all vertices ## + I_ref = torch.zeros((3, 3), dtype=torch.float32).cuda() + for i_v in range(init_passive_mesh.size(0)): + cur_vert = init_passive_mesh[i_v] + cur_r = cur_vert - init_passive_mesh_center + dot_r_r = torch.sum(cur_r * cur_r) + cur_eye_mtx = torch.eye(3, dtype=torch.float32).cuda() + r_mult_rT = torch.matmul(cur_r.unsqueeze(-1), cur_r.unsqueeze(0)) + I_ref += (dot_r_r * cur_eye_mtx - r_mult_rT) * per_vert_mass + return init_passive_mesh_center, I_ref + + def calculate_obj_inertia(self, ): + if self.canon_passive_obj_verts is None: + cur_init_passive_mesh_verts = self.timestep_to_passive_mesh[0].clone() + else: + cur_init_passive_mesh_verts = self.canon_passive_obj_verts.clone() + cur_init_passive_mesh_center = torch.mean(cur_init_passive_mesh_verts, dim=0) + cur_init_passive_mesh_verts = cur_init_passive_mesh_verts - cur_init_passive_mesh_center + # per_vert_mass= cur_init_passive_mesh_verts.size(0) / self.obj_mass + per_vert_mass = self.obj_mass / cur_init_passive_mesh_verts.size(0) + ## + print(f"[Calculating obj inertia] per_vert_mass: {per_vert_mass}") + I_ref = torch.zeros((3, 3), dtype=torch.float32).cuda() ## caclulate I_ref; I_inv_ref; ## + for i_v in range(cur_init_passive_mesh_verts.size(0)): + cur_vert = cur_init_passive_mesh_verts[i_v] + cur_r = cur_vert # - cur_init_passive_mesh_center + cur_v_inertia = per_vert_mass * (torch.sum(cur_r * cur_r) - torch.matmul(cur_r.unsqueeze(-1), cur_r.unsqueeze(0))) + # cur_v_inertia = torch.cross(cur_r, cur_r) * per_vert_mass3 # # + I_ref += cur_v_inertia + + print(f"In calculating inertia") + print(I_ref) + self.I_inv_ref = torch.linalg.inv(I_ref) + + self.I_ref = I_ref + + + + + # the collison geometry should be able to locate the contact points # + def calculate_collision_geometry_bounding_boxes(self, ): + # # + # nearest ppont ? 
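+        # The collision geometry is approximated by the axis-aligned bounding box
+        # of the passive mesh at timestep 0: per-axis max / min over its vertices
+        # give self.maxx_init_passive_mesh / self.minn_init_passive_mesh, which are
+        # later passed to the bending network (e.g. for the plane-primitive
+        # penetration check).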
+ init_passive_mesh = self.timestep_to_passive_mesh[0] + maxx_init_passive_mesh, _ = torch.max(init_passive_mesh, dim=0) ## (3, ) + minn_init_passive_mesh, _ = torch.min(init_passive_mesh, dim=0) ## (3, ) + # maxx init passive mesh; minn init passvie mesh ## + # contact passive mesh # + self.maxx_init_passive_mesh = maxx_init_passive_mesh + self.minn_init_passive_mesh = minn_init_passive_mesh # + + pass + + + def load_active_passive_timestep_to_mesh_v3(self, ): + + ### TODO: should create a file where all the information is included ### + # self.obj_sdf_fn, self.kinematic_mano_gt_sv_fn, self.scaled_obj_mesh_fn ## + + ## { 'save_dict': contents in self.kinematic_mano_gt_sv_fn, } + + sv_fn = self.kinematic_mano_gt_sv_fn + + print(f'Loading from {sv_fn}') + + + ''' Loading mano template ''' + mano_hand_template_fn = 'assets/mano_hand_template.obj' + # if not os.path.exists(mano_hand_template_fn): + # box_sv_fn = "/data2/xueyi/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # box_sv_dict = np.load(box_sv_fn, allow_pickle=True).item() + # mano_hand_faces = box_sv_dict['hand_faces'] + # mano_hand_verts = box_sv_dict['rhand_verts'][0] + # mano_hand_mesh = trimesh.Trimesh(mano_hand_verts, mano_hand_faces) + # mano_hand_mesh.export(mano_hand_template_fn) + mano_hand_temp = trimesh.load(mano_hand_template_fn, force='mesh') + hand_faces = mano_hand_temp.faces + ## get hand faces ## + self.hand_faces = torch.from_numpy(hand_faces).long().to(self.device) + + print(f"Loading data from {sv_fn}") + + # sv_dict = np.load(sv_fn, allow_pickle=True).item() + + sv_dict = self.tot_data['sv_dict'] + + # tot_data = + + + + print(f"sv_dict: {sv_dict.keys()}") + + obj_pcs = sv_dict['object_pc'] + obj_pcs = torch.from_numpy(obj_pcs).float().cuda() + # self.obj_pcs = obj_pcs + + + + obj_vertex_normals = sv_dict['obj_vertex_normals'] + obj_vertex_normals = torch.from_numpy(obj_vertex_normals).float().cuda() + self.obj_normals = obj_vertex_normals + + object_global_orient = sv_dict['object_global_orient'] # glboal orient + object_transl = sv_dict['object_transl'] + + + obj_faces = sv_dict['obj_faces'] + obj_faces = torch.from_numpy(obj_faces).long().cuda() + self.obj_faces = obj_faces + + obj_verts = sv_dict['obj_verts'] + minn_verts = np.min(obj_verts, axis=0) + maxx_verts = np.max(obj_verts, axis=0) + extent = maxx_verts - minn_verts + center_ori = (maxx_verts + minn_verts) / 2 + scale_ori = np.sqrt(np.sum(extent ** 2)) + obj_verts = torch.from_numpy(obj_verts).float().cuda() + + + + self.obj_verts = obj_verts + + + + mesh_scale = 0.8 + bbmin, _ = obj_verts.min(0) # + bbmax, _ = obj_verts.max(0) # + + center = (bbmin + bbmax) * 0.5 + scale = 2.0 * mesh_scale / (bbmax - bbmin).max() # bounding box's max # + # vertices = (vertices - center) * scale # (vertices - center) * scale # # + + self.sdf_space_center = center + self.sdf_space_scale = scale + # sdf_sv_fn = self.sdf_sv_fn + # self.obj_sdf = np.load(sdf_sv_fn, allow_pickle=True) + # self.sdf_res = self.obj_sdf.shape[0] + + + tot_reversed_obj_rot_mtx = [] + tot_obj_quat = [] ## rotation matrix + + + transformed_obj_verts = [] + for i_fr in range(object_global_orient.shape[0]): + cur_glb_rot = object_global_orient[i_fr] + cur_transl = object_transl[i_fr] + cur_transl = torch.from_numpy(cur_transl).float().cuda() + cur_glb_rot_struct = R.from_rotvec(cur_glb_rot) + cur_glb_rot_mtx = cur_glb_rot_struct.as_matrix() + cur_glb_rot_mtx = torch.from_numpy(cur_glb_rot_mtx).float().cuda() + + cur_transformed_verts = torch.matmul( + 
self.obj_verts, cur_glb_rot_mtx + ) + cur_transl.unsqueeze(0) + + cur_glb_rot_mtx_reversed = cur_glb_rot_mtx.contiguous().transpose(1, 0).contiguous() + tot_reversed_obj_rot_mtx.append(cur_glb_rot_mtx_reversed) + + cur_glb_rot_struct = R.from_matrix(cur_glb_rot_mtx_reversed.cpu().numpy()) + cur_obj_quat = cur_glb_rot_struct.as_quat() + cur_obj_quat = cur_obj_quat[[3, 0, 1, 2]] + cur_obj_quat = torch.from_numpy(cur_obj_quat).float().cuda() + tot_obj_quat.append(cur_obj_quat) + + # center_obj_verts = torch.mean(self.obj_verts, dim=0, keepdim=True) + # cur_transformed_verts = torch.matmul( + # (self.obj_verts - center_obj_verts), cur_glb_rot_mtx + # ) + cur_transl.unsqueeze(0) + center_obj_verts + + # cur_transformed_verts = torch.matmul( + # cur_glb_rot_mtx, self.obj_verts.transpose(1, 0) + # ).contiguous().transpose(1, 0).contiguous() + cur_transl.unsqueeze(0) + transformed_obj_verts.append(cur_transformed_verts) + transformed_obj_verts = torch.stack(transformed_obj_verts, dim=0) + + self.obj_pcs = transformed_obj_verts + + rhand_verts = sv_dict['rhand_verts'] + rhand_verts = torch.from_numpy(rhand_verts).float().cuda() + self.rhand_verts = rhand_verts ## rhand verts ## + + + + # if '30_sv_dict' in sv_fn: + # bbox_selected_verts_idxes = torch.tensor([1511, 1847, 2190, 2097, 2006, 2108, 1604], dtype=torch.long).cuda() + # obj_selected_verts = self.obj_verts[bbox_selected_verts_idxes] + # else: + obj_selected_verts = self.obj_verts.clone() + + maxx_init_passive_mesh, _ = torch.max(obj_selected_verts, dim=0) + minn_init_passive_mesh, _ = torch.min(obj_selected_verts, dim=0) + self.maxx_init_passive_mesh = maxx_init_passive_mesh + self.minn_init_passive_mesh = minn_init_passive_mesh + + + init_obj_verts = obj_verts # [0] # cannnot rotate it at all # frictional forces in the pybullet? 
# + + mesh_scale = 0.8 + bbmin, _ = init_obj_verts.min(0) # + bbmax, _ = init_obj_verts.max(0) # + print(f"bbmin: {bbmin}, bbmax: {bbmax}") + center = (bbmin + bbmax) * 0.5 + + scale = 2.0 * mesh_scale / (bbmax - bbmin).max() # bounding box's max # + # vertices = (vertices - center) * scale # (vertices - center) * scale # # + + self.sdf_space_center = center.detach().cpu().numpy() + self.sdf_space_scale = scale.detach().cpu().numpy() + + + # tot_obj_quat, tot_reversed_obj_rot_mtx # + tot_obj_quat = torch.stack(tot_obj_quat, dim=0) + tot_reversed_obj_rot_mtx = torch.stack(tot_reversed_obj_rot_mtx, dim=0) + self.tot_obj_quat = tot_obj_quat + self.tot_reversed_obj_rot_mtx = tot_reversed_obj_rot_mtx + + ## should save self.object_global_orient and self.object_transl ## + # object_global_orient, object_transl # + self.object_global_orient = torch.from_numpy(object_global_orient).float().cuda() + self.object_transl = torch.from_numpy(object_transl).float().cuda() + return transformed_obj_verts, rhand_verts, self.obj_normals + + + def load_active_passive_timestep_to_mesh_v3_taco(self, ): + sv_fn = "/data1/xueyi/GRAB_extracted_test/test/30_sv_dict.npy" + # /data1/xueyi/GRAB_extracted_test/train/20_sv_dict_real_obj.obj # data1 + + # start_idx = 40 + + start_idx = self.seq_start_idx + maxx_ws = 150 + # maxx_ws = 90 + + print(f"TACO loading data with start_idx: {start_idx}, maxx_ws: {maxx_ws}") + + + sv_fn = self.kinematic_mano_gt_sv_fn + + ### get hand faces ### + # sv_fn = "/data2/xueyi/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + ''' Loading mano template ''' + mano_hand_template_fn = 'assets/mano_hand_template.obj' + if not os.path.exists(mano_hand_template_fn): + box_sv_fn = "/data2/xueyi/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + box_sv_dict = np.load(box_sv_fn, allow_pickle=True).item() + mano_hand_faces = box_sv_dict['hand_faces'] + mano_hand_verts = box_sv_dict['rhand_verts'][0] + mano_hand_mesh = trimesh.Trimesh(mano_hand_verts, mano_hand_faces) + mano_hand_mesh.export(mano_hand_template_fn) + mano_hand_temp = trimesh.load(mano_hand_template_fn, force='mesh') + hand_faces = mano_hand_temp.faces + self.hand_faces = torch.from_numpy(hand_faces).long().to(self.device) + + + + print(f"Loading data from {sv_fn}") + + # sv_dict = np.load(sv_fn, allow_pickle=True).item() + + sv_dict = pkl.load(open(sv_fn, "rb")) + + self.hand_faces = torch.from_numpy(sv_dict['hand_faces']).float().cuda() + + print(f"sv_dict: {sv_dict.keys()}") + + maxx_ws = min(maxx_ws, len(sv_dict['obj_verts']) - start_idx) + + obj_pcs = sv_dict['obj_verts'][start_idx: start_idx + maxx_ws] + obj_pcs = torch.from_numpy(obj_pcs).float().cuda() + + self.obj_pcs = obj_pcs + # obj_vertex_normals = sv_dict['obj_vertex_normals'] + # obj_vertex_normals = torch.from_numpy(obj_vertex_normals).float().cuda() + self.obj_normals = torch.zeros_like(obj_pcs[0]) ### get the obj naormal vectors ## + + object_pose = sv_dict['obj_pose'][start_idx: start_idx + maxx_ws] + object_pose = torch.from_numpy(object_pose).float().cuda() ### nn_frames x 4 x 4 ### + object_global_orient_mtx = object_pose[:, :3, :3 ] ## nn_frames x 3 x 3 ## + object_transl = object_pose[:, :3, 3] ## nn_frmaes x 3 ## + + + # object_global_orient = sv_dict['object_global_orient'] # glboal orient + # object_transl = sv_dict['object_transl'] + + + obj_faces = sv_dict['obj_faces'] + obj_faces = torch.from_numpy(obj_faces).long().cuda() + self.obj_faces = obj_faces # [0] ### obj faces ## + + # obj_verts = 
sv_dict['obj_verts'] + # minn_verts = np.min(obj_verts, axis=0) + # maxx_verts = np.max(obj_verts, axis=0) + # extent = maxx_verts - minn_verts + # center_ori = (maxx_verts + minn_verts) / 2 + # scale_ori = np.sqrt(np.sum(extent ** 2)) + # obj_verts = torch.from_numpy(obj_verts).float().cuda() + + init_obj_verts = obj_pcs[0] + init_obj_ornt_mtx = object_global_orient_mtx[0] + init_obj_transl = object_transl[0] + + canon_obj_verts = torch.matmul( + init_obj_ornt_mtx.contiguous().transpose(1, 0).contiguous(), (init_obj_verts - init_obj_transl.unsqueeze(0)).transpose(1, 0).contiguous() + ).transpose(1, 0).contiguous() ### + self.obj_verts = canon_obj_verts.clone() + obj_verts = canon_obj_verts.clone() + + + # self.obj_verts = obj_verts + + + + # mesh_scale = 0.8 + # bbmin, _ = obj_verts.min(0) # + # bbmax, _ = obj_verts.max(0) # + + # center = (bbmin + bbmax) * 0.5 + # scale = 2.0 * mesh_scale / (bbmax - bbmin).max() # bounding box's max # + # # vertices = (vertices - center) * scale # (vertices - center) * scale # # + + # self.sdf_space_center = center + # self.sdf_space_scale = scale + + + sdf_sv_fn = self.obj_sdf_fn + print(f'sdf_sv_fn: {sdf_sv_fn}') + self.obj_sdf = np.load(sdf_sv_fn, allow_pickle=True) + self.sdf_res = self.obj_sdf.shape[0] + + self.obj_sdf = torch.from_numpy(self.obj_sdf).float().cuda() + # init_obj_pcs = obj_pcs[0].detach().cpu().numpy() + # init_glb_rot = object_global_orient[0] + # init_glb_trans = object_transl[0] + # init_glb_rot_struct = R.from_rotvec(init_glb_rot) + # init_glb_rot_mtx = init_glb_rot_struct.as_matrix() + # self.obj_verts = np.matmul((init_obj_pcs - init_glb_trans[None]), init_glb_rot_mtx.T) + # obj_verts = self.obj_verts + # minn_verts = np.min(obj_verts, axis=0) + # maxx_verts = np.max(obj_verts, axis=0) + # extent = maxx_verts - minn_verts + # scale_cur = np.sqrt(np.sum(extent ** 2)) + + # center_cur= (minn_verts + maxx_verts) / 2 + + # obj_verts = (sv_dict['obj_verts'] - center_ori[None]) / scale_ori * scale_cur + center_cur[None] + + # obj_verts = torch.from_numpy(obj_verts).float().cuda() + # self.obj_verts = obj_verts + + # sv_fn_obj_fn = sv_fn[:-4] + "_real_obj.obj" + # scaled_obj = trimesh.Trimesh(vertices=self.obj_verts.detach().cpu().numpy(), faces=self.obj_faces.detach().cpu().numpy(), vertex_normals=self.obj_normals.detach().cpu().numpy()) + # scaled_obj.export(sv_fn_obj_fn) + # print(f"Scaled obj saved to {scaled_obj}") + + tot_obj_quat = [] + + for i_fr in range(object_global_orient_mtx.shape[0]): + cur_ornt_mtx = object_global_orient_mtx[i_fr] + cur_ornt_mtx_np = cur_ornt_mtx.detach().cpu().numpy() ### cur ornt mtx ## + cur_ornt_rot_struct = R.from_matrix(cur_ornt_mtx_np) + cur_ornt_quat = cur_ornt_rot_struct.as_quat() + cur_ornt_quat = cur_ornt_quat[[3, 0, 1, 2]] + tot_obj_quat.append(torch.from_numpy(cur_ornt_quat).float().cuda()) ### float cuda ## + + # tot_obj_quat = np.stack(tot_obj_quat, axis=0) ## obj quat ## + tot_obj_quat = torch.stack(tot_obj_quat, dim=0) + + + # tot_reversed_obj_rot_mtx = [] + # tot_obj_quat = [] ## rotation matrix + + + # transformed_obj_verts = [] + # for i_fr in range(object_global_orient.shape[0]): + # cur_glb_rot = object_global_orient[i_fr] + # cur_transl = object_transl[i_fr] + # cur_transl = torch.from_numpy(cur_transl).float().cuda() + # cur_glb_rot_struct = R.from_rotvec(cur_glb_rot) + # cur_glb_rot_mtx = cur_glb_rot_struct.as_matrix() + # cur_glb_rot_mtx = torch.from_numpy(cur_glb_rot_mtx).float().cuda() + + # cur_transformed_verts = torch.matmul( + # self.obj_verts, cur_glb_rot_mtx + # ) + 
cur_transl.unsqueeze(0) + + # cur_glb_rot_mtx_reversed = cur_glb_rot_mtx.contiguous().transpose(1, 0).contiguous() + # tot_reversed_obj_rot_mtx.append(cur_glb_rot_mtx_reversed) + + # cur_glb_rot_struct = R.from_matrix(cur_glb_rot_mtx_reversed.cpu().numpy()) + # cur_obj_quat = cur_glb_rot_struct.as_quat() + # cur_obj_quat = cur_obj_quat[[3, 0, 1, 2]] + # cur_obj_quat = torch.from_numpy(cur_obj_quat).float().cuda() + # tot_obj_quat.append(cur_obj_quat) + + # # center_obj_verts = torch.mean(self.obj_verts, dim=0, keepdim=True) + # # cur_transformed_verts = torch.matmul( + # # (self.obj_verts - center_obj_verts), cur_glb_rot_mtx + # # ) + cur_transl.unsqueeze(0) + center_obj_verts + + # # cur_transformed_verts = torch.matmul( + # # cur_glb_rot_mtx, self.obj_verts.transpose(1, 0) + # # ).contiguous().transpose(1, 0).contiguous() + cur_transl.unsqueeze(0) + # transformed_obj_verts.append(cur_transformed_verts) + # transformed_obj_verts = torch.stack(transformed_obj_verts, dim=0) + + + rhand_verts = sv_dict['hand_verts'][start_idx: start_idx + maxx_ws] + rhand_verts = torch.from_numpy(rhand_verts).float().cuda() + self.rhand_verts = rhand_verts ## rhand verts ## + + + + # if '30_sv_dict' in sv_fn: + # bbox_selected_verts_idxes = torch.tensor([1511, 1847, 2190, 2097, 2006, 2108, 1604], dtype=torch.long).cuda() + # obj_selected_verts = self.obj_verts[bbox_selected_verts_idxes] + # else: + # obj_selected_verts = self.obj_verts.clone() + + # maxx_init_passive_mesh, _ = torch.max(obj_selected_verts, dim=0) + # minn_init_passive_mesh, _ = torch.min(obj_selected_verts, dim=0) + # self.maxx_init_passive_mesh = maxx_init_passive_mesh + # self.minn_init_passive_mesh = minn_init_passive_mesh + + + init_obj_verts = obj_verts # [0] # cannnot rotate it at all # frictional forces in the pybullet? 
# + + mesh_scale = 0.8 + bbmin, _ = init_obj_verts.min(0) # + bbmax, _ = init_obj_verts.max(0) # + print(f"bbmin: {bbmin}, bbmax: {bbmax}") + center = (bbmin + bbmax) * 0.5 + + scale = 2.0 * mesh_scale / (bbmax - bbmin).max() # bounding box's max # + # vertices = (vertices - center) * scale # (vertices - center) * scale # # + + self.sdf_space_center = center.detach().cpu().numpy() + self.sdf_space_scale = scale.detach().cpu().numpy() + # # sdf_sv_fn = "/data/xueyi/diffsim/NeuS/init_box_mesh.npy" + # if not os.path.exists(sdf_sv_fn): + # sdf_sv_fn = "/home/xueyi/diffsim/NeuS/init_box_mesh.npy" + # self.obj_sdf = np.load(sdf_sv_fn, allow_pickle=True) + # self.sdf_res = self.obj_sdf.shape[0] + # print(f"obj_sdf loaded from {sdf_sv_fn} with shape {self.obj_sdf.shape}") + + + + + + # tot_obj_quat, tot_reversed_obj_rot_mtx # + # tot_obj_quat = torch.stack(tot_obj_quat, dim=0) + tot_reversed_obj_rot_mtx = object_global_orient_mtx.clone() # torch.stack(tot_reversed_obj_rot_mtx, dim=0) + self.tot_obj_quat = tot_obj_quat + self.tot_reversed_obj_rot_mtx = tot_reversed_obj_rot_mtx + + ## should save self.object_global_orient and self.object_transl ## + # object_global_orient, object_transl # + # self.object_global_orient = torch.from_numpy(object_global_orient).float().cuda() + self.object_transl = object_transl.clone() # torch.from_numpy(object_transl).float().cuda() + return self.obj_pcs, rhand_verts, self.obj_normals + + + + def load_active_passive_timestep_to_mesh_twohands_arctic(self, ): + # sv_fn = "/data1/xueyi/GRAB_extracted_test/test/30_sv_dict.npy" + # /data1/xueyi/GRAB_extracted_test/train/20_sv_dict_real_obj.obj # data1 + import utils.utils as utils + from manopth.manolayer import ManoLayer + + # mano_hand_template_fn = 'assets/mano_hand_template.obj' + # # if not os.path.exists(mano_hand_template_fn): + # # box_sv_fn = "/data2/xueyi/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # # box_sv_dict = np.load(box_sv_fn, allow_pickle=True).item() + # # mano_hand_faces = box_sv_dict['hand_faces'] + # # mano_hand_verts = box_sv_dict['rhand_verts'][0] + # # mano_hand_mesh = trimesh.Trimesh(mano_hand_verts, mano_hand_faces) + # # mano_hand_mesh.export(mano_hand_template_fn) + # mano_hand_temp = trimesh.load(mano_hand_template_fn, force='mesh') + # hand_faces = mano_hand_temp.faces + + + rgt_hand_pkl_fn = "assets/right_20230917_004.pkl" + data_dict = pkl.load(open(rgt_hand_pkl_fn, "rb")) + hand_faces = data_dict['hand_faces'] # ['faces'] + + self.hand_faces = torch.from_numpy(hand_faces).long().to(self.device) + + self.start_idx = 20 + # self.window_size = 60 + self.window_size = self.window_size + start_idx = self.start_idx + window_size = self.window_size + + + sv_fn = self.kinematic_mano_gt_sv_fn + + # gt_data_folder = "/".join(sv_fn.split("/")[:-1]) ## + gt_data_fn_name = sv_fn.split("/")[-1].split(".")[0] + arctic_processed_data_sv_folder = "/home/xueyi/diffsim/NeuS/raw_data/arctic_processed_canon_obj" + if not os.path.exists(arctic_processed_data_sv_folder): + arctic_processed_data_sv_folder = "/root/diffsim/quasi-dyn/raw_data/arctic_processed_canon_obj" + gt_data_canon_obj_sv_fn = f"{arctic_processed_data_sv_folder}/{gt_data_fn_name}_canon_obj.obj" + + print(f"Loading data from {sv_fn}") + + sv_dict = np.load(sv_fn, allow_pickle=True).item() + + tot_frames_nn = sv_dict["obj_rot"].shape[0] + window_size = min(tot_frames_nn - self.start_idx, window_size) + self.window_size = window_size + + + object_global_orient = sv_dict["obj_rot"][start_idx: start_idx + 
window_size] # num_frames x 3 + object_transl = sv_dict["obj_trans"][start_idx: start_idx + window_size] * 0.001 # num_frames x 3 + obj_pcs = sv_dict["verts.object"][start_idx: start_idx + window_size] + + # obj_pcs = sv_dict['object_pc'] + obj_pcs = torch.from_numpy(obj_pcs).float().cuda() + + + obj_vertex_normals = torch.zeros_like(obj_pcs) + obj_tot_normals = obj_vertex_normals + print(f"obj_normals: {obj_tot_normals.size()}") + # /data/xueyi/sim/arctic_processed_data/processed_seqs/s01/espressomachine_use_01.npy + + # obj_vertex_normals = sv_dict['obj_vertex_normals'] + # obj_vertex_normals = torch.from_numpy(obj_vertex_normals).float().cuda() + # self.obj_normals = obj_vertex_normals + + # object_global_orient = sv_dict['object_global_orient'] # glboal orient + # object_transl = sv_dict['object_transl'] + + + obj_faces = sv_dict['f'][0] + obj_faces = torch.from_numpy(obj_faces).long().cuda() + self.obj_faces = obj_faces + + # obj_verts = sv_dict['verts.object'] + # minn_verts = np.min(obj_verts, axis=0) + # maxx_verts = np.max(obj_verts, axis=0) + # extent = maxx_verts - minn_verts + # # center_ori = (maxx_verts + minn_verts) / 2 + # # scale_ori = np.sqrt(np.sum(extent ** 2)) + # obj_verts = torch.from_numpy(obj_verts).float().cuda() + + + # obj_sv_path = "/data3/datasets/xueyi/arctic/arctic_data/data/meta/object_vtemplates" + # obj_name = sv_fn.split("/")[-1].split("_")[0] + # obj_mesh_fn = os.path.join(obj_sv_path, obj_name, "mesh.obj") + # print(f"loading from {obj_mesh_fn}") + # # template_obj_vs, template_obj_fs = trimesh.load(obj_mesh_fn, force='mesh') + # template_obj_vs, template_obj_fs = utils.read_obj_file_ours(obj_mesh_fn, sub_one=True) + + + + + # self.obj_verts = obj_verts + init_obj_verts = obj_pcs[0] + init_obj_rot_vec = object_global_orient[0] + init_obj_transl = object_transl[0] + + init_obj_transl = torch.from_numpy(init_obj_transl).float().cuda() + init_rot_struct = R.from_rotvec(init_obj_rot_vec) + + init_glb_rot_mtx = init_rot_struct.as_matrix() + init_glb_rot_mtx = torch.from_numpy(init_glb_rot_mtx).float().cuda() + # ## reverse the global rotation matrix ## + init_glb_rot_mtx_reversed = init_glb_rot_mtx.contiguous().transpose(1, 0).contiguous() + # nn_obj_verts x 3 ## + ##### ## initial tarns of the object and the hand ## + # canon_obj_verts = torch.matmul( + # (init_obj_verts - init_obj_transl.unsqueeze(0)), init_glb_rot_mtx.contiguous().transpose(1, 0).contiguous() + # ) + + ## (R (v - t)^T)^T = (v - t) R^T + canon_obj_verts = torch.matmul( + init_glb_rot_mtx_reversed.transpose(1, 0).contiguous(), (init_obj_verts - init_obj_transl.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + + ## get canon obj verts ## + + # canon_obj_verts = obj_pcs[0].clone() + self.obj_verts = canon_obj_verts.clone() + obj_verts = canon_obj_verts.clone() + + + #### save canonical obj mesh #### + print(f"canon_obj_verts: {canon_obj_verts.size()}, obj_faces: {obj_faces.size()}") + canon_obj_mesh = trimesh.Trimesh(vertices=canon_obj_verts.detach().cpu().numpy(), faces=obj_faces.detach().cpu().numpy()) + canon_obj_mesh.export(gt_data_canon_obj_sv_fn) + print(f"canonical obj exported to {gt_data_canon_obj_sv_fn}") + #### save canonical obj mesh #### + + + + # # glb_rot xx obj_verts + obj_trans = cur_obj_verts + # canon_obj_verts = torch.matmul( + # init_glb_rot_mtx.transpose(1, 0).contiguous(), self.obj_verts[0] - init_obj_transl.unsqueeze(0) + # ) + + + # obj_verts = torch.from_numpy(template_obj_vs).float().cuda() + + self.obj_verts = 
obj_verts.clone() + + + mesh_scale = 0.8 + bbmin, _ = obj_verts.min(0) # + bbmax, _ = obj_verts.max(0) # + + center = (bbmin + bbmax) * 0.5 + scale = 2.0 * mesh_scale / (bbmax - bbmin).max() # bounding box's max # + # vertices = (vertices - center) * scale # (vertices - center) * scale # # + + self.sdf_space_center = center + self.sdf_space_scale = scale + # sdf_sv_fn = self.sdf_sv_fn + # self.obj_sdf = np.load(sdf_sv_fn, allow_pickle=True) + # self.sdf_res = self.obj_sdf.shape[0] + + + # init_obj_pcs = obj_pcs[0].detach().cpu().numpy() + # init_glb_rot = object_global_orient[0] + # init_glb_trans = object_transl[0] + # init_glb_rot_struct = R.from_rotvec(init_glb_rot) + # init_glb_rot_mtx = init_glb_rot_struct.as_matrix() + # self.obj_verts = np.matmul((init_obj_pcs - init_glb_trans[None]), init_glb_rot_mtx.T) + # obj_verts = self.obj_verts + # minn_verts = np.min(obj_verts, axis=0) + # maxx_verts = np.max(obj_verts, axis=0) + # extent = maxx_verts - minn_verts + # scale_cur = np.sqrt(np.sum(extent ** 2)) + + # center_cur= (minn_verts + maxx_verts) / 2 + + # obj_verts = (sv_dict['obj_verts'] - center_ori[None]) / scale_ori * scale_cur + center_cur[None] + + # obj_verts = torch.from_numpy(obj_verts).float().cuda() + # self.obj_verts = obj_verts + + # sv_fn_obj_fn = sv_fn[:-4] + "_real_obj.obj" + # scaled_obj = trimesh.Trimesh(vertices=self.obj_verts.detach().cpu().numpy(), faces=self.obj_faces.detach().cpu().numpy(), vertex_normals=self.obj_normals.detach().cpu().numpy()) + # scaled_obj.export(sv_fn_obj_fn) + # print(f"Scaled obj saved to {scaled_obj}") + + + + tot_reversed_obj_rot_mtx = [] + tot_obj_quat = [] ## rotation matrix + + re_transformed_obj_verts = [] + + # transformed_obj_verts = [] + for i_fr in range(object_global_orient.shape[0]): + cur_glb_rot = object_global_orient[i_fr] + cur_transl = object_transl[i_fr] + cur_transl = torch.from_numpy(cur_transl).float().cuda() + cur_glb_rot_struct = R.from_rotvec(cur_glb_rot) + cur_glb_rot_mtx = cur_glb_rot_struct.as_matrix() + cur_glb_rot_mtx = torch.from_numpy(cur_glb_rot_mtx).float().cuda() + + # transformed verts ## canon_verts x R + t = transformed_verts # + # (transformed_verts - t) x R^T = canon_verts # + # cur_transformed_verts = torch.matmul( + # self.obj_verts, cur_glb_rot_mtx + # ) + cur_transl.unsqueeze(0) + + cur_glb_rot_mtx_reversed = cur_glb_rot_mtx.contiguous().transpose(1, 0).contiguous() + tot_reversed_obj_rot_mtx.append(cur_glb_rot_mtx_reversed) + + cur_glb_rot_struct = R.from_matrix(cur_glb_rot_mtx_reversed.cpu().numpy()) + cur_obj_quat = cur_glb_rot_struct.as_quat() + cur_obj_quat = cur_obj_quat[[3, 0, 1, 2]] + cur_obj_quat = torch.from_numpy(cur_obj_quat).float().cuda() + tot_obj_quat.append(cur_obj_quat) + + cur_re_transformed_obj_verts = torch.matmul( + cur_glb_rot_mtx_reversed, self.obj_verts.transpose(1, 0) + ).transpose(1, 0) + cur_transl.unsqueeze(0) + re_transformed_obj_verts.append(cur_re_transformed_obj_verts) + + # cur_re_transformed_obj_verts = torch.matmul( + # cur_glb_rot_mtx, self.obj_verts.transpose(1, 0) + # ).transpose(1, 0) + cur_transl.unsqueeze(0) + # re_transformed_obj_verts.append(cur_re_transformed_obj_verts) + + # center_obj_verts = torch.mean(self.obj_verts, dim=0, keepdim=True) + # cur_transformed_verts = torch.matmul( + # (self.obj_verts - center_obj_verts), cur_glb_rot_mtx + # ) + cur_transl.unsqueeze(0) + center_obj_verts + + # cur_transformed_verts = torch.matmul( + # cur_glb_rot_mtx, self.obj_verts.transpose(1, 0) + # ).contiguous().transpose(1, 0).contiguous() + 
cur_transl.unsqueeze(0) + # transformed_obj_verts.append(self.obj_) + # transformed_obj_verts = torch.stack(transformed_obj_verts, dim=0) + + transformed_obj_verts = obj_pcs.clone() + + + + # rhand_verts = sv_dict['rhand_verts'] + # rhand_verts = torch.from_numpy(rhand_verts).float().cuda() + # self.rhand_verts = rhand_verts ## rhand verts ## + + + self.mano_path = "/data1/xueyi/mano_models/mano/models" ### mano_path + if not os.path.exists(self.mano_path): + self.mano_path = '/data/xueyi/mano_v1_2/models' + self.rgt_mano_layer = ManoLayer( + flat_hand_mean=False, + side='right', + mano_root=self.mano_path, + ncomps=45, + use_pca=False, + ).cuda() + + self.lft_mano_layer = ManoLayer( + flat_hand_mean=False, + side='left', + mano_root=self.mano_path, + ncomps=45, + use_pca=False, + ).cuda() + + + ##### rhand parameters ##### + rhand_global_orient_gt, rhand_pose_gt = sv_dict["rot_r"], sv_dict["pose_r"] + # print(f"rhand_global_orient_gt: {rhand_global_orient_gt.shape}") + rhand_global_orient_gt = rhand_global_orient_gt[start_idx: start_idx + self.window_size] + # print(f"rhand_global_orient_gt: {rhand_global_orient_gt.shape}, start_idx: {start_idx}, window_size: {self.window_size}, len: {self.len}") + rhand_pose_gt = rhand_pose_gt[start_idx: start_idx + self.window_size] + + rhand_global_orient_gt = rhand_global_orient_gt.reshape(self.window_size, -1).astype(np.float32) + rhand_pose_gt = rhand_pose_gt.reshape(self.window_size, -1).astype(np.float32) + + rhand_transl, rhand_betas = sv_dict["trans_r"], sv_dict["shape_r"][0] + rhand_transl, rhand_betas = rhand_transl[start_idx: start_idx + self.window_size], rhand_betas + + # print(f"rhand_transl: {rhand_transl.shape}, rhand_betas: {rhand_betas.shape}") + rhand_transl = rhand_transl.reshape(self.window_size, -1).astype(np.float32) + rhand_betas = rhand_betas.reshape(-1).astype(np.float32) + + rhand_global_orient_var = torch.from_numpy(rhand_global_orient_gt).float().cuda() + rhand_pose_var = torch.from_numpy(rhand_pose_gt).float().cuda() + rhand_beta_var = torch.from_numpy(rhand_betas).float().cuda() + rhand_transl_var = torch.from_numpy(rhand_transl).float().cuda() + # R.from_rotvec(obj_rot).as_matrix() + ##### rhand parameters ##### + + + ##### lhand parameters ##### + lhand_global_orient_gt, lhand_pose_gt = sv_dict["rot_l"], sv_dict["pose_l"] + # print(f"rhand_global_orient_gt: {rhand_global_orient_gt.shape}") + lhand_global_orient_gt = lhand_global_orient_gt[start_idx: start_idx + self.window_size] + # print(f"rhand_global_orient_gt: {rhand_global_orient_gt.shape}, start_idx: {start_idx}, window_size: {self.window_size}, len: {self.len}") + lhand_pose_gt = lhand_pose_gt[start_idx: start_idx + self.window_size] + + lhand_global_orient_gt = lhand_global_orient_gt.reshape(self.window_size, -1).astype(np.float32) + lhand_pose_gt = lhand_pose_gt.reshape(self.window_size, -1).astype(np.float32) + + lhand_transl, lhand_betas = sv_dict["trans_l"], sv_dict["shape_l"][0] + lhand_transl, lhand_betas = lhand_transl[start_idx: start_idx + self.window_size], lhand_betas + + # print(f"rhand_transl: {rhand_transl.shape}, rhand_betas: {rhand_betas.shape}") + lhand_transl = lhand_transl.reshape(self.window_size, -1).astype(np.float32) + lhand_betas = lhand_betas.reshape(-1).astype(np.float32) + + lhand_global_orient_var = torch.from_numpy(lhand_global_orient_gt).float().cuda() + lhand_pose_var = torch.from_numpy(lhand_pose_gt).float().cuda() + lhand_beta_var = torch.from_numpy(lhand_betas).float().cuda() + lhand_transl_var = 
torch.from_numpy(lhand_transl).float().cuda() # self.window_size x 3 + # R.from_rotvec(obj_rot).as_matrix() + ##### lhand parameters ##### + + + + rhand_verts, rhand_joints = self.rgt_mano_layer( + torch.cat([rhand_global_orient_var, rhand_pose_var], dim=-1), + rhand_beta_var.unsqueeze(0).repeat(self.window_size, 1).view(-1, 10), rhand_transl_var + ) + ### rhand_joints: for joints ### + rhand_verts = rhand_verts * 0.001 + rhand_joints = rhand_joints * 0.001 + + + lhand_verts, lhand_joints = self.lft_mano_layer( + torch.cat([lhand_global_orient_var, lhand_pose_var], dim=-1), + lhand_beta_var.unsqueeze(0).repeat(self.window_size, 1).view(-1, 10), lhand_transl_var + ) + ### rhand_joints: for joints ### + lhand_verts = lhand_verts * 0.001 + lhand_joints = lhand_joints * 0.001 + + + ### lhand and the rhand ### + # rhand_verts, lhand_verts # + self.rhand_verts = rhand_verts + self.lhand_verts = lhand_verts + + self.hand_faces = self.rgt_mano_layer.th_faces + + + + if '30_sv_dict' in sv_fn: + bbox_selected_verts_idxes = torch.tensor([1511, 1847, 2190, 2097, 2006, 2108, 1604], dtype=torch.long).cuda() + obj_selected_verts = self.obj_verts[bbox_selected_verts_idxes] + else: + obj_selected_verts = self.obj_verts.clone() + + maxx_init_passive_mesh, _ = torch.max(obj_selected_verts, dim=0) + minn_init_passive_mesh, _ = torch.min(obj_selected_verts, dim=0) + self.maxx_init_passive_mesh = maxx_init_passive_mesh + self.minn_init_passive_mesh = minn_init_passive_mesh + + + init_obj_verts = obj_verts # [0] # cannnot rotate it at all # frictional forces in the pybullet? # + + mesh_scale = 0.8 + bbmin, _ = init_obj_verts.min(0) # + bbmax, _ = init_obj_verts.max(0) # + print(f"bbmin: {bbmin}, bbmax: {bbmax}") + center = (bbmin + bbmax) * 0.5 + + self.obj_normals = torch.zeros_like(obj_verts) + + scale = 2.0 * mesh_scale / (bbmax - bbmin).max() # bounding box's max # + # vertices = (vertices - center) * scale # (vertices - center) * scale # # + + self.sdf_space_center = center.detach().cpu().numpy() + self.sdf_space_scale = scale.detach().cpu().numpy() + # sdf_sv_fn = "/data/xueyi/diffsim/NeuS/init_box_mesh.npy" + # if not os.path.exists(sdf_sv_fn): + # sdf_sv_fn = "/home/xueyi/diffsim/NeuS/init_box_mesh.npy" + # self.obj_sdf = np.load(sdf_sv_fn, allow_pickle=True) + # self.sdf_res = self.obj_sdf.shape[0] + # print(f"obj_sdf loaded from {sdf_sv_fn} with shape {self.obj_sdf.shape}") + + re_transformed_obj_verts = torch.stack(re_transformed_obj_verts, dim=0) + self.re_transformed_obj_verts = re_transformed_obj_verts + + # tot_obj_quat, tot_reversed_obj_rot_mtx # + tot_obj_quat = torch.stack(tot_obj_quat, dim=0) ## tot obj quat ## + tot_reversed_obj_rot_mtx = torch.stack(tot_reversed_obj_rot_mtx, dim=0) + self.tot_obj_quat = tot_obj_quat # obj quat # + + # self.tot_obj_quat[0, 0] = 1. + # self.tot_obj_quat[0, 1] = 0. + # self.tot_obj_quat[0, 2] = 0. + # self.tot_obj_quat[0, 3] = 0. 
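+        # Quaternion convention: scipy's Rotation.as_quat() returns (x, y, z, w),
+        # so the [3, 0, 1, 2] re-indexing above stores every quaternion in
+        # (w, x, y, z) order, consistent with the identity quaternion [1, 0, 0, 0]
+        # used when initializing timestep_to_quaternion.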
+ + self.tot_reversed_obj_rot_mtx = tot_reversed_obj_rot_mtx + + # self.tot_reversed_obj_rot_mtx[0] = torch.eye(3, dtype=torch.float32).cuda() + + ## should save self.object_global_orient and self.object_transl ## + # object_global_orient, object_transl # + self.object_global_orient = torch.from_numpy(object_global_orient).float().cuda() + self.object_transl = torch.from_numpy(object_transl).float().cuda() + + # self.object_transl[0, :] = self.object_transl[0, :] * 0.0 + return transformed_obj_verts, rhand_verts, obj_tot_normals + + + ### get ball primitives ### + + def get_ball_primitives(self, ): + + maxx_verts, _ = torch.max(self.obj_verts, dim=0) + minn_verts, _ = torch.min(self.obj_verts, dim=0) # + center_verts = (maxx_verts + minn_verts) / 2. + extent_verts = (maxx_verts - minn_verts) + ball_d = max(extent_verts[0].item(), max(extent_verts[1].item(), extent_verts[2].item())) + ball_r = ball_d / 2. + return center_verts, ball_r + + + + + # tot_l2loss = self.compute_loss_optimized_offset_with_preopt_offset() + def compute_loss_optimized_offset_with_preopt_offset(self, tot_time_idx): + # timestep_to_optimizable_offset + optimized_offset = self.renderer.bending_network[1].timestep_to_optimizable_offset + preopt_offset = self.ts_to_mesh_offset_for_opt # + # tot_l2loss = 0. + tot_l2losses = [] + # for ts in range(0, self.n_timesteps): + for ts in range(1, tot_time_idx + 1): + if ts in optimized_offset and ts in preopt_offset: + cur_optimized_offset = optimized_offset[ts] + cur_preopt_offset = preopt_offset[ts] + diff_optimized_preopt_offset = torch.mean(torch.sum((cur_preopt_offset - cur_optimized_offset) ** 2)) + # if ts == 1: + # tot_l2loss = + tot_l2losses.append(diff_optimized_preopt_offset) + # tot_l2loss += diff_optimized_preopt_offset + tot_l2losses = torch.stack(tot_l2losses, dim=0) + tot_l2loss = torch.mean(tot_l2losses) + # tot_l2loss = tot_l2loss / float(self.n_timesteps - 1) + return tot_l2loss + + # tracking_loss = self.compute_loss_optimized_transformations(cur_time_idx) + def compute_loss_optimized_transformations(self, cur_time_idx): + # # + cur_rot_mtx = self.other_bending_network.timestep_to_optimizable_rot_mtx[cur_time_idx] + cur_translations = self.other_bending_network.timestep_to_optimizable_total_def[cur_time_idx] + init_passive_mesh = self.timestep_to_passive_mesh[0] + center_passive_mesh = torch.mean(init_passive_mesh, dim=0) + pred_passive_mesh = torch.matmul( + cur_rot_mtx, (init_passive_mesh - center_passive_mesh.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + center_passive_mesh.unsqueeze(0) + cur_translations.unsqueeze(0) + gt_passive_mesh = self.timestep_to_passive_mesh[cur_time_idx] + tracking_loss = torch.sum( + (pred_passive_mesh - gt_passive_mesh) ** 2, dim=-1 + ).mean() + return tracking_loss + + def compute_loss_optimized_transformations_v2(self, cur_time_idx, cur_passive_time_idx): + # # ## get the + + # timestep_to_optimizable_rot_mtx, timestep_to_optimizable_total_def + cur_rot_mtx = self.other_bending_network.timestep_to_optimizable_rot_mtx[cur_time_idx] + cur_translations = self.other_bending_network.timestep_to_optimizable_total_def[cur_time_idx] + + if self.other_bending_network.canon_passive_obj_verts is None: + init_passive_mesh = self.timestep_to_passive_mesh[0] + center_passive_mesh = torch.mean(init_passive_mesh, dim=0) + # center_passive_mesh = torch.zeros((3, )).cuda() + else: + init_passive_mesh = self.other_bending_network.canon_passive_obj_verts + center_passive_mesh = torch.zeros((3, )).cuda() + pred_passive_mesh = torch.matmul( + 
cur_rot_mtx, (init_passive_mesh - center_passive_mesh.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + center_passive_mesh.unsqueeze(0) + cur_translations.unsqueeze(0) + gt_passive_mesh = self.timestep_to_passive_mesh[cur_passive_time_idx] + tracking_loss = torch.sum( # gt mehses # + (pred_passive_mesh - gt_passive_mesh) ** 2, dim=-1 + ).mean() + return tracking_loss + + + def construct_field_network(self, input_dim, hidden_dimensions, output_dim): + cur_field_network = nn.Sequential( + *[ + nn.Linear(input_dim, hidden_dimensions), nn.ReLU(), + nn.Linear(hidden_dimensions, hidden_dimensions * 2), # with maxpoll layers # + nn.Linear(hidden_dimensions * 2, hidden_dimensions), nn.ReLU(), # + nn.Linear(hidden_dimensions, output_dim), # hidden + ] + ) + + with torch.no_grad(): + for i, cc in enumerate(cur_field_network[:]): + # for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(cur_field_network) - 1: + torch.nn.init.zeros_(cc.bias) + torch.nn.init.zeros_(cur_field_network[-1].weight) + torch.nn.init.zeros_(cur_field_network[-1].bias) # initialize the field network for bendign and deofrmation + + return cur_field_network + + + + + + ''' GRAB clips --- expanded point set and expanded points for retargeting ''' + def train_point_set(self, ): + + ## ## GRAB clips ## ## + # states -> the robot actions --- in this sim ## + # chagne # # mano notjmano but the mano ---> optimize the mano delta states? # + self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs')) + self.update_learning_rate() # update learning rrate # + # robot actions ## + + nn_timesteps = self.timestep_to_passive_mesh.size(0) + self.nn_timesteps = nn_timesteps + num_steps = self.nn_timesteps + + + ''' Load the robot hand ''' + model_path = self.conf['model.sim_model_path'] # + # robot_agent = dyn_model_act.RobotAgent(xml_fn=model_path, args=None) + self.hand_type = "redmax_hand" + if model_path.endswith(".xml"): + self.hand_type = "redmax_hand" + robot_agent = dyn_model_act.RobotAgent(xml_fn=model_path, args=None) + else: + self.hand_type = "shadow_hand" + robot_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path, args=None) + ## shadow hand; redmax hand ## + self.robot_agent = robot_agent + robo_init_verts = self.robot_agent.robot_pts + # if self.hand_type == "redmax_hand": + robo_sampled_verts_idxes_fn = "robo_sampled_verts_idxes.npy" + if os.path.exists(robo_sampled_verts_idxes_fn): + sampled_verts_idxes = np.load(robo_sampled_verts_idxes_fn) + sampled_verts_idxes = torch.from_numpy(sampled_verts_idxes).long().cuda() + else: + n_sampling = 1000 + pts_fps_idx = data_utils.farthest_point_sampling(robo_init_verts.unsqueeze(0), n_sampling=n_sampling) + sampled_verts_idxes = pts_fps_idx + np.save(robo_sampled_verts_idxes_fn, sampled_verts_idxes.detach().cpu().numpy()) + # else: + # sampled_verts_idxes = None + self.robo_hand_faces = self.robot_agent.robot_faces + + + ## sampled verts idxes ## + self.sampled_verts_idxes = sampled_verts_idxes + ''' Load the robot hand ''' + + + ''' Load robot hand in DiffHand simulator ''' + # redmax_sim = redmax.Simulation(model_path) + # redmax_sim.reset(backward_flag = True) # redmax_sim -- # # --- robot hand mani asset? 
## ## robot hand mani asse ## + # # ### redmax_ndof_u, redmax_ndof_r ### # + # redmax_ndof_u = redmax_sim.ndof_u + # redmax_ndof_r = redmax_sim.ndof_r + # redmax_ndof_m = redmax_sim.ndof_m + #### retargeting is also a problem here #### + + + ''' Load the mano hand model ''' + model_path_mano = self.conf['model.mano_sim_model_path'] + # mano_agent = dyn_model_act_mano_deformable.RobotAgent(xml_fn=model_path_mano) # robot # + mano_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path_mano) ## model path mano ## # + self.mano_agent = mano_agent + # ''' Load the mano hand ''' + self.mano_agent.active_robot.expand_visual_pts() + # self.robo_hand_faces = self.mano_agent.robot_faces + + # if self.use_mano_hand_for_test: ## + # self.robo_hand_faces = self.hand_faces + ## + + ## start expanding the current visual pts ## + print(f"Start expanding the current visual pts...") + expanded_visual_pts = self.mano_agent.active_robot.expand_visual_pts() + + self.expanded_visual_pts_nn = expanded_visual_pts.size(0) + + ## expanded_visual_pts of the expanded visual pts # + expanded_visual_pts_npy = expanded_visual_pts.detach().cpu().numpy() + expanded_visual_pts_sv_fn = "expanded_visual_pts.npy" + print(f"Saving expanded visual pts with shape {expanded_visual_pts.size()} to {expanded_visual_pts_sv_fn}") + np.save(expanded_visual_pts_sv_fn, expanded_visual_pts_npy) # + + + nn_substeps = 10 + + ## + mano_nn_substeps = 1 + # mano_nn_substeps = 10 # + self.mano_nn_substeps = mano_nn_substeps + + # self.hand_faces # + + + ''' Expnad the current visual points ''' + + params_to_train = [] # params to train # + ### robot_actions, robot_init_states, robot_glb_rotation, robot_actuator_friction_forces, robot_glb_trans ### + + ''' Define MANO robot actions, delta_states, init_states, frictions, and others ''' + self.mano_robot_actions = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_actions.weight) + # params_to_train += list(self.robot_actions.parameters()) + + self.mano_robot_delta_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_delta_states.weight) + # params_to_train += list(self.robot_delta_states.parameters()) + + self.mano_robot_init_states = nn.Embedding( + num_embeddings=1, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_init_states.weight) + # params_to_train += list(self.robot_init_states.parameters()) + + self.mano_robot_glb_rotation = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=4 + ).cuda() + self.mano_robot_glb_rotation.weight.data[:, 0] = 1. + self.mano_robot_glb_rotation.weight.data[:, 1:] = 0. 
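+        # The per-timestep global rotation table is initialized to the identity
+        # quaternion (w, x, y, z) = (1, 0, 0, 0); during the forward pass each row
+        # is normalized and converted to a rotation matrix via quaternion_to_matrix
+        # before being applied to the hand's visual points.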
+ # params_to_train += list(self.robot_glb_rotation.parameters()) + + + self.mano_robot_glb_trans = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_robot_glb_trans.weight) + # params_to_train += list(self.robot_glb_trans.parameters()) + + ## + self.mano_robot_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, # embedding; a realistic thing # # ## so the optimizable modle deisgn --- approxmimate what you see and approximate the target simulator ## # at a distance; the asymmetric contact froces spring ks -- all of them wold affect model's behaviours ## ## mao robot glb + ).cuda() + torch.nn.init.zeros_(self.mano_robot_states.weight) + self.mano_robot_states.weight.data[0, :] = self.mano_robot_init_states.weight.data[0, :].clone() + + + + self.mano_expanded_actuator_delta_offset = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_delta_offset.weight) + # params_to_train += list(self.mano_expanded_actuator_delta_offset.parameters()) + + ### mano friction forces ### + # mano_expanded_actuator_friction_forces, mano_expanded_actuator_delta_offset # + self.mano_expanded_actuator_friction_forces = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_friction_forces.weight) + + ## load mano states and actions ## + if 'model.load_optimized_init_actions' in self.conf and len(self.conf['model.load_optimized_init_actions']) > 0: + print(f"[MANO] Loading optimized init transformations from {self.conf['model.load_optimized_init_actions']}") + cur_optimized_init_actions_fn = self.conf['model.load_optimized_init_actions'] + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + if 'mano_robot_init_states' in optimized_init_actions_ckpt: + self.mano_robot_init_states.load_state_dict(optimized_init_actions_ckpt['robot_init_states']) + if 'mano_robot_glb_rotation' in optimized_init_actions_ckpt: + self.mano_robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_rotation']) + # if 'robot_delta_states' in optimized_init_actions_ckpt: + # self.mano_robot_delta_states.load_state_dict(optimized_init_actions_ckpt['robot_delta_states']) + # self.mano_robot_actions.load_state_dict(optimized_init_actions_ckpt['robot_actions']) + + if 'mano_robot_states' in optimized_init_actions_ckpt: + self.mano_robot_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_states']) + # self.mano_robot_actuator_friction_forces.load_state_dict(optimized_init_actions_ckpt['robot_actuator_friction_forces']) + self.mano_robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_trans']) + if 'expanded_actuator_friction_forces' in optimized_init_actions_ckpt: + try: + self.mano_expanded_actuator_friction_forces.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_friction_forces']) + except: + pass + if 'expanded_actuator_delta_offset' in optimized_init_actions_ckpt: + self.mano_expanded_actuator_delta_offset.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_delta_offset']) + + + ''' parameters for the real robot hand ''' + self.robot_actions = nn.Embedding( + num_embeddings=num_steps, embedding_dim=22, + ).cuda() + torch.nn.init.zeros_(self.robot_actions.weight) + params_to_train += list(self.robot_actions.parameters()) + + + 
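+        # nn.Embedding is used here as a plain trainable lookup table of
+        # per-timestep parameters (one row per timestep), not as a learned token
+        # embedding; a row is read with an index tensor, e.g. (illustrative only):
+        #   ts_idx = torch.zeros((1,), dtype=torch.long).cuda() + cur_ts
+        #   cur_action = self.robot_actions(ts_idx).squeeze(0)  # (22,) action at cur_ts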
# self.robot_delta_states = nn.Embedding( + # num_embeddings=num_steps, embedding_dim=60, + # ).cuda() + # torch.nn.init.zeros_(self.robot_delta_states.weight) + # params_to_train += list(self.robot_delta_states.parameters()) + self.robot_states = nn.Embedding( + num_embeddings=num_steps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.robot_states.weight) + params_to_train += list(self.robot_states.parameters()) + + self.robot_init_states = nn.Embedding( + num_embeddings=1, embedding_dim=22, + ).cuda() + torch.nn.init.zeros_(self.robot_init_states.weight) + params_to_train += list(self.robot_init_states.parameters()) + + ## robot glb rotations ## + self.robot_glb_rotation = nn.Embedding( ## robot hand rotation + num_embeddings=num_steps, embedding_dim=4 + ).cuda() + self.robot_glb_rotation.weight.data[:, 0] = 1. + self.robot_glb_rotation.weight.data[:, 1:] = 0. + + self.robot_glb_trans = nn.Embedding( + num_embeddings=num_steps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.robot_glb_trans.weight) + + + # ### local minimum -> ## robot + self.robot_actuator_friction_forces = nn.Embedding( # frictional forces ## + num_embeddings=365428 * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.robot_actuator_friction_forces.weight) + + + + if len(self.load_optimized_init_transformations) > 0: + print(f"[Robot] Loading optimized init transformations from {self.load_optimized_init_transformations}") + cur_optimized_init_actions_fn = self.load_optimized_init_transformations + # cur_optimized_init_actions = # optimized init states # ## robot init states ## + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + try: + self.robot_init_states.load_state_dict(optimized_init_actions_ckpt['robot_init_states']) + except: + pass + self.robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['robot_glb_rotation']) + if 'robot_delta_states' in optimized_init_actions_ckpt: + try: + self.robot_delta_states.load_state_dict(optimized_init_actions_ckpt['robot_delta_states']) + except: + pass + if 'robot_states' in optimized_init_actions_ckpt: + self.robot_states.load_state_dict(optimized_init_actions_ckpt['robot_states']) + # if 'robot_delta_states' ## robot delta states ## + # self.robot_actions.load_state_dict(optimized_init_actions_ckpt['robot_actions']) + # self.mano_robot_actuator_friction_forces.load_state_dict(optimized_init_actions_ckpt['robot_actuator_friction_forces']) + self.robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['robot_glb_trans']) + + + + if self.hand_type == "redmax_hand": + self.maxx_robo_pts = 25. + self.minn_robo_pts = -15. + self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + self.mult_const_after_cent = 0.5437551664260203 + else: + self.minn_robo_pts = -0.1 + self.maxx_robo_pts = 0.2 + self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + self.mult_const_after_cent = 0.437551664260203 + ## for grab ## + self.mult_const_after_cent = self.mult_const_after_cent / 3. 
* 0.9507 + + + ### figners for the finger retargeting approach ### + self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + # self.robot_fingers = [3591, 4768, 6358, 10228, 6629, 10566, 5631, 9673] + self.robot_fingers = [6496, 10128, 53, 1623, 3209, 4495, 9523, 8877] + # self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + + if self.hand_type == "redmax_hand": + self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + + + # self.mano_mult_const_after_cent = 3. + + if 'model.mano_mult_const_after_cent' in self.conf: + self.mano_mult_const_after_cent = self.conf['model.mano_mult_const_after_cent'] + + + self.nn_ts = self.nn_timesteps - 1 + + ''' parameters for the real robot hand ''' + + + self.timestep_to_active_mesh = {} + self.timestep_to_expanded_visual_pts = {} + self.timestep_to_active_mesh_opt_ours_sim = {} + + self.timestep_to_active_mesh_w_delta_states = {} + + + # states -> get states -> only update the acitons # + with torch.no_grad(): # init them to zero + for cur_ts in range(self.nn_ts * self.mano_nn_substeps): + ''' Get rotations, translations, and actions of the current robot ''' ## mano robot glb rotation + cur_glb_rot = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) # mano glb rot + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) # mano glboal rotations # + cur_glb_trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + self.mano_agent.set_init_states_target_value(link_cur_states) + + cur_visual_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=True) + + cur_visual_pts = cur_visual_pts * self.mano_mult_const_after_cent + + + cur_visual_pts_idxes = torch.arange( + start=cur_ts * self.expanded_visual_pts_nn, end=(cur_ts + 1) * self.expanded_visual_pts_nn, dtype=torch.long + ).cuda() + cur_visual_pts_offset = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes) ## get the idxes ### + + + + cur_rot = cur_glb_rot + cur_trans = cur_glb_trans + + ## + ### transform by the glboal transformation and the translation ### ## cur visual pts ## ## contiguous() ## + cur_visual_pts = torch.matmul(cur_rot, cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) ## transformed pts ## + + + cur_visual_pts = cur_visual_pts + cur_visual_pts_offset + + + self.timestep_to_active_mesh[cur_ts] = cur_visual_pts.detach() + self.timestep_to_active_mesh_w_delta_states[cur_ts] = cur_visual_pts.detach() + self.timestep_to_active_mesh_opt_ours_sim[cur_ts] = cur_visual_pts.detach() + + self.iter_step = 0 + + ''' Set redmax robot actions ''' + + + params_to_train_kines = [] + # params_to_train_kines += list(self.mano_robot_glb_rotation.parameters()) + # params_to_train_kines += list(self.mano_robot_glb_trans.parameters()) + # params_to_train_kines += list(self.mano_robot_delta_states.parameters()) + # params_to_train_kines += list(self.mano_expanded_actuator_delta_offset.parameters()) + # params_to_train_kines += list(self.mano_expanded_actuator_friction_forces.parameters()) + # + # params_to_train_kines += list(self.robot_states.parameters()) + # params_to_train_kines += list(self.mano_expanded_actuator_friction_forces.parameters()) + 
params_to_train_kines += list(self.mano_expanded_actuator_delta_offset.parameters()) + params_to_train_kines += list(self.mano_expanded_actuator_friction_forces.parameters()) + + + + + # can tray the optimizer ## ## mano states ## + # if self.use_LBFGS: + # self.kines_optimizer = torch.optim.LBFGS(params_to_train_kines, lr=self.learning_rate) + # else: + # self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) + self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) + + + mano_expanded_actuator_delta_offset_ori = self.mano_expanded_actuator_delta_offset.weight.data.clone().detach() + + # if self.optimize_rules: + # params_to_train_kines = [] + # params_to_train_kines += list(self.other_bending_network.parameters()) + # # self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) + # if self.use_LBFGS: + # self.kines_optimizer = torch.optim.LBFGS(params_to_train_kines, lr=self.learning_rate) + # else: + # self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) + # # self.kines_optimizer = torch.optim.LBFGS(params_to_train_kines, lr=1e-2) + + + + ## policy and the controller ## + # self.other_bending_network.spring_ks_values.weight.data[:, :] = 0.1395 + # self.other_bending_network.spring_ks_values.weight.data[0, :] = 0.1395 + # self.other_bending_network.spring_ks_values.weight.data[1, :] = 0.00 + # self.other_bending_network.inertia_div_factor.weight.data[:, :] = 10.0 + # self.other_bending_network.inertia_div_factor.weight.data[:, :] = 1000.0 + # self.other_bending_network.inertia_div_factor.weight.data[:, :] = 100.0 + + + # load_redmax_robot_actions_fn = "/data3/datasets/diffsim/neus/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_forces_rule_v13__hieoptrotorules_penalty_friction_ct_01_thres_0001_avg_optrobo_nglbtrans_reg001_manorules_projk_0_nsp_res_sqrstiff_preprojfri_projk_0_optrules_diffh_subs_10_/checkpoints/redmax_robot_actions_ckpt_001900.pth" + # if len(load_redmax_robot_actions_fn) > 0: + # redmax_robot_actions_ckpt = torch.load(load_redmax_robot_actions_fn, map_location=self.device, ) + # self.redmax_robot_actions.load_state_dict(redmax_robot_actions_ckpt['redmax_robot_actions']) + + ''' prepare for keeping the original global rotations, trans, and states ''' + # ori_mano_robot_glb_rot = self.mano_robot_glb_rotation.weight.data.clone() + # ori_mano_robot_glb_trans = self.mano_robot_glb_trans.weight.data.clone() + # ori_mano_robot_delta_states = self.mano_robot_delta_states.weight.data.clone() + + + self.iter_step = 0 + + + for i_iter in tqdm(range(100000)): + tot_losses = [] + tot_tracking_loss = [] + + # timestep # + # self.timestep_to_active_mesh = {} + self.timestep_to_posed_active_mesh = {} + self.timestep_to_posed_mano_active_mesh = {} + self.timestep_to_mano_active_mesh = {} + self.timestep_to_corr_mano_pts = {} + # # # # + timestep_to_tot_rot = {} + timestep_to_tot_trans = {} + + # correspondence_pts_idxes = None + + # tot_penetrating_depth_penalty = [] + # tot_ragged_dist = [] + # tot_delta_offset_reg_motion = [] + # tot_dist_mano_visual_ori_to_cur = [] + # tot_reg_loss = [] + # tot_diff_cur_states_to_ref_states = [] + # tot_diff_tangential_forces = [] + # penetration_forces = None ### + # sampled_visual_pts_joint_idxes = None + + # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + self.timestep_to_raw_active_meshes = {} + self.timestep_to_penetration_points = 
{} + self.timestep_to_penetration_points_forces = {} + self.joint_name_to_penetration_forces_intermediates = {} + + self.timestep_to_anchored_mano_pts = {} + + + self.ts_to_contact_passive_normals = {} + self.ts_to_passive_normals = {} + self.ts_to_passive_pts = {} + self.ts_to_contact_force_d = {} + self.ts_to_penalty_frictions = {} + self.ts_to_penalty_disp_pts = {} + self.ts_to_redmax_states = {} + self.ts_to_dyn_mano_pts = {} + # constraitns for states # + # with 17 dimensions on the states; [3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16] + + contact_pairs_set = None + self.contact_pairs_sets = {} + + # redmax_sim.reset(backward_flag = True) + + # tot_grad_qs = [] + + robo_intermediates_states = [] + + tot_penetration_depth = [] + + robo_actions_diff_loss = [] + mano_tracking_loss = [] + + tot_interpenetration_nns = [] + + # init global transformations ## + # cur_ts_redmax_delta_rotations = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_delta_rotations = torch.tensor([0., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_robot_trans = torch.zeros((3,), dtype=torch.float32).cuda() + + # for cur_ts in range(self.nn_ts): + for cur_ts in range(self.nn_ts * self.mano_nn_substeps): + # tot_redmax_actions = [] + # actions = {} + + self.free_def_bending_weight = 0.0 + + # mano_robot_glb_rotation, mano_robot_glb_trans, mano_robot_delta_states # + cur_glb_rot = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + cur_glb_rot = cur_glb_rot + cur_ts_redmax_delta_rotations + + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) + # cur_glb_rot_quat = cur_glb_rot.clone() + + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) + cur_glb_trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + # # # cur_ts_delta_rot, cur_ts_redmax_robot_trans # # # + # cur_glb_rot = torch.matmul(cur_ts_delta_rot, cur_glb_rot) + cur_glb_trans = cur_glb_trans + cur_ts_redmax_robot_trans # redmax robot transj## + + link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.mano_agent.set_init_states_target_value(link_cur_states) + cur_visual_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=True) # init state visual pts # + + cur_dyn_mano_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=False) # init s + + # not taht the scale is differne but would not affect the final result + # expanded_pts # + cur_visual_pts_idxes = torch.arange( + start=cur_ts * self.expanded_visual_pts_nn, end=(cur_ts + 1) * self.expanded_visual_pts_nn, dtype=torch.long + ).cuda() + cur_visual_pts_offset = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes) ## get the idxes ### + + ## get the friction forces ## + cur_visual_pts_friction_forces = self.mano_expanded_actuator_friction_forces(cur_visual_pts_idxes) + + ### transform the visual pts ### ## fricton forces ## + # cur_visual_pts = (cur_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + # cur_visual_pts = cur_visual_pts * 2. - 1. 
# cur visual pts # + # cur_visual_pts = cur_visual_pts * self.mult_const_after_cent # # mult_const # + + ### visual pts are expanded from xxx ### + cur_visual_pts = cur_visual_pts * self.mano_mult_const_after_cent # mult cnst after cent # + cur_dyn_mano_pts = cur_dyn_mano_pts * self.mano_mult_const_after_cent + + cur_rot = cur_glb_rot + cur_trans = cur_glb_trans + + timestep_to_tot_rot[cur_ts] = cur_rot.detach() + timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + + cur_visual_pts = torch.matmul(cur_rot, cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) + cur_dyn_mano_pts = torch.matmul(cur_rot, cur_dyn_mano_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) + + cur_visual_pts = cur_visual_pts + cur_visual_pts_offset + + # diff_redmax_visual_pts_with_ori_visual_pts = torch.sum( + # (cur_visual_pts[sampled_verts_idxes] - self.timestep_to_active_mesh_opt_ours_sim[cur_ts].detach()) ** 2, dim=-1 + # ) + # diff_redmax_visual_pts_with_ori_visual_pts = diff_redmax_visual_pts_with_ori_visual_pts.mean() + + # train the friction net? how to train the friction net? # + # if self.use_mano_hand_for_test: + # self.timestep_to_active_mesh[cur_ts] = self.rhand_verts[cur_ts] # .detach() + # else: + + + # timestep_to_anchored_mano_pts, timestep_to_raw_active_meshes # + self.timestep_to_active_mesh[cur_ts] = cur_visual_pts + self.timestep_to_raw_active_meshes[cur_ts] = cur_visual_pts.detach().cpu().numpy() + self.ts_to_dyn_mano_pts[cur_ts] = cur_dyn_mano_pts.detach().cpu().numpy() + + # # ragged_dist = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # # dist_transformed_expanded_visual_pts_to_ori_visual_pts = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # # diff_cur_states_to_ref_states = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # cur_robo_glb_rot = self.robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + # cur_robo_glb_rot = cur_robo_glb_rot / torch.clamp(torch.norm(cur_robo_glb_rot, dim=-1, p=2), min=1e-7) + # cur_robo_glb_rot = dyn_model_act.quaternion_to_matrix(cur_robo_glb_rot) # mano glboal rotations # + # cur_robo_glb_trans = self.robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + # robo_links_states = self.robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + # self.robot_agent.set_init_states_target_value(robo_links_states) + # cur_robo_visual_pts = self.robot_agent.get_init_state_visual_pts() + + + # ### transform the visual pts ### + # cur_robo_visual_pts = (cur_robo_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + # cur_robo_visual_pts = cur_robo_visual_pts * 2. -1. 
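+ # A condensed restatement of the transform applied just above, for readability: the per-timestep
+ # quaternion is renormalized, converted to a rotation matrix, the visual points are rotated and
+ # translated, and the learned per-point delta offsets are added on top. This is a sketch only
+ # (defined, never called); it reuses the dyn_model_act.quaternion_to_matrix helper already used
+ # elsewhere in this file.
+ def _sketch_pose_points(quat: torch.Tensor, trans: torch.Tensor,
+                         pts: torch.Tensor, offsets: torch.Tensor) -> torch.Tensor:
+     quat = quat / torch.clamp(torch.norm(quat, dim=-1, p=2), min=1e-7)  # keep a unit quaternion
+     rot = dyn_model_act.quaternion_to_matrix(quat)                      # (3, 3)
+     posed = torch.matmul(rot, pts.transpose(1, 0)).transpose(1, 0) + trans.unsqueeze(0)
+     return posed + offsets                                              # (n_pts, 3)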
+ # cur_robo_visual_pts = cur_robo_visual_pts * self.mult_const_after_cent # mult_const # + + + # cur_rot = cur_robo_glb_rot + # cur_trans = cur_glb_trans + + # timestep_to_tot_rot[cur_ts] = cur_rot.detach() + # timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + + # ### transform by the glboal transformation and the translation ### + # cur_robo_visual_pts = torch.matmul(cur_robo_glb_rot, cur_robo_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_robo_glb_trans.unsqueeze(0) ## transformed pts ## + + + # self.timestep_to_active_mesh[cur_ts] = cur_robo_visual_pts.clone() # robo visual pts # + + # if self.hand_type == 'redmax_hand': + # cur_robo_visual_pts = cur_robo_visual_pts[sampled_verts_idxes] # + + + + self.free_def_bending_weight = 0.0 + # self.free_def_bending_weight = 0.5 + + # if i_iter == 0 and cur_ts == 0: ## for the tiantianquan sequence ## + # dist_robot_pts_to_mano_pts = torch.sum( + # (cur_robo_visual_pts.unsqueeze(1) - cur_visual_pts.unsqueeze(0)) ** 2, dim=-1 + # ) + # minn_dist_robot_pts_to_mano_pts, correspondence_pts_idxes = torch.min(dist_robot_pts_to_mano_pts, dim=-1) + # minn_dist_robot_pts_to_mano_pts = torch.sqrt(minn_dist_robot_pts_to_mano_pts) + # # dist_smaller_than_thres = minn_dist_robot_pts_to_mano_pts < 0.01 + # dist_smaller_than_thres = minn_dist_robot_pts_to_mano_pts < 0.005 + + # corr_correspondence_pts = cur_visual_pts[correspondence_pts_idxes] + + # dist_corr_correspondence_pts_to_mano_visual_pts = torch.sum( + # (corr_correspondence_pts.unsqueeze(1) - cur_visual_pts.unsqueeze(0)) ** 2, dim=-1 + # ) + # dist_corr_correspondence_pts_to_mano_visual_pts = torch.sqrt(dist_corr_correspondence_pts_to_mano_visual_pts) + # minn_dist_to_corr_pts, _ = torch.min(dist_corr_correspondence_pts_to_mano_visual_pts, dim=0) + # anchored_mano_visual_pts = minn_dist_to_corr_pts < 0.005 + + # corr_correspondence_pts = cur_visual_pts[correspondence_pts_idxes] + # # corr_robo = cur_visual_pts[sampled_verts_idxes] + # cd_robo_pts_to_corr_mano_pts = torch.sum( + # (cur_robo_visual_pts.unsqueeze(1) - cur_visual_pts[anchored_mano_visual_pts].unsqueeze(0)) ** 2, dim=-1 + # ) + + # self.timestep_to_anchored_mano_pts[cur_ts] = cur_visual_pts[anchored_mano_visual_pts].detach().cpu().numpy() + + # cd_robo_to_mano, _ = torch.min(cd_robo_pts_to_corr_mano_pts, dim=-1) + # cd_mano_to_robo, _ = torch.min(cd_robo_pts_to_corr_mano_pts, dim=0) + # # diff_robo_to_corr_mano_pts = cd_mano_to_robo.mean() + # diff_robo_to_corr_mano_pts = cd_robo_to_mano.mean() + + # mano_fingers = self.rhand_verts[cur_ts][self.mano_fingers] + + # if self.hand_type == 'redmax_hand': + # # sampled_verts_idxes + # robo_fingers = cur_robo_visual_pts[sampled_verts_idxes][self.robot_fingers] + # else: + # robo_fingers = cur_robo_visual_pts[self.robot_fingers] + + # pure finger tracking ## + # pure_finger_tracking_loss = torch.sum((mano_fingers - robo_fingers) ** 2) + + + # diff_robo_to_corr_mano_pts_finger_tracking = torch.sum( + # (corr_correspondence_pts - cur_robo_visual_pts) ** 2, dim=-1 + # ) + # diff_robo_to_corr_mano_pts_finger_tracking = diff_robo_to_corr_mano_pts_finger_tracking[dist_smaller_than_thres] + # diff_robo_to_corr_mano_pts_finger_tracking = diff_robo_to_corr_mano_pts_finger_tracking.mean() + + # loss_finger_tracking = diff_robo_to_corr_mano_pts * self.finger_cd_loss_coef + pure_finger_tracking_loss * 0.5 # + diff_robo_to_corr_mano_pts_finger_tracking * self.finger_tracking_loss_coef + + ## TODO: add the glboal retargeting using fingers before conducting this 
approach + + + # def evaluate_tracking_loss(): + # self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=False, contact_pairs_set=self.contact_pairs_set) + + # ## + # # init states # + # # cur_ts % mano_nn_substeps == 0: + # if (cur_ts + 1) % mano_nn_substeps == 0: + # cur_passive_big_ts = cur_ts // mano_nn_substeps + # in_func_tracking_loss = self.compute_loss_optimized_transformations_v2(cur_ts + 1, cur_passive_big_ts + 1) + # # tot_tracking_loss.append(tracking_loss.detach().cpu().item()) + # else: + # in_func_tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # return in_func_tracking_loss + + + if contact_pairs_set is None: + self.contact_pairs_set = None + else: + self.contact_pairs_set = contact_pairs_set.copy() + + # ### if traiing the jrbpt h + # print(self.timestep_to_active_mesh[cur_ts].size(), cur_visual_pts_friction_forces.size()) + contact_pairs_set = self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=self.fix_obj, contact_pairs_set=contact_pairs_set, pts_frictional_forces=cur_visual_pts_friction_forces) + + ### train with force to active ## + # if self.train_with_forces_to_active and (not self.use_mano_inputs): + # # penetration_forces # + # if torch.sum(self.other_bending_network.penetrating_indicator.float()) > 0.5: + # net_penetrating_forces = self.other_bending_network.penetrating_forces + # net_penetrating_points = self.other_bending_network.penetrating_points + + + # # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + # self.timestep_to_penetration_points[cur_ts] = net_penetrating_points.detach().cpu().numpy() + # self.timestep_to_penetration_points_forces[cur_ts] = net_penetrating_forces.detach().cpu().numpy() + + + # ### transform the visual pts ### + # # cur_visual_pts = (cur_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + # # cur_visual_pts = cur_visual_pts * 2. - 1. + # # cur_visual_pts = cur_visual_pts * self.mult_const_after_cent # mult_const # + + # # sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[finger_sampled_idxes][self.other_bending_network.penetrating_indicator] + + # # sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[self.other_bending_network.penetrating_indicator] + + # net_penetrating_forces = torch.matmul( + # cur_rot.transpose(1, 0), net_penetrating_forces.transpose(1, 0) + # ).transpose(1, 0) + # net_penetrating_forces = net_penetrating_forces / self.mult_const_after_cent + # net_penetrating_forces = net_penetrating_forces / 2 + # net_penetrating_forces = net_penetrating_forces * self.extent_robo_pts + + # net_penetrating_points = torch.matmul( + # cur_rot.transpose(1, 0), (net_penetrating_points - cur_trans.unsqueeze(0)).transpose(1, 0) + # ).transpose(1, 0) + # net_penetrating_points = net_penetrating_points / self.mult_const_after_cent + # net_penetrating_points = (net_penetrating_points + 1.) / 2. 
# penetrating points # + # net_penetrating_points = (net_penetrating_points * self.extent_robo_pts) + self.minn_robo_pts + + + # link_maximal_contact_forces = torch.zeros((redmax_ndof_r, 6), dtype=torch.float32).cuda() + + # else: + # # penetration_forces = None + # link_maximal_contact_forces = torch.zeros((redmax_ndof_r, 6), dtype=torch.float32).cuda() + + # if contact_pairs_set is not None: + # self.contact_pairs_sets[cur_ts] = contact_pairs_set.copy() + + # # contact force d ## ts to the passive normals ## + # self.ts_to_contact_passive_normals[cur_ts] = self.other_bending_network.tot_contact_passive_normals.detach().cpu().numpy() + # self.ts_to_passive_pts[cur_ts] = self.other_bending_network.cur_passive_obj_verts.detach().cpu().numpy() + # self.ts_to_passive_normals[cur_ts] = self.other_bending_network.cur_passive_obj_ns.detach().cpu().numpy() + # self.ts_to_contact_force_d[cur_ts] = self.other_bending_network.contact_force_d.detach().cpu().numpy() + # self.ts_to_penalty_frictions[cur_ts] = self.other_bending_network.penalty_friction_tangential_forces.detach().cpu().numpy() + # if self.other_bending_network.penalty_based_friction_forces is not None: + # self.ts_to_penalty_disp_pts[cur_ts] = self.other_bending_network.penalty_based_friction_forces.detach().cpu().numpy() + + # # # get the penetration depth of the bending network # + + + # ### optimize with intermediates ### # optimize with intermediates # + # if self.optimize_with_intermediates: + # tracking_loss = self.compute_loss_optimized_transformations(cur_ts + 1) # + # else: + # tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + + + # # cur_ts % mano_nn_substeps == 0: # + if (cur_ts + 1) % mano_nn_substeps == 0: + cur_passive_big_ts = cur_ts // mano_nn_substeps + ## compute optimized transformations ## + tracking_loss = self.compute_loss_optimized_transformations_v2(cur_ts + 1, cur_passive_big_ts + 1) + tot_tracking_loss.append(tracking_loss.detach().cpu().item()) + else: + tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + + # # hand_tracking_loss = torch.sum( ## delta states? 
## + # # (self.timestep_to_active_mesh_w_delta_states[cur_ts] - cur_visual_pts) ** 2, dim=-1 + # # ) + # # hand_tracking_loss = hand_tracking_loss.mean() + + + # # loss = tracking_loss + self.other_bending_network.penetrating_depth_penalty * self.penetrating_depth_penalty_coef + # # diff_redmax_visual_pts_with_ori_visual_pts.backward() + penetraton_penalty = self.other_bending_network.penetrating_depth_penalty * self.penetrating_depth_penalty_coef + + tot_penetration_depth.append(penetraton_penalty.detach().item()) + + # smaller_than_zero_level_set_indicator + cur_interpenetration_nns = self.other_bending_network.smaller_than_zero_level_set_indicator.float().sum() + + tot_interpenetration_nns.append(cur_interpenetration_nns) + + # diff_hand_tracking = torch.zeros((1,), dtype=torch.float32).cuda().mean() ## + + # ## diff + # # diff_hand_tracking_coef + # # kinematics_proj_loss = kinematics_trans_diff + penetraton_penalty + diff_hand_tracking * self.diff_hand_tracking_coef + tracking_loss + + # # if self.use_mano_hand_for_test: ## only the kinematics mano hand is optimized here ## + # # kinematics_proj_loss = tracking_loss + + # # kinematics_proj_loss = hand_tracking_loss * 1e2 ## 1e2 and the 1e2 ## + + # kinematics_proj_loss = diff_hand_tracking * self.diff_hand_tracking_coef + tracking_loss + penetraton_penalty + + ## kinematics + # kinematics_proj_loss = loss_finger_tracking # + tracking_loss + penetraton_penalty + + reg_delta_offset_loss = torch.sum( + (mano_expanded_actuator_delta_offset_ori - self.mano_expanded_actuator_delta_offset.weight.data) ** 2, dim=-1 + ) + reg_delta_offset_loss = reg_delta_offset_loss.mean() + # motion_reg_loss_coef + reg_delta_offset_loss = reg_delta_offset_loss * self.motion_reg_loss_coef # ## motion reg loss ## # + + + ### tracking loss and the penetration penalty ### + kinematics_proj_loss = tracking_loss + penetraton_penalty + reg_delta_offset_loss ## tracking and the penetration penalty ## + + ### kinematics proj loss ### + loss = kinematics_proj_loss # * self.loss_scale_coef ## get + + + + self.kines_optimizer.zero_grad() + + try: + kinematics_proj_loss.backward(retain_graph=True) + + self.kines_optimizer.step() + except: + pass + + + # mano_expanded_actuator_delta_offset, # point to the gradient of a # + ### get the gradient information ### + # if self.iter_step > 1239 and self.mano_expanded_actuator_delta_offset.weight.grad is not None: + # grad_mano_expanded_actuator_delta_offset = self.mano_expanded_actuator_delta_offset.weight.grad.data + # grad_mano_expanded_actuator_delta_offset = torch.sum(grad_mano_expanded_actuator_delta_offset) + # print(f"iter_step: {self.iter_step}, grad_offset: {grad_mano_expanded_actuator_delta_offset}") + # if self.iter_step > 1239 and self.mano_expanded_actuator_friction_forces.weight.grad is not None: + # grad_mano_expanded_actuator_friction_forces = self.mano_expanded_actuator_friction_forces.weight.grad.data + # grad_mano_expanded_actuator_friction_forces = torch.sum(grad_mano_expanded_actuator_friction_forces) + # print(f"iter_step: {self.iter_step}, grad_friction_forces: {grad_mano_expanded_actuator_friction_forces}") + + # if self.iter_step > 1239 and cur_visual_pts.grad is not None: + # grad_cur_visual_pts = torch.sum(cur_visual_pts.grad.data) + # print(f"iter_step: {self.iter_step}, grad_cur_visual_pts: {grad_cur_visual_pts}") + + + # # + + # if self.use_LBFGS: + # self.kines_optimizer.step(evaluate_tracking_loss) # + # else: + # self.kines_optimizer.step() + + # + # tracking_loss.backward(retain_graph=True) + # 
if self.use_LBFGS: + # self.other_bending_network.reset_timestep_to_quantities(cur_ts) + + + robot_states_actions_diff_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + robo_actions_diff_loss.append(reg_delta_offset_loss.item()) + + + tot_losses.append(loss.detach().item()) # total losses # # total losses # + # tot_penalty_dot_forces_normals.append(cur_penalty_dot_forces_normals.detach().item()) + # tot_penalty_friction_constraint.append(cur_penalty_friction_constraint.detach().item()) + + self.iter_step += 1 + + self.writer.add_scalar('Loss/loss', loss, self.iter_step) + + if self.iter_step % self.save_freq == 0: + self.save_checkpoint() # a smart solution for them ? # # save checkpoint # ## save checkpoint ## + self.update_learning_rate() ## update learning rate ## + + torch.cuda.empty_cache() + + + ''' Get nn_forward_ts and backward through the actions for updating ''' + tot_losses = sum(tot_losses) / float(len(tot_losses)) + if len(tot_tracking_loss) > 0: + tot_tracking_loss = sum(tot_tracking_loss) / float(len(tot_tracking_loss)) + else: + tot_tracking_loss = 0.0 + if len(tot_penetration_depth) > 0: + tot_penetration_depth = sum(tot_penetration_depth) / float(len(tot_penetration_depth)) + else: + tot_penetration_depth = 0.0 + robo_actions_diff_loss = sum(robo_actions_diff_loss) / float(len(robo_actions_diff_loss)) + if len(mano_tracking_loss) > 0: + mano_tracking_loss = sum(mano_tracking_loss) / float(len(mano_tracking_loss)) + else: + mano_tracking_loss = 0.0 + + avg_tot_interpenetration_nns = float(sum(tot_interpenetration_nns) ) / float(len(tot_interpenetration_nns)) + + if i_iter % self.report_freq == 0: + logs_sv_fn = os.path.join(self.base_exp_dir, 'log.txt') + + cur_log_sv_str = 'iter:{:8>d} loss = {} tracking_loss = {} mano_tracking_loss = {} penetration_depth = {} actions_diff_loss = {} penetration = {} / {} lr={}'.format(self.iter_step, tot_losses, tot_tracking_loss, mano_tracking_loss, tot_penetration_depth, robo_actions_diff_loss, avg_tot_interpenetration_nns, self.timestep_to_active_mesh[0].size(0), self.optimizer.param_groups[0]['lr']) + + print(cur_log_sv_str) + ''' Dump to the file ''' + with open(logs_sv_fn, 'a') as log_file: + log_file.write(cur_log_sv_str + '\n') + + + if i_iter % self.val_mesh_freq == 0: + self.validate_mesh_robo_g() + self.validate_mesh_robo() + self.validate_contact_info_robo() + + + torch.cuda.empty_cache() + + + ''' GRAB clips --- expanded point set and expanded points for retargeting ''' + def train_point_set_dyn(self, ): + + + self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs')) + self.update_learning_rate() + + nn_timesteps = self.timestep_to_passive_mesh.size(0) + self.nn_timesteps = nn_timesteps + num_steps = self.nn_timesteps + + + ''' Load the robot hand ''' + # model_path = self.conf['model.sim_model_path'] + # self.hand_type = "redmax_hand" + # if model_path.endswith(".xml"): + # self.hand_type = "redmax_hand" + # robot_agent = dyn_model_act.RobotAgent(xml_fn=model_path, args=None) + # else: + # self.hand_type = "shadow_hand" + # robot_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path, args=None) + # ## shadow hand; redmax hand ## + # self.robot_agent = robot_agent + # robo_init_verts = self.robot_agent.robot_pts + # # if self.hand_type == "redmax_hand": + # robo_sampled_verts_idxes_fn = "robo_sampled_verts_idxes.npy" + # if os.path.exists(robo_sampled_verts_idxes_fn): + # sampled_verts_idxes = np.load(robo_sampled_verts_idxes_fn) + # sampled_verts_idxes = 
torch.from_numpy(sampled_verts_idxes).long().cuda() + # else: + # n_sampling = 1000 + # pts_fps_idx = data_utils.farthest_point_sampling(robo_init_verts.unsqueeze(0), n_sampling=n_sampling) + # sampled_verts_idxes = pts_fps_idx + # np.save(robo_sampled_verts_idxes_fn, sampled_verts_idxes.detach().cpu().numpy()) + # # else: + # # sampled_verts_idxes = None + # self.robo_hand_faces = self.robot_agent.robot_faces + + + ## sampled verts idxes ## + # self.sampled_verts_idxes = sampled_verts_idxes + ''' Load the robot hand ''' + + + ''' Load robot hand in DiffHand simulator ''' + # redmax_sim = redmax.Simulation(model_path) + # redmax_sim.reset(backward_flag = True) # redmax_sim -- # # --- robot hand mani asset? ## ## robot hand mani asse ## + # # ### redmax_ndof_u, redmax_ndof_r ### # + # redmax_ndof_u = redmax_sim.ndof_u + # redmax_ndof_r = redmax_sim.ndof_r + # redmax_ndof_m = redmax_sim.ndof_m + #### retargeting is also a problem here #### + + + ''' Load the mano hand model ''' + model_path_mano = self.conf['model.mano_sim_model_path'] + # mano_agent = dyn_model_act_mano_deformable.RobotAgent(xml_fn=model_path_mano) # robot # + mano_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path_mano) ## model path mano ## # + self.mano_agent = mano_agent + # ''' Load the mano hand ''' + self.mano_agent.active_robot.expand_visual_pts() + self.robo_hand_faces = self.mano_agent.robot_faces + + # if self.use_mano_hand_for_test: ## + # self.robo_hand_faces = self.hand_faces + ## + + ## start expanding the current visual pts ## + print(f"Start expanding the current visual pts...") + expanded_visual_pts = self.mano_agent.active_robot.expand_visual_pts() + + self.expanded_visual_pts_nn = expanded_visual_pts.size(0) + + ## expanded_visual_pts of the expanded visual pts # + expanded_visual_pts_npy = expanded_visual_pts.detach().cpu().numpy() + # expanded_visual_pts_sv_fn = "expanded_visual_pts.npy" + # print(f"Saving expanded visual pts with shape {expanded_visual_pts.size()} to {expanded_visual_pts_sv_fn}") + # np.save(expanded_visual_pts_sv_fn, expanded_visual_pts_npy) # + + + nn_substeps = 10 + + ## + mano_nn_substeps = 1 + # mano_nn_substeps = 10 # + self.mano_nn_substeps = mano_nn_substeps + + # self.hand_faces # + + + ''' Expnad the current visual points ''' + + params_to_train = [] # params to train # + ### robot_actions, robot_init_states, robot_glb_rotation, robot_actuator_friction_forces, robot_glb_trans ### + + ''' Define MANO robot actions, delta_states, init_states, frictions, and others ''' + self.mano_robot_actions = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_actions.weight) + # params_to_train += list(self.robot_actions.parameters()) + + self.mano_robot_delta_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_delta_states.weight) + # params_to_train += list(self.robot_delta_states.parameters()) + + self.mano_robot_init_states = nn.Embedding( + num_embeddings=1, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_init_states.weight) + # params_to_train += list(self.robot_init_states.parameters()) + + self.mano_robot_glb_rotation = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=4 + ).cuda() + self.mano_robot_glb_rotation.weight.data[:, 0] = 1. + self.mano_robot_glb_rotation.weight.data[:, 1:] = 0. 
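+ # The global-rotation table above is initialised to the identity quaternion (w, x, y, z) = (1, 0, 0, 0),
+ # so every timestep starts from an un-rotated global pose and only deviates as optimization proceeds;
+ # the lookup sites renormalize the quaternion before converting it to a matrix. A small sanity-check
+ # sketch (defined, never called), assuming the same dyn_model_act.quaternion_to_matrix helper used
+ # elsewhere in this file:
+ def _sketch_identity_quaternion_check() -> bool:
+     identity_quat = torch.tensor([1., 0., 0., 0.])
+     rot = dyn_model_act.quaternion_to_matrix(identity_quat)
+     return bool(torch.allclose(rot, torch.eye(3), atol=1e-6))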
+ # params_to_train += list(self.robot_glb_rotation.parameters()) + + + self.mano_robot_glb_trans = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_robot_glb_trans.weight) + # params_to_train += list(self.robot_glb_trans.parameters()) + + ## + self.mano_robot_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, # embedding; a realistic thing # # ## so the optimizable modle deisgn --- approxmimate what you see and approximate the target simulator ## # at a distance; the asymmetric contact froces spring ks -- all of them wold affect model's behaviours ## ## mao robot glb + ).cuda() + torch.nn.init.zeros_(self.mano_robot_states.weight) + self.mano_robot_states.weight.data[0, :] = self.mano_robot_init_states.weight.data[0, :].clone() + + + + self.mano_expanded_actuator_delta_offset = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_delta_offset.weight) + # params_to_train += list(self.mano_expanded_actuator_delta_offset.parameters()) + + + self.mano_expanded_actuator_friction_forces = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_friction_forces.weight) + + ##### expanded actuators pointact jforces ### + self.mano_expanded_actuator_pointact_forces = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_pointact_forces.weight) + + self.mano_expanded_actuator_pointact_damping_coefs = nn.Embedding( + num_embeddings=10, embedding_dim=1 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_pointact_damping_coefs.weight) + + + ## load mano states and actions ## + if 'model.load_optimized_init_actions' in self.conf and len(self.conf['model.load_optimized_init_actions']) > 0: + print(f"[MANO] Loading optimized init transformations from {self.conf['model.load_optimized_init_actions']}") + cur_optimized_init_actions_fn = self.conf['model.load_optimized_init_actions'] + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + if 'mano_robot_init_states' in optimized_init_actions_ckpt: + self.mano_robot_init_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_init_states']) + if 'mano_robot_glb_rotation' in optimized_init_actions_ckpt: + self.mano_robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_rotation']) + + if 'mano_robot_states' in optimized_init_actions_ckpt: + self.mano_robot_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_states']) + + if 'mano_robot_actions' in optimized_init_actions_ckpt: + self.mano_robot_actions.load_state_dict(optimized_init_actions_ckpt['mano_robot_actions']) + + self.mano_robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_trans']) + if 'expanded_actuator_friction_forces' in optimized_init_actions_ckpt: + try: + self.mano_expanded_actuator_friction_forces.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_friction_forces']) + except: + pass + #### actuator point forces and actuator point offsets #### + if 'mano_expanded_actuator_delta_offset' in optimized_init_actions_ckpt: + print(f"loading mano_expanded_actuator_delta_offset...") + 
self.mano_expanded_actuator_delta_offset.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_delta_offset']) + if 'mano_expanded_actuator_pointact_forces' in optimized_init_actions_ckpt: + self.mano_expanded_actuator_pointact_forces.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_pointact_forces']) + if 'mano_expanded_actuator_pointact_damping_coefs' in optimized_init_actions_ckpt: + self.mano_expanded_actuator_pointact_damping_coefs.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_pointact_damping_coefs']) + + + + ## load + ''' parameters for the real robot hand ''' + self.robot_actions = nn.Embedding( + num_embeddings=num_steps, embedding_dim=22, + ).cuda() + torch.nn.init.zeros_(self.robot_actions.weight) + params_to_train += list(self.robot_actions.parameters()) + + + # self.robot_delta_states = nn.Embedding( + # num_embeddings=num_steps, embedding_dim=60, + # ).cuda() + # torch.nn.init.zeros_(self.robot_delta_states.weight) + # params_to_train += list(self.robot_delta_states.parameters()) + self.robot_states = nn.Embedding( + num_embeddings=num_steps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.robot_states.weight) + params_to_train += list(self.robot_states.parameters()) + + self.robot_init_states = nn.Embedding( + num_embeddings=1, embedding_dim=22, + ).cuda() + torch.nn.init.zeros_(self.robot_init_states.weight) + params_to_train += list(self.robot_init_states.parameters()) + + ## robot glb rotations ## + self.robot_glb_rotation = nn.Embedding( ## robot hand rotation + num_embeddings=num_steps, embedding_dim=4 + ).cuda() + self.robot_glb_rotation.weight.data[:, 0] = 1. + self.robot_glb_rotation.weight.data[:, 1:] = 0. + + self.robot_glb_trans = nn.Embedding( + num_embeddings=num_steps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.robot_glb_trans.weight) + + + # ### local minimum -> ## robot + self.robot_actuator_friction_forces = nn.Embedding( # frictional forces ## + num_embeddings=365428 * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.robot_actuator_friction_forces.weight) + + + + # if len(self.load_optimized_init_transformations) > 0: + # print(f"[Robot] Loading optimized init transformations from {self.load_optimized_init_transformations}") + # cur_optimized_init_actions_fn = self.load_optimized_init_transformations + # # cur_optimized_init_actions = # optimized init states # ## robot init states ## + # optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + # try: + # self.robot_init_states.load_state_dict(optimized_init_actions_ckpt['robot_init_states']) + # except: + # pass + # self.robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['robot_glb_rotation']) + # if 'robot_delta_states' in optimized_init_actions_ckpt: + # try: + # self.robot_delta_states.load_state_dict(optimized_init_actions_ckpt['robot_delta_states']) + # except: + # pass + # if 'robot_states' in optimized_init_actions_ckpt: + # self.robot_states.load_state_dict(optimized_init_actions_ckpt['robot_states']) + # # if 'robot_delta_states' ## robot delta states ## + # # self.robot_actions.load_state_dict(optimized_init_actions_ckpt['robot_actions']) + # # self.mano_robot_actuator_friction_forces.load_state_dict(optimized_init_actions_ckpt['robot_actuator_friction_forces']) + # self.robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['robot_glb_trans']) + + + + if self.hand_type == "redmax_hand": + self.maxx_robo_pts = 25. + self.minn_robo_pts = -15. 
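+ # The restore logic above only loads the tables whose keys are actually present in the checkpoint
+ # and wraps the shape-sensitive ones in try/except, so a checkpoint written by an earlier stage
+ # (fewer tables, different point counts) still initializes whatever it can. A minimal sketch of
+ # that pattern, with a hypothetical key/module pair:
+ def _sketch_guarded_restore(ckpt: dict, key: str, module: nn.Module) -> bool:
+     # returns True when the table was restored, False when the key is absent or shapes mismatch
+     if key not in ckpt:
+         return False
+     try:
+         module.load_state_dict(ckpt[key])
+         return True
+     except Exception:
+         return False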
+ self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + self.mult_const_after_cent = 0.5437551664260203 + else: + self.minn_robo_pts = -0.1 + self.maxx_robo_pts = 0.2 + self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + self.mult_const_after_cent = 0.437551664260203 + ## for grab ## + self.mult_const_after_cent = self.mult_const_after_cent / 3. * 0.9507 + + + ### figners for the finger retargeting approach ### + self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + # self.robot_fingers = [3591, 4768, 6358, 10228, 6629, 10566, 5631, 9673] + self.robot_fingers = [6496, 10128, 53, 1623, 3209, 4495, 9523, 8877] + # self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + + if self.hand_type == "redmax_hand": + self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + + + # self.mano_mult_const_after_cent = 3. + + if 'model.mano_mult_const_after_cent' in self.conf: + self.mano_mult_const_after_cent = self.conf['model.mano_mult_const_after_cent'] + + + self.nn_ts = self.nn_timesteps - 1 + + ''' parameters for the real robot hand ''' + + + self.timestep_to_active_mesh = {} + self.timestep_to_expanded_visual_pts = {} + self.timestep_to_active_mesh_opt_ours_sim = {} + + self.timestep_to_active_mesh_w_delta_states = {} + + + ### mano_expanded_actuator_pointact_forces ### + ### timestep_to_actuator_points_vels ### + ### timestep_to_actuator_points_offsets ### + self.mass_point_mass = 1.0 + + self.timestep_to_actuator_points_vels = {} + self.timestep_to_actuator_points_passive_forces = {} + + self.timestep_to_actuator_points_offsets = {} + time_cons = 0.0005 + pointset_expansion_alpha = 0.1 + + # ### + with torch.no_grad(): + cur_vel_damping_coef = self.mano_expanded_actuator_pointact_damping_coefs(torch.zeros((1,), dtype=torch.long).cuda()).squeeze(0) ### velocity damping coef + for cur_ts in range(self.nn_ts * self.mano_nn_substeps): + ''' Get rotations, translations, and actions of the current robot ''' ## mano robot glb rotation + cur_glb_rot = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) # mano glb rot + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) # mano glboal rotations # + cur_glb_trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + ### motivate via states #### + # link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + # self.mano_agent.set_init_states_target_value(link_cur_states) + ### motivate via states #### + + + ### motivate via actions #### + link_cur_actions = self.mano_robot_actions(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.mano_agent.set_actions_and_update_states_v2( link_cur_actions, cur_ts, penetration_forces=None, sampled_visual_pts_joint_idxes=None) + ### motivate via actions #### + + + cur_visual_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=True) + + cur_visual_pts = cur_visual_pts * self.mano_mult_const_after_cent + + + cur_visual_pts_idxes = torch.arange( + start=cur_ts * self.expanded_visual_pts_nn, end=(cur_ts + 1) * self.expanded_visual_pts_nn, dtype=torch.long + ).cuda() + # cur_visual_pts_offset = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes) ## get the idxes ### + + cur_visual_pts_forces = 
self.mano_expanded_actuator_pointact_forces(cur_visual_pts_idxes) ## get vsual pts forces ### + + ## cur visual pts forces ## ## cur visual pts forces ## + cur_visual_pts_forces = cur_visual_pts_forces * pointset_expansion_alpha ## pts forces that combines pts forces nad the alpha ## + + ## --- linear damping here ? ## + cur_visual_pts_accs = cur_visual_pts_forces * time_cons / self.mass_point_mass ### get the mass pont accs ## + if cur_ts == 0: + cur_visual_pts_vels = cur_visual_pts_accs * time_cons + else: + prev_visual_pts_vels = self.timestep_to_actuator_points_vels[cur_ts - 1] ### nn_pts x 3 ## ## nn_pts x 3 ## + cur_visual_pts_accs = cur_visual_pts_accs - cur_vel_damping_coef * prev_visual_pts_vels ### nn_pts x 3 ## ## prev visual pts vels ## ## prev visual pts vels ## + cur_visual_pts_vels = prev_visual_pts_vels + cur_visual_pts_accs * time_cons ## nn_pts x 3 ## ## get the current vels --- cur_vels = prev_vels + cur_acc * time_cons ## + self.timestep_to_actuator_points_vels[cur_ts] = cur_visual_pts_vels.detach().clone() # + cur_visual_pts_offsets = cur_visual_pts_vels * time_cons + # + if cur_ts > 0: + prev_visual_pts_offset = self.timestep_to_actuator_points_offsets[cur_ts - 1] + cur_visual_pts_offsets = prev_visual_pts_offset + cur_visual_pts_offsets ## add to the visual pts offsets ## + self.timestep_to_actuator_points_offsets[cur_ts] = cur_visual_pts_offsets.detach().clone() ### pts offset ### + + + cur_rot = cur_glb_rot + cur_trans = cur_glb_trans + + ### transform by the glboal transformation and the translation ### ## cur visual pts ## ## contiguous() ## + cur_visual_pts = torch.matmul(cur_rot, cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) ## transformed pts ## + + + cur_visual_pts = cur_visual_pts + cur_visual_pts_offsets + + + self.timestep_to_active_mesh[cur_ts] = cur_visual_pts.detach() + self.timestep_to_active_mesh_w_delta_states[cur_ts] = cur_visual_pts.detach() + self.timestep_to_active_mesh_opt_ours_sim[cur_ts] = cur_visual_pts.detach() + + self.iter_step = 0 + + ''' Set redmax robot actions ''' + + + params_to_train_kines = [] + # params_to_train_kines += list(self.mano_robot_glb_rotation.parameters()) + # params_to_train_kines += list(self.mano_robot_glb_trans.parameters()) + # params_to_train_kines += list(self.mano_robot_delta_states.parameters()) + # params_to_train_kines += list(self.mano_expanded_actuator_delta_offset.parameters()) + # params_to_train_kines += list(self.mano_expanded_actuator_friction_forces.parameters()) + # + # params_to_train_kines += list(self.robot_states.parameters()) + # params_to_train_kines += list(self.mano_expanded_actuator_friction_forces.parameters()) + # train_point_set_dyn + params_to_train_kines += list(self.mano_expanded_actuator_delta_offset.parameters()) + # params_to_train_kines += list(self.mano_expanded_actuator_friction_forces.parameters()) + + # mano_expanded_actuator_pointact_forces ## st the forces ## + params_to_train_kines += list(self.mano_expanded_actuator_pointact_forces.parameters()) + + + + # can tray the optimizer + # if self.use_LBFGS: + # self.kines_optimizer = torch.optim.LBFGS(params_to_train_kines, lr=self.learning_rate) + # else: + # self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) + self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) + + + mano_expanded_actuator_delta_offset_ori = self.mano_expanded_actuator_delta_offset.weight.data.clone().detach() + 
mano_expanded_actuator_pointact_forces_ori = self.mano_expanded_actuator_pointact_forces.weight.data.clone().detach() + + print(f"optimize_rules: {self.optimize_rules}") + if self.optimize_rules: + print(f"optimize_rules: {self.optimize_rules}") + params_to_train_kines = [] + params_to_train_kines += list(self.other_bending_network.parameters()) + # self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) + # if self.use_LBFGS: + # self.kines_optimizer = torch.optim.LBFGS(params_to_train_kines, lr=self.learning_rate) + # else: + self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) + # self.kines_optimizer = torch.optim.LBFGS(params_to_train_kines, lr=1e-2) + + + + ## policy and the controller ## + # self.other_bending_network.spring_ks_values.weight.data[:, :] = 0.1395 + # self.other_bending_network.spring_ks_values.weight.data[0, :] = 0.1395 + # self.other_bending_network.spring_ks_values.weight.data[1, :] = 0.00 + # self.other_bending_network.inertia_div_factor.weight.data[:, :] = 10.0 + # self.other_bending_network.inertia_div_factor.weight.data[:, :] = 1000.0 + # self.other_bending_network.inertia_div_factor.weight.data[:, :] = 100.0 + + + # load_redmax_robot_actions_fn = "/data3/datasets/diffsim/neus/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_forces_rule_v13__hieoptrotorules_penalty_friction_ct_01_thres_0001_avg_optrobo_nglbtrans_reg001_manorules_projk_0_nsp_res_sqrstiff_preprojfri_projk_0_optrules_diffh_subs_10_/checkpoints/redmax_robot_actions_ckpt_001900.pth" + # if len(load_redmax_robot_actions_fn) > 0: + # redmax_robot_actions_ckpt = torch.load(load_redmax_robot_actions_fn, map_location=self.device, ) + # self.redmax_robot_actions.load_state_dict(redmax_robot_actions_ckpt['redmax_robot_actions']) + + ''' prepare for keeping the original global rotations, trans, and states ''' + # ori_mano_robot_glb_rot = self.mano_robot_glb_rotation.weight.data.clone() + # ori_mano_robot_glb_trans = self.mano_robot_glb_trans.weight.data.clone() + # ori_mano_robot_delta_states = self.mano_robot_delta_states.weight.data.clone() + + + self.iter_step = 0 + + self.ts_to_dyn_mano_pts_th = {} + + + for i_iter in tqdm(range(100000)): + tot_losses = [] + tot_tracking_loss = [] + + # timestep # + # self.timestep_to_active_mesh = {} + self.timestep_to_posed_active_mesh = {} + self.timestep_to_posed_mano_active_mesh = {} + self.timestep_to_mano_active_mesh = {} + self.timestep_to_corr_mano_pts = {} + # # # # + timestep_to_tot_rot = {} + timestep_to_tot_trans = {} + + # correspondence_pts_idxes = None + + # tot_penetrating_depth_penalty = [] + # tot_ragged_dist = [] + # tot_delta_offset_reg_motion = [] + # tot_dist_mano_visual_ori_to_cur = [] + # tot_reg_loss = [] + # tot_diff_cur_states_to_ref_states = [] + # tot_diff_tangential_forces = [] + penetration_forces = None ### + sampled_visual_pts_joint_idxes = None + + # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + self.timestep_to_raw_active_meshes = {} + self.timestep_to_penetration_points = {} + self.timestep_to_penetration_points_forces = {} + self.joint_name_to_penetration_forces_intermediates = {} + + self.timestep_to_anchored_mano_pts = {} + + ## + self.ts_to_contact_passive_normals = {} + self.ts_to_passive_normals = {} + self.ts_to_passive_pts = {} + self.ts_to_contact_force_d = {} + self.ts_to_penalty_frictions = {} + self.ts_to_penalty_disp_pts = {} + self.ts_to_redmax_states = {} + 
self.ts_to_dyn_mano_pts = {} + + self.timestep_to_actuator_points_passive_forces = {} + # constraitns for states # + # with 17 dimensions on the states; [3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16] + + contact_pairs_set = None + self.contact_pairs_sets = {} + + # redmax_sim.reset(backward_flag = True) + + # tot_grad_qs = [] + + robo_intermediates_states = [] + + tot_penetration_depth = [] + + robo_actions_diff_loss = [] + mano_tracking_loss = [] + + tot_interpenetration_nns = [] + + tot_diff_cur_visual_pts_offsets_with_ori = [] + + tot_summ_grad_mano_expanded_actuator_delta_offset_weight = [] + tot_summ_grad_mano_expanded_actuator_pointact_forces_weight = [] + + # init global transformations ## + # cur_ts_redmax_delta_rotations = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_delta_rotations = torch.tensor([0., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_robot_trans = torch.zeros((3,), dtype=torch.float32).cuda() + + + + for cur_ts in range(self.nn_ts * self.mano_nn_substeps): + # tot_redmax_actions = [] + # actions = {} + + self.free_def_bending_weight = 0.0 + + # mano_robot_glb_rotation, mano_robot_glb_trans, mano_robot_delta_states # + cur_glb_rot = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + cur_glb_rot = cur_glb_rot + cur_ts_redmax_delta_rotations + + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) + # cur_glb_rot_quat = cur_glb_rot.clone() + + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) + cur_glb_trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + # # # cur_ts_delta_rot, cur_ts_redmax_robot_trans # # # + # cur_glb_rot = torch.matmul(cur_ts_delta_rot, cur_glb_rot) + cur_glb_trans = cur_glb_trans + cur_ts_redmax_robot_trans # redmax robot transj## + + # link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + # self.mano_agent.set_init_states_target_value(link_cur_states) + + + ### motivate via states #### + # link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + # self.mano_agent.set_init_states_target_value(link_cur_states) + ### motivate via states #### + + + ### motivate via actions #### + link_cur_actions = self.mano_robot_actions(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.mano_agent.set_actions_and_update_states_v2( link_cur_actions, cur_ts, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes) + ### motivate via actions #### + + + cur_visual_pts, visual_pts_joint_idxes = self.mano_agent.get_init_state_visual_pts(expanded_pts=True, ret_joint_idxes=True) # init state visual pts # + + ## visual pts sampled ## + + ## visual pts sampled ## + cur_dyn_mano_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=False) # init s + + # not taht the scale is differne but would not affect the final result + # expanded_pts # # expanded pts # + cur_visual_pts_idxes = torch.arange( + start=cur_ts * self.expanded_visual_pts_nn, end=(cur_ts + 1) * self.expanded_visual_pts_nn, dtype=torch.long + ).cuda() + # cur_visual_pts_offset = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes) ## get the idxes ### + + + + if self.drive_pointset == "actions": + ''' Act-React-driven point motions ''' + ###### get the point offset via actuation forces and reaction forces ###### + ### actuation forces at this timestep ### 
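+ # The "actions" branch below turns per-point actuation forces into point displacements with a
+ # damped explicit Euler step: acc = F / m (minus a damping term), vel = prev_vel + acc * dt,
+ # offset = prev_offset + vel * dt. The no-grad initialization pass earlier in this method follows
+ # this pattern with accumulation over timesteps; the branch below currently keeps only the
+ # single-step term. A self-contained sketch of the damped update (defined, never called), assuming
+ # the time constant time_cons and unit point mass defined above:
+ def _sketch_damped_point_step(force: torch.Tensor, prev_vel: torch.Tensor, prev_off: torch.Tensor,
+                               dt: float = 0.0005, mass: float = 1.0, damping: float = 0.0):
+     acc = force / mass - damping * prev_vel   # per-point acceleration with linear velocity damping
+     vel = prev_vel + acc * dt                 # integrate velocity
+     off = prev_off + vel * dt                 # integrate displacement (accumulated point offset)
+     return vel, off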
+ cur_visual_pts_forces = self.mano_expanded_actuator_pointact_forces(cur_visual_pts_idxes) * 1e7 ## get vsual pts forces ### + + # if cur_ts > 0 and (cur_ts - 1 in self.timestep_to_actuator_points_passive_forces): + # cur_visual_pts_passive_forces = self.timestep_to_actuator_points_passive_forces[cur_ts - 1] ## nn_visual_pts x 3 ## + # cur_visual_pts_forces = cur_visual_pts_forces + cur_visual_pts_passive_forces ## two forces ### + + cur_visual_pts_forces = cur_visual_pts_forces * pointset_expansion_alpha + + ## --- linear damping here ? ## + cur_visual_pts_accs = cur_visual_pts_forces / self.mass_point_mass ### get the mass pont accs ## + if cur_ts == 0: + cur_visual_pts_vels = cur_visual_pts_accs * time_cons + else: # visual pts acc -> visual pts vels # + # prev_visual_pts_vels = self.timestep_to_actuator_points_vels[cur_ts - 1] ### nn_pts x 3 ## + # cur_visual_pts_accs = cur_visual_pts_accs - cur_vel_damping_coef * prev_visual_pts_vels ### nn_pts x 3 ## + # cur_visual_pts_vels = prev_visual_pts_vels + cur_visual_pts_accs * time_cons ## nn_pts x 3 ## + cur_visual_pts_vels = cur_visual_pts_accs * time_cons + self.timestep_to_actuator_points_vels[cur_ts] = cur_visual_pts_vels.detach().clone() + cur_visual_pts_offsets = cur_visual_pts_vels * time_cons + # + # if cur_ts > 0: + # prev_visual_pts_offset = self.timestep_to_actuator_points_offsets[cur_ts - 1] + # cur_visual_pts_offsets = prev_visual_pts_offset + cur_visual_pts_offsets + + + # train_pointset_acts_via_deltas, diff_cur_visual_pts_offsets_with_ori + # cur_visual_pts_offsets_from_delta = mano_expanded_actuator_delta_offset_ori[cur_ts] + # cur_visual_pts_offsets_from_delta = self.mano_expanded_actuator_delta_offset.weight.data[cur_ts].detach() + cur_visual_pts_offsets_from_delta = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes).detach() + ## + diff_cur_visual_pts_offsets_with_ori = torch.sum((cur_visual_pts_offsets - cur_visual_pts_offsets_from_delta) ** 2, dim=-1).mean() ## mean of the avg offset differences ## + tot_diff_cur_visual_pts_offsets_with_ori.append(diff_cur_visual_pts_offsets_with_ori.item()) + + + self.timestep_to_actuator_points_offsets[cur_ts] = cur_visual_pts_offsets.detach().clone() + ###### get the point offset via actuation forces and reaction forces ###### + ''' Act-React-driven point motions ''' + elif self.drive_pointset == "states": + ''' Offset-driven point motions ''' + ## points should be able to manipulate the object accordingly ### + ### we should avoid the penetrations between points and the object ### + ### we should restrict relative point displacement / the point offsets at each timestep to relatively small values ### + ## -> so we have three losses for the delta offset optimization ## + cur_visual_pts_offsets = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes) + # cur_visual_pts_offsets = cur_visual_pts_offsets * 10 + self.timestep_to_actuator_points_offsets[cur_ts] = cur_visual_pts_offsets.detach().clone() + ''' Offset-driven point motions ''' + + else: + raise ValueError(f"Unknown drive_pointset: {self.drive_pointset}") + + + + ### add forces + cur_visual_pts_friction_forces = self.mano_expanded_actuator_friction_forces(cur_visual_pts_idxes) + + ### transform the visual pts ### ## fricton forces ## + # cur_visual_pts = (cur_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + # cur_visual_pts = cur_visual_pts * 2. - 1. 
# cur visual pts # + # cur_visual_pts = cur_visual_pts * self.mult_const_after_cent # # mult_const # + + #### cur visual pts #### + cur_visual_pts = cur_visual_pts * self.mano_mult_const_after_cent + cur_dyn_mano_pts = cur_dyn_mano_pts * self.mano_mult_const_after_cent + + cur_rot = cur_glb_rot + cur_trans = cur_glb_trans + + timestep_to_tot_rot[cur_ts] = cur_rot.detach() + timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + + cur_visual_pts = torch.matmul(cur_rot, cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) + cur_dyn_mano_pts = torch.matmul(cur_rot, cur_dyn_mano_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) + + ## update the visual points positions ## + cur_visual_pts = cur_visual_pts + cur_visual_pts_offsets + + # diff_redmax_visual_pts_with_ori_visual_pts = torch.sum( + # (cur_visual_pts[sampled_verts_idxes] - self.timestep_to_active_mesh_opt_ours_sim[cur_ts].detach()) ** 2, dim=-1 + # ) + # diff_redmax_visual_pts_with_ori_visual_pts = diff_redmax_visual_pts_with_ori_visual_pts.mean() + + # train the friction net? how to train the friction net? # + # if self.use_mano_hand_for_test: + # self.timestep_to_active_mesh[cur_ts] = self.rhand_verts[cur_ts] # .detach() + # else: + + ### timesttep to active mesh ### + # timestep_to_anchored_mano_pts, timestep_to_raw_active_meshes # + self.timestep_to_active_mesh[cur_ts] = cur_visual_pts + self.timestep_to_raw_active_meshes[cur_ts] = cur_visual_pts.detach().cpu().numpy() + self.ts_to_dyn_mano_pts[cur_ts] = cur_dyn_mano_pts.detach().cpu().numpy() + self.ts_to_dyn_mano_pts_th[cur_ts] = cur_dyn_mano_pts + + ## ragged dist ## + # # ragged_dist = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # # dist_transformed_expanded_visual_pts_to_ori_visual_pts = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # # diff_cur_states_to_ref_states = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # cur_robo_glb_rot = self.robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + # cur_robo_glb_rot = cur_robo_glb_rot / torch.clamp(torch.norm(cur_robo_glb_rot, dim=-1, p=2), min=1e-7) + # cur_robo_glb_rot = dyn_model_act.quaternion_to_matrix(cur_robo_glb_rot) # mano glboal rotations # + # cur_robo_glb_trans = self.robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + # robo_links_states = self.robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + # self.robot_agent.set_init_states_target_value(robo_links_states) + # cur_robo_visual_pts = self.robot_agent.get_init_state_visual_pts() + + + # ### transform the visual pts ### + # cur_robo_visual_pts = (cur_robo_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + # cur_robo_visual_pts = cur_robo_visual_pts * 2. -1. 
+ # cur_robo_visual_pts = cur_robo_visual_pts * self.mult_const_after_cent # mult_const # + + + # cur_rot = cur_robo_glb_rot + # cur_trans = cur_glb_trans + + # timestep_to_tot_rot[cur_ts] = cur_rot.detach() + # timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + + # ### transform by the glboal transformation and the translation ### + # cur_robo_visual_pts = torch.matmul(cur_robo_glb_rot, cur_robo_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_robo_glb_trans.unsqueeze(0) ## transformed pts ## + + + # self.timestep_to_active_mesh[cur_ts] = cur_robo_visual_pts.clone() # robo visual pts # + + # if self.hand_type == 'redmax_hand': + # cur_robo_visual_pts = cur_robo_visual_pts[sampled_verts_idxes] # + + + + self.free_def_bending_weight = 0.0 + # self.free_def_bending_weight = 0.5 + + # if i_iter == 0 and cur_ts == 0: ## for the tiantianquan sequence ## + # dist_robot_pts_to_mano_pts = torch.sum( + # (cur_robo_visual_pts.unsqueeze(1) - cur_visual_pts.unsqueeze(0)) ** 2, dim=-1 + # ) + # minn_dist_robot_pts_to_mano_pts, correspondence_pts_idxes = torch.min(dist_robot_pts_to_mano_pts, dim=-1) + # minn_dist_robot_pts_to_mano_pts = torch.sqrt(minn_dist_robot_pts_to_mano_pts) + # # dist_smaller_than_thres = minn_dist_robot_pts_to_mano_pts < 0.01 + # dist_smaller_than_thres = minn_dist_robot_pts_to_mano_pts < 0.005 + + # corr_correspondence_pts = cur_visual_pts[correspondence_pts_idxes] + + # dist_corr_correspondence_pts_to_mano_visual_pts = torch.sum( + # (corr_correspondence_pts.unsqueeze(1) - cur_visual_pts.unsqueeze(0)) ** 2, dim=-1 + # ) + # dist_corr_correspondence_pts_to_mano_visual_pts = torch.sqrt(dist_corr_correspondence_pts_to_mano_visual_pts) + # minn_dist_to_corr_pts, _ = torch.min(dist_corr_correspondence_pts_to_mano_visual_pts, dim=0) + # anchored_mano_visual_pts = minn_dist_to_corr_pts < 0.005 + + # corr_correspondence_pts = cur_visual_pts[correspondence_pts_idxes] + # # corr_robo = cur_visual_pts[sampled_verts_idxes] + # cd_robo_pts_to_corr_mano_pts = torch.sum( + # (cur_robo_visual_pts.unsqueeze(1) - cur_visual_pts[anchored_mano_visual_pts].unsqueeze(0)) ** 2, dim=-1 + # ) + + # self.timestep_to_anchored_mano_pts[cur_ts] = cur_visual_pts[anchored_mano_visual_pts].detach().cpu().numpy() + + # cd_robo_to_mano, _ = torch.min(cd_robo_pts_to_corr_mano_pts, dim=-1) + # cd_mano_to_robo, _ = torch.min(cd_robo_pts_to_corr_mano_pts, dim=0) + # # diff_robo_to_corr_mano_pts = cd_mano_to_robo.mean() + # diff_robo_to_corr_mano_pts = cd_robo_to_mano.mean() + + # mano_fingers = self.rhand_verts[cur_ts][self.mano_fingers] + + # if self.hand_type == 'redmax_hand': + # # sampled_verts_idxes + # robo_fingers = cur_robo_visual_pts[sampled_verts_idxes][self.robot_fingers] + # else: + # robo_fingers = cur_robo_visual_pts[self.robot_fingers] + + # pure finger tracking ## + # pure_finger_tracking_loss = torch.sum((mano_fingers - robo_fingers) ** 2) + + + # diff_robo_to_corr_mano_pts_finger_tracking = torch.sum( + # (corr_correspondence_pts - cur_robo_visual_pts) ** 2, dim=-1 + # ) + # diff_robo_to_corr_mano_pts_finger_tracking = diff_robo_to_corr_mano_pts_finger_tracking[dist_smaller_than_thres] + # diff_robo_to_corr_mano_pts_finger_tracking = diff_robo_to_corr_mano_pts_finger_tracking.mean() + + # loss_finger_tracking = diff_robo_to_corr_mano_pts * self.finger_cd_loss_coef + pure_finger_tracking_loss * 0.5 # + diff_robo_to_corr_mano_pts_finger_tracking * self.finger_tracking_loss_coef + + ## TODO: add the glboal retargeting using fingers before conducting this 
approach + + + # def evaluate_tracking_loss(): + # self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=False, contact_pairs_set=self.contact_pairs_set) + + # ## + # # init states # + # # cur_ts % mano_nn_substeps == 0: + # if (cur_ts + 1) % mano_nn_substeps == 0: + # cur_passive_big_ts = cur_ts // mano_nn_substeps + # in_func_tracking_loss = self.compute_loss_optimized_transformations_v2(cur_ts + 1, cur_passive_big_ts + 1) + # # tot_tracking_loss.append(tracking_loss.detach().cpu().item()) + # else: + # in_func_tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # return in_func_tracking_loss + + + ''' cache contact pair set for exporting contact information ''' + if contact_pairs_set is None: + self.contact_pairs_set = None + else: ## contact pairs set ## + self.contact_pairs_set = contact_pairs_set.copy() + + + ts_to_act_mesh = self.timestep_to_active_mesh + # ts_to_act_mesh = self.ts_to_dyn_mano_pts_th + # self.timestep_to_active_mesh = self.ts_to_dyn_mano_pts_th + + ## to passive mesh normals; to friction forces ## + # print(self.timestep_to_active_mesh[cur_ts].size(), cur_visual_pts_friction_forces.size()) ## to active mesh ## + # contact_pairs_set = self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=ts_to_act_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=self.fix_obj, contact_pairs_set=contact_pairs_set, pts_frictional_forces=cur_visual_pts_friction_forces) + + if not self.fix_obj: + ### then usin gthe other bending network to forward the simulation ### + contact_pairs_set = self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=ts_to_act_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=self.fix_obj, contact_pairs_set=contact_pairs_set) + + ### train with force to active ## + # if self.train_with_forces_to_active and (not self.use_mano_inputs): + # # penetration_forces # + # if torch.sum(self.other_bending_network.penetrating_indicator.float()) > 0.5: + # net_penetrating_forces = self.other_bending_network.penetrating_forces + # net_penetrating_points = self.other_bending_network.penetrating_points + + + # # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + # self.timestep_to_penetration_points[cur_ts] = net_penetrating_points.detach().cpu().numpy() + # self.timestep_to_penetration_points_forces[cur_ts] = net_penetrating_forces.detach().cpu().numpy() + + + # ### transform the visual pts ### + # # cur_visual_pts = (cur_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + # # cur_visual_pts = cur_visual_pts * 2. - 1. 
+ # # cur_visual_pts = cur_visual_pts * self.mult_const_after_cent # mult_const # + + # # sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[finger_sampled_idxes][self.other_bending_network.penetrating_indicator] + + # sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[self.other_bending_network.penetrating_indicator] + + # ## from net penetration forces to to the + + # ### get the passvie force for each point ## ## + # self.timestep_to_actuator_points_passive_forces[cur_ts] = self.other_bending_network.penetrating_forces_allpts.detach().clone() ## for + + # net_penetrating_forces = torch.matmul( + # cur_rot.transpose(1, 0), net_penetrating_forces.transpose(1, 0) + # ).transpose(1, 0) + # # net_penetrating_forces = net_penetrating_forces / self.mult_const_after_cent + # # net_penetrating_forces = net_penetrating_forces / 2 + # # net_penetrating_forces = net_penetrating_forces * self.extent_robo_pts + + # net_penetrating_forces = (1.0 - pointset_expansion_alpha) * net_penetrating_forces + + # net_penetrating_points = torch.matmul( + # cur_rot.transpose(1, 0), (net_penetrating_points - cur_trans.unsqueeze(0)).transpose(1, 0) + # ).transpose(1, 0) + # # net_penetrating_points = net_penetrating_points / self.mult_const_after_cent + # # net_penetrating_points = (net_penetrating_points + 1.) / 2. # penetrating points # + # # net_penetrating_points = (net_penetrating_points * self.extent_robo_pts) + self.minn_robo_pts + + # penetration_forces = net_penetrating_forces + # # link_maximal_contact_forces = torch.zeros((redmax_ndof_r, 6), dtype=torch.float32).cuda() + # # + # else: + penetration_forces = None + sampled_visual_pts_joint_idxes = None + # link_maximal_contact_forces = torch.zeros((redmax_ndof_r, 6), dtype=torch.float32).cuda() + ''' the bending network still have this property and we can get force values here for the expanded visual points ''' + self.timestep_to_actuator_points_passive_forces[cur_ts] = self.other_bending_network.penetrating_forces_allpts.detach().clone() + + # if contact_pairs_set is not None: + # self.contact_pairs_sets[cur_ts] = contact_pairs_set.copy() + + # # contact force d ## ts to the passive normals ## + # self.ts_to_contact_passive_normals[cur_ts] = self.other_bending_network.tot_contact_passive_normals.detach().cpu().numpy() + # self.ts_to_passive_pts[cur_ts] = self.other_bending_network.cur_passive_obj_verts.detach().cpu().numpy() + # self.ts_to_passive_normals[cur_ts] = self.other_bending_network.cur_passive_obj_ns.detach().cpu().numpy() + # self.ts_to_contact_force_d[cur_ts] = self.other_bending_network.contact_force_d.detach().cpu().numpy() + # self.ts_to_penalty_frictions[cur_ts] = self.other_bending_network.penalty_friction_tangential_forces.detach().cpu().numpy() + # if self.other_bending_network.penalty_based_friction_forces is not None: + # self.ts_to_penalty_disp_pts[cur_ts] = self.other_bending_network.penalty_based_friction_forces.detach().cpu().numpy() + + + + # if self.optimize_with_intermediates: + # tracking_loss = self.compute_loss_optimized_transformations(cur_ts + 1) # + # else: + # tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + + ## expanded points set dyn ## + + # # cur_ts % mano_nn_substeps == 0: # + if (cur_ts + 1) % mano_nn_substeps == 0: + cur_passive_big_ts = cur_ts // mano_nn_substeps + ## compute optimized transformations ## + tracking_loss = self.compute_loss_optimized_transformations_v2(cur_ts + 1, cur_passive_big_ts + 1) + tot_tracking_loss.append(tracking_loss.detach().cpu().item()) + else: + 
tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + + # # hand_tracking_loss = torch.sum( ## delta states? ## + # # (self.timestep_to_active_mesh_w_delta_states[cur_ts] - cur_visual_pts) ** 2, dim=-1 + # # ) + # # hand_tracking_loss = hand_tracking_loss.mean() + + + # # loss = tracking_loss + self.other_bending_network.penetrating_depth_penalty * self.penetrating_depth_penalty_coef + # # diff_redmax_visual_pts_with_ori_visual_pts.backward() + ### penetrating depth penalty ### penetrating depth penatly ### ## penetration depth ## + penetraton_penalty = self.other_bending_network.penetrating_depth_penalty * self.penetrating_depth_penalty_coef + + tot_penetration_depth.append(penetraton_penalty.detach().item()) + + # smaller_than_zero_level_set_indicator + cur_interpenetration_nns = self.other_bending_network.smaller_than_zero_level_set_indicator.float().sum() + + tot_interpenetration_nns.append(cur_interpenetration_nns) + + # diff_hand_tracking = torch.zeros((1,), dtype=torch.float32).cuda().mean() ## + + + # # kinematics_proj_loss = kinematics_trans_diff + penetraton_penalty + diff_hand_tracking * self.diff_hand_tracking_coef + tracking_loss + + # # if self.use_mano_hand_for_test: ## only the kinematics mano hand is optimized here ## + # # kinematics_proj_loss = tracking_loss + + # # kinematics_proj_loss = hand_tracking_loss * 1e2 ## 1e2 and the 1e2 ## + + # kinematics_proj_loss = diff_hand_tracking * self.diff_hand_tracking_coef + tracking_loss + penetraton_penalty + + # kinematics_proj_loss = loss_finger_tracking # + tracking_loss + penetraton_penalty + + reg_delta_offset_loss = torch.sum( + (mano_expanded_actuator_delta_offset_ori - self.mano_expanded_actuator_delta_offset.weight.data) ** 2, dim=-1 + ) + reg_delta_offset_loss = reg_delta_offset_loss.mean() + + reg_act_force_loss = torch.sum( + (mano_expanded_actuator_pointact_forces_ori - self.mano_expanded_actuator_pointact_forces.weight.data) ** 2, dim=-1 + ) + reg_act_force_loss = reg_act_force_loss.mean() + + # motion_reg_loss_coef ## delta offset loss and the act force loss ### + reg_delta_offset_loss = (reg_delta_offset_loss + reg_act_force_loss) * self.motion_reg_loss_coef + + + ### tracking loss and the penetration penalty ### + ## tracking; penetrations; delta offset ## + + # train_pointset_acts_via_deltas, diff_cur_visual_pts_offsets_with_ori + + if self.fix_obj: + kinematics_proj_loss = penetraton_penalty + reg_delta_offset_loss * 100 + if self.train_pointset_acts_via_deltas and self.drive_pointset == "actions": + kinematics_proj_loss = reg_delta_offset_loss * 100 + diff_cur_visual_pts_offsets_with_ori * 100 + else: + ## + # kinematics_proj_loss = tracking_loss + penetraton_penalty + reg_delta_offset_loss * 100 + kinematics_proj_loss = tracking_loss + reg_delta_offset_loss * 100 + if self.train_pointset_acts_via_deltas: ## via deltas ## + kinematics_proj_loss = kinematics_proj_loss + diff_cur_visual_pts_offsets_with_ori + + ### kinematics proj loss ### + loss = kinematics_proj_loss + + + self.kines_optimizer.zero_grad() + + try: + + # kinematics_proj_loss = kinematics_proj_loss * 1e15 + if self.optimize_rules: ## optimize rules ## + # kinematics_proj_loss = kinematics_proj_loss * 10 + kinematics_proj_loss = kinematics_proj_loss * 100 + + kinematics_proj_loss.backward(retain_graph=True) + + + ## mano_expanded_actuator_pointact_forces ## --> get the gradient of the pointact_forces here ## + if self.mano_expanded_actuator_pointact_forces.weight.grad is not None: + 
grad_mano_expanded_actuator_pointact_forces_weight = self.mano_expanded_actuator_pointact_forces.weight.grad.data + summ_grad_mano_expanded_actuator_pointact_forces_weight = torch.sum(grad_mano_expanded_actuator_pointact_forces_weight).item() + tot_summ_grad_mano_expanded_actuator_pointact_forces_weight.append(summ_grad_mano_expanded_actuator_pointact_forces_weight) + # print(f"i_iter: {i_iter}, cur_ts: {cur_ts}, grad_pointact_forces: {summ_grad_mano_expanded_actuator_pointact_forces_weight}") ## forces weight -> expanded forces weight ## + elif self.mano_expanded_actuator_delta_offset.weight.grad is not None: + grad_mano_expanded_actuator_delta_offset_weight = self.mano_expanded_actuator_delta_offset.weight.grad.data + summ_grad_mano_expanded_actuator_delta_offset_weight = torch.sum(grad_mano_expanded_actuator_delta_offset_weight).item() + tot_summ_grad_mano_expanded_actuator_delta_offset_weight.append(summ_grad_mano_expanded_actuator_delta_offset_weight) + # print(f"i_iter: {i_iter}, cur_ts: {cur_ts}, grad_pointact_offset: {summ_grad_mano_expanded_actuator_delta_offset_weight}") + + self.kines_optimizer.step() + except: + pass + + + ### get the gradient information ### + # if self.iter_step > 1239 and self.mano_expanded_actuator_delta_offset.weight.grad is not None: + # grad_mano_expanded_actuator_delta_offset = self.mano_expanded_actuator_delta_offset.weight.grad.data + # grad_mano_expanded_actuator_delta_offset = torch.sum(grad_mano_expanded_actuator_delta_offset) + # print(f"iter_step: {self.iter_step}, grad_offset: {grad_mano_expanded_actuator_delta_offset}") + # if self.iter_step > 1239 and self.mano_expanded_actuator_friction_forces.weight.grad is not None: + # grad_mano_expanded_actuator_friction_forces = self.mano_expanded_actuator_friction_forces.weight.grad.data + # grad_mano_expanded_actuator_friction_forces = torch.sum(grad_mano_expanded_actuator_friction_forces) + # print(f"iter_step: {self.iter_step}, grad_friction_forces: {grad_mano_expanded_actuator_friction_forces}") + + # if self.iter_step > 1239 and cur_visual_pts.grad is not None: + # grad_cur_visual_pts = torch.sum(cur_visual_pts.grad.data) + # print(f"iter_step: {self.iter_step}, grad_cur_visual_pts: {grad_cur_visual_pts}") + + + + # if self.use_LBFGS: + # self.kines_optimizer.step(evaluate_tracking_loss) # + # else: + # self.kines_optimizer.step() + + # + # tracking_loss.backward(retain_graph=True) + # if self.use_LBFGS: + # self.other_bending_network.reset_timestep_to_quantities(cur_ts) + + + # robot_states_actions_diff_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + robo_actions_diff_loss.append(reg_delta_offset_loss.item()) + + + tot_losses.append(loss.detach().item()) + # tot_penalty_dot_forces_normals.append(cur_penalty_dot_forces_normals.detach().item()) + # tot_penalty_friction_constraint.append(cur_penalty_friction_constraint.detach().item()) + + self.iter_step += 1 + + self.writer.add_scalar('Loss/loss', loss, self.iter_step) + + # if self.iter_step % self.save_freq == 0: + if self.iter_step % 2000 == 0: + self.save_checkpoint() + self.update_learning_rate() + + torch.cuda.empty_cache() + + + ''' Get nn_forward_ts and backward through the actions for updating ''' + tot_losses = sum(tot_losses) / float(len(tot_losses)) + if len(tot_tracking_loss) > 0: + tot_tracking_loss = sum(tot_tracking_loss) / float(len(tot_tracking_loss)) + else: + tot_tracking_loss = 0.0 + if len(tot_penetration_depth) > 0: + tot_penetration_depth = sum(tot_penetration_depth) / float(len(tot_penetration_depth)) + else: + 
tot_penetration_depth = 0.0 + robo_actions_diff_loss = sum(robo_actions_diff_loss) / float(len(robo_actions_diff_loss)) + if len(mano_tracking_loss) > 0: + mano_tracking_loss = sum(mano_tracking_loss) / float(len(mano_tracking_loss)) + else: + mano_tracking_loss = 0.0 + + if len(tot_diff_cur_visual_pts_offsets_with_ori) > 0: + diff_cur_visual_pts_offsets_with_ori = sum(tot_diff_cur_visual_pts_offsets_with_ori) / float(len(tot_diff_cur_visual_pts_offsets_with_ori)) + else: + diff_cur_visual_pts_offsets_with_ori = 0.0 + + if len(tot_summ_grad_mano_expanded_actuator_pointact_forces_weight) > 0: + summ_grad_mano_expanded_actuator_pointact_forces_weight = sum(tot_summ_grad_mano_expanded_actuator_pointact_forces_weight) / float(len(tot_summ_grad_mano_expanded_actuator_pointact_forces_weight)) + else: + summ_grad_mano_expanded_actuator_pointact_forces_weight = 0.0 + + if len(tot_summ_grad_mano_expanded_actuator_delta_offset_weight) > 0: + summ_grad_mano_expanded_actuator_delta_offset_weight = sum(tot_summ_grad_mano_expanded_actuator_delta_offset_weight) / float(len(tot_summ_grad_mano_expanded_actuator_delta_offset_weight)) + else: + summ_grad_mano_expanded_actuator_delta_offset_weight = 0.0 + + avg_tot_interpenetration_nns = float(sum(tot_interpenetration_nns) ) / float(len(tot_interpenetration_nns)) + + + ##### logging the losses information ##### + cur_log_sv_str = 'iter:{:8>d} loss = {} tracking_loss = {} mano_tracking_loss = {} penetration_depth = {} actions_diff_loss = {} diff_cur_visual_pts_offsets_with_ori = {} penetration = {} / {} lr={}'.format(self.iter_step, tot_losses, tot_tracking_loss, mano_tracking_loss, tot_penetration_depth, robo_actions_diff_loss, diff_cur_visual_pts_offsets_with_ori, avg_tot_interpenetration_nns, self.timestep_to_active_mesh[0].size(0), self.optimizer.param_groups[0]['lr']) + + print(cur_log_sv_str) + ##### logging the losses information ##### + + if i_iter % self.report_freq == 0: + logs_sv_fn = os.path.join(self.base_exp_dir, 'log.txt') + + cur_log_sv_str = 'iter:{:8>d} loss = {} tracking_loss = {} mano_tracking_loss = {} penetration_depth = {} actions_diff_loss = {} pointact_forces_grad = {} point_delta_offset_grad = {} penetration = {} / {} lr={}'.format(self.iter_step, tot_losses, tot_tracking_loss, mano_tracking_loss, tot_penetration_depth, robo_actions_diff_loss, summ_grad_mano_expanded_actuator_pointact_forces_weight, summ_grad_mano_expanded_actuator_delta_offset_weight, avg_tot_interpenetration_nns, self.timestep_to_active_mesh[0].size(0), self.optimizer.param_groups[0]['lr']) + + print(cur_log_sv_str) + # ''' Dump to the file ''' + # with open(logs_sv_fn, 'a') as log_file: + # log_file.write(cur_log_sv_str + '\n') + + + # if i_iter % self.val_mesh_freq == 0: + # self.validate_mesh_robo_g() + # self.validate_mesh_robo() + # self.validate_contact_info_robo() + + + torch.cuda.empty_cache() + + + ts_to_hand_obj_verts = self.get_hand_obj_infos() + return ts_to_hand_obj_verts + + ''' GRAB clips --- expanded point set and expanded points for retargeting ''' + def train_point_set_retargeting(self, ): + ## ## GRAB clips ## ## + # states -> the robot actions --- in this sim ## + # chagne # # mano notjmano but the mano ---> optimize the mano delta states? 
# + self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs')) + self.update_learning_rate() # update learning rrate # + # robot actions ## + + nn_timesteps = self.timestep_to_passive_mesh.size(0) + self.nn_timesteps = nn_timesteps + num_steps = self.nn_timesteps + + # load # # load the robot hand # # load + ''' Load the robot hand ''' + model_path = self.conf['model.sim_model_path'] # + # robot_agent = dyn_model_act.RobotAgent(xml_fn=model_path, args=None) + self.hand_type = "redmax_hand" if "redmax" in model_path else "shadow_hand" + # self.hand_type = "redmax_hand" + if model_path.endswith(".xml"): + # self.hand_type = "redmax_hand" + robot_agent = dyn_model_act.RobotAgent(xml_fn=model_path, args=None) + else: + # self.hand_type = "shadow_hand" + robot_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path, args=None) + self.robot_agent = robot_agent + + robo_init_verts = self.robot_agent.robot_pts + if self.hand_type == "redmax_hand": + redmax_sampled_verts_idxes_fn = "redmax_robo_sampled_verts_idxes_new.npy" + redmax_sampled_verts_idxes_fn = os.path.join("assets", redmax_sampled_verts_idxes_fn) + if os.path.exists(redmax_sampled_verts_idxes_fn): + sampled_verts_idxes = np.load(redmax_sampled_verts_idxes_fn) + sampled_verts_idxes = torch.from_numpy(sampled_verts_idxes).long().cuda() + else: + n_sampling = 1000 + pts_fps_idx = data_utils.farthest_point_sampling(robo_init_verts.unsqueeze(0), n_sampling=n_sampling) + sampled_verts_idxes = pts_fps_idx + np.save(redmax_sampled_verts_idxes_fn, sampled_verts_idxes.detach().cpu().numpy()) + self.sampled_verts_idxes = sampled_verts_idxes + + + self.robo_hand_faces = self.robot_agent.robot_faces + + + ## sampled verts idxes ## + # self.sampled_verts_idxes = sampled_verts_idxes + ''' Load the robot hand ''' + + + ''' Load robot hand in DiffHand simulator ''' + # redmax_sim = redmax.Simulation(model_path) + # redmax_sim.reset(backward_flag = True) # redmax_sim -- # # --- robot hand mani asset? 
## ## robot hand mani asse ## + # # ### redmax_ndof_u, redmax_ndof_r ### # + # redmax_ndof_u = redmax_sim.ndof_u + # redmax_ndof_r = redmax_sim.ndof_r + # redmax_ndof_m = redmax_sim.ndof_m + + + ''' Load the mano hand model ''' + model_path_mano = self.conf['model.mano_sim_model_path'] + # mano_agent = dyn_model_act_mano_deformable.RobotAgent(xml_fn=model_path_mano) # robot # + mano_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path_mano) ## model path mano ## # + self.mano_agent = mano_agent + # ''' Load the mano hand ''' + self.mano_agent.active_robot.expand_visual_pts() + # self.robo_hand_faces = self.mano_agent.robot_faces + + # if self.use_mano_hand_for_test: ## use + # self.robo_hand_faces = self.hand_faces + ## + + ## start expanding the current visual pts ## + print(f"Start expanding current visual pts...") + expanded_visual_pts = self.mano_agent.active_robot.expand_visual_pts() + + self.expanded_visual_pts_nn = expanded_visual_pts.size(0) + + ## expanded_visual_pts of the expanded visual pts # + expanded_visual_pts_npy = expanded_visual_pts.detach().cpu().numpy() + expanded_visual_pts_sv_fn = "expanded_visual_pts.npy" + print(f"Saving expanded visual pts with shape {expanded_visual_pts.size()} to {expanded_visual_pts_sv_fn}") + np.save(expanded_visual_pts_sv_fn, expanded_visual_pts_npy) # + + + nn_substeps = 10 + + mano_nn_substeps = 1 + # mano_nn_substeps = 10 # + self.mano_nn_substeps = mano_nn_substeps + + # self.hand_faces # + + + ''' Expnad the current visual points ''' + # expanded_visual_pts = self.mano_agent.active_robot.expand_visual_pts() + # self.expanded_visual_pts_nn = expanded_visual_pts.size(0) + # expanded_visual_pts_npy = expanded_visual_pts.detach().cpu().numpy() + # expanded_visual_pts_sv_fn = "expanded_visual_pts.npy" + # np.save(expanded_visual_pts_sv_fn, expanded_visual_pts_npy) + # # ''' Expnad the current visual points ''' # # differentiate through the simulator? # # + + + params_to_train = [] # params to train # + ### robot_actions, robot_init_states, robot_glb_rotation, robot_actuator_friction_forces, robot_glb_trans ### + + ''' Define MANO robot actions, delta_states, init_states, frictions, and others ''' + self.mano_robot_actions = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_actions.weight) + # params_to_train += list(self.robot_actions.parameters()) + + self.mano_robot_delta_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_delta_states.weight) + # params_to_train += list(self.robot_delta_states.parameters()) + + self.mano_robot_init_states = nn.Embedding( + num_embeddings=1, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_init_states.weight) + # params_to_train += list(self.robot_init_states.parameters()) + + self.mano_robot_glb_rotation = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=4 + ).cuda() + self.mano_robot_glb_rotation.weight.data[:, 0] = 1. + self.mano_robot_glb_rotation.weight.data[:, 1:] = 0. 
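+ # NOTE (added explanatory comment): each row of mano_robot_glb_rotation stores one
+ # (w, x, y, z) quaternion per substep; writing w = 1 and x = y = z = 0 initializes every
+ # timestep to the identity rotation. A minimal sketch of how one entry is consumed later:
+ #   q = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0)
+ #   q = q / torch.clamp(torch.norm(q, dim=-1, p=2), min=1e-7)   # guard against zero norm
+ #   R = dyn_model_act.quaternion_to_matrix(q)                   # 3x3 rotation applied to the visual pts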
+ # params_to_train += list(self.robot_glb_rotation.parameters()) + + + self.mano_robot_glb_trans = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_robot_glb_trans.weight) + # params_to_train += list(self.robot_glb_trans.parameters()) + + self.mano_robot_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_states.weight) + self.mano_robot_states.weight.data[0, :] = self.mano_robot_init_states.weight.data[0, :].clone() + + + + self.mano_expanded_actuator_delta_offset = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_delta_offset.weight) + # params_to_train += list(self.mano_expanded_actuator_delta_offset.parameters()) + + + # mano_expanded_actuator_friction_forces, mano_expanded_actuator_delta_offset # + self.mano_expanded_actuator_friction_forces = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_friction_forces.weight) + + + ##### expanded actuators pointact jforces ### + self.mano_expanded_actuator_pointact_forces = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_pointact_forces.weight) + + self.mano_expanded_actuator_pointact_damping_coefs = nn.Embedding( + num_embeddings=10, embedding_dim=1 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_pointact_damping_coefs.weight) + + + + ## load mano states and actions ## + if 'model.load_optimized_init_actions' in self.conf and len(self.conf['model.load_optimized_init_actions']) > 0: + print(f"[MANO] Loading optimized init transformations from {self.conf['model.load_optimized_init_actions']}") + cur_optimized_init_actions_fn = self.conf['model.load_optimized_init_actions'] + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + if 'mano_robot_init_states' in optimized_init_actions_ckpt: + self.mano_robot_init_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_init_states']) + if 'mano_robot_glb_rotation' in optimized_init_actions_ckpt: + self.mano_robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_rotation']) + + if 'mano_robot_states' in optimized_init_actions_ckpt: + self.mano_robot_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_states']) + + if 'mano_robot_actions' in optimized_init_actions_ckpt: + self.mano_robot_actions.load_state_dict(optimized_init_actions_ckpt['mano_robot_actions']) + + self.mano_robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_trans']) + if 'expanded_actuator_friction_forces' in optimized_init_actions_ckpt: + try: + self.mano_expanded_actuator_friction_forces.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_friction_forces']) + except: + pass + #### actuator point forces and actuator point offsets #### + if 'mano_expanded_actuator_delta_offset' in optimized_init_actions_ckpt: + print(f"loading mano_expanded_actuator_delta_offset...") + self.mano_expanded_actuator_delta_offset.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_delta_offset']) + if 'mano_expanded_actuator_pointact_forces' in optimized_init_actions_ckpt: + 
self.mano_expanded_actuator_pointact_forces.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_pointact_forces']) + if 'mano_expanded_actuator_pointact_damping_coefs' in optimized_init_actions_ckpt: + self.mano_expanded_actuator_pointact_damping_coefs.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_pointact_damping_coefs']) + + + + + ''' parameters for the real robot hand ''' + # # robot actions # # real robot hand ## + self.robot_actions = nn.Embedding( + num_embeddings=num_steps, embedding_dim=22, + ).cuda() + torch.nn.init.zeros_(self.robot_actions.weight) + params_to_train += list(self.robot_actions.parameters()) + + # self.robot_delta_states = nn.Embedding( + # num_embeddings=num_steps, embedding_dim=60, + # ).cuda() + # torch.nn.init.zeros_(self.robot_delta_states.weight) + # params_to_train += list(self.robot_delta_states.parameters()) + self.robot_states = nn.Embedding( + num_embeddings=num_steps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.robot_states.weight) + params_to_train += list(self.robot_states.parameters()) + + self.robot_init_states = nn.Embedding( + num_embeddings=1, embedding_dim=22, + ).cuda() + torch.nn.init.zeros_(self.robot_init_states.weight) + params_to_train += list(self.robot_init_states.parameters()) + + ## robot glb rotations ## + self.robot_glb_rotation = nn.Embedding( ## robot hand rotation + num_embeddings=num_steps, embedding_dim=4 + ).cuda() + self.robot_glb_rotation.weight.data[:, 0] = 1. + self.robot_glb_rotation.weight.data[:, 1:] = 0. + + self.robot_glb_trans = nn.Embedding( + num_embeddings=num_steps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.robot_glb_trans.weight) + + + # ### local minimum -> ## robot + self.robot_actuator_friction_forces = nn.Embedding( # frictional forces ## + num_embeddings=365428 * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.robot_actuator_friction_forces.weight) + + + ### load optimized init transformations for robot actions ### + if len(self.load_optimized_init_transformations) > 0: + print(f"[Robot] Loading optimized init transformations from {self.load_optimized_init_transformations}") + cur_optimized_init_actions_fn = self.load_optimized_init_transformations + # cur_optimized_init_actions = # optimized init states # ## robot init states ## + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + try: + self.robot_init_states.load_state_dict(optimized_init_actions_ckpt['robot_init_states']) + except: + pass + self.robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['robot_glb_rotation']) + if 'robot_delta_states' in optimized_init_actions_ckpt: + try: + self.robot_delta_states.load_state_dict(optimized_init_actions_ckpt['robot_delta_states']) + except: + pass + if 'robot_states' in optimized_init_actions_ckpt: + self.robot_states.load_state_dict(optimized_init_actions_ckpt['robot_states']) + # if 'robot_delta_states' ## robot delta states ## + # self.robot_actions.load_state_dict(optimized_init_actions_ckpt['robot_actions']) + # self.mano_robot_actuator_friction_forces.load_state_dict(optimized_init_actions_ckpt['robot_actuator_friction_forces']) + + self.robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['robot_glb_trans']) + + + + if self.hand_type == "redmax_hand": + self.maxx_robo_pts = 25. + self.minn_robo_pts = -15. 
+ self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + self.mult_const_after_cent = 0.5437551664260203 + else: + self.minn_robo_pts = -0.1 + self.maxx_robo_pts = 0.2 + self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + self.mult_const_after_cent = 0.437551664260203 + ## for grab ## + self.mult_const_after_cent = self.mult_const_after_cent / 3. * 0.9507 + + + # self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + # # self.robot_fingers = [3591, 4768, 6358, 10228, 6629, 10566, 5631, 9673] + # self.robot_fingers = [6496, 10128, 53, 1623, 3209, 4495, 9523, 8877] + # # self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + + # if self.hand_type == "redmax_hand": + # self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + # self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + + self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121, ] + self.robot_fingers = [6496, 10128, 53, 1623, 3209, 4495, 9523, 8877, ] + + if self.hand_type == "redmax_hand": + self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + self.robot_fingers = [14670, 321530, 36939, 125930, 200397, 257721, 333438, 338358] + + # params_to_train = [] + + if 'model.mano_mult_const_after_cent' in self.conf: + self.mano_mult_const_after_cent = self.conf['model.mano_mult_const_after_cent'] + + + self.nn_ts = self.nn_timesteps - 1 + + ''' parameters for the real robot hand ''' + + + + self.timestep_to_active_mesh = {} + # ref_expanded_visual_pts, minn_idx_expanded_visual_pts_to_link_pts # + # minn_idx_expanded_visual_pts_to_link_pts # + self.timestep_to_expanded_visual_pts = {} + self.timestep_to_active_mesh_opt_ours_sim = {} + + self.timestep_to_active_mesh_w_delta_states = {} + + + self.mass_point_mass = 1.0 + + self.timestep_to_actuator_points_vels = {} + self.timestep_to_actuator_points_passive_forces = {} + + + self.timestep_to_actuator_points_offsets = {} + time_cons = 0.0005 + pointset_expansion_alpha = 0.1 + + + # states -> get states -> only update the acitons # + with torch.no_grad(): # init them to zero + for cur_ts in range(self.nn_ts * self.mano_nn_substeps): + ''' Get rotations, translations, and actions of the current robot ''' + cur_glb_rot = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) # mano glb rot + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) # mano glboal rotations # + cur_glb_trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + ## mano robot states ## mano robot states ## + link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.mano_agent.set_init_states_target_value(link_cur_states) + + ## set init visual pts ## + cur_visual_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=True) + + cur_visual_pts = cur_visual_pts * self.mano_mult_const_after_cent + + # not taht the scale is differne but would not affect the final result + # expanded_pts # + cur_visual_pts_idxes = torch.arange( + start=cur_ts * self.expanded_visual_pts_nn, end=(cur_ts + 1) * self.expanded_visual_pts_nn, dtype=torch.long + ).cuda() + + cur_visual_pts_offset = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes) ## get the idxes ### + + + + cur_rot = cur_glb_rot + cur_trans = cur_glb_trans + + ## + cur_visual_pts = torch.matmul(cur_rot, 
cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) ## transformed pts ## + + + cur_visual_pts = cur_visual_pts + cur_visual_pts_offset + + # if not self.use_mano_inputs: + # if self.use_mano_hand_for_test: + # self.timestep_to_active_mesh[cur_ts] = self.rhand_verts[cur_ts].detach() + # else: + # self.timestep_to_active_mesh[cur_ts] = cur_visual_pts.detach() + self.timestep_to_active_mesh[cur_ts] = cur_visual_pts.detach() + self.timestep_to_active_mesh_w_delta_states[cur_ts] = cur_visual_pts.detach() + self.timestep_to_active_mesh_opt_ours_sim[cur_ts] = cur_visual_pts.detach() + + + cur_robo_glb_rot = self.robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + cur_robo_glb_rot = cur_robo_glb_rot / torch.clamp(torch.norm(cur_robo_glb_rot, dim=-1, p=2), min=1e-7) + cur_robo_glb_rot = dyn_model_act.quaternion_to_matrix(cur_robo_glb_rot) # mano glboal rotations # + cur_robo_glb_trans = self.robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + robo_links_states = self.robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.robot_agent.set_init_states_target_value(robo_links_states) + cur_robo_visual_pts = self.robot_agent.get_init_state_visual_pts() + + + if not self.use_scaled_urdf: + ### transform the visual pts ### + cur_robo_visual_pts = (cur_robo_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + cur_robo_visual_pts = cur_robo_visual_pts * 2. -1. + cur_robo_visual_pts = cur_robo_visual_pts * self.mult_const_after_cent # mult_const # + + + # cur_rot = cur_robo_glb_rot + # cur_trans = cur_glb_trans + + # timestep_to_tot_rot[cur_ts] = cur_rot.detach() + # timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + + ### transform by the glboal transformation and the translation ### + cur_robo_visual_pts = torch.matmul(cur_robo_glb_rot, cur_robo_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_robo_glb_trans.unsqueeze(0) ## transformed pts ## + + self.timestep_to_active_mesh[cur_ts] = cur_robo_visual_pts.detach() + + self.iter_step = 0 + + mano_expanded_actuator_delta_offset_ori = self.mano_expanded_actuator_delta_offset.weight.data.clone().detach() + mano_expanded_actuator_pointact_forces_ori = self.mano_expanded_actuator_pointact_forces.weight.data.clone().detach() + + ''' Set redmax robot actions ''' + + + # self.optimize_robot = False + + self.optimize_robot = True + + # self.optimize_anchored_pts = False + self.optimize_anchored_pts = True + + + params_to_train_kines = [] + # params_to_train_kines += list(self.mano_robot_glb_rotation.parameters()) + + + # if self.optimize_anchored_pts: + # params_to_train_kines += list(self.mano_expanded_actuator_delta_offset.parameters()) + # params_to_train_kines += list(self.mano_expanded_actuator_friction_forces.parameters()) + + if self.optimize_robot: + params_to_train_kines += list(self.robot_states.parameters()) + + + + + self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) #### kinematics optimizer ### + + self.expanded_set_delta_motion_ori = self.mano_expanded_actuator_delta_offset.weight.data.clone() + + + ''' prepare for keeping the original global rotations, trans, and states ''' + # ori_mano_robot_glb_rot = self.mano_robot_glb_rotation.weight.data.clone() + # ori_mano_robot_glb_trans = self.mano_robot_glb_trans.weight.data.clone() + # ori_mano_robot_delta_states = self.mano_robot_delta_states.weight.data.clone() + + + + 
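+ # NOTE (added explanatory comment): the detached *_ori copies above cache the pre-optimization
+ # point offsets and point forces so later iterations can penalize drift away from them, e.g. the
+ # reg_delta_offset_loss used in the training routine above:
+ #   reg = torch.sum((mano_expanded_actuator_delta_offset_ori
+ #                    - self.mano_expanded_actuator_delta_offset.weight.data) ** 2, dim=-1).mean()
+ # The loop below replays the MANO hand and the robot hand for self.nn_ts * self.mano_nn_substeps
+ # substeps per outer iteration and feeds the resulting meshes to self.other_bending_network.forward2;
+ # self.kines_optimizer (built over self.robot_states when optimize_robot is enabled) is the
+ # optimizer set up for these retargeting updates.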
+ self.iter_step = 0 + + self.ts_to_dyn_mano_pts_th = {} + self.timestep_to_anchored_mano_pts = {} + + + for i_iter in tqdm(range(100000)): + tot_losses = [] + tot_tracking_loss = [] + + # timestep # + # self.timestep_to_active_mesh = {} + self.timestep_to_posed_active_mesh = {} + self.timestep_to_posed_mano_active_mesh = {} + self.timestep_to_mano_active_mesh = {} + self.timestep_to_corr_mano_pts = {} + # # # # + timestep_to_tot_rot = {} + timestep_to_tot_trans = {} + + + # tot_penetrating_depth_penalty = [] + # tot_ragged_dist = [] + # tot_delta_offset_reg_motion = [] + # tot_dist_mano_visual_ori_to_cur = [] + # tot_reg_loss = [] + # tot_diff_cur_states_to_ref_states = [] + # tot_diff_tangential_forces = [] + # penetration_forces = None ### + # sampled_visual_pts_joint_idxes = None + + # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + self.timestep_to_raw_active_meshes = {} + self.timestep_to_penetration_points = {} + self.timestep_to_penetration_points_forces = {} + self.joint_name_to_penetration_forces_intermediates = {} + + # self.timestep_to_anchored_mano_pts = {} + + + self.ts_to_contact_passive_normals = {} + self.ts_to_passive_normals = {} + self.ts_to_passive_pts = {} + self.ts_to_contact_force_d = {} + self.ts_to_penalty_frictions = {} + self.ts_to_penalty_disp_pts = {} + self.ts_to_redmax_states = {} + self.ts_to_dyn_mano_pts = {} + # constraitns for states # + # with 17 dimensions on the states; [3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16] + + contact_pairs_set = None + self.contact_pairs_sets = {} + + # redmax_sim.reset(backward_flag = True) + + # tot_grad_qs = [] + + robo_intermediates_states = [] + + tot_penetration_depth = [] + + robo_actions_diff_loss = [] + mano_tracking_loss = [] + + + tot_interpenetration_nns = [] + + tot_diff_cur_visual_pts_offsets_with_ori = [] + + tot_summ_grad_mano_expanded_actuator_delta_offset_weight = [] + tot_summ_grad_mano_expanded_actuator_pointact_forces_weight = [] + + penetration_forces = None ### + sampled_visual_pts_joint_idxes = None + + + + # init global transformations ## + # cur_ts_redmax_delta_rotations = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_delta_rotations = torch.tensor([0., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_robot_trans = torch.zeros((3,), dtype=torch.float32).cuda() + + # for cur_ts in range(self.nn_ts): + for cur_ts in range(self.nn_ts * self.mano_nn_substeps): + # tot_redmax_actions = [] + # actions = {} + + self.free_def_bending_weight = 0.0 + + # mano_robot_glb_rotation, mano_robot_glb_trans, mano_robot_delta_states # + cur_glb_rot = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + cur_glb_rot = cur_glb_rot + cur_ts_redmax_delta_rotations + + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) + # cur_glb_rot_quat = cur_glb_rot.clone() + + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) # mano glboal rotations # # glb rot # trans # + cur_glb_trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + # # # cur_ts_delta_rot, cur_ts_redmax_robot_trans # # # + # cur_glb_rot = torch.matmul(cur_ts_delta_rot, cur_glb_rot) + # cur_glb_trans = cur_glb_trans + cur_ts_redmax_robot_trans # redmax robot transj## + + # link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + # self.mano_agent.set_init_states_target_value(link_cur_states) + # 
cur_visual_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=True) # init state visual pts # + + # cur_dyn_mano_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=False) # init s + + ### motivate via actions #### + link_cur_actions = self.mano_robot_actions(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.mano_agent.set_actions_and_update_states_v2( link_cur_actions, cur_ts, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes) + ### motivate via actions #### + + + cur_visual_pts, visual_pts_joint_idxes = self.mano_agent.get_init_state_visual_pts(expanded_pts=True, ret_joint_idxes=True) # init state visual pts # + + ## visual pts sampled ## + + ## visual pts sampled ## + cur_dyn_mano_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=False) # init s + + # not taht the scale is differne but would not affect the final result + # expanded_pts # + cur_visual_pts_idxes = torch.arange( + start=cur_ts * self.expanded_visual_pts_nn, end=(cur_ts + 1) * self.expanded_visual_pts_nn, dtype=torch.long + ).cuda() + + + if self.drive_pointset == "actions": + ''' Act-React-driven point motions ''' + ###### get the point offset via actuation forces and reaction forces ###### + ### actuation forces at this timestep ### + cur_visual_pts_forces = self.mano_expanded_actuator_pointact_forces(cur_visual_pts_idxes) * 1e7 ## get vsual pts forces ### + + # if cur_ts > 0 and (cur_ts - 1 in self.timestep_to_actuator_points_passive_forces): + # cur_visual_pts_passive_forces = self.timestep_to_actuator_points_passive_forces[cur_ts - 1] ## nn_visual_pts x 3 ## + # cur_visual_pts_forces = cur_visual_pts_forces + cur_visual_pts_passive_forces ## two forces ### + + cur_visual_pts_forces = cur_visual_pts_forces * pointset_expansion_alpha + + ## --- linear damping here ? 
## + cur_visual_pts_accs = cur_visual_pts_forces / self.mass_point_mass ### get the mass pont accs ## + if cur_ts == 0: + cur_visual_pts_vels = cur_visual_pts_accs * time_cons + else: # visual pts acc -> visual pts vels # + # prev_visual_pts_vels = self.timestep_to_actuator_points_vels[cur_ts - 1] ### nn_pts x 3 ## + # cur_visual_pts_accs = cur_visual_pts_accs - cur_vel_damping_coef * prev_visual_pts_vels ### nn_pts x 3 ## + # cur_visual_pts_vels = prev_visual_pts_vels + cur_visual_pts_accs * time_cons ## nn_pts x 3 ## + cur_visual_pts_vels = cur_visual_pts_accs * time_cons + self.timestep_to_actuator_points_vels[cur_ts] = cur_visual_pts_vels.detach().clone() + cur_visual_pts_offsets = cur_visual_pts_vels * time_cons + # + # if cur_ts > 0: + # prev_visual_pts_offset = self.timestep_to_actuator_points_offsets[cur_ts - 1] + # cur_visual_pts_offsets = prev_visual_pts_offset + cur_visual_pts_offsets + + + # train_pointset_acts_via_deltas, diff_cur_visual_pts_offsets_with_ori + # cur_visual_pts_offsets_from_delta = mano_expanded_actuator_delta_offset_ori[cur_ts] + # cur_visual_pts_offsets_from_delta = self.mano_expanded_actuator_delta_offset.weight.data[cur_ts].detach() + cur_visual_pts_offsets_from_delta = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes).detach() + ## + diff_cur_visual_pts_offsets_with_ori = torch.sum((cur_visual_pts_offsets - cur_visual_pts_offsets_from_delta) ** 2, dim=-1).mean() ## mean of the avg offset differences ## + tot_diff_cur_visual_pts_offsets_with_ori.append(diff_cur_visual_pts_offsets_with_ori.item()) + + + self.timestep_to_actuator_points_offsets[cur_ts] = cur_visual_pts_offsets.detach().clone() + ###### get the point offset via actuation forces and reaction forces ###### + ''' Act-React-driven point motions ''' + elif self.drive_pointset == "states": + ''' Offset-driven point motions ''' + ## points should be able to manipulate the object accordingly ### + ### we should avoid the penetrations between points and the object ### + ### we should restrict relative point displacement / the point offsets at each timestep to relatively small values ### + ## -> so we have three losses for the delta offset optimization ## + cur_visual_pts_offsets = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes) + # cur_visual_pts_offsets = cur_visual_pts_offsets * 10 + self.timestep_to_actuator_points_offsets[cur_ts] = cur_visual_pts_offsets.detach().clone() + ''' Offset-driven point motions ''' + + else: + raise ValueError(f"Unknown drive_pointset: {self.drive_pointset}") + + + + + # cur_visual_pts_offset = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes) ## get the idxes ### + + # ## get the friction forces ## + # cur_visual_pts_friction_forces = self.mano_expanded_actuator_friction_forces(cur_visual_pts_idxes) + + ### transform the visual pts ### ## fricton forces ## + # cur_visual_pts = (cur_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + # cur_visual_pts = cur_visual_pts * 2. - 1. 
# cur visual pts # + # cur_visual_pts = cur_visual_pts * self.mult_const_after_cent # # mult_const # + + cur_visual_pts = cur_visual_pts * self.mano_mult_const_after_cent # mult cnst after cent # + cur_dyn_mano_pts = cur_dyn_mano_pts * self.mano_mult_const_after_cent + + cur_rot = cur_glb_rot + cur_trans = cur_glb_trans + + timestep_to_tot_rot[cur_ts] = cur_rot.detach() + timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + ## + ### transform by the glboal transformation and the translation ### + cur_visual_pts = torch.matmul(cur_rot, cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) + cur_dyn_mano_pts = torch.matmul(cur_rot, cur_dyn_mano_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) + + cur_visual_pts = cur_visual_pts + cur_visual_pts_offsets + + # diff_redmax_visual_pts_with_ori_visual_pts = torch.sum( + # (cur_visual_pts[sampled_verts_idxes] - self.timestep_to_active_mesh_opt_ours_sim[cur_ts].detach()) ** 2, dim=-1 + # ) + # diff_redmax_visual_pts_with_ori_visual_pts = diff_redmax_visual_pts_with_ori_visual_pts.mean() + + # train the friction net? how to train the friction net? # + # if self.use_mano_hand_for_test: + # self.timestep_to_active_mesh[cur_ts] = self.rhand_verts[cur_ts] # .detach() + # else: + + + # timestep_to_anchored_mano_pts, timestep_to_raw_active_meshes # + self.timestep_to_active_mesh[cur_ts] = cur_visual_pts + self.timestep_to_raw_active_meshes[cur_ts] = cur_visual_pts.detach().cpu().numpy() + # self.ts_to_dyn_mano_pts[cur_ts] = cur_dyn_mano_pts.detach().cpu().numpy() + self.ts_to_dyn_mano_pts[cur_ts] = cur_dyn_mano_pts.detach().cpu().numpy() + self.ts_to_dyn_mano_pts_th[cur_ts] = cur_dyn_mano_pts + + + # ragged_dist = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # dist_transformed_expanded_visual_pts_to_ori_visual_pts = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # diff_cur_states_to_ref_states = torch.zeros((1,), dtype=torch.float32).cuda().mean() + cur_robo_glb_rot = self.robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + cur_robo_glb_rot = cur_robo_glb_rot / torch.clamp(torch.norm(cur_robo_glb_rot, dim=-1, p=2), min=1e-7) + cur_robo_glb_rot = dyn_model_act.quaternion_to_matrix(cur_robo_glb_rot) # mano glboal rotations # + cur_robo_glb_trans = self.robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + robo_links_states = self.robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.robot_agent.set_init_states_target_value(robo_links_states) + cur_robo_visual_pts = self.robot_agent.get_init_state_visual_pts() + + + if not self.use_scaled_urdf: + ### transform the visual pts ### + cur_robo_visual_pts = (cur_robo_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + cur_robo_visual_pts = cur_robo_visual_pts * 2. -1. 
+ cur_robo_visual_pts = cur_robo_visual_pts * self.mult_const_after_cent # mult_const # + + + cur_rot = cur_robo_glb_rot + cur_trans = cur_glb_trans + + timestep_to_tot_rot[cur_ts] = cur_rot.detach() + timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + + ### transform by the glboal transformation and the translation ### + cur_robo_visual_pts = torch.matmul(cur_robo_glb_rot, cur_robo_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_robo_glb_trans.unsqueeze(0) ## transformed pts ## + + + self.timestep_to_active_mesh[cur_ts] = cur_robo_visual_pts.clone() # robo visual pts # + + # self.timestep_to_active_mesh[cur_ts] = cur_visual_pts + # if self.hand_type == 'redmax_hand': + # maxx_sampled_idxes = torch.max(sampled_verts_idxes) + # minn_sampled_idxes = torch.min(sampled_verts_idxes) + # # print(f"cur_robo_visual_pts: {cur_robo_visual_pts.size()}, maxx_sampled_idxes: {maxx_sampled_idxes}, minn_sampled_idxes: {minn_sampled_idxes}") + # cur_robo_visual_pts = cur_robo_visual_pts[sampled_verts_idxes] # + + + + self.free_def_bending_weight = 0.0 + # self.free_def_bending_weight = 0.5 + + if i_iter == 0 and cur_ts == 0: ## for the tiantianquan sequence ## + dist_robot_pts_to_mano_pts = torch.sum( + (cur_robo_visual_pts.unsqueeze(1) - cur_visual_pts.unsqueeze(0)) ** 2, dim=-1 + ) + minn_dist_robot_pts_to_mano_pts, correspondence_pts_idxes = torch.min(dist_robot_pts_to_mano_pts, dim=-1) + minn_dist_robot_pts_to_mano_pts = torch.sqrt(minn_dist_robot_pts_to_mano_pts) + # dist_smaller_than_thres = minn_dist_robot_pts_to_mano_pts < 0.01 + dist_smaller_than_thres = minn_dist_robot_pts_to_mano_pts < 0.005 + + corr_correspondence_pts = cur_visual_pts[correspondence_pts_idxes] # in correspondence pts idxes ## + + dist_corr_correspondence_pts_to_mano_visual_pts = torch.sum( + (corr_correspondence_pts.unsqueeze(1) - cur_visual_pts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_corr_correspondence_pts_to_mano_visual_pts = torch.sqrt(dist_corr_correspondence_pts_to_mano_visual_pts) + minn_dist_to_corr_pts, _ = torch.min(dist_corr_correspondence_pts_to_mano_visual_pts, dim=0) + anchored_mano_visual_pts = minn_dist_to_corr_pts < 0.005 + + corr_correspondence_pts = cur_visual_pts[correspondence_pts_idxes] + # corr_robo = cur_visual_pts[sampled_verts_idxes] + cd_robo_pts_to_corr_mano_pts = torch.sum( # distance from robot pts to the anchored mano pts + (cur_robo_visual_pts.unsqueeze(1) - cur_visual_pts[anchored_mano_visual_pts].unsqueeze(0).detach()) ** 2, dim=-1 + ) + + # self.timestep_to_anchored_mano_pts[cur_ts] = cur_visual_pts[anchored_mano_visual_pts] # .detach().cpu().numpy() + self.timestep_to_anchored_mano_pts[cur_ts] = cur_visual_pts # .detach().cpu().numpy() + + + cd_robo_to_mano, _ = torch.min(cd_robo_pts_to_corr_mano_pts, dim=-1) + cd_mano_to_robo, _ = torch.min(cd_robo_pts_to_corr_mano_pts, dim=0) + # diff_robo_to_corr_mano_pts = cd_mano_to_robo.mean() + diff_robo_to_corr_mano_pts = cd_robo_to_mano.mean() + + mano_fingers = self.rhand_verts[cur_ts][self.mano_fingers] + + + + ##### finger cd loss -> to the anchored expanded actioning points ##### + ## fingeer tracking loss -> to each finger ## + # loss_finger_tracking = diff_robo_to_corr_mano_pts * self.finger_cd_loss_coef + pure_finger_tracking_loss * 0.5 # + diff_robo_to_corr_mano_pts_finger_tracking * self.finger_tracking_loss_coef + loss_finger_tracking = diff_robo_to_corr_mano_pts * self.finger_cd_loss_coef # + pure_finger_tracking_loss * 0.5 + + + ## TODO: add the glboal retargeting using fingers before conducting 
this approach + + + # def evaluate_tracking_loss(): + # self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=False, contact_pairs_set=self.contact_pairs_set) + + # ## + # # init states # + # # cur_ts % mano_nn_substeps == 0: + # if (cur_ts + 1) % mano_nn_substeps == 0: + # cur_passive_big_ts = cur_ts // mano_nn_substeps + # in_func_tracking_loss = self.compute_loss_optimized_transformations_v2(cur_ts + 1, cur_passive_big_ts + 1) + # # tot_tracking_loss.append(tracking_loss.detach().cpu().item()) + # else: + # in_func_tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # return in_func_tracking_loss + + + if contact_pairs_set is None: + self.contact_pairs_set = None + else: + ## + self.contact_pairs_set = contact_pairs_set.copy() + + # ### if traiing the jrbpt h + # print(self.timestep_to_active_mesh[cur_ts].size(), cur_visual_pts_friction_forces.size()) # friction forces # + ### optimize the robot tracing loss ### + + + # if self.o + + if self.optimize_anchored_pts: + # anchored_cur_visual_pts_friction_forces = cur_visual_pts_friction_forces[anchored_mano_visual_pts] + contact_pairs_set = self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_anchored_mano_pts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=self.fix_obj, contact_pairs_set=contact_pairs_set) + else: + contact_pairs_set = self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=self.fix_obj, contact_pairs_set=contact_pairs_set, pts_frictional_forces=None) + + + ### train with force to active ## + if self.train_with_forces_to_active and (not self.use_mano_inputs): + # penetration_forces # + if torch.sum(self.other_bending_network.penetrating_indicator.float()) > 0.5: + net_penetrating_forces = self.other_bending_network.penetrating_forces + net_penetrating_points = self.other_bending_network.penetrating_points + + + # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + self.timestep_to_penetration_points[cur_ts] = net_penetrating_points.detach().cpu().numpy() + self.timestep_to_penetration_points_forces[cur_ts] = net_penetrating_forces.detach().cpu().numpy() + + + ### transform the visual pts ### + # cur_visual_pts = (cur_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + # cur_visual_pts = cur_visual_pts * 2. - 1. 
+ # cur_visual_pts = cur_visual_pts * self.mult_const_after_cent # mult_const # + + # sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[finger_sampled_idxes][self.other_bending_network.penetrating_indicator] + + sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[self.other_bending_network.penetrating_indicator] + + ## from net penetration forces to to the + + ### get the passvie force for each point ## ## + self.timestep_to_actuator_points_passive_forces[cur_ts] = self.other_bending_network.penetrating_forces_allpts.detach().clone() ## for + + net_penetrating_forces = torch.matmul( + cur_rot.transpose(1, 0), net_penetrating_forces.transpose(1, 0) + ).transpose(1, 0) + # net_penetrating_forces = net_penetrating_forces / self.mult_const_after_cent + # net_penetrating_forces = net_penetrating_forces / 2 + # net_penetrating_forces = net_penetrating_forces * self.extent_robo_pts + + net_penetrating_forces = (1.0 - pointset_expansion_alpha) * net_penetrating_forces + + net_penetrating_points = torch.matmul( + cur_rot.transpose(1, 0), (net_penetrating_points - cur_trans.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + # net_penetrating_points = net_penetrating_points / self.mult_const_after_cent + # net_penetrating_points = (net_penetrating_points + 1.) / 2. # penetrating points # + # net_penetrating_points = (net_penetrating_points * self.extent_robo_pts) + self.minn_robo_pts + + penetration_forces = net_penetrating_forces + # link_maximal_contact_forces = torch.zeros((redmax_ndof_r, 6), dtype=torch.float32).cuda() + # + else: + penetration_forces = None + sampled_visual_pts_joint_idxes = None + # link_maximal_contact_forces = torch.zeros((redmax_ndof_r, 6), dtype=torch.float32).cuda() + ''' the bending network still have this property and we can get force values here for the expanded visual points ''' + self.timestep_to_actuator_points_passive_forces[cur_ts] = self.other_bending_network.penetrating_forces_allpts.detach().clone() + + + + # if self.optimize_robot: + # contact_pairs_set = self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=False, contact_pairs_set=contact_pairs_set, pts_frictional_forces=None) + # else: + # anchored_cur_visual_pts_friction_forces = cur_visual_pts_friction_forces[anchored_mano_visual_pts] + # contact_pairs_set = self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_anchored_mano_pts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=False, contact_pairs_set=contact_pairs_set, pts_frictional_forces=anchored_cur_visual_pts_friction_forces) + + + # if self.train_with_forces_to_active and (not self.use_mano_inputs): + # # penetration_forces # + # if torch.sum(self.other_bending_network.penetrating_indicator.float()) > 0.5: + # net_penetrating_forces = self.other_bending_network.penetrating_forces + # net_penetrating_points = self.other_bending_network.penetrating_points + + + # # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + # self.timestep_to_penetration_points[cur_ts] = 
net_penetrating_points.detach().cpu().numpy() + # self.timestep_to_penetration_points_forces[cur_ts] = net_penetrating_forces.detach().cpu().numpy() + + + # ### transform the visual pts ### + # # cur_visual_pts = (cur_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + # # cur_visual_pts = cur_visual_pts * 2. - 1. + # # cur_visual_pts = cur_visual_pts * self.mult_const_after_cent # mult_const # + + # # sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[finger_sampled_idxes][self.other_bending_network.penetrating_indicator] + + # # sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[self.other_bending_network.penetrating_indicator] + + # net_penetrating_forces = torch.matmul( + # cur_rot.transpose(1, 0), net_penetrating_forces.transpose(1, 0) + # ).transpose(1, 0) + # net_penetrating_forces = net_penetrating_forces / self.mult_const_after_cent + # net_penetrating_forces = net_penetrating_forces / 2 + # net_penetrating_forces = net_penetrating_forces * self.extent_robo_pts + + # net_penetrating_points = torch.matmul( + # cur_rot.transpose(1, 0), (net_penetrating_points - cur_trans.unsqueeze(0)).transpose(1, 0) + # ).transpose(1, 0) + # net_penetrating_points = net_penetrating_points / self.mult_const_after_cent + # net_penetrating_points = (net_penetrating_points + 1.) / 2. # penetrating points # + # net_penetrating_points = (net_penetrating_points * self.extent_robo_pts) + self.minn_robo_pts + + + # link_maximal_contact_forces = torch.zeros((redmax_ndof_r, 6), dtype=torch.float32).cuda() + + # else: + # # penetration_forces = None + # link_maximal_contact_forces = torch.zeros((redmax_ndof_r, 6), dtype=torch.float32).cuda() + + # if contact_pairs_set is not None: + # self.contact_pairs_sets[cur_ts] = contact_pairs_set.copy() + + # # contact force d ## ts to the passive normals ## + # self.ts_to_contact_passive_normals[cur_ts] = self.other_bending_network.tot_contact_passive_normals.detach().cpu().numpy() + # self.ts_to_passive_pts[cur_ts] = self.other_bending_network.cur_passive_obj_verts.detach().cpu().numpy() + # self.ts_to_passive_normals[cur_ts] = self.other_bending_network.cur_passive_obj_ns.detach().cpu().numpy() + # self.ts_to_contact_force_d[cur_ts] = self.other_bending_network.contact_force_d.detach().cpu().numpy() + # self.ts_to_penalty_frictions[cur_ts] = self.other_bending_network.penalty_friction_tangential_forces.detach().cpu().numpy() + # if self.other_bending_network.penalty_based_friction_forces is not None: + # self.ts_to_penalty_disp_pts[cur_ts] = self.other_bending_network.penalty_based_friction_forces.detach().cpu().numpy() + + # # # get the penetration depth of the bending network # + # tot_penetration_depth.append(self.other_bending_network.penetrating_depth_penalty.detach().item()) + + # ### optimize with intermediates ### # optimize with intermediates # + # if self.optimize_with_intermediates: + # tracking_loss = self.compute_loss_optimized_transformations(cur_ts + 1) # + # else: + # tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + + + # # cur_ts % mano_nn_substeps == 0: # + if (cur_ts + 1) % mano_nn_substeps == 0: + cur_passive_big_ts = cur_ts // mano_nn_substeps + ### tracking loss between the predicted transformation and te tracking ### + tracking_loss = self.compute_loss_optimized_transformations_v2(cur_ts + 1, cur_passive_big_ts + 1) + tot_tracking_loss.append(tracking_loss.detach().cpu().item()) + else: + tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + + # # hand_tracking_loss = torch.sum( ## delta 
states? ## + # # (self.timestep_to_active_mesh_w_delta_states[cur_ts] - cur_visual_pts) ** 2, dim=-1 + # # ) + # # hand_tracking_loss = hand_tracking_loss.mean() + + + # # loss = tracking_loss + self.other_bending_network.penetrating_depth_penalty * self.penetrating_depth_penalty_coef + # # diff_redmax_visual_pts_with_ori_visual_pts.backward() + # penetraton_penalty = self.other_bending_network.penetrating_depth_penalty * self.penetrating_depth_penalty_coef + + + # diff_hand_tracking = torch.zeros((1,), dtype=torch.float32).cuda().mean() ## + + # ## diff + # # diff_hand_tracking_coef + # # kinematics_proj_loss = kinematics_trans_diff + penetraton_penalty + diff_hand_tracking * self.diff_hand_tracking_coef + tracking_loss + + # # if self.use_mano_hand_for_test: ## only the kinematics mano hand is optimized here ## + # # kinematics_proj_loss = tracking_loss + + # # kinematics_proj_loss = hand_tracking_loss * 1e2 + + # smaller_than_zero_level_set_indicator + cur_interpenetration_nns = self.other_bending_network.smaller_than_zero_level_set_indicator.float().sum() + + + # kinematics_proj_loss = diff_hand_tracking * self.diff_hand_tracking_coef + tracking_loss + penetraton_penalty + # expanded_set_delta_motion_ori + diff_actions = torch.sum( + (self.expanded_set_delta_motion_ori - self.mano_expanded_actuator_delta_offset.weight) ** 2, dim=-1 + ) + + reg_act_force_loss = torch.sum( + (mano_expanded_actuator_pointact_forces_ori - self.mano_expanded_actuator_pointact_forces.weight.data) ** 2, dim=-1 + ) + reg_act_force_loss = reg_act_force_loss.mean() + + + diff_actions = diff_actions.mean() + diff_actions = diff_actions + reg_act_force_loss + + + ### the tracking loss ### + # kinematics_proj_loss = loss_finger_tracking + tracking_loss + diff_actions # + tracking_loss + penetraton_penalty + + kinematics_proj_loss = loss_finger_tracking # + tracking_loss + + loss = kinematics_proj_loss # * self.loss_scale_coef ## get + + + mano_tracking_loss.append(loss_finger_tracking.detach().cpu().item()) + + + self.kines_optimizer.zero_grad() + + kinematics_proj_loss.backward(retain_graph=True) + + self.kines_optimizer.step() + + # if self.use_LBFGS: + # self.kines_optimizer.step(evaluate_tracking_loss) # + # else: + # self.kines_optimizer.step() + + # + # tracking_loss.backward(retain_graph=True) + # if self.use_LBFGS: + # self.other_bending_network.reset_timestep_to_quantities(cur_ts) + + + # robot_states_actions_diff_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # diff_actions + robot_states_actions_diff_loss = diff_actions + robo_actions_diff_loss.append(robot_states_actions_diff_loss) + + + tot_losses.append(loss.detach().item()) # total losses # # total losses # + # tot_penalty_dot_forces_normals.append(cur_penalty_dot_forces_normals.detach().item()) + # tot_penalty_friction_constraint.append(cur_penalty_friction_constraint.detach().item()) + + self.iter_step += 1 + + self.writer.add_scalar('Loss/loss', loss, self.iter_step) + + + if self.iter_step % self.save_freq == 0: + self.save_checkpoint() + + self.update_learning_rate() + + torch.cuda.empty_cache() + + + ''' Get nn_forward_ts and backward through the actions for updating ''' + tot_losses = sum(tot_losses) / float(len(tot_losses)) + if len(tot_tracking_loss) > 0: + tot_tracking_loss = sum(tot_tracking_loss) / float(len(tot_tracking_loss)) + else: + tot_tracking_loss = 0.0 + if len(tot_penetration_depth) > 0: + tot_penetration_depth = sum(tot_penetration_depth) / float(len(tot_penetration_depth)) + else: + tot_penetration_depth = 0.0 + 
robo_actions_diff_loss = sum(robo_actions_diff_loss) / float(len(robo_actions_diff_loss)) + if len(mano_tracking_loss) > 0: + mano_tracking_loss = sum(mano_tracking_loss) / float(len(mano_tracking_loss)) + else: + mano_tracking_loss = 0.0 + + + + + if i_iter % self.report_freq == 0: + logs_sv_fn = os.path.join(self.base_exp_dir, 'log.txt') + + cur_log_sv_str = 'iter:{:>8d} loss = {} tracking_loss = {} mano_tracking_loss = {} penetration_depth = {} actions_diff_loss = {} lr={}'.format(self.iter_step, tot_losses, tot_tracking_loss, mano_tracking_loss, tot_penetration_depth, robo_actions_diff_loss, self.optimizer.param_groups[0]['lr']) + + print(cur_log_sv_str) + ''' Dump to the file ''' + with open(logs_sv_fn, 'a') as log_file: + log_file.write(cur_log_sv_str + '\n') + else: + logs_sv_fn = os.path.join(self.base_exp_dir, 'log.txt') + + cur_log_sv_str = 'iter:{:>8d} loss = {} tracking_loss = {} mano_tracking_loss = {} penetration_depth = {} actions_diff_loss = {} lr={}'.format(self.iter_step, tot_losses, tot_tracking_loss, mano_tracking_loss, tot_penetration_depth, robo_actions_diff_loss, self.optimizer.param_groups[0]['lr']) + + print(cur_log_sv_str) + + + # self.validate_mesh_robo_a() + if i_iter % self.val_mesh_freq == 0: + self.validate_mesh_robo_g() + self.validate_mesh_robo() + ### test for contact infos ### + # self.validate_contact_info_robo() + + + torch.cuda.empty_cache() + + + + ''' GRAB clips --- expanded point set and expanded points for retargeting ''' + def train_point_set_retargeting_pts(self, ): + ## ## GRAB clips ## ## + # states -> the robot actions --- in this sim ## + # change # # not just the mano but the mano delta states ---> optimize the mano delta states? # + self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs')) + self.update_learning_rate() # update learning rate # + # robot actions ## + + nn_timesteps = self.timestep_to_passive_mesh.size(0) + self.nn_timesteps = nn_timesteps + num_steps = self.nn_timesteps + + # load # # load the robot hand # # load + ''' Load the robot hand ''' + model_path = self.conf['model.sim_model_path'] # + # robot_agent = dyn_model_act.RobotAgent(xml_fn=model_path, args=None) + self.hand_type = "redmax_hand" if "redmax" in model_path else "shadow_hand" + # self.hand_type = "redmax_hand" + if model_path.endswith(".xml"): + # self.hand_type = "redmax_hand" + robot_agent = dyn_model_act.RobotAgent(xml_fn=model_path, args=None) + else: + # self.hand_type = "shadow_hand" + robot_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path, args=None) + self.robot_agent = robot_agent + + robo_init_verts = self.robot_agent.robot_pts + if self.hand_type == "redmax_hand": + redmax_sampled_verts_idxes_fn = "redmax_robo_sampled_verts_idxes_new.npy" + redmax_sampled_verts_idxes_fn = os.path.join("assets", redmax_sampled_verts_idxes_fn) + if os.path.exists(redmax_sampled_verts_idxes_fn): + sampled_verts_idxes = np.load(redmax_sampled_verts_idxes_fn) + sampled_verts_idxes = torch.from_numpy(sampled_verts_idxes).long().cuda() + else: + n_sampling = 1000 + pts_fps_idx = data_utils.farthest_point_sampling(robo_init_verts.unsqueeze(0), n_sampling=n_sampling) + sampled_verts_idxes = pts_fps_idx + np.save(redmax_sampled_verts_idxes_fn, sampled_verts_idxes.detach().cpu().numpy()) + self.sampled_verts_idxes = sampled_verts_idxes + + robo_expanded_visual_pts = self.robot_agent.active_robot.expand_visual_pts() + self.robo_expanded_visual_pts_nn = robo_expanded_visual_pts.size(0) + + self.robo_hand_faces = self.robot_agent.robot_faces + + + + ## sampled verts 
idxes ## + # self.sampled_verts_idxes = sampled_verts_idxes + ''' Load the robot hand ''' + + + ''' Load robot hand in DiffHand simulator ''' + # redmax_sim = redmax.Simulation(model_path) + # redmax_sim.reset(backward_flag = True) # redmax_sim -- # # --- robot hand mani asset? ## ## robot hand mani asse ## + # # ### redmax_ndof_u, redmax_ndof_r ### # + # redmax_ndof_u = redmax_sim.ndof_u + # redmax_ndof_r = redmax_sim.ndof_r + # redmax_ndof_m = redmax_sim.ndof_m + + + ''' Load the mano hand model ''' + model_path_mano = self.conf['model.mano_sim_model_path'] + # mano_agent = dyn_model_act_mano_deformable.RobotAgent(xml_fn=model_path_mano) # robot # + mano_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path_mano) ## model path mano ## # + self.mano_agent = mano_agent + # ''' Load the mano hand ''' + self.mano_agent.active_robot.expand_visual_pts() + # self.robo_hand_faces = self.mano_agent.robot_faces + + + + # if self.use_mano_hand_for_test: ## use + # self.robo_hand_faces = self.hand_faces + ## + + ## start expanding the current visual pts ## + print(f"Start expanding current visual pts...") + expanded_visual_pts = self.mano_agent.active_robot.expand_visual_pts() + + self.expanded_visual_pts_nn = expanded_visual_pts.size(0) + ## expanded_visual_pts of the expanded visual pts # + expanded_visual_pts_npy = expanded_visual_pts.detach().cpu().numpy() + expanded_visual_pts_sv_fn = "expanded_visual_pts.npy" + print(f"Saving expanded visual pts with shape {expanded_visual_pts.size()} to {expanded_visual_pts_sv_fn}") + np.save(expanded_visual_pts_sv_fn, expanded_visual_pts_npy) + + + nn_substeps = 10 + # + mano_nn_substeps = 1 + # mano_nn_substeps = 10 # + self.mano_nn_substeps = mano_nn_substeps + + # self.hand_faces # + + + ''' Expnad the current visual points ''' + # expanded_visual_pts = self.mano_agent.active_robot.expand_visual_pts() + # self.expanded_visual_pts_nn = expanded_visual_pts.size(0) + # expanded_visual_pts_npy = expanded_visual_pts.detach().cpu().numpy() + # expanded_visual_pts_sv_fn = "expanded_visual_pts.npy" + # np.save(expanded_visual_pts_sv_fn, expanded_visual_pts_npy) + # # ''' Expnad the current visual points ''' # # differentiate through the simulator? # # + + + params_to_train = [] # params to train # + ### robot_actions, robot_init_states, robot_glb_rotation, robot_actuator_friction_forces, robot_glb_trans ### + + ''' Define MANO robot actions, delta_states, init_states, frictions, and others ''' + self.mano_robot_actions = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_actions.weight) + # params_to_train += list(self.robot_actions.parameters()) + + self.mano_robot_delta_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_delta_states.weight) + # params_to_train += list(self.robot_delta_states.parameters()) + + self.mano_robot_init_states = nn.Embedding( + num_embeddings=1, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_init_states.weight) + # params_to_train += list(self.robot_init_states.parameters()) + + self.mano_robot_glb_rotation = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=4 + ).cuda() + self.mano_robot_glb_rotation.weight.data[:, 0] = 1. + self.mano_robot_glb_rotation.weight.data[:, 1:] = 0. 
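+        # An illustrative sketch (assumption, hedged) of how the per-timestep global pose stored in
+        # the rotation embedding above and the translation embedding defined just below is decoded at
+        # rollout time: the quaternion row for a timestep is normalized and converted to a rotation
+        # matrix before being applied to the hand points (see the training loop further down). The
+        # [1, 0, 0, 0] initialization corresponds to the identity rotation. A (w, x, y, z) layout is
+        # assumed here; the project's dyn_model_act.quaternion_to_matrix may differ in detail.
+        # def _decode_glb_pose(self, ts):
+        #     q = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + ts).squeeze(0)
+        #     q = q / torch.clamp(torch.norm(q, dim=-1, p=2), min=1e-7)  # guard against a zero-norm quaternion
+        #     w, x, y, z = q[0], q[1], q[2], q[3]
+        #     rot = torch.stack([
+        #         torch.stack([1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)]),
+        #         torch.stack([2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)]),
+        #         torch.stack([2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)]),
+        #     ])  # 3 x 3 rotation matrix from the normalized quaternion
+        #     trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + ts).squeeze(0)
+        #     return rot, trans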
+ # params_to_train += list(self.robot_glb_rotation.parameters()) + + + self.mano_robot_glb_trans = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_robot_glb_trans.weight) + # params_to_train += list(self.robot_glb_trans.parameters()) + + self.mano_robot_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_states.weight) + self.mano_robot_states.weight.data[0, :] = self.mano_robot_init_states.weight.data[0, :].clone() + + + + self.mano_expanded_actuator_delta_offset = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_delta_offset.weight) + # params_to_train += list(self.mano_expanded_actuator_delta_offset.parameters()) + + + # mano_expanded_actuator_friction_forces, mano_expanded_actuator_delta_offset # + self.mano_expanded_actuator_friction_forces = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_friction_forces.weight) + + + ##### expanded actuators pointact jforces ### + self.mano_expanded_actuator_pointact_forces = nn.Embedding( + num_embeddings=self.expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_pointact_forces.weight) + + self.mano_expanded_actuator_pointact_damping_coefs = nn.Embedding( + num_embeddings=10, embedding_dim=1 + ).cuda() + torch.nn.init.zeros_(self.mano_expanded_actuator_pointact_damping_coefs.weight) + + + + ## load mano states and actions ## + if 'model.load_optimized_init_actions' in self.conf and len(self.conf['model.load_optimized_init_actions']) > 0: + print(f"[MANO] Loading optimized init transformations from {self.conf['model.load_optimized_init_actions']}") + cur_optimized_init_actions_fn = self.conf['model.load_optimized_init_actions'] + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + if 'mano_robot_init_states' in optimized_init_actions_ckpt: + self.mano_robot_init_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_init_states']) + if 'mano_robot_glb_rotation' in optimized_init_actions_ckpt: + self.mano_robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_rotation']) + + if 'mano_robot_states' in optimized_init_actions_ckpt: + self.mano_robot_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_states']) + + if 'mano_robot_actions' in optimized_init_actions_ckpt: + self.mano_robot_actions.load_state_dict(optimized_init_actions_ckpt['mano_robot_actions']) + + self.mano_robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_trans']) + if 'expanded_actuator_friction_forces' in optimized_init_actions_ckpt: + try: + self.mano_expanded_actuator_friction_forces.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_friction_forces']) + except: + pass + #### actuator point forces and actuator point offsets #### + if 'mano_expanded_actuator_delta_offset' in optimized_init_actions_ckpt: + print(f"loading mano_expanded_actuator_delta_offset...") + self.mano_expanded_actuator_delta_offset.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_delta_offset']) + if 'mano_expanded_actuator_pointact_forces' in optimized_init_actions_ckpt: + 
self.mano_expanded_actuator_pointact_forces.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_pointact_forces']) + if 'mano_expanded_actuator_pointact_damping_coefs' in optimized_init_actions_ckpt: + self.mano_expanded_actuator_pointact_damping_coefs.load_state_dict(optimized_init_actions_ckpt['mano_expanded_actuator_pointact_damping_coefs']) + + + + + ''' parameters for the real robot hand ''' + # # robot actions # # real robot hand ## + self.robot_actions = nn.Embedding( + num_embeddings=num_steps, embedding_dim=22, + ).cuda() + torch.nn.init.zeros_(self.robot_actions.weight) + params_to_train += list(self.robot_actions.parameters()) + + # self.robot_delta_states = nn.Embedding( + # num_embeddings=num_steps, embedding_dim=60, + # ).cuda() + # torch.nn.init.zeros_(self.robot_delta_states.weight) + # params_to_train += list(self.robot_delta_states.parameters()) + self.robot_states = nn.Embedding( + num_embeddings=num_steps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.robot_states.weight) + params_to_train += list(self.robot_states.parameters()) + + self.robot_init_states = nn.Embedding( + num_embeddings=1, embedding_dim=22, + ).cuda() + torch.nn.init.zeros_(self.robot_init_states.weight) + params_to_train += list(self.robot_init_states.parameters()) + + ## robot glb rotations ## + self.robot_glb_rotation = nn.Embedding( ## robot hand rotation + num_embeddings=num_steps, embedding_dim=4 + ).cuda() + self.robot_glb_rotation.weight.data[:, 0] = 1. + self.robot_glb_rotation.weight.data[:, 1:] = 0. + + self.robot_glb_trans = nn.Embedding( + num_embeddings=num_steps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.robot_glb_trans.weight) + + + # ### local minimum -> ## robot + self.robot_actuator_friction_forces = nn.Embedding( # frictional forces ## + num_embeddings=365428 * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.robot_actuator_friction_forces.weight) + + + self.expanded_actuator_delta_offset = nn.Embedding( + num_embeddings=self.robo_expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.expanded_actuator_delta_offset.weight) + # params_to_train += list(self.mano_expanded_actuator_delta_offset.parameters()) + + + + ##### expanded actuators pointact jforces ### + self.expanded_actuator_pointact_forces = nn.Embedding( + num_embeddings=self.robo_expanded_visual_pts_nn * 60, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.expanded_actuator_pointact_forces.weight) + + self.expanded_actuator_pointact_damping_coefs = nn.Embedding( + num_embeddings=10, embedding_dim=1 + ).cuda() + torch.nn.init.zeros_(self.expanded_actuator_pointact_damping_coefs.weight) + + + + + + ### load optimized init transformations for robot actions ### + if len(self.load_optimized_init_transformations) > 0: + print(f"[Robot] Loading optimized init transformations from {self.load_optimized_init_transformations}") + cur_optimized_init_actions_fn = self.load_optimized_init_transformations + # cur_optimized_init_actions = # optimized init states # ## robot init states ## + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + try: + self.robot_init_states.load_state_dict(optimized_init_actions_ckpt['robot_init_states']) + except: + pass + self.robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['robot_glb_rotation']) + if 'robot_delta_states' in optimized_init_actions_ckpt: + try: + self.robot_delta_states.load_state_dict(optimized_init_actions_ckpt['robot_delta_states']) + 
except: + pass + if 'robot_states' in optimized_init_actions_ckpt: + self.robot_states.load_state_dict(optimized_init_actions_ckpt['robot_states']) + # if 'robot_delta_states' ## robot delta states ## + # self.robot_actions.load_state_dict(optimized_init_actions_ckpt['robot_actions']) + # self.mano_robot_actuator_friction_forces.load_state_dict(optimized_init_actions_ckpt['robot_actuator_friction_forces']) + + self.robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['robot_glb_trans']) + + if 'expanded_actuator_delta_offset' in optimized_init_actions_ckpt: + print(f"[Robot] loading actuator delta offsets from {self.load_optimized_init_transformations}") + self.expanded_actuator_delta_offset.load_state_dict(optimized_init_actions_ckpt['expanded_actuator_delta_offset']) + + if 'expanded_actuator_pointact_forces' in optimized_init_actions_ckpt: + print(f"[Robot] loading actuator pointact forces from {self.load_optimized_init_transformations}") + self.expanded_actuator_pointact_forces.load_state_dict(optimized_init_actions_ckpt['expanded_actuator_pointact_forces']) + + if 'expanded_actuator_pointact_damping_coefs' in optimized_init_actions_ckpt: + print(f"[Robot] loading actuator pointact damping coefs from {self.load_optimized_init_transformations}") + self.expanded_actuator_pointact_damping_coefs.load_state_dict(optimized_init_actions_ckpt['expanded_actuator_pointact_damping_coefs']) + + + + if self.hand_type == "redmax_hand": + self.maxx_robo_pts = 25. + self.minn_robo_pts = -15. + self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + self.mult_const_after_cent = 0.5437551664260203 + else: + self.minn_robo_pts = -0.1 + self.maxx_robo_pts = 0.2 + self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + self.mult_const_after_cent = 0.437551664260203 + ## for grab ## + self.mult_const_after_cent = self.mult_const_after_cent / 3. 
* 0.9507 + + + # self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + # # self.robot_fingers = [3591, 4768, 6358, 10228, 6629, 10566, 5631, 9673] + # self.robot_fingers = [6496, 10128, 53, 1623, 3209, 4495, 9523, 8877] + # # self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + + # if self.hand_type == "redmax_hand": + # self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + # self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + + self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121, ] + self.robot_fingers = [6496, 10128, 53, 1623, 3209, 4495, 9523, 8877, ] + + if self.hand_type == "redmax_hand": + self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + self.robot_fingers = [14670, 321530, 36939, 125930, 200397, 257721, 333438, 338358] + + # params_to_train = [] + + if 'model.mano_mult_const_after_cent' in self.conf: + self.mano_mult_const_after_cent = self.conf['model.mano_mult_const_after_cent'] + + + self.nn_ts = self.nn_timesteps - 1 + + ''' parameters for the real robot hand ''' + + + + self.timestep_to_active_mesh = {} + # ref_expanded_visual_pts, minn_idx_expanded_visual_pts_to_link_pts # + # minn_idx_expanded_visual_pts_to_link_pts # + self.timestep_to_expanded_visual_pts = {} + self.timestep_to_active_mesh_opt_ours_sim = {} + + self.timestep_to_active_mesh_w_delta_states = {} + + + self.mass_point_mass = 1.0 + + self.timestep_to_actuator_points_vels = {} + self.timestep_to_actuator_points_passive_forces = {} + + self.timestep_to_robo_actuator_points_vels = {} + self.timestep_to_robo_actuator_points_passive_forces = {} + + + self.timestep_to_actuator_points_offsets = {} + self.timestep_to_robo_actuator_points_offsets = {} + + time_cons = 0.0005 + pointset_expansion_alpha = 0.1 + + + # states -> get states -> only update the actions # + with torch.no_grad(): # init them to zero + for cur_ts in range(self.nn_ts * self.mano_nn_substeps): + ''' Get rotations, translations, and actions of the current robot ''' + cur_glb_rot = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) # mano glb rot + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) # mano global rotations # + cur_glb_trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + ## mano robot states ## mano robot states ## + link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.mano_agent.set_init_states_target_value(link_cur_states) + + ## set init visual pts ## + cur_visual_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=True) + + cur_visual_pts = cur_visual_pts * self.mano_mult_const_after_cent + + # note that the scale is different but it would not affect the final result + # expanded_pts # + cur_visual_pts_idxes = torch.arange( + start=cur_ts * self.expanded_visual_pts_nn, end=(cur_ts + 1) * self.expanded_visual_pts_nn, dtype=torch.long + ).cuda() + + cur_visual_pts_offset = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes) ## get the idxes ### + + + + cur_rot = cur_glb_rot + cur_trans = cur_glb_trans + + ## + cur_visual_pts = torch.matmul(cur_rot, cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) ## transformed pts ## + + + cur_visual_pts = cur_visual_pts + cur_visual_pts_offset + + # if 
not self.use_mano_inputs: + # if self.use_mano_hand_for_test: + # self.timestep_to_active_mesh[cur_ts] = self.rhand_verts[cur_ts].detach() + # else: + # self.timestep_to_active_mesh[cur_ts] = cur_visual_pts.detach() + self.timestep_to_active_mesh[cur_ts] = cur_visual_pts.detach() + self.timestep_to_active_mesh_w_delta_states[cur_ts] = cur_visual_pts.detach() + self.timestep_to_active_mesh_opt_ours_sim[cur_ts] = cur_visual_pts.detach() + + + cur_robo_glb_rot = self.robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + cur_robo_glb_rot = cur_robo_glb_rot / torch.clamp(torch.norm(cur_robo_glb_rot, dim=-1, p=2), min=1e-7) + cur_robo_glb_rot = dyn_model_act.quaternion_to_matrix(cur_robo_glb_rot) # mano glboal rotations # + cur_robo_glb_trans = self.robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + robo_links_states = self.robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.robot_agent.set_init_states_target_value(robo_links_states) + cur_robo_visual_pts = self.robot_agent.get_init_state_visual_pts() + + + if not self.use_scaled_urdf: + ### transform the visual pts ### + cur_robo_visual_pts = (cur_robo_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + cur_robo_visual_pts = cur_robo_visual_pts * 2. -1. + cur_robo_visual_pts = cur_robo_visual_pts * self.mult_const_after_cent # mult_const # + + + # cur_rot = cur_robo_glb_rot + # cur_trans = cur_glb_trans + + # timestep_to_tot_rot[cur_ts] = cur_rot.detach() + # timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + + ### transform by the glboal transformation and the translation ### + cur_robo_visual_pts = torch.matmul(cur_robo_glb_rot, cur_robo_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_robo_glb_trans.unsqueeze(0) ## transformed pts ## + + self.timestep_to_active_mesh[cur_ts] = cur_robo_visual_pts.detach() + + self.iter_step = 0 + + mano_expanded_actuator_delta_offset_ori = self.mano_expanded_actuator_delta_offset.weight.data.clone().detach() + mano_expanded_actuator_pointact_forces_ori = self.mano_expanded_actuator_pointact_forces.weight.data.clone().detach() + + # robo_expanded_set_delta_motion_ori, robo_expanded_actuator_pointact_forces_ori + robo_expanded_set_delta_motion_ori = self.expanded_actuator_delta_offset.weight.data.clone().detach() + robo_expanded_actuator_pointact_forces_ori = self.expanded_actuator_pointact_forces.weight.data.clone().detach() + + ''' Set redmax robot actions ''' + + + # self.optimize_robot = False + + # self.optimize_robot = True + + # self.optimize_anchored_pts = False + # self.optimize_anchored_pts = True + + + params_to_train_kines = [] + # params_to_train_kines += list(self.mano_robot_glb_rotation.parameters()) + + + # if self.optimize_anchored_pts: + # params_to_train_kines += list(self.mano_expanded_actuator_delta_offset.parameters()) + # params_to_train_kines += list(self.mano_expanded_actuator_friction_forces.parameters()) + + # expanded_actuator_pointact_forces, expanded_actuator_delta_offset + # if self.optimize_robot: + + if self.optimize_pointset_motion_only: + # params_to_train_kines += list(self.robot_states.parameters()) + ### + params_to_train_kines += list(self.expanded_actuator_pointact_forces.parameters()) + params_to_train_kines += list(self.expanded_actuator_delta_offset.parameters()) + else: + params_to_train_kines += list(self.robot_states.parameters()) + params_to_train_kines += list(self.expanded_actuator_pointact_forces.parameters()) + 
params_to_train_kines += list(self.expanded_actuator_delta_offset.parameters()) + + + + + self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) #### kinematics optimizer ### + + + if self.optimize_rules: + params_to_train_kines = [] + params_to_train_kines += list(self.other_bending_network.parameters()) + self.kines_optimizer = torch.optim.Adam(params_to_train_kines, lr=self.learning_rate) + + + + self.expanded_set_delta_motion_ori = self.mano_expanded_actuator_delta_offset.weight.data.clone() + + + ''' prepare for keeping the original global rotations, trans, and states ''' + # ori_mano_robot_glb_rot = self.mano_robot_glb_rotation.weight.data.clone() + # ori_mano_robot_glb_trans = self.mano_robot_glb_trans.weight.data.clone() + # ori_mano_robot_delta_states = self.mano_robot_delta_states.weight.data.clone() + + + + + self.iter_step = 0 + + self.ts_to_dyn_mano_pts_th = {} + self.timestep_to_anchored_mano_pts = {} + self.timestep_to_expanded_robo_visual_pts = {} + + self.sampled_robo_expanded_pts_idxes = None + + + for i_iter in tqdm(range(100000)): + tot_losses = [] + tot_tracking_loss = [] + + # timestep # + # self.timestep_to_active_mesh = {} + self.timestep_to_posed_active_mesh = {} + self.timestep_to_posed_mano_active_mesh = {} + self.timestep_to_mano_active_mesh = {} + self.timestep_to_corr_mano_pts = {} + # # # # + timestep_to_tot_rot = {} + timestep_to_tot_trans = {} + + + # tot_penetrating_depth_penalty = [] + # tot_ragged_dist = [] + # tot_delta_offset_reg_motion = [] + # tot_dist_mano_visual_ori_to_cur = [] + # tot_reg_loss = [] + # tot_diff_cur_states_to_ref_states = [] + # tot_diff_tangential_forces = [] + # penetration_forces = None ### + # sampled_visual_pts_joint_idxes = None + + # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + self.timestep_to_raw_active_meshes = {} + self.timestep_to_penetration_points = {} + self.timestep_to_penetration_points_forces = {} + self.joint_name_to_penetration_forces_intermediates = {} + + # self.timestep_to_anchored_mano_pts = {} + + + self.ts_to_contact_passive_normals = {} + self.ts_to_passive_normals = {} + self.ts_to_passive_pts = {} + self.ts_to_contact_force_d = {} + self.ts_to_penalty_frictions = {} + self.ts_to_penalty_disp_pts = {} + self.ts_to_redmax_states = {} + self.ts_to_dyn_mano_pts = {} + # constraitns for states # + # with 17 dimensions on the states; [3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16] + + contact_pairs_set = None + self.contact_pairs_sets = {} + + # redmax_sim.reset(backward_flag = True) + + # tot_grad_qs = [] + + robo_intermediates_states = [] + + tot_penetration_depth = [] + + robo_actions_diff_loss = [] + mano_tracking_loss = [] + + + tot_interpenetration_nns = [] + + tot_diff_cur_visual_pts_offsets_with_ori = [] + tot_diff_robo_cur_visual_pts_offsets_with_ori = [] + + tot_summ_grad_mano_expanded_actuator_delta_offset_weight = [] + tot_summ_grad_mano_expanded_actuator_pointact_forces_weight = [] + + penetration_forces = None ### + sampled_visual_pts_joint_idxes = None + + + + # init global transformations ## + # cur_ts_redmax_delta_rotations = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_delta_rotations = torch.tensor([0., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_robot_trans = torch.zeros((3,), dtype=torch.float32).cuda() + + # for cur_ts in range(self.nn_ts): + for cur_ts in range(self.nn_ts * self.mano_nn_substeps): + # tot_redmax_actions = [] + # actions = {} + + 
self.free_def_bending_weight = 0.0 + + + ''' Get dynamic mano pts, expanded pts, etc ''' + # mano_robot_glb_rotation, mano_robot_glb_trans, mano_robot_delta_states # + cur_glb_rot = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + cur_glb_rot = cur_glb_rot + cur_ts_redmax_delta_rotations + + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) + # cur_glb_rot_quat = cur_glb_rot.clone() + + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) # mano glboal rotations # # glb rot # trans # + cur_glb_trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + + # # # cur_ts_delta_rot, cur_ts_redmax_robot_trans # # # + # cur_glb_rot = torch.matmul(cur_ts_delta_rot, cur_glb_rot) + # cur_glb_trans = cur_glb_trans + cur_ts_redmax_robot_trans # redmax robot transj## + + # link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + # self.mano_agent.set_init_states_target_value(link_cur_states) + # cur_visual_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=True) # init state visual pts # + + # cur_dyn_mano_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=False) # init s + + ### motivate via actions #### + link_cur_actions = self.mano_robot_actions(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.mano_agent.set_actions_and_update_states_v2( link_cur_actions, cur_ts, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes) + ### motivate via actions #### + + + cur_visual_pts, visual_pts_joint_idxes = self.mano_agent.get_init_state_visual_pts(expanded_pts=True, ret_joint_idxes=True) # init state visual pts # + + ## visual pts sampled ## + + ## visual pts sampled ## + cur_dyn_mano_pts = self.mano_agent.get_init_state_visual_pts(expanded_pts=False) # init s + + # not taht the scale is differne but would not affect the final result + # expanded_pts # + cur_visual_pts_idxes = torch.arange( + start=cur_ts * self.expanded_visual_pts_nn, end=(cur_ts + 1) * self.expanded_visual_pts_nn, dtype=torch.long + ).cuda() + + + + if self.drive_pointset == "actions": + ''' Act-React-driven point motions ''' + ###### get the point offset via actuation forces and reaction forces ###### + ### actuation forces at this timestep ### + cur_visual_pts_forces = self.mano_expanded_actuator_pointact_forces(cur_visual_pts_idxes) * 1e7 ## get vsual pts forces ### + + # if cur_ts > 0 and (cur_ts - 1 in self.timestep_to_actuator_points_passive_forces): + # cur_visual_pts_passive_forces = self.timestep_to_actuator_points_passive_forces[cur_ts - 1] ## nn_visual_pts x 3 ## + # cur_visual_pts_forces = cur_visual_pts_forces + cur_visual_pts_passive_forces ## two forces ### + + cur_visual_pts_forces = cur_visual_pts_forces * pointset_expansion_alpha + + ## --- linear damping here ? 
## + cur_visual_pts_accs = cur_visual_pts_forces / self.mass_point_mass ### get the mass pont accs ## + if cur_ts == 0: + cur_visual_pts_vels = cur_visual_pts_accs * time_cons + else: # visual pts acc -> visual pts vels # + # prev_visual_pts_vels = self.timestep_to_actuator_points_vels[cur_ts - 1] ### nn_pts x 3 ## + # cur_visual_pts_accs = cur_visual_pts_accs - cur_vel_damping_coef * prev_visual_pts_vels ### nn_pts x 3 ## + # cur_visual_pts_vels = prev_visual_pts_vels + cur_visual_pts_accs * time_cons ## nn_pts x 3 ## + cur_visual_pts_vels = cur_visual_pts_accs * time_cons + self.timestep_to_actuator_points_vels[cur_ts] = cur_visual_pts_vels.detach().clone() + cur_visual_pts_offsets = cur_visual_pts_vels * time_cons + # + # if cur_ts > 0: + # prev_visual_pts_offset = self.timestep_to_actuator_points_offsets[cur_ts - 1] + # cur_visual_pts_offsets = prev_visual_pts_offset + cur_visual_pts_offsets + + + # train_pointset_acts_via_deltas, diff_cur_visual_pts_offsets_with_ori + # cur_visual_pts_offsets_from_delta = mano_expanded_actuator_delta_offset_ori[cur_ts] + # cur_visual_pts_offsets_from_delta = self.mano_expanded_actuator_delta_offset.weight.data[cur_ts].detach() + cur_visual_pts_offsets_from_delta = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes).detach() + ## + diff_cur_visual_pts_offsets_with_ori = torch.sum((cur_visual_pts_offsets - cur_visual_pts_offsets_from_delta) ** 2, dim=-1).mean() ## mean of the avg offset differences ## + tot_diff_cur_visual_pts_offsets_with_ori.append(diff_cur_visual_pts_offsets_with_ori.item()) + + + self.timestep_to_actuator_points_offsets[cur_ts] = cur_visual_pts_offsets.detach().clone() + ###### get the point offset via actuation forces and reaction forces ###### + ''' Act-React-driven point motions ''' + elif self.drive_pointset == "states": + ''' Offset-driven point motions ''' + ## points should be able to manipulate the object accordingly ### + ### we should avoid the penetrations between points and the object ### + ### we should restrict relative point displacement / the point offsets at each timestep to relatively small values ### + ## -> so we have three losses for the delta offset optimization ## + cur_visual_pts_offsets = self.mano_expanded_actuator_delta_offset(cur_visual_pts_idxes) + # cur_visual_pts_offsets = cur_visual_pts_offsets * 10 + self.timestep_to_actuator_points_offsets[cur_ts] = cur_visual_pts_offsets.detach().clone() + ''' Offset-driven point motions ''' + + else: + raise ValueError(f"Unknown drive_pointset: {self.drive_pointset}") + + + + cur_visual_pts = cur_visual_pts * self.mano_mult_const_after_cent # mult cnst after cent # + cur_dyn_mano_pts = cur_dyn_mano_pts * self.mano_mult_const_after_cent + + cur_rot = cur_glb_rot + cur_trans = cur_glb_trans + + timestep_to_tot_rot[cur_ts] = cur_rot.detach() + timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + + cur_visual_pts = torch.matmul(cur_rot, cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) + cur_dyn_mano_pts = torch.matmul(cur_rot, cur_dyn_mano_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) + cur_visual_pts = cur_visual_pts + cur_visual_pts_offsets + + + self.timestep_to_active_mesh[cur_ts] = cur_visual_pts + self.timestep_to_raw_active_meshes[cur_ts] = cur_visual_pts.detach().cpu().numpy() + self.ts_to_dyn_mano_pts[cur_ts] = cur_dyn_mano_pts.detach().cpu().numpy() + self.ts_to_dyn_mano_pts_th[cur_ts] = cur_dyn_mano_pts + ''' Get dynamic mano 
pts, expanded pts, etc ''' + + + + cur_robo_glb_rot = self.robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + cur_robo_glb_rot = cur_robo_glb_rot / torch.clamp(torch.norm(cur_robo_glb_rot, dim=-1, p=2), min=1e-7) + cur_robo_glb_rot = dyn_model_act.quaternion_to_matrix(cur_robo_glb_rot) # mano glboal rotations # + cur_robo_glb_trans = self.robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + ### drive by states, not the acts ### + robo_links_states = self.robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.robot_agent.set_init_states_target_value(robo_links_states) + cur_robo_visual_pts = self.robot_agent.get_init_state_visual_pts() + cur_robo_expanded_visual_pts, robo_visual_pts_joint_idxes = self.robot_agent.get_init_state_visual_pts(expanded_pts=True, ret_joint_idxes=True) # init state visual pts # + + + if not self.use_scaled_urdf: + cur_robo_visual_pts = (cur_robo_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + cur_robo_visual_pts = cur_robo_visual_pts * 2. -1. + cur_robo_visual_pts = cur_robo_visual_pts * self.mult_const_after_cent # mult_const # + + cur_robo_expanded_visual_pts = (cur_robo_expanded_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + cur_robo_expanded_visual_pts = cur_robo_expanded_visual_pts * 2. -1. + cur_robo_expanded_visual_pts = cur_robo_expanded_visual_pts * self.mult_const_after_cent # mult_const # + + cur_rot = cur_robo_glb_rot + cur_trans = cur_glb_trans + + timestep_to_tot_rot[cur_ts] = cur_rot.detach() + timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + + ### transform by the glboal transformation and the translation ### + cur_robo_visual_pts = torch.matmul(cur_robo_glb_rot, cur_robo_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_robo_glb_trans.unsqueeze(0) ## transformed pts ## + + ### transform by the glboal transformation and the translation ### + cur_robo_expanded_visual_pts = torch.matmul(cur_robo_glb_rot, cur_robo_expanded_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_robo_glb_trans.unsqueeze(0) ## transformed pts ## + + + + + self.timestep_to_active_mesh[cur_ts] = cur_robo_visual_pts.clone() # robo visual pts # + + # not taht the scale is differne but would not affect the final result + # expanded_pts # + cur_robo_visual_pts_idxes = torch.arange( + start=cur_ts * self.robo_expanded_visual_pts_nn, end=(cur_ts + 1) * self.robo_expanded_visual_pts_nn, dtype=torch.long + ).cuda() + + # expanded_actuator_pointact_forces, expanded_actuator_delta_offset + if self.drive_pointset == "actions": + ''' Act-React-driven point motions ''' + ###### get the point offset via actuation forces and reaction forces ###### + ### actuation forces at this timestep ### + cur_visual_pts_forces = self.expanded_actuator_pointact_forces(cur_robo_visual_pts_idxes) * 1e7 + cur_visual_pts_forces = cur_visual_pts_forces * pointset_expansion_alpha + + ## --- linear damping here ? 
## + cur_visual_pts_accs = cur_visual_pts_forces / self.mass_point_mass ### get the mass pont accs ## + if cur_ts == 0: + cur_visual_pts_vels = cur_visual_pts_accs * time_cons + else: # visual pts acc -> visual pts vels # + cur_visual_pts_vels = cur_visual_pts_accs * time_cons + self.timestep_to_robo_actuator_points_vels[cur_ts] = cur_visual_pts_vels.detach().clone() + cur_visual_pts_offsets = cur_visual_pts_vels * time_cons + + # train_pointset_acts_via_deltas, diff_cur_visual_pts_offsets_with_ori + # cur_visual_pts_offsets_from_delta = mano_expanded_actuator_delta_offset_ori[cur_ts] + # cur_visual_pts_offsets_from_delta = self.mano_expanded_actuator_delta_offset.weight.data[cur_ts].detach() + cur_visual_pts_offsets_from_delta = self.expanded_actuator_delta_offset(cur_robo_visual_pts_idxes).detach() + ## + diff_cur_visual_pts_offsets_with_ori = torch.sum((cur_visual_pts_offsets - cur_visual_pts_offsets_from_delta) ** 2, dim=-1).mean() + tot_diff_robo_cur_visual_pts_offsets_with_ori.append(diff_cur_visual_pts_offsets_with_ori.item()) + + self.timestep_to_robo_actuator_points_offsets[cur_ts] = cur_visual_pts_offsets.detach().clone() + ###### get the point offset via actuation forces and reaction forces ###### + ''' Act-React-driven point motions ''' + elif self.drive_pointset == "states": + ''' Offset-driven point motions ''' + cur_visual_pts_offsets = self.expanded_actuator_delta_offset(cur_robo_visual_pts_idxes) + # cur_visual_pts_offsets = cur_visual_pts_offsets * 10 + self.timestep_to_robo_actuator_points_offsets[cur_ts] = cur_visual_pts_offsets.detach().clone() + ''' Offset-driven point motions ''' + + else: + raise ValueError(f"Unknown drive_pointset: {self.drive_pointset}") + + cur_robo_expanded_visual_pts = cur_robo_expanded_visual_pts + cur_visual_pts_offsets + + # print(f"cur_robo_expanded_visual_pts: {cur_robo_expanded_visual_pts.size()}") + + if self.sampled_robo_expanded_pts_idxes is None: + # if os.path.exists(redmax_sampled_verts_idxes_fn): + # sampled_verts_idxes = np.load(redmax_sampled_verts_idxes_fn) + # sampled_verts_idxes = torch.from_numpy(sampled_verts_idxes).long().cuda() + # else: + # print(f"Sampling fps idxes for robot expanded pts...") + n_sampling = 10000 + pts_fps_idx = data_utils.farthest_point_sampling(cur_robo_expanded_visual_pts.unsqueeze(0), n_sampling=n_sampling) + sampled_robo_expanded_pts_idxes = pts_fps_idx + # np.save(redmax_sampled_verts_idxes_fn, sampled_verts_idxes.detach().cpu().numpy()) + self.sampled_robo_expanded_pts_idxes = sampled_robo_expanded_pts_idxes + self.timestep_to_expanded_robo_visual_pts[cur_ts] = cur_robo_expanded_visual_pts[self.sampled_robo_expanded_pts_idxes] + + + self.free_def_bending_weight = 0.0 + # self.free_def_bending_weight = 0.5 + + if i_iter == 0 and cur_ts == 0: ## for the tiantianquan sequence ## + dist_robot_pts_to_mano_pts = torch.sum( + (cur_robo_expanded_visual_pts.unsqueeze(1) - cur_visual_pts.unsqueeze(0)) ** 2, dim=-1 + ) + minn_dist_robot_pts_to_mano_pts, correspondence_pts_idxes = torch.min(dist_robot_pts_to_mano_pts, dim=-1) + minn_dist_robot_pts_to_mano_pts = torch.sqrt(minn_dist_robot_pts_to_mano_pts) + # dist_smaller_than_thres = minn_dist_robot_pts_to_mano_pts < 0.01 + dist_smaller_than_thres = minn_dist_robot_pts_to_mano_pts < 0.005 + + corr_correspondence_pts = cur_visual_pts[correspondence_pts_idxes] # in correspondence pts idxes ## + + dist_corr_correspondence_pts_to_mano_visual_pts = torch.sum( + (corr_correspondence_pts.unsqueeze(1) - cur_visual_pts.unsqueeze(0)) ** 2, dim=-1 + ) + 
dist_corr_correspondence_pts_to_mano_visual_pts = torch.sqrt(dist_corr_correspondence_pts_to_mano_visual_pts) + minn_dist_to_corr_pts, _ = torch.min(dist_corr_correspondence_pts_to_mano_visual_pts, dim=0) + anchored_mano_visual_pts = minn_dist_to_corr_pts < 0.005 + + corr_correspondence_pts = cur_visual_pts[correspondence_pts_idxes] + # corr_robo = cur_visual_pts[sampled_verts_idxes] + cd_robo_pts_to_corr_mano_pts = torch.sum( # distance from robot pts to the anchored mano pts + (cur_robo_expanded_visual_pts.unsqueeze(1) - cur_visual_pts[anchored_mano_visual_pts].unsqueeze(0).detach()) ** 2, dim=-1 + ) + + # self.timestep_to_anchored_mano_pts[cur_ts] = cur_visual_pts[anchored_mano_visual_pts] # .detach().cpu().numpy() + self.timestep_to_anchored_mano_pts[cur_ts] = cur_visual_pts # .detach().cpu().numpy() + + + cd_robo_to_mano, _ = torch.min(cd_robo_pts_to_corr_mano_pts, dim=-1) + cd_mano_to_robo, _ = torch.min(cd_robo_pts_to_corr_mano_pts, dim=0) + # diff_robo_to_corr_mano_pts = cd_mano_to_robo.mean() + diff_robo_to_corr_mano_pts = cd_robo_to_mano.mean() + + mano_fingers = self.rhand_verts[cur_ts][self.mano_fingers] + + + + ##### finger cd loss -> to the anchored expanded actioning points ##### + ## fingeer tracking loss -> to each finger ## + # loss_finger_tracking = diff_robo_to_corr_mano_pts * self.finger_cd_loss_coef + pure_finger_tracking_loss * 0.5 # + diff_robo_to_corr_mano_pts_finger_tracking * self.finger_tracking_loss_coef + loss_finger_tracking = diff_robo_to_corr_mano_pts * self.finger_cd_loss_coef # + pure_finger_tracking_loss * 0.5 + + + ## TODO: add the glboal retargeting using fingers before conducting this approach + + + # def evaluate_tracking_loss(): + # self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=False, contact_pairs_set=self.contact_pairs_set) + + # ## + # # init states # + # # cur_ts % mano_nn_substeps == 0: + # if (cur_ts + 1) % mano_nn_substeps == 0: + # cur_passive_big_ts = cur_ts // mano_nn_substeps + # in_func_tracking_loss = self.compute_loss_optimized_transformations_v2(cur_ts + 1, cur_passive_big_ts + 1) + # # tot_tracking_loss.append(tracking_loss.detach().cpu().item()) + # else: + # in_func_tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # return in_func_tracking_loss + + + if contact_pairs_set is None: + self.contact_pairs_set = None + else: + ## + self.contact_pairs_set = contact_pairs_set.copy() + + # ### if traiing the jrbpt h + # print(self.timestep_to_active_mesh[cur_ts].size(), cur_visual_pts_friction_forces.size()) # friction forces # + ### optimize the robot tracing loss ### + + + # if self.o + + if self.optimize_anchored_pts: + # anchored_cur_visual_pts_friction_forces = cur_visual_pts_friction_forces[anchored_mano_visual_pts] + contact_pairs_set = self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_anchored_mano_pts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=self.fix_obj, contact_pairs_set=contact_pairs_set) + else: + contact_pairs_set = self.other_bending_network.forward2( 
input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_expanded_robo_visual_pts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=self.robot_actuator_friction_forces, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=self.fix_obj, contact_pairs_set=contact_pairs_set, pts_frictional_forces=None) + + + ### train with force to active ## + if self.train_with_forces_to_active and (not self.use_mano_inputs): + # penetration_forces # + if torch.sum(self.other_bending_network.penetrating_indicator.float()) > 0.5: + net_penetrating_forces = self.other_bending_network.penetrating_forces + net_penetrating_points = self.other_bending_network.penetrating_points + + + # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + self.timestep_to_penetration_points[cur_ts] = net_penetrating_points.detach().cpu().numpy() + self.timestep_to_penetration_points_forces[cur_ts] = net_penetrating_forces.detach().cpu().numpy() + + + sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[self.other_bending_network.penetrating_indicator] + + + self.timestep_to_actuator_points_passive_forces[cur_ts] = self.other_bending_network.penetrating_forces_allpts.detach().clone() ## for + + net_penetrating_forces = torch.matmul( + cur_rot.transpose(1, 0), net_penetrating_forces.transpose(1, 0) + ).transpose(1, 0) + + net_penetrating_forces = (1.0 - pointset_expansion_alpha) * net_penetrating_forces + + net_penetrating_points = torch.matmul( + cur_rot.transpose(1, 0), (net_penetrating_points - cur_trans.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + + penetration_forces = net_penetrating_forces + + else: + penetration_forces = None + sampled_visual_pts_joint_idxes = None + ''' the bending network still have this property and we can get force values here for the expanded visual points ''' + self.timestep_to_actuator_points_passive_forces[cur_ts] = self.other_bending_network.penetrating_forces_allpts.detach().clone() + + + if (cur_ts + 1) % mano_nn_substeps == 0: + cur_passive_big_ts = cur_ts // mano_nn_substeps + ### tracking loss between the predicted transformation and te tracking ### + tracking_loss = self.compute_loss_optimized_transformations_v2(cur_ts + 1, cur_passive_big_ts + 1) + tot_tracking_loss.append(tracking_loss.detach().cpu().item()) + else: + tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + + + + # diff_hand_tracking = torch.zeros((1,), dtype=torch.float32).cuda().mean() ## + + # ## diff + # # diff_hand_tracking_coef + # # kinematics_proj_loss = kinematics_trans_diff + penetraton_penalty + diff_hand_tracking * self.diff_hand_tracking_coef + tracking_loss + + # # if self.use_mano_hand_for_test: ## only the kinematics mano hand is optimized here ## + # # kinematics_proj_loss = tracking_loss + + # # kinematics_proj_loss = hand_tracking_loss * 1e2 + + # smaller_than_zero_level_set_indicator + cur_interpenetration_nns = self.other_bending_network.smaller_than_zero_level_set_indicator.float().sum() + + + # kinematics_proj_loss = diff_hand_tracking * self.diff_hand_tracking_coef + tracking_loss + penetraton_penalty + # expanded_set_delta_motion_ori + diff_actions = torch.sum( + (self.expanded_set_delta_motion_ori - self.mano_expanded_actuator_delta_offset.weight) ** 2, dim=-1 + ) + diff_actions = diff_actions.mean() + + reg_act_force_loss = torch.sum( + (mano_expanded_actuator_pointact_forces_ori - 
self.mano_expanded_actuator_pointact_forces.weight.data) ** 2, dim=-1 + ) + reg_act_force_loss = reg_act_force_loss.mean() + + # robo_expanded_set_delta_motion_ori, robo_expanded_actuator_pointact_forces_ori + diff_robo_actions = torch.sum( + (robo_expanded_set_delta_motion_ori - self.expanded_actuator_delta_offset.weight) ** 2, dim=-1 + ) + diff_robo_actions = diff_robo_actions.mean() + + reg_robo_act_force_loss = torch.sum( + (robo_expanded_actuator_pointact_forces_ori - self.expanded_actuator_pointact_forces.weight.data) ** 2, dim=-1 + ) + reg_robo_act_force_loss = reg_robo_act_force_loss.mean() + + + diff_actions = diff_actions + reg_act_force_loss + diff_robo_actions + reg_robo_act_force_loss + + + kinematics_proj_loss = loss_finger_tracking + tracking_loss + + loss = kinematics_proj_loss # * self.loss_scale_coef ## get + + + mano_tracking_loss.append(loss_finger_tracking.detach().cpu().item()) + + + self.kines_optimizer.zero_grad() + + kinematics_proj_loss.backward(retain_graph=True) + + self.kines_optimizer.step() + + # if self.use_LBFGS: + # self.kines_optimizer.step(evaluate_tracking_loss) # + # else: + # self.kines_optimizer.step() + + # + # tracking_loss.backward(retain_graph=True) + # if self.use_LBFGS: + # self.other_bending_network.reset_timestep_to_quantities(cur_ts) + + + # robot_states_actions_diff_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # diff_actions + robot_states_actions_diff_loss = diff_actions + robo_actions_diff_loss.append(robot_states_actions_diff_loss) + + + tot_losses.append(loss.detach().item()) # total losses # # total losses # + # tot_penalty_dot_forces_normals.append(cur_penalty_dot_forces_normals.detach().item()) + # tot_penalty_friction_constraint.append(cur_penalty_friction_constraint.detach().item()) + + self.iter_step += 1 + + self.writer.add_scalar('Loss/loss', loss, self.iter_step) + + + if self.iter_step % self.save_freq == 0: + self.save_checkpoint() + + self.update_learning_rate() + + torch.cuda.empty_cache() + + + ''' Get nn_forward_ts and backward through the actions for updating ''' + tot_losses = sum(tot_losses) / float(len(tot_losses)) + if len(tot_tracking_loss) > 0: + tot_tracking_loss = sum(tot_tracking_loss) / float(len(tot_tracking_loss)) + else: + tot_tracking_loss = 0.0 + if len(tot_penetration_depth) > 0: + tot_penetration_depth = sum(tot_penetration_depth) / float(len(tot_penetration_depth)) + else: + tot_penetration_depth = 0.0 + robo_actions_diff_loss = sum(robo_actions_diff_loss) / float(len(robo_actions_diff_loss)) + if len(mano_tracking_loss) > 0: + mano_tracking_loss = sum(mano_tracking_loss) / float(len(mano_tracking_loss)) + else: + mano_tracking_loss = 0.0 + + + + + if i_iter % self.report_freq == 0: + logs_sv_fn = os.path.join(self.base_exp_dir, 'log.txt') + + cur_log_sv_str = 'iter:{:8>d} loss = {} tracking_loss = {} mano_tracking_loss = {} penetration_depth = {} actions_diff_loss = {} lr={}'.format(self.iter_step, tot_losses, tot_tracking_loss, mano_tracking_loss, tot_penetration_depth, robo_actions_diff_loss, self.optimizer.param_groups[0]['lr']) + + print(cur_log_sv_str) + ''' Dump to the file ''' + with open(logs_sv_fn, 'a') as log_file: + log_file.write(cur_log_sv_str + '\n') + else: + logs_sv_fn = os.path.join(self.base_exp_dir, 'log.txt') + + cur_log_sv_str = 'iter:{:8>d} loss = {} tracking_loss = {} mano_tracking_loss = {} penetration_depth = {} actions_diff_loss = {} lr={}'.format(self.iter_step, tot_losses, tot_tracking_loss, mano_tracking_loss, tot_penetration_depth, robo_actions_diff_loss, 
self.optimizer.param_groups[0]['lr']) + + print(cur_log_sv_str) + + + # self.validate_mesh_robo_a() + if i_iter % self.val_mesh_freq == 0: + self.validate_mesh_robo_g() + self.validate_mesh_robo() + + + torch.cuda.empty_cache() + + + ''' GRAB clips --- kinematics based finger retargeting ''' + def train_sparse_retar(self, ): + + self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs')) + self.update_learning_rate() + + + nn_timesteps = self.timestep_to_passive_mesh.size(0) + self.nn_timesteps = nn_timesteps + num_steps = self.nn_timesteps + + # load + ''' Load the robot hand ''' + model_path = self.conf['model.sim_model_path'] + self.hand_type = "shadow_hand" + # if model_path.endswith(".xml"): + # # self.hand_type = "redmax_hand" + # robot_agent = dyn_model_act.RobotAgent(xml_fn=model_path, args=None) + # else: + # self.hand_type = "shadow_hand" + robot_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path, args=None) + + + self.robot_agent = robot_agent + robo_init_verts = self.robot_agent.robot_pts + robo_sampled_verts_idxes_fn = "robo_sampled_verts_idxes.npy" + + sampled_verts_idxes = np.load(robo_sampled_verts_idxes_fn) + sampled_verts_idxes = torch.from_numpy(sampled_verts_idxes).long().cuda() + + self.robo_hand_faces = self.robot_agent.robot_faces + + + ''' Load the robot hand ''' + ### adfte loadingjthe robot hand ## + self.robot_delta_angles = nn.Embedding( + num_embeddings=num_steps, embedding_dim=4, + ).cuda() + torch.nn.init.zeros_(self.robot_delta_angles.weight) + self.robot_delta_angles.weight.data[:, 0] = 1.0 + + + self.robot_delta_trans = nn.Embedding( + num_embeddings=num_steps, embedding_dim=3, + ).cuda() + torch.nn.init.zeros_(self.robot_delta_trans.weight) + + + self.robot_delta_glb_trans = nn.Embedding( + num_embeddings=num_steps, embedding_dim=3, + ).cuda() + torch.nn.init.zeros_(self.robot_delta_glb_trans.weight) + + ### adfte loadingjthe robot hand ## + self.robot_delta_states = nn.Embedding( + num_embeddings=num_steps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.robot_delta_states.weight) + self.robot_delta_states.weight.data[0, 24] = 1.0 + + self.robot_states = nn.Embedding( + num_embeddings=num_steps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.robot_states.weight) + self.robot_states.weight.data[:, 24] = 1.0 + + self.robot_init_states = nn.Embedding( + num_embeddings=1, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.robot_init_states.weight) + self.robot_init_states.weight.data[0, 24] = 1.0 + + + self.robot_glb_rotation = nn.Embedding( + num_embeddings=num_steps, embedding_dim=4 + ).cuda() + # [6.123234e-17, 0.000000e+00 0.000000e+00 1.000000e+00 ] + + # if self.hand_type == "redmax": + + if self.hand_type == "shadow": + self.robot_glb_rotation.weight.data[:, 0] = 6.123234e-17 + self.robot_glb_rotation.weight.data[:, 1:] = 0. + self.robot_glb_rotation.weight.data[:, 3] = 1.0 + else: + self.robot_glb_rotation.weight.data[:, 0] = 1. + self.robot_glb_rotation.weight.data[:, 1:] = 0. 
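+        # Global-rotation initialisation note (clarifying comment, no behaviour change):
+        # assuming `dyn_model_act.quaternion_to_matrix` takes a real-first (w, x, y, z)
+        # quaternion (as in PyTorch3D), the "shadow" branch writes
+        #   q = [cos(pi/2), 0, 0, sin(pi/2)] ~= [6.123234e-17, 0, 0, 1]
+        # i.e. a 180-degree rotation about the last axis, while the else branch writes the
+        # identity quaternion [1, 0, 0, 0].  Also note that `self.hand_type` is assigned
+        # "shadow_hand" earlier in this function, so the `== "shadow"` comparison above does
+        # not match as written and the identity initialisation is the one actually used.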
+ + self.robot_glb_trans = nn.Embedding( + num_embeddings=num_steps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.robot_glb_trans.weight) + # params_to_train += list(self.robot_glb_trans.parameters()) + + load_delta_trans = False + if 'model.load_optimized_init_transformations' in self.conf and len(self.conf['model.load_optimized_init_transformations']) > 0: + print(f"[Robot] Loading optimized init transformations from {self.conf['model.load_optimized_init_transformations']}") + cur_optimized_init_actions_fn = self.conf['model.load_optimized_init_transformations'] + # cur_optimized_init_actions = # optimized init states + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + + try: + if optimized_init_actions_ckpt['robot_init_states']['weight'].size(0) > self.robot_init_states.weight.data.size(0): + optimized_init_actions_ckpt['robot_init_states']['weight'].data = optimized_init_actions_ckpt['robot_init_states']['weight'].data[:self.robot_init_states.weight.data.size(0)] + self.robot_init_states.load_state_dict(optimized_init_actions_ckpt['robot_init_states']) + except: + pass + if optimized_init_actions_ckpt['robot_glb_rotation']['weight'].size(0) > self.robot_glb_rotation.weight.data.size(0): + optimized_init_actions_ckpt['robot_glb_rotation']['weight'].data = optimized_init_actions_ckpt['robot_glb_rotation']['weight'].data[:self.robot_glb_rotation.weight.data.size(0)] + self.robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['robot_glb_rotation']) + if 'robot_delta_states' in optimized_init_actions_ckpt: + try: + if optimized_init_actions_ckpt['robot_delta_states']['weight'].size(0) > self.robot_delta_states.weight.data.size(0): + optimized_init_actions_ckpt['robot_delta_states']['weight'].data = optimized_init_actions_ckpt['robot_delta_states']['weight'].data[:self.robot_delta_states.weight.data.size(0)] + self.robot_delta_states.load_state_dict(optimized_init_actions_ckpt['robot_delta_states']) + except: + pass + if 'robot_states' in optimized_init_actions_ckpt: + if optimized_init_actions_ckpt['robot_states']['weight'].size(0) > self.robot_states.weight.data.size(0): + optimized_init_actions_ckpt['robot_states']['weight'].data = optimized_init_actions_ckpt['robot_states']['weight'].data[:self.robot_states.weight.data.size(0)] + self.robot_states.load_state_dict(optimized_init_actions_ckpt['robot_states']) + # if 'robot_delta_states' ## robot delta states ## + if 'robot_actions' in optimized_init_actions_ckpt: + if optimized_init_actions_ckpt['robot_actions']['weight'].size(0) > self.robot_actions.weight.data.size(0): + optimized_init_actions_ckpt['robot_actions']['weight'].data = optimized_init_actions_ckpt['robot_actions']['weight'].data[:self.robot_actions.weight.data.size(0)] + self.robot_actions.load_state_dict(optimized_init_actions_ckpt['robot_actions']) + # self.robot_actions.load_state_dict(optimized_init_actions_ckpt['robot_actions']) + # self.mano_robot_actuator_friction_forces.load_state_dict(optimized_init_actions_ckpt['robot_actuator_friction_forces']) + if optimized_init_actions_ckpt['robot_glb_trans']['weight'].data.size(0) > self.robot_glb_trans.weight.data.size(0): + optimized_init_actions_ckpt['robot_glb_trans']['weight'].data = optimized_init_actions_ckpt['robot_glb_trans']['weight'].data[:self.robot_glb_trans.weight.data.size(0)] + self.robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['robot_glb_trans']) + + if 'robot_delta_angles' in optimized_init_actions_ckpt: + if 
optimized_init_actions_ckpt['robot_delta_angles']['weight'].data.size(0) > self.robot_delta_angles.weight.data.size(0): + optimized_init_actions_ckpt['robot_delta_angles']['weight'].data = optimized_init_actions_ckpt['robot_delta_angles']['weight'].data[:self.robot_delta_angles.weight.data.size(0)] + self.robot_delta_angles.load_state_dict(optimized_init_actions_ckpt['robot_delta_angles']) + if 'robot_delta_trans' in optimized_init_actions_ckpt: + if optimized_init_actions_ckpt['robot_delta_trans']['weight'].data.size(0) > self.robot_delta_trans.weight.data.size(0): + optimized_init_actions_ckpt['robot_delta_trans']['weight'].data = optimized_init_actions_ckpt['robot_delta_trans']['weight'].data[:self.robot_delta_trans.weight.data.size(0)] + self.robot_delta_trans.load_state_dict(optimized_init_actions_ckpt['robot_delta_trans']) + + if 'robot_delta_glb_trans' in optimized_init_actions_ckpt: + load_delta_trans = True + if optimized_init_actions_ckpt['robot_delta_glb_trans']['weight'].size(0) > self.robot_delta_glb_trans.weight.data.size(0): + optimized_init_actions_ckpt['robot_delta_glb_trans']['weight'].data = optimized_init_actions_ckpt['robot_delta_glb_trans']['weight'].data[:self.robot_delta_glb_trans.weight.data.size(0)] + + self.robot_delta_glb_trans.load_state_dict(optimized_init_actions_ckpt['robot_delta_glb_trans']) + + # robot_delta_glb_trans, robot_delta_glb_trans_lft + if (not load_delta_trans): + tot_robot_delta_trans = [] + for i_fr in range(num_steps): + if i_fr == 0: + cur_robot_delta_trans = self.robot_glb_trans.weight.data[0] + else: + cur_robot_delta_trans = self.robot_glb_trans.weight.data[i_fr] - self.robot_glb_trans.weight.data[i_fr - 1] + tot_robot_delta_trans.append(cur_robot_delta_trans) + tot_robot_delta_trans = torch.stack(tot_robot_delta_trans, dim=0) + self.robot_delta_glb_trans.weight.data.copy_(tot_robot_delta_trans) # use delta states ## + + + # ### load init transformations ckpts ### # + for i_ts in range(self.robot_glb_trans.weight.size(0)): + if i_ts == 0: + self.robot_delta_trans.weight.data[i_ts, :] = self.robot_glb_trans.weight.data[i_ts, :].clone() + else: + prev_ts = i_ts - 1 + self.robot_delta_trans.weight.data[i_ts, :] = self.robot_glb_trans.weight.data[i_ts, :] - self.robot_glb_trans.weight.data[i_ts - 1, :] + + self.robot_delta_angles.weight.data[0, :] = self.robot_glb_rotation.weight.data[0, :].clone() + + params_to_train = [] # params to train # + # params_to_train += list(self.robot_delta_states.parameters()) + params_to_train += list(self.robot_glb_rotation.parameters()) + # params_to_train += list(self.robot_init_states.parameters()) + params_to_train += list(self.robot_glb_trans.parameters()) + + params_to_train += list(self.robot_delta_angles.parameters()) + + params_to_train += list(self.robot_delta_trans.parameters()) + + if not self.retar_only_glb: + params_to_train += list(self.robot_states.parameters()) + params_to_train += list(self.robot_delta_states.parameters()) + + + + + # # # train the robot or train the expanded points # + # # first tip | center of the wrist | second tip | thrid tip | fourth tip | fifth tip | left wrist | rigth wrist # + # self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + # # self.robot_fingers = [3591, 4768, 6358, 10228, 6629, 10566, 5631, 9673] + # self.robot_fingers = [6496, 10128, 53, 1623, 3209, 4495, 9523, 8877] + # # self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + # self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121, 86, 364, 477, 588] + # self.robot_fingers = [6496, 
10128, 53, 1623, 3209, 4495, 9523, 8877, 1030, 2266, 3822, 5058] + + self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121, 86, 364, 477, 588, 699] + self.robot_fingers = [6684, 9174, 53, 1623, 3209, 4495, 10028, 8762, 1030, 2266, 3822, 5058, 7074] + + # self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + # self.robot_fingers = [6496, 10128, 53, 1623, 3209, 4495, 9523, 8877] + + # if self.hand_type == "redmax_hand": + # self.mano_fingers = [745, 279, 320, 444, 555, 672, 234, 121] + # self.robot_fingers = [521, 624, 846, 973, 606, 459, 383, 265] + # self.robot_fingers = [233786, 268323, 183944, 187495, 55902, 166666, 178838, 75173] + # self.robot_fingers = [233786, 251198, 183944, 64195, 55902, 166666, 178838, 75173] + # self.robot_fingers = [14670, 321530, 36939, 125930, 200397, 257721, 333438, 338358] + + + # # robot fingers ## + # self.mano_fingers = [279, 320, 444, 555, 672, 234, 121] + # self.robot_fingers = [321530, 36939, 125930, 200397, 257721, 333438, 338358] + # if not os.path.exists("/home/xueyi"): + # self.robot_fingers = [347724, 53224, 128561, 198041, 276858, 340722, 340333] + + # self.minn_robo_pts = -0.1 + # self.maxx_robo_pts = 0.2 + # self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + + # if self.hand_type == "redmax_hand": + # self.maxx_robo_pts = 25. + # self.minn_robo_pts = -15. + # self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + # self.mult_const_after_cent = 0.5437551664260203 + # else: + self.minn_robo_pts = -0.1 + self.maxx_robo_pts = 0.2 + self.extent_robo_pts = self.maxx_robo_pts - self.minn_robo_pts + self.mult_const_after_cent = 0.437551664260203 + + ## for grab ## + self.mult_const_after_cent = self.mult_const_after_cent / 3. * 0.9507 + + + # # optimize with intermediates # + self.mano_fingers = torch.tensor(self.mano_fingers, dtype=torch.long).cuda() + self.robot_fingers = torch.tensor(self.robot_fingers, dtype=torch.long).cuda() + + self.nn_ts = self.nn_timesteps - 1 + # self.optimize_with_intermediates = False + + + self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate) + + + self.timestep_to_active_mesh = {} + # ref_expanded_visual_pts, minn_idx_expanded_visual_pts_to_link_pts # + # minn_idx_expanded_visual_pts_to_link_pts # + self.timestep_to_expanded_visual_pts = {} + self.timestep_to_active_mesh_opt_ours_sim = {} + + + self.iter_step = 0 + + + + self.minn_retargeting_loss = 1e27 + + + nn_glb_retar_eps = 1000 + nn_wstates_retar_eps = 2000 + + transfer_to_wstates = False + transfer_to_denseretar = False + + finger_sampled_idxes = None + + minn_dist_mano_pts_to_visual_pts_idxes = None + + for i_iter in tqdm(range(100000)): + tot_losses = [] + + tot_tracking_loss = [] + + # timestep + # self.timestep_to_active_mesh = {} + self.timestep_to_posed_active_mesh = {} + self.timestep_to_posed_mano_active_mesh = {} + self.timestep_to_mano_active_mesh = {} + self.timestep_to_corr_mano_pts = {} + # self.timestep_to_ + timestep_to_tot_rot = {} + timestep_to_tot_trans = {} + + self.timestep_to_raw_active_meshes = {} + self.timestep_to_penetration_points = {} + self.timestep_to_penetration_points_forces = {} + self.joint_name_to_penetration_forces_intermediates = {} + + + self.ts_to_contact_force_d = {} + self.ts_to_penalty_frictions = {} + self.ts_to_penalty_disp_pts = {} + self.ts_to_redmax_states = {} + self.ts_to_robot_fingers = {} + self.ts_to_mano_fingers = {} + # constraitns for states # + # with 17 dimensions on the states; [3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16] + + contact_pairs_set = None + 
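+            # Per-iteration overview (descriptive comment only): each outer iteration below
+            # replays the whole clip; for every cur_ts the robot hand is posed from the
+            # per-timestep embeddings, sparse keypoints are gathered with the index lists
+            # defined above (self.mano_fingers on the MANO mesh, self.robot_fingers on the
+            # robot mesh), and an Adam step is taken on a squared keypoint distance, e.g.
+            #   mano_kpts = self.rhand_verts[cur_ts][self.mano_fingers]   # (K, 3)
+            #   robo_kpts = cur_visual_pts[self.robot_fingers]            # (K, 3)
+            #   tracking_loss = torch.sum((mano_kpts - robo_kpts) ** 2)
+            # Once dense retargeting is enabled, the loss switches to per-vertex nearest-
+            # neighbour correspondences instead of these sparse keypoints.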
self.contact_pairs_sets = {} + + # redmax_sim.reset(backward_flag = True) + + # tot_grad_qs = [] + + self.ts_to_glb_quat = {} + self.ts_to_glb_trans = {} + + + # robo_intermediates_states = [] + + # tot_penetration_depth = [] + + # robo_actions_diff_loss = [] + + # init global transformations ## + # cur_ts_redmax_delta_rotations = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_delta_rotations = torch.tensor([0., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_robot_trans = torch.zeros((3,), dtype=torch.float32).cuda() + + for cur_ts in range(self.nn_ts): + tot_redmax_actions = [] + + actions = {} + + self.free_def_bending_weight = 0.0 + + if self.drive_glb_delta: + print(f"drive_glb_delta!") + if cur_ts == 0: + cur_glb_rot_quat = self.robot_delta_angles(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + cur_glb_rot_quat = cur_glb_rot_quat / torch.clamp(torch.norm(cur_glb_rot_quat, dim=-1, p=2), min=1e-7) + # cur_glb_rot_quat = cur_glb_rot.clone() + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot_quat) + + self.ts_to_glb_quat[cur_ts] = cur_glb_rot_quat.detach().clone() + + cur_glb_trans = self.robot_delta_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + self.ts_to_glb_trans[cur_ts] = cur_glb_trans.detach().clone() + + self.robot_glb_rotation.weight.data[cur_ts, :] = cur_glb_rot_quat.detach().clone() + self.robot_glb_trans.weight.data[cur_ts, :] = cur_glb_trans.detach().clone() + + else: + prev_glb_quat = self.ts_to_glb_quat[cur_ts - 1] + cur_glb_rot_angle = self.robot_delta_angles(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0)[1:] + cur_glb_rot_quat = prev_glb_quat + fields.update_quaternion(cur_glb_rot_angle, prev_glb_quat) + cur_glb_rot_quat = cur_glb_rot_quat / torch.clamp(torch.norm(cur_glb_rot_quat, dim=-1, p=2), min=1e-7) + + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot_quat) + + self.ts_to_glb_quat[cur_ts] = cur_glb_rot_quat.detach().clone() + + cur_delta_glb_trans = self.robot_delta_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + prev_glb_trans = self.ts_to_glb_trans[cur_ts - 1].detach().clone() + cur_glb_trans = prev_glb_trans + cur_delta_glb_trans + + self.ts_to_glb_trans[cur_ts] = cur_glb_trans.detach().clone() + + self.robot_glb_rotation.weight.data[cur_ts, :] = cur_glb_rot_quat.detach().clone() + self.robot_glb_trans.weight.data[cur_ts, :] = cur_glb_trans.detach().clone() + + elif self.retar_delta_glb_trans: + print("fzzzzzzz") + if cur_ts == 0: + cur_glb_trans = self.robot_delta_glb_trans(torch.zeros((1,), dtype=torch.long).cuda()).squeeze(0) + else: + prev_trans = torch.sum( self.robot_delta_glb_trans.weight.data[:cur_ts], dim=0).detach() + cur_delta_glb_trans = self.robot_delta_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + cur_glb_trans = prev_trans + cur_delta_glb_trans + self.robot_glb_trans.weight.data[cur_ts, :] = cur_glb_trans[:].detach().clone() + + cur_glb_rot = self.robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) + + else: + cur_glb_rot = self.robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + # cur_glb_rot = cur_glb_rot + cur_ts_redmax_delta_rotations + + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) + # cur_glb_rot_quat = 
cur_glb_rot.clone() + + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) # mano glboal rotations # + + + cur_glb_trans = self.robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + # cur_glb_trans = cur_glb_trans + cur_ts_redmax_robot_trans # + + ########### Delta states ########### + if self.drive_glb_delta: + robo_delta_states = self.robot_delta_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.robot_agent.active_robot.set_delta_state_and_update_v2(robo_delta_states, cur_ts) ## delta states ## + self.robot_states.weight.data[cur_ts, :] = self.robot_agent.get_joint_state(cur_ts - 1, self.robot_states.weight.data[cur_ts, :]) + # self.robot_states_sv[cur_ts, : ] = self.robot_agent.get_joint_state(cur_ts - 1, self.robot_states_sv[cur_ts, :]) + else: + # if self.hand_type == "shadow_hand": + link_cur_states = self.robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.robot_agent.set_init_states_target_value(link_cur_states) + + # robo_delta_states = self.robot_delta_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + # self.robot_agent.active_robot.set_delta_state_and_update_v2(robo_delta_states, cur_ts) ## delta states ## + # self.robot_states.weight.data[cur_ts, :] = self.robot_agent.get_joint_state(cur_ts - 1, self.robot_states.weight.data[cur_ts, :]) + cur_visual_pts = self.robot_agent.get_init_state_visual_pts() # get init state visual pts + + + + if not self.use_scaled_urdf: + ### transform the visual pts ### + cur_visual_pts = (cur_visual_pts - self.minn_robo_pts) / self.extent_robo_pts + cur_visual_pts = cur_visual_pts * 2. - 1. + cur_visual_pts = cur_visual_pts * self.mult_const_after_cent + + + cur_rot = cur_glb_rot + cur_trans = cur_glb_trans + + timestep_to_tot_rot[cur_ts] = cur_rot.detach() + timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + + ### transform by the glboal transformation and the translation ### + cur_visual_pts = torch.matmul(cur_rot, cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) + + mano_fingers = self.rhand_verts[cur_ts][self.mano_fingers] + + + if self.hand_type == 'shadow_hand': + if self.retar_dense_corres and cur_ts == 0: + dist_mano_pts_to_visual_pts = torch.sum( + (self.rhand_verts[cur_ts].unsqueeze(1) - cur_visual_pts.unsqueeze(0)) ** 2, dim=-1 #### nn_mano_pts x nn_robo_pts x 3 + ) + dist_mano_pts_to_visual_pts = torch.sqrt(dist_mano_pts_to_visual_pts) + minn_dist_mano_pts_to_visual_pts, minn_dist_mano_pts_to_visual_pts_idxes = torch.min(dist_mano_pts_to_visual_pts, dim=-1) ### nn_mano_pts + minn_dist_mano_pts_to_visual_pts_idxes = minn_dist_mano_pts_to_visual_pts_idxes.detach() + elif self.hand_type == 'redmax_hand': + if self.retar_dense_corres and cur_ts == 0: + dist_mano_pts_to_visual_pts = torch.sum( + (self.rhand_verts[cur_ts].unsqueeze(1) - cur_visual_pts.unsqueeze(0)) ** 2, dim=-1 #### nn_mano_pts x nn_robo_pts x 3 + ) + dist_mano_pts_to_visual_pts = torch.sqrt(dist_mano_pts_to_visual_pts) + minn_dist_mano_pts_to_visual_pts, minn_dist_mano_pts_to_visual_pts_idxes = torch.min(dist_mano_pts_to_visual_pts, dim=-1) ### nn_mano_pts + minn_dist_mano_pts_to_visual_pts_idxes = minn_dist_mano_pts_to_visual_pts_idxes.detach() + + + if self.hand_type == 'redmax_hand': + # sampled_verts_idxes + # print(f"cur_visual_pts: {cur_visual_pts.size()}, maxx_verts_idx: {torch.max(sampled_verts_idxes)}, minn_verts_idx: {torch.min(sampled_verts_idxes)}") + # robo_fingers = 
cur_visual_pts[sampled_verts_idxes][self.robot_fingers] + robo_fingers = cur_visual_pts[self.robot_fingers] + else: + robo_fingers = cur_visual_pts[self.robot_fingers] + self.ts_to_robot_fingers[cur_ts] = robo_fingers.detach().cpu().numpy() + self.ts_to_mano_fingers[cur_ts] = mano_fingers.detach().cpu().numpy() + + # diff_redmax_visual_pts_with_ori_visual_pts = torch.sum( + # (cur_visual_pts[sampled_verts_idxes] - self.timestep_to_active_mesh_opt_ours_sim[cur_ts].detach()) ** 2, dim=-1 + # ) + # diff_redmax_visual_pts_with_ori_visual_pts = diff_redmax_visual_pts_with_ori_visual_pts.mean() + + # ts_to_robot_fingers, ts_to_mano_fingers + + self.timestep_to_active_mesh[cur_ts] = cur_visual_pts + self.timestep_to_raw_active_meshes[cur_ts] = cur_visual_pts.detach().cpu().numpy() + + # ragged_dist = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # dist_transformed_expanded_visual_pts_to_ori_visual_pts = torch.zeros((1,), dtype=torch.float32).cuda().mean() + # diff_cur_states_to_ref_states = torch.zeros((1,), dtype=torch.float32).cuda().mean() + + + self.free_def_bending_weight = 0.0 + + if self.retar_dense_corres and (minn_dist_mano_pts_to_visual_pts_idxes is not None): + corres_robo_pts = cur_visual_pts[minn_dist_mano_pts_to_visual_pts_idxes] + tracking_loss = torch.sum((self.rhand_verts[cur_ts] - corres_robo_pts) ** 2, dim=-1) ### nn_mano_pts ### + tracking_loss = torch.mean(tracking_loss) * 20.0 + else: + tracking_loss = torch.sum((mano_fingers - robo_fingers) ** 2) + + loss = tracking_loss # + self.other_bending_network.penetrating_depth_penalty * self.penetrating_depth_penalty_coef + + # kinematics_trans_diff = (robot_rotation_diff + robot_trans_diff + robot_delta_states_diff) * self.robot_actions_diff_coef + + # kinematics_proj_loss = kinematics_trans_diff + penetraton_penalty + tracking_loss + + loss = loss # + (robot_rotation_diff + robot_trans_diff) * self.robot_actions_diff_coef + + # loss = kinematics_proj_loss + tot_losses.append(loss) + # self.iter_step += 1 + # self.writer.add_scalar('Loss/loss', loss, self.iter_step) + + + self.optimizer.zero_grad() + + loss.backward(retain_graph=True) + + self.optimizer.step() + + + + # tot_losses.append(loss.detach().item()) + # # tot_penalty_dot_forces_normals.append(cur_penalty_dot_forces_normals.detach().item()) + # # tot_penalty_friction_constraint.append(cur_penalty_friction_constraint.detach().item()) + + self.iter_step += 1 + + self.writer.add_scalar('Loss/loss', loss, self.iter_step) + + # # if self.iter_step % self.save_freq == 0: + # # self.save_checkpoint() # a smart solution for them ? 
# # save checkpoint # + self.update_learning_rate() ## update learning rate ## + + torch.cuda.empty_cache() + torch.cuda.empty_cache() + torch.cuda.empty_cache() + torch.cuda.empty_cache() + torch.cuda.empty_cache() + torch.cuda.empty_cache() + torch.cuda.empty_cache() + torch.cuda.empty_cache() + torch.cuda.empty_cache() + torch.cuda.empty_cache() + + + ''' Get nn_forward_ts and backward through the actions for updating ''' + + + + # tot_losses = sum(tot_losses) / float(len(tot_losses)) + tot_losses = sum(tot_losses) / float(len(tot_losses)) + + saved_best = False + + if self.retar_dense_corres: + if tot_losses < self.minn_retargeting_loss: + self.minn_retargeting_loss = tot_losses + self.save_checkpoint(tag="denseretar_best", niter=True) + self.validate_mesh_robo() + saved_best = True + + if tot_losses < self.minn_retargeting_loss: + self.minn_retargeting_loss = tot_losses + if self.retar_dense_corres: + self.save_checkpoint(tag="denseretar_best", niter=True) + elif (self.retar_only_glb and transfer_to_wstates) or (not self.retar_only_glb): + self.save_checkpoint(tag="wstates_best", niter=True) + elif self.retar_only_glb: + self.save_checkpoint(tag="glbonly_best", niter=True) + + + if i_iter == 0 or (i_iter % self.ckpt_sv_freq == 0): + #### transfer to dense-retargeting #### + if (self.retar_only_glb and transfer_to_denseretar) or ((not self.retar_only_glb) and transfer_to_denseretar): + self.save_checkpoint(tag="denseretar") + elif self.retar_only_glb and transfer_to_wstates: + self.save_checkpoint(tag="towstates") + else: + self.save_checkpoint() # a smart solution for them ? # # save checkpoint # + + + if i_iter % self.report_freq == 0: + logs_sv_fn = os.path.join(self.base_exp_dir, 'log.txt') + + cur_log_sv_str = 'i_iter: {} iter:{:8>d} loss = {} lr={}'.format(i_iter, self.iter_step, tot_losses, self.optimizer.param_groups[0]['lr']) + + print(cur_log_sv_str) + ''' Dump to the file ''' + with open(logs_sv_fn, 'a') as log_file: + log_file.write(cur_log_sv_str + '\n') + + ### train finger retargeting + + # self.validate_mesh_robo_a() + if (not saved_best) and (i_iter % self.val_mesh_freq == 0): + self.validate_mesh_robo() + + if self.retar_only_glb and (i_iter == 1000): + params_to_train = [] # params to train # + params_to_train += list(self.robot_glb_rotation.parameters()) + params_to_train += list(self.robot_glb_trans.parameters()) + + params_to_train += list(self.robot_states.parameters()) + params_to_train += list(self.robot_delta_glb_trans.parameters()) + params_to_train += list(self.robot_delta_states.parameters()) + self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate) + transfer_to_wstates = True + + if (self.retar_only_glb and (i_iter == nn_wstates_retar_eps + nn_glb_retar_eps)) or ((not self.retar_only_glb) and i_iter == nn_wstates_retar_eps): + params_to_train = [] # params to train # ### judge ### + params_to_train += list(self.robot_glb_rotation.parameters()) + params_to_train += list(self.robot_glb_trans.parameters()) + params_to_train += list(self.robot_states.parameters()) + params_to_train += list(self.robot_delta_glb_trans.parameters()) + params_to_train += list(self.robot_delta_states.parameters()) + + self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate) + transfer_to_denseretar = True + self.retar_dense_corres = True + + + + torch.cuda.empty_cache() + + + + ''' GRAB & TACO clips; MANO dynamic hand ''' + def train_dyn_mano_model(self, ): + + # chagne # # mano notjmano but the mano ---> optimize the mano delta states? 
# + ### the real robot actions from mano model rules ### + self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs')) + self.update_learning_rate() # update learning rrate # + # robot actions ## + + nn_timesteps = self.timestep_to_passive_mesh.size(0) + self.nn_timesteps = nn_timesteps + num_steps = self.nn_timesteps + + # load --- and can load other states as well ## + ''' Load the robot hand ''' + # model_path = self.conf['model.sim_model_path'] # + # robot_agent = dyn_model_act.RobotAgent(xml_fn=model_path, args=None) + # self.robot_agent = robot_agent + # robo_init_verts = self.robot_agent.robot_pts + # robo_sampled_verts_idxes_fn = "robo_sampled_verts_idxes.npy" + # if os.path.exists(robo_sampled_verts_idxes_fn): + # sampled_verts_idxes = np.load("robo_sampled_verts_idxes.npy") + # sampled_verts_idxes = torch.from_numpy(sampled_verts_idxes).long().cuda() + # else: + # n_sampling = 1000 + # pts_fps_idx = data_utils.farthest_point_sampling(robo_init_verts.unsqueeze(0), n_sampling=n_sampling) + # sampled_verts_idxes = pts_fps_idx + # np.save(robo_sampled_verts_idxes_fn, sampled_verts_idxes.detach().cpu().numpy()) + # self.robo_hand_faces = self.robot_agent.robot_faces + # self.sampled_verts_idxes = sampled_verts_idxes + ''' Load the robot hand ''' + + ## load the robot hand ## + + ''' Load robot hand in DiffHand simulator ''' + # redmax_sim = redmax.Simulation(model_path) + # redmax_sim.reset(backward_flag = True) # redmax_sim -- + # # ### redmax_ndof_u, redmax_ndof_r ### # + # redmax_ndof_u = redmax_sim.ndof_u + # redmax_ndof_r = redmax_sim.ndof_r + # redmax_ndof_m = redmax_sim.ndof_m ### ndof_m ### # redma # x_sim + + + ''' Load the mano hand ''' # dynamic mano hand jin it # + model_path_mano = self.conf['model.mano_sim_model_path'] + if not os.path.exists(model_path_mano): + model_path_mano = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + # mano_agent = dyn_model_act_mano_deformable.RobotAgent(xml_fn=model_path_mano) # robot # + mano_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path_mano) ## model path mano ## # + self.mano_agent = mano_agent + # ''' Load the mano hand ''' + self.robo_hand_faces = self.mano_agent.robot_faces + + + nn_substeps = 10 + + mano_nn_substeps = 1 + # mano_nn_substeps = 10 # + self.mano_nn_substeps = mano_nn_substeps + + + + ''' Expnad the current visual points ''' + # expanded_visual_pts = self.mano_agent.active_robot.expand_visual_pts() + # self.expanded_visual_pts_nn = expanded_visual_pts.size(0) + # expanded_visual_pts_npy = expanded_visual_pts.detach().cpu().numpy() + # expanded_visual_pts_sv_fn = "expanded_visual_pts.npy" + # np.save(expanded_visual_pts_sv_fn, expanded_visual_pts_npy) + # # ''' Expnad the current visual points ''' # # differentiate through the simulator? 
# # + + params_to_train = [] # params to train # + ### robot_actions, robot_init_states, robot_glb_rotation, robot_actuator_friction_forces, robot_glb_trans ### + + ''' Define MANO robot actions, delta_states, init_states, frictions, and others ''' + self.mano_robot_actions = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_actions.weight) + params_to_train += list(self.mano_robot_actions.parameters()) + + # self.mano_robot_delta_states = nn.Embedding( + # num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + # ).cuda() + # torch.nn.init.zeros_(self.mano_robot_delta_states.weight) + # # params_to_train += list(self.robot_delta_states.parameters()) + + # self.mano_robot_init_states = nn.Embedding( + # num_embeddings=1, embedding_dim=60, + # ).cuda() + # torch.nn.init.zeros_(self.mano_robot_init_states.weight) + # # params_to_train += list(self.robot_init_states.parameters()) + + self.mano_robot_glb_rotation = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=4 + ).cuda() + self.mano_robot_glb_rotation.weight.data[:, 0] = 1. + self.mano_robot_glb_rotation.weight.data[:, 1:] = 0. + params_to_train += list(self.mano_robot_glb_rotation.parameters()) + + + self.mano_robot_glb_trans = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_robot_glb_trans.weight) + params_to_train += list(self.mano_robot_glb_trans.parameters()) + # + self.mano_robot_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_states.weight) + # self.mano_robot_states.weight.data[0, :] = self.mano_robot_init_states.weight.data[0, :].clone() + params_to_train += list(self.mano_robot_states.parameters()) + + + ''' Load optimized MANO hand actions and states ''' + # ### laod optimized init actions #### + if 'model.load_optimized_init_actions' in self.conf and len(self.conf['model.load_optimized_init_actions']) > 0: + print(f"[MANO] Loading optimized init transformations from {self.conf['model.load_optimized_init_actions']}") + cur_optimized_init_actions_fn = self.conf['model.load_optimized_init_actions'] + # cur_optimized_init_actions = # optimized init states + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + + if 'mano_robot_states' in optimized_init_actions_ckpt: + self.mano_robot_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_states']) + self.mano_robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_trans']) + self.mano_robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_rotation']) + # self.mano_robot_init_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_init_states']) + if 'mano_robot_actions' in optimized_init_actions_ckpt: + self.mano_robot_actions.load_state_dict(optimized_init_actions_ckpt['mano_robot_actions']) + + else: + optimized_init_actions_ckpt = None + + mano_glb_trans_np_data = self.mano_robot_glb_trans.weight.data.detach().cpu().numpy() + mano_glb_rotation_np_data = self.mano_robot_glb_rotation.weight.data.detach().cpu().numpy() + mano_states_np_data = self.mano_robot_states.weight.data.detach().cpu().numpy() + + if optimized_init_actions_ckpt is not None and 'object_transl' in optimized_init_actions_ckpt: + object_transl = optimized_init_actions_ckpt['object_transl'].detach().cpu().numpy() + object_global_orient 
= optimized_init_actions_ckpt['object_global_orient'].detach().cpu().numpy() + + + ''' Scaling constants ''' + self.mano_mult_const_after_cent = 0.9507 + + if 'model.mano_mult_const_after_cent' in self.conf: + self.mano_mult_const_after_cent = self.conf['model.mano_mult_const_after_cent'] + + mano_to_dyn_corr_pts_idxes_fn = "/home/xueyi/diffsim/NeuS/rsc/mano/nearest_dyn_verts_idxes.npy" + if not os.path.exists(mano_to_dyn_corr_pts_idxes_fn): + mano_to_dyn_corr_pts_idxes_fn = "/data/xueyi/diffsim/NeuS/rsc/mano/nearest_dyn_verts_idxes.npy" + self.mano_to_dyn_corr_pts_idxes = np.load(mano_to_dyn_corr_pts_idxes_fn, allow_pickle=True) + self.mano_to_dyn_corr_pts_idxes = torch.from_numpy(self.mano_to_dyn_corr_pts_idxes).long().cuda() + + print(f"mano_to_dyn_corr_pts_idxes: {self.mano_to_dyn_corr_pts_idxes.size()}") + + + self.nn_ts = self.nn_timesteps + + + + ''' Set actions for the redmax simulation and add parameters to params-to-train ''' + + # params_to_train = [] + # params_to_train += list(self.redmax_robot_actions.parameters()) + self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate) + # init_rot = R.random().as_quat() + # init_rot = torch.from_numpy(init_rot).float().cuda() + # self.robot_glb_rotation.weight.data[0, :] = init_rot[:] # init rot + + + + # ### Constraint set ### + # self.robot_hand_states_only_allowing_neg = torch.tensor( [3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16], dtype=torch.long).cuda() + + + self.timestep_to_active_mesh = {} + # ref_expanded_visual_pts, minn_idx_expanded_visual_pts_to_link_pts # + # minn_idx_expanded_visual_pts_to_link_pts # + self.timestep_to_expanded_visual_pts = {} + self.timestep_to_active_mesh_opt_ours_sim = {} + self.timestep_to_active_mesh_w_delta_states = {} + + + self.iter_step = 0 + + + self.minn_tracking_loss = 1e27 + + for i_iter in tqdm(range(100000)): + tot_losses = [] + tot_tracking_loss = [] + + # timestep + # self.timestep_to_active_mesh = {} + self.timestep_to_posed_active_mesh = {} + self.timestep_to_posed_mano_active_mesh = {} + self.timestep_to_mano_active_mesh = {} + self.timestep_to_corr_mano_pts = {} + # self.timestep_to_ + timestep_to_tot_rot = {} + timestep_to_tot_trans = {} + + correspondence_pts_idxes = None + # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + self.timestep_to_raw_active_meshes = {} + self.timestep_to_penetration_points = {} + self.timestep_to_penetration_points_forces = {} + self.joint_name_to_penetration_forces_intermediates = {} + + + self.ts_to_contact_force_d = {} + self.ts_to_penalty_frictions = {} + self.ts_to_penalty_disp_pts = {} + self.ts_to_redmax_states = {} + # constraitns for states # + # with 17 dimensions on the states; [3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16] + + contact_pairs_set = None + self.contact_pairs_sets = {} + + # redmax_sim.reset(backward_flag = True) + + # tot_grad_qs = [] + + robo_intermediates_states = [] + + tot_penetration_depth = [] + + robo_actions_diff_loss = [] + mano_tracking_loss = [] + + # init global transformations ## + # cur_ts_redmax_delta_rotations = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_delta_rotations = torch.tensor([0., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_robot_trans = torch.zeros((3,), dtype=torch.float32).cuda() + + # for cur_ts in range(self.nn_ts): + for cur_ts in range(self.nn_ts * self.mano_nn_substeps): + tot_redmax_actions = [] + + + actions = {} + + self.free_def_bending_weight = 0.0 + + # mano_robot_glb_rotation, 
mano_robot_glb_trans, mano_robot_delta_states # + cur_glb_rot = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + cur_glb_rot = cur_glb_rot + cur_ts_redmax_delta_rotations + + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) + # cur_glb_rot_quat = cur_glb_rot.clone() + + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) # mano glboal rotations # + cur_glb_trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + cur_glb_trans = cur_glb_trans + cur_ts_redmax_robot_trans + + if self.optimize_dyn_actions: + ''' Articulated joint forces-driven mano robot ''' + ### current -> no penetration setting; no contact setting ### + link_cur_actions = self.mano_robot_actions(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.mano_agent.set_actions_and_update_states_v2( link_cur_actions, cur_ts, penetration_forces=None, sampled_visual_pts_joint_idxes=None) + else: + ''' Articulated states-driven mano robot ''' + link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + self.mano_agent.set_init_states_target_value(link_cur_states) + + # optimize_dyn_actions + + + cur_visual_pts = self.mano_agent.get_init_state_visual_pts() + + + cur_visual_pts = cur_visual_pts * self.mano_mult_const_after_cent + + + cur_rot = cur_glb_rot + cur_trans = cur_glb_trans + + timestep_to_tot_rot[cur_ts] = cur_rot.detach() + timestep_to_tot_trans[cur_ts] = cur_trans.detach() + + ## + ### transform by the glboal transformation and the translation ### + cur_visual_pts = torch.matmul(cur_rot, cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) ## visual pts ## + + + + self.timestep_to_active_mesh[cur_ts] = cur_visual_pts + self.timestep_to_raw_active_meshes[cur_ts] = cur_visual_pts.detach().cpu().numpy() + + # 0.1 ## as the initial one # + + + cur_kine_rhand_verts = self.rhand_verts[cur_ts // mano_nn_substeps] + cur_dyn_visual_pts_to_mano_verts = cur_visual_pts[self.mano_to_dyn_corr_pts_idxes] + diff_hand_tracking = torch.mean( + torch.sum((cur_kine_rhand_verts - cur_dyn_visual_pts_to_mano_verts) ** 2, dim=-1) + ) + + + + self.optimizer.zero_grad() + loss = diff_hand_tracking + loss.backward(retain_graph=True) + self.optimizer.step() + + # diff_hand_tracking # diff hand ## + mano_tracking_loss.append(diff_hand_tracking.detach().cpu().item()) + + + + tot_losses.append(loss.detach().item()) + + + self.writer.add_scalar('Loss/loss', loss, self.iter_step) + + self.iter_step += 1 + self.update_learning_rate() ## update learning rate ## + + torch.cuda.empty_cache() + + + ''' Get nn_forward_ts and backward through the actions for updating ''' + + if (i_iter % self.ckpt_sv_freq) == 0: + self.save_checkpoint() # a smart solution for them ? 
# # save checkpoint # + + + tot_losses = sum(tot_losses) / float(len(tot_losses)) + # tot_penalty_dot_forces_normals = sum(tot_penalty_dot_forces_normals) / float(len(tot_penalty_dot_forces_normals)) + # tot_penalty_friction_constraint = sum(tot_penalty_friction_constraint) / float(len(tot_penalty_friction_constraint)) + # tot_tracking_loss = sum(tot_tracking_loss) / float(len(tot_tracking_loss)) + # tot_penetration_depth = sum(tot_penetration_depth) / float(len(tot_penetration_depth)) + # robo_actions_diff_loss = sum(robo_actions_diff_loss) / float(len(robo_actions_diff_loss)) + mano_tracking_loss = sum(mano_tracking_loss) / float(len(mano_tracking_loss)) + + + # if tot_losses < self.minn_tracking_loss: + # self.minn_tracking_loss = tot_losses + # self.save_checkpoint(tag="best") + + + if i_iter % self.report_freq == 0: + logs_sv_fn = os.path.join(self.base_exp_dir, 'log.txt') + + cur_log_sv_str = 'iter:{:8>d} loss = {} mano_tracking_loss = {} lr={}'.format(self.iter_step, tot_losses, mano_tracking_loss, self.optimizer.param_groups[0]['lr']) + + print(cur_log_sv_str) + ''' Dump to the file ''' + with open(logs_sv_fn, 'a') as log_file: + log_file.write(cur_log_sv_str + '\n') + + # self.validate_mesh_robo_a() + if i_iter % self.val_mesh_freq == 0: + # self.validate_mesh_robo() + self.validate_mesh_robo_e() + + torch.cuda.empty_cache() + + + ''' GRAB & TACO clips; MANO dynamic hand ''' + def train_dyn_mano_model_wreact(self, ): + + # chagne # # mano notjmano but the mano ---> optimize the mano delta states? # + ### the real robot actions from mano model rules ### + self.writer = SummaryWriter(log_dir=os.path.join(self.base_exp_dir, 'logs')) + self.update_learning_rate() # update learning rrate # + # robot actions ## + + nn_timesteps = self.timestep_to_passive_mesh.size(0) + self.nn_timesteps = nn_timesteps + num_steps = self.nn_timesteps + + # load --- and can load other states as well ## + ''' Load the robot hand ''' + # model_path = self.conf['model.sim_model_path'] # + # robot_agent = dyn_model_act.RobotAgent(xml_fn=model_path, args=None) + # self.robot_agent = robot_agent + # robo_init_verts = self.robot_agent.robot_pts + # robo_sampled_verts_idxes_fn = "robo_sampled_verts_idxes.npy" + # if os.path.exists(robo_sampled_verts_idxes_fn): + # sampled_verts_idxes = np.load("robo_sampled_verts_idxes.npy") + # sampled_verts_idxes = torch.from_numpy(sampled_verts_idxes).long().cuda() + # else: + # n_sampling = 1000 + # pts_fps_idx = data_utils.farthest_point_sampling(robo_init_verts.unsqueeze(0), n_sampling=n_sampling) + # sampled_verts_idxes = pts_fps_idx + # np.save(robo_sampled_verts_idxes_fn, sampled_verts_idxes.detach().cpu().numpy()) + # self.robo_hand_faces = self.robot_agent.robot_faces + # self.sampled_verts_idxes = sampled_verts_idxes + ''' Load the robot hand ''' + + ## load the robot hand ## + + ''' Load robot hand in DiffHand simulator ''' + # redmax_sim = redmax.Simulation(model_path) + # redmax_sim.reset(backward_flag = True) # redmax_sim -- + # # ### redmax_ndof_u, redmax_ndof_r ### # + # redmax_ndof_u = redmax_sim.ndof_u + # redmax_ndof_r = redmax_sim.ndof_r + # redmax_ndof_m = redmax_sim.ndof_m ### ndof_m ### # redma # x_sim + + + ''' Load the mano hand ''' # dynamic mano hand jin it # + model_path_mano = self.conf['model.mano_sim_model_path'] + if not os.path.exists(model_path_mano): ## the model path mano ## + model_path_mano = "rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + # mano_agent = dyn_model_act_mano_deformable.RobotAgent(xml_fn=model_path_mano) # 
robot #
+        mano_agent = dyn_model_act_mano.RobotAgent(xml_fn=model_path_mano) ## model path mano ##
+        self.mano_agent = mano_agent
+        # ''' Load the mano hand '''
+        self.robo_hand_faces = self.mano_agent.robot_faces
+
+
+        nn_substeps = 10
+
+        mano_nn_substeps = 1
+        # mano_nn_substeps = 10 #
+        self.mano_nn_substeps = mano_nn_substeps
+
+
+        ''' Expand the current visual points '''
+        # expanded_visual_pts = self.mano_agent.active_robot.expand_visual_pts()
+        # self.expanded_visual_pts_nn = expanded_visual_pts.size(0)
+        # expanded_visual_pts_npy = expanded_visual_pts.detach().cpu().numpy()
+        # expanded_visual_pts_sv_fn = "expanded_visual_pts.npy"
+        # np.save(expanded_visual_pts_sv_fn, expanded_visual_pts_npy)
+        # ''' Expand the current visual points ''' # differentiate through the simulator?
+
+        params_to_train = [] # params to train #
+        ### robot_actions, robot_init_states, robot_glb_rotation, robot_actuator_friction_forces, robot_glb_trans ###
+
+        ''' Define MANO robot actions, delta_states, init_states, frictions, and others '''
+        self.mano_robot_actions = nn.Embedding(
+            num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60,
+        ).cuda()
+        torch.nn.init.zeros_(self.mano_robot_actions.weight)
+        params_to_train += list(self.mano_robot_actions.parameters())
+
+        # self.mano_robot_delta_states = nn.Embedding(
+        #     num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60,
+        # ).cuda()
+        # torch.nn.init.zeros_(self.mano_robot_delta_states.weight)
+        # params_to_train += list(self.robot_delta_states.parameters())
+
+        # self.mano_robot_init_states = nn.Embedding(
+        #     num_embeddings=1, embedding_dim=60,
+        # ).cuda()
+        # torch.nn.init.zeros_(self.mano_robot_init_states.weight)
+        # params_to_train += list(self.robot_init_states.parameters())
+
+        self.mano_robot_glb_rotation = nn.Embedding(
+            num_embeddings=num_steps * mano_nn_substeps, embedding_dim=4
+        ).cuda()
+        self.mano_robot_glb_rotation.weight.data[:, 0] = 1.
+        self.mano_robot_glb_rotation.weight.data[:, 1:] = 0.
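+        # The nn.Embedding tables above and below are used as per-timestep lookup tables of
+        # free optimisation variables (one row per simulation step), not as learned embeddings.
+        # A minimal sketch of the pattern (illustrative only; `T`, `D` and `t` are
+        # placeholders, not variables from this file):
+        #   traj = nn.Embedding(num_embeddings=T, embedding_dim=D).cuda()
+        #   torch.nn.init.zeros_(traj.weight)
+        #   x_t = traj(torch.zeros((1,), dtype=torch.long).cuda() + t).squeeze(0)  # value at step t
+        # Rows are read this way inside the time loop and updated directly by the optimiser.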
+ params_to_train += list(self.mano_robot_glb_rotation.parameters()) + + + self.mano_robot_glb_trans = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(self.mano_robot_glb_trans.weight) + params_to_train += list(self.mano_robot_glb_trans.parameters()) + # + self.mano_robot_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.mano_robot_states.weight) + # self.mano_robot_states.weight.data[0, :] = self.mano_robot_init_states.weight.data[0, :].clone() + params_to_train += list(self.mano_robot_states.parameters()) + + + ''' Load optimized MANO hand actions and states ''' + # ### laod optimized init actions #### + if 'model.load_optimized_init_actions' in self.conf and len(self.conf['model.load_optimized_init_actions']) > 0: + ''' load the optimized actions ''' #### init transformations #### + print(f"[MANO] Loading optimized init transformations from {self.conf['model.load_optimized_init_actions']}") + cur_optimized_init_actions_fn = self.conf['model.load_optimized_init_actions'] + # cur_optimized_init_actions = # optimized init states + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location=self.device, ) + + if 'mano_robot_states' in optimized_init_actions_ckpt: ## + self.mano_robot_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_states']) + self.mano_robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_trans']) + self.mano_robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_rotation']) + # self.mano_robot_init_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_init_states']) + if 'mano_robot_actions' in optimized_init_actions_ckpt: + self.mano_robot_actions.load_state_dict(optimized_init_actions_ckpt['mano_robot_actions']) + else: + optimized_init_actions_ckpt = None + + mano_glb_trans_np_data = self.mano_robot_glb_trans.weight.data.detach().cpu().numpy() + mano_glb_rotation_np_data = self.mano_robot_glb_rotation.weight.data.detach().cpu().numpy() + mano_states_np_data = self.mano_robot_states.weight.data.detach().cpu().numpy() + + if optimized_init_actions_ckpt is not None and 'object_transl' in optimized_init_actions_ckpt: + object_transl = optimized_init_actions_ckpt['object_transl'].detach().cpu().numpy() + object_global_orient = optimized_init_actions_ckpt['object_global_orient'].detach().cpu().numpy() + + + ### scaling constaints ### + ''' Scaling constants ''' ## scaling constants ## + self.mano_mult_const_after_cent = 0.9507 + + if 'model.mano_mult_const_after_cent' in self.conf: + self.mano_mult_const_after_cent = self.conf['model.mano_mult_const_after_cent'] + + mano_to_dyn_corr_pts_idxes_fn = "/home/xueyi/diffsim/NeuS/rsc/mano/nearest_dyn_verts_idxes.npy" + if not os.path.exists(mano_to_dyn_corr_pts_idxes_fn): + mano_to_dyn_corr_pts_idxes_fn = "/data/xueyi/diffsim/NeuS/rsc/mano/nearest_dyn_verts_idxes.npy" + self.mano_to_dyn_corr_pts_idxes = np.load(mano_to_dyn_corr_pts_idxes_fn, allow_pickle=True) + self.mano_to_dyn_corr_pts_idxes = torch.from_numpy(self.mano_to_dyn_corr_pts_idxes).long().cuda() + + print(f"mano_to_dyn_corr_pts_idxes: {self.mano_to_dyn_corr_pts_idxes.size()}") + + + self.nn_ts = self.nn_timesteps + + + + ''' Set actions for the redmax simulation and add parameters to params-to-train ''' + + if self.optimize_rules: + params_to_train = [] + params_to_train += list(self.other_bending_network.parameters()) + else: + 
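+            # As written, `optimize_rules` selects what gets optimised: the branch above trains
+            # only the contact / friction ("bending") network parameters, while this branch
+            # leaves them untouched and instead optimises the per-timestep MANO trajectory
+            # variables (actions, joint states, global rotation and translation) defined above.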
params_to_train = [] + params_to_train += list(self.mano_robot_actions.parameters()) + params_to_train += list(self.mano_robot_glb_rotation.parameters()) + params_to_train += list(self.mano_robot_glb_trans.parameters()) + params_to_train += list(self.mano_robot_states.parameters()) + + # params_to_train = [] + # params_to_train += list(self.redmax_robot_actions.parameters()) + ### construct optimizer ### + self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate) + # init_rot = R.random().as_quat() + # init_rot = torch.from_numpy(init_rot).float().cuda() + # self.robot_glb_rotation.weight.data[0, :] = init_rot[:] + + + + self.timestep_to_active_mesh = {} + # ref_expanded_visual_pts, minn_idx_expanded_visual_pts_to_link_pts # + # minn_idx_expanded_visual_pts_to_link_pts # + self.timestep_to_expanded_visual_pts = {} + self.timestep_to_active_mesh_opt_ours_sim = {} + self.timestep_to_active_mesh_w_delta_states = {} + + + self.iter_step = 0 + + + self.minn_tracking_loss = 1e27 + + for i_iter in tqdm(range(100000)): + tot_losses = [] + tot_tracking_loss = [] + + # timestep # + # self.timestep_to_active_mesh = {} # + self.timestep_to_posed_active_mesh = {} + self.timestep_to_posed_mano_active_mesh = {} + self.timestep_to_mano_active_mesh = {} + self.timestep_to_corr_mano_pts = {} + + timestep_to_tot_rot = {} + timestep_to_tot_trans = {} + + # correspondence_pts_idxes = None + # # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces # timestep # + self.timestep_to_raw_active_meshes = {} + self.timestep_to_penetration_points = {} + self.timestep_to_penetration_points_forces = {} + self.joint_name_to_penetration_forces_intermediates = {} + + + self.ts_to_contact_force_d = {} + self.ts_to_penalty_frictions = {} + self.ts_to_penalty_disp_pts = {} + self.ts_to_redmax_states = {} + # constraitns for states # + # with 17 dimensions on the states; [3, 4, 5, 7, 8, 9, 11, 12, 13, 15, 16] # # with 17 dimensions # + + contact_pairs_set = None + self.contact_pairs_sets = {} + + # redmax_sim.reset(backward_flag = True) + + penetration_forces= None + sampled_visual_pts_joint_idxes = None + + # tot_grad_qs = [] + + robo_intermediates_states = [] + + tot_penetration_depth = [] + + robo_actions_diff_loss = [] + mano_tracking_loss = [] + + # init global transformations ## + # cur_ts_redmax_delta_rotations = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_delta_rotations = torch.tensor([0., 0., 0., 0.], dtype=torch.float32).cuda() + cur_ts_redmax_robot_trans = torch.zeros((3,), dtype=torch.float32).cuda() + + # for cur_ts in range(self.nn_ts): + for cur_ts in range(self.nn_ts * self.mano_nn_substeps - 1): + # tot_redmax_actions = [] + + # actions = {} + + self.free_def_bending_weight = 0.0 + + # mano_robot_glb_rotation, mano_robot_glb_trans, mano_robot_delta_states # + cur_glb_rot = self.mano_robot_glb_rotation(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + cur_glb_rot = cur_glb_rot + cur_ts_redmax_delta_rotations + + cur_glb_rot = cur_glb_rot / torch.clamp(torch.norm(cur_glb_rot, dim=-1, p=2), min=1e-7) + # cur_glb_rot_quat = cur_glb_rot.clone() # + + cur_glb_rot = dyn_model_act.quaternion_to_matrix(cur_glb_rot) # mano glboal rotations # + cur_glb_trans = self.mano_robot_glb_trans(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + + cur_glb_trans = cur_glb_trans + cur_ts_redmax_robot_trans + + if self.optimize_dyn_actions: + ''' Articulated joint forces-driven mano robot ''' + ### current 
-> no penetration setting; no contact setting ###
+                    link_cur_actions = self.mano_robot_actions(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0)
+                    self.mano_agent.set_actions_and_update_states_v2( link_cur_actions, cur_ts, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes) ##
+                else:
+                    ''' Articulated states-driven mano robot '''
+                    link_cur_states = self.mano_robot_states(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0)
+                    self.mano_agent.set_init_states_target_value(link_cur_states)
+
+                # optimize_dyn_actions
+
+                cur_visual_pts, visual_pts_joint_idxes = self.mano_agent.get_init_state_visual_pts(ret_joint_idxes=True)
+
+
+                cur_visual_pts = cur_visual_pts * self.mano_mult_const_after_cent
+
+
+                cur_rot = cur_glb_rot
+                cur_trans = cur_glb_trans
+
+                timestep_to_tot_rot[cur_ts] = cur_rot.detach()
+                timestep_to_tot_trans[cur_ts] = cur_trans.detach()
+
+                ##
+                ### transform by the global rotation and the translation ###
+                cur_visual_pts = torch.matmul(cur_rot, cur_visual_pts.contiguous().transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_trans.unsqueeze(0) ## visual pts ##
+
+
+
+                self.timestep_to_active_mesh[cur_ts] = cur_visual_pts
+                self.timestep_to_raw_active_meshes[cur_ts] = cur_visual_pts.detach().cpu().numpy()
+
+                # 0.1 ## as the initial one #
+                ''' cache contact pair set for exporting contact information '''
+                if contact_pairs_set is None:
+                    self.contact_pairs_set = None
+                else:
+                    self.contact_pairs_set = contact_pairs_set.copy()
+
+                # ### if training the robot hand to act with reaction forces ##
+                # print(self.timestep_to_active_mesh[cur_ts].size(), cur_visual_pts_friction_forces.size())
+                ### get the active mesh and remember it ###
+                contact_pairs_set = self.other_bending_network.forward2( input_pts_ts=cur_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_passive_mesh_normals=self.timestep_to_passive_mesh_normals, friction_forces=None, sampled_verts_idxes=None, reference_mano_pts=None, fix_obj=self.fix_obj, contact_pairs_set=contact_pairs_set)
+
+                ### train with forces applied to the active mesh ##
+                # if self.train_with_forces_to_active and (not self.use_mano_inputs):
+                # penetration_forces #
+                if torch.sum(self.other_bending_network.penetrating_indicator.float()) > 0.5:
+                    net_penetrating_forces = self.other_bending_network.penetrating_forces
+                    net_penetrating_points = self.other_bending_network.penetrating_points
+
+
+                    # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces
+                    self.timestep_to_penetration_points[cur_ts] = net_penetrating_points.detach().cpu().numpy()
+                    self.timestep_to_penetration_points_forces[cur_ts] = net_penetrating_forces.detach().cpu().numpy()
+
+
+                    ### transform the visual pts ###
+                    # cur_visual_pts = (cur_visual_pts - self.minn_robo_pts) / self.extent_robo_pts
+                    # cur_visual_pts = cur_visual_pts * 2. - 1.
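+                    # The contact model predicts penetration forces / points in the world frame; the lines
+                    # below map them back into the hand's pre-global-transform frame so they can be passed
+                    # to `set_actions_and_update_states_v2` again at the next substep.  For the rigid
+                    # transform p_world = R @ p_local + t, the inverse used here is
+                    #   p_local = R^T @ (p_world - t)   and   f_local = R^T @ f_world  (forces carry no translation).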
+ # cur_visual_pts = cur_visual_pts * self.mult_const_after_cent # mult_const # + + # sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[finger_sampled_idxes][self.other_bending_network.penetrating_indicator] + + sampled_visual_pts_joint_idxes = visual_pts_joint_idxes[self.other_bending_network.penetrating_indicator] + + ## from net penetration forces to to the + + ### get the passvie force for each point ## ## + # self.timestep_to_actuator_points_passive_forces[cur_ts] = self.other_bending_network.penetrating_forces_allpts.detach().clone() ## for + + net_penetrating_forces = torch.matmul( + cur_rot.transpose(1, 0), net_penetrating_forces.transpose(1, 0) + ).transpose(1, 0) + # net_penetrating_forces = net_penetrating_forces / self.mult_const_after_cent + # net_penetrating_forces = net_penetrating_forces / 2 + # net_penetrating_forces = net_penetrating_forces * self.extent_robo_pts + + # net_penetrating_forces = (1.0 - pointset_expansion_alpha) * net_penetrating_forces + + net_penetrating_points = torch.matmul( + cur_rot.transpose(1, 0), (net_penetrating_points - cur_trans.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + # net_penetrating_points = net_penetrating_points / self.mult_const_after_cent + # net_penetrating_points = (net_penetrating_points + 1.) / 2. # penetrating points # + # net_penetrating_points = (net_penetrating_points * self.extent_robo_pts) + self.minn_robo_pts + + penetration_forces = net_penetrating_forces ## get the penetration forces and net penetration forces ## + # link_maximal_contact_forces = torch.zeros((redmax_ndof_r, 6), dtype=torch.float32).cuda() + + # penetration_forces_values = penetration_forces['penetration_forces'].detach() + # penetration_forces_points = penetration_forces['penetration_forces_points'].detach() + penetration_forces = { + 'penetration_forces': penetration_forces, + 'penetration_forces_points': net_penetrating_points + } + + else: + penetration_forces = None + sampled_visual_pts_joint_idxes = None + # penetration_forces = { + # 'penetration_forces': penetration_forces, + # 'penetration_forces_points': None, + # } + # link_maximal_contact_forces = torch.zeros((redmax_ndof_r, 6), dtype=torch.float32).cuda() ## pointset ## + ''' the bending network still have this property and we can get force values here for the expanded visual points ''' + # self.timestep_to_actuator_points_passive_forces[cur_ts] = self.other_bending_network.penetrating_forces_allpts.detach().clone() + + # if contact_pairs_set is not None: + # self.contact_pairs_sets[cur_ts] = contact_pairs_set.copy() + + # # contact force d ## ts to the passive normals ## + # self.ts_to_contact_passive_normals[cur_ts] = self.other_bending_network.tot_contact_passive_normals.detach().cpu().numpy() + # self.ts_to_passive_pts[cur_ts] = self.other_bending_network.cur_passive_obj_verts.detach().cpu().numpy() + # self.ts_to_passive_normals[cur_ts] = self.other_bending_network.cur_passive_obj_ns.detach().cpu().numpy() + # self.ts_to_contact_force_d[cur_ts] = self.other_bending_network.contact_force_d.detach().cpu().numpy() + # self.ts_to_penalty_frictions[cur_ts] = self.other_bending_network.penalty_friction_tangential_forces.detach().cpu().numpy() + # if self.other_bending_network.penalty_based_friction_forces is not None: + # self.ts_to_penalty_disp_pts[cur_ts] = self.other_bending_network.penalty_based_friction_forces.detach().cpu().numpy() + + + + # if self.optimize_with_intermediates: + # tracking_loss = self.compute_loss_optimized_transformations(cur_ts + 1) # + # else: + # 
tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + + + # # cur_ts % mano_nn_substeps == 0: # + if (cur_ts + 1) % mano_nn_substeps == 0: + cur_passive_big_ts = cur_ts // mano_nn_substeps + ## compute optimized transformations ## + tracking_loss = self.compute_loss_optimized_transformations_v2(cur_ts + 1, cur_passive_big_ts + 1) + tot_tracking_loss.append(tracking_loss.detach().cpu().item()) + else: + tracking_loss = torch.zeros((1,), dtype=torch.float32).cuda().mean() + + # # hand_tracking_loss = torch.sum( ## delta states? ## + # # (self.timestep_to_active_mesh_w_delta_states[cur_ts] - cur_visual_pts) ** 2, dim=-1 + # # ) + # # hand_tracking_loss = hand_tracking_loss.mean() + + + # # loss = tracking_loss + self.other_bending_network.penetrating_depth_penalty * self.penetrating_depth_penalty_coef + # # diff_redmax_visual_pts_with_ori_visual_pts.backward() + penetraton_penalty = self.other_bending_network.penetrating_depth_penalty * self.penetrating_depth_penalty_coef + + tot_penetration_depth.append(penetraton_penalty.detach().item()) + + # smaller_than_zero_level_set_indicator + # cur_interpenetration_nns = self.other_bending_network.smaller_than_zero_level_set_indicator.float().sum() + + # tot_interpenetration_nns.append(cur_interpenetration_nns) + # + # diff_hand_tracking = torch.zeros((1,), dtype=torch.float32).cuda().mean() ## + + + # # kinematics_proj_loss = kinematics_trans_diff + penetraton_penalty + diff_hand_tracking * self.diff_hand_tracking_coef + tracking_loss + + # # if self.use_mano_hand_for_test: ## only the kinematics mano hand is optimized here ## + # # kinematics_proj_loss = tracking_loss + + # # kinematics_proj_loss = hand_tracking_loss * 1e2 ## 1e2 and the 1e2 ## + + # kinematics_proj_loss = diff_hand_tracking * self.diff_hand_tracking_coef + tracking_loss + penetraton_penalty + + # kinematics_proj_loss = loss_finger_tracking # + tracking_loss + penetraton_penalty + + # reg_delta_offset_loss = torch.sum( + # (mano_expanded_actuator_delta_offset_ori - self.mano_expanded_actuator_delta_offset.weight.data) ** 2, dim=-1 + # ) + # reg_delta_offset_loss = reg_delta_offset_loss.mean() + # motion_reg_loss_coef + # reg_delta_offset_loss = reg_delta_offset_loss * self.motion_reg_loss_coef + + + ### tracking loss and the penetration penalty ### + # kinematics_proj_loss = tracking_loss + penetraton_penalty + reg_delta_offset_loss + + cur_kine_rhand_verts = self.rhand_verts[cur_ts // mano_nn_substeps] + cur_dyn_visual_pts_to_mano_verts = cur_visual_pts[self.mano_to_dyn_corr_pts_idxes] + diff_hand_tracking = torch.mean( + torch.sum((cur_kine_rhand_verts - cur_dyn_visual_pts_to_mano_verts) ** 2, dim=-1) + ) + + + kinematics_proj_loss = tracking_loss + diff_hand_tracking + ### kinematics proj loss ### + loss = kinematics_proj_loss # * self.loss_scale_coef + + + + # self.kines_optimizer.zero_grad() + + # try: + # kinematics_proj_loss.backward(retain_graph=True) + + # self.kines_optimizer.step() + # except: + # pass + + + + + + self.optimizer.zero_grad() + # loss = diff_hand_tracking + loss.backward(retain_graph=True) # + self.optimizer.step() ## update the weights ## + + # diff_hand_tracking # diff hand ## + mano_tracking_loss.append(diff_hand_tracking.detach().cpu().item()) + tot_tracking_loss.append(tracking_loss.detach().cpu().item()) + + + tot_losses.append(loss.detach().item()) + + self.writer.add_scalar('Loss/loss', loss, self.iter_step) + + self.iter_step += 1 + self.update_learning_rate() ## update learning rate ## + + torch.cuda.empty_cache() + + + ''' Get 
nn_forward_ts and backward through the actions for updating '''
+
+            ## save checkpoint ##
+
+            if (i_iter % self.ckpt_sv_freq) == 0:
+                self.save_checkpoint()
+
+
+            tot_losses = sum(tot_losses) / float(len(tot_losses))
+            # tot_penalty_dot_forces_normals = sum(tot_penalty_dot_forces_normals) / float(len(tot_penalty_dot_forces_normals))
+            # tot_penalty_friction_constraint = sum(tot_penalty_friction_constraint) / float(len(tot_penalty_friction_constraint))
+            tot_tracking_loss = sum(tot_tracking_loss) / float(len(tot_tracking_loss))
+            # tot_penetration_depth = sum(tot_penetration_depth) / float(len(tot_penetration_depth))
+            # robo_actions_diff_loss = sum(robo_actions_diff_loss) / float(len(robo_actions_diff_loss))
+            mano_tracking_loss = sum(mano_tracking_loss) / float(len(mano_tracking_loss))
+
+            # if tot_losses < self.minn_tracking_loss:
+            #     self.minn_tracking_loss = tot_losses
+            #     self.save_checkpoint(tag="best")
+
+            if i_iter % self.report_freq == 0:
+                logs_sv_fn = os.path.join(self.base_exp_dir, 'log.txt')
+
+                cur_log_sv_str = 'iter:{:>8d} loss = {} mano_tracking_loss = {} tot_tracking_loss = {} lr={}'.format(self.iter_step, tot_losses, mano_tracking_loss, tot_tracking_loss, self.optimizer.param_groups[0]['lr'])
+
+                print(cur_log_sv_str)
+                ''' Dump to the file '''
+                with open(logs_sv_fn, 'a') as log_file:
+                    log_file.write(cur_log_sv_str + '\n')
+
+            # self.validate_mesh_robo_a()
+            if i_iter % self.val_mesh_freq == 0:
+                self.validate_mesh_robo()
+                # self.validate_mesh_robo_e() # validate meshes #
+
+            torch.cuda.empty_cache()
+
+
+    def get_cos_anneal_ratio(self):
+        if self.anneal_end == 0.0:
+            return 1.0 # cos anneal ratio #
+        else:
+            return np.min([1.0, self.iter_step / self.anneal_end])
+
+    def update_learning_rate(self):
+        if self.iter_step < self.warm_up_end: # linear warm-up before warm_up_end #
+            learning_factor = self.iter_step / self.warm_up_end
+        else:
+            alpha = self.learning_rate_alpha
+            progress = (self.iter_step - self.warm_up_end) / (self.end_iter - self.warm_up_end)
+            learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (1 - alpha) + alpha
+
+        ## scale the base learning rate of every optimizer param group ##
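+        # The factor computed above is a cosine decay with linear warm-up: it ramps linearly from 0 to 1
+        # over the first `warm_up_end` steps, then falls from 1.0 towards `learning_rate_alpha` following
+        # (cos(pi * progress) + 1) / 2.  For example, with alpha = 0.05 and progress = 0.5 the factor is
+        # 0.5 * (1 - 0.05) + 0.05 = 0.525.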
+ for g in self.optimizer.param_groups: + g['lr'] = self.learning_rate * learning_factor + + ## backup files ## + def file_backup(self): + dir_lis = self.conf['general.recording'] + os.makedirs(os.path.join(self.base_exp_dir, 'recording'), exist_ok=True) + for dir_name in dir_lis: + cur_dir = os.path.join(self.base_exp_dir, 'recording', dir_name) + os.makedirs(cur_dir, exist_ok=True) + files = os.listdir(dir_name) + for f_name in files: + if f_name[-3:] == '.py': + copyfile(os.path.join(dir_name, f_name), os.path.join(cur_dir, f_name)) + + copyfile(self.conf_path, os.path.join(self.base_exp_dir, 'recording', 'config.conf')) + + def load_checkpoint(self, checkpoint_name): + checkpoint = torch.load(os.path.join(self.base_exp_dir, 'checkpoints', checkpoint_name), map_location=self.device) + self.nerf_outside.load_state_dict(checkpoint['nerf']) + for i_obj in range(len(self.sdf_network)): + self.sdf_network[i_obj].load_state_dict(checkpoint['sdf_network_fine'][i_obj]) + self.bending_network[i_obj].load_state_dict(checkpoint['bending_network_fine'][i_obj]) + # self.sdf_network.load_state_dict(checkpoint['sdf_network_fine']) + self.deviation_network.load_state_dict(checkpoint['variance_network_fine']) + self.color_network.load_state_dict(checkpoint['color_network_fine']) + self.optimizer.load_state_dict(checkpoint['optimizer']) + self.iter_step = checkpoint['iter_step'] + + logging.info('End') + + + def load_checkpoint_via_fn(self, checkpoint_fn): + + checkpoint = torch.load(checkpoint_fn, map_location=self.device, ) + + self.other_bending_network.load_state_dict(checkpoint['dyn_model'], strict=False) + + logging.info(f"checkpoint with sdf_net and bending_net loaded from {checkpoint_fn}") + + logging.info('End') + + def load_checkpoint_prev_delta(self, delta_mesh_checkpoint_fn): + delta_mesh_checkpoint = torch.load(delta_mesh_checkpoint_fn, map_location=self.device) + self.prev_sdf_network.load_state_dict(delta_mesh_checkpoint['sdf_network_fine']) + logging.info(f"delta_mesh checkpoint loaded from {delta_mesh_checkpoint_fn}") + + def save_checkpoint_delta_states(self, ): + checkpoint = { + 'robot_delta_states': self.robot_delta_states.state_dict() + } + ckpt_sv_root_folder = os.path.join(self.base_exp_dir, 'checkpoints') + os.makedirs(ckpt_sv_root_folder, exist_ok=True) + ckpt_sv_fn = os.path.join(ckpt_sv_root_folder, 'robo_delta_states_ckpt_{:0>6d}.pth'.format(self.iter_step)) + + torch.save(checkpoint, ckpt_sv_fn) + + + def save_checkpoint_redmax_robot_actions(self, ): + checkpoint = { + 'redmax_robot_actions': self.redmax_robot_actions.state_dict() + } + ckpt_sv_root_folder = os.path.join(self.base_exp_dir, 'checkpoints') + os.makedirs(ckpt_sv_root_folder, exist_ok=True) + ckpt_sv_fn = os.path.join(ckpt_sv_root_folder, 'redmax_robot_actions_ckpt_{:0>6d}.pth'.format(self.iter_step)) + + torch.save(checkpoint, ckpt_sv_fn) + + + def save_checkpoint(self, tag="", niter=False): + + checkpoint = { + 'dyn_model': self.other_bending_network.state_dict() + } + + + if self.mode in ['train_actions_from_model_rules', 'train_mano_actions_from_model_rules', 'train_actions_from_mano_model_rules', 'train_real_robot_actions_from_mano_model_rules', 'train_real_robot_actions_from_mano_model_rules_diffhand', 'train_real_robot_actions_from_mano_model_rules_diffhand_fortest', 'train_sparse_retar', 'train_real_robot_actions_from_mano_model_rules_manohand_fortest', 'train_real_robot_actions_from_mano_model_rules_manohand_fortest_states', 
'train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_world', 'train_dyn_mano_model', 'train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_grab', 'train_point_set', 'train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab', 'train_point_set_retar', "train_point_set_retar_pts", "train_finger_kinematics_retargeting_arctic_twohands", "train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_arctic_twohands", "train_redmax_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab", "train_dyn_mano_model_wreact"]: + try: + checkpoint['robot_actions'] = self.robot_actions.state_dict() + except: + pass + + try: + checkpoint['robot_actions_lft'] = self.robot_actions_lft.state_dict() + except: + pass + try: + checkpoint['robot_init_states'] = self.robot_init_states.state_dict() + except: + pass + try: + checkpoint['robot_glb_rotation'] = self.robot_glb_rotation.state_dict() + except: + pass + + try: + checkpoint['robot_glb_rotation_lft'] = self.robot_glb_rotation_lft.state_dict() + except: + pass + + try: + checkpoint['robot_actuator_friction_forces'] = self.robot_actuator_friction_forces.state_dict() + except: + pass + + try: + checkpoint['robot_actuator_friction_forces'] = self.robot_actuator_friction_forces.state_dict() + except: + pass + + try: + checkpoint['robot_glb_trans'] = self.robot_glb_trans.state_dict() + except: + pass + # try: + # checkpoint['robot_delta_states'] = self.robot_delta_states.state_dict() + # except: + # pass + + try: + checkpoint['robot_delta_angles'] = self.robot_delta_angles.state_dict() + except: + pass + + try: + checkpoint['robot_delta_trans'] = self.robot_delta_trans.state_dict() + except: + pass + + # try: + # checkpoint['robot_glb_trans_lft'] = self.robot_glb_trans_lft.state_dict() + # except: + # pass + + try: + checkpoint['robot_glb_trans_lft'] = self.robot_glb_trans_lft.state_dict() + except: + pass + try: + checkpoint['robot_delta_states'] = self.robot_delta_states.state_dict() + except: + pass + + + if self.mode in ['train_actions_from_mano_model_rules']: + + checkpoint['expanded_actuator_friction_forces'] = self.expanded_actuator_friction_forces.state_dict() + # robot_delta_states + + try: + checkpoint['expanded_actuator_delta_offset'] = self.expanded_actuator_delta_offset.state_dict() + except: + pass + + # mano_expanded_actuator_pointact_forces + # mano_expanded_actuator_friction_forces, mano_expanded_actuator_delta_offset # + try: + checkpoint['mano_expanded_actuator_friction_forces'] = self.mano_expanded_actuator_friction_forces.state_dict() + except: + pass + + try: + checkpoint['mano_expanded_actuator_pointact_forces'] = self.mano_expanded_actuator_pointact_forces.state_dict() + except: + pass + + + try: + checkpoint['mano_expanded_actuator_delta_offset'] = self.mano_expanded_actuator_delta_offset.state_dict() + except: + pass + + # mano_expanded_actuator_delta_offset_nex + try: + checkpoint['mano_expanded_actuator_delta_offset_nex'] = self.mano_expanded_actuator_delta_offset_nex.state_dict() + except: + pass + + # expanded_actuator_pointact_forces + try: + checkpoint['expanded_actuator_pointact_forces'] = self.expanded_actuator_pointact_forces.state_dict() + except: + pass + + try: + checkpoint['expanded_actuator_delta_offset'] = self.expanded_actuator_delta_offset.state_dict() + except: + pass + + # mano_robot_glb_rotation, mano_robot_glb_trans, mano_robot_init_states, mano_robot_delta_states # + try: + checkpoint['mano_robot_glb_rotation'] = 
self.mano_robot_glb_rotation.state_dict() + except: + pass + + try: + checkpoint['mano_robot_glb_trans'] = self.mano_robot_glb_trans.state_dict() + except: + pass + + try: + checkpoint['mano_robot_init_states'] = self.mano_robot_init_states.state_dict() + except: + pass + + try: + checkpoint['mano_robot_delta_states'] = self.mano_robot_delta_states.state_dict() + except: + pass + + try: + checkpoint['mano_robot_states'] = self.mano_robot_states.state_dict() + except: + pass + + try: + checkpoint['mano_robot_actions'] = self.mano_robot_actions.state_dict() + except: + pass + + try: + checkpoint['redmax_robot_actions'] = self.redmax_robot_actions.state_dict() + except: + pass + + # residual_controller, residual_dynamics_model # + try: + checkpoint['residual_controller'] = self.residual_controller.state_dict() + except: + pass + + try: + checkpoint['residual_dynamics_model'] = self.residual_dynamics_model.state_dict() + except: + pass + + # robot_states + try: + checkpoint['robot_states'] = self.robot_states.state_dict() + except: + pass + + try: + checkpoint['robot_states_sv_from_act'] = self.robot_states_sv + except: + pass + + try: + checkpoint['robot_states_lft'] = self.robot_states_lft.state_dict() + except: + pass + + try: + checkpoint['robot_delta_states_lft'] = self.robot_delta_states_lft.state_dict() + except: + pass + + # robot_delta_glb_trans, robot_delta_glb_trans_lft + try: + checkpoint['robot_delta_glb_trans'] = self.robot_delta_glb_trans.state_dict() + except: + pass + + try: + checkpoint['robot_delta_glb_trans_lft'] = self.robot_delta_glb_trans_lft.state_dict() + except: + pass + + # # object_global_orient, object_transl # + try: + checkpoint['object_transl'] = self.object_transl + checkpoint['object_global_orient'] = self.object_global_orient + except: + pass + + try: + # optimized_quat + cur_optimized_quat = np.stack(self.optimized_quat, axis=0) + cur_optimized_trans = np.stack(self.optimized_trans, axis=0) + checkpoint['optimized_quat'] = cur_optimized_quat + checkpoint['optimized_trans'] = cur_optimized_trans + except: + pass + + + + print(f"Saving checkpoint with keys {checkpoint.keys()}") + os.makedirs(os.path.join(self.base_exp_dir, 'checkpoints'), exist_ok=True) + + ckpt_sv_fn = os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{:0>6d}.pth'.format(self.iter_step)) + if len(tag) > 0: + if tag == "best": ## + ckpt_sv_fn = os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{}.pth'.format(tag)) + else: + if niter: ### + ckpt_sv_fn = os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{}.pth'.format(tag)) + else: + # ckpt_sv_fn = os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{}_{}.pth'.format(self.iter_step, tag)) + ckpt_sv_fn = os.path.join(self.base_exp_dir, 'checkpoints', 'ckpt_{}_{}.pth'.format(self.iter_step, tag)) + + torch.save(checkpoint, ckpt_sv_fn) + + + + + bending_net_save_values = self.other_bending_network.save_values + os.makedirs(os.path.join(self.base_exp_dir, 'miscs'), exist_ok=True) + bending_net_save_values_sv_fn = os.path.join(self.base_exp_dir, 'miscs', 'bending_net_save_values_{:0>6d}.npy'.format(self.iter_step)) + np.save(bending_net_save_values_sv_fn, bending_net_save_values) + return ckpt_sv_fn + + + + def validate_mesh_robo(self, ): + ## merge meshes ## + def merge_meshes(verts_list, faces_list): + tot_verts_nn = 0 + merged_verts = [] + merged_faces = [] + for i_mesh in range(len(verts_list)): + merged_verts.append(verts_list[i_mesh]) + merged_faces.append(faces_list[i_mesh] + tot_verts_nn) # + tot_verts_nn += 
verts_list[i_mesh].shape[0] + merged_verts = np.concatenate(merged_verts, axis=0) + merged_faces = np.concatenate(merged_faces, axis=0) + return merged_verts, merged_faces + + # self.hand_faces, self.obj_faces # # hand faces # + mano_hand_faces_np = self.hand_faces.detach().cpu().numpy() + hand_faces_np = self.robo_hand_faces.detach().cpu().numpy() + + if self.use_mano_inputs: + hand_faces_np = mano_hand_faces_np + + if self.optim_sim_model_params_from_mano: + hand_faces_np = mano_hand_faces_np + + obj_faces_np = self.obj_faces.detach().cpu().numpy() + + + if self.other_bending_network.canon_passive_obj_verts is None: + init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + else: + init_passive_obj_verts = self.other_bending_network.canon_passive_obj_verts.detach().cpu().numpy() + init_passive_obj_verts_center = torch.zeros((3, )).cuda().detach().cpu().numpy() + + # init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + # init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_obj_quaternion = {} + ts_to_obj_rot_mtx = {} + ts_to_obj_trans = {} + ts_to_hand_obj_verts = {} + # for i_ts in range(1, self.nn_timesteps - 1, 10): + for i_ts in range(0, (self.nn_ts - 1) * self.mano_nn_substeps, 1): + if self.optim_sim_model_params_from_mano: + cur_hand_mesh = self.rhand_verts[i_ts] # .detach().cpu().numpy() + else: + cur_hand_mesh = self.timestep_to_active_mesh[i_ts] + + if i_ts % self.mano_nn_substeps == 0: + cur_mano_rhand = self.rhand_verts[i_ts // self.mano_nn_substeps].detach().cpu().numpy() + cur_rhand_verts = cur_hand_mesh + cur_rhand_verts_np = cur_rhand_verts.detach().cpu().numpy() + # cur_lhand_verts_np = cur_lhand_verts.detach().cpu().numpy() + if i_ts not in self.other_bending_network.timestep_to_optimizable_rot_mtx: # to optimizable rot mtx # + cur_pred_rot_mtx = np.eye(3, dtype=np.float32) + cur_pred_trans = np.zeros((3,), dtype=np.float32) + else: + cur_pred_rot_mtx = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + cur_pred_trans = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + cur_transformed_obj = np.matmul(cur_pred_rot_mtx, (init_passive_obj_verts - init_passive_obj_verts_center).T).T + init_passive_obj_verts_center + np.reshape(cur_pred_trans, (1, 3)) + + ## the training + if self.mode in ['train_sparse_retar']: + cur_transformed_obj = self.obj_pcs[i_ts].detach().cpu().numpy() + + if i_ts not in self.other_bending_network.timestep_to_optimizable_quaternion: + ts_to_obj_quaternion[i_ts] = np.zeros((4,), dtype=np.float32) + ts_to_obj_quaternion[i_ts][0] = 1. # ## quaternion ## # + ts_to_obj_rot_mtx[i_ts] = np.eye(3, dtype=np.float32) + ts_to_obj_trans[i_ts] = np.zeros((3,), dtype=np.float32) + else: + ts_to_obj_quaternion[i_ts] = self.other_bending_network.timestep_to_optimizable_quaternion[i_ts].detach().cpu().numpy() + ts_to_obj_rot_mtx[i_ts] = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + ts_to_obj_trans[i_ts] = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + # + ts_to_hand_obj_verts[i_ts] = (cur_rhand_verts_np, cur_transformed_obj) # not correct.... 
# + # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + if i_ts % 10 == 0: + # print(f"exporting meshes i_ts: {i_ts}, cur_hand_verts_np: {cur_rhand_verts_np.shape}, hand_faces_np: {hand_faces_np.shape}") + merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj], [hand_faces_np, obj_faces_np]) + maxx_hand_faces, minn_hand_faces = np.max(hand_faces_np), np.min(hand_faces_np) + maxx_obj_faces, minn_obj_faces = np.max(obj_faces_np), np.min(obj_faces_np) + + # print(f"cur_rhand_verts_np: {cur_rhand_verts_np.shape}, cur_transformed_obj: {cur_transformed_obj.shape}, maxx_hand_faces: {maxx_hand_faces}, minn_hand_faces: {minn_hand_faces}, maxx_obj_faces: {maxx_obj_faces}, minn_obj_faces: {minn_obj_faces}") + mesh = trimesh.Trimesh(merged_verts, merged_faces) + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + if i_ts % self.mano_nn_substeps == 0: + ### overlayed with the mano mesh ### + merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + mesh = trimesh.Trimesh(merged_verts, merged_faces) + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}_wmano.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + + ts_sv_dict = { + 'ts_to_obj_quaternion': ts_to_obj_quaternion, + 'ts_to_obj_rot_mtx': ts_to_obj_rot_mtx, + 'ts_to_obj_trans': ts_to_obj_trans, + 'ts_to_hand_obj_verts': ts_to_hand_obj_verts + } + + if self.mode not in ['train_sparse_retar']: + hand_obj_verts_faces_sv_dict = { + 'ts_to_hand_obj_verts': ts_to_hand_obj_verts, + 'hand_faces': hand_faces_np, + 'obj_faces': obj_faces_np + } + + hand_obj_verts_faces_sv_dict_sv_fn = 'hand_obj_verts_faces_sv_dict_{:0>8d}.npy'.format(self.iter_step) + hand_obj_verts_faces_sv_dict_sv_fn = os.path.join(mesh_sv_root_dir, hand_obj_verts_faces_sv_dict_sv_fn) + np.save(hand_obj_verts_faces_sv_dict_sv_fn, hand_obj_verts_faces_sv_dict) + # collision detection and # hand obj verts faces sv dict # + try: + timestep_to_mano_active_mesh = {ts: self.timestep_to_mano_active_mesh[ts].detach().cpu().numpy() for ts in self.timestep_to_mano_active_mesh} + mano_sv_dict_sv_fn = 'mano_act_pts_{:0>8d}.npy'.format(self.iter_step) + mano_sv_dict_sv_fn = os.path.join(mesh_sv_root_dir, mano_sv_dict_sv_fn) + np.save(mano_sv_dict_sv_fn, timestep_to_mano_active_mesh) + + timestep_to_robot_mesh = {ts: self.timestep_to_active_mesh[ts].detach().cpu().numpy() for ts in self.timestep_to_active_mesh} + rhand_verts = self.rhand_verts.detach().cpu().numpy() + # ts_to_robot_fingers, ts_to_mano_fingers + retar_sv_dict = { + 'timestep_to_robot_mesh': timestep_to_robot_mesh, + 'rhand_verts': rhand_verts, + 'ts_to_robot_fingers': self.ts_to_robot_fingers, + 'ts_to_mano_fingers': self.ts_to_mano_fingers, + } + retar_sv_fn = 'retar_info_dict_{:0>8d}.npy'.format(self.iter_step) + retar_sv_fn = os.path.join(mesh_sv_root_dir, retar_sv_fn) + np.save(retar_sv_fn, retar_sv_dict) + + timestep_to_corr_mano_pts = {ts: self.timestep_to_corr_mano_pts[ts].detach().cpu().numpy() for ts in self.timestep_to_corr_mano_pts} + mano_sv_dict_sv_fn = 'corr_mano_act_pts_{:0>8d}.npy'.format(self.iter_step) + mano_sv_dict_sv_fn = os.path.join(mesh_sv_root_dir, mano_sv_dict_sv_fn) + np.save(mano_sv_dict_sv_fn, timestep_to_corr_mano_pts) + except: + pass + sv_dict_sv_fn = 
'{:0>8d}.npy'.format(self.iter_step) + sv_dict_sv_fn = os.path.join(mesh_sv_root_dir, sv_dict_sv_fn) + np.save(sv_dict_sv_fn, ts_sv_dict) + + try: + robo_intermediates_states_np = self.robo_intermediates_states.numpy() + robo_intermediates_states_sv_fn = f"robo_intermediates_states_{self.iter_step}.npy" + robo_intermediates_states_sv_fn = os.path.join(mesh_sv_root_dir, robo_intermediates_states_sv_fn) + np.save(robo_intermediates_states_sv_fn, robo_intermediates_states_np) # + except: + pass + + def validate_mesh_robo_redmax_acts(self, i_iter, tag=None): + optimized_redmax_meshes = { + ts: self.ts_to_act_opt_pts_woglb[ts].detach().cpu().numpy() for ts in self.ts_to_act_opt_pts_woglb + } + states_optimized_meshes = { + ts: self.timestep_to_active_mesh_wo_glb_from_states[ts].detach().cpu().numpy() for ts in self.timestep_to_active_mesh_wo_glb_from_states + } + act_optimized_sv_dict = { + 'optimized_redmax_meshes': optimized_redmax_meshes, + 'states_optimized_meshes': states_optimized_meshes, + } + + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + if (tag is None) or (len(tag) == 0): + act_optimized_sv_dict_fn = f"act_optimized_sv_dict_{i_iter}.npy" + else: + act_optimized_sv_dict_fn = f"act_optimized_sv_dict_{i_iter}_tag_{tag}.npy" + act_optimized_sv_dict_fn = os.path.join(mesh_sv_root_dir, act_optimized_sv_dict_fn) + np.save(act_optimized_sv_dict_fn, act_optimized_sv_dict) # + print(f"Redmax acts optimized info saved to {act_optimized_sv_dict_fn}") + + + def validate_mesh_robo_d(self, ): + init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + + ts_to_active_mesh = { + ts: self.ts_to_target_sim_active_meshes[ts].detach().cpu().numpy() for ts in self.ts_to_target_sim_active_meshes + } + ts_to_passive_mesh = {} + for ts in self.ts_to_target_sim_active_meshes: + cur_pred_quat = self.ts_to_target_sim_obj_quat[ts] + cur_pred_trans = self.ts_to_target_sim_obj_trans[ts].detach().cpu().numpy() + + cur_pred_rot_mtx = fields.quaternion_to_matrix(cur_pred_quat).detach().cpu().numpy() + + ## cur pred trnas ## + # cur_pred_trans = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + cur_transformed_obj = np.matmul(cur_pred_rot_mtx, (init_passive_obj_verts - init_passive_obj_verts_center).T).T + init_passive_obj_verts_center + np.reshape(cur_pred_trans, (1, 3)) + ts_to_passive_mesh[ts] = cur_transformed_obj + + obj_faces_np = self.obj_faces.detach().cpu().numpy() + hand_faces_np = self.robo_hand_faces.detach().cpu().numpy() # # + + # obj faces; hand faces; ts to active meshes; ts to passive meshes # + + sv_dict = { + 'ts_to_active_mesh': ts_to_active_mesh, 'ts_to_passive_mesh': ts_to_passive_mesh, 'obj_faces': obj_faces_np, 'hand_faces': hand_faces_np + } + + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_active_mesh_sv_fn = '{:0>8d}_ts_to_target_sim_active_mesh.npy'.format(self.iter_step) + ts_to_active_mesh_sv_fn = os.path.join(mesh_sv_root_dir, ts_to_active_mesh_sv_fn) + np.save(ts_to_active_mesh_sv_fn, sv_dict) + + + def validate_mesh_robo_a(self, ): + ts_to_active_mesh = { + ts: self.timestep_to_active_mesh[ts][self.sampled_verts_idxes].detach().cpu().numpy() for ts in self.timestep_to_active_mesh + } + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + 
ts_to_active_mesh_sv_fn = '{:0>8d}_ts_to_active_mesh.ply'.format(self.iter_step) + ts_to_active_mesh_sv_fn = os.path.join(mesh_sv_root_dir, ts_to_active_mesh_sv_fn) + np.save(ts_to_active_mesh_sv_fn, ts_to_active_mesh) + + def validate_mesh_robo_b(self, ): + ts_to_active_mesh = { + ts: self.cur_ts_to_optimized_visual_pts[ts] for ts in self.cur_ts_to_optimized_visual_pts + } + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_active_mesh_sv_fn = '{:0>8d}_ts_to_opt_act_pts.ply'.format(self.iter_step) + ts_to_active_mesh_sv_fn = os.path.join(mesh_sv_root_dir, ts_to_active_mesh_sv_fn) + np.save(ts_to_active_mesh_sv_fn, ts_to_active_mesh) + + + def save_redmax_actions(self, ): + + # tot_redmax_actions + redmax_act_sv_folder = os.path.join(self.base_exp_dir, 'checkpoint') + os.makedirs(redmax_act_sv_folder, exist_ok=True) + redmax_act_sv_fn = '{:0>8d}_redmax_act.npy'.format(self.iter_step) + redmax_act_sv_fn = os.path.join(redmax_act_sv_folder, redmax_act_sv_fn) + + + np.save(redmax_act_sv_fn, self.tot_redmax_actions.detach().cpu().numpy()) + + + def validate_mesh_robo_c(self, ): + ts_to_active_mesh = { + ts: self.tot_visual_pts[ts] for ts in range(self.tot_visual_pts.shape[0]) + } + + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_active_mesh_sv_fn = '{:0>8d}_ts_to_opt_intermediate_act_pts.npy'.format(self.iter_step) + ts_to_active_mesh_sv_fn = os.path.join(mesh_sv_root_dir, ts_to_active_mesh_sv_fn) + np.save(ts_to_active_mesh_sv_fn, ts_to_active_mesh) + + ts_to_ref_act_mesh = { + ts: self.timestep_to_active_mesh_wo_glb[ts].detach().cpu().numpy() for ts in self.timestep_to_active_mesh_wo_glb + } + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_ref_act_mesh_sv_fn = '{:0>8d}_ts_to_ref_act_pts.npy'.format(self.iter_step) + ts_to_ref_act_mesh_sv_fn = os.path.join(mesh_sv_root_dir, ts_to_ref_act_mesh_sv_fn) + np.save(ts_to_ref_act_mesh_sv_fn, ts_to_ref_act_mesh) + + + + + def validate_mesh_robo_e(self, ): + def merge_meshes(verts_list, faces_list): + tot_verts_nn = 0 + merged_verts = [] + merged_faces = [] + for i_mesh in range(len(verts_list)): + merged_verts.append(verts_list[i_mesh]) + merged_faces.append(faces_list[i_mesh] + tot_verts_nn) # + tot_verts_nn += verts_list[i_mesh].shape[0] + merged_verts = np.concatenate(merged_verts, axis=0) + merged_faces = np.concatenate(merged_faces, axis=0) + return merged_verts, merged_faces + + + mano_hand_faces_np = self.hand_faces.detach().cpu().numpy() + hand_faces_np = self.robo_hand_faces.detach().cpu().numpy() + + if self.use_mano_inputs: + hand_faces_np = mano_hand_faces_np + + obj_faces_np = self.obj_faces.detach().cpu().numpy() + + + # init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + # init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + + + retargeting_info = {} + ts_to_retargeted_info = {} + + for i_ts in range(0, self.nn_ts - 1, 1): + cur_mano_rhand = self.rhand_verts[i_ts].detach().cpu().numpy() + cur_hand_verts = self.timestep_to_active_mesh[i_ts].detach().cpu().numpy() + obj_verts = self.timestep_to_passive_mesh[i_ts].detach().cpu().numpy() + obj_faces = obj_faces_np + hand_faces = hand_faces_np + + ts_to_retargeted_info[i_ts] = (cur_hand_verts, cur_mano_rhand, 
obj_verts) + + + if i_ts % 10 == 0: # merged verts # + merged_verts, merged_faces = merge_meshes([cur_hand_verts, obj_verts], [hand_faces, obj_faces]) + ## ## merged + mesh = trimesh.Trimesh(merged_verts, merged_faces) + + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + + merged_verts, merged_faces = merge_meshes([cur_hand_verts, obj_verts, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + mesh = trimesh.Trimesh(merged_verts, merged_faces) + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}_wmano.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + cur_hand_mesh = trimesh.Trimesh(cur_hand_verts, hand_faces) + mesh_sv_fn = 'dyn_mano_hand_{:0>8d}_ts_{:0>3d}.obj'.format(self.iter_step, i_ts) + cur_hand_mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + cur_obj_mesh = trimesh.Trimesh(obj_verts, obj_faces_np) + mesh_sv_fn = 'obj_{:0>8d}_ts_{:0>3d}.obj'.format(self.iter_step, i_ts) + cur_obj_mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + cur_mano_hand_mesh = trimesh.Trimesh(cur_mano_rhand, mano_hand_faces_np) + mesh_sv_fn = 'kine_mano_hand_{:0>8d}_ts_{:0>3d}.obj'.format(self.iter_step, i_ts) + cur_mano_hand_mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + retargeting_info = { + 'ts_to_retargeted_info': ts_to_retargeted_info, + 'obj_verts': obj_verts, + 'hand_faces': hand_faces, + } + retargeting_info_sv_fn = 'retargeting_info_{:0>8d}.npy'.format(self.iter_step) + retargeting_info_sv_fn = os.path.join(mesh_sv_root_dir, retargeting_info_sv_fn) + np.save(retargeting_info_sv_fn, retargeting_info) + # save the retargeted info here ## + + # ts_to_hand_obj_verts = self.get_hand_obj_infos() + def get_hand_obj_infos(self, tag=None): + + # one single hand or # not very easy to + # self.hand_faces, self.obj_faces + mano_hand_faces_np = self.hand_faces.detach().cpu().numpy() + # hand_faces_np = self.robo_hand_faces.detach().cpu().numpy() # ### robot faces # + + # if self.use_mano_inputs: ## mano inputs ## ## validate mesh robo g ## + # hand_faces_np = mano_hand_faces_np + + # obj_faces_np = self.obj_faces.detach().cpu().numpy() + + + if self.other_bending_network.canon_passive_obj_verts is None: + init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + else: + init_passive_obj_verts = self.other_bending_network.canon_passive_obj_verts.detach().cpu().numpy() + init_passive_obj_verts_center = torch.zeros((3, )).cuda().detach().cpu().numpy() + + # init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + # init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_obj_quaternion = {} + ts_to_obj_rot_mtx = {} + ts_to_obj_trans = {} + ts_to_hand_obj_verts = {} + ts_to_transformed_obj_pts = {} + # for i_ts in range(1, self.nn_timesteps - 1, 10): + for i_ts in range(0, (self.nn_ts - 1) * self.mano_nn_substeps, 1): + cur_hand_mesh = self.timestep_to_active_mesh[i_ts] + if i_ts % self.mano_nn_substeps == 0: + cur_mano_rhand = self.rhand_verts[i_ts // self.mano_nn_substeps].detach().cpu().numpy() + cur_rhand_verts = cur_hand_mesh # [: cur_hand_mesh.size(0) // 2] + # cur_lhand_verts = cur_hand_mesh # [cur_hand_mesh.size(0) // 2:] + cur_rhand_verts_np = 
cur_rhand_verts.detach().cpu().numpy() + # cur_lhand_verts_np = cur_lhand_verts.detach().cpu().numpy() + if i_ts not in self.other_bending_network.timestep_to_optimizable_rot_mtx: # to optimizable rot mtx # + cur_pred_rot_mtx = np.eye(3, dtype=np.float32) + cur_pred_trans = np.zeros((3,), dtype=np.float32) + else: + cur_pred_rot_mtx = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + cur_pred_trans = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + cur_transformed_obj = np.matmul(cur_pred_rot_mtx, (init_passive_obj_verts - init_passive_obj_verts_center).T).T + init_passive_obj_verts_center + np.reshape(cur_pred_trans, (1, 3)) + + if i_ts not in self.other_bending_network.timestep_to_optimizable_quaternion: + ts_to_obj_quaternion[i_ts] = np.zeros((4,), dtype=np.float32) + ts_to_obj_quaternion[i_ts][0] = 1. # ## quaternion ## # + ts_to_obj_rot_mtx[i_ts] = np.eye(3, dtype=np.float32) + ts_to_obj_trans[i_ts] = np.zeros((3,), dtype=np.float32) + else: + ts_to_obj_quaternion[i_ts] = self.other_bending_network.timestep_to_optimizable_quaternion[i_ts].detach().cpu().numpy() + ts_to_obj_rot_mtx[i_ts] = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + ts_to_obj_trans[i_ts] = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + # + ts_to_hand_obj_verts[i_ts] = (cur_rhand_verts_np, cur_transformed_obj) # not correct.... # + # tostotrans + # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + # if i_ts % 10 == 0: + # # print(f"exporting meshes i_ts: {i_ts}, cur_hand_verts_np: {cur_rhand_verts_np.shape}, hand_faces_np: {hand_faces_np.shape}") + # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj], [hand_faces_np, obj_faces_np]) + # mesh = trimesh.Trimesh(merged_verts, merged_faces) + # mesh_sv_fn = '{:0>8d}_ts_{:0>3d}.ply'.format(self.iter_step, i_ts) + # mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + # if i_ts % self.mano_nn_substeps == 0: + # ### overlayed with the mano mesh ### + # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + # mesh = trimesh.Trimesh(merged_verts, merged_faces) + # mesh_sv_fn = '{:0>8d}_ts_{:0>3d}_wmano.ply'.format(self.iter_step, i_ts) + # mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + #### + ## + return ts_to_hand_obj_verts + # try: + # timestep_to_anchored_mano_pts = self.timestep_to_anchored_mano_pts + # except: + # timestep_to_anchored_mano_pts = {} + + # try: + # timestep_to_raw_active_meshes = self.timestep_to_raw_active_meshes + # except: + # timestep_to_raw_active_meshes = {} + + # try: + # ts_to_dyn_mano_pts = self.ts_to_dyn_mano_pts + # except: + # ts_to_dyn_mano_pts = {} + # ts_sv_dict = { + # 'ts_to_obj_quaternion': ts_to_obj_quaternion, + # 'ts_to_obj_rot_mtx': ts_to_obj_rot_mtx, + # 'ts_to_obj_trans': ts_to_obj_trans, + # 'ts_to_hand_obj_verts': ts_to_hand_obj_verts, # hand vertices, obj vertices # + # 'obj_faces_np': obj_faces_np, + # # 'timestep_to_anchored_mano_pts': timestep_to_anchored_mano_pts, + # # 'timestep_to_raw_active_meshes': timestep_to_raw_active_meshes, # ts to raw active meshes ## + # # 'ts_to_dyn_mano_pts': ts_to_dyn_mano_pts, + # # 'timestep_to_expanded_robo_visual_pts': { ts: 
self.timestep_to_expanded_robo_visual_pts[ts].detach().cpu().numpy() for ts in self.timestep_to_expanded_robo_visual_pts}, + # } + # try: + # ts_sv_dict['timestep_to_expanded_robo_visual_pts'] = { ts: self.timestep_to_expanded_robo_visual_pts[ts].detach().cpu().numpy() for ts in self.timestep_to_expanded_robo_visual_pts}, + # except: + # pass + + + + def validate_mesh_robo_g(self, tag=None): + def merge_meshes(verts_list, faces_list): + tot_verts_nn = 0 + merged_verts = [] + merged_faces = [] + for i_mesh in range(len(verts_list)): + merged_verts.append(verts_list[i_mesh]) + merged_faces.append(faces_list[i_mesh] + tot_verts_nn) # + tot_verts_nn += verts_list[i_mesh].shape[0] + merged_verts = np.concatenate(merged_verts, axis=0) + merged_faces = np.concatenate(merged_faces, axis=0) + return merged_verts, merged_faces + + # one single hand or # not very easy to + # self.hand_faces, self.obj_faces + mano_hand_faces_np = self.hand_faces.detach().cpu().numpy() + # hand_faces_np = self.robo_hand_faces.detach().cpu().numpy() # ### robot faces # + + if self.use_mano_inputs: ## mano inputs ## ## validate mesh robo g ## + hand_faces_np = mano_hand_faces_np + + obj_faces_np = self.obj_faces.detach().cpu().numpy() + + + if self.other_bending_network.canon_passive_obj_verts is None: + init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + else: + init_passive_obj_verts = self.other_bending_network.canon_passive_obj_verts.detach().cpu().numpy() + init_passive_obj_verts_center = torch.zeros((3, )).cuda().detach().cpu().numpy() + + # init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + # init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_obj_quaternion = {} + ts_to_obj_rot_mtx = {} + ts_to_obj_trans = {} + ts_to_hand_obj_verts = {} + ts_to_transformed_obj_pts = {} + # for i_ts in range(1, self.nn_timesteps - 1, 10): + for i_ts in range(0, (self.nn_ts - 1) * self.mano_nn_substeps, 1): + cur_hand_mesh = self.timestep_to_active_mesh[i_ts] + if i_ts % self.mano_nn_substeps == 0: + cur_mano_rhand = self.rhand_verts[i_ts // self.mano_nn_substeps].detach().cpu().numpy() + cur_rhand_verts = cur_hand_mesh # [: cur_hand_mesh.size(0) // 2] + # cur_lhand_verts = cur_hand_mesh # [cur_hand_mesh.size(0) // 2:] + cur_rhand_verts_np = cur_rhand_verts.detach().cpu().numpy() + # cur_lhand_verts_np = cur_lhand_verts.detach().cpu().numpy() + if i_ts not in self.other_bending_network.timestep_to_optimizable_rot_mtx: # to optimizable rot mtx # + cur_pred_rot_mtx = np.eye(3, dtype=np.float32) + cur_pred_trans = np.zeros((3,), dtype=np.float32) + else: + cur_pred_rot_mtx = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + cur_pred_trans = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + cur_transformed_obj = np.matmul(cur_pred_rot_mtx, (init_passive_obj_verts - init_passive_obj_verts_center).T).T + init_passive_obj_verts_center + np.reshape(cur_pred_trans, (1, 3)) + + if i_ts not in self.other_bending_network.timestep_to_optimizable_quaternion: + ts_to_obj_quaternion[i_ts] = np.zeros((4,), dtype=np.float32) + ts_to_obj_quaternion[i_ts][0] = 1. 
# ## quaternion ## # + ts_to_obj_rot_mtx[i_ts] = np.eye(3, dtype=np.float32) + ts_to_obj_trans[i_ts] = np.zeros((3,), dtype=np.float32) + else: + ts_to_obj_quaternion[i_ts] = self.other_bending_network.timestep_to_optimizable_quaternion[i_ts].detach().cpu().numpy() + ts_to_obj_rot_mtx[i_ts] = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + ts_to_obj_trans[i_ts] = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + # + ts_to_hand_obj_verts[i_ts] = (cur_rhand_verts_np, cur_transformed_obj) # not correct.... # + # tostotrans + # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + # if i_ts % 10 == 0: + # # print(f"exporting meshes i_ts: {i_ts}, cur_hand_verts_np: {cur_rhand_verts_np.shape}, hand_faces_np: {hand_faces_np.shape}") + # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj], [hand_faces_np, obj_faces_np]) + # mesh = trimesh.Trimesh(merged_verts, merged_faces) + # mesh_sv_fn = '{:0>8d}_ts_{:0>3d}.ply'.format(self.iter_step, i_ts) + # mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + # if i_ts % self.mano_nn_substeps == 0: + # ### overlayed with the mano mesh ### + # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + # mesh = trimesh.Trimesh(merged_verts, merged_faces) + # mesh_sv_fn = '{:0>8d}_ts_{:0>3d}_wmano.ply'.format(self.iter_step, i_ts) + # mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + #### + ## + try: + timestep_to_anchored_mano_pts = self.timestep_to_anchored_mano_pts + except: + timestep_to_anchored_mano_pts = {} + + try: + timestep_to_raw_active_meshes = self.timestep_to_raw_active_meshes + except: + timestep_to_raw_active_meshes = {} + + try: + ts_to_dyn_mano_pts = self.ts_to_dyn_mano_pts + except: + ts_to_dyn_mano_pts = {} + ts_sv_dict = { + 'ts_to_obj_quaternion': ts_to_obj_quaternion, + 'ts_to_obj_rot_mtx': ts_to_obj_rot_mtx, + 'ts_to_obj_trans': ts_to_obj_trans, + 'ts_to_hand_obj_verts': ts_to_hand_obj_verts, # hand vertices, obj vertices # + 'obj_faces_np': obj_faces_np, + # 'timestep_to_anchored_mano_pts': timestep_to_anchored_mano_pts, + # 'timestep_to_raw_active_meshes': timestep_to_raw_active_meshes, # ts to raw active meshes ## + # 'ts_to_dyn_mano_pts': ts_to_dyn_mano_pts, + # 'timestep_to_expanded_robo_visual_pts': { ts: self.timestep_to_expanded_robo_visual_pts[ts].detach().cpu().numpy() for ts in self.timestep_to_expanded_robo_visual_pts}, + } + try: + ts_sv_dict['timestep_to_expanded_robo_visual_pts'] = { ts: self.timestep_to_expanded_robo_visual_pts[ts].detach().cpu().numpy() for ts in self.timestep_to_expanded_robo_visual_pts}, + except: + pass + + + timestep_to_mano_active_mesh = {ts: self.timestep_to_active_mesh[ts].detach().cpu().numpy() for ts in self.timestep_to_active_mesh} + if tag is not None: + mano_sv_dict_sv_fn = 'mano_act_pts_{:0>8d}_{}.npy'.format(self.iter_step, tag) + else: + mano_sv_dict_sv_fn = 'mano_act_pts_{:0>8d}.npy'.format(self.iter_step) + mano_sv_dict_sv_fn = os.path.join(mesh_sv_root_dir, mano_sv_dict_sv_fn) + np.save(mano_sv_dict_sv_fn, timestep_to_mano_active_mesh) + + + if tag is not None: + sv_dict_sv_fn = 'retar_info_dict_{:0>8d}_{}.npy'.format(self.iter_step, tag) + else: + sv_dict_sv_fn = 'retar_info_dict_{:0>8d}.npy'.format(self.iter_step) + sv_dict_sv_fn = os.path.join(mesh_sv_root_dir, 
sv_dict_sv_fn) + np.save(sv_dict_sv_fn, ts_sv_dict) + + def validate_mesh_robo_h(self, ): # validate mesh robo ### + def merge_meshes(verts_list, faces_list): + tot_verts_nn = 0 + merged_verts = [] + merged_faces = [] + for i_mesh in range(len(verts_list)): + merged_verts.append(verts_list[i_mesh]) + merged_faces.append(faces_list[i_mesh] + tot_verts_nn) # + tot_verts_nn += verts_list[i_mesh].shape[0] + merged_verts = np.concatenate(merged_verts, axis=0) + merged_faces = np.concatenate(merged_faces, axis=0) + return merged_verts, merged_faces + + # one single hand or # not very easy to ## no + # self.hand_faces, self.obj_faces + mano_hand_faces_np = self.hand_faces.detach().cpu().numpy() + hand_faces_np = self.robo_hand_faces.detach().cpu().numpy() # ### robot faces # + + if self.use_mano_inputs: + hand_faces_np = mano_hand_faces_np + + if self.optim_sim_model_params_from_mano: + hand_faces_np = mano_hand_faces_np + else: + hand_faces_np_left = self.robot_agent_left.robot_faces.detach().cpu().numpy() + + obj_faces_np = self.obj_faces.detach().cpu().numpy() + + + if self.other_bending_network.canon_passive_obj_verts is None: + init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + # init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + init_passive_obj_verts_center = torch.zeros((3, )).cuda().detach().cpu().numpy() + else: + init_passive_obj_verts = self.other_bending_network.canon_passive_obj_verts.detach().cpu().numpy() + init_passive_obj_verts_center = torch.zeros((3, )).cuda().detach().cpu().numpy() + + # init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + # init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_obj_quaternion = {} + ts_to_obj_rot_mtx = {} + ts_to_obj_trans = {} + ts_to_hand_obj_verts = {} + # for i_ts in range(1, self.nn_timesteps - 1, 10): + for i_ts in range(0, (self.nn_ts - 1) * self.mano_nn_substeps, 1): + if self.optim_sim_model_params_from_mano: + cur_hand_mesh = self.rhand_verts[i_ts] # .detach().cpu().numpy() + else: + cur_hand_mesh = self.ts_to_act_rgt[i_ts] + + # cur_rhand_mano_gt, cur_lhand_mano_gt, gt_obj_pcs, re_trans_obj_pcs # + cur_rhand_mano_gt = self.rhand_verts[i_ts].detach().cpu().numpy() + cur_lhand_mano_gt = self.lhand_verts[i_ts].detach().cpu().numpy() # + gt_obj_pcs = self.timestep_to_passive_mesh[i_ts].detach().cpu().numpy() # + re_trans_obj_pcs = self.re_transformed_obj_verts[i_ts].detach().cpu().numpy() # + + # ts_to_mano_rhand_meshes ## why we have zero gradients ## ## why the values cannot be updated? 
## + if self.optim_sim_model_params_from_mano: + cur_hand_mesh_left = self.lhand_verts[i_ts] ## retargeting ## + cur_hand_mesh_faces_left = self.hand_faces.detach().cpu().numpy() + else: + cur_hand_mesh_left = self.ts_to_act_lft[i_ts] + cur_hand_mesh_faces_left = self.robot_agent_left.robot_faces.detach().cpu().numpy() + + # cur_hand_mesh_left = self.ts_to_act_lft[i_ts] + cur_hand_mesh_left_np = cur_hand_mesh_left.detach().cpu().numpy() + + if i_ts % self.mano_nn_substeps == 0: + cur_mano_rhand = self.rhand_verts[i_ts // self.mano_nn_substeps].detach().cpu().numpy() + cur_rhand_verts = cur_hand_mesh # [: cur_hand_mesh.size(0) // 2] + # cur_lhand_verts = cur_hand_mesh # [cur_hand_mesh.size(0) // 2:] + cur_rhand_verts_np = cur_rhand_verts.detach().cpu().numpy() + # cur_lhand_verts_np = cur_lhand_verts.detach().cpu().numpy() + if i_ts not in self.other_bending_network.timestep_to_optimizable_rot_mtx: # to optimizable rot mtx # + cur_pred_rot_mtx = np.eye(3, dtype=np.float32) + cur_pred_trans = np.zeros((3,), dtype=np.float32) + else: + cur_pred_rot_mtx = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + cur_pred_trans = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + cur_transformed_obj = np.matmul(cur_pred_rot_mtx, (init_passive_obj_verts - init_passive_obj_verts_center).T).T + init_passive_obj_verts_center + np.reshape(cur_pred_trans, (1, 3)) + + if i_ts not in self.other_bending_network.timestep_to_optimizable_quaternion: + ts_to_obj_quaternion[i_ts] = np.zeros((4,), dtype=np.float32) + ts_to_obj_quaternion[i_ts][0] = 1. # ## quaternion ## # + ts_to_obj_rot_mtx[i_ts] = np.eye(3, dtype=np.float32) + ts_to_obj_trans[i_ts] = np.zeros((3,), dtype=np.float32) + else: + ts_to_obj_quaternion[i_ts] = self.other_bending_network.timestep_to_optimizable_quaternion[i_ts].detach().cpu().numpy() + ts_to_obj_rot_mtx[i_ts] = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + ts_to_obj_trans[i_ts] = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + # + ts_to_hand_obj_verts[i_ts] = (cur_rhand_verts_np, cur_hand_mesh_left_np, cur_transformed_obj) # not correct.... 
# + # cur_transformed_obj = self.timestep_to_passive_mesh[i_ts].detach().cpu().numpy() + # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + if i_ts % 10 == 0: + # print(f"exporting meshes i_ts: {i_ts}, cur_hand_verts_np: {cur_rhand_verts_np.shape}, hand_faces_np: {hand_faces_np.shape}") + # print(f"cur_rhand_verts_np: {cur_rhand_verts_np.shape}, cur_hand_mesh_left_np: {cur_hand_mesh_left_np.shape}, cur_transformed_obj: {cur_transformed_obj.shape}, hand_faces_np: {hand_faces_np.shape}, obj_faces_np: {obj_faces_np.shape}") + merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_hand_mesh_left_np, cur_transformed_obj], [hand_faces_np, cur_hand_mesh_faces_left, obj_faces_np]) + # print(f"merged_verts: {merged_verts.shape}, merged_faces: {merged_faces.shape}") + mesh = trimesh.Trimesh(merged_verts, merged_faces) + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + if i_ts % self.mano_nn_substeps == 0: + ### overlayed with the mano mesh ### + merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_hand_mesh_left_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, cur_hand_mesh_faces_left, obj_faces_np, mano_hand_faces_np]) + mesh = trimesh.Trimesh(merged_verts, merged_faces) + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}_wmano.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + ### overlayed with the mano mesh ### + merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_hand_mesh_left_np, cur_mano_rhand], [hand_faces_np, cur_hand_mesh_faces_left, mano_hand_faces_np]) # + mesh = trimesh.Trimesh(merged_verts, merged_faces) + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}_onlyhand.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + # cur_rhand_mano_gt, cur_lhand_mano_gt, gt_obj_pcs, re_trans_obj_pcs # + merged_verts, merged_faces = merge_meshes([cur_rhand_mano_gt, cur_lhand_mano_gt, gt_obj_pcs], [mano_hand_faces_np, mano_hand_faces_np, obj_faces_np]) + mesh = trimesh.Trimesh(merged_verts, merged_faces) + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}_gt_mano_obj.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + merged_verts, merged_faces = merge_meshes([cur_rhand_mano_gt, cur_lhand_mano_gt, re_trans_obj_pcs], [mano_hand_faces_np, mano_hand_faces_np, obj_faces_np]) + mesh = trimesh.Trimesh(merged_verts, merged_faces) + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}_gt_mano_retrans_obj.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + + + ts_sv_dict = { + 'ts_to_obj_quaternion': ts_to_obj_quaternion, + 'ts_to_obj_rot_mtx': ts_to_obj_rot_mtx, + 'ts_to_obj_trans': ts_to_obj_trans, + 'ts_to_hand_obj_verts': ts_to_hand_obj_verts, + 'hand_faces_np': hand_faces_np, + 'cur_hand_mesh_faces_left': cur_hand_mesh_faces_left, + 'obj_faces_np': obj_faces_np + } + + + sv_dict_sv_fn = '{:0>8d}.npy'.format(self.iter_step) + sv_dict_sv_fn = os.path.join(mesh_sv_root_dir, sv_dict_sv_fn) + np.save(sv_dict_sv_fn, ts_sv_dict) + + + + def validate_contact_info_robo(self, ): # validate mesh robo # + def merge_meshes(verts_list, faces_list): + tot_verts_nn = 0 + merged_verts = [] + merged_faces = [] + for i_mesh in range(len(verts_list)): + merged_verts.append(verts_list[i_mesh]) + merged_faces.append(faces_list[i_mesh] + tot_verts_nn) # and you + tot_verts_nn += 
verts_list[i_mesh].shape[0] + merged_verts = np.concatenate(merged_verts, axis=0) + merged_faces = np.concatenate(merged_faces, axis=0) + return merged_verts, merged_faces + + # one single hand or # not very easy to + # self.hand_faces, self.obj_faces # # j + # mano_hand_faces_np = self.hand_faces.detach().cpu().numpy() + # hand_faces_np = self.robo_hand_faces.detach().cpu().numpy() # ### robot faces # + # obj_faces_np = self.obj_faces.detach().cpu().numpy() + init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + # ts_to_obj_quaternion = {} + # ts_to_obj_rot_mtx = {} + # ts_to_obj_trans = {} + # ts_to_hand_obj_verts = {} + ts_to_obj_verts = {} + # for i_ts in range(1, self.nn_timesteps - 1, 10): + for i_ts in range(0,( self.nn_ts - 1) * self.mano_nn_substeps, 1): + cur_hand_mesh = self.timestep_to_active_mesh[i_ts] + # cur_mano_rhand = self.rhand_verts[i_ts].detach().cpu().numpy() # + # cur_rhand_verts = cur_hand_mesh # [: cur_hand_mesh.size(0) // 2] + # cur_lhand_verts = cur_hand_mesh # [cur_hand_mesh.size(0) // 2:] + # cur_rhand_verts_np = cur_rhand_verts.detach().cpu().numpy() + # cur_lhand_verts_np = cur_lhand_verts.detach().cpu().numpy() + if i_ts not in self.other_bending_network.timestep_to_optimizable_rot_mtx: + cur_pred_rot_mtx = np.eye(3, dtype=np.float32) + cur_pred_trans = np.zeros((3,), dtype=np.float32) + else: + cur_pred_rot_mtx = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + cur_pred_trans = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + cur_transformed_obj = np.matmul(cur_pred_rot_mtx, (init_passive_obj_verts - init_passive_obj_verts_center).T).T + init_passive_obj_verts_center + np.reshape(cur_pred_trans, (1, 3)) + + # if i_ts not in self.other_bending_network.timestep_to_optimizable_quaternion: + # ts_to_obj_quaternion[i_ts] = np.zeros((4,), dtype=np.float32) + # ts_to_obj_quaternion[i_ts][0] = 1. 
# ## quaternion ## # + # ts_to_obj_rot_mtx[i_ts] = np.eye(3, dtype=np.float32) + # ts_to_obj_trans[i_ts] = np.zeros((3,), dtype=np.float32) + # else: # + # ts_to_obj_quaternion[i_ts] = self.other_bending_network.timestep_to_optimizable_quaternion[i_ts].detach().cpu().numpy() + # ts_to_obj_rot_mtx[i_ts] = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + # ts_to_obj_trans[i_ts] = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + # + ts_to_obj_verts[i_ts] = cur_transformed_obj # cur_transformed_obj # + # ts_to_hand_obj_verts[i_ts] = (cur_rhand_verts_np, cur_transformed_obj) + # # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + # if i_ts % 10 == 0: + # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj], [hand_faces_np, obj_faces_np]) + # mesh = trimesh.Trimesh(merged_verts, merged_faces) + # mesh_sv_fn = '{:0>8d}_ts_{:0>3d}.ply'.format(self.iter_step, i_ts) + # mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + + # joint torque # joint torque # + cur_sv_penetration_points_fn = os.path.join(self.base_exp_dir, "meshes", f"penetration_points_{self.iter_step}.npy") + # timestep_to_raw_active_meshes, timestep_to_penetration_points, timestep_to_penetration_points_forces + cur_timestep_to_accum_acc = { + ts: self.other_bending_network.timestep_to_accum_acc[ts].detach().cpu().numpy() for ts in self.other_bending_network.timestep_to_accum_acc + } + # save penetration points # joint + cur_sv_penetration_points = { + 'timestep_to_raw_active_meshes': self.timestep_to_raw_active_meshes, + 'timestep_to_penetration_points': self.timestep_to_penetration_points, + 'timestep_to_penetration_points_forces': self.timestep_to_penetration_points_forces, + 'ts_to_obj_verts': ts_to_obj_verts, + 'cur_timestep_to_accum_acc': cur_timestep_to_accum_acc + } + np.save(cur_sv_penetration_points_fn, cur_sv_penetration_points) + + # joint_name_to_penetration_forces_intermediates + cur_sv_joint_penetration_intermediates_fn = os.path.join(self.base_exp_dir, "meshes", f"joint_penetration_intermediates_{self.iter_step}.npy") + np.save(cur_sv_joint_penetration_intermediates_fn, self.joint_name_to_penetration_forces_intermediates) + + + # [upd_contact_active_point_pts, upd_contact_point_position, (upd_contact_active_idxes, upd_contact_passive_idxes), (upd_contact_frame_rotations, upd_contact_frame_translations)] + tot_contact_pairs_set = {} + + for ts in self.contact_pairs_sets: + cur_contact_pairs_set = self.contact_pairs_sets[ts] + if isinstance(cur_contact_pairs_set, dict): + tot_contact_pairs_set[ts] = { + 'upd_contact_active_idxes': cur_contact_pairs_set['contact_active_idxes'].detach().cpu().numpy(), + 'upd_contact_passive_pts': cur_contact_pairs_set['contact_passive_pts'].detach().cpu().numpy(), # in contact indexes # + } + else: + [upd_contact_active_point_pts, upd_contact_point_position, (upd_contact_active_idxes, upd_contact_passive_idxes), (upd_contact_frame_rotations, upd_contact_frame_translations)] = cur_contact_pairs_set + tot_contact_pairs_set[ts] = { + 'upd_contact_active_idxes': upd_contact_active_idxes.detach().cpu().numpy(), + 'upd_contact_passive_idxes': upd_contact_passive_idxes.detach().cpu().numpy(), # in contact indexes # + + } + # sampled_verts_idxes + tot_contact_pairs_set['sampled_verts_idxes'] = self.sampled_verts_idxes.detach().cpu().numpy() + tot_contact_pairs_set_fn = 
os.path.join(self.base_exp_dir, "meshes", f"tot_contact_pairs_set_{self.iter_step}.npy") + np.save(tot_contact_pairs_set_fn, tot_contact_pairs_set) + + # self.ts_to_contact_force_d[cur_ts] = self.other_bending_network.contact_force_d.detach().cpu().numpy() + # self.ts_to_penalty_frictions[cur_ts] = self.other_bending_network.penalty_friction_tangential_forces.detach().cpu().numpy() + + contact_forces_dict = { + 'ts_to_contact_force_d': self.ts_to_contact_force_d, + 'ts_to_penalty_frictions': self.ts_to_penalty_frictions, + 'ts_to_penalty_disp_pts': self.ts_to_penalty_disp_pts, + 'cur_timestep_to_accum_acc': cur_timestep_to_accum_acc, + 'ts_to_passive_normals': self.ts_to_passive_normals, + 'ts_to_passive_pts': self.ts_to_passive_pts, + 'ts_to_contact_passive_normals': self.ts_to_contact_passive_normals, ## the normal directions of the inc contact apssive object points # + } + contact_forces_dict_sv_fn = os.path.join(self.base_exp_dir, "meshes", f"contact_forces_dict_{self.iter_step}.npy") + np.save(contact_forces_dict_sv_fn, contact_forces_dict) + + # [upd_contact_active_point_pts, upd_contact_point_position, (upd_contact_active_idxes, upd_contact_passive_idxes), (upd_contact_frame_rotations, upd_contact_frame_translations)] = self.contact_pairs_set + # contact_pairs_info = { + # 'upd_contact_active_idxes': upd_contact_active_idxes.detach().cpu().numpy() + # } + + + + # train_robot_actions_from_mano_model_rules + def validate_mesh_expanded_pts(self, ): + # one single hand or + # self.hand_faces, self.obj_faces # + mano_hand_faces_np = self.hand_faces.detach().cpu().numpy() + # validate_mesh_expanded_pts = self.faces.detach().cpu().numpy() # ### robot faces # + # obj_faces_np = self.obj_faces.detach().cpu().numpy() + init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_obj_quaternion = {} + ts_to_obj_rot_mtx = {} + ts_to_obj_trans = {} + ts_to_transformed_obj = {} + ts_to_active_pts = {} + ts_to_mano_hand_verts = {} + # for i_ts in range(1, self.nn_timesteps - 1, 10): + for i_ts in range(0, self.nn_ts, 10): + cur_hand_mesh = self.timestep_to_active_mesh[i_ts] + cur_mano_rhand = self.rhand_verts[i_ts].detach().cpu().numpy() + cur_rhand_verts = cur_hand_mesh # [: cur_hand_mesh.size(0) // 2] + # cur_lhand_verts = cur_hand_mesh # [cur_hand_mesh.size(0) // 2:] + cur_rhand_verts_np = cur_rhand_verts.detach().cpu().numpy() + # cur_lhand_verts_np = cur_lhand_verts.detach().cpu().numpy() + if i_ts not in self.other_bending_network.timestep_to_optimizable_rot_mtx: + cur_pred_rot_mtx = np.eye(3, dtype=np.float32) + cur_pred_trans = np.zeros((3,), dtype=np.float32) + else: + cur_pred_rot_mtx = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + cur_pred_trans = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + cur_transformed_obj = np.matmul(cur_pred_rot_mtx, (init_passive_obj_verts - init_passive_obj_verts_center).T).T + init_passive_obj_verts_center + np.reshape(cur_pred_trans, (1, 3)) + + if i_ts not in self.other_bending_network.timestep_to_optimizable_quaternion: + ts_to_obj_quaternion[i_ts] = np.zeros((4,), dtype=np.float32) + ts_to_obj_quaternion[i_ts][0] = 1. 
# ## quaternion ## # + ts_to_obj_rot_mtx[i_ts] = np.eye(3, dtype=np.float32) + ts_to_obj_trans[i_ts] = np.zeros((3,), dtype=np.float32) + else: + ts_to_obj_quaternion[i_ts] = self.other_bending_network.timestep_to_optimizable_quaternion[i_ts].detach().cpu().numpy() + ts_to_obj_rot_mtx[i_ts] = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + ts_to_obj_trans[i_ts] = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + # + ts_to_transformed_obj[i_ts] = cur_transformed_obj # .detach().cpu().numpy() + ts_to_active_pts[i_ts] = cur_hand_mesh.detach().cpu().numpy() + ts_to_mano_hand_verts[i_ts] = cur_mano_rhand + # merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + # mesh = trimesh.Trimesh(merged_verts, merged_faces) + # mesh_sv_fn = '{:0>8d}_ts_{:0>3d}.ply'.format(self.iter_step, i_ts) + # mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) # mesh_sv_fn # + ts_sv_dict = { + 'ts_to_obj_quaternion': ts_to_obj_quaternion, + 'ts_to_obj_rot_mtx': ts_to_obj_rot_mtx, + 'ts_to_obj_trans': ts_to_obj_trans, + 'ts_to_transformed_obj': ts_to_transformed_obj, + 'ts_to_active_pts': ts_to_active_pts, + 'ts_to_mano_hand_verts': ts_to_mano_hand_verts, + 'hand_faces': mano_hand_faces_np, + # 'fobj_faces': obj_faces_np, + } + sv_dict_sv_fn = '{:0>8d}.npy'.format(self.iter_step) + sv_dict_sv_fn = os.path.join(mesh_sv_root_dir, sv_dict_sv_fn) + np.save(sv_dict_sv_fn, ts_sv_dict) + + def validate_mesh_mano(self, ): + def merge_meshes(verts_list, faces_list): + tot_verts_nn = 0 + merged_verts = [] + merged_faces = [] + for i_mesh in range(len(verts_list)): + merged_verts.append(verts_list[i_mesh]) + merged_faces.append(faces_list[i_mesh] + tot_verts_nn) # and you + tot_verts_nn += verts_list[i_mesh].shape[0] + merged_verts = np.concatenate(merged_verts, axis=0) + merged_faces = np.concatenate(merged_faces, axis=0) + return merged_verts, merged_faces + + # one single hand or + # self.hand_faces, self.obj_faces # + mano_hand_faces_np = self.hand_faces.detach().cpu().numpy() + hand_faces_np = self.faces.detach().cpu().numpy() # ### robot faces # + obj_faces_np = self.obj_faces.detach().cpu().numpy() + init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_obj_quaternion = {} + ts_to_obj_rot_mtx = {} + ts_to_obj_trans = {} + # for i_ts in range(1, self.nn_timesteps - 1, 10): + for i_ts in range(0, self.nn_ts, 10): + cur_hand_mesh = self.timestep_to_active_mesh[i_ts] + cur_mano_rhand = self.rhand_verts[i_ts].detach().cpu().numpy() + cur_rhand_verts = cur_hand_mesh # [: cur_hand_mesh.size(0) // 2] + # cur_lhand_verts = cur_hand_mesh # [cur_hand_mesh.size(0) // 2:] + cur_rhand_verts_np = cur_rhand_verts.detach().cpu().numpy() + # cur_lhand_verts_np = cur_lhand_verts.detach().cpu().numpy() + if i_ts not in self.other_bending_network.timestep_to_optimizable_rot_mtx: + cur_pred_rot_mtx = np.eye(3, dtype=np.float32) + cur_pred_trans = np.zeros((3,), dtype=np.float32) + else: + cur_pred_rot_mtx = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + cur_pred_trans = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + cur_transformed_obj = 
np.matmul(cur_pred_rot_mtx, (init_passive_obj_verts - init_passive_obj_verts_center).T).T + init_passive_obj_verts_center + np.reshape(cur_pred_trans, (1, 3)) + + if i_ts not in self.other_bending_network.timestep_to_optimizable_quaternion: + ts_to_obj_quaternion[i_ts] = np.zeros((4,), dtype=np.float32) + ts_to_obj_quaternion[i_ts][0] = 1. # ## quaternion ## # + ts_to_obj_rot_mtx[i_ts] = np.eye(3, dtype=np.float32) + ts_to_obj_trans[i_ts] = np.zeros((3,), dtype=np.float32) + else: + ts_to_obj_quaternion[i_ts] = self.other_bending_network.timestep_to_optimizable_quaternion[i_ts].detach().cpu().numpy() + ts_to_obj_rot_mtx[i_ts] = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + ts_to_obj_trans[i_ts] = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + # + merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_transformed_obj, cur_mano_rhand], [hand_faces_np, obj_faces_np, mano_hand_faces_np]) + mesh = trimesh.Trimesh(merged_verts, merged_faces) + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + ts_sv_dict = { + 'ts_to_obj_quaternion': ts_to_obj_quaternion, + 'ts_to_obj_rot_mtx': ts_to_obj_rot_mtx, + 'ts_to_obj_trans': ts_to_obj_trans, + } + sv_dict_sv_fn = '{:0>8d}.npy'.format(self.iter_step) + sv_dict_sv_fn = os.path.join(mesh_sv_root_dir, sv_dict_sv_fn) + np.save(sv_dict_sv_fn, ts_sv_dict) + + + def validate_mesh(self, world_space=False, resolution=64, threshold=0.0): + def merge_meshes(verts_list, faces_list): + tot_verts_nn = 0 + merged_verts = [] + merged_faces = [] + for i_mesh in range(len(verts_list)): + merged_verts.append(verts_list[i_mesh]) + merged_faces.append(faces_list[i_mesh] + tot_verts_nn) # and you + tot_verts_nn += verts_list[i_mesh].shape[0] + merged_verts = np.concatenate(merged_verts, axis=0) + merged_faces = np.concatenate(merged_faces, axis=0) + return merged_verts, merged_faces + + + + if self.train_multi_seqs: + # seq_idx = torch.randint(low=0, high=len(self.rhand_verts), size=(1,)).item() + seq_idx = self.seq_idx + cur_hand_faces = self.hand_faces[seq_idx] + cur_obj_faces = self.obj_faces[seq_idx] + timestep_to_passive_mesh = self.obj_verts[seq_idx] + timestep_to_active_mesh = self.hand_verts[seq_idx] + else: + cur_hand_faces = self.hand_faces + cur_obj_faces = self.obj_faces + timestep_to_passive_mesh = self.timestep_to_passive_mesh + timestep_to_active_mesh = self.timestep_to_active_mesh + + + # one single hand or + # self.hand_faces, self.obj_faces # + # hand_faces_np = self.hand_faces.detach().cpu().numpy() + # obj_faces_np = self.obj_faces.detach().cpu().numpy() + # init_passive_obj_verts = self.timestep_to_passive_mesh[0].detach().cpu().numpy() + + hand_faces_np = cur_hand_faces.detach().cpu().numpy() + obj_faces_np = cur_obj_faces.detach().cpu().numpy() + init_passive_obj_verts = timestep_to_passive_mesh[0].detach().cpu().numpy() + init_passive_obj_verts_center = np.mean(init_passive_obj_verts, axis=0, keepdims=True) + mesh_sv_root_dir = os.path.join(self.base_exp_dir, 'meshes') + os.makedirs(mesh_sv_root_dir, exist_ok=True) + ts_to_obj_quaternion = {} + ts_to_obj_rot_mtx = {} + ts_to_obj_trans = {} + for i_ts in range(1, self.n_timesteps, 10): + # cur_hand_mesh = self.timestep_to_active_mesh[i_ts] + cur_hand_mesh = timestep_to_active_mesh[i_ts] + cur_rhand_verts = cur_hand_mesh[: cur_hand_mesh.size(0) // 2] + cur_lhand_verts = cur_hand_mesh[cur_hand_mesh.size(0) // 2: ] + 
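# Editorial note (sketch only): the validation dictionaries below store both a quaternion
# and a rotation matrix per timestep; the identity fallback sets quaternion[0] = 1, which
# suggests a w-first (w, x, y, z) convention.  Assuming that convention, the two entries
# can be cross-checked with a converter like this (illustrative, not part of the runner):
#
#   import numpy as np
#
#   def quat_wxyz_to_rot_mtx(q):
#       w, x, y, z = q / np.linalg.norm(q)
#       return np.array([
#           [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
#           [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
#           [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
#       ], dtype=np.float32)
#
#   # e.g. np.allclose(quat_wxyz_to_rot_mtx(ts_to_obj_quaternion[t]), ts_to_obj_rot_mtx[t], atol=1e-5)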
cur_rhand_verts_np = cur_rhand_verts.detach().cpu().numpy() + cur_lhand_verts_np = cur_lhand_verts.detach().cpu().numpy() + cur_pred_rot_mtx = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + cur_pred_trans = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + cur_transformed_obj = np.matmul(cur_pred_rot_mtx, (init_passive_obj_verts - init_passive_obj_verts_center).T).T + init_passive_obj_verts_center + np.reshape(cur_pred_trans, (1, 3)) + + ts_to_obj_quaternion[i_ts] = self.other_bending_network.timestep_to_optimizable_quaternion[i_ts].detach().cpu().numpy() + ts_to_obj_rot_mtx[i_ts] = self.other_bending_network.timestep_to_optimizable_rot_mtx[i_ts].detach().cpu().numpy() + ts_to_obj_trans[i_ts] = self.other_bending_network.timestep_to_optimizable_total_def[i_ts].detach().cpu().numpy() + # + merged_verts, merged_faces = merge_meshes([cur_rhand_verts_np, cur_lhand_verts_np, cur_transformed_obj], [hand_faces_np, hand_faces_np, obj_faces_np]) + mesh = trimesh.Trimesh(merged_verts, merged_faces) + mesh_sv_fn = '{:0>8d}_ts_{:0>3d}.ply'.format(self.iter_step, i_ts) + mesh.export(os.path.join(mesh_sv_root_dir, mesh_sv_fn)) + ts_sv_dict = { + 'ts_to_obj_quaternion': ts_to_obj_quaternion, + 'ts_to_obj_rot_mtx': ts_to_obj_rot_mtx, + 'ts_to_obj_trans': ts_to_obj_trans, + } + sv_dict_sv_fn = '{:0>8d}.npy'.format(self.iter_step) + sv_dict_sv_fn = os.path.join(mesh_sv_root_dir, sv_dict_sv_fn) + np.save(sv_dict_sv_fn, ts_sv_dict) + + + def interpolate_view(self, img_idx_0, img_idx_1): + images = [] + n_frames = 60 + for i in range(n_frames): + print(i) + images.append(self.render_novel_image(img_idx_0, + img_idx_1, + np.sin(((i / n_frames) - 0.5) * np.pi) * 0.5 + 0.5, + resolution_level=4)) + for i in range(n_frames): + images.append(images[n_frames - i - 1]) + + fourcc = cv.VideoWriter_fourcc(*'mp4v') + video_dir = os.path.join(self.base_exp_dir, 'render') + os.makedirs(video_dir, exist_ok=True) + h, w, _ = images[0].shape + writer = cv.VideoWriter(os.path.join(video_dir, + '{:0>8d}_{}_{}.mp4'.format(self.iter_step, img_idx_0, img_idx_1)), + fourcc, 30, (w, h)) + + for image in images: + writer.write(image) + + writer.release() + + + + +if __name__ == '__main__': + + + print('Hello Wooden') + + torch.set_default_tensor_type('torch.cuda.FloatTensor') + + FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s" + logging.basicConfig(level=logging.DEBUG, format=FORMAT) + + parser = argparse.ArgumentParser() + parser.add_argument('--conf', type=str, default='./confs/base.conf') + parser.add_argument('--mode', type=str, default='train') + parser.add_argument('--mcube_threshold', type=float, default=0.0) + parser.add_argument('--is_continue', default=False, action="store_true") + parser.add_argument('--gpu', type=int, default=0) + parser.add_argument('--case', type=str, default='') + parser.add_argument('--data_fn', type=str, default='') + + args = parser.parse_args() + + torch.cuda.set_device(args.gpu) + runner = Runner(args.conf, args.mode, args.case, args.is_continue) + + bending_net_type = runner.conf['model.bending_net_type'] + + + # if args.mode == 'train': + # runner.train() + # elif args.mode == 'train_def': # + # runner.train_def() + # elif args.mode == 'train_from_model_rules': # from model rules # + # runner.train_from_model_rules() # + # elif args.mode == 'train_sdf_from_model_rules': + # runner.train_sdf_from_model_rules() + # elif args.mode == 'train_actions_from_model_rules': + # 
runner.train_actions_from_model_rules() + # if args.mode == 'train_mano_actions_from_model_rules': + # runner.train_mano_actions_from_model_rules() ## + # elif args.mode == 'train_actions_from_mano_model_rules': + # runner.train_robot_actions_from_mano_model_rules_v3() ## + # elif args.mode == 'train_real_robot_actions_from_mano_model_rules': ## + # # runner.train_real_robot_actions_from_mano_model_rules() ## + # # runner.train_real_robot_actions_from_mano_model_rules_v2() ## + # if bending_net_type in ["active_force_field_v15", 'active_force_field_v16']: + # runner.train_real_robot_actions_from_mano_model_rules_v4() + # elif bending_net_type in ["active_force_field_v17"]: + # runner.train_real_robot_actions_from_mano_model_rules_v5() ### train the mano model rules v5 ## + # else: + # runner.train_real_robot_actions_from_mano_model_rules_v3() ## + # # runner.train_real_robot_actions_from_mano_model_rules_v4() ## train the model rules v4 ## + + # elif args.mode == 'train_real_robot_actions_from_mano_model_rules_diffhand': + # runner.train_real_robot_actions_from_mano_model_rules_v5_diffhand() ### trai + + # elif args.mode == 'train_real_robot_actions_from_mano_model_rules_diffhand_fortest': + # # train_real_robot_actions_from_mano_model_rules_v5_diffhand_fortest + # runner.train_real_robot_actions_from_mano_model_rules_v5_diffhand_fortest() + + # elif args.mode == 'train_real_robot_actions_from_mano_model_rules_manohand_fortest': + # # runner. + # runner.train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest() + # # pass + # elif args.mode == 'train_real_robot_actions_from_mano_model_rules_manohand_fortest_states': + # runner.train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states() + + # elif args.mode == "train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_world": + # runner.train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_world() + + # elif args.mode == "train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_rl": + # runner.train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_rl() ## res rl for test states ## + + # elif args.mode == "train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_grab": + # runner.train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_grab() + + + ############# dynamic mano optimization and the pointset representation trajectory optimization ############# + ########## Dynamic MANO optimization ########## + if args.mode == "train_dyn_mano_model": + runner.train_dyn_mano_model() # + + elif args.mode == "train_dyn_mano_model_wreact": + runner.train_dyn_mano_model_wreact() + + ########## Retargeting -- Expanded set motion ########## + elif args.mode == "train_point_set": + ts_to_hand_obj_verts = runner.train_point_set_dyn() + mano_pts_retar_sv_info_fn = os.path.join(runner.base_exp_dir, f"ts_to_hand_obj_verts.npy") + np.save(mano_pts_retar_sv_info_fn, ts_to_hand_obj_verts) + print(f"optimized info saved to {mano_pts_retar_sv_info_fn}") + # return + + ########## Retargeting -- Expanded set motion retargeting ########## + elif args.mode == "train_point_set_retar": ### retargeting ### + runner.train_point_set_retargeting() ### retargeting ### + ############# dynamic mano optimization and the pointset representation trajectory optimization ############# ## optimization ## + + ########## Retargeting -- Expanded set motion retargeting ########## + elif args.mode == "train_point_set_retar_pts": ### 
retargeting ### + runner.train_point_set_retargeting_pts() ### retargeting ### + ############# dynamic mano optimization and the pointset representation trajectory optimization ############# ## optimization ## + + ########## Retargeting -- GRAB & TACO ########## + elif args.mode == 'train_sparse_retar': + runner.train_sparse_retar() + + # ########## Retargeting -- ARCTIC ########## + # elif args.mode == "train_finger_kinematics_retargeting_arctic_twohands": + # runner.train_finger_kinematics_retargeting_arctic_twohands() + + # ########## GRAB & TACO ########## + # elif args.mode == "train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab": + # # runner.train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab() + # if runner.use_multi_stages: # try the actons + # runner.train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab_multi_stages() + # else: + # runner.train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab() + + # ########## ARCTIC ########## + # elif args.mode == "train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_arctic_twohands": + # if runner.use_multi_stages: # try the + # runner.train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_arctic_twohands_multi_stages() + # else: + # runner.train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_arctic_twohands() + + elif args.mode == "train_real_robot_actions_from_mano_model_rules_shadowhand": + runner.train_real_robot_actions_from_mano_model_rules_shadowhand() + + + elif args.mode == "train_redmax_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab": + runner.train_redmax_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab() + + + diff --git a/models/data_utils_torch.py b/models/data_utils_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..020bb5783ae69ccd48c5c6caa28c786141c45113 --- /dev/null +++ b/models/data_utils_torch.py @@ -0,0 +1,1547 @@ +"""Mesh data utilities.""" +from re import I +# import matplotlib.pyplot as plt +# from mpl_toolkits import mplot3d # pylint: disable=unused-import +# from mpl_toolkits.mplot3d.art3d import Poly3DCollection +import networkx as nx +import numpy as np +import six +import os +import math +import torch +import models.fields as fields +# from options.options import opt +# from polygen_torch. 
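# Editorial compatibility note: several helpers further down in this file pass
# dtype=np.float or np.long to NumPy; those aliases are deprecated and removed in recent
# NumPy releases, where they raise AttributeError.  The direct fix is np.float32/np.float64
# and np.int64 at the call sites; if the call sites cannot be touched, a guarded shim such
# as the following (an assumption about the intended precision, not part of the original
# code) keeps the module importable:
#
#   import numpy as np
#   if not hasattr(np, "float"):
#       np.float = np.float64  # assumed intent: plain Python float
#   if not hasattr(np, "long"):
#       np.long = np.int64     # assumed intent: plain Python int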
+# from utils.constants import MASK_GRID_VALIE + +try: + from torch_cluster import fps +except: + pass + +MAX_RANGE = 0.1 +MIN_RANGE = -0.1 + +# import open3d as o3d + +def calculate_correspondences(last_mesh, bending_network, tot_timesteps, delta_bending): + # iterate all the timesteps and get the bended + timestep_to_pts = { + tot_timesteps - 1: last_mesh.detach().cpu().numpy() + } + if delta_bending: + + for i_ts in range(tot_timesteps - 1, -1, -1): + if isinstance(bending_network, list): + tot_offsets = [] + for i_bending, cur_bending_net in enumerate(bending_network): + cur_def_pts = cur_bending_net(last_mesh, i_ts) + tot_offsets.append(cur_def_pts - last_mesh) + tot_offsets = torch.stack(tot_offsets, dim=0) + tot_offsets = torch.sum(tot_offsets, dim=0) + last_mesh = last_mesh + tot_offsets + else: + last_mesh = bending_network(last_mesh, i_ts) + timestep_to_pts[i_ts - 1] = last_mesh.detach().cpu().numpy() + elif isinstance(bending_network, fields.BendingNetworkRigidTrans): + for i_ts in range(tot_timesteps - 1, -1, -1): + last_mesh = bending_network.forward_delta(last_mesh, i_ts) + timestep_to_pts[i_ts - 1] = last_mesh.detach().cpu().numpy() + else: + raise ValueError(f"the function is designed for delta bending") + return timestep_to_pts + # pass + +def joint_infos_to_numpy(joint_infos): + joint_infos_np = [] + for part_joint_info in joint_infos: + for zz in ["dir", "center"]: + # if isinstance(part_joint_info["axis"][zz], np.array): + part_joint_info["axis"][zz] = part_joint_info["axis"][zz].detach().numpy() + joint_infos_np.append(part_joint_info) + return joint_infos_np + + +def normalie_pc_bbox_batched(pc, rt_stats=False): + pc_min = torch.min(pc, dim=1, keepdim=True)[0] + pc_max = torch.max(pc, dim=1, keepdim=True)[0] + pc_center = 0.5 * (pc_min + pc_max) + + pc = pc - pc_center + extents = pc_max - pc_min + scale = torch.sqrt(torch.sum(extents ** 2, dim=-1, keepdim=True)) + + pc = pc / torch.clamp(scale, min=1e-6) + if rt_stats: + return pc, pc_center, scale + else: + return pc + + + +def scale_vertices_to_target_scale(vertices, target_scale): + # vertices: bsz x N x 3; + # target_scale: bsz x 1 +# normalized_vertices = normalize_vertices_scale_torch(vertices) + normalized_vertices = normalie_pc_bbox_batched(vertices) + normalized_vertices = normalized_vertices * target_scale.unsqueeze(1) + return normalized_vertices + +def compute_normals_o3d(verts, faces): #### assume no batching... 
#### + mesh = o3d.geometry.TriangleMesh() + # o3d_mesh_b.vertices = verts_b + # o3d_mesh_b.triangles = np.array(faces_b, dtype=np.long) + mesh.vertices = verts.detach().cpu().numpy() + mesh.triangles = faces.detach().cpu().numpy().astype(np.long) + verts_normals = mesh.compute_vertex_normals(normalized=True) + verts_normals = torch.from_numpy(verts_normals, dtype=torch.float32).cuda() + return verts_normals + + +def get_vals_via_nearest_neighbours(pts_src, pts_tar, val_tar): + ### n_src x 3 ---> n_src x n_tar + dist_src_tar = torch.sum((pts_src.unsqueeze(-2) - pts_tar.unsqueeze(0)) ** 2, dim=-1) + minn_dists_src_tar, minn_dists_tar_idxes = torch.min(dist_src_tar, dim=-1) ### n_src + selected_src_val = batched_index_select(values=val_tar, indices=minn_dists_tar_idxes, dim=0) ### n_src x dim + return selected_src_val + + + +## sample conected componetn start from selected_verts +def sample_bfs_component(selected_vert, faces, max_num_grids): + vert_idx_to_adj_verts = {} + for i_f, cur_f in enumerate(faces): + # for i0, v0 in enumerate(cur_f): + for i0 in range(len(cur_f)): + v0 = cur_f[i0] - 1 + i1 = (i0 + 1) % len(cur_f) + v1 = cur_f[i1] - 1 + if v0 not in vert_idx_to_adj_verts: + vert_idx_to_adj_verts[v0] = {v1: 1} + else: + vert_idx_to_adj_verts[v0][v1] = 1 + if v1 not in vert_idx_to_adj_verts: + vert_idx_to_adj_verts[v1] = {v0: 1} + else: + vert_idx_to_adj_verts[v1][v0] = 1 + vert_idx_to_visited = {} # whether visisted here # + vis_que = [selected_vert] + vert_idx_to_visited[selected_vert] = 1 + visited = [selected_vert] + while len(vis_que) > 0 and len(visited) < max_num_grids: + cur_frnt_vert = vis_que[0] + vis_que.pop(0) + if cur_frnt_vert in vert_idx_to_adj_verts: + cur_frnt_vert_adjs = vert_idx_to_adj_verts[cur_frnt_vert] + for adj_vert in cur_frnt_vert_adjs: + if adj_vert not in vert_idx_to_visited: + vert_idx_to_visited[adj_vert] = 1 + vis_que.append(adj_vert) + visited.append(adj_vert) + if len(visited) >= max_num_grids: + visited = visited[: max_num_grids - 1] + return visited + +def select_faces_via_verts(selected_verts, faces): + if not isinstance(selected_verts, list): + selected_verts = selected_verts.tolist() + # selected_verts_dict = {ii: 1 for ii in selected_verts} + old_idx_to_new_idx = {v + 1: ii + 1 for ii, v in enumerate(selected_verts)} ####### v + 1: ii + 1 --> for the selected_verts + new_faces = [] + for i_f, cur_f in enumerate(faces): + cur_new_f = [] + valid = True + for cur_v in cur_f: + if cur_v in old_idx_to_new_idx: + cur_new_f.append(old_idx_to_new_idx[cur_v]) + else: + valid = False + break + if valid: + new_faces.append(cur_new_f) + return new_faces + + +def convert_grid_content_to_grid_pts(content_value, grid_size): + flat_grid = torch.zeros([grid_size ** 3], dtype=torch.long) + cur_idx = flat_grid.size(0) - 1 + while content_value > 0: + flat_grid[cur_idx] = content_value % grid_size + content_value = content_value // grid_size + cur_idx -= 1 + grid_pts = flat_grid.contiguous().view(grid_size, grid_size, grid_size).contiguous() + return grid_pts + +# 0.2 +def warp_coord(sampled_gradients, val, reflect=False): # val from [0.0, 1.0] # from the 0.0 + # assume single value as inputs + grad_values = sampled_gradients.tolist() + # mid_val + mid_val = grad_values[0] * 0.2 + grad_values[1] * 0.2 + grad_values[2] * 0.1 + if reflect: + grad_values[3] = grad_values[1] + grad_values[4] = grad_values[0] + + # if not reflect: + accum_val = 0.0 + for i_val in range(len(grad_values)): + if val > 0.2 * (i_val + 1) and i_val < 4: # if i_val == 4, directly use the 
reamining length * corresponding gradient value + accum_val += grad_values[i_val] * 0.2 + else: + accum_val += grad_values[i_val] * (val - 0.2 * i_val) + break + return accum_val # modified value + +def random_shift(vertices, shift_factor=0.25): + """Apply random shift to vertices.""" + # max_shift_pos = tf.cast(255 - tf.reduce_max(vertices, axis=0), tf.float32) + + # max_shift_pos = tf.maximum(max_shift_pos, 1e-9) + + # max_shift_neg = tf.cast(tf.reduce_min(vertices, axis=0), tf.float32) + # max_shift_neg = tf.maximum(max_shift_neg, 1e-9) + + # shift = tfd.TruncatedNormal( + # tf.zeros([1, 3]), shift_factor*255, -max_shift_neg, + # max_shift_pos).sample() + # shift = tf.cast(shift, tf.int32) + # vertices += shift + + minn_tensor = torch.tensor([1e-9], dtype=torch.float32) + + max_shift_pos = (255 - torch.max(vertices, dim=0)[0]).float() + max_shift_pos = torch.maximum(max_shift_pos, minn_tensor) + max_shift_neg = (torch.min(vertices, dim=0)[0]).float() + max_shift_neg = torch.maximum(max_shift_neg, minn_tensor) + + shift = torch.zeros((1, 3), dtype=torch.float32) + # torch.nn.init.trunc_normal_(shift, 0., shift_factor * 255., -max_shift_neg, max_shift_pos) + for i_s in range(shift.size(-1)): + cur_axis_max_shift_neg = max_shift_neg[i_s].item() + cur_axis_max_shift_pos = max_shift_pos[i_s].item() + cur_axis_shift = torch.zeros((1,), dtype=torch.float32) + + torch.nn.init.trunc_normal_(cur_axis_shift, 0., shift_factor * 255., -cur_axis_max_shift_neg, cur_axis_max_shift_pos) + shift[:, i_s] = cur_axis_shift.item() + + shift = shift.long() + vertices += shift + + return vertices + +def safe_transpose(tsr, dima, dimb): + tsr = tsr.contiguous().transpose(dima, dimb).contiguous() + return tsr + +def batched_index_select(values, indices, dim = 1): + value_dims = values.shape[(dim + 1):] + values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices)) + indices = indices[(..., *((None,) * len(value_dims)))] + indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims) + value_expand_len = len(indices_shape) - (dim + 1) + values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)] + + value_expand_shape = [-1] * len(values.shape) + expand_slice = slice(dim, (dim + value_expand_len)) + value_expand_shape[expand_slice] = indices.shape[expand_slice] + values = values.expand(*value_expand_shape) + + dim += value_expand_len + return values.gather(dim, indices) + +def read_obj_file_ours(obj_fn, sub_one=False): + vertices = [] + faces = [] + with open(obj_fn, "r") as rf: + for line in rf: + items = line.strip().split(" ") + if items[0] == 'v': + cur_verts = items[1:] + cur_verts = [float(vv) for vv in cur_verts] + vertices.append(cur_verts) + elif items[0] == 'f': + cur_faces = items[1:] # faces + cur_face_idxes = [] + for cur_f in cur_faces: + try: + cur_f_idx = int(cur_f.split("/")[0]) + except: + cur_f_idx = int(cur_f.split("//")[0]) + cur_face_idxes.append(cur_f_idx if not sub_one else cur_f_idx - 1) + faces.append(cur_face_idxes) + rf.close() + vertices = np.array(vertices, dtype=np.float) + return vertices, faces + +def read_obj_file(obj_file): + """Read vertices and faces from already opened file.""" + vertex_list = [] + flat_vertices_list = [] + flat_vertices_indices = {} + flat_triangles = [] + + for line in obj_file: + tokens = line.split() + if not tokens: + continue + line_type = tokens[0] + # We skip lines not starting with v or f. 
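# Editorial note: OBJ face corners are 1-indexed and may look like "3", "3/7", or "3//2"
# (position / texture / normal).  The loop below splits on "/", skips empty fields, and
# looks each remaining index up in vertex_list with a "- 1" shift, concatenating the
# results into one flat per-corner vertex that is then deduplicated through
# flat_vertices_indices (the same 1-based convention handled by read_obj_file_ours above).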
+ if line_type == 'v': # + vertex_list.append([float(x) for x in tokens[1:]]) + elif line_type == 'f': + triangle = [] + for i in range(len(tokens) - 1): + vertex_name = tokens[i + 1] + if vertex_name in flat_vertices_indices: # triangles + triangle.append(flat_vertices_indices[vertex_name]) + continue + flat_vertex = [] + for index in six.ensure_str(vertex_name).split('/'): + if not index: + continue + # obj triangle indices are 1 indexed, so subtract 1 here. + flat_vertex += vertex_list[int(index) - 1] + flat_vertex_index = len(flat_vertices_list) + flat_vertices_list.append(flat_vertex) + # flat_vertex_index + flat_vertices_indices[vertex_name] = flat_vertex_index + triangle.append(flat_vertex_index) + flat_triangles.append(triangle) + + return np.array(flat_vertices_list, dtype=np.float32), flat_triangles + + +def batched_index_select(values, indices, dim = 1): + value_dims = values.shape[(dim + 1):] + values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices)) + indices = indices[(..., *((None,) * len(value_dims)))] + indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims) + value_expand_len = len(indices_shape) - (dim + 1) + values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)] + + value_expand_shape = [-1] * len(values.shape) + expand_slice = slice(dim, (dim + value_expand_len)) + value_expand_shape[expand_slice] = indices.shape[expand_slice] + values = values.expand(*value_expand_shape) + + dim += value_expand_len + return values.gather(dim, indices) + + +def safe_transpose(x, dim1, dim2): + x = x.contiguous().transpose(dim1, dim2).contiguous() + return x + +def merge_meshes(vertices_list, faces_list): + tot_vertices = [] + tot_faces = [] + nn_verts = 0 + for cur_vertices, cur_faces in zip(vertices_list, faces_list): + tot_vertices.append(cur_vertices) + new_cur_faces = [] + for cur_face_idx in cur_faces: + new_cur_face_idx = [vert_idx + nn_verts for vert_idx in cur_face_idx] + new_cur_faces.append(new_cur_face_idx) + nn_verts += cur_vertices.shape[0] + tot_faces += new_cur_faces # get total-faces + tot_vertices = np.concatenate(tot_vertices, axis=0) + return tot_vertices, tot_faces + + +def read_obj(obj_path): + """Open .obj file from the path provided and read vertices and faces.""" + + with open(obj_path) as obj_file: + return read_obj_file_ours(obj_path, sub_one=True) + # return read_obj_file(obj_file) + + + + +def center_vertices(vertices): + """Translate the vertices so that bounding box is centered at zero.""" + vert_min = vertices.min(axis=0) + vert_max = vertices.max(axis=0) + vert_center = 0.5 * (vert_min + vert_max) + return vertices - vert_center + + +def normalize_vertices_scale(vertices): + """Scale the vertices so that the long diagonal of the bounding box is one.""" + vert_min = vertices.min(axis=0) + vert_max = vertices.max(axis=0) + extents = vert_max - vert_min + scale = np.sqrt(np.sum(extents**2)) # normalize the diagonal line to 1. 
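# Editorial worked example: for a unit cube the extents are (1, 1, 1), so
# scale = sqrt(1 + 1 + 1) = sqrt(3) ~ 1.732; dividing by it makes the bounding-box
# diagonal exactly 1, which keeps centered coordinates inside the default
# [-0.5, 0.5] range assumed by quantize_verts further down.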
+ return vertices / scale + + +def get_vertices_center(vertices): + vert_min = vertices.min(axis=0) + vert_max = vertices.max(axis=0) + vert_center = 0.5 * (vert_min + vert_max) + return vert_center + +def get_batched_vertices_center(vertices): + vert_min = vertices.min(axis=1) + vert_max = vertices.max(axis=1) + vert_center = 0.5 * (vert_min + vert_max) + return vert_center + +def get_vertices_scale(vertices): + vert_min = vertices.min(axis=0) + vert_max = vertices.max(axis=0) + extents = vert_max - vert_min + scale = np.sqrt(np.sum(extents**2)) + return scale + +def quantize_verts(verts, n_bits=8, min_range=None, max_range=None): + """Convert vertices in [-1., 1.] to discrete values in [0, n_bits**2 - 1].""" + min_range = -0.5 if min_range is None else min_range + max_range = 0.5 if max_range is None else max_range + range_quantize = 2**n_bits - 1 + verts_quantize = (verts - min_range) * range_quantize / ( + max_range - min_range) + return verts_quantize.astype('int32') + +def quantize_verts_torch(verts, n_bits=8, min_range=None, max_range=None): + min_range = -0.5 if min_range is None else min_range + max_range = 0.5 if max_range is None else max_range + range_quantize = 2**n_bits - 1 + verts_quantize = (verts - min_range) * range_quantize / ( + max_range - min_range) + return verts_quantize.long() + +def dequantize_verts(verts, n_bits=8, add_noise=False, min_range=None, max_range=None): + """Convert quantized vertices to floats.""" + min_range = -0.5 if min_range is None else min_range + max_range = 0.5 if max_range is None else max_range + range_quantize = 2**n_bits - 1 + verts = verts.astype('float32') + verts = verts * (max_range - min_range) / range_quantize + min_range + if add_noise: + verts += np.random.uniform(size=verts.shape) * (1 / range_quantize) + return verts + +def dequantize_verts_torch(verts, n_bits=8, add_noise=False, min_range=None, max_range=None): + min_range = -0.5 if min_range is None else min_range + max_range = 0.5 if max_range is None else max_range + range_quantize = 2**n_bits - 1 + verts = verts.float() + verts = verts * (max_range - min_range) / range_quantize + min_range + # if add_noise: + # verts += np.random.uniform(size=verts.shape) * (1 / range_quantize) + return verts + + +### dump vertices and faces to the obj file +def write_obj(vertices, faces, file_path, transpose=True, scale=1.): + """Write vertices and faces to obj.""" + if transpose: + vertices = vertices[:, [1, 2, 0]] + vertices *= scale + if faces is not None: + if min(min(faces)) == 0: + f_add = 1 + else: + f_add = 0 + else: + faces = [] + with open(file_path, 'w') as f: + for v in vertices: + f.write('v {} {} {}\n'.format(v[0], v[1], v[2])) + for face in faces: + line = 'f' + for i in face: + line += ' {}'.format(i + f_add) + line += '\n' + f.write(line) + + +def face_to_cycles(face): + """Find cycles in face.""" + g = nx.Graph() + for v in range(len(face) - 1): + g.add_edge(face[v], face[v + 1]) + g.add_edge(face[-1], face[0]) + return list(nx.cycle_basis(g)) + + +def flatten_faces(faces): + """Converts from list of faces to flat face array with stopping indices.""" + if not faces: + return np.array([0]) + else: + l = [f + [-1] for f in faces[:-1]] + l += [faces[-1] + [-2]] + return np.array([item for sublist in l for item in sublist]) + 2 # pylint: disable=g-complex-comprehension + + +def unflatten_faces(flat_faces): + """Converts from flat face sequence to a list of separate faces.""" + def group(seq): + g = [] + for el in seq: + if el == 0 or el == -1: + yield g + g = [] + else: + 
g.append(el - 1) + yield g + outputs = list(group(flat_faces - 1))[:-1] + # Remove empty faces + return [o for o in outputs if len(o) > 2] + + + +def quantize_process_mesh(vertices, faces, tris=None, quantization_bits=8, remove_du=True): + """Quantize vertices, remove resulting duplicates and reindex faces.""" + vertices = quantize_verts(vertices, quantization_bits) + vertices, inv = np.unique(vertices, axis=0, return_inverse=True) # return inverse and unique the vertices + + # + if opt.dataset.sort_dist: + if opt.model.debug: + print("sorting via dist...") + vertices_max = np.max(vertices, axis=0) + vertices_min = np.min(vertices, axis=0) + dist_vertices = np.minimum(np.abs(vertices - np.array([[vertices_min[0], vertices_min[1], 0]])), np.abs(vertices - np.array([[vertices_max[0], vertices_max[1], 0]]))) + dist_vertices = np.concatenate([dist_vertices[:, 0:1] + dist_vertices[:, 1:2], dist_vertices[:, 2:]], axis=-1) + sort_inds = np.lexsort(dist_vertices.T) + else: + # Sort vertices by z then y then x. + sort_inds = np.lexsort(vertices.T) # sorted indices... + vertices = vertices[sort_inds] + + # Re-index faces and tris to re-ordered vertices. + faces = [np.argsort(sort_inds)[inv[f]] for f in faces] + if tris is not None: + tris = np.array([np.argsort(sort_inds)[inv[t]] for t in tris]) + + # Merging duplicate vertices and re-indexing the faces causes some faces to + # contain loops (e.g [2, 3, 5, 2, 4]). Split these faces into distinct + # sub-faces. + sub_faces = [] + for f in faces: + cliques = face_to_cycles(f) + for c in cliques: + c_length = len(c) + # Only append faces with more than two verts. + if c_length > 2: + d = np.argmin(c) + # Cyclically permute faces just that first index is the smallest. + sub_faces.append([c[(d + i) % c_length] for i in range(c_length)]) + faces = sub_faces + if tris is not None: + tris = np.array([v for v in tris if len(set(v)) == len(v)]) + + # Sort faces by lowest vertex indices. If two faces have the same lowest + # index then sort by next lowest and so on. + faces.sort(key=lambda f: tuple(sorted(f))) + if tris is not None: + tris = tris.tolist() + tris.sort(key=lambda f: tuple(sorted(f))) + tris = np.array(tris) + + # After removing degenerate faces some vertices are now unreferenced. # Vertices + # Remove these. # Vertices + num_verts = vertices.shape[0] + # print(f"remove_du: {remove_du}") + if remove_du: ##### num_verts + print("Removing du..") + try: + vert_connected = np.equal( + np.arange(num_verts)[:, None], np.hstack(faces)[None]).any(axis=-1) + vertices = vertices[vert_connected] + + + # Re-index faces and tris to re-ordered vertices. + vert_indices = ( + np.arange(num_verts) - np.cumsum(1 - vert_connected.astype('int'))) + faces = [vert_indices[f].tolist() for f in faces] + except: + pass + if tris is not None: + tris = np.array([vert_indices[t].tolist() for t in tris]) + + return vertices, faces, tris + + +def process_mesh(vertices, faces, quantization_bits=8, recenter_mesh=True, remove_du=True): + """Process mesh vertices and faces.""" + + + + # Transpose so that z-axis is vertical. + vertices = vertices[:, [2, 0, 1]] + + if recenter_mesh: + # Translate the vertices so that bounding box is centered at zero. + vertices = center_vertices(vertices) + + # Scale the vertices so that the long diagonal of the bounding box is equal + # to one. + vertices = normalize_vertices_scale(vertices) + + # Quantize and sort vertices, remove resulting duplicates, sort and reindex + # faces. 
+ vertices, faces, _ = quantize_process_mesh( + vertices, faces, quantization_bits=quantization_bits, remove_du=remove_du) ##### quantize_process_mesh + + # unflatten_faces = np.array(faces, dtype=np.long) ### start from zero + + # Flatten faces and add 'new face' = 1 and 'stop' = 0 tokens. + faces = flatten_faces(faces) + + # Discard degenerate meshes without faces. + return { + 'vertices': vertices, + 'faces': faces, + } + + +def process_mesh_ours(vertices, faces, quantization_bits=8, recenter_mesh=True, remove_du=True): + """Process mesh vertices and faces.""" + # Transpose so that z-axis is vertical. + vertices = vertices[:, [2, 0, 1]] + + if recenter_mesh: + # Translate the vertices so that bounding box is centered at zero. + vertices = center_vertices(vertices) + + # Scale the vertices so that the long diagonal of the bounding box is equal + # to one. + vertices = normalize_vertices_scale(vertices) + + # Quantize and sort vertices, remove resulting duplicates, sort and reindex + # faces. + quant_vertices, faces, _ = quantize_process_mesh( + vertices, faces, quantization_bits=quantization_bits, remove_du=remove_du) ##### quantize_process_mesh + vertices = dequantize_verts(quant_vertices) #### dequantize vertices #### + ### vertices: nn_verts x 3 + # try: + # # print("faces", faces) + # unflatten_faces = np.array(faces, dtype=np.long) + # except: + # print("faces", faces) + # raise ValueError("Something bad happened when processing meshes...") + + # Flatten faces and add 'new face' = 1 and 'stop' = 0 tokens. + + faces = flatten_faces(faces) + + # Discard degenerate meshes without faces. + return { + 'vertices': quant_vertices, + 'faces': faces, + # 'unflatten_faces': unflatten_faces, + 'dequant_vertices': vertices, + 'class_label': 0 + } + +def read_mesh_from_obj_file(fn, quantization_bits=8, recenter_mesh=True, remove_du=True): + vertices, faces = read_obj(fn) + # print(vertices.shape) + mesh_dict = process_mesh_ours(vertices, faces, quantization_bits=quantization_bits, recenter_mesh=recenter_mesh, remove_du=remove_du) + return mesh_dict + +def process_mesh_list(vertices, faces, quantization_bits=8, recenter_mesh=True): + """Process mesh vertices and faces.""" + + vertices = [cur_vert[:, [2, 0, 1]] for cur_vert in vertices] + + tot_vertices = np.concatenate(vertices, axis=0) # center and scale of those vertices + vert_center = get_vertices_center(tot_vertices) + vert_scale = get_vertices_scale(tot_vertices) + + processed_vertices, processed_faces = [], [] + + for cur_verts, cur_faces in zip(vertices, faces): + # print(f"current vertices: {cur_verts.shape}, faces: {len(cur_faces)}") + normalized_verts = (cur_verts - vert_center) / vert_scale + cur_processed_verts, cur_processed_faces, _ = quantize_process_mesh( + normalized_verts, cur_faces, quantization_bits=quantization_bits + ) + processed_vertices.append(cur_processed_verts) + processed_faces.append(cur_processed_faces) + vertices, faces = merge_meshes(processed_vertices, processed_faces) + faces = flatten_faces(faces=faces) + + + # Discard degenerate meshes without faces. 
+ return { + 'vertices': vertices, + + 'faces': faces, + + } + + +def plot_sampled_meshes(v_sample, f_sample, sv_mesh_folder, cur_step=0, predict_joint=True,): + + if not os.path.exists(sv_mesh_folder): + os.mkdir(sv_mesh_folder) + + part_vertex_samples = [v_sample['left'], v_sample['rgt']] + part_face_samples = [f_sample['left'], f_sample['rgt']] + + + tot_n_samples = part_vertex_samples[0]['vertices'].shape[0] + tot_n_part = 2 + + if predict_joint: + pred_dir = v_sample['joint_dir'] + pred_pvp = v_sample['joint_pvp'] + print("pred_dir", pred_dir.shape, pred_dir) + print("pred_pvp", pred_pvp.shape, pred_pvp) + else: + pred_pvp = np.zeros(shape=[tot_n_samples, 3], dtype=np.float32) + + + + + tot_mesh_list = [] + for i_p, (cur_part_v_samples_np, cur_part_f_samples_np) in enumerate(zip(part_vertex_samples, part_face_samples)): + mesh_list = [] + for i_n in range(tot_n_samples): + mesh_list.append( + { + 'vertices': cur_part_v_samples_np['vertices'][i_n][:cur_part_v_samples_np['num_vertices'][i_n]], + 'faces': unflatten_faces( + cur_part_f_samples_np['faces'][i_n][:cur_part_f_samples_np['num_face_indices'][i_n]]) + } + ) + tot_mesh_list.append(mesh_list) + # and write this obj file? + # write_obj(vertices, faces, file_path, transpose=True, scale=1.): + # write mesh objs + for i_n in range(tot_n_samples): + cur_mesh = mesh_list[i_n] + cur_mesh_vertices, cur_mesh_faces = cur_mesh['vertices'], cur_mesh['faces'] + cur_mesh_sv_fn = os.path.join("./meshes", f"training_step_{cur_step}_part_{i_p}_ins_{i_n}.obj") + if cur_mesh_vertices.shape[0] > 0 and len(cur_mesh_faces) > 0: + write_obj(cur_mesh_vertices, cur_mesh_faces, cur_mesh_sv_fn, transpose=False, scale=1.) + + + + ###### plot mesh (predicted) ###### + tot_samples_mesh_dict = [] + for i_s in range(tot_n_samples): + cur_s_tot_vertices = [] + cur_s_tot_faces = [] + cur_s_n_vertices = 0 + + for i_p in range(tot_n_part): + cur_s_cur_part_mesh_dict = tot_mesh_list[i_p][i_s] + cur_s_cur_part_vertices, cur_s_cur_part_faces = cur_s_cur_part_mesh_dict['vertices'], \ + cur_s_cur_part_mesh_dict['faces'] + cur_s_cur_part_new_faces = [] + for cur_s_cur_part_cur_face in cur_s_cur_part_faces: + cur_s_cur_part_cur_new_face = [fid + cur_s_n_vertices for fid in cur_s_cur_part_cur_face] + cur_s_cur_part_new_faces.append(cur_s_cur_part_cur_new_face) + cur_s_n_vertices += cur_s_cur_part_vertices.shape[0] + cur_s_tot_vertices.append(cur_s_cur_part_vertices) + cur_s_tot_faces += cur_s_cur_part_new_faces + + cur_s_tot_vertices = np.concatenate(cur_s_tot_vertices, axis=0) + cur_s_mesh_dict = { + 'vertices': cur_s_tot_vertices, 'faces': cur_s_tot_faces + } + tot_samples_mesh_dict.append(cur_s_mesh_dict) + + for i_s in range(tot_n_samples): + cur_mesh = tot_samples_mesh_dict[i_s] + cur_mesh_vertices, cur_mesh_faces = cur_mesh['vertices'], cur_mesh['faces'] + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_s}.obj") + if cur_mesh_vertices.shape[0] > 0 and len(cur_mesh_faces) > 0: + write_obj(cur_mesh_vertices, cur_mesh_faces, cur_mesh_sv_fn, transpose=False, scale=1.) 
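# Editorial sketch: the per-part assembly above shifts each part's face indices by the
# running vertex count before concatenating, the same pattern as merge_meshes near the
# top of this file.  In compact standalone form (illustrative names only):
#
#   import numpy as np
#
#   def concat_parts(verts_list, faces_list):
#       offset, all_verts, all_faces = 0, [], []
#       for verts, faces in zip(verts_list, faces_list):
#           all_verts.append(verts)
#           all_faces += [[idx + offset for idx in face] for face in faces]
#           offset += verts.shape[0]
#       return np.concatenate(all_verts, axis=0), all_faces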
+ ###### plot mesh (predicted) ###### + + + ###### plot mesh (translated) ###### + tot_samples_mesh_dict = [] + for i_s in range(tot_n_samples): + cur_s_tot_vertices = [] + cur_s_tot_faces = [] + cur_s_n_vertices = 0 + cur_s_pred_pvp = pred_pvp[i_s] + + for i_p in range(tot_n_part): + cur_s_cur_part_mesh_dict = tot_mesh_list[i_p][i_s] + cur_s_cur_part_vertices, cur_s_cur_part_faces = cur_s_cur_part_mesh_dict['vertices'], \ + cur_s_cur_part_mesh_dict['faces'] + cur_s_cur_part_new_faces = [] + for cur_s_cur_part_cur_face in cur_s_cur_part_faces: + cur_s_cur_part_cur_new_face = [fid + cur_s_n_vertices for fid in cur_s_cur_part_cur_face] + cur_s_cur_part_new_faces.append(cur_s_cur_part_cur_new_face) + cur_s_n_vertices += cur_s_cur_part_vertices.shape[0] + + if i_p == 1: + # min_rngs = cur_s_cur_part_vertices.min(1) + # max_rngs = cur_s_cur_part_vertices.max(1) + min_rngs = cur_s_cur_part_vertices.min(0) + max_rngs = cur_s_cur_part_vertices.max(0) + # shifted; cur_s_pred_pvp + # shifted = np.array([0., cur_s_pred_pvp[1] - max_rngs[1], cur_s_pred_pvp[2] - min_rngs[2]], dtype=np.float) + # shifted = np.reshape(shifted, [1, 3]) # + cur_s_pred_pvp = np.array([0., max_rngs[1], min_rngs[2]], dtype=np.float32) + pvp_sample_pred_err = np.sum((cur_s_pred_pvp - pred_pvp[i_s]) ** 2) + # print prediction err, pred pvp and real pvp + # print("cur_s, sample_pred_pvp_err:", pvp_sample_pred_err.item(), ";real val:", cur_s_pred_pvp, "; pred_val:", pred_pvp[i_s]) + pred_pvp[i_s] = cur_s_pred_pvp + shifted = np.zeros((1, 3), dtype=np.float32) + cur_s_cur_part_vertices = cur_s_cur_part_vertices + shifted # shift vertices... # min_rngs + # shifted + cur_s_tot_vertices.append(cur_s_cur_part_vertices) + cur_s_tot_faces += cur_s_cur_part_new_faces + + cur_s_tot_vertices = np.concatenate(cur_s_tot_vertices, axis=0) + cur_s_mesh_dict = { + 'vertices': cur_s_tot_vertices, 'faces': cur_s_tot_faces + } + tot_samples_mesh_dict.append(cur_s_mesh_dict) + + for i_s in range(tot_n_samples): + cur_mesh = tot_samples_mesh_dict[i_s] + cur_mesh_vertices, cur_mesh_faces = cur_mesh['vertices'], cur_mesh['faces'] + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_s}_shifted.obj") + if cur_mesh_vertices.shape[0] > 0 and len(cur_mesh_faces) > 0: + write_obj(cur_mesh_vertices, cur_mesh_faces, cur_mesh_sv_fn, transpose=False, scale=1.) 
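# Editorial note on the "rotated" branch below: when predict_joint is True it prints
# cur_s_pred_dir before that variable is assigned (the line reading it from pred_dir is
# commented out), so the print raises NameError; moving it after the
# cur_s_pred_dir = np.array([1.0, 0.0, 0.0], ...) line avoids that.  For reference, the
# rotate-about-a-pivot operation delegated to revoluteTransform can be sketched with
# Rodrigues' formula (illustrative only, not the project's implementation):
#
#   import numpy as np
#
#   def rotate_about_pivot(pts, pivot, axis, angle):
#       axis = axis / np.linalg.norm(axis)
#       K = np.array([[0.0, -axis[2], axis[1]],
#                     [axis[2], 0.0, -axis[0]],
#                     [-axis[1], axis[0], 0.0]])
#       R = np.eye(3) + np.sin(angle) * K + (1.0 - np.cos(angle)) * (K @ K)
#       return (pts - pivot) @ R.T + pivot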
+ ###### plot mesh (translated) ###### + + + + ###### plot mesh (rotated) ###### + if predict_joint: + from revolute_transform import revoluteTransform + tot_samples_mesh_dict = [] + for i_s in range(tot_n_samples): + cur_s_tot_vertices = [] + cur_s_tot_faces = [] + cur_s_n_vertices = 0 + + # cur_s_pred_dir = pred_dir[i_s] + cur_s_pred_pvp = pred_pvp[i_s] + print("current pred dir:", cur_s_pred_dir, "; current pred pvp:", cur_s_pred_pvp) + cur_s_pred_dir = np.array([1.0, 0.0, 0.0], dtype=np.float) + # cur_s_pred_pvp = cur_s_pred_pvp[[1, 2, 0]] + + for i_p in range(tot_n_part): + cur_s_cur_part_mesh_dict = tot_mesh_list[i_p][i_s] + cur_s_cur_part_vertices, cur_s_cur_part_faces = cur_s_cur_part_mesh_dict['vertices'], \ + cur_s_cur_part_mesh_dict['faces'] + + if i_p == 1: + cur_s_cur_part_vertices, _ = revoluteTransform(cur_s_cur_part_vertices, cur_s_pred_pvp, cur_s_pred_dir, 0.5 * np.pi) # reverse revolute vertices of the upper piece + cur_s_cur_part_vertices = cur_s_cur_part_vertices[:, :3] # + cur_s_cur_part_new_faces = [] + for cur_s_cur_part_cur_face in cur_s_cur_part_faces: + cur_s_cur_part_cur_new_face = [fid + cur_s_n_vertices for fid in cur_s_cur_part_cur_face] + cur_s_cur_part_new_faces.append(cur_s_cur_part_cur_new_face) + cur_s_n_vertices += cur_s_cur_part_vertices.shape[0] + cur_s_tot_vertices.append(cur_s_cur_part_vertices) + # print(f"i_s: {i_s}, i_p: {i_p}, n_vertices: {cur_s_cur_part_vertices.shape[0]}") + cur_s_tot_faces += cur_s_cur_part_new_faces + + cur_s_tot_vertices = np.concatenate(cur_s_tot_vertices, axis=0) + # print(f"i_s: {i_s}, n_cur_s_tot_vertices: {cur_s_tot_vertices.shape[0]}") + cur_s_mesh_dict = { + 'vertices': cur_s_tot_vertices, 'faces': cur_s_tot_faces + } + tot_samples_mesh_dict.append(cur_s_mesh_dict) + # plot_meshes(tot_samples_mesh_dict, ax_lims=0.5, mesh_sv_fn=f"./figs/training_step_{n}_part_{tot_n_part}_rot.png") # plot the mesh; + for i_s in range(tot_n_samples): + cur_mesh = tot_samples_mesh_dict[i_s] + cur_mesh_vertices, cur_mesh_faces = cur_mesh['vertices'], cur_mesh['faces'] + # rotated mesh fn + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_s}_rot.obj") + # write object in the file... + if cur_mesh_vertices.shape[0] > 0 and len(cur_mesh_faces) > 0: + write_obj(cur_mesh_vertices, cur_mesh_faces, cur_mesh_sv_fn, transpose=False, scale=1.) + + + + +def sample_pts_from_mesh(vertices, faces, npoints=512, minus_one=True): + return vertices + sampled_pcts = [] + pts_to_seg_idx = [] + # triangles and pts + minus_val = 0 if not minus_one else 1 + for i in range(len(faces)): # + cur_face = faces[i] + n_tris = len(cur_face) - 2 + v_as, v_bs, v_cs = [cur_face[0] for _ in range(n_tris)], cur_face[1: len(cur_face) - 1], cur_face[2: len(cur_face)] + for v_a, v_b, v_c in zip(v_as, v_bs, v_cs): + + v_a, v_b, v_c = vertices[v_a - minus_val], vertices[v_b - minus_val], vertices[v_c - minus_val] + ab, ac = v_b - v_a, v_c - v_a + cos_ab_ac = (np.sum(ab * ac) / np.clip(np.sqrt(np.sum(ab ** 2)) * np.sqrt(np.sum(ac ** 2)), a_min=1e-9, + a_max=9999999.)).item() + sin_ab_ac = math.sqrt(min(max(0., 1. 
- cos_ab_ac ** 2), 1.)) + cur_area = 0.5 * sin_ab_ac * np.sqrt(np.sum(ab ** 2)).item() * np.sqrt(np.sum(ac ** 2)).item() + + cur_sampled_pts = int(cur_area * npoints) + cur_sampled_pts = 1 if cur_sampled_pts == 0 else cur_sampled_pts + # if cur_sampled_pts == 0: + + tmp_x, tmp_y = np.random.uniform(0, 1., (cur_sampled_pts,)).tolist(), np.random.uniform(0., 1., ( + cur_sampled_pts,)).tolist() + + for xx, yy in zip(tmp_x, tmp_y): + sqrt_xx, sqrt_yy = math.sqrt(xx), math.sqrt(yy) + aa = 1. - sqrt_xx + bb = sqrt_xx * (1. - yy) + cc = yy * sqrt_xx + cur_pos = v_a * aa + v_b * bb + v_c * cc + sampled_pcts.append(cur_pos) + # pts_to_seg_idx.append(cur_tri_seg) + + # seg_idx_to_sampled_pts = {} + sampled_pcts = np.array(sampled_pcts, dtype=np.float) + + return sampled_pcts + + +def fps_fr_numpy(np_pts, n_sampling=4096): + pts = torch.from_numpy(np_pts).float().cuda() + pts_fps_idx = farthest_point_sampling(pts.unsqueeze(0), n_sampling=n_sampling) # farthes points sampling ## + pts = pts[pts_fps_idx].cpu() + return pts + + +def farthest_point_sampling(pos: torch.FloatTensor, n_sampling: int): + bz, N = pos.size(0), pos.size(1) + feat_dim = pos.size(-1) + device = pos.device + sampling_ratio = float(n_sampling / N) + pos_float = pos.float() + + batch = torch.arange(bz, dtype=torch.long).view(bz, 1).to(device) + mult_one = torch.ones((N,), dtype=torch.long).view(1, N).to(device) + + batch = batch * mult_one + batch = batch.view(-1) + pos_float = pos_float.contiguous().view(-1, feat_dim).contiguous() # (bz x N, 3) + # sampling_ratio = torch.tensor([sampling_ratio for _ in range(bz)], dtype=torch.float).to(device) + # batch = torch.zeros((N, ), dtype=torch.long, device=device) + sampled_idx = fps(pos_float, batch, ratio=sampling_ratio, random_start=False) + # shape of sampled_idx? + return sampled_idx + + +def plot_sampled_meshes_single_part(v_sample, f_sample, sv_mesh_folder, cur_step=0, predict_joint=True,): + + if not os.path.exists(sv_mesh_folder): + os.mkdir(sv_mesh_folder) + + part_vertex_samples = [v_sample] + part_face_samples = [f_sample] + + + tot_n_samples = part_vertex_samples[0]['vertices'].shape[0] + tot_n_part = 2 + + # not predict joints here + # if predict_joint: + # pred_dir = v_sample['joint_dir'] + # pred_pvp = v_sample['joint_pvp'] + # print("pred_dir", pred_dir.shape, pred_dir) + # print("pred_pvp", pred_pvp.shape, pred_pvp) + # else: + # pred_pvp = np.zeros(shape=[tot_n_samples, 3], dtype=np.float32) + + + tot_mesh_list = [] + for i_p, (cur_part_v_samples_np, cur_part_f_samples_np) in enumerate(zip(part_vertex_samples, part_face_samples)): + mesh_list = [] + for i_n in range(tot_n_samples): + mesh_list.append( + { + 'vertices': cur_part_v_samples_np['vertices'][i_n][:cur_part_v_samples_np['num_vertices'][i_n]], + 'faces': unflatten_faces( + cur_part_f_samples_np['faces'][i_n][:cur_part_f_samples_np['num_face_indices'][i_n]]) + } + ) + tot_mesh_list.append(mesh_list) + + for i_n in range(tot_n_samples): + cur_mesh = mesh_list[i_n] + cur_mesh_vertices, cur_mesh_faces = cur_mesh['vertices'], cur_mesh['faces'] + # cur_mesh_sv_fn = os.path.join("./meshes", f"training_step_{cur_step}_part_{i_p}_ins_{i_n}.obj") + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_part_{i_p}_ins_{i_n}.obj") + print(f"saving to {cur_mesh_sv_fn}, nn_verts: {cur_mesh_vertices.shape[0]}, nn_faces: {len(cur_mesh_faces)}") + if cur_mesh_vertices.shape[0] > 0 and len(cur_mesh_faces) > 0: + write_obj(cur_mesh_vertices, cur_mesh_faces, cur_mesh_sv_fn, transpose=True, scale=1.) 
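# Editorial note: sample_pts_from_mesh above returns `vertices` on its first line, so the
# barycentric sampling body after that return is currently unreachable.  The idea it
# implements (area-weighted sampling of surface points with sqrt-uniform barycentric
# coordinates) in a compact standalone sketch, with illustrative names only:

import numpy as np  # already imported at the top of this file; repeated so the sketch stands alone

def sample_surface_points(vertices, triangles, n_points=512):
    """vertices: (N, 3) floats; triangles: (M, 3) ints, 0-based."""
    v_a, v_b, v_c = (vertices[triangles[:, i]] for i in range(3))
    # triangle areas give the sampling probabilities
    areas = 0.5 * np.linalg.norm(np.cross(v_b - v_a, v_c - v_a), axis=1)
    probs = areas / max(areas.sum(), 1e-12)
    tri_idx = np.random.choice(len(triangles), size=n_points, p=probs)
    # sqrt-uniform barycentric coordinates give uniform points on each triangle
    r1 = np.sqrt(np.random.rand(n_points, 1))
    r2 = np.random.rand(n_points, 1)
    a, b, c = 1.0 - r1, r1 * (1.0 - r2), r1 * r2
    return a * v_a[tri_idx] + b * v_b[tri_idx] + c * v_c[tri_idx]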
+ + +def plot_sampled_meshes(v_sample, f_sample, sv_mesh_folder, cur_step=0, predict_joint=True,): + + if not os.path.exists(sv_mesh_folder): + os.mkdir(sv_mesh_folder) + + part_vertex_samples = [v_sample] + part_face_samples = [f_sample] + + + tot_n_samples = part_vertex_samples[0]['vertices'].shape[0] + # tot_n_part = 2 + + # not predict joints here + # if predict_joint: + # pred_dir = v_sample['joint_dir'] + # pred_pvp = v_sample['joint_pvp'] + # print("pred_dir", pred_dir.shape, pred_dir) + # print("pred_pvp", pred_pvp.shape, pred_pvp) + # else: + # pred_pvp = np.zeros(shape=[tot_n_samples, 3], dtype=np.float32) + + + tot_mesh_list = [] + for i_p, (cur_part_v_samples_np, cur_part_f_samples_np) in enumerate(zip(part_vertex_samples, part_face_samples)): + mesh_list = [] + for i_n in range(tot_n_samples): + mesh_list.append( + { + 'vertices': cur_part_v_samples_np['vertices'][i_n][:cur_part_v_samples_np['num_vertices'][i_n]], + 'faces': unflatten_faces( + cur_part_f_samples_np['faces'][i_n][:cur_part_f_samples_np['num_face_indices'][i_n]]) + } + ) + tot_mesh_list.append(mesh_list) + + for i_n in range(tot_n_samples): + cur_mesh = mesh_list[i_n] + cur_mesh_vertices, cur_mesh_faces = cur_mesh['vertices'], cur_mesh['faces'] + # cur_mesh_sv_fn = os.path.join("./meshes", f"training_step_{cur_step}_part_{i_p}_ins_{i_n}.obj") + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_part_{i_p}_ins_{i_n}.obj") + print(f"saving to {cur_mesh_sv_fn}, nn_verts: {cur_mesh_vertices.shape[0]}, nn_faces: {len(cur_mesh_faces)}") + if cur_mesh_vertices.shape[0] > 0 and len(cur_mesh_faces) > 0: + write_obj(cur_mesh_vertices, cur_mesh_faces, cur_mesh_sv_fn, transpose=True, scale=1.) + + +def filter_masked_vertices(vertices, mask_indicator): + # vertices: n_verts x 3 + mask_indicator = np.reshape(mask_indicator, (vertices.shape[0], 3)) + tot_vertices = [] + for i_v in range(vertices.shape[0]): + cur_vert = vertices[i_v] + cur_vert_indicator = mask_indicator[i_v][0].item() + if cur_vert_indicator < 0.5: + tot_vertices.append(cur_vert) + tot_vertices = np.array(tot_vertices) + return tot_vertices + + +def plot_sampled_ar_subd_meshes(v_sample, f_sample, sv_mesh_folder, cur_step=0, ): + if not os.path.exists(sv_mesh_folder): ### vertices_mask + os.mkdir(sv_mesh_folder) + ### v_sample: bsz x nn_verts x 3 + vertices_mask = v_sample['vertices_mask'] + vertices = v_sample['vertices'] + faces = f_sample['faces'] + num_face_indices = f_sample['num_face_indices'] #### num_faces_indices + bsz = vertices.shape[0] + + for i_bsz in range(bsz): + cur_vertices = vertices[i_bsz] + cur_vertices_mask = vertices_mask[i_bsz] + cur_faces = faces[i_bsz] + cur_num_face_indices = num_face_indices[i_bsz] + cur_nn_verts = cur_vertices_mask.sum(-1).item() + cur_nn_verts = int(cur_nn_verts) + cur_vertices = cur_vertices[:cur_nn_verts] + cur_faces = unflatten_faces( + cur_faces[:int(cur_num_face_indices)]) + + cur_num_faces = len(cur_faces) + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_inst_{i_bsz}.obj") + # cur_context_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_part_{i_p}_ins_{i_n}_context.obj") + print(f"saving to {cur_mesh_sv_fn}, nn_verts: {cur_nn_verts}, nn_faces: {cur_num_faces}") + if cur_nn_verts > 0 and cur_num_faces > 0: + write_obj(cur_vertices, cur_faces, cur_mesh_sv_fn, transpose=True, scale=1.) 
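write_obj is imported from elsewhere in the repository and does not appear in this diff. The plotting helpers above only assume that it writes a Wavefront OBJ file with v/f records, 1-indexed faces, an optional axis transpose, and a scale factor; a hypothetical stand-in under exactly those assumptions (not the repository's actual implementation) would be:

import numpy as np

def write_obj_sketch(vertices, faces, out_fn, transpose=True, scale=1.0):
    # Hypothetical stand-in for the repository's write_obj (assumed behavior only).
    verts = np.asarray(vertices, dtype=np.float64) * scale
    if transpose:
        verts = verts[:, [1, 2, 0]]  # assumed axis permutation; the real helper may differ
    with open(out_fn, "w") as fo:
        for v in verts:
            fo.write(f"v {v[0]} {v[1]} {v[2]}\n")
        for face in (faces if faces is not None else []):
            # OBJ faces are 1-indexed, so shift the vertex indices.
            fo.write("f " + " ".join(str(int(i) + 1) for i in face) + "\n")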
+ + + +def plot_sampled_meshes_single_part_for_pretraining(v_sample, f_sample, context, sv_mesh_folder, cur_step=0, predict_joint=True, with_context=True): + + if not os.path.exists(sv_mesh_folder): + os.mkdir(sv_mesh_folder) + + part_vertex_samples = [v_sample] + part_face_samples = [f_sample] + + context_vertices = [context['vertices']] + context_faces = [context['faces']] + context_vertices_mask = [context['vertices_mask']] + context_faces_mask = [context['faces_mask']] + + + tot_n_samples = part_vertex_samples[0]['vertices'].shape[0] + tot_n_part = 2 + + # not predict joints here + # if predict_joint: + # pred_dir = v_sample['joint_dir'] + # pred_pvp = v_sample['joint_pvp'] + # print("pred_dir", pred_dir.shape, pred_dir) + # print("pred_pvp", pred_pvp.shape, pred_pvp) + # else: + # pred_pvp = np.zeros(shape=[tot_n_samples, 3], dtype=np.float32) + + # + + + tot_mesh_list = [] + for i_p, (cur_part_v_samples_np, cur_part_f_samples_np) in enumerate(zip(part_vertex_samples, part_face_samples)): + mesh_list = [] + context_mesh_list = [] + for i_n in range(tot_n_samples): + mesh_list.append( + { + 'vertices': cur_part_v_samples_np['vertices'][i_n][:cur_part_v_samples_np['num_vertices'][i_n]], + 'faces': unflatten_faces( + cur_part_f_samples_np['faces'][i_n][:cur_part_f_samples_np['num_face_indices'][i_n]]) + } + ) + + cur_context_vertices = context_vertices[i_p][i_n] + cur_context_faces = context_faces[i_p][i_n] + cur_context_vertices_mask = context_vertices_mask[i_p][i_n] + cur_context_faces_mask = context_faces_mask[i_p][i_n] + cur_nn_vertices = np.sum(cur_context_vertices_mask).item() + cur_nn_faces = np.sum(cur_context_faces_mask).item() + cur_nn_vertices, cur_nn_faces = int(cur_nn_vertices), int(cur_nn_faces) + cur_context_vertices = cur_context_vertices[:cur_nn_vertices] + if 'mask_vertices_flat_indicator' in context: + cur_context_vertices_mask_indicator = context['mask_vertices_flat_indicator'][i_n] + cur_context_vertices_mask_indicator = cur_context_vertices_mask_indicator[:cur_nn_vertices * 3] + cur_context_vertices = filter_masked_vertices(cur_context_vertices, cur_context_vertices_mask_indicator) + cur_context_faces = cur_context_faces[:cur_nn_faces] # context faces... + context_mesh_dict = { + 'vertices': dequantize_verts(cur_context_vertices, n_bits=8), 'faces': unflatten_faces(cur_context_faces) + } + context_mesh_list.append(context_mesh_dict) + + tot_mesh_list.append(mesh_list) + + # if with_context: + for i_n in range(tot_n_samples): + cur_mesh = mesh_list[i_n] + cur_context_mesh = context_mesh_list[i_n] + cur_mesh_vertices, cur_mesh_faces = cur_mesh['vertices'], cur_mesh['faces'] + cur_context_vertices, cur_context_faces = cur_context_mesh['vertices'], cur_context_mesh['faces'] + # cur_mesh_sv_fn = os.path.join("./meshes", f"training_step_{cur_step}_part_{i_p}_ins_{i_n}.obj") + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_part_{i_p}_ins_{i_n}.obj") + cur_context_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_part_{i_p}_ins_{i_n}_context.obj") + print(f"saving to {cur_mesh_sv_fn}, nn_verts: {cur_mesh_vertices.shape[0]}, nn_faces: {len(cur_mesh_faces)}") + if cur_mesh_vertices.shape[0] > 0 and len(cur_mesh_faces) > 0: + write_obj(cur_mesh_vertices, cur_mesh_faces, cur_mesh_sv_fn, transpose=True, scale=1.) + if cur_context_vertices.shape[0] > 0 and len(cur_context_faces) > 0: + write_obj(cur_context_vertices, cur_context_faces, cur_context_mesh_sv_fn, transpose=True, scale=1.) 
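dequantize_verts(..., n_bits=8) above maps 8-bit integer vertex coordinates back to continuous positions; it is also defined elsewhere in the repository. A PolyGen-style uniform dequantizer is sketched below as an assumption about its behavior, not as the actual implementation:

import numpy as np

def dequantize_verts_sketch(verts, n_bits=8, min_range=-0.5, max_range=0.5):
    # Map integer coordinates in [0, 2**n_bits - 1] back to [min_range, max_range].
    range_quantize = 2 ** n_bits - 1
    verts = np.asarray(verts).astype(np.float32)
    return verts * (max_range - min_range) / range_quantize + min_range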
+ + +def plot_grids_for_pretraining_ml(v_sample, sv_mesh_folder="", cur_step=0, context=None): + + if not os.path.exists(sv_mesh_folder): + os.mkdir(sv_mesh_folder) + + mesh_list = [] + context_mesh_list = [] + tot_n_samples = v_sample['vertices'].shape[0] + + for i_n in range(tot_n_samples): + mesh_list.append( + { + 'vertices': v_sample['vertices'][i_n][:v_sample['num_vertices'][i_n]], + 'faces': [] + } + ) + + cur_context_vertices = context['vertices'][i_n] + cur_context_vertices_mask = context['vertices_mask'][i_n] + cur_nn_vertices = np.sum(cur_context_vertices_mask).item() + cur_nn_vertices = int(cur_nn_vertices) + cur_context_vertices = cur_context_vertices[:cur_nn_vertices] + if 'mask_vertices_flat_indicator' in context: + cur_context_vertices_mask_indicator = context['mask_vertices_flat_indicator'][i_n] + cur_context_vertices_mask_indicator = cur_context_vertices_mask_indicator[:cur_nn_vertices * 3] + cur_context_vertices = filter_masked_vertices(cur_context_vertices, cur_context_vertices_mask_indicator) + context_mesh_dict = { + 'vertices': dequantize_verts(cur_context_vertices, n_bits=8), 'faces': [] + } + context_mesh_list.append(context_mesh_dict) + + # tot_mesh_list.append(mesh_list) + + # if with_context: + for i_n in range(tot_n_samples): + cur_mesh = mesh_list[i_n] + cur_context_mesh = context_mesh_list[i_n] + cur_mesh_vertices = cur_mesh['vertices'] + cur_context_vertices = cur_context_mesh['vertices'] + # cur_mesh_sv_fn = os.path.join("./meshes", f"training_step_{cur_step}_part_{i_p}_ins_{i_n}.obj") + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_n}.obj") + cur_context_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_n}_context.obj") + # print(f"saving to {cur_mesh_sv_fn}, nn_verts: {cur_mesh_vertices.shape[0]}, nn_faces: {len(cur_mesh_faces)}") + print(f"saving the sample to {cur_mesh_sv_fn}, context sample to {cur_context_mesh_sv_fn}") + if cur_mesh_vertices.shape[0] > 0: + write_obj(cur_mesh_vertices, None, cur_mesh_sv_fn, transpose=True, scale=1.) + if cur_context_vertices.shape[0] > 0: + write_obj(cur_context_vertices, None, cur_context_mesh_sv_fn, transpose=True, scale=1.) 
+ + +def get_grid_content_from_grids(grid_xyzs, grid_values, grid_size=2): + cur_bsz_grid_xyzs = grid_xyzs # grid_length x 3 # grids pts for a sinlge batch + cur_bsz_grid_values = grid_values # grid_length x gs x gs x gs + pts = [] + for i_grid in range(cur_bsz_grid_xyzs.shape[0]): # cur_bsz_grid_xyzs + cur_grid_xyz = cur_bsz_grid_xyzs[i_grid].tolist() + if cur_grid_xyz[0] == -1 or cur_grid_xyz[1] == -1 or cur_grid_xyz[2] == -1: + break + if len(cur_bsz_grid_values.shape) > 1: + cur_grid_values = cur_bsz_grid_values[i_grid] + else: + cur_grid_content = cur_bsz_grid_values[i_grid].item() + if cur_grid_content >= MASK_GRID_VALIE: + continue + inde = 2 + cur_grid_values = [] + for i_s in range(grid_size ** 3): + cur_mod_value = cur_grid_content % inde + cur_grid_content = cur_grid_content // inde + cur_grid_values = [cur_mod_value] + cur_grid_values # higher values should be put to the front of the list + cur_grid_values = np.array(cur_grid_values, dtype=np.long) + cur_grid_values = np.reshape(cur_grid_values, (grid_size, grid_size, grid_size)) + # if words + # flip words + for i_x in range(cur_grid_values.shape[0]): + for i_y in range(cur_grid_values.shape[1]): + for i_z in range(cur_grid_values.shape[2]): + cur_grid_xyz_value = int(cur_grid_values[i_x, i_y, i_z].item()) + if cur_grid_xyz_value > 0.5: + cur_x, cur_y, cur_z = cur_grid_xyz[0] * grid_size + i_x, cur_grid_xyz[1] * grid_size + i_y, cur_grid_xyz[2] * grid_size + i_z + pts.append([cur_x, cur_y, cur_z]) + return pts + +def plot_grids_for_pretraining(v_sample, sv_mesh_folder="", cur_step=0, context=None, sv_mesh_fn=None): + + ##### plot grids + if not os.path.exists(sv_mesh_folder): + os.mkdir(sv_mesh_folder) + + # part_vertex_samples = [v_sample] # vertex samples + # part_face_samples = [f_sample] # face samples + + grid_xyzs = v_sample['grid_xyzs'] + grid_values = v_sample['grid_values'] + + bsz = grid_xyzs.shape[0] + grid_size = opt.vertex_model.grid_size + + + for i_bsz in range(bsz): + cur_bsz_grid_xyzs = grid_xyzs[i_bsz] # grid_length x 3 + cur_bsz_grid_values = grid_values[i_bsz] # grid_length x gs x gs x gs + pts = [] + for i_grid in range(cur_bsz_grid_xyzs.shape[0]): # cur_bsz_grid_xyzs + cur_grid_xyz = cur_bsz_grid_xyzs[i_grid].tolist() + if cur_grid_xyz[0] == -1 or cur_grid_xyz[1] == -1 or cur_grid_xyz[2] == -1: + break + if len(cur_bsz_grid_values.shape) > 1: + cur_grid_values = cur_bsz_grid_values[i_grid] + else: + cur_grid_content = cur_bsz_grid_values[i_grid].item() + if cur_grid_content >= MASK_GRID_VALIE: + continue + inde = 2 + cur_grid_values = [] + for i_s in range(grid_size ** 3): + cur_mod_value = cur_grid_content % inde + cur_grid_content = cur_grid_content // inde + cur_grid_values = [cur_mod_value] + cur_grid_values # higher values should be put to the front of the list + cur_grid_values = np.array(cur_grid_values, dtype=np.long) + cur_grid_values = np.reshape(cur_grid_values, (grid_size, grid_size, grid_size)) + # if + for i_x in range(cur_grid_values.shape[0]): + for i_y in range(cur_grid_values.shape[1]): + for i_z in range(cur_grid_values.shape[2]): + cur_grid_xyz_value = int(cur_grid_values[i_x, i_y, i_z].item()) + if cur_grid_xyz_value > 0.5: + cur_x, cur_y, cur_z = cur_grid_xyz[0] * grid_size + i_x, cur_grid_xyz[1] * grid_size + i_y, cur_grid_xyz[2] * grid_size + i_z + pts.append([cur_x, cur_y, cur_z]) + + + if len(pts) == 0: + print("zzz, len(pts) == 0") + continue + pts = np.array(pts, dtype=np.float32) + # pts = center_vertices(pts) + # pts = normalize_vertices_scale(pts) + pts = pts[:, [2, 1, 0]] + 
cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_bsz}.obj" if sv_mesh_fn is None else sv_mesh_fn) + + print(f"Saving obj to {cur_mesh_sv_fn}") + write_obj(pts, None, cur_mesh_sv_fn, transpose=True, scale=1.) + + +def plot_grids_for_pretraining_obj_corpus(v_sample, sv_mesh_folder="", cur_step=0, context=None, sv_mesh_fn=None): + + ##### plot grids + if not os.path.exists(sv_mesh_folder): + os.mkdir(sv_mesh_folder) + + # part_vertex_samples = [v_sample] # vertex samples + # part_face_samples = [f_sample] # face samples + + grid_xyzs = v_sample['grid_xyzs'] + grid_values = v_sample['grid_values'] + + bsz = grid_xyzs.shape[0] + grid_size = opt.vertex_model.grid_size + + + for i_bsz in range(bsz): + cur_bsz_grid_xyzs = grid_xyzs[i_bsz] # grid_length x 3 + cur_bsz_grid_values = grid_values[i_bsz] # grid_length x gs x gs x gs + part_pts = [] + pts = [] + for i_grid in range(cur_bsz_grid_xyzs.shape[0]): # cur_bsz_grid_xyzs + cur_grid_xyz = cur_bsz_grid_xyzs[i_grid].tolist() + ##### grid_xyz; grid_ + if cur_grid_xyz[0] == -1 and cur_grid_xyz[1] == -1 and cur_grid_xyz[2] == -1: + part_pts.append(pts) + pts = [] + continue + ##### cur_grid_xyz... ##### + elif not (cur_grid_xyz[0] >= 0 and cur_grid_xyz[1] >= 0 and cur_grid_xyz[2] >= 0): + continue + if len(cur_bsz_grid_values.shape) > 1: + cur_grid_values = cur_bsz_grid_values[i_grid] + else: + cur_grid_content = cur_bsz_grid_values[i_grid].item() + if cur_grid_content >= MASK_GRID_VALIE: # mask grid value + continue + inde = 2 + cur_grid_values = [] + for i_s in range(grid_size ** 3): + cur_mod_value = cur_grid_content % inde + cur_grid_content = cur_grid_content // inde + cur_grid_values = [cur_mod_value] + cur_grid_values # higher values should be put to the front of the list + cur_grid_values = np.array(cur_grid_values, dtype=np.long) + cur_grid_values = np.reshape(cur_grid_values, (grid_size, grid_size, grid_size)) + # if + for i_x in range(cur_grid_values.shape[0]): + for i_y in range(cur_grid_values.shape[1]): + for i_z in range(cur_grid_values.shape[2]): + cur_grid_xyz_value = int(cur_grid_values[i_x, i_y, i_z].item()) + ##### gird-xyz-values ##### + if cur_grid_xyz_value > 0.5: # cur_grid_xyz_value + cur_x, cur_y, cur_z = cur_grid_xyz[0] * grid_size + i_x, cur_grid_xyz[1] * grid_size + i_y, cur_grid_xyz[2] * grid_size + i_z + pts.append([cur_x, cur_y, cur_z]) + + if len(pts) > 0: + part_pts.append(pts) + pts = [] + tot_nn_pts = sum([len(aa) for aa in part_pts]) + if tot_nn_pts == 0: + print("zzz, tot_nn_pts == 0") + continue + + for i_p, pts in enumerate(part_pts): + if len(pts) == 0: + continue + pts = np.array(pts, dtype=np.float32) + pts = center_vertices(pts) + # pts = normalize_vertices_scale(pts) + pts = pts[:, [2, 1, 0]] + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_bsz}_ip_{i_p}.obj" if sv_mesh_fn is None else sv_mesh_fn) + + print(f"Saving {i_p}-th part obj to {cur_mesh_sv_fn}") + write_obj(pts, None, cur_mesh_sv_fn, transpose=True, scale=1.) 
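The inner % inde / // inde loop used by the grid plotting helpers unpacks one integer grid token into grid_size ** 3 binary occupancy values; bits are extracted least-significant first and prepended, so the flattened cube is ordered from most to least significant bit. A standalone version of that decoding step (illustrative names; np.int64 replaces the removed np.long alias):

import numpy as np

def decode_grid_token(token, grid_size=2):
    # Unpack one integer token into a (grid_size, grid_size, grid_size) binary
    # occupancy cube. Bits are extracted least-significant first and prepended,
    # so the flattened result runs from most to least significant bit.
    bits = []
    for _ in range(grid_size ** 3):
        bits = [token % 2] + bits
        token = token // 2
    return np.array(bits, dtype=np.int64).reshape(grid_size, grid_size, grid_size)

# Example with grid_size=2: token 5 (binary 101) marks two of the eight cells.
occ = decode_grid_token(5, grid_size=2)
assert occ.reshape(-1).tolist() == [0, 0, 0, 0, 0, 1, 0, 1]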
+ + + +def plot_grids_for_pretraining_obj_part(v_sample, sv_mesh_folder="", cur_step=0, context=None, sv_mesh_fn=None): + + ##### plot grids + if not os.path.exists(sv_mesh_folder): + os.mkdir(sv_mesh_folder) + + # part_vertex_samples = [v_sample] # vertex samples + # part_face_samples = [f_sample] # face samples + + grid_xyzs = v_sample['grid_xyzs'] + grid_values = v_sample['grid_values'] + + bsz = grid_xyzs.shape[0] + grid_size = opt.vertex_model.grid_size + + + for i_bsz in range(bsz): + cur_bsz_grid_xyzs = grid_xyzs[i_bsz] # grid_length x 3 + cur_bsz_grid_values = grid_values[i_bsz] # grid_length x gs x gs x gs + part_pts = [] + pts = [] + for i_grid in range(cur_bsz_grid_xyzs.shape[0]): # cur_bsz_grid_xyzs + cur_grid_xyz = cur_bsz_grid_xyzs[i_grid].tolist() + ##### grid_xyz; grid_ + if cur_grid_xyz[0] == -1 and cur_grid_xyz[1] == -1 and cur_grid_xyz[2] == -1: + part_pts.append(pts) + pts = [] + break + elif cur_grid_xyz[0] == -1 and cur_grid_xyz[1] == -1 and cur_grid_xyz[2] == 0: + part_pts.append(pts) + pts = [] + continue + ##### cur_grid_xyz... ##### + elif not (cur_grid_xyz[0] >= 0 and cur_grid_xyz[1] >= 0 and cur_grid_xyz[2] >= 0): + continue + if len(cur_bsz_grid_values.shape) > 1: + cur_grid_values = cur_bsz_grid_values[i_grid] + else: + cur_grid_content = cur_bsz_grid_values[i_grid].item() + if cur_grid_content >= MASK_GRID_VALIE: # invalid jor dummy content value s + continue + inde = 2 + cur_grid_values = [] + for i_s in range(grid_size ** 3): + cur_mod_value = cur_grid_content % inde + cur_grid_content = cur_grid_content // inde + cur_grid_values = [cur_mod_value] + cur_grid_values # higher values should be put to the front of the list + cur_grid_values = np.array(cur_grid_values, dtype=np.long) + cur_grid_values = np.reshape(cur_grid_values, (grid_size, grid_size, grid_size)) + # if + for i_x in range(cur_grid_values.shape[0]): + for i_y in range(cur_grid_values.shape[1]): + for i_z in range(cur_grid_values.shape[2]): + cur_grid_xyz_value = int(cur_grid_values[i_x, i_y, i_z].item()) + ##### gird-xyz-values ##### + if cur_grid_xyz_value > 0.5: # cur_grid_xyz_value + cur_x, cur_y, cur_z = cur_grid_xyz[0] * grid_size + i_x, cur_grid_xyz[1] * grid_size + i_y, cur_grid_xyz[2] * grid_size + i_z + pts.append([cur_x, cur_y, cur_z]) + + if len(pts) > 0: + part_pts.append(pts) + pts = [] + tot_nn_pts = sum([len(aa) for aa in part_pts]) + if tot_nn_pts == 0: + print("zzz, tot_nn_pts == 0") + continue + + for i_p, pts in enumerate(part_pts): + if len(pts) == 0: + continue + pts = np.array(pts, dtype=np.float32) + pts = center_vertices(pts) + # pts = normalize_vertices_scale(pts) + pts = pts[:, [2, 1, 0]] + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_bsz}_ip_{i_p}.obj" if sv_mesh_fn is None else sv_mesh_fn) + + print(f"Saving {i_p}-th part obj to {cur_mesh_sv_fn}") + write_obj(pts, None, cur_mesh_sv_fn, transpose=True, scale=1.) 
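# Note on the two near-identical helpers above: plot_grids_for_pretraining_obj_corpus treats a grid
# coordinate of (-1, -1, -1) as a part separator (the loop continues with a fresh point list), while
# plot_grids_for_pretraining_obj_part treats (-1, -1, -1) as the end of the whole sequence and uses
# (-1, -1, 0) as the per-part separator.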
+ + +def plot_grids_for_pretraining_ml(v_sample, sv_mesh_folder="", cur_step=0, context=None): + + if not os.path.exists(sv_mesh_folder): + os.mkdir(sv_mesh_folder) + + # part_vertex_samples = [v_sample] # vertex samples + # part_face_samples = [f_sample] # face samples + + grid_xyzs = v_sample['grid_xyzs'] + grid_values = v_sample['grid_values'] + + context_grid_xyzs = context['grid_xyzs'] - 1 + # context_grid_values = context['grid_content'] + context_grid_values = context['mask_grid_content'] + + bsz = grid_xyzs.shape[0] + grid_size = opt.vertex_model.grid_size + + + for i_bsz in range(bsz): + cur_bsz_grid_pts = get_grid_content_from_grids(grid_xyzs[i_bsz], grid_values[i_bsz], grid_size=grid_size) + cur_context_grid_pts = get_grid_content_from_grids(context_grid_xyzs[i_bsz], context_grid_values[i_bsz], grid_size=grid_size) + + if len(cur_bsz_grid_pts) > 0 and len(cur_context_grid_pts) > 0: + cur_bsz_grid_pts = np.array(cur_bsz_grid_pts, dtype=np.float32) + cur_bsz_grid_pts = center_vertices(cur_bsz_grid_pts) + cur_bsz_grid_pts = normalize_vertices_scale(cur_bsz_grid_pts) + cur_bsz_grid_pts = cur_bsz_grid_pts[:, [2, 1, 0]] + #### plot current mesh / sampled points #### + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_bsz}.obj") + print(f"Saving predicted grid points to {cur_mesh_sv_fn}") + write_obj(cur_bsz_grid_pts, None, cur_mesh_sv_fn, transpose=True, scale=1.) + + cur_context_grid_pts = np.array(cur_context_grid_pts, dtype=np.float32) + cur_context_grid_pts = center_vertices(cur_context_grid_pts) + cur_context_grid_pts = normalize_vertices_scale(cur_context_grid_pts) + cur_context_grid_pts = cur_context_grid_pts[:, [2, 1, 0]] + #### plot current mesh / sampled points #### + cur_context_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_bsz}_context.obj") + print(f"Saving context grid points to {cur_context_mesh_sv_fn}") + write_obj(cur_context_grid_pts, None, cur_context_mesh_sv_fn, transpose=True, scale=1.) + + # print(f"Saving obj to {cur_mesh_sv_fn}") + # write_obj(pts, None, cur_mesh_sv_fn, transpose=True, scale=1.) + + # if len(cur_bsz_grid_pts) == 0: + # print("zzz, len(pts) == 0") + # continue + # pts = np.array(pts, dtype=np.float32) + # pts = center_vertices(pts) + # pts = normalize_vertices_scale(pts) + # pts = pts[:, [2, 1, 0]] + # cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"training_step_{cur_step}_ins_{i_bsz}.obj") + + # print(f"Saving obj to {cur_mesh_sv_fn}") + # write_obj(pts, None, cur_mesh_sv_fn, transpose=True, scale=1.) 
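# Note: this grid-token-based plot_grids_for_pretraining_ml redefines the vertex-based function of the
# same name earlier in this file, so only this later definition is visible once the module is imported.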
+ + + +def plot_sampled_meshes_single_part_for_sampling(v_sample, f_sample, sv_mesh_folder, cur_step=0, predict_joint=True,): + + if not os.path.exists(sv_mesh_folder): + os.mkdir(sv_mesh_folder) + + part_vertex_samples = [v_sample] + part_face_samples = [f_sample] + + + tot_n_samples = part_vertex_samples[0]['vertices'].shape[0] + tot_n_part = 2 + + # not predict joints here + # if predict_joint: + # pred_dir = v_sample['joint_dir'] + # pred_pvp = v_sample['joint_pvp'] + # print("pred_dir", pred_dir.shape, pred_dir) + # print("pred_pvp", pred_pvp.shape, pred_pvp) + # else: + # pred_pvp = np.zeros(shape=[tot_n_samples, 3], dtype=np.float32) + + + tot_mesh_list = [] + for i_p, (cur_part_v_samples_np, cur_part_f_samples_np) in enumerate(zip(part_vertex_samples, part_face_samples)): + mesh_list = [] + for i_n in range(tot_n_samples): + mesh_list.append( + { + 'vertices': cur_part_v_samples_np['vertices'][i_n][:cur_part_v_samples_np['num_vertices'][i_n]], + 'faces': unflatten_faces( + cur_part_f_samples_np['faces'][i_n][:cur_part_f_samples_np['num_face_indices'][i_n]]) + } + ) + tot_mesh_list.append(mesh_list) + + for i_n in range(tot_n_samples): + cur_mesh = mesh_list[i_n] + cur_mesh_vertices, cur_mesh_faces = cur_mesh['vertices'], cur_mesh['faces'] + # cur_mesh_sv_fn = os.path.join("./meshes", f"training_step_{cur_step}_part_{i_p}_ins_{i_n}.obj") + cur_mesh_sv_fn = os.path.join(sv_mesh_folder, f"step_{cur_step}_part_{i_p}_ins_{i_n}.obj") + print(f"saving to {cur_mesh_sv_fn}, nn_verts: {cur_mesh_vertices.shape[0]}, nn_faces: {len(cur_mesh_faces)}") + if cur_mesh_vertices.shape[0] > 0 and len(cur_mesh_faces) > 0: + write_obj(cur_mesh_vertices, cur_mesh_faces, cur_mesh_sv_fn, transpose=True, scale=1.) + + \ No newline at end of file diff --git a/models/dataset.py b/models/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..2b4a41f1020ff40907929c9bdf08072f7e8dccbf --- /dev/null +++ b/models/dataset.py @@ -0,0 +1,359 @@ +import torch +import torch.nn.functional as F +import cv2 as cv +import numpy as np +import os +from glob import glob +from icecream import ic +from scipy.spatial.transform import Rotation as Rot +from scipy.spatial.transform import Slerp + + +# This function is borrowed from IDR: https://github.com/lioryariv/idr +def load_K_Rt_from_P(filename, P=None): + if P is None: + lines = open(filename).read().splitlines() + if len(lines) == 4: + lines = lines[1:] + lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)] + P = np.asarray(lines).astype(np.float32).squeeze() + + out = cv.decomposeProjectionMatrix(P) + K = out[0] + R = out[1] + t = out[2] + + K = K / K[2, 2] + intrinsics = np.eye(4) + intrinsics[:3, :3] = K + + pose = np.eye(4, dtype=np.float32) + pose[:3, :3] = R.transpose() + pose[:3, 3] = (t[:3] / t[3])[:, 0] + + return intrinsics, pose + +def filter_iamges_via_pixel_values(data_dir): + images_lis = sorted(glob(os.path.join(data_dir, 'image/*.png'))) ## images lis ## + n_images = len(images_lis) + images_np = np.stack([cv.imread(im_name) for im_name in images_lis]) / 255.0 + print(f"images_np: {images_np.shape}") + # nn_frames x res x res x 3 # + images_np = 1. 
- images_np + has_density_values = (np.sum(images_np, axis=-1) > 0.7).astype(np.float32) + has_density_values = np.sum(np.sum(has_density_values, axis=-1), axis=-1) + tot_res_nns = float(images_np.shape[1] * images_np.shape[2]) + has_density_ratio = has_density_values / tot_res_nns ### has density ratio and ratio # + print(f"has_density_values: {has_density_values.shape}") + paried_has_density_ratio_list = [(i_fr, has_density_ratio[i_fr].item()) for i_fr in range(has_density_ratio.shape[0])] + paried_has_density_ratio_list = sorted(paried_has_density_ratio_list, key=lambda ii: ii[1], reverse=True) + mid_rnk_value = len(paried_has_density_ratio_list) // 4 + print(f"mid value of the density ratio") + print(paried_has_density_ratio_list[mid_rnk_value]) + iamge_idx = paried_has_density_ratio_list[mid_rnk_value][0] + print(f"iamge idx: {images_lis[iamge_idx]}") + print(paried_has_density_ratio_list[:mid_rnk_value]) + tot_selected_img_idx_list = [ii[0] for ii in paried_has_density_ratio_list[:mid_rnk_value]] + tot_selected_img_idx_list =sorted(tot_selected_img_idx_list) + print(len(tot_selected_img_idx_list)) + # print(tot_selected_img_idx_list[54]) + print(tot_selected_img_idx_list) + + + +class Dataset: + def __init__(self, conf): + super(Dataset, self).__init__() + print('Load data: Begin') + self.device = torch.device('cuda') + self.conf = conf + + self.selected_img_idxes_list = [0, 1, 5, 6, 7, 8, 9, 13, 14, 15, 35, 36, 42, 43, 44, 48, 49, 50, 51, 55, 56, 57, 61, 62, 63, 69, 84, 90, 91, 92, 96, 97] + # self.selected_img_idxes_list = [0, 1, 5, 6, 7, 8, 9, 12, 13, 14, 15, 20, 21, 22, 23, 26, 27, 28, 29, 35, 36, 37, 40, 41, 70, 71, 79, 82, 83, 84, 85, 92, 93, 96, 97, 98, 99, 105, 106, 107, 110, 111, 112, 113, 118, 119, 120, 121, 124, 125, 133, 134, 135, 139, 174, 175, 176, 177, 180, 188, 189, 190, 191, 194, 195] + + self.selected_img_idxes_list = [0, 1, 6, 7, 8, 9, 12, 13, 14, 15, 20, 21, 22, 23, 26, 27, 36, 40, 41, 70, 71, 78, 82, 83, 84, 85, 90, 91, 92, 93, 96, 97] + + self.selected_img_idxes_list = [0, 1, 6, 7, 8, 9, 12, 13, 14, 15, 20, 21, 22, 23, 26, 27, 36, 40, 41, 70, 71, 78, 82, 83, 84, 85, 90, 91, 92, 93, 96, 97, 98, 99, 104, 105, 106, 107, 110, 111, 112, 113, 118, 119, 120, 121, 124, 125, 134, 135, 139, 174, 175, 176, 177, 180, 181, 182, 183, 188, 189, 190, 191, 194, 195] + + self.selected_img_idxes_list = [0, 1, 6, 7, 8, 9, 12, 13, 14, 20, 21, 22, 23, 26, 27, 70, 78, 83, 84, 85, 91, 92, 93, 96, 97, 98, 99, 105, 106, 107, 110, 111, 112, 113, 119, 120, 121, 124, 125, 175, 176, 181, 182, 188, 189, 190, 191, 194, 195] + # or the timestep to the dataset instance ## # selected img idxes list # + self.selected_img_idxes = np.array(self.selected_img_idxes_list).astype(np.int32) + + + + + + self.data_dir = conf.get_string('data_dir') + self.render_cameras_name = conf.get_string('render_cameras_name') + self.object_cameras_name = conf.get_string('object_cameras_name') + + ## camera outside sphere ## + self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True) + self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1) + + camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name)) + # camera_dict = np.load("/home/xueyi/diffsim/NeuS/public_data/dtu_scan24/cameras_sphere.npz") + self.camera_dict = camera_dict # rendr camera dict # + # render camera dict # # number of pixels in the views -> very thin geometry is not useful + self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png'))) + + # iamges_lis # and the images_lis and the 
images_lis # + # self.images_lis = self.images_lis[:1] # totoal views and poses of the camera; # and select cameras for rendering # + + self.n_images = len(self.images_lis) + self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0 + + + self.selected_img_idxes_list = list(range(self.images_np.shape[0])) + self.selected_img_idxes = np.array(self.selected_img_idxes_list).astype(np.int32) + + self.images_np = self.images_np[self.selected_img_idxes] ## get selected iamges_np # + + ### if we deal with the backgound carefully ### ### get + self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 255.0 + self.images_np = self.images_np[self.selected_img_idxes] + self.images_np = 1. - self.images_np ### + + + self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png'))) + + # self.masks_lis = self.masks_lis[:1] + + try: + self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0 + self.masks_np = self.masks_np[self.selected_img_idxes] + except: + self.masks_np = self.images_np.copy() + + + + + + + # world_mat is a projection matrix from world to image + self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)] + + self.scale_mats_np = [] + + # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin. + self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)] + + self.intrinsics_all = [] + self.pose_all = [] + + # for idx, (scale_mat, world_mat) in enumerate(zip(self.scale_mats_np, self.world_mats_np)): + for idx in self.selected_img_idxes_list: + scale_mat = self.scale_mats_np[idx] + world_mat = self.world_mats_np[idx] + + if "hand" in self.data_dir: + intrinsics = np.eye(4) + fov = 512. / 2. # * 2 + res = 512. 
+ intrinsics[:3, :3] = np.array([ + [fov, 0, 0.5* res], # res # + [0, fov, 0.5* res], # res # + [0, 0, 1] + ], dtype=np.float32) + pose = camera_dict['camera_mat_%d' % idx].astype(np.float32) + else: + P = world_mat @ scale_mat + P = P[:3, :4] + intrinsics, pose = load_K_Rt_from_P(None, P) + + self.intrinsics_all.append(torch.from_numpy(intrinsics).float()) + self.pose_all.append(torch.from_numpy(pose).float()) + + ### images, masks, + self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3] # + self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3] # + self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4] # optimize sdf field # rigid model hand + self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4] + self.focal = self.intrinsics_all[0][0, 0] + self.pose_all = torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4] + self.H, self.W = self.images.shape[1], self.images.shape[2] + self.image_pixels = self.H * self.W + + object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0]) + object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0]) + # Object scale mat: region of interest to **extract mesh** + object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0'] + object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None] + object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None] + self.object_bbox_min = object_bbox_min[:3, 0] + self.object_bbox_max = object_bbox_max[:3, 0] + + self.n_images = self.images.size(0) + + print('Load data: End') + + def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'): + i, j = torch.meshgrid( # meshgrid # + torch.linspace(0, W-1, W, device=c2w.device), + torch.linspace(0, H-1, H, device=c2w.device)) + i = i.t().float() + j = j.t().float() + if mode == 'lefttop': + pass + elif mode == 'center': + i, j = i+0.5, j+0.5 + elif mode == 'random': + i = i+torch.rand_like(i) + j = j+torch.rand_like(j) + else: + raise NotImplementedError + + if flip_x: + i = i.flip((1,)) + if flip_y: + j = j.flip((0,)) + if inverse_y: + dirs = torch.stack([(i-K[0][2])/K[0][0], (j-K[1][2])/K[1][1], torch.ones_like(i)], -1) + else: + dirs = torch.stack([(i-K[0][2])/K[0][0], -(j-K[1][2])/K[1][1], -torch.ones_like(i)], -1) + # Rotate ray directions from camera frame to the world frame + rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1) # dot product, equals to: [c2w.dot(dir) for dir in dirs] + # Translate camera frame's origin to the world frame. It is the origin of all rays. + rays_o = c2w[:3,3].expand(rays_d.shape) + return rays_o, rays_d + + def gen_rays_at(self, img_idx, resolution_level=1): + """ + Generate rays at world space from one camera. + """ + l = resolution_level + tx = torch.linspace(0, self.W - 1, self.W // l) + ty = torch.linspace(0, self.H - 1, self.H // l) + pixels_x, pixels_y = torch.meshgrid(tx, ty) + + ##### previous method ##### + # p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3 + # # p = torch.stack([pixels_x, pixels_y, -1. 
* torch.ones_like(pixels_y)], dim=-1) # W, H, 3 + # p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3 + # rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3 + # rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3 + # rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3 + ##### previous method ##### + + fov = 512.; res = 512. + K = np.array([ + [fov, 0, 0.5* res], + [0, fov, 0.5* res], + [0, 0, 1] + ], dtype=np.float32) + K = torch.from_numpy(K).float().cuda() + + + # ### `center` mode ### # + c2w = self.pose_all[img_idx] + pixels_x, pixels_y = pixels_x+0.5, pixels_y+0.5 + + dirs = torch.stack([(pixels_x-K[0][2])/K[0][0], -(pixels_y-K[1][2])/K[1][1], -torch.ones_like(pixels_x)], -1) + rays_v = torch.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1) + rays_o = c2w[:3,3].expand(rays_v.shape) + # dirs = torch.stack([(i-K[0][2])/K[0][0], -(j-K[1][2])/K[1][1], -torch.ones_like(i)], -1) + + # p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3 + # # p = torch.stack([pixels_x, pixels_y, -1. * torch.ones_like(pixels_y)], dim=-1) # W, H, 3 + # p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3 + # rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3 + # rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3 + # rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3 + return rays_o.transpose(0, 1), rays_v.transpose(0, 1) + + def gen_random_rays_at(self, img_idx, batch_size): + """ + Generate random rays at world space from one camera. + """ + pixels_x = torch.randint(low=0, high=self.W, size=[batch_size]) + pixels_y = torch.randint(low=0, high=self.H, size=[batch_size]) + color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3 + + mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3 + + + ##### previous method ##### + # p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float() # batch_size, 3 + # # p = torch.stack([pixels_x, pixels_y, -1. * torch.ones_like(pixels_y)], dim=-1).float() # batch_size, 3 + # p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3 + # rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3 + # rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3 + # rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3 + ##### previous method ##### + + fov = 512.; res = 512. + K = np.array([ + [fov, 0, 0.5* res], + [0, fov, 0.5* res], + [0, 0, 1] + ], dtype=np.float32) + K = torch.from_numpy(K).float().cuda() + + + # ### `center` mode ### # + c2w = self.pose_all[img_idx] + pixels_x, pixels_y = pixels_x+0.5, pixels_y+0.5 + + dirs = torch.stack([(pixels_x-K[0][2])/K[0][0], -(pixels_y-K[1][2])/K[1][1], -torch.ones_like(pixels_x)], -1) + rays_v = torch.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1) + rays_o = c2w[:3,3].expand(rays_v.shape) + + + return torch.cat([rays_o.cpu(), rays_v.cpu(), color, mask[:, :1]], dim=-1).cuda() # batch_size, 10 + + def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1): + """ + Interpolate pose between two cameras. 
+ """ + l = resolution_level + tx = torch.linspace(0, self.W - 1, self.W // l) + ty = torch.linspace(0, self.H - 1, self.H // l) + pixels_x, pixels_y = torch.meshgrid(tx, ty) + p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3 + p = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3 + rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3 + trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio + pose_0 = self.pose_all[idx_0].detach().cpu().numpy() + pose_1 = self.pose_all[idx_1].detach().cpu().numpy() + pose_0 = np.linalg.inv(pose_0) + pose_1 = np.linalg.inv(pose_1) + rot_0 = pose_0[:3, :3] + rot_1 = pose_1[:3, :3] + rots = Rot.from_matrix(np.stack([rot_0, rot_1])) + key_times = [0, 1] + slerp = Slerp(key_times, rots) + rot = slerp(ratio) + pose = np.diag([1.0, 1.0, 1.0, 1.0]) + pose = pose.astype(np.float32) + pose[:3, :3] = rot.as_matrix() + pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3] + pose = np.linalg.inv(pose) + rot = torch.from_numpy(pose[:3, :3]).cuda() + trans = torch.from_numpy(pose[:3, 3]).cuda() + rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3 + rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3 + return rays_o.transpose(0, 1), rays_v.transpose(0, 1) + + def near_far_from_sphere(self, rays_o, rays_d): + a = torch.sum(rays_d**2, dim=-1, keepdim=True) + b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True) + mid = 0.5 * (-b) / a + near = mid - 1.0 + far = mid + 1.0 + return near, far + + ## iamge_at ## + def image_at(self, idx, resolution_level): + if self.selected_img_idxes_list is not None: + img = cv.imread(self.images_lis[self.selected_img_idxes_list[idx]]) + else: + img = cv.imread(self.images_lis[idx]) + return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255) + + +if __name__=='__main__': + data_dir = "/data/datasets/genn/diffsim/diffredmax/save_res/goal_optimize_model_hand_sphere_test_obj_type_active_nfr_10_view_divide_0.5_n_views_7_three_planes_False_recon_dvgo_new_Nposes_7_routine_2" + data_dir = "/data/datasets/genn/diffsim/neus/public_data/hand_test" + data_dir = "/data2/datasets/diffsim/neus/public_data/hand_test_routine_2" + data_dir = "/data2/datasets/diffsim/neus/public_data/hand_test_routine_2_light_color" + filter_iamges_via_pixel_values(data_dir=data_dir) diff --git a/models/dataset_wtime.py b/models/dataset_wtime.py new file mode 100644 index 0000000000000000000000000000000000000000..743c94a20d2980bca240323e1c0398518744cd6e --- /dev/null +++ b/models/dataset_wtime.py @@ -0,0 +1,403 @@ +import torch +import torch.nn.functional as F +import cv2 as cv +import numpy as np +import os +from glob import glob +from icecream import ic +from scipy.spatial.transform import Rotation as Rot +from scipy.spatial.transform import Slerp + + +# This function is borrowed from IDR: https://github.com/lioryariv/idr +def load_K_Rt_from_P(filename, P=None): + if P is None: + lines = open(filename).read().splitlines() + if len(lines) == 4: + lines = lines[1:] + lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)] + P = np.asarray(lines).astype(np.float32).squeeze() + + out = cv.decomposeProjectionMatrix(P) + K = out[0] + R = out[1] + t = out[2] + + K = K / K[2, 2] + intrinsics = np.eye(4) + intrinsics[:3, :3] = K + + pose = np.eye(4, dtype=np.float32) + pose[:3, :3] = R.transpose() + pose[:3, 3] = (t[:3] / t[3])[:, 0] 
+ + return intrinsics, pose + +def filter_iamges_via_pixel_values(data_dir): + images_lis = sorted(glob(os.path.join(data_dir, 'image/*.png'))) ## images lis ## + n_images = len(images_lis) + images_np = np.stack([cv.imread(im_name) for im_name in images_lis]) / 255.0 + print(f"images_np: {images_np.shape}") + # nn_frames x res x res x 3 # + images_np = 1. - images_np + has_density_values = (np.sum(images_np, axis=-1) > 0.7).astype(np.float32) + has_density_values = np.sum(np.sum(has_density_values, axis=-1), axis=-1) + tot_res_nns = float(images_np.shape[1] * images_np.shape[2]) + has_density_ratio = has_density_values / tot_res_nns ### has density ratio and ratio # + print(f"has_density_values: {has_density_values.shape}") + paried_has_density_ratio_list = [(i_fr, has_density_ratio[i_fr].item()) for i_fr in range(has_density_ratio.shape[0])] + paried_has_density_ratio_list = sorted(paried_has_density_ratio_list, key=lambda ii: ii[1], reverse=True) + mid_rnk_value = len(paried_has_density_ratio_list) // 4 + print(f"mid value of the density ratio") + print(paried_has_density_ratio_list[mid_rnk_value]) + iamge_idx = paried_has_density_ratio_list[mid_rnk_value][0] + print(f"iamge idx: {images_lis[iamge_idx]}") + print(paried_has_density_ratio_list[:mid_rnk_value]) + tot_selected_img_idx_list = [ii[0] for ii in paried_has_density_ratio_list[:mid_rnk_value]] + tot_selected_img_idx_list =sorted(tot_selected_img_idx_list) + print(len(tot_selected_img_idx_list)) + # print(tot_selected_img_idx_list[54]) + print(tot_selected_img_idx_list) + + + +class Dataset: + def __init__(self, conf, time_idx, mode='train'): + super(Dataset, self).__init__() + print('Load data: Begin') + self.device = torch.device('cuda') + self.conf = conf + + self.selected_img_idxes_list = [0, 1, 5, 6, 7, 8, 9, 13, 14, 15, 35, 36, 42, 43, 44, 48, 49, 50, 51, 55, 56, 57, 61, 62, 63, 69, 84, 90, 91, 92, 96, 97] + # self.selected_img_idxes_list = [0, 1, 5, 6, 7, 8, 9, 12, 13, 14, 15, 20, 21, 22, 23, 26, 27, 28, 29, 35, 36, 37, 40, 41, 70, 71, 79, 82, 83, 84, 85, 92, 93, 96, 97, 98, 99, 105, 106, 107, 110, 111, 112, 113, 118, 119, 120, 121, 124, 125, 133, 134, 135, 139, 174, 175, 176, 177, 180, 188, 189, 190, 191, 194, 195] + + self.selected_img_idxes_list = [0, 1, 6, 7, 8, 9, 12, 13, 14, 15, 20, 21, 22, 23, 26, 27, 36, 40, 41, 70, 71, 78, 82, 83, 84, 85, 90, 91, 92, 93, 96, 97] + + self.selected_img_idxes_list = [0, 1, 6, 7, 8, 9, 12, 13, 14, 15, 20, 21, 22, 23, 26, 27, 36, 40, 41, 70, 71, 78, 82, 83, 84, 85, 90, 91, 92, 93, 96, 97, 98, 99, 104, 105, 106, 107, 110, 111, 112, 113, 118, 119, 120, 121, 124, 125, 134, 135, 139, 174, 175, 176, 177, 180, 181, 182, 183, 188, 189, 190, 191, 194, 195] + # selected img idxes list # + self.selected_img_idxes_list = [0, 1, 6, 7, 8, 9, 12, 13, 14, 20, 21, 22, 23, 26, 27, 70, 78, 83, 84, 85, 91, 92, 93, 96, 97, 98, 99, 105, 106, 107, 110, 111, 112, 113, 119, 120, 121, 124, 125, 175, 176, 181, 182, 188, 189, 190, 191, 194, 195] + # or the timestep to the dataset instance ## # selected img idxes list # + self.selected_img_idxes = np.array(self.selected_img_idxes_list).astype(np.int32) + + + + + + self.data_dir = conf.get_string('data_dir') + + self.data_dir = os.path.join(self.data_dir, f"{time_idx}") # the time_idx # + + self.render_cameras_name = conf.get_string('render_cameras_name') + self.object_cameras_name = conf.get_string('object_cameras_name') + + ## camera outside sphere ## + self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True) + self.scale_mat_scale = 
conf.get_float('scale_mat_scale', default=1.1) + + camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name)) + # camera_dict = np.load("/home/xueyi/diffsim/NeuS/public_data/dtu_scan24/cameras_sphere.npz") + self.camera_dict = camera_dict # rendr camera dict # + # render camera dict # # number of pixels in the views -> very thin geometry is not useful + self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png'))) + + # iamges_lis # and the images_lis and the images_lis # + # self.images_lis = self.images_lis[:1] # totoal views and poses of the camera; # and select cameras for rendering # + + self.n_images = len(self.images_lis) + + if mode == 'train_from_model_rules': + self.images_np = cv.imread(self.images_lis[0]) / 256.0 + print(self.images_np.shape) + self.images_np = np.reshape(self.images_np, (1, self.images_np.shape[0], self.images_np.shape[1], self.images_np.shape[2])) + self.images_np = [self.images_np for _ in range(len(self.images_lis))] + self.images_np = np.concatenate(self.images_np, axis=0) + else: + presaved_imags_npy_fn = os.path.join(self.data_dir, "processed_images.npy") + if not os.path.exists(presaved_imags_npy_fn): + self.images_np = [] + for i_im_idx, im_name in enumerate(self.images_lis): + print(f"loading {i_im_idx} / {len(self.images_lis)}") + cur_im = cv.imread(im_name) # for im_name in self.images_lis + self.images_np.append(cur_im) + self.images_np = np.stack(self.images_np) / 256.0 + np.save(presaved_imags_npy_fn, self.images_np) + else: + print(f"Loading from {presaved_imags_npy_fn}") + self.images_np = np.load(presaved_imags_npy_fn, allow_pickle=True) + + # self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0 + + + # self.selected_img_idxes_list = list(range(self.images_np.shape[0])) + # self.selected_img_idxes = np.array(self.selected_img_idxes_list).astype(np.int32) + + # get + self.images_np = self.images_np[self.selected_img_idxes] ## get selected iamges_np # + + ### if we deal with the backgound carefully ### ### get + # self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 255.0 + # self.images_np = self.images_np[self.selected_img_idxes] + self.images_np = 1. 
- self.images_np ### + + + + self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png'))) + + if mode == 'train_from_model_rules': + self.masks_np = cv.imread(self.masks_lis[0]) / 256.0 + print("masks shape:", self.masks_np.shape) + self.masks_np = np.reshape(self.masks_np, (1, self.masks_np.shape[0], self.masks_np.shape[1], self.masks_np.shape[2])) # .repeat(len(self.masks_lis), 1, 1) + self.masks_np = [self.masks_np for _ in range(len(self.masks_lis))] + self.masks_np = np.concatenate(self.masks_np, axis=0) + else: + presaved_masks_npy_fn = os.path.join(self.data_dir, "processed_masks.npy") + # self.masks_lis = self.masks_lis[:1] + + if not os.path.exists(presaved_masks_npy_fn): + try: + self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0 + self.masks_np = self.masks_np[self.selected_img_idxes] + except: + self.masks_np = self.images_np.copy() + np.save(presaved_masks_npy_fn, self.masks_np) + else: + print(f"Loading from {presaved_masks_npy_fn}") + self.masks_np = np.load(presaved_masks_npy_fn, allow_pickle=True) + + + + + + + # world_mat is a projection matrix from world to image + self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)] + + self.scale_mats_np = [] + + # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin. + self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)] + + self.intrinsics_all = [] + self.pose_all = [] + + # for idx, (scale_mat, world_mat) in enumerate(zip(self.scale_mats_np, self.world_mats_np)): + for idx in self.selected_img_idxes_list: + scale_mat = self.scale_mats_np[idx] + world_mat = self.world_mats_np[idx] + + if "hand" in self.data_dir: + intrinsics = np.eye(4) + fov = 512. / 2. # * 2 + res = 512. 
+ intrinsics[:3, :3] = np.array([ + [fov, 0, 0.5* res], # res # + [0, fov, 0.5* res], # res # + [0, 0, 1] + ], dtype=np.float32) + pose = camera_dict['camera_mat_%d' % idx].astype(np.float32) + else: + P = world_mat @ scale_mat + P = P[:3, :4] + intrinsics, pose = load_K_Rt_from_P(None, P) + + self.intrinsics_all.append(torch.from_numpy(intrinsics).float()) + self.pose_all.append(torch.from_numpy(pose).float()) + + ### images, masks, + self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3] # + self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3] # + self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4] # optimize sdf field # rigid model hand + self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4] + self.focal = self.intrinsics_all[0][0, 0] + self.pose_all = torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4] + self.H, self.W = self.images.shape[1], self.images.shape[2] + self.image_pixels = self.H * self.W + + object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0]) + object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0]) + # Object scale mat: region of interest to **extract mesh** + object_scale_mat = np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0'] + object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None] + object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None] + self.object_bbox_min = object_bbox_min[:3, 0] + self.object_bbox_max = object_bbox_max[:3, 0] + + self.n_images = self.images.size(0) + + print('Load data: End') + + def get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'): + i, j = torch.meshgrid( # meshgrid # + torch.linspace(0, W-1, W, device=c2w.device), + torch.linspace(0, H-1, H, device=c2w.device)) + i = i.t().float() + j = j.t().float() + if mode == 'lefttop': + pass + elif mode == 'center': + i, j = i+0.5, j+0.5 + elif mode == 'random': + i = i+torch.rand_like(i) + j = j+torch.rand_like(j) + else: + raise NotImplementedError + + if flip_x: + i = i.flip((1,)) + if flip_y: + j = j.flip((0,)) + if inverse_y: + dirs = torch.stack([(i-K[0][2])/K[0][0], (j-K[1][2])/K[1][1], torch.ones_like(i)], -1) + else: + dirs = torch.stack([(i-K[0][2])/K[0][0], -(j-K[1][2])/K[1][1], -torch.ones_like(i)], -1) + # Rotate ray directions from camera frame to the world frame + rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1) # dot product, equals to: [c2w.dot(dir) for dir in dirs] + # Translate camera frame's origin to the world frame. It is the origin of all rays. + rays_o = c2w[:3,3].expand(rays_d.shape) + return rays_o, rays_d + + def gen_rays_at(self, img_idx, resolution_level=1): + """ + Generate rays at world space from one camera. + """ + l = resolution_level + tx = torch.linspace(0, self.W - 1, self.W // l) + ty = torch.linspace(0, self.H - 1, self.H // l) + pixels_x, pixels_y = torch.meshgrid(tx, ty) + + ##### previous method ##### + # p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3 + # # p = torch.stack([pixels_x, pixels_y, -1. 
* torch.ones_like(pixels_y)], dim=-1) # W, H, 3 + # p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3 + # rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3 + # rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3 + # rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3 + ##### previous method ##### + + fov = 512.; res = 512. + K = np.array([ + [fov, 0, 0.5* res], + [0, fov, 0.5* res], + [0, 0, 1] + ], dtype=np.float32) + K = torch.from_numpy(K).float().cuda() + + + # ### `center` mode ### # + c2w = self.pose_all[img_idx] + pixels_x, pixels_y = pixels_x+0.5, pixels_y+0.5 + + dirs = torch.stack([(pixels_x-K[0][2])/K[0][0], -(pixels_y-K[1][2])/K[1][1], -torch.ones_like(pixels_x)], -1) + rays_v = torch.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1) + rays_o = c2w[:3,3].expand(rays_v.shape) + # dirs = torch.stack([(i-K[0][2])/K[0][0], -(j-K[1][2])/K[1][1], -torch.ones_like(i)], -1) + + # p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3 + # # p = torch.stack([pixels_x, pixels_y, -1. * torch.ones_like(pixels_y)], dim=-1) # W, H, 3 + # p = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3 + # rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3 + # rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3 + # rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3 + return rays_o.transpose(0, 1), rays_v.transpose(0, 1) + + def gen_random_rays_at(self, img_idx, batch_size): + """ + Generate random rays at world space from one camera. + """ + img_idx = img_idx.cpu() + pixels_x = torch.randint(low=0, high=self.W, size=[batch_size]).cpu() + pixels_y = torch.randint(low=0, high=self.H, size=[batch_size]).cpu() + + # print(self.images.device, img_idx.device, pixels_y.device) + color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3 + + mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3 + + + ##### previous method ##### + # p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float() # batch_size, 3 + # # p = torch.stack([pixels_x, pixels_y, -1. * torch.ones_like(pixels_y)], dim=-1).float() # batch_size, 3 + # p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3 + # rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3 + # rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3 + # rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3 + ##### previous method ##### + + fov = 512.; res = 512. 
+ K = np.array([ + [fov, 0, 0.5* res], + [0, fov, 0.5* res], + [0, 0, 1] + ], dtype=np.float32) + K = torch.from_numpy(K).float().cuda() + + + # ### `center` mode ### # + c2w = self.pose_all[img_idx] + + pixels_x = pixels_x.cuda() + pixels_y = pixels_y.cuda() + pixels_x, pixels_y = pixels_x+0.5, pixels_y+0.5 + + dirs = torch.stack([(pixels_x-K[0][2])/K[0][0], -(pixels_y-K[1][2])/K[1][1], -torch.ones_like(pixels_x)], -1) + rays_v = torch.sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1) + rays_o = c2w[:3,3].expand(rays_v.shape) + + + return torch.cat([rays_o.cpu(), rays_v.cpu(), color, mask[:, :1]], dim=-1).cuda() # batch_size, 10 + + def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1): + """ + Interpolate pose between two cameras. + """ + l = resolution_level + tx = torch.linspace(0, self.W - 1, self.W // l) + ty = torch.linspace(0, self.H - 1, self.H // l) + pixels_x, pixels_y = torch.meshgrid(tx, ty) + p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3 + p = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3 + rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # W, H, 3 + trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio + pose_0 = self.pose_all[idx_0].detach().cpu().numpy() + pose_1 = self.pose_all[idx_1].detach().cpu().numpy() + pose_0 = np.linalg.inv(pose_0) + pose_1 = np.linalg.inv(pose_1) + rot_0 = pose_0[:3, :3] + rot_1 = pose_1[:3, :3] + rots = Rot.from_matrix(np.stack([rot_0, rot_1])) + key_times = [0, 1] + slerp = Slerp(key_times, rots) + rot = slerp(ratio) + pose = np.diag([1.0, 1.0, 1.0, 1.0]) + pose = pose.astype(np.float32) + pose[:3, :3] = rot.as_matrix() + pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3] + pose = np.linalg.inv(pose) + rot = torch.from_numpy(pose[:3, :3]).cuda() + trans = torch.from_numpy(pose[:3, 3]).cuda() + rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3 + rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3 + return rays_o.transpose(0, 1), rays_v.transpose(0, 1) + + def near_far_from_sphere(self, rays_o, rays_d): + a = torch.sum(rays_d**2, dim=-1, keepdim=True) + b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True) + mid = 0.5 * (-b) / a + near = mid - 1.0 + far = mid + 1.0 + return near, far + + def image_at(self, idx, resolution_level): + if self.selected_img_idxes_list is not None: + img = cv.imread(self.images_lis[self.selected_img_idxes_list[idx]]) + else: + img = cv.imread(self.images_lis[idx]) + return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255) + + +if __name__=='__main__': + data_dir = "/data/datasets/genn/diffsim/diffredmax/save_res/goal_optimize_model_hand_sphere_test_obj_type_active_nfr_10_view_divide_0.5_n_views_7_three_planes_False_recon_dvgo_new_Nposes_7_routine_2" + data_dir = "/data/datasets/genn/diffsim/neus/public_data/hand_test" + data_dir = "/data2/datasets/diffsim/neus/public_data/hand_test_routine_2" + data_dir = "/data2/datasets/diffsim/neus/public_data/hand_test_routine_2_light_color" + filter_iamges_via_pixel_values(data_dir=data_dir) diff --git a/models/dyn_model_act.py b/models/dyn_model_act.py new file mode 100644 index 0000000000000000000000000000000000000000..176107bd15c56def381fb2dfa4d2d683c908018f --- /dev/null +++ b/models/dyn_model_act.py @@ -0,0 +1,2178 @@ + +import math +# import torch +# from ..utils import Timer +import numpy as np +# import 
torch.nn.functional as F +import os + +import argparse + +from xml.etree.ElementTree import ElementTree + +import trimesh +import torch +import torch.nn as nn +# import List +# class link; joint; body +### + +from scipy.spatial.transform import Rotation as R + + +DAMPING = 0.3 + +# DAMPING = 0.2 + +DAMPING = 0.0 + + +def get_body_name_to_main_axis(): + # negative y; positive x # + body_name_to_main_axis = { + "body2": -2, "body6": 1, "body10": 1, "body14": 1, "body17": 1 + } + return body_name_to_main_axis ## get the body name to main axis ## + +## insert one +def plane_rotation_matrix_from_angle_xz(angle): + ## angle of + sin_ = torch.sin(angle) + cos_ = torch.cos(angle) + zero_padding = torch.zeros_like(cos_) + one_padding = torch.ones_like(cos_) + col_a = torch.stack( + [cos_, zero_padding, sin_], dim=0 + ) + col_b = torch.stack( + [zero_padding, one_padding, zero_padding], dim=0 + ) + col_c = torch.stack( + [-1. * sin_, zero_padding, cos_], dim=0 + ) + rot_mtx = torch.stack( + [col_a, col_b, col_c], dim=-1 + ) + return rot_mtx + +def plane_rotation_matrix_from_angle(angle): + ## angle of + sin_ = torch.sin(angle) + cos_ = torch.cos(angle) + col_a = torch.stack( + [cos_, sin_], dim=0 ### col of the rotation matrix + ) + col_b = torch.stack( + [-1. * sin_, cos_], dim=0 ## cols of the rotation matrix + ) + rot_mtx = torch.stack( + [col_a, col_b], dim=-1 ### rotation matrix + ) + return rot_mtx + +def rotation_matrix_from_axis_angle(axis, angle): # rotation_matrix_from_axis_angle -> + # sin_ = np.sin(angle) # ti.math.sin(angle) + # cos_ = np.cos(angle) # ti.math.cos(angle) + sin_ = torch.sin(angle) # ti.math.sin(angle) + cos_ = torch.cos(angle) # ti.math.cos(angle) + u_x, u_y, u_z = axis[0], axis[1], axis[2] + u_xx = u_x * u_x + u_yy = u_y * u_y + u_zz = u_z * u_z + u_xy = u_x * u_y + u_xz = u_x * u_z + u_yz = u_y * u_z + + row_a = torch.stack( + [cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], dim=0 + ) + # print(f"row_a: {row_a.size()}") + row_b = torch.stack( + [u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dim=0 + ) + # print(f"row_b: {row_b.size()}") + row_c = torch.stack( + [u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], dim=0 + ) + # print(f"row_c: {row_c.size()}") + + ### rot_mtx for the rot_mtx ### + rot_mtx = torch.stack( + [row_a, row_b, row_c], dim=-1 ### rot_matrix of he matrix ## + ) + + return rot_mtx + + +def update_quaternion(delta_angle, prev_quat): + s1 = 0 + s2 = prev_quat[0] + v2 = prev_quat[1:] + v1 = delta_angle / 2 + new_v = s1 * v2 + s2 * v1 + torch.cross(v1, v2) + new_s = s1 * s2 - torch.sum(v1 * v2) + new_quat = torch.cat([new_s.unsqueeze(0), new_v], dim=0) + return new_quat + + +def euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str) -> torch.Tensor: + """ + Convert rotations given as Euler angles in radians to rotation matrices. + + Args: + euler_angles: Euler angles in radians as tensor of shape (..., 3). + convention: Convention string of three uppercase letters from + {"X", "Y", and "Z"}. + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). 
+ """ + if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3: + raise ValueError("Invalid input euler angles.") + if len(convention) != 3: + raise ValueError("Convention must have 3 letters.") + if convention[1] in (convention[0], convention[2]): + raise ValueError(f"Invalid convention {convention}.") + for letter in convention: + if letter not in ("X", "Y", "Z"): + raise ValueError(f"Invalid letter {letter} in convention string.") + matrices = [ + _axis_angle_rotation(c, e) + for c, e in zip(convention, torch.unbind(euler_angles, -1)) + ] + # return functools.reduce(torch.matmul, matrices) + return torch.matmul(torch.matmul(matrices[0], matrices[1]), matrices[2]) + + +def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as quaternions to rotation matrices. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + r, i, j, k = torch.unbind(quaternions, -1) # -1 for the quaternion matrix # + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. + two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +## the optimization strategy: incremental optimization ## +class Joint: + def __init__(self, name, joint_type, axis, pos, quat, frame, damping, args) -> None: + self.name = name + self.type = joint_type + self.axis = axis # joint axis # + self.pos = pos # joint position # + self.quat = quat + self.frame = frame + self.damping = damping + + self.args = args + + # self.timestep_to_actions = {} # torques # + self.timestep_to_vels = {} + self.timestep_to_states = {} + + self.init_pos = self.pos.clone() + + #### only for the current state #### + self.state = nn.Parameter( + torch.tensor([1., 0., 0., 0.], dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True + ) + self.action = nn.Parameter( + torch.zeros((1,), dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True + ) + # self.rot_mtx = np.eye(3, dtypes=np.float32) + # self.trans_vec = np.zeros((3,), dtype=np.float32) ## rot m + self.rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True) + self.trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True) + # self.rot_mtx = np.eye(3, dtype=np.float32) + # self.trans_vec = np.zeros((3,), dtype=np.float32) + + self.axis_rot_mtx = torch.tensor( + [ + [1, 0, 0], [0, -1, 0], [0, 0, -1] + ], dtype=torch.float32 + ).cuda() + + self.joint_idx = -1 + + self.transformed_joint_pts = self.pos.clone() + + def print_grads(self, ): + print(f"rot_mtx: {self.rot_mtx.grad}") + print(f"trans_vec: {self.trans_vec.grad}") + + def clear_grads(self,): + if self.rot_mtx.grad is not None: + self.rot_mtx.grad.data = self.rot_mtx.grad.data * 0. + if self.trans_vec.grad is not None: + self.trans_vec.grad.data = self.trans_vec.grad.data * 0. 
+ + def compute_transformation(self,): + # use the state to transform them # # transform # ## transform the state ## + # use the state to transform them # # transform them for the state # + if self.type == "revolute": + # print(f"computing transformation matrices with axis: {self.axis}, state: {self.state}") + # rotation matrix from the axis angle # + rot_mtx = rotation_matrix_from_axis_angle(self.axis, self.state) + # rot_mtx(p - p_v) + p_v -> rot_mtx p - rot_mtx p_v + p_v + # trans_vec = self.pos - np.matmul(rot_mtx, self.pos.reshape(3, 1)).reshape(3) + # self.rot_mtx = np.copy(rot_mtx) + # self.trans_vec = np.copy(trans_vec) + trans_vec = self.pos - torch.matmul(rot_mtx, self.pos.view(3, 1)).view(3).contiguous() + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec + else: + ### TODO: implement transformations for joints in other types ### + pass + + + def set_state(self, name_to_state): + if self.name in name_to_state: + # self.state = name_to_state["name"] + self.state = name_to_state[self.name] ## + + def set_state_via_vec(self, state_vec): ### transform points via the state vectors here ### + if self.joint_idx >= 0: + self.state = state_vec[self.joint_idx] ## give the parameter to the parameters ## + + def set_joint_idx(self, joint_name_to_idx): + if self.name in joint_name_to_idx: + self.joint_idx = joint_name_to_idx[self.name] + + + def set_args(self, args): + self.args = args + + + def compute_transformation_via_state_vals(self, state_vals): + if self.joint_idx >= 0: + cur_joint_state = state_vals[self.joint_idx] + else: + cur_joint_state = self.state + # use the state to transform them # # transform # ## transform the state ## + # use the state to transform them # # transform them for the state # + if self.type == "revolute": + # print(f"computing transformation matrices with axis: {self.axis}, state: {self.state}") + # rotation matrix from the axis angle # + rot_mtx = rotation_matrix_from_axis_angle(self.axis, cur_joint_state) + # rot_mtx(p - p_v) + p_v -> rot_mtx p - rot_mtx p_v + p_v + # trans_vec = self.pos - np.matmul(rot_mtx, self.pos.reshape(3, 1)).reshape(3) + # self.rot_mtx = np.copy(rot_mtx) + # self.trans_vec = np.copy(trans_vec) + trans_vec = self.pos - torch.matmul(rot_mtx, self.pos.view(3, 1)).view(3).contiguous() + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec + elif self.type == "free2d": + cur_joint_state = state_vals # still only for the current scene # + # cur_joint_state + cur_joint_rot_val = state_vals[2] + rot_mtx = plane_rotation_matrix_from_angle_xz(cur_joint_rot_val) + # rot_mtx = plane_rotation_matrix_from_angle(cur_joint_rot_val) ### 2 x 2 rot matrix # + # R_axis^T ( R R_axis (p) + trans (with the y-axis padded) ) + cur_trans_vec = torch.stack( + [state_vals[0], torch.zeros_like(state_vals[0]), state_vals[1]], dim=0 + ) + # cur_trans_vec # + rot_mtx = torch.matmul(self.axis_rot_mtx.transpose(1, 0), torch.matmul(rot_mtx, self.axis_rot_mtx)) + trans_vec = torch.matmul(self.axis_rot_mtx.transpose(1, 0), cur_trans_vec.unsqueeze(-1).contiguous()).squeeze(-1).contiguous() + self.pos + + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec ## rot_mtx and trans_vec # + else: + ### TODO: implement transformations for joints in other types ### + pass + return self.rot_mtx, self.trans_vec + + + def compute_transformation_from_current_state(self): + # if self.joint_idx >= 0: + # cur_joint_state = state_vals[self.joint_idx] + # else: + cur_joint_state = self.state + if self.type == "revolute": + # print(f"computing transformation matrices with axis: 
{self.axis}, state: {self.state}") + # rotation matrix from the axis angle # + # rot_mtx = rotation_matrix_from_axis_angle(self.axis, cur_joint_state) + # rot_mtx(p - p_v) + p_v -> rot_mtx p - rot_mtx p_v + p_v + # trans_vec = self.pos - np.matmul(rot_mtx, self.pos.reshape(3, 1)).reshape(3) + # self.rot_mtx = np.copy(rot_mtx) + # self.trans_vec = np.copy(trans_vec) + rot_mtx = quaternion_to_matrix(self.state) + # print(f"state: {self.state}, rot_mtx: {rot_mtx}") + # trans_vec = self.pos - torch.matmul(rot_mtx, self.pos.view(3, 1)).view(3).contiguous() + trans_vec = self.init_pos - torch.matmul(rot_mtx, self.init_pos.view(3, 1)).view(3).contiguous() + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec + elif self.type == "free2d": + state_vals = cur_joint_state + cur_joint_state = state_vals # still only for the current scene # + # cur_joint_state + cur_joint_rot_val = state_vals[2] + ### rot_mtx ### ### rot_mtx ### + rot_mtx = plane_rotation_matrix_from_angle_xz(cur_joint_rot_val) + # rot_mtx = plane_rotation_matrix_from_angle(cur_joint_rot_val) ### 2 x 2 rot matrix # + # R_axis^T ( R R_axis (p) + trans (with the y-axis padded) ) + cur_trans_vec = torch.stack( + [state_vals[0], torch.zeros_like(state_vals[0]), state_vals[1]], dim=0 + ) + # cur_trans_vec # + rot_mtx = torch.matmul(self.axis_rot_mtx.transpose(1, 0), torch.matmul(rot_mtx, self.axis_rot_mtx)) + trans_vec = torch.matmul(self.axis_rot_mtx.transpose(1, 0), cur_trans_vec.unsqueeze(-1).contiguous()).squeeze(-1).contiguous() + self.pos + + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec + else: + ### TODO: implement transformations for joints in other types ### + pass + return self.rot_mtx, self.trans_vec + + + + def transform_joints_via_parent_rot_trans_infos(self, parent_rot_mtx, parent_trans_vec): + # + # if self.type == "revolute" or self.type == "free2d": + transformed_joint_pts = torch.matmul(parent_rot_mtx, self.pos.view(3 ,1).contiguous()).view(3).contiguous() + parent_trans_vec + self.pos = torch.matmul(parent_rot_mtx, self.init_pos.view(3 ,1).contiguous()).view(3).contiguous() + parent_trans_vec + + return self.pos + + + + +# initialize the robot with states set to zeros # +# update the robot via states # +# set a new action # +# update states via actions # +# update robot (visual points and parameters) via states # + + +## transform from the root of the robot; pass qs from the root to the leaf node ## +## visual meshes or visual meshes from the basic description of robots ## +## visual meshes; or visual points ## +## visual meshes -> transform them into the visual density values here ## +## visual meshes -> transform them into the ## into the visual counterparts ## +## ## visual meshes -> ## ## ## +# +class Body: + def __init__(self, name, body_type, filename, pos, quat, transform_type, density, mu, rgba, radius, args) -> None: + self.name = name + self.body_type = body_type + ### for mesh object ### + self.filename = filename + self.args = args + + self.pos = pos + self.quat = quat + self.transform_type = transform_type + self.density = density + self.mu = mu + self.rgba = rgba + + # + self.radius = radius + + self.visual_pts_ref = None + self.visual_faces_ref = None + + self.visual_pts = None + + self.body_name_to_main_axis = get_body_name_to_main_axis() ### get the body name to main axis here # + + self.get_visual_counterparts() + # inertial_ref, inertial_ref_inv + + def get_visual_faces_list(self, visual_faces_list): + visual_faces_list.append(self.visual_faces_ref) + return visual_faces_list + + def 
compute_inertial_inv(self, rot_mtx): + cur_inertia_inv = torch.matmul( + rot_mtx, torch.matmul(self.inertial_ref_inv, rot_mtx.transpose(1, 0).contiguous()) ### passive obj rot transpose + ) + self.cur_inertial_inv = cur_inertia_inv + return cur_inertia_inv + + def compute_inertia(self, rot_mtx): + cur_inertia = torch.matmul( + rot_mtx, torch.matmul(self.inertial_ref, rot_mtx.transpose(1, 0)) + ) + self.cur_inertia = cur_inertia + return cur_inertia + + def update_radius(self,): + self.radius.data = self.radius.data - self.radius.grad.data + + self.radius.grad.data = self.radius.grad.data * 0. + + + ### get visual pts colorrs ### ### + def get_visual_pts_colors(self, ): + tot_visual_pts_nn = self.visual_pts_ref.size(0) + # self.pts_rgba = [torch.from_numpy(self.rgba).float().cuda(self.args.th_cuda_idx) for _ in range(tot_visual_pts_nn)] # total visual pts nn + self.pts_rgba = [torch.tensor(self.rgba.data).cuda() for _ in range(tot_visual_pts_nn)] # total visual pts nn skeletong + self.pts_rgba = torch.stack(self.pts_rgba, dim=0) # + return self.pts_rgba + ## optimize the action sequneces ## + def get_visual_counterparts(self,): + if self.body_type == "sphere": + filename = "/home/xueyi/diffsim/DiffHand/examples/save_res/hand_sphere_demo/meshes/18.obj" + if not os.path.exists(filename): + filename = "/data/xueyi/diffsim/DiffHand/assets/18.obj" + body_mesh = trimesh.load(filename, process=False) + elif self.body_type == "mesh": + filename = self.filename + if "shadow" in xml_fn: + rt_asset_path = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description" + else: + rt_asset_path = "/home/xueyi/diffsim/DiffHand/assets" + if not os.path.exists(rt_asset_path): + rt_asset_path = "/data/xueyi/diffsim/DiffHand/assets" + + filename = os.path.join(rt_asset_path, filename) # + body_mesh = trimesh.load(filename, process=False) + elif self.body_type == "abstract": + body_mesh = trimesh.Trimesh(vertices=np.empty((0, 3), dtype=np.float32), faces=np.empty((0, 3), dtype=np.int32)) + # body_mesh = trimesh.load(filename, process=False) + + self.pos = nn.Parameter( + torch.tensor(self.pos.detach().cpu().tolist(), dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True + ) + + ### Step 1 ### -> set the pos to the correct initial pose ### + # self.radius = nn.Parameter( + # torch.tensor([self.args.initial_radius], dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True + # ) + self.radius = nn.Parameter( + torch.tensor([2.], dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True + ) + ### visual pts ref ### ## body_mesh.vertices -> # + self.visual_pts_ref = torch.tensor(body_mesh.vertices, dtype=torch.float32).cuda() + + self.visual_faces_ref = torch.tensor(body_mesh.faces, dtype=torch.long).cuda() + + minn_pts, _ = torch.min(self.visual_pts_ref, dim=0) ### get the visual pts minn ### + maxx_pts, _ = torch.max(self.visual_pts_ref, dim=0) ### visual pts maxx ### + mean_pts = torch.mean(self.visual_pts_ref, dim=0) ### mean_pts of the mean_pts ### + + if self.name in self.body_name_to_main_axis: + cur_main_axis = self.body_name_to_main_axis[self.name] ## get the body name ## + + if cur_main_axis == -2: + main_axis_pts = minn_pts[1] # the main axis pts + full_main_axis_pts = torch.tensor([mean_pts[0], main_axis_pts, mean_pts[2]], dtype=torch.float32).cuda() + elif cur_main_axis == 1: + main_axis_pts = maxx_pts[0] # the maxx axis pts + full_main_axis_pts = torch.tensor([main_axis_pts, mean_pts[1], mean_pts[2]], dtype=torch.float32).cuda() + self.full_main_axis_pts_ref = 
full_main_axis_pts + else: + self.full_main_axis_pts_ref = mean_pts.clone() + + + def transform_visual_pts_ref(self,): + if self.name == "sphere": + visual_pts_ref = self.visual_pts_ref / 2. # + visual_pts_ref = visual_pts_ref * self.radius + else: + visual_pts_ref = self.visual_pts_ref + return visual_pts_ref + + def transform_visual_pts(self, rot_mtx, trans_vec): + visual_pts_ref = self.transform_visual_pts_ref() + # rot_mtx: 3 x 3 numpy array + # trans_vec: 3 numpy array + # print(f"transforming body with rot_mtx: {rot_mtx} and trans_vec: {trans_vec}") + # self.visual_pts = np.matmul(rot_mtx, self.visual_pts_ref.T).T + trans_vec.reshape(1, 3) # reshape # + # print(f"rot_mtx: {rot_mtx}, trans_vec: {trans_vec}") + self.visual_pts = torch.matmul(rot_mtx, visual_pts_ref.transpose(1, 0)).transpose(1, 0) + trans_vec.unsqueeze(0) + + # full_main_axis_pts -> + self.full_main_axis_pts = torch.matmul(rot_mtx, self.full_main_axis_pts_ref.unsqueeze(-1)).contiguous().squeeze(-1) + trans_vec + self.full_main_axis_pts = self.full_main_axis_pts.unsqueeze(0) + + return self.visual_pts + + def transform_expanded_visual_pts(self, rot_mtx, trans_vec): + expanded_visual_pts_ref = self.expanded_visual_pts_ref + self.expanded_visual_pts = torch.matmul(rot_mtx, expanded_visual_pts_ref.transpose(1, 0)).transpose(1, 0) + trans_vec.unsqueeze(0) + + return self.expanded_visual_pts + + def get_tot_transformed_joints(self, transformed_joints): + if self.name in self.body_name_to_main_axis: + transformed_joints.append(self.full_main_axis_pts) + return transformed_joints + + def get_nn_pts(self,): + self.nn_pts = self.visual_pts_ref.size(0) + return self.nn_pts + + def set_args(self, args): + self.args = args + + def clear_grad(self, ): + if self.pos.grad is not None: + self.pos.grad.data = self.pos.grad.data * 0. + if self.radius.grad is not None: + self.radius.grad.data = self.radius.grad.data * 0. 
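transform_visual_pts above applies one rigid map to every reference vertex, p_world = R * p_ref + t, after rescaling sphere bodies by the learnable radius (the template sphere is first divided by 2). A standalone sketch of that mapping showing that gradients reach the radius parameter; all names here are illustrative:

import torch

radius = torch.tensor([2.0], requires_grad=True)    # learnable sphere radius
pts_ref = torch.rand(64, 3)                         # reference (template) vertices
R = torch.eye(3)                                    # accumulated link rotation
t = torch.tensor([0.1, 0.0, 0.0])                   # accumulated link translation

scaled = pts_ref / 2.0 * radius                     # sphere-only rescaling step
pts_world = torch.matmul(R, scaled.transpose(1, 0)).transpose(1, 0) + t.unsqueeze(0)

pts_world.sum().backward()
print(pts_world.shape, radius.grad)                 # torch.Size([64, 3]) and a non-zero gradient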
+ + def get_visual_pts(self, visual_pts_list): + visual_pts_list.append(self.visual_pts.detach()) + return visual_pts_list + + # get the visual counterparts of the boyd mesh or elements # + + # xyz attribute ## ## xyz attribute # + +# use get_name_to_visual_pts +# use get_name_to_visual_pts_faces to get the transformed visual pts and faces # +class Link: + def __init__(self, name, joint: Joint, body: Body, children, args) -> None: + + self.joint = joint + self.body = body + self.children = children + self.name = name + + self.args = args + + ### dyn_model_act ### + # parent_rot_mtx, parent_trans_vec # + # parent_rot_mtx, parent_trans_vec # + self.parent_rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32).cuda(), requires_grad=True) + self.parent_trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32).cuda(), requires_grad=True) + self.curr_rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32).cuda(), requires_grad=True) + self.curr_trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32).cuda(), requires_grad=True) + # + self.tot_rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32).cuda(), requires_grad=True) + self.tot_trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32).cuda(), requires_grad=True) + + self.compute_inertia() + + def print_grads(self, ): + print(f"parent_rot_mtx: {self.parent_rot_mtx.grad}") + print(f"parent_trans_vec: {self.parent_trans_vec.grad}") + print(f"curr_rot_mtx: {self.curr_rot_mtx.grad}") + print(f"curr_trans_vec: {self.curr_trans_vec.grad}") + print(f"tot_rot_mtx: {self.tot_rot_mtx.grad}") + print(f"tot_trans_vec: {self.tot_trans_vec.grad}") + print(f"Joint") + self.joint.print_grads() + for cur_link in self.children: + cur_link.print_grads() + + def compute_inertia(self, ): + joint_pos = self.joint.pos + joint_rot, joint_trans = self.joint.compute_transformation_from_current_state() # from current state # + body_pts = self.body.transform_visual_pts(joint_rot, joint_trans) + self.inertial_ref = torch.zeros((3, 3), dtype=torch.float32).cuda() + body_pts_mass = 1. 
/ float(body_pts.size(0)) + for i_pts in range(body_pts.size(0)): + cur_pts = body_pts[i_pts] + cur_pts_mass = body_pts_mass + cur_r = cur_pts - joint_pos + # cur_vert = init_passive_mesh[i_v] # + # cur_r = cur_vert - init_passive_mesh_center + dot_r_r = torch.sum(cur_r * cur_r) + cur_eye_mtx = torch.eye(3, dtype=torch.float32).cuda() + r_mult_rT = torch.matmul(cur_r.unsqueeze(-1), cur_r.unsqueeze(0)) + self.inertial_ref += (dot_r_r * cur_eye_mtx - r_mult_rT) * cur_pts_mass + self.inertial_ref_inv = torch.linalg.inv(self.inertial_ref) + self.body.inertial_ref = self.inertial_ref.clone() + self.body.inertial_ref_inv = self.inertial_ref_inv.clone() ### inertial ref ### + # print(f"body_invertia_matrix_inv: {self.body.inertial_ref_inv}") + + + + # + def set_init_states_target_value(self, init_states): + if self.joint.type == 'revolute': + self.joint_angle = init_states[self.joint.joint_idx] + joint_axis = self.joint.axis + self.rot_vec = self.joint_angle * joint_axis + self.joint.state = torch.tensor([1, 0, 0, 0], dtype=torch.float32).cuda() + self.joint.state = self.joint.state + update_quaternion(self.rot_vec, self.joint.state) + self.joint.timestep_to_states[0] = self.joint.state.detach() + self.joint.timestep_to_vels[0] = torch.zeros((3,), dtype=torch.float32).cuda().detach() ## velocity ## + for cur_link in self.children: + cur_link.set_init_states_target_value(init_states) + + # should forward for one single step -> use the action # + def set_init_states(self, ): + self.joint.state = torch.tensor([1, 0, 0, 0], dtype=torch.float32).cuda() + self.joint.timestep_to_states[0] = self.joint.state.detach() + self.joint.timestep_to_vels[0] = torch.zeros((3,), dtype=torch.float32).cuda().detach() ## velocity ## + for cur_link in self.children: + cur_link.set_init_states() + + + def get_visual_pts(self, visual_pts_list): + visual_pts_list = self.body.get_visual_pts(visual_pts_list) + for cur_link in self.children: + visual_pts_list = cur_link.get_visual_pts(visual_pts_list) + visual_pts_list = torch.cat(visual_pts_list, dim=0) + return visual_pts_list + + def get_visual_faces_list(self, visual_faces_list): + visual_faces_list = self.body.get_visual_faces_list(visual_faces_list) + for cur_link in self.children: + visual_faces_list = cur_link.get_visual_faces_list(visual_faces_list) + return visual_faces_list + # pass + + # with link states # # stpe + + + def get_joint_states(self, joint_states): + if self.joint.type == 'revolute': + # joint_states.append(self.joint.state) + joint_idx = self.joint.joint_idx + + def set_penetration_forces(self, penetration_forces, sampled_visual_pts_joint_idxes, joint_penetration_forces): + # penetration_forces + if self.children is not None and len(self.children) > 0: + for cur_link in self.children: + cur_link.set_penetration_forces(penetration_forces, sampled_visual_pts_joint_idxes, joint_penetration_forces) + + if self.joint.type in ['revolute']: + # penetration_forces_values = penetration_forces['penetration_forces'] # + # penetration_forces_points = penetration_forces['penetration_forces_points'] # + + # penetration forces # + penetration_forces_values = penetration_forces['penetration_forces'].detach() + penetration_forces_points = penetration_forces['penetration_forces_points'].detach() + + ####### use a part of peentration points and forces ####### + if sampled_visual_pts_joint_idxes is not None: + selected_forces_mask = sampled_visual_pts_joint_idxes == self.joint.joint_idx + else: + selected_forces_mask = torch.ones_like(penetration_forces_values[:, 
0]).bool() + ####### use a part of peentration points and forces ####### + + ####### use all peentration points and forces ####### + # selected_forces_mask = torch.ones_like(penetration_forces_values[:, 0]).bool() + ####### use all peentration points and forces ####### + + if torch.sum(selected_forces_mask.float()) > 0.5: + + penetration_forces_values = penetration_forces_values[selected_forces_mask] + penetration_forces_points = penetration_forces_points[selected_forces_mask] + # tot_rot_mtx, tot_trans_vec + # cur_joint_rot = self.tot_rot_mtx + # cur_joint_trans = self.tot_trans_vec + cur_joint_rot = self.tot_rot_mtx.detach() + cur_joint_trans = self.tot_trans_vec.detach() + local_frame_penetration_forces_values = torch.matmul(cur_joint_rot.transpose(1, 0), penetration_forces_values.transpose(1, 0)).transpose(1, 0) + local_frame_penetration_forces_points = torch.matmul(cur_joint_rot.transpose(1, 0), (penetration_forces_points - cur_joint_trans.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + + body_visual_pts_ref = self.body.visual_pts_ref + center_pts = torch.mean(body_visual_pts_ref, dim=0) + + joint_pos_to_forces_points = local_frame_penetration_forces_points - center_pts.unsqueeze(0) + forces_torques = torch.cross(joint_pos_to_forces_points, local_frame_penetration_forces_values) # forces values of the local frame # + forces_torques = torch.sum(forces_torques, dim=0) + + forces = torch.sum(local_frame_penetration_forces_values, dim=0) + + cur_joint_maximal_forces = torch.cat( + [forces, forces_torques], dim=0 + ) + cur_joint_idx = self.joint.joint_idx + joint_penetration_forces[cur_joint_idx][:] = cur_joint_maximal_forces[:].clone() + + # forces_torques_dot_axis = torch.sum(self.joint.axis * forces_torques) + # forces_torques = self.joint.axis * forces_torques_dot_axis + + ####### use children penetrations torqeus ####### + # if children_penetration_torques is not None: + # children_penetration_torques_dot_axis = torch.sum(self.joint.axis * children_penetration_torques) + # children_penetration_torques = self.joint.axis * children_penetration_torques_dot_axis + # forces_torques = forces_torques + children_penetration_torques # * 0.5 # damping # + # children_penetration_torques = forces_torques.clone() * 0.5 # damping # + # else: + # children_penetration_torques = forces_torques.clone() * 0.5 # + ####### use children penetrations torqeus ####### + + # force torques # + # torque = torque + forces_torques # * 0.001 + + + + + # forward dynamics --- from actions to states # + # inertia matrix --- from the inertia matrix to the inertia matrix # # set actiosn and update states # + def set_actions_and_update_states(self, actions, cur_timestep, time_cons, penetration_forces=None, sampled_visual_pts_joint_idxes=None, joint_name_to_penetration_forces_intermediates=None, children_penetration_torques=None, buffered_intertia_matrix=None): + + if self.children is not None and len(self.children) > 0: + # tot_children_intertia_matrix = torch.zeros((3, 3), dtype=torch.float32).cuda() + for cur_link in self.children: + tot_children_intertia_matrix = torch.zeros((3, 3), dtype=torch.float32).cuda() + cur_link.set_actions_and_update_states(actions, cur_timestep, time_cons, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes, joint_name_to_penetration_forces_intermediates=joint_name_to_penetration_forces_intermediates, children_penetration_torques=children_penetration_torques, buffered_intertia_matrix=tot_children_intertia_matrix) + if 
buffered_intertia_matrix is not None: + buffered_intertia_matrix = buffered_intertia_matrix + tot_children_intertia_matrix + else: + buffered_intertia_matrix = tot_children_intertia_matrix + + # tot_children_intertia_matri + + # tot_children_intertia_matri = tot_children_intertia_matri + torch.eye(3, dtype=torch.float32).cuda() * 0.0001 + + # buffered_intertia_matrix + + if self.joint.type in ['revolute']: + # return # + self.joint.action = actions[self.joint.joint_idx] + # + # visual_pts and visual_pts_mass + cur_joint_pos = self.joint.pos + # TODO: check whether the following is correct # + torque = self.joint.action * self.joint.axis ## joint.axis ## + + # should along the axis # + # torques added to the joint # + # penetration forces -- a list of the forces ## penetration forces ### + if penetration_forces is not None: + # a series of the # + # penetration_forces: { 'global_rotation': xxx, 'global_translation': xxx, 'penetration_forces': xxx, 'penetration_forces_points': xxx } + # glb_rot = penetration_forces['global_rotation'] # + # # glb_trans = penetration_forces['global_translation'] # + # penetration_forces_values = penetration_forces['penetration_forces'] # + # penetration_forces_points = penetration_forces['penetration_forces_points'] # + + # penetration forces # # values points # + penetration_forces_values = penetration_forces['penetration_forces'].detach() + penetration_forces_points = penetration_forces['penetration_forces_points'].detach() + + ####### use a part of peentration points and forces ####### + if sampled_visual_pts_joint_idxes is not None: + selected_forces_mask = sampled_visual_pts_joint_idxes == self.joint.joint_idx + else: + selected_forces_mask = torch.ones_like(penetration_forces_values[:, 0]).bool() + ####### use a part of peentration points and forces ####### + + ####### use all peentration points and forces ####### + selected_forces_mask = torch.ones_like(penetration_forces_values[:, 0]).bool() + ####### use all peentration points and forces ####### + + if torch.sum(selected_forces_mask.float()) > 0.5: + + penetration_forces_values = penetration_forces_values[selected_forces_mask] + penetration_forces_points = penetration_forces_points[selected_forces_mask] + # tot_rot_mtx, tot_trans_vec + # cur_joint_rot = self.tot_rot_mtx + # cur_joint_trans = self.tot_trans_vec + cur_joint_rot = self.tot_rot_mtx.detach() + cur_joint_trans = self.tot_trans_vec.detach() + local_frame_penetration_forces_values = torch.matmul(cur_joint_rot.transpose(1, 0), penetration_forces_values.transpose(1, 0)).transpose(1, 0) + local_frame_penetration_forces_points = torch.matmul(cur_joint_rot.transpose(1, 0), (penetration_forces_points - cur_joint_trans.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + + joint_pos_to_forces_points = local_frame_penetration_forces_points - cur_joint_pos.unsqueeze(0) + forces_torques = torch.cross(joint_pos_to_forces_points, local_frame_penetration_forces_values) # forces values of the local frame # + forces_torques = torch.sum(forces_torques, dim=0) + + forces_torques_dot_axis = torch.sum(self.joint.axis * forces_torques) + forces_torques = self.joint.axis * forces_torques_dot_axis + + ####### use children penetrations torqeus ####### + # if children_penetration_torques is not None: + # children_penetration_torques_dot_axis = torch.sum(self.joint.axis * children_penetration_torques) + # children_penetration_torques = self.joint.axis * children_penetration_torques_dot_axis + # forces_torques = forces_torques + children_penetration_torques # * 0.5 # 
damping # + # children_penetration_torques = forces_torques.clone() * 0.5 # damping # + # else: + # children_penetration_torques = forces_torques.clone() * 0.5 # + ####### use children penetrations torqeus ####### + + # force torques # + torque = torque + forces_torques # * 0.001 + + if joint_name_to_penetration_forces_intermediates is not None: + visual_pts = self.body.visual_pts_ref.detach().cpu().numpy() + forces_points_local_frame = local_frame_penetration_forces_points.detach().cpu().numpy() + forces_values_local_frame = local_frame_penetration_forces_values.detach().cpu().numpy() + joint_pos = cur_joint_pos.detach().cpu().numpy() + joint_axis = self.joint.axis.detach().cpu().numpy() + joint_name_to_penetration_forces_intermediates[self.joint.name] = { + 'visual_pts': visual_pts, 'forces_points_local_frame': forces_points_local_frame, 'forces_values_local_frame': forces_values_local_frame, 'joint_pos': joint_pos, 'joint_axis': joint_axis + } + else: + if children_penetration_torques is not None: + children_penetration_torques = children_penetration_torques * 0.5 + + + + # # TODO: transform the forces to the joint frame # + # cur_penetration_torque = torch.zeros_like(torque) + # for cur_pene_force_set in penetration_forces: + # cur_pene_force, cur_pene_point = cur_pene_force_set + # joint_pos_to_pene_point = cur_pene_point - cur_joint_pos ## joint pos ## + # cur_point_pene_torque = torch.cross(joint_pos_to_pene_point, cur_pene_force) + # cur_penetration_torque += cur_point_pene_torque + # # # ## + # dot_axis_with_penetration_torque = torch.sum(self.joint.axis * cur_penetration_torque) + # cur_penetration_torque = self.joint.axis * dot_axis_with_penetration_torque + # torque = torque + cur_penetration_torque + + # # Compute inertia matrix # # + # inertial = torch.zeros((3, 3), dtype=torch.float32).cuda() + # for i_pts in range(self.visual_pts.size(0)): + # cur_pts = self.visual_pts[i_pts] + # cur_pts_mass = self.visual_pts_mass[i_pts] + # cur_r = cur_pts - cur_joint_pos # r_i # + # # cur_vert = init_passive_mesh[i_v] + # # cur_r = cur_vert - init_passive_mesh_center + # dot_r_r = torch.sum(cur_r * cur_r) + # cur_eye_mtx = torch.eye(3, dtype=torch.float32).cuda() + # r_mult_rT = torch.matmul(cur_r.unsqueeze(-1), cur_r.unsqueeze(0)) + # inertial += (dot_r_r * cur_eye_mtx - r_mult_rT) * cur_pts_mass + # m = torch.sum(self.visual_pts_mass) + # # Use torque to update angular velocity -> state # + # inertia_inv = torch.linalg.inv(inertial) + + # axis-angle of # axis-angle # # a) joint torque; # b) external force and torque # + # potision of the force # # link a; body a # body to the joint # # body to the joint # # + # force applied to the joint torque # # torque # + # change the angles # + # inertia_inv = self.cur_inertia_inv + inertia_inv = torch.linalg.inv(self.cur_inertia).detach() + + inertia_inv = torch.eye(n=3, dtype=torch.float32).cuda() + + + if buffered_intertia_matrix is not None: + buffered_intertia_matrix = buffered_intertia_matrix + torch.eye(n=3, dtype=torch.float32).cuda() + else: + buffered_intertia_matrix = torch.eye(n=3, dtype=torch.float32).cuda() + + inertia_inv = torch.linalg.inv(buffered_intertia_matrix).detach() + + + delta_omega = torch.matmul(inertia_inv, torque.unsqueeze(-1)).squeeze(-1) + + # + # delta_omega = torque / 400 # # apply the force onto the link; apply the force onto the link + + # TODO: dt should be an optimizable constant? should it be the same value as that optimized for the passive object? 
# + delta_angular_vel = delta_omega * time_cons # * self.args.dt # delta quat # + delta_angular_vel = delta_angular_vel.squeeze(0) + if cur_timestep > 0: + prev_angular_vel = self.joint.timestep_to_vels[cur_timestep - 1].detach() + cur_angular_vel = prev_angular_vel * DAMPING + delta_angular_vel + else: + cur_angular_vel = delta_angular_vel # delta + + self.joint.timestep_to_vels[cur_timestep] = cur_angular_vel.detach() + # TODO: about args.dt + cur_delta_quat = cur_angular_vel * time_cons + cur_delta_quat = cur_delta_quat.squeeze(0) # delta quat # + cur_state = self.joint.timestep_to_states[cur_timestep].detach() + nex_state = cur_state + update_quaternion(cur_delta_quat, cur_state) + self.joint.timestep_to_states[cur_timestep + 1] = nex_state.detach() # + self.joint.state = nex_state + # followed by updating visual pts using states # # + # print(f"updated_joint_state: {self.joint.state}") + + + + ## for the robot: iterate over links and get the states ## + def get_joint_nm_to_states(self, joint_nm_to_states): + if self.joint.type in ['revolute']: + joint_nm_to_states[self.joint.name] = self.joint.state + if self.children is not None and len(self.children) > 0: + for cur_link in self.children: + joint_nm_to_states = cur_link.get_joint_nm_to_states(joint_nm_to_states) + return joint_nm_to_states + + def get_timestep_to_states(self, joint_nm_to_ts_to_states): + if self.joint.type in ['revolute']: + joint_nm_to_ts_to_states[self.joint.name] = self.joint.timestep_to_states + if self.children is not None and len(self.children) > 0: + for cur_link in self.children: + joint_nm_to_ts_to_states = cur_link.get_timestep_to_states(joint_nm_to_ts_to_states) + return joint_nm_to_ts_to_states + + # current delta states # get states -- reference states # joint + def set_and_update_states(self, states, cur_timestep, time_cons): + # + if self.joint.type in ['revolute']: + # return # # prev + cur_state = states[self.joint.joint_idx] # joint idx # + + # + # self.joint.timestep_to_states[cur_timestep + 1] = cur_state.detach() + delta_rot_vec = self.joint.axis * cur_state # states --> + prev_state = self.joint.timestep_to_states[cur_timestep].detach() + cur_state = prev_state + update_quaternion(delta_rot_vec, prev_state) + self.joint.timestep_to_states[cur_timestep + 1] = cur_state.detach() + self.joint.state = cur_state + # followed by updating visual pts using states # + # print(f"updated_joint_state: {self.joint.state}") + + # link and the states # + if self.children is not None and len(self.children) > 0: + for cur_link in self.children: # glb trans # + cur_link.set_and_update_states(states, cur_timestep, time_cons) + + # + def set_state(self, name_to_state): + self.joint.set_state(name_to_state=name_to_state) + for child_link in self.children: + child_link.set_state(name_to_state) + + + def set_state_via_vec(self, state_vec): # + self.joint.set_state_via_vec(state_vec) + for child_link in self.children: + child_link.set_state_via_vec(state_vec) + + ## + def get_tot_transformed_joints(self, transformed_joints): + cur_joint_transformed_pts = self.joint.transformed_joint_pts.unsqueeze(0) ### 3 pts + transformed_joints.append(cur_joint_transformed_pts) + transformed_joints = self.body.get_tot_transformed_joints(transformed_joints) + # if self.joint.name + for cur_link in self.children: + transformed_joints = cur_link.get_tot_transformed_joints(transformed_joints) + return transformed_joints + + def compute_transformation_via_state_vecs(self, state_vals, parent_rot_mtx, parent_trans_vec, visual_pts_list): + # 
state vecs and rot mtx # state vecs ##### + joint_rot_mtx, joint_trans_vec = self.joint.compute_transformation_via_state_vals(state_vals=state_vals) + + self.curr_rot_mtx = joint_rot_mtx + self.curr_trans_vec = joint_trans_vec + + self.joint.transform_joints_via_parent_rot_trans_infos(parent_rot_mtx=parent_rot_mtx, parent_trans_vec=parent_trans_vec) ## get rot and trans mtx and vecs ### + + # current rot # + tot_parent_rot_mtx = torch.matmul(parent_rot_mtx, joint_rot_mtx) + tot_parent_trans_vec = torch.matmul(parent_rot_mtx, joint_trans_vec.unsqueeze(-1)).view(3) + parent_trans_vec + + self.tot_rot_mtx = tot_parent_rot_mtx + self.tot_trans_vec = tot_parent_trans_vec + + # self.tot_rot_mtx = np.copy(tot_parent_rot_mtx) + # self.tot_trans_vec = np.copy(tot_parent_trans_vec) + + ### visual_pts_list for recording visual pts ### + + cur_body_visual_pts = self.body.transform_visual_pts(rot_mtx=self.tot_rot_mtx, trans_vec=self.tot_trans_vec) + visual_pts_list.append(cur_body_visual_pts) + + for cur_link in self.children: + # cur_link.parent_rot_mtx = np.copy(tot_parent_rot_mtx) ### set children parent rot mtx and the trans vec + # cur_link.parent_trans_vec = np.copy(tot_parent_trans_vec) ## + cur_link.parent_rot_mtx = tot_parent_rot_mtx ### set children parent rot mtx and the trans vec # + cur_link.parent_trans_vec = tot_parent_trans_vec ## + # cur_link.compute_transformation() ## compute self's transformations + cur_link.compute_transformation_via_state_vecs(state_vals, tot_parent_rot_mtx, tot_parent_trans_vec, visual_pts_list) + + def compute_transformation_via_current_state(self, parent_rot_mtx, parent_trans_vec, visual_pts_list, visual_pts_mass, link_name_to_transformations_and_transformed_pts, joint_idxes=None): + # state vecs and rot mtx # state vecs # + joint_rot_mtx, joint_trans_vec = self.joint.compute_transformation_from_current_state() + + # cur_inertia_inv + self.cur_inertia_inv = torch.zeros((3, 3), dtype=torch.float32).cuda() + self.cur_inertia = torch.zeros((3, 3), dtype=torch.float32).cuda() + self.curr_rot_mtx = joint_rot_mtx + self.curr_trans_vec = joint_trans_vec + + self.joint.transform_joints_via_parent_rot_trans_infos(parent_rot_mtx=parent_rot_mtx, parent_trans_vec=parent_trans_vec) ## get rot and trans mtx and vecs ### + + # get the parent rot mtx and the joint rot mtx # # joint rot mtx # + tot_parent_rot_mtx = torch.matmul(parent_rot_mtx, joint_rot_mtx) + tot_parent_trans_vec = torch.matmul(parent_rot_mtx, joint_trans_vec.unsqueeze(-1)).view(3) + parent_trans_vec + + # tot_rot_mtx, tot_trans_vec + self.tot_rot_mtx = tot_parent_rot_mtx + self.tot_trans_vec = tot_parent_trans_vec + + # self.tot_rot_mtx = np.copy(tot_parent_rot_mtx) + # self.tot_trans_vec = np.copy(tot_parent_trans_vec) + + ### visual_pts_list for recording visual pts ### # !!!! damping is an important technique here ! 
#### + ### visual pts list for recoding visual pts ### + # so the inertial should be transformed by the tot_rot_mtx # # transform visual pts # # tr + cur_body_visual_pts = self.body.transform_visual_pts(rot_mtx=self.tot_rot_mtx, trans_vec=self.tot_trans_vec) + # visual_pts_list.append(cur_body_visual_pts) + self.cur_inertia_inv = self.cur_inertia_inv + self.body.compute_inertial_inv(self.tot_rot_mtx) + self.cur_inertia = self.cur_inertia + self.body.compute_inertia(self.tot_rot_mtx) + + # + cur_body_transformations = (self.tot_rot_mtx, self.tot_trans_vec) + link_name_to_transformations_and_transformed_pts[self.body.name] = (cur_body_visual_pts.detach().clone(), cur_body_transformations) + + if joint_idxes is not None: + cur_body_joint_idx = self.joint.joint_idx + cur_body_joint_idxes = [cur_body_joint_idx for _ in range(cur_body_visual_pts.size(0))] + cur_body_joint_idxes = torch.tensor(cur_body_joint_idxes, dtype=torch.long).cuda() + joint_idxes.append(cur_body_joint_idxes) + + # + + children_visual_pts_list = [] + children_pts_mass = [] + children_pts_mass.append(torch.ones((cur_body_visual_pts.size(0), ), dtype=torch.float32).cuda() / float(cur_body_visual_pts.size(0))) + children_visual_pts_list.append(cur_body_visual_pts) + for cur_link in self.children: + # cur_link.parent_rot_mtx = np.copy(tot_parent_rot_mtx) ### set children parent rot mtx and the trans vec + # cur_link.parent_trans_vec = np.copy(tot_parent_trans_vec) ## + cur_link.parent_rot_mtx = tot_parent_rot_mtx ### set children parent rot mtx and the trans vec # + cur_link.parent_trans_vec = tot_parent_trans_vec ## + # cur_link.compute_transformation() ## compute self's transformations + children_visual_pts_list, children_pts_mass = cur_link.compute_transformation_via_current_state(tot_parent_rot_mtx, tot_parent_trans_vec, children_visual_pts_list, children_pts_mass, link_name_to_transformations_and_transformed_pts, joint_idxes=joint_idxes) + ## inertia_inv ## + self.cur_inertia_inv = self.cur_inertia_inv + cur_link.cur_inertia_inv + self.cur_inertia = self.cur_inertia + cur_link.cur_inertia ### get the current inertia ### + + children_visual_pts = torch.cat(children_visual_pts_list, dim=0) + self.visual_pts = children_visual_pts.detach() # + visual_pts_list.append(children_visual_pts) + children_pts_mass = torch.cat(children_pts_mass, dim=0) + self.visual_pts_mass = children_pts_mass.detach() + visual_pts_mass.append(children_pts_mass) + # print(f"children_pts_mass: {children_pts_mass.size()}") + return visual_pts_list, visual_pts_mass + + + def compute_expanded_visual_pts_transformation_via_current_state(self, parent_rot_mtx, parent_trans_vec, visual_pts_list, visual_pts_mass): + # state vecs and rot mtx # state vecs ##### # + joint_rot_mtx, joint_trans_vec = self.joint.compute_transformation_from_current_state() + + # cur_inertia_inv + self.cur_inertia_inv = torch.zeros((3, 3), dtype=torch.float32).cuda() + self.cur_inertia = torch.zeros((3, 3), dtype=torch.float32).cuda() + self.curr_rot_mtx = joint_rot_mtx + self.curr_trans_vec = joint_trans_vec + + self.joint.transform_joints_via_parent_rot_trans_infos(parent_rot_mtx=parent_rot_mtx, parent_trans_vec=parent_trans_vec) ## get rot and trans mtx and vecs ### + + # get the parent rot mtx and the joint rot mtx # # joint rot mtx # + tot_parent_rot_mtx = torch.matmul(parent_rot_mtx, joint_rot_mtx) + tot_parent_trans_vec = torch.matmul(parent_rot_mtx, joint_trans_vec.unsqueeze(-1)).view(3) + parent_trans_vec + + self.tot_rot_mtx = tot_parent_rot_mtx + self.tot_trans_vec = 
tot_parent_trans_vec + + # self.tot_rot_mtx = np.copy(tot_parent_rot_mtx) + # self.tot_trans_vec = np.copy(tot_parent_trans_vec) + + ### visual_pts_list for recording visual pts ### + # so the inertial should be transformed by the tot_rot_mtx # # transform visual pts # + cur_body_visual_pts = self.body.transform_expanded_visual_pts(rot_mtx=self.tot_rot_mtx, trans_vec=self.tot_trans_vec) + # visual_pts_list.append(cur_body_visual_pts) + self.cur_inertia_inv = self.cur_inertia_inv + self.body.compute_inertial_inv(self.tot_rot_mtx) + self.cur_inertia = self.cur_inertia + self.body.compute_inertia(self.tot_rot_mtx) + + # + # cur_body_transformations = (self.tot_rot_mtx.detach().clone(), self.tot_trans_vec.detach().clone()) + # link_name_to_transformations_and_transformed_pts[self.body.name] = (cur_body_visual_pts.detach().clone(), cur_body_transformations) + + # + + children_visual_pts_list = [] + children_pts_mass = [] + children_pts_mass.append(torch.ones((cur_body_visual_pts.size(0), ), dtype=torch.float32).cuda() / float(cur_body_visual_pts.size(0))) + children_visual_pts_list.append(cur_body_visual_pts) + for cur_link in self.children: + # cur_link.parent_rot_mtx = np.copy(tot_parent_rot_mtx) ### set children parent rot mtx and the trans vec + # cur_link.parent_trans_vec = np.copy(tot_parent_trans_vec) ## + cur_link.parent_rot_mtx = tot_parent_rot_mtx ### set children parent rot mtx and the trans vec # + cur_link.parent_trans_vec = tot_parent_trans_vec ## + # cur_link.compute_transformation() ## compute self's transformations + children_visual_pts_list, children_pts_mass = cur_link.compute_expanded_visual_pts_transformation_via_current_state(tot_parent_rot_mtx, tot_parent_trans_vec, children_visual_pts_list, children_pts_mass) + + self.cur_inertia_inv = self.cur_inertia_inv + cur_link.cur_inertia_inv + self.cur_inertia = self.cur_inertia + cur_link.cur_inertia ### get the current inertia ### + + children_visual_pts = torch.cat(children_visual_pts_list, dim=0) + self.expanded_visual_pts = children_visual_pts.detach() # + visual_pts_list.append(children_visual_pts) + children_pts_mass = torch.cat(children_pts_mass, dim=0) + self.expanded_visual_pts_mass = children_pts_mass.detach() + visual_pts_mass.append(children_pts_mass) + # print(f"children_pts_mass: {children_pts_mass.size()}") + return visual_pts_list, visual_pts_mass + + + def set_body_expanded_visual_pts(self, link_name_to_ragged_expanded_visual_pts): + self.body.expanded_visual_pts_ref = link_name_to_ragged_expanded_visual_pts[self.body.name].detach().clone() + + for cur_link in self.children: + cur_link.set_body_expanded_visual_pts(link_name_to_ragged_expanded_visual_pts) + + + def get_visual_pts_rgba_values(self, pts_rgba_vals_list): + + cur_body_visual_rgba_vals = self.body.get_visual_pts_colors() + pts_rgba_vals_list.append(cur_body_visual_rgba_vals) + + for cur_link in self.children: + cur_link.get_visual_pts_rgba_values(pts_rgba_vals_list) + + + + def compute_transformation(self,): + self.joint.compute_transformation() + # self.curr_rot_mtx = np.copy(self.joint.rot_mtx) + # self.curr_trans_vec = np.copy(self.joint.trans_vec) + + self.curr_rot_mtx = self.joint.rot_mtx + self.curr_trans_vec = self.joint.trans_vec + # rot_p (rot_c p + trans_c) + trans_p # + # rot_p rot_c p + rot_p trans_c + trans_p # + #### matmul #### + # tot_parent_rot_mtx = np.matmul(self.parent_rot_mtx, self.curr_rot_mtx) + # tot_parent_trans_vec = np.matmul(self.parent_rot_mtx, self.curr_trans_vec.reshape(3, 1)).reshape(3) + self.parent_trans_vec + + 
tot_parent_rot_mtx = torch.matmul(self.parent_rot_mtx, self.curr_rot_mtx) + tot_parent_trans_vec = torch.matmul(self.parent_rot_mtx, self.curr_trans_vec.unsqueeze(-1)).view(3) + self.parent_trans_vec + + self.tot_rot_mtx = tot_parent_rot_mtx + self.tot_trans_vec = tot_parent_trans_vec + + # self.tot_rot_mtx = np.copy(tot_parent_rot_mtx) + # self.tot_trans_vec = np.copy(tot_parent_trans_vec) + + for cur_link in self.children: + # cur_link.parent_rot_mtx = np.copy(tot_parent_rot_mtx) ### set children parent rot mtx and the trans vec + # cur_link.parent_trans_vec = np.copy(tot_parent_trans_vec) ## + cur_link.parent_rot_mtx = tot_parent_rot_mtx ### set children parent rot mtx and the trans vec # + cur_link.parent_trans_vec = tot_parent_trans_vec ## + cur_link.compute_transformation() ## compute self's transformations + + def get_name_to_visual_pts_faces(self, name_to_visual_pts_faces): + # transform_visual_pts # ## rot_mt + self.body.transform_visual_pts(rot_mtx=self.tot_rot_mtx, trans_vec=self.tot_trans_vec) + name_to_visual_pts_faces[self.body.name] = {"pts": self.body.visual_pts, "faces": self.body.visual_faces_ref} + for cur_link in self.children: + cur_link.get_name_to_visual_pts_faces(name_to_visual_pts_faces) ## transform the pts faces + + def get_visual_pts_list(self, visual_pts_list): + # transform_visual_pts # ## rot_mt + self.body.transform_visual_pts(rot_mtx=self.tot_rot_mtx, trans_vec=self.tot_trans_vec) + visual_pts_list.append(self.body.visual_pts) # body template # + # name_to_visual_pts_faces[self.body.name] = {"pts": self.body.visual_pts, "faces": self.body.visual_faces_ref} + for cur_link in self.children: + # cur_link.get_name_to_visual_pts_faces(name_to_visual_pts_faces) ## transform the pts faces + visual_pts_list = cur_link.get_visual_pts_list(visual_pts_list) + return visual_pts_list + + + def set_joint_idx(self, joint_name_to_idx): + self.joint.set_joint_idx(joint_name_to_idx) + for cur_link in self.children: + cur_link.set_joint_idx(joint_name_to_idx) + # if self.name in joint_name_to_idx: + # self.joint_idx = joint_name_to_idx[self.name] + + def get_nn_pts(self,): + nn_pts = 0 + nn_pts += self.body.get_nn_pts() + for cur_link in self.children: + nn_pts += cur_link.get_nn_pts() + self.nn_pts = nn_pts + return self.nn_pts + + def clear_grads(self,): + + if self.parent_rot_mtx.grad is not None: + self.parent_rot_mtx.grad.data = self.parent_rot_mtx.grad.data * 0. + if self.parent_trans_vec.grad is not None: + self.parent_trans_vec.grad.data = self.parent_trans_vec.grad.data * 0. + if self.curr_rot_mtx.grad is not None: + self.curr_rot_mtx.grad.data = self.curr_rot_mtx.grad.data * 0. + if self.curr_trans_vec.grad is not None: + self.curr_trans_vec.grad.data = self.curr_trans_vec.grad.data * 0. + if self.tot_rot_mtx.grad is not None: + self.tot_rot_mtx.grad.data = self.tot_rot_mtx.grad.data * 0. + if self.tot_trans_vec.grad is not None: + self.tot_trans_vec.grad.data = self.tot_trans_vec.grad.data * 0. 
+ + self.joint.clear_grads() + self.body.clear_grad() + for cur_link in self.children: + cur_link.clear_grads() + + def set_args(self, args): + self.args = args + for cur_link in self.children: + cur_link.set_args(args) + + + + +class Robot: # robot and the robot # + def __init__(self, children_links, args) -> None: + self.children = children_links + ### global rotation quaternion ### + self.glb_rotation = nn.Parameter(torch.eye(3, dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True) + ### global translation vectors ## + self.glb_trans = nn.Parameter(torch.tensor([ 0., 0., 0.], dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True) + self.args = args + + def set_state(self, name_to_state): + for cur_link in self.children: + cur_link.set_state(name_to_state) + + def compute_transformation(self,): + for cur_link in self.children: + cur_link.compute_transformation() + + def get_name_to_visual_pts_faces(self, name_to_visual_pts_faces): + for cur_link in self.children: + cur_link.get_name_to_visual_pts_faces(name_to_visual_pts_faces) + + def get_visual_pts_list(self, visual_pts_list): + for cur_link in self.children: + visual_pts_list = cur_link.get_visual_pts_list(visual_pts_list) + return visual_pts_list + + def get_visual_faces_list(self, visual_faces_list): + for cur_link in self.children: + visual_faces_list = cur_link.get_visual_faces_list(visual_faces_list) + return visual_faces_list + + def set_joint_idx(self, joint_name_to_idx): + for cur_link in self.children: + cur_link.set_joint_idx(joint_name_to_idx) ### set joint idx ### + + def set_state_via_vec(self, state_vec): ### set the state vec for the state vec ### + for cur_link in self.children: ### set the state vec for the state vec ### + cur_link.set_state_via_vec(state_vec) + # self.joint.set_state_via_vec(state_vec) + # for child_link in self.children: + # child_link.set_state_via_vec(state_vec) + + # get_tot_transformed_joints + def get_tot_transformed_joints(self, transformed_joints): # i + for cur_link in self.children: # + transformed_joints = cur_link.get_tot_transformed_joints(transformed_joints) + return transformed_joints + + def get_joint_states(self, joint_states): + for cur_link in self.children: + joint_states = cur_link.get_joint_states(joint_states) + return joint_states + + def get_nn_pts(self): + nn_pts = 0 + for cur_link in self.children: + nn_pts += cur_link.get_nn_pts() + self.nn_pts = nn_pts + return self.nn_pts + + def set_args(self, args): + self.args = args + for cur_link in self.children: ## args ## + cur_link.set_args(args) + + def print_grads(self): + for cur_link in self.children: + cur_link.print_grads() + + def clear_grads(self,): ## clear grads ## + for cur_link in self.children: + cur_link.clear_grads() + + def compute_transformation_via_state_vecs(self, state_vals, visual_pts_list): + for cur_link in self.children: + cur_link.compute_transformation_via_state_vecs(state_vals, cur_link.parent_rot_mtx, cur_link.parent_trans_vec, visual_pts_list) + return visual_pts_list + + # get_visual_pts_rgba_values(self, pts_rgba_vals_list): + def get_visual_pts_rgba_values(self, pts_rgba_vals_list): + for cur_link in self.children: + cur_link.get_visual_pts_rgba_values(pts_rgba_vals_list) + return pts_rgba_vals_list ## compute pts rgba vals list ## + + def set_init_states(self, init_states): + # glb_rot, glb_trans # + ###### set the initial state ###### + glb_rot = init_states['glb_rot'] + self.glb_rotation.data[:, :] = glb_rot[:, :] + glb_trans = init_states['glb_trans'] + 
self.glb_trans.data[:] = glb_trans[:] # glb trans # + # parent_rot_mtx, parent_trans_vec # + for cur_link in self.children: + cur_link.parent_rot_mtx.data[:, :] = self.glb_rotation.data[:, :] + cur_link.parent_trans_vec.data[:] = self.glb_trans.data[:] + cur_link.set_init_states() + + def set_init_states_target_value(self, tot_init_states): + glb_rot = tot_init_states['glb_rot'] + self.glb_rotation.data[:, :] = glb_rot[:, :] + glb_trans = tot_init_states['glb_trans'] + self.glb_trans.data[:] = glb_trans[:] # glb trans # + links_init_states = tot_init_states['links_init_states'] + for cur_link in self.children: + cur_link.parent_rot_mtx.data[:, :] = self.glb_rotation.data[:, :] + cur_link.parent_trans_vec.data[:] = self.glb_trans.data[:] + cur_link.set_init_states_target_value(links_init_states) + + + def get_timestep_to_states(self, joint_nm_to_ts_to_states): + for cur_link in self.children: + joint_nm_to_ts_to_states = cur_link.get_timestep_to_states(joint_nm_to_ts_to_states) + return joint_nm_to_ts_to_states + + + def get_joint_nm_to_states(self, joint_nm_to_states): + for cur_link in self.children: + joint_nm_to_states = cur_link.get_joint_nm_to_states(joint_nm_to_states) + return joint_nm_to_states + + # def set_penetration_forces(self, penetration_forces): + def set_penetration_forces(self, penetration_forces, sampled_visual_pts_joint_idxes, joint_penetration_forces): + for cur_link in self.children: + cur_link.set_penetration_forces(penetration_forces, sampled_visual_pts_joint_idxes, joint_penetration_forces) + + # set_actions_and_update_states(..., penetration_forces) + def set_actions_and_update_states(self, actions, cur_timestep, time_cons, penetration_forces=None, sampled_visual_pts_joint_idxes=None, joint_name_to_penetration_forces_intermediates=None): + # delta_glb_rot; delta_glb_trans # # + delta_glb_rotation = actions['delta_glb_rot'] + delta_glb_trans = actions['delta_glb_trans'] + cur_glb_rot = self.glb_rotation.data.detach() + cur_glb_trans = self.glb_trans.data.detach() + nex_glb_rot = torch.matmul(delta_glb_rotation, cur_glb_rot) # + nex_glb_trans = torch.matmul(delta_glb_rotation, cur_glb_trans.unsqueeze(-1)).squeeze(-1) + delta_glb_trans + link_actions = actions['link_actions'] + self.glb_rotation = nex_glb_rot + self.glb_trans = nex_glb_trans + for cur_link in self.children: # glb trans # # + cur_link.set_actions_and_update_states(link_actions, cur_timestep, time_cons, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes, joint_name_to_penetration_forces_intermediates=joint_name_to_penetration_forces_intermediates, children_penetration_torques=None) + + def set_and_update_states(self, states, cur_timestep, time_cons): + delta_glb_rotation = states['delta_glb_rot'] # + delta_glb_trans = states['delta_glb_trans'] + cur_glb_rot = self.glb_rotation.data.detach() + cur_glb_trans = self.glb_trans.data.detach() + nex_glb_rot = torch.matmul(delta_glb_rotation, cur_glb_rot) + nex_glb_trans = torch.matmul(delta_glb_rotation, cur_glb_trans.unsqueeze(-1)).squeeze(-1) + delta_glb_trans + link_states = states['link_states'] + self.glb_rotation = nex_glb_rot + self.glb_trans = nex_glb_trans + for cur_link in self.children: # glb trans # + cur_link.set_and_update_states(link_states, cur_timestep, time_cons) + + + def compute_transformation_via_current_state(self, visual_pts_list, link_name_to_transformations_and_transformed_pts, joint_idxes= None): + # visual_pts_mass_list = [] + visual_pts_list = [] + visual_pts_mass_list = [] + # 
visual_pts_mass = [] + for cur_link in self.children: + visual_pts_list, visual_pts_mass_list = cur_link.compute_transformation_via_current_state(self.glb_rotation.data, self.glb_trans.data, visual_pts_list, visual_pts_mass_list, link_name_to_transformations_and_transformed_pts, joint_idxes=joint_idxes) + visual_pts_list = torch.cat(visual_pts_list, dim=0) + visual_pts_mass_list= torch.cat(visual_pts_mass_list, dim=0) + return visual_pts_list, visual_pts_mass_list + + # compute_expanded_visual_pts_transformation_via_current_state + def compute_expanded_visual_pts_transformation_via_current_state(self, visual_pts_list): + # visual_pts_mass_list = [] + visual_pts_list = [] + visual_pts_mass_list = [] + # visual_pts_mass = [] + for cur_link in self.children: + visual_pts_list, visual_pts_mass_list = cur_link.compute_expanded_visual_pts_transformation_via_current_state(self.glb_rotation.data, self.glb_trans.data, visual_pts_list, visual_pts_mass_list) + visual_pts_list = torch.cat(visual_pts_list, dim=0) + visual_pts_mass_list= torch.cat(visual_pts_mass_list, dim=0) + return visual_pts_list, visual_pts_mass_list + + def set_body_expanded_visual_pts(self, link_name_to_ragged_expanded_visual_pts): + for cur_link in self.children: + cur_link.set_body_expanded_visual_pts(link_name_to_ragged_expanded_visual_pts) + + + +# robot manager # +# set the initial state # +# record optimizable actions # +# record optimizable time constants # +# and with the external forces? # + + +def parse_nparray_from_string(strr, args): + vals = strr.split(" ") + vals = [float(val) for val in vals] + vals = np.array(vals, dtype=np.float32) + vals = torch.from_numpy(vals).float() + ## vals ## + vals = nn.Parameter(vals.cuda(), requires_grad=True) + + return vals + + +### parse link data ### +def parse_link_data(link, args): + + link_name = link.attrib["name"] + # print(f"parsing link: {link_name}") ## joints body meshes # + + joint = link.find("./joint") + + joint_name = joint.attrib["name"] + joint_type = joint.attrib["type"] + if joint_type in ["revolute"]: ## a general xml parser here? + axis = joint.attrib["axis"] + axis = parse_nparray_from_string(axis, args=args) + else: + axis = None + pos = joint.attrib["pos"] # + pos = parse_nparray_from_string(pos, args=args) + quat = joint.attrib["quat"] + quat = parse_nparray_from_string(quat, args=args) + + try: + frame = joint.attrib["frame"] + except: + frame = "WORLD" + + if joint_type not in ["fixed"]: + damping = joint.attrib["damping"] + damping = float(damping) + else: + damping = 0.0 + + cur_joint = Joint(joint_name, joint_type, axis, pos, quat, frame, damping, args=args) + + body = link.find("./body") + body_name = body.attrib["name"] + body_type = body.attrib["type"] + if body_type == "mesh": + filename = body.attrib["filename"] + else: + filename = "" + + if body_type == "sphere": + radius = body.attrib["radius"] + radius = float(radius) + else: + radius = 0. 
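+    # parse the body's pose, material, and appearance attributes, build the Body, then recurse into the child links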
+
+    pos = body.attrib["pos"]
+    pos = parse_nparray_from_string(pos, args=args)
+    quat = body.attrib["quat"]
+    quat = joint.attrib["quat"]  # note: the joint quaternion overrides the body attribute here
+    quat = parse_nparray_from_string(quat, args=args)
+    try:
+        transform_type = body.attrib["transform_type"]
+    except KeyError:
+        transform_type = "OBJ_TO_WORLD"
+    density = body.attrib["density"]
+    density = float(density)
+    mu = body.attrib["mu"]
+    mu = float(mu)
+    try:  ## rgba ##
+        rgba = body.attrib["rgba"]
+        rgba = parse_nparray_from_string(rgba, args=args)
+    except KeyError:
+        rgba = np.zeros((4,), dtype=np.float32)
+
+    cur_body = Body(body_name, body_type, filename, pos, quat, transform_type, density, mu, rgba, radius, args=args)
+
+    children_link = []
+    links = link.findall("./link")
+    for child_link in links:
+        cur_child_link = parse_link_data(child_link, args=args)
+        children_link.append(cur_child_link)
+
+    link_name = link.attrib["name"]
+    link_obj = Link(link_name, joint=cur_joint, body=cur_body, children=children_link, args=args)
+    return link_obj
+
+
+
+
+def parse_data_from_xml(xml_fn, args):
+
+    tree = ElementTree()
+    tree.parse(xml_fn)
+
+    ### get total robots ###
+    robots = tree.findall("./robot")
+    i_robot = 0
+    tot_robots = []
+    for cur_robot in robots:
+        print(f"Getting robot: {i_robot}")
+        i_robot += 1
+        cur_links = cur_robot.findall("./link")
+        # i_link = 0
+        cur_robot_links = []
+        for cur_link in cur_links:  ## links of the robot ##
+            ### parse-link util -> each link is composed of a joint, a body, and children links (which may or may not have children of their own)
+            # cur_link_name = cur_link.attrib["name"]
+            # print(f"Getting link: {i_link} with name: {cur_link_name}")
+            # i_link += 1
+            cur_robot_links.append(parse_link_data(cur_link, args=args))
+        cur_robot_obj = Robot(cur_robot_links, args=args)
+        tot_robots.append(cur_robot_obj)
+
+
+    tot_actuators = []
+    actuators = tree.findall("./actuator/motor")
+    joint_nm_to_joint_idx = {}
+    i_act = 0
+    for cur_act in actuators:
+        cur_act_joint_nm = cur_act.attrib["joint"]
+        joint_nm_to_joint_idx[cur_act_joint_nm] = i_act
+        i_act += 1  ### add the actuator ###
+
+    tot_robots[0].set_joint_idx(joint_nm_to_joint_idx)  ### set joint idx for the active robot ###
+    tot_robots[0].get_nn_pts()
+    tot_robots[1].get_nn_pts()
+
+    return tot_robots
+
+
+def get_name_to_state_from_str(states_str):
+    tot_states = states_str.split(" ")
+    tot_states = [float(cur_state) for cur_state in tot_states]
+    joint_name_to_state = {}
+    for i in range(len(tot_states)):
+        cur_joint_name = f"joint{i + 1}"
+        cur_joint_state = tot_states[i]
+        joint_name_to_state[cur_joint_name] = cur_joint_state
+    return joint_name_to_state
+
+
+def merge_meshes(verts_list, faces_list):
+    nn_verts = 0
+    tot_verts_list = []
+    tot_faces_list = []
+    for i_vv, cur_verts in enumerate(verts_list):
+        cur_verts_nn = cur_verts.size(0)
+        tot_verts_list.append(cur_verts)
+        tot_faces_list.append(faces_list[i_vv] + nn_verts)
+        nn_verts = nn_verts + cur_verts_nn
+    tot_verts_list = torch.cat(tot_verts_list, dim=0)
+    tot_faces_list = torch.cat(tot_faces_list, dim=0)
+    return tot_verts_list, tot_faces_list
+
+
+
+class RobotAgent:  # wraps the parsed robot(s) #
+    def __init__(self, xml_fn, args) -> None:
+        self.xml_fn = xml_fn
+        self.args = args
+
+        active_robot, passive_robot = parse_data_from_xml(xml_fn, args)
+
+        #### set and initialize the time constant ####
+        self.time_constant = nn.Embedding(
+            num_embeddings=3, embedding_dim=1
+        ).cuda()
+        torch.nn.init.ones_(self.time_constant.weight)
+        self.time_constant.weight.data = self.time_constant.weight.data * 0.2  ### time_constant data ###
+
+        #### set optimizable actions
#### + self.optimizable_actions = nn.Embedding( + num_embeddings=100, embedding_dim=22, + ).cuda() + torch.nn.init.zeros_(self.optimizable_actions.weight) # + + self.learning_rate = 5e-4 + + self.active_robot = active_robot + + + self.set_init_states() + init_visual_pts = self.get_init_state_visual_pts() + self.init_visual_pts = init_visual_pts + + self.robot_visual_faces_list = [] + self.robot_visual_faces_list = self.active_robot.get_visual_faces_list(self.robot_visual_faces_list) + self.robot_visual_pts_list = [] + self.robot_visual_pts_list = self.active_robot.get_visual_pts_list(self.robot_visual_pts_list) + + self.robot_pts, self.robot_faces = merge_meshes(self.robot_visual_pts_list, self.robot_visual_faces_list) + + print(f"robot_pts: {self.robot_pts.size()}, self.robot_faces: {self.robot_faces.size()}") + cur_robot_mesh = trimesh.Trimesh(vertices=self.robot_pts.detach().cpu().numpy(), faces=self.robot_faces.detach().cpu().numpy()) + cur_robot_mesh.export(f'init_robot_mesh.ply') + + + + def get_timestep_to_states(self): + joint_nm_to_ts_to_states = {} + joint_nm_to_ts_to_states = self.active_robot.get_timestep_to_states(joint_nm_to_ts_to_states) + return joint_nm_to_ts_to_states + + # so for each joint; get the joint + def get_joint_nm_to_states(self): + joint_nm_to_states = {} + joint_nm_to_states = self.active_robot.get_joint_nm_to_states(joint_nm_to_states) + return joint_nm_to_states + + def set_init_states_target_value(self, init_states): + glb_rot = torch.eye(n=3, dtype=torch.float32).cuda() + glb_trans = torch.zeros((3,), dtype=torch.float32).cuda() ### glb_trans #### and the rot 3## + + tot_init_states = {} + tot_init_states['glb_rot'] = glb_rot; + tot_init_states['glb_trans'] = glb_trans; + tot_init_states['links_init_states'] = init_states + self.active_robot.set_init_states_target_value(tot_init_states) + + + def set_penetration_forces(self, penetration_forces, sampled_visual_pts_joint_idxes, joint_penetration_forces): + self.active_robot.set_penetration_forces(penetration_forces, sampled_visual_pts_joint_idxes, joint_penetration_forces) + + def set_init_states(self): + glb_rot = torch.eye(n=3, dtype=torch.float32).cuda() + glb_trans = torch.zeros((3,), dtype=torch.float32).cuda() ### glb_trans #### and the rot 3## + + ### random rotation ### + # glb_rot_np = R.random().as_matrix() + # glb_rot = torch.from_numpy(glb_rot_np).float().cuda() + ### random rotation ### + + # glb_rot, glb_trans # + init_states = {} + init_states['glb_rot'] = glb_rot; + init_states['glb_trans'] = glb_trans; + self.active_robot.set_init_states(init_states) + + def get_init_state_visual_pts(self, ret_link_name_to_tansformations=False, ret_joint_idxes=False): + visual_pts_list = [] # compute the transformation via current state # + link_name_to_transformations_and_transformed_pts = {} + joint_idxes = [] + visual_pts_list, visual_pts_mass_list = self.active_robot.compute_transformation_via_current_state( visual_pts_list, link_name_to_transformations_and_transformed_pts, joint_idxes=joint_idxes) + joint_idxes = torch.cat(joint_idxes, dim=0) + init_visual_pts = visual_pts_list + if ret_link_name_to_tansformations and ret_joint_idxes: + return init_visual_pts, link_name_to_transformations_and_transformed_pts, joint_idxes + elif ret_link_name_to_tansformations: + return init_visual_pts, link_name_to_transformations_and_transformed_pts + elif ret_joint_idxes: + return init_visual_pts, joint_idxes + else: + return init_visual_pts + + # init_visual_pts, link_name_to_transformations_and_transformed_pts = 
get_init_state_visual_pts(ret_link_name_to_tansformations=True) + # set_body_expanded_visual_pts + # expanded_visual_pts = compute_expanded_visual_pts_transformation_via_current_state() + + # expanded_init_visual_pts = compute_expanded_visual_pts_transformation_via_current_state + def compute_expanded_visual_pts_transformation_via_current_state(self,): + visual_pts_list = [] # compute the transformation via current state # + # link_name_to_transformations_and_transformed_pts = {} + visual_pts_list, visual_pts_mass_list = self.active_robot.compute_expanded_visual_pts_transformation_via_current_state( visual_pts_list) + # init_visual_pts = visual_pts_list + # if ret_link_name_to_tansformations: + # return init_visual_pts, link_name_to_transformations_and_transformed_pts + # else: + return visual_pts_list + + def set_body_expanded_visual_pts(self, link_name_to_ragged_expanded_visual_pts): + self.active_robot.set_body_expanded_visual_pts(link_name_to_ragged_expanded_visual_pts) + # for cur_link in self.children: + # cur_link.set_body_expanded_visual_pts(link_name_to_ragged_expanded_visual_pts) + + + def set_and_update_states(self, states, cur_timestep): + time_cons = self.time_constant(torch.zeros((1,), dtype=torch.long).cuda()) # + ## set and update the states ## + self.active_robot.set_and_update_states(states, cur_timestep, time_cons) + # for cur_link in self.children: # glb trans # + # cur_link.set_and_update_states(link_actions, cur_timestep, time_cons) + + # set_actions_and_update_states(..., penetration_forces) + def set_actions_and_update_states(self, actions, cur_timestep, penetration_forces=None, sampled_visual_pts_joint_idxes=None): + # + joint_name_to_penetration_forces_intermediates = {} + time_cons = self.time_constant(torch.zeros((1,), dtype=torch.long).cuda()) ### time constant of the system ## + self.active_robot.set_actions_and_update_states(actions, cur_timestep, time_cons, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes, joint_name_to_penetration_forces_intermediates=joint_name_to_penetration_forces_intermediates) ### + return joint_name_to_penetration_forces_intermediates + + + + def forward_stepping_test(self, ): + # delta_glb_rot; delta_glb_trans # + timestep_to_visual_pts = {} + for i_step in range(50): + actions = {} + actions['delta_glb_rot'] = torch.eye(3, dtype=torch.float32).cuda() + actions['delta_glb_trans'] = torch.zeros((3,), dtype=torch.float32).cuda() + actions_link_actions = torch.ones((22, ), dtype=torch.float32).cuda() + # actions_link_actions = actions_link_actions * 0.2 + actions_link_actions = actions_link_actions * -1. 
#
+            actions['link_actions'] = actions_link_actions
+            self.set_actions_and_update_states(actions=actions, cur_timestep=i_step)
+
+            cur_visual_pts = self.get_init_state_visual_pts()
+            cur_visual_pts = cur_visual_pts.detach().cpu().numpy()
+            timestep_to_visual_pts[i_step + 1] = cur_visual_pts
+        return timestep_to_visual_pts
+
+    def initialize_optimization(self, reference_pts_dict):
+        self.n_timesteps = 50
+        self.n_timesteps = 19  # first 19-timestep optimization #
+        self.nn_tot_optimization_iters = 1000
+        # self.nn_tot_optimization_iters = 57
+        # load the reference points #
+        self.ts_to_reference_pts = np.load(reference_pts_dict, allow_pickle=True).item()
+        self.ts_to_reference_pts = {
+            ts: torch.from_numpy(self.ts_to_reference_pts[ts]).float().cuda() for ts in self.ts_to_reference_pts
+        }
+
+    # optimize the per-timestep link actions against the reference points #
+    def forward_stepping_optimization(self, ):
+        nn_tot_optimization_iters = self.nn_tot_optimization_iters
+        params_to_train = []
+        params_to_train += list(self.optimizable_actions.parameters())
+        self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate)
+
+        for i_iter in range(nn_tot_optimization_iters):
+
+            tot_losses = []
+            ts_to_robot_points = {}
+            for cur_ts in range(self.n_timesteps):
+                # print(f"iter: {i_iter}, cur_ts: {cur_ts}")
+                actions = {}
+                actions['delta_glb_rot'] = torch.eye(3, dtype=torch.float32).cuda()
+                actions['delta_glb_trans'] = torch.zeros((3,), dtype=torch.float32).cuda()
+                actions_link_actions = self.optimizable_actions(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0)
+                # actions_link_actions = actions_link_actions * 0.2
+                # actions_link_actions = actions_link_actions * -1.
+                actions['link_actions'] = actions_link_actions
+                self.set_actions_and_update_states(actions=actions, cur_timestep=cur_ts)  # update the states #
+
+                cur_visual_pts = self.get_init_state_visual_pts()
+                ts_to_robot_points[cur_ts + 1] = cur_visual_pts.clone()
+
+                cur_reference_pts = self.ts_to_reference_pts[cur_ts + 1]
+                diff = torch.sum((cur_visual_pts - cur_reference_pts) ** 2, dim=-1)
+                diff = diff.mean()
+
+                self.optimizer.zero_grad()
+                diff.backward()
+                self.optimizer.step()
+
+                tot_losses.append(diff.item())
+
+
+            # for ts in ts_to_robot_points:
+            #     # print(f"ts: {ts}")
+            #     if not ts in self.ts_to_reference_pts:
+            #         continue
+            #     cur_robot_pts = ts_to_robot_points[ts]
+            #     cur_reference_pts = self.ts_to_reference_pts[ts]
+            #     diff = torch.sum((cur_robot_pts - cur_reference_pts) ** 2, dim=-1)
+            #     diff = torch.mean(diff)
+            #     tot_losses.append(diff)
+
+            loss = sum(tot_losses) / float(len(tot_losses))
+            print(f"Iter: {i_iter}, average loss: {loss}")
+            # print(f"Iter: {i_iter}, average loss: {loss.item()}, start optimizing")
+            # self.optimizer.zero_grad()
+            # loss.backward()
+            # self.optimizer.step()
+
+        self.ts_to_robot_points = {
+            ts: ts_to_robot_points[ts].detach().cpu().numpy() for ts in ts_to_robot_points
+        }
+        self.ts_to_ref_points = {
+            ts: self.ts_to_reference_pts[ts].detach().cpu().numpy() for ts in ts_to_robot_points
+        }
+        return self.ts_to_robot_points, self.ts_to_ref_points
+
+
+
+
+def create_zero_states():
+    nn_joints = 17
+    joint_name_to_state = {}
+    for i_j in range(nn_joints):
+        cur_joint_name = f"joint{i_j + 1}"
+        joint_name_to_state[cur_joint_name] = 0.
+ return joint_name_to_state + +# [6.96331033e-17 3.54807679e-06 1.74046190e-15 2.66367417e-05 +# 1.22444894e-05 3.38976792e-06 1.46917635e-15 2.66367383e-05 +# 1.22444882e-05 3.38976786e-06 1.97778813e-15 2.66367383e-05 +# 1.22444882e-05 3.38976786e-06 4.76033293e-16 1.26279884e-05 +# 3.51189993e-06 0.00000000e+00 4.89999978e-03 0.00000000e+00] + + +def rotation_matrix_from_axis_angle_np(axis, angle): # rotation_matrix_from_axis_angle -> + sin_ = np.sin(angle) # ti.math.sin(angle) + cos_ = np.cos(angle) # ti.math.cos(angle) + # sin_ = torch.sin(angle) # ti.math.sin(angle) + # cos_ = torch.cos(angle) # ti.math.cos(angle) + u_x, u_y, u_z = axis[0], axis[1], axis[2] + u_xx = u_x * u_x + u_yy = u_y * u_y + u_zz = u_z * u_z + u_xy = u_x * u_y + u_xz = u_x * u_z + u_yz = u_y * u_z ## + + + row_a = np.stack( + [cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], axis=0 + ) + # print(f"row_a: {row_a.size()}") + row_b = np.stack( + [u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], axis=0 + ) + # print(f"row_b: {row_b.size()}") + row_c = np.stack( + [u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], axis=0 + ) + # print(f"row_c: {row_c.size()}") + + ### rot_mtx for the rot_mtx ### + rot_mtx = np.stack( + [row_a, row_b, row_c], axis=-1 ### rot_matrix of he matrix ## + ) + + return rot_mtx + + + + +def rotation_matrix_from_axis_angle(axis, angle): # rotation_matrix_from_axis_angle -> + # sin_ = np.sin(angle) # ti.math.sin(angle) + # cos_ = np.cos(angle) # ti.math.cos(angle) + sin_ = torch.sin(angle) # ti.math.sin(angle) + cos_ = torch.cos(angle) # ti.math.cos(angle) + u_x, u_y, u_z = axis[0], axis[1], axis[2] + u_xx = u_x * u_x + u_yy = u_y * u_y + u_zz = u_z * u_z + u_xy = u_x * u_y + u_xz = u_x * u_z + u_yz = u_y * u_z ## + + + row_a = torch.stack( + [cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], dim=0 + ) + # print(f"row_a: {row_a.size()}") + row_b = torch.stack( + [u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dim=0 + ) + # print(f"row_b: {row_b.size()}") + row_c = torch.stack( + [u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], dim=0 + ) + # print(f"row_c: {row_c.size()}") + + ### rot_mtx for the rot_mtx ### + rot_mtx = torch.stack( + [row_a, row_b, row_c], dim=-1 ### rot_matrix of he matrix ## + ) + + return rot_mtx + + +def get_camera_to_world_poses(n=10, ): + ## sample from the upper half sphere ## + # theta and phi for the + theta = np.random.uniform(low=0.0, high=1.0, size=(n,)) * np.pi * 2. 
# xz plane #
+    phi = np.random.uniform(low=-1.0, high=0.0, size=(n,)) * np.pi  ## phi in [-pi, 0] ##
+    # theta = torch.from_numpy(theta).float().cuda()
+    tot_c2w_matrix = []
+    for i_n in range(n):
+        # y_rot_vec = torch.tensor([0., 1., 0.]).float().cuda(th_cuda_idx)
+        # y_rot_mtx = load_utils.rotation_matrix_from_axis_angle(rot_vec, rot_angle)
+
+        z_axis_rot_axis = np.array([0, 0, 1.], dtype=np.float32)
+        z_axis_rot_angle = np.pi - theta[i_n]
+        z_axis_rot_matrix = rotation_matrix_from_axis_angle_np(z_axis_rot_axis, z_axis_rot_angle)
+        rotated_plane_rot_axis_ori = np.array([1, -1, 0], dtype=np.float32)
+        rotated_plane_rot_axis_ori = rotated_plane_rot_axis_ori / np.sqrt(np.sum(rotated_plane_rot_axis_ori ** 2))
+        rotated_plane_rot_axis = np.matmul(z_axis_rot_matrix, rotated_plane_rot_axis_ori)
+
+        plane_rot_angle = phi[i_n]
+        plane_rot_matrix = rotation_matrix_from_axis_angle_np(rotated_plane_rot_axis, plane_rot_angle)
+
+        c2w_matrix = np.matmul(plane_rot_matrix, z_axis_rot_matrix)
+        c2w_trans_matrix = np.array(
+            [np.cos(theta[i_n]) * np.sin(phi[i_n]), np.sin(theta[i_n]) * np.sin(phi[i_n]), np.cos(phi[i_n])], dtype=np.float32
+        )
+        c2w_matrix = np.concatenate(
+            [c2w_matrix, c2w_trans_matrix.reshape(3, 1)], axis=-1
+        )  ## c2w matrix ##
+        tot_c2w_matrix.append(c2w_matrix)
+    tot_c2w_matrix = np.stack(tot_c2w_matrix, axis=0)
+    return tot_c2w_matrix
+
+
+def get_camera_to_world_poses_th(n=10, th_cuda_idx=0):
+    ## sample camera poses from the upper half sphere ##
+    # theta and phi parameterize the camera direction #
+    theta = np.random.uniform(low=0.0, high=1.0, size=(n,)) * np.pi * 2.  # xz plane #
+    phi = np.random.uniform(low=-1.0, high=0.0, size=(n,)) * np.pi  ## phi in [-pi, 0] ##
+
+    # n_total = 14
+    # n_xz = 14
+    # n_y = 7
+    # theta = [i_xz * 1.0 / float(n_xz) * np.pi * 2.
for i_xz in range(n_xz)] + # phi = [i_y * (-1.0) / float(n_y) * np.pi for i_y in range(n_y)] + + + theta = torch.from_numpy(theta).float().cuda(th_cuda_idx) + phi = torch.from_numpy(phi).float().cuda(th_cuda_idx) + + tot_c2w_matrix = [] + for i_n in range(n): # if use veyr dense views like those + y_rot_angle = theta[i_n] + y_rot_vec = torch.tensor([0., 1., 0.]).float().cuda(th_cuda_idx) + y_rot_mtx = rotation_matrix_from_axis_angle(y_rot_vec, y_rot_angle) + + x_axis = torch.tensor([1., 0., 0.]).float().cuda(th_cuda_idx) + y_rot_x_axis = torch.matmul(y_rot_mtx, x_axis.unsqueeze(-1)).squeeze(-1) ### y_rot_x_axis # + + x_rot_angle = phi[i_n] + x_rot_mtx = rotation_matrix_from_axis_angle(y_rot_x_axis, x_rot_angle) + + rot_mtx = torch.matmul(x_rot_mtx, y_rot_mtx) + xyz_offset = torch.tensor([0., 0., 1.5]).float().cuda(th_cuda_idx) + rot_xyz_offset = torch.matmul(rot_mtx, xyz_offset.unsqueeze(-1)).squeeze(-1).contiguous() + 0.5 ### 3 for the xyz offset + + c2w_matrix = torch.cat( + [rot_mtx, rot_xyz_offset.unsqueeze(-1)], dim=-1 + ) + tot_c2w_matrix.append(c2w_matrix) + + + # z_axis_rot_axis = np.array([0, 0, 1.], dtype=np.float32) + # z_axis_rot_angle = np.pi - theta[i_n] + # z_axis_rot_matrix = rotation_matrix_from_axis_angle_np(z_axis_rot_axis, z_axis_rot_angle) + # rotated_plane_rot_axis_ori = np.array([1, -1, 0], dtype=np.float32) + # rotated_plane_rot_axis_ori = rotated_plane_rot_axis_ori / np.sqrt(np.sum(rotated_plane_rot_axis_ori ** 2)) + # rotated_plane_rot_axis = np.matmul(z_axis_rot_matrix, rotated_plane_rot_axis_ori) + + # plane_rot_angle = phi[i_n] + # plane_rot_matrix = rotation_matrix_from_axis_angle_np(rotated_plane_rot_axis, plane_rot_angle) + + # c2w_matrix = np.matmul(plane_rot_matrix, z_axis_rot_matrix) + # c2w_trans_matrix = np.array( + # [np.cos(theta[i_n]) * np.sin(phi[i_n]), np.sin(theta[i_n]) * np.sin(phi[i_n]), np.cos(phi[i_n])], dtype=np.float32 + # ) + # c2w_matrix = np.concatenate( + # [c2w_matrix, c2w_trans_matrix.reshape(3, 1)], axis=-1 + # ) ##c2w matrix + # tot_c2w_matrix.append(c2w_matrix) + # tot_c2w_matrix = np.stack(tot_c2w_matrix, axis=0) + tot_c2w_matrix = torch.stack(tot_c2w_matrix, dim=0) + return tot_c2w_matrix + + +def get_camera_to_world_poses_th_routine_1(n=7, th_cuda_idx=0): + ## sample from the upper half sphere ## + # theta and phi for the + + # theta = np.random.uniform(low=0.0, high=1.0, size=(n,)) * np.pi * 2. # xz palne # + # phi = np.random.uniform(low=-1.0, high=0.0, size=(n,)) * np.pi ## [-0.5 \pi, 0.5 \pi] ## negative pi to the original pi + + # n_total = 14 + n_xz = 2 * n # 14 + n_y = n # 7 + theta = [i_xz * 1.0 / float(n_xz) * np.pi * 2. 
for i_xz in range(n_xz)] + phi = [i_y * (-1.0) / float(n_y) * np.pi for i_y in range(n_y)] + + theta = torch.tensor(theta).float().cuda(th_cuda_idx) + phi = torch.tensor(phi).float().cuda(th_cuda_idx) + # theta = torch.from_numpy(theta).float().cuda(th_cuda_idx) + # phi = torch.from_numpy(phi).float().cuda(th_cuda_idx) + + tot_c2w_matrix = [] + + for i_theta in range(theta.size(0)): + for i_phi in range(phi.size(0)): + y_rot_angle = theta[i_theta] + y_rot_vec = torch.tensor([0., 1., 0.]).float().cuda(th_cuda_idx) + y_rot_mtx = rotation_matrix_from_axis_angle(y_rot_vec, y_rot_angle) + + x_axis = torch.tensor([1., 0., 0.]).float().cuda(th_cuda_idx) + y_rot_x_axis = torch.matmul(y_rot_mtx, x_axis.unsqueeze(-1)).squeeze(-1) ### y_rot_x_axis # + + x_rot_angle = phi[i_phi] + x_rot_mtx = rotation_matrix_from_axis_angle(y_rot_x_axis, x_rot_angle) + + rot_mtx = torch.matmul(x_rot_mtx, y_rot_mtx) + xyz_offset = torch.tensor([0., 0., 1.5]).float().cuda(th_cuda_idx) + rot_xyz_offset = torch.matmul(rot_mtx, xyz_offset.unsqueeze(-1)).squeeze(-1).contiguous() + 0.5 ### 3 for the xyz offset + + c2w_matrix = torch.cat( + [rot_mtx, rot_xyz_offset.unsqueeze(-1)], dim=-1 + ) + tot_c2w_matrix.append(c2w_matrix) + + tot_c2w_matrix = torch.stack(tot_c2w_matrix, dim=0) + return tot_c2w_matrix + + +def get_camera_to_world_poses_th_routine_2(n=7, th_cuda_idx=0): + ## sample from the upper half sphere ## + # theta and phi for the + + # theta = np.random.uniform(low=0.0, high=1.0, size=(n,)) * np.pi * 2. # xz palne # + # phi = np.random.uniform(low=-1.0, high=0.0, size=(n,)) * np.pi ## [-0.5 \pi, 0.5 \pi] ## negative pi to the original pi + + # n_total = 14 + n_xz = 2 * n # 14 + n_y = 2 * n # 7 + theta = [i_xz * 1.0 / float(n_xz) * np.pi * 2. for i_xz in range(n_xz)] + # phi = [i_y * (-1.0) / float(n_y) * np.pi for i_y in range(n_y)] + phi = [i_y * (-1.0) / float(n_y) * np.pi * 2. 
for i_y in range(n_y)] + + theta = torch.tensor(theta).float().cuda(th_cuda_idx) + phi = torch.tensor(phi).float().cuda(th_cuda_idx) + # theta = torch.from_numpy(theta).float().cuda(th_cuda_idx) + # phi = torch.from_numpy(phi).float().cuda(th_cuda_idx) + + tot_c2w_matrix = [] + + for i_theta in range(theta.size(0)): + for i_phi in range(phi.size(0)): + y_rot_angle = theta[i_theta] + y_rot_vec = torch.tensor([0., 1., 0.]).float().cuda(th_cuda_idx) + y_rot_mtx = rotation_matrix_from_axis_angle(y_rot_vec, y_rot_angle) + + x_axis = torch.tensor([1., 0., 0.]).float().cuda(th_cuda_idx) + y_rot_x_axis = torch.matmul(y_rot_mtx, x_axis.unsqueeze(-1)).squeeze(-1) ### y_rot_x_axis # + + x_rot_angle = phi[i_phi] + x_rot_mtx = rotation_matrix_from_axis_angle(y_rot_x_axis, x_rot_angle) + + rot_mtx = torch.matmul(x_rot_mtx, y_rot_mtx) + xyz_offset = torch.tensor([0., 0., 1.5]).float().cuda(th_cuda_idx) + rot_xyz_offset = torch.matmul(rot_mtx, xyz_offset.unsqueeze(-1)).squeeze(-1).contiguous() + 0.5 ### 3 for the xyz offset + + c2w_matrix = torch.cat( + [rot_mtx, rot_xyz_offset.unsqueeze(-1)], dim=-1 + ) + tot_c2w_matrix.append(c2w_matrix) + + tot_c2w_matrix = torch.stack(tot_c2w_matrix, dim=0) + return tot_c2w_matrix + + + +#### Big TODO: the external contact forces from the manipulated object to the robot #### + +## optimize for actions from the redmax model ## + +if __name__=='__main__': # agent of the + + xml_fn = "/home/xueyi/diffsim/DiffHand/assets/hand_sphere.xml" + robot_agent = RobotAgent(xml_fn=xml_fn, args=None) + init_visual_pts = robot_agent.init_visual_pts.detach().cpu().numpy() + + exit(0) + + xml_fn = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description/shadowhand_new_scaled_nroot.xml" + robot_agent = RobotAgent(xml_fn=xml_fn, args=None) + init_visual_pts = robot_agent.init_visual_pts.detach().cpu().numpy() + + robot_agent.forward_stepping_test() + cur_visual_pts = robot_agent.get_init_state_visual_pts() + cur_visual_pts = cur_visual_pts.detach().cpu().numpy() + + reference_pts_dict = "timestep_to_visual_pts.npy" + robot_agent.initialize_optimization(reference_pts_dict=reference_pts_dict) + optimized_ts_to_visual_pts, ts_to_ref_points = robot_agent.forward_stepping_optimization() + + timestep_to_visual_pts = robot_agent.forward_stepping_test() + np.save(f"cur_visual_pts.npy", timestep_to_visual_pts) # cur_visual_pts # + np.save(f"timestep_to_visual_pts_opt.npy", timestep_to_visual_pts) + np.save(f"timestep_to_visual_pts_opt.npy", optimized_ts_to_visual_pts) + np.save(f"timestep_to_ref_pts.npy", ts_to_ref_points) + + + exit(0) + + + xml_fn = "/home/xueyi/diffsim/DiffHand/assets/hand_sphere.xml" + robot_agent = RobotAgent(xml_fn=xml_fn, args=None) + init_visual_pts = robot_agent.init_visual_pts.detach().cpu().numpy() + + # np.save(f"init_visual_pts.npy", init_visual_pts) # + + # robot_agent.forward_stepping_test() + # cur_visual_pts = robot_agent.get_init_state_visual_pts() + # cur_visual_pts = cur_visual_pts.detach().cpu().numpy() + + # reference_pts_dict = "timestep_to_visual_pts.npy" + # robot_agent.initialize_optimization(reference_pts_dict=reference_pts_dict) + # optimized_ts_to_visual_pts, ts_to_ref_points = robot_agent.forward_stepping_optimization() + + # timestep_to_visual_pts = robot_agent.forward_stepping_test() + # np.save(f"cur_visual_pts.npy", timestep_to_visual_pts) # cur_visual_pts # + # np.save(f"timestep_to_visual_pts_opt.npy", timestep_to_visual_pts) + # np.save(f"timestep_to_visual_pts_opt.npy", optimized_ts_to_visual_pts) + # np.save(f"timestep_to_ref_pts.npy", 
ts_to_ref_points) + + exit(0) + + + xml_fn = "/home/xueyi/diffsim/DiffHand/assets/hand_sphere.xml" + tot_robots = parse_data_from_xml(xml_fn=xml_fn) + # tot_robots = + + active_optimized_states = """-0.00025872 -0.00025599 -0.00025296 -0.00022881 -0.00024449 -0.0002549 -0.00025296 -0.00022881 -0.00024449 -0.0002549 -0.00025296 -0.00022881 -0.00024449 -0.0002549 -0.00025694 -0.00024656 -0.00025556 0. 0.0049 0.""" + active_optimized_states = """-1.10617972 -1.10742263 -1.06198363 -1.03212746 -1.05429142 -1.08617289 -1.05868192 -1.01624365 -1.04478191 -1.08260959 -1.06719107 -1.04082455 -1.05995886 -1.08674006 -1.09396691 -1.08965532 -1.10036577 -10.7117466 -3.62511998 1.49450353""" + # active_goal_optimized_states = """-1.10617972 -1.10742263 -1.0614858 -1.03189609 -1.05404354 -1.08610468 -1.05863293 -1.0174248 -1.04576456 -1.08297396 -1.06719107 -1.04082455 -1.05995886 -1.08674006 -1.09396691 -1.08965532 -1.10036577 -10.73396897 -3.68095432 1.50679285""" + active_optimized_states = """-0.42455298 -0.42570447 -0.40567708 -0.39798589 -0.40953955 -0.42025055 -0.37910662 -0.496165 -0.37664644 -0.41942727 -0.40596508 -0.3982109 -0.40959847 -0.42024905 -0.41835001 -0.41929961 -0.42365131 -1.18756073 -2.90337822 0.4224685""" + active_optimized_states = """-0.42442816 -0.42557961 -0.40366201 -0.3977891 -0.40947627 -0.4201424 -0.3799285 -0.3808375 -0.37953552 -0.42039598 -0.4058405 -0.39808804 -0.40947487 -0.42012458 -0.41822534 -0.41917521 -0.4235266 -0.87189658 -1.42093761 0.21977979""" + + active_robot = tot_robots[0] + zero_states = create_zero_states() + active_robot.set_state(zero_states) + active_robot.compute_transformation() + name_to_visual_pts_surfaces = {} + active_robot.get_name_to_visual_pts_faces(name_to_visual_pts_surfaces) + print(len(name_to_visual_pts_surfaces)) + + sv_res_rt = "/home/xueyi/diffsim/DiffHand/examples/save_res" + sv_res_rt = os.path.join(sv_res_rt, "load_utils_test") + os.makedirs(sv_res_rt, exist_ok=True) + + tmp_visual_res_sv_fn = os.path.join(sv_res_rt, f"res_with_zero_states.npy") + np.save(tmp_visual_res_sv_fn, name_to_visual_pts_surfaces) + print(f"tmp visual res saved to {tmp_visual_res_sv_fn}") + + + optimized_states = get_name_to_state_from_str(active_optimized_states) + active_robot.set_state(optimized_states) + active_robot.compute_transformation() + name_to_visual_pts_surfaces = {} + active_robot.get_name_to_visual_pts_faces(name_to_visual_pts_surfaces) + print(len(name_to_visual_pts_surfaces)) + # sv_res_rt = "/home/xueyi/diffsim/DiffHand/examples/save_res" + # sv_res_rt = os.path.join(sv_res_rt, "load_utils_test") + # os.makedirs(sv_res_rt, exist_ok=True) + + # tmp_visual_res_sv_fn = os.path.join(sv_res_rt, f"res_with_optimized_states.npy") + tmp_visual_res_sv_fn = os.path.join(sv_res_rt, f"active_ngoal_res_with_optimized_states_goal_n3.npy") + np.save(tmp_visual_res_sv_fn, name_to_visual_pts_surfaces) + print(f"tmp visual res with optimized states saved to {tmp_visual_res_sv_fn}") + diff --git a/models/dyn_model_act_v2.py b/models/dyn_model_act_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..14ed56258adcde2149c3cd2c41d7b87b811b90ef --- /dev/null +++ b/models/dyn_model_act_v2.py @@ -0,0 +1,2712 @@ + +import math +# import torch +# from ..utils import Timer +import numpy as np +# import torch.nn.functional as F +import os + +import argparse + +from xml.etree.ElementTree import ElementTree + +import trimesh +import torch +import torch.nn as nn +# import List +# class link; joint; body +### + +from scipy.spatial.transform import 
Rotation as R +from torch.distributions.uniform import Uniform +# deformable articulated objects with the articulated models # + +DAMPING = 1.0 +DAMPING = 0.3 + +urdf_fn = "" + +def plane_rotation_matrix_from_angle_xz(angle): + sin_ = torch.sin(angle) + cos_ = torch.cos(angle) + zero_padding = torch.zeros_like(cos_) + one_padding = torch.ones_like(cos_) + col_a = torch.stack( + [cos_, zero_padding, sin_], dim=0 + ) + col_b = torch.stack( + [zero_padding, one_padding, zero_padding], dim=0 + ) + col_c = torch.stack( + [-1. * sin_, zero_padding, cos_], dim=0 + ) + rot_mtx = torch.stack( + [col_a, col_b, col_c], dim=-1 + ) + return rot_mtx + +def plane_rotation_matrix_from_angle(angle): + ## angle of + sin_ = torch.sin(angle) + cos_ = torch.cos(angle) + col_a = torch.stack( + [cos_, sin_], dim=0 ### col of the rotation matrix + ) + col_b = torch.stack( + [-1. * sin_, cos_], dim=0 ## cols of the rotation matrix + ) + rot_mtx = torch.stack( + [col_a, col_b], dim=-1 ### rotation matrix + ) + return rot_mtx + +def rotation_matrix_from_axis_angle(axis, angle): # rotation_matrix_from_axis_angle -> + # sin_ = np.sin(angle) # ti.math.sin(angle) + # cos_ = np.cos(angle) # ti.math.cos(angle) + sin_ = torch.sin(angle) # ti.math.sin(angle) + cos_ = torch.cos(angle) # ti.math.cos(angle) + u_x, u_y, u_z = axis[0], axis[1], axis[2] + u_xx = u_x * u_x + u_yy = u_y * u_y + u_zz = u_z * u_z + u_xy = u_x * u_y + u_xz = u_x * u_z + u_yz = u_y * u_z + + row_a = torch.stack( + [cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], dim=0 + ) + # print(f"row_a: {row_a.size()}") + row_b = torch.stack( + [u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dim=0 + ) + # print(f"row_b: {row_b.size()}") + row_c = torch.stack( + [u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], dim=0 + ) + # print(f"row_c: {row_c.size()}") + + ### rot_mtx for the rot_mtx ### + rot_mtx = torch.stack( + [row_a, row_b, row_c], dim=-1 ### rot_matrix of he matrix ## + ) + + return rot_mtx + + +def update_quaternion(delta_angle, prev_quat): + s1 = 0 + s2 = prev_quat[0] + v2 = prev_quat[1:] + v1 = delta_angle / 2 + new_v = s1 * v2 + s2 * v1 + torch.cross(v1, v2) + new_s = s1 * s2 - torch.sum(v1 * v2) + new_quat = torch.cat([new_s.unsqueeze(0), new_v], dim=0) + return new_quat + + +def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as quaternions to rotation matrices. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + r, i, j, k = torch.unbind(quaternions, -1) # -1 for the quaternion matrix # + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. 
+ two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + + + +class Inertial: + def __init__(self, origin_rpy, origin_xyz, mass, inertia) -> None: + self.origin_rpy = origin_rpy + self.origin_xyz = origin_xyz + self.mass = mass + self.inertia = inertia + if torch.sum(self.inertia).item() < 1e-4: + self.inertia = self.inertia + torch.eye(3, dtype=torch.float32).cuda() + pass + +class Visual: + def __init__(self, visual_xyz, visual_rpy, geometry_mesh_fn, geometry_mesh_scale) -> None: + # self.visual_origin = visual_origin + self.visual_xyz = visual_xyz + self.visual_rpy = visual_rpy + self.mesh_nm = geometry_mesh_fn.split("/")[-1].split(".")[0] + mesh_root = "/home/xueyi/diffsim/NeuS/rsc/mano" + if not os.path.exists(mesh_root): + mesh_root = "/data/xueyi/diffsim/NeuS/rsc/mano" + if "shadow" in urdf_fn and "left" in urdf_fn: + mesh_root = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description_left" + if not os.path.exists(mesh_root): + mesh_root = "/root/diffsim/quasi-dyn/rsc/shadow_hand_description_left" + elif "shadow" in urdf_fn: + mesh_root = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description" + if not os.path.exists(mesh_root): + mesh_root = "/root/diffsim/quasi-dyn/rsc/shadow_hand_description" + elif "redmax" in urdf_fn: + mesh_root = "/home/xueyi/diffsim/NeuS/rsc/redmax_hand" + if not os.path.exists(mesh_root): + mesh_root = "/root/diffsim/quasi-dyn/rsc/redmax_hand" + + self.mesh_root = mesh_root + geometry_mesh_fn = geometry_mesh_fn.replace(".dae", ".obj") + self.geometry_mesh_fn = os.path.join(mesh_root, geometry_mesh_fn) + + self.geometry_mesh_scale = geometry_mesh_scale + # tranformed by xyz # + self.vertices, self.faces = self.load_geoemtry_mesh() + self.cur_expanded_visual_pts = None + pass + + def load_geoemtry_mesh(self, ): + # mesh_root = + # if self.geometry_mesh_fn.end + mesh = trimesh.load_mesh(self.geometry_mesh_fn) + vertices = mesh.vertices + faces = mesh.faces + + vertices = torch.from_numpy(vertices).float().cuda() + faces =torch.from_numpy(faces).long().cuda() + + vertices = vertices * self.geometry_mesh_scale.unsqueeze(0) + self.visual_xyz.unsqueeze(0) + + return vertices, faces + + # init_visual_meshes = get_init_visual_meshes(self, parent_rot, parent_trans, init_visual_meshes) + def get_init_visual_meshes(self, parent_rot, parent_trans, init_visual_meshes, expanded_pts=False): + # cur_vertices = torch.matmul(parent_rot, self.vertices.transpose(1, 0)).contiguous().transpose(1, 0).contiguous() + parent_trans.unsqueeze(0) + + if not expanded_pts: + cur_vertices = self.vertices + # print(f"adding mesh loaded from {self.geometry_mesh_fn}") + init_visual_meshes['vertices'].append(cur_vertices) # cur vertices # trans # + init_visual_meshes['faces'].append(self.faces) + else: + ## expanded visual meshes ## + cur_vertices = self.cur_expanded_visual_pts + init_visual_meshes['vertices'].append(cur_vertices) + init_visual_meshes['faces'].append(self.faces) + return init_visual_meshes + + def expand_visual_pts(self, ): + # expand_factor = 0.2 + # nn_expand_pts = 20 + + # expand_factor = 0.4 + # nn_expand_pts = 40 ### number of the expanded points ### ## points ## + + # expand_factor = 0.2 + # nn_expand_pts = 20 ## + + expand_factor = 
0.1 + nn_expand_pts = 10 ## + expand_save_fn = f"{self.mesh_nm}_expanded_pts_factor_{expand_factor}_nnexp_{nn_expand_pts}_new.npy" + expand_save_fn = os.path.join(self.mesh_root, expand_save_fn) # + + if not os.path.exists(expand_save_fn): + cur_expanded_visual_pts = [] + if self.cur_expanded_visual_pts is None: + cur_src_pts = self.vertices + else: + cur_src_pts = self.cur_expanded_visual_pts + maxx_verts, _ = torch.max(cur_src_pts, dim=0) + minn_verts, _ = torch.min(cur_src_pts, dim=0) + extent_verts = maxx_verts - minn_verts ## (3,)-dim vecotr + norm_extent_verts = torch.norm(extent_verts, dim=-1).item() ## (1,)-dim vector + expand_r = norm_extent_verts * expand_factor + # nn_expand_pts = 5 # expand the vertices to 5 times of the original vertices + for i_pts in range(self.vertices.size(0)): + cur_pts = cur_src_pts[i_pts] + # sample from the circile with cur_pts as thejcenter and the radius as expand_r + # (-r, r) # sample the offset vector in the size of (nn_expand_pts, 3) + offset_dist = Uniform(-1. * expand_r, expand_r) + offset_vec = offset_dist.sample((nn_expand_pts, 3)).cuda() + cur_expanded_pts = cur_pts + offset_vec + cur_expanded_visual_pts.append(cur_expanded_pts) + cur_expanded_visual_pts = torch.cat(cur_expanded_visual_pts, dim=0) + np.save(expand_save_fn, cur_expanded_visual_pts.detach().cpu().numpy()) + else: + print(f"Loading visual pts from {expand_save_fn}") # load from the fn # + cur_expanded_visual_pts = np.load(expand_save_fn, allow_pickle=True) + cur_expanded_visual_pts = torch.from_numpy(cur_expanded_visual_pts).float().cuda() + self.cur_expanded_visual_pts = cur_expanded_visual_pts # expanded visual pts # + return self.cur_expanded_visual_pts + +## epand +## link urdf ## expand the visual pts to form the expanded visual grids pts # +# use get_name_to_visual_pts_faces to get the transformed visual pts and faces # +class Link_urdf: + def __init__(self, name, inertial: Inertial, visual: Visual=None) -> None: + + self.name = name + self.inertial = inertial + self.visual = visual # vsiual meshes # + + # self.joint = joint + # self.body = body + # self.children = children + # self.name = name + + self.link_idx = ... + + # self.args = args + + self.joint = None # joint name to struct + # self.join + self.children = ... 
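+        # self.joint maps joint name -> joint struct and self.children maps joint name -> child link name (see set_initial_state below)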
+ self.children = {} # joint name to child sruct + + def expand_visual_pts(self, expanded_visual_pts, link_name_to_visited, link_name_to_link_struct): + link_name_to_visited[self.name] = 1 + if self.visual is not None: + cur_expanded_visual_pts = self.visual.expand_visual_pts() + expanded_visual_pts.append(cur_expanded_visual_pts) + + for cur_link in self.children: + cur_link_struct = link_name_to_link_struct[self.children[cur_link]] + cur_link_name = cur_link_struct.name + if cur_link_name in link_name_to_visited: + continue + ## expanded visual pts for the expand visual ptsS ## + ## link name to visited ## + expanded_visual_pts = cur_link_struct.expand_visual_pts(expanded_visual_pts, link_name_to_visited, link_name_to_link_struct) + return expanded_visual_pts + + def set_initial_state(self, states, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct): + + link_name_to_visited[self.name] = 1 + + if self.joint is not None: + for cur_joint_name in self.joint: + cur_joint = self.joint[cur_joint_name] + cur_joint_name = cur_joint.name + cur_child = self.children[cur_joint_name] + cur_child_struct = link_name_to_link_struct[cur_child] + cur_child_name = cur_child_struct.name + + if cur_child_name in link_name_to_visited: + continue + if cur_joint.type in ['revolute']: + cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] # action joint name to joint idx # + # cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] # + # cur_joint = self.joint[cur_joint_name] + cur_state = states[cur_joint_idx] ### joint state ### + cur_joint.set_initial_state(cur_state) + cur_child_struct.set_initial_state(states, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct) + + + + def set_penetration_forces(self, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct, parent_rot, parent_trans, penetration_forces, sampled_visual_pts_joint_idxes, joint_penetration_forces): + link_name_to_visited[self.name] = 1 + + # the current joint of the # update state # + if self.joint is not None: + for cur_joint_name in self.joint: + + cur_joint = self.joint[cur_joint_name] # joint model + + cur_child = self.children[cur_joint_name] # child model # + + cur_child_struct = link_name_to_link_struct[cur_child] + + cur_child_name = cur_child_struct.name + + cur_child_link_idx = cur_child_struct.link_idx + + if cur_child_name in link_name_to_visited: + continue + + try: + cur_child_inertia = cur_child_struct.cur_inertia + except: + cur_child_inertia = torch.eye(3, dtype=torch.float32).cuda() + + + if cur_joint.type in ['revolute'] and (cur_joint_name not in ['WRJ2', 'WRJ1']): + cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] + # cur_action = actions[cur_joint_idx] + ### get the child struct ### + # set_actions_and_update_states(self, action, cur_timestep, time_cons, cur_inertia): + # set actions and update states # + cur_joint_rot, cur_joint_trans = cur_joint.compute_transformation_from_current_state(n_grad=True) + cur_joint_tot_rot = torch.matmul(parent_rot, cur_joint_rot) ## R_p (R_j p + t_j) + t_p + cur_joint_tot_trans = torch.matmul(parent_rot, cur_joint_trans.unsqueeze(-1)).squeeze(-1) + parent_trans + + # cur_joint.set_actions_and_update_states_v2(cur_action, cur_timestep, time_cons, cur_child_inertia.detach(), parent_rot, parent_trans + cur_joint.origin_xyz, penetration_forces=penetration_forces, link_idx=cur_child_link_idx) + + # cur_timestep, time_cons, cur_inertia, cur_joint_tot_rot=None, cur_joint_tot_trans=None, 
penetration_forces=None, sampled_visual_pts_joint_idxes=None, joint_idx=None + + + cur_joint.set_penetration_forces(cur_child_inertia.detach(), cur_joint_tot_rot, cur_joint_tot_trans, link_idx=cur_child_link_idx, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes, joint_idx=cur_joint_idx - 2, joint_penetration_forces=joint_penetration_forces) + else: + cur_joint_tot_rot = parent_rot + cur_joint_tot_trans = parent_trans + + + cur_child_struct.set_penetration_forces(action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct, parent_rot=cur_joint_tot_rot, parent_trans=cur_joint_tot_trans, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes, joint_penetration_forces=joint_penetration_forces) + + + + + + def get_init_visual_meshes(self, parent_rot, parent_trans, init_visual_meshes, link_name_to_link_struct, link_name_to_visited, expanded_pts=False, joint_idxes=None, state_vals=None): + link_name_to_visited[self.name] = 1 + + # 'transformed_joint_pos': [], 'link_idxes': [] + if self.joint is not None: # get init visual meshes # + # for i_ch, (cur_joint, cur_child) in enumerate(zip(self.joint, self.children)): + # print(f"joint: {cur_joint.name}, child: {cur_child.name}, parent: {self.name}, child_visual: {cur_child.visual is not None}") + # joint_origin_xyz = cur_joint.origin_xyz + # init_visual_meshes = cur_child.get_init_visual_meshes(parent_rot, parent_trans + joint_origin_xyz, init_visual_meshes) + # print(f"name: {self.name}, keys: {self.joint.keys()}") + for cur_joint_name in self.joint: # + cur_joint = self.joint[cur_joint_name] + + # if state_vals is not None: + # cur_joint_idx = cur_joint.joint_idx + # state_vals[cur_joint_idx] = cur_joint.state.detach().cpu().numpy() + + cur_child_name = self.children[cur_joint_name] + cur_child = link_name_to_link_struct[cur_child_name] + # print(f"joint: {cur_joint.name}, child: {cur_child_name}, parent: {self.name}, child_visual: {cur_child.visual is not None}") + # print(f"joint: {cur_joint.name}, child: {cur_child_name}, parent: {self.name}, child_visual: {cur_child.visual is not None}") + joint_origin_xyz = cur_joint.origin_xyz + if cur_child_name in link_name_to_visited: + continue + cur_child_visual_pts = {'vertices': [], 'faces': [], 'link_idxes': [], 'transformed_joint_pos': [], 'joint_link_idxes': []} + + # joint idxes # + cur_child_visual_pts, joint_idxes = cur_child.get_init_visual_meshes(parent_rot, parent_trans + joint_origin_xyz, cur_child_visual_pts, link_name_to_link_struct, link_name_to_visited, expanded_pts=expanded_pts, joint_idxes=joint_idxes) + + cur_child_verts, cur_child_faces = cur_child_visual_pts['vertices'], cur_child_visual_pts['faces'] + cur_child_link_idxes = cur_child_visual_pts['link_idxes'] + cur_transformed_joint_pos = cur_child_visual_pts['transformed_joint_pos'] + joint_link_idxes = cur_child_visual_pts['joint_link_idxes'] + + if len(cur_child_verts) > 0: + cur_child_verts, cur_child_faces = merge_meshes(cur_child_verts, cur_child_faces) + cur_child_verts = cur_child_verts + cur_joint.origin_xyz.unsqueeze(0) + cur_joint_rot, cur_joint_trans = cur_joint.compute_transformation_from_current_state() + cur_child_verts = torch.matmul(cur_joint_rot, cur_child_verts.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_joint_trans.unsqueeze(0) + + if len(cur_transformed_joint_pos) > 0: + cur_transformed_joint_pos = torch.cat(cur_transformed_joint_pos, dim=0) + cur_transformed_joint_pos = 
cur_transformed_joint_pos + cur_joint.origin_xyz.unsqueeze(0) + cur_transformed_joint_pos = torch.matmul(cur_joint_rot, cur_transformed_joint_pos.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_joint_trans.unsqueeze(0) + cur_joint_pos = cur_joint_trans.unsqueeze(0).clone() + cur_transformed_joint_pos = torch.cat( + [cur_transformed_joint_pos, cur_joint_pos], dim=0 ##### joint poses ##### + ) + else: + cur_transformed_joint_pos = cur_joint_trans.unsqueeze(0).clone() + + if len(joint_link_idxes) > 0: + joint_link_idxes = torch.cat(joint_link_idxes, dim=-1) ### joint_link idxes ### + cur_joint_idx = cur_child.link_idx + joint_link_idxes = torch.cat( + [joint_link_idxes, torch.tensor([cur_joint_idx], dtype=torch.long).cuda()], dim=-1 + ) + else: + joint_link_idxes = torch.tensor([cur_child.link_idx], dtype=torch.long).cuda().view(1,) + + # joint link idxes # + + # cur_child_verts = cur_child_verts + # transformed joint pos # + cur_child_link_idxes = torch.cat(cur_child_link_idxes, dim=-1) + # joint_link_idxes = torch.cat(joint_link_idxes, dim=-1) + init_visual_meshes['vertices'].append(cur_child_verts) + init_visual_meshes['faces'].append(cur_child_faces) + init_visual_meshes['link_idxes'].append(cur_child_link_idxes) + init_visual_meshes['transformed_joint_pos'].append(cur_transformed_joint_pos) + init_visual_meshes['joint_link_idxes'].append(joint_link_idxes) + + # joint_origin_xyz = self.joint.origin_xyz # c ## get forces from the expanded point set ## + else: + joint_origin_xyz = torch.tensor([0., 0., 0.], dtype=torch.float32).cuda() + # self.parent_rot_mtx = parent_rot + # self.parent_trans_vec = parent_trans + joint_origin_xyz + + + if self.visual is not None: + # ## get init visual meshes ## ## -- + init_visual_meshes = self.visual.get_init_visual_meshes(parent_rot, parent_trans, init_visual_meshes, expanded_pts=expanded_pts) + cur_visual_mesh_pts_nn = self.visual.vertices.size(0) + cur_link_idxes = torch.zeros((cur_visual_mesh_pts_nn, ), dtype=torch.long).cuda()+ self.link_idx + init_visual_meshes['link_idxes'].append(cur_link_idxes) + + # self.link_idx # + if joint_idxes is not None: + cur_idxes = [self.link_idx for _ in range(cur_visual_mesh_pts_nn)] + cur_idxes = torch.tensor(cur_idxes, dtype=torch.long).cuda() + joint_idxes.append(cur_idxes) + + + + # for cur_link in self.children: # + # init_visual_meshes = cur_link.get_init_visual_meshes(self.parent_rot_mtx, self.parent_trans_vec, init_visual_meshes) + return init_visual_meshes, joint_idxes ## init visual meshes ## + + # calculate inerti + def calculate_inertia(self, link_name_to_visited, link_name_to_link_struct): + link_name_to_visited[self.name] = 1 + self.cur_inertia = torch.zeros((3, 3), dtype=torch.float32).cuda() + + if self.joint is not None: + for joint_nm in self.joint: + cur_joint = self.joint[joint_nm] + cur_child = self.children[joint_nm] + cur_child_struct = link_name_to_link_struct[cur_child] + cur_child_name = cur_child_struct.name + if cur_child_name in link_name_to_visited: + continue + joint_rot, joint_trans = cur_joint.compute_transformation_from_current_state(n_grad=True) + # cur_parent_rot = torch.matmul(parent_rot, joint_rot) # + # cur_parent_trans = torch.matmul(parent_rot, joint_trans.unsqueeze(-1)).squeeze(-1) + parent_trans # + child_inertia = cur_child_struct.calculate_inertia(link_name_to_visited, link_name_to_link_struct) + child_inertia = torch.matmul( + joint_rot.detach(), torch.matmul(child_inertia, joint_rot.detach().transpose(1, 0).contiguous()) + ).detach() + self.cur_inertia 
+= child_inertia + # if self.visual is not None: + # self.cur_inertia += self.visual.inertia + self.cur_inertia += self.inertial.inertia.detach() + return self.cur_inertia + + + def set_delta_state_and_update(self, states, cur_timestep, link_name_to_visited, action_joint_name_to_joint_idx, link_name_to_link_struct): + + link_name_to_visited[self.name] = 1 + + if self.joint is not None: + for cur_joint_name in self.joint: + + cur_joint = self.joint[cur_joint_name] # joint model + + cur_child = self.children[cur_joint_name] # child model # + + cur_child_struct = link_name_to_link_struct[cur_child] + + cur_child_name = cur_child_struct.name + + if cur_child_name in link_name_to_visited: + continue + + ## cur child inertia ## + # cur_child_inertia = cur_child_struct.cur_inertia + + + if cur_joint.type in ['revolute']: + cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] + cur_state = states[cur_joint_idx] + ### get the child struct ### + # set_actions_and_update_states(self, action, cur_timestep, time_cons, cur_inertia): + # set actions and update states # + cur_joint.set_delta_state_and_update(cur_state, cur_timestep) + + cur_child_struct.set_delta_state_and_update(states, cur_timestep, link_name_to_visited, action_joint_name_to_joint_idx, link_name_to_link_struct) + + def set_delta_state_and_update_v2(self, states, cur_timestep, link_name_to_visited, action_joint_name_to_joint_idx, link_name_to_link_struct): + link_name_to_visited[self.name] = 1 + + if self.joint is not None: + for cur_joint_name in self.joint: + cur_joint = self.joint[cur_joint_name] # joint model + cur_child = self.children[cur_joint_name] # child model # + cur_child_struct = link_name_to_link_struct[cur_child] + cur_child_name = cur_child_struct.name + if cur_child_name in link_name_to_visited: + continue + # cur_child_inertia = cur_child_struct.cur_inertia + if cur_joint.type in ['revolute']: + cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] + cur_state = states[cur_joint_idx] + ### get the child struct ### + # set_actions_and_update_states(self, action, cur_timestep, time_cons, cur_inertia): + # set actions and update states # + cur_joint.set_delta_state_and_update_v2(cur_state, cur_timestep) + cur_child_struct.set_delta_state_and_update_v2(states, cur_timestep, link_name_to_visited, action_joint_name_to_joint_idx, link_name_to_link_struct) + + # get_joint_state(self, cur_ts, state_vals): + def get_joint_state(self, cur_ts, state_vals, link_name_to_visited, link_name_to_link_struct, action_joint_name_to_joint_idx): + link_name_to_visited[self.name] = 1 + if self.joint is not None: + for cur_joint_name in self.joint: + + cur_joint = self.joint[cur_joint_name] # joint model + + cur_child = self.children[cur_joint_name] # child model # + + cur_child_struct = link_name_to_link_struct[cur_child] + + cur_child_name = cur_child_struct.name + + if cur_child_name in link_name_to_visited: + continue + + if cur_joint.type in ['revolute']: + cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] + state_vals[cur_joint_idx] = cur_joint.timestep_to_states[cur_ts + 1] # .state.detach().cpu().numpy() + # state_vals = cur_joint.get_joint_state(cur_ts, state_vals) + + state_vals = cur_child_struct.get_joint_state(cur_ts, state_vals, link_name_to_visited, link_name_to_link_struct, action_joint_name_to_joint_idx) + return state_vals + + # the joint # + # set_actions_and_update_states(actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct) + 
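+    # for each revolute child joint: look up its action by joint index, update that joint's state using the
+    # child link's current inertia, then recurse into the child (link_name_to_visited guards against revisiting links)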
def set_actions_and_update_states(self, actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct): + + link_name_to_visited[self.name] = 1 + + # the current joint of the # update state # + if self.joint is not None: + for cur_joint_name in self.joint: + + cur_joint = self.joint[cur_joint_name] # joint model + + cur_child = self.children[cur_joint_name] # child model # + + cur_child_struct = link_name_to_link_struct[cur_child] + + cur_child_name = cur_child_struct.name + + if cur_child_name in link_name_to_visited: + continue + + cur_child_inertia = cur_child_struct.cur_inertia + + + if cur_joint.type in ['revolute']: + cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] + cur_action = actions[cur_joint_idx] + ### get the child struct ### + # set_actions_and_update_states(self, action, cur_timestep, time_cons, cur_inertia): + # set actions and update states # + cur_joint.set_actions_and_update_states(cur_action, cur_timestep, time_cons, cur_child_inertia.detach()) + + cur_child_struct.set_actions_and_update_states(actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct) + + + def set_actions_and_update_states_v2(self, actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct, parent_rot, parent_trans, penetration_forces=None, sampled_visual_pts_joint_idxes=None): + + link_name_to_visited[self.name] = 1 + + # the current joint of the # update state # + if self.joint is not None: + for cur_joint_name in self.joint: + + cur_joint = self.joint[cur_joint_name] # joint model + + cur_child = self.children[cur_joint_name] # child model # + + cur_child_struct = link_name_to_link_struct[cur_child] + + cur_child_name = cur_child_struct.name + + cur_child_link_idx = cur_child_struct.link_idx + + if cur_child_name in link_name_to_visited: + continue + + try: + cur_child_inertia = cur_child_struct.cur_inertia + except: + cur_child_inertia = torch.eye(3, dtype=torch.float32).cuda() + + + if cur_joint.type in ['revolute']: + cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] + cur_action = actions[cur_joint_idx] + ### get the child struct ### + # set_actions_and_update_states(self, action, cur_timestep, time_cons, cur_inertia): + # set actions and update states # + cur_joint_rot, cur_joint_trans = cur_joint.compute_transformation_from_current_state(n_grad=True) + cur_joint_tot_rot = torch.matmul(parent_rot, cur_joint_rot) ## R_p (R_j p + t_j) + t_p + cur_joint_tot_trans = torch.matmul(parent_rot, cur_joint_trans.unsqueeze(-1)).squeeze(-1) + parent_trans + + # cur_joint.set_actions_and_update_states_v2(cur_action, cur_timestep, time_cons, cur_child_inertia.detach(), parent_rot, parent_trans + cur_joint.origin_xyz, penetration_forces=penetration_forces, link_idx=cur_child_link_idx) + + cur_joint.set_actions_and_update_states_v2(cur_action, cur_timestep, time_cons, cur_child_inertia.detach(), cur_joint_tot_rot, cur_joint_tot_trans, penetration_forces=penetration_forces, link_idx=cur_child_link_idx, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes) + else: + cur_joint_tot_rot = parent_rot + cur_joint_tot_trans = parent_trans + + + cur_child_struct.set_actions_and_update_states_v2(actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct, parent_rot=cur_joint_tot_rot, parent_trans=cur_joint_tot_trans, penetration_forces=penetration_forces, 
sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes) + + + def set_init_states_target_value(self, init_states): + if self.joint.type == 'revolute': + self.joint_angle = init_states[self.joint.joint_idx] + joint_axis = self.joint.axis + self.rot_vec = self.joint_angle * joint_axis + self.joint.state = torch.tensor([1, 0, 0, 0], dtype=torch.float32).cuda() + self.joint.state = self.joint.state + update_quaternion(self.rot_vec, self.joint.state) + self.joint.timestep_to_states[0] = self.joint.state.detach() + self.joint.timestep_to_vels[0] = torch.zeros((3,), dtype=torch.float32).cuda().detach() ## velocity ## + for cur_link in self.children: + cur_link.set_init_states_target_value(init_states) + + # should forward for one single step -> use the action # + def set_init_states(self, ): + self.joint.state = torch.tensor([1, 0, 0, 0], dtype=torch.float32).cuda() + self.joint.timestep_to_states[0] = self.joint.state.detach() + self.joint.timestep_to_vels[0] = torch.zeros((3,), dtype=torch.float32).cuda().detach() ## velocity ## + for cur_link in self.children: + cur_link.set_init_states() + + + def get_visual_pts(self, visual_pts_list): + visual_pts_list = self.body.get_visual_pts(visual_pts_list) + for cur_link in self.children: + visual_pts_list = cur_link.get_visual_pts(visual_pts_list) + visual_pts_list = torch.cat(visual_pts_list, dim=0) + return visual_pts_list + + def get_visual_faces_list(self, visual_faces_list): + visual_faces_list = self.body.get_visual_faces_list(visual_faces_list) + for cur_link in self.children: + visual_faces_list = cur_link.get_visual_faces_list(visual_faces_list) + return visual_faces_list + # pass + + + def set_state(self, name_to_state): + self.joint.set_state(name_to_state=name_to_state) + for child_link in self.children: + child_link.set_state(name_to_state) + + def set_state_via_vec(self, state_vec): + self.joint.set_state_via_vec(state_vec) + for child_link in self.children: + child_link.set_state_via_vec(state_vec) + + + + +class Joint_Limit: + def __init__(self, effort, lower, upper, velocity) -> None: + self.effort = effort + self.lower = lower + self.velocity = velocity + self.upper = upper + pass + +# Joint_urdf(name, joint_type, parent_link, child_link, origin_xyz, axis_xyz, limit: Joint_Limit) +class Joint_urdf: # + + def __init__(self, name, joint_type, parent_link, child_link, origin_xyz, axis_xyz, limit: Joint_Limit, origin_xyz_string="") -> None: + self.name = name + self.type = joint_type + self.parent_link = parent_link + self.child_link = child_link + self.origin_xyz = origin_xyz + self.axis_xyz = axis_xyz + self.limit = limit + + self.origin_xyz_string = origin_xyz_string + + # joint angle; joint state # + self.timestep_to_vels = {} + self.timestep_to_states = {} + + self.init_pos = self.origin_xyz.clone() + + #### only for the current state #### # joint urdf # + self.state = nn.Parameter( + torch.tensor([1., 0., 0., 0.], dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True + ) + self.action = nn.Parameter( + torch.zeros((1,), dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True + ) + # self.rot_mtx = np.eye(3, dtypes=np.float32) + # self.trans_vec = np.zeros((3,), dtype=np.float32) ## rot m + self.rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True) + self.trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True) + + def set_initial_state(self, state): + # joint angle as the state value # + 
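+        # (editorial comment) `state` is a scalar joint angle: it is turned into
+        # a rotation vector delta_rot_vec = axis_xyz * state and then into a
+        # quaternion by perturbing the identity quaternion [1, 0, 0, 0] with
+        # update_quaternion (defined elsewhere in this file); the angular
+        # velocity for timestep 0 is initialized to zero.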
self.timestep_to_vels[0] = torch.zeros((3,), dtype=torch.float32).cuda().detach() ## velocity ## + delta_rot_vec = self.axis_xyz * state + # self.timestep_to_states[0] = state.detach() + cur_state = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + init_state = cur_state + update_quaternion(delta_rot_vec, cur_state) + self.timestep_to_states[0] = init_state.detach() + self.state = init_state + + def set_delta_state_and_update(self, state, cur_timestep): + self.timestep_to_vels[cur_timestep] = torch.zeros((3,), dtype=torch.float32).cuda().detach() + delta_rot_vec = self.axis_xyz * state + if cur_timestep == 0: + prev_state = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + else: + # prev_state = self.timestep_to_states[cur_timestep - 1].detach() + prev_state = self.timestep_to_states[cur_timestep - 1] # .detach() # not detach? # + cur_state = prev_state + update_quaternion(delta_rot_vec, prev_state) + self.timestep_to_states[cur_timestep] = cur_state.detach() + self.state = cur_state + + + def set_delta_state_and_update_v2(self, delta_state, cur_timestep): + self.timestep_to_vels[cur_timestep] = torch.zeros((3,), dtype=torch.float32).cuda().detach() + + if cur_timestep == 0: + cur_state = delta_state + else: + # prev_state = self.timestep_to_states[cur_timestep - 1].detach() + # prev_state = self.timestep_to_states[cur_timestep - 1] + cur_state = self.timestep_to_states[cur_timestep - 1].detach() + delta_state + ## cur_state ## # + self.timestep_to_states[cur_timestep] = cur_state # .detach() + + + # delta_rot_vec = self.axis_xyz * state # + + cur_rot_vec = self.axis_xyz * cur_state ### cur_state #### # + # angle to the quaternion ? # + init_state = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + cur_quat_state = init_state + update_quaternion(cur_rot_vec, init_state) + self.state = cur_quat_state + + # if cur_timestep == 0: + # prev_state = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + # else: + # # prev_state = self.timestep_to_states[cur_timestep - 1].detach() + # prev_state = self.timestep_to_states[cur_timestep - 1] # .detach() # not detach? 
# + # cur_state = prev_state + update_quaternion(delta_rot_vec, prev_state) + # self.timestep_to_states[cur_timestep] = cur_state.detach() + # self.state = cur_state + + + def compute_transformation_from_current_state(self, n_grad=False): + # together with the parent rot mtx and the parent trans vec # + # cur_joint_state = self.state + if self.type == "revolute": + # rot_mtx = rotation_matrix_from_axis_angle(self.axis, cur_joint_state) + # trans_vec = self.pos - np.matmul(rot_mtx, self.pos.reshape(3, 1)).reshape(3) + if n_grad: + rot_mtx = quaternion_to_matrix(self.state.detach()) + else: + rot_mtx = quaternion_to_matrix(self.state) + # trans_vec = self.pos - torch.matmul(rot_mtx, self.pos.view(3, 1)).view(3).contiguous() + trans_vec = self.origin_xyz - torch.matmul(rot_mtx, self.origin_xyz.view(3, 1)).view(3).contiguous() + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec + elif self.type == "fixed": + rot_mtx = torch.eye(3, dtype=torch.float32).cuda() + trans_vec = torch.zeros((3,), dtype=torch.float32).cuda() + # trans_vec = self.origin_xyz + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec # + else: + pass + return self.rot_mtx, self.trans_vec + + + # set actions # set actions and udpate states # + def set_actions_and_update_states(self, action, cur_timestep, time_cons, cur_inertia): + + # timestep_to_vels, timestep_to_states, state # + if self.type in ['revolute']: + + self.action = action + # + # visual_pts and visual_pts_mass # + # cur_joint_pos = self.joint.pos # + # TODO: check whether the following is correct # # set + torque = self.action * self.axis_xyz + + # # Compute inertia matrix # + # inertial = torch.zeros((3, 3), dtype=torch.float32).cuda() + # for i_pts in range(self.visual_pts.size(0)): + # cur_pts = self.visual_pts[i_pts] + # cur_pts_mass = self.visual_pts_mass[i_pts] + # cur_r = cur_pts - cur_joint_pos # r_i + # # cur_vert = init_passive_mesh[i_v] + # # cur_r = cur_vert - init_passive_mesh_center + # dot_r_r = torch.sum(cur_r * cur_r) + # cur_eye_mtx = torch.eye(3, dtype=torch.float32).cuda() + # r_mult_rT = torch.matmul(cur_r.unsqueeze(-1), cur_r.unsqueeze(0)) + # inertial += (dot_r_r * cur_eye_mtx - r_mult_rT) * cur_pts_mass + # m = torch.sum(self.visual_pts_mass) + # # Use torque to update angular velocity -> state # + # inertia_inv = torch.linalg.inv(inertial) + + # axis-angle of + # inertia_inv = self.cur_inertia_inv + # print(f"updating actions and states for the joint {self.name} with type {self.type}") + inertia_inv = torch.linalg.inv(cur_inertia).detach() + + delta_omega = torch.matmul(inertia_inv, torque.unsqueeze(-1)).squeeze(-1) + + # delta_omega = torque / 400 # # axis_xyz # + + ## actions -> with the dynamic information -> time cons -> angular acc -> delta angular vel -> delta angle + # TODO: dt should be an optim#izable constant? should it be the same value as that optimized for the passive object? 
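+        # (editorial comment) Integration below: delta_omega = I^{-1}(action * axis)
+        # is scaled by the learned time constant (acting as dt), blended with the
+        # previous step's angular velocity through the DAMPING constant, and the
+        # resulting velocity is converted into a quaternion increment via
+        # update_quaternion to advance timestep_to_states to the next step.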
# + delta_angular_vel = delta_omega * time_cons # * self.args.dt + delta_angular_vel = delta_angular_vel.squeeze(0) + if cur_timestep > 0: ## cur_timestep - 1 ## + prev_angular_vel = self.timestep_to_vels[cur_timestep - 1].detach() + # cur_angular_vel = prev_angular_vel + delta_angular_vel * DAMPING + cur_angular_vel = prev_angular_vel * DAMPING + delta_angular_vel # p + else: + cur_angular_vel = delta_angular_vel # angular vel # + + self.timestep_to_vels[cur_timestep] = cur_angular_vel.detach() + + cur_delta_quat = cur_angular_vel * time_cons # * self.args.dt + cur_delta_quat = cur_delta_quat.squeeze(0) # delta quat # + cur_state = self.timestep_to_states[cur_timestep].detach() # quaternion # + # print(f"cur_delta_quat: {cur_delta_quat.size()}, cur_state: {cur_state.size()}") + nex_state = cur_state + update_quaternion(cur_delta_quat, cur_state) + self.timestep_to_states[cur_timestep + 1] = nex_state.detach() + self.state = nex_state # set the joint state # + + + def set_actions_and_update_states_v2(self, action, cur_timestep, time_cons, cur_inertia, cur_joint_tot_rot=None, cur_joint_tot_trans=None, penetration_forces=None, link_idx=None, sampled_visual_pts_joint_idxes=None): + + # timestep_to_vels, timestep_to_states, state # + if self.type in ['revolute']: + + self.action = action ## strategy 2 + # + # visual_pts and visual_pts_mass # + # cur_joint_pos = self.joint.pos # + # TODO: check whether the following is correct # # set + + if penetration_forces is not None: + penetration_forces_values = penetration_forces['penetration_forces'].detach() + penetration_forces_points = penetration_forces['penetration_forces_points'].detach() + + ####### use a part of peentration points and forces ####### + if sampled_visual_pts_joint_idxes is not None: + selected_forces_mask = sampled_visual_pts_joint_idxes == link_idx ## select the current link's penetrated points + else: + selected_forces_mask = torch.ones_like(penetration_forces_values[:, 0]).bool() + ####### use a part of peentration points and forces ####### + + if torch.sum(selected_forces_mask.float()) > 0.5: ## has penetrated points in this link ## + + penetration_forces_values = penetration_forces_values[selected_forces_mask] + penetration_forces_points = penetration_forces_points[selected_forces_mask] + # tot_rot_mtx, tot_trans_vec + # cur_joint_rot = self.tot_rot_mtx + # cur_joint_trans = self.tot_trans_vec + cur_joint_rot = cur_joint_tot_rot.detach() + cur_joint_trans = cur_joint_tot_trans.detach() ## total rot; total trans ## + local_frame_penetration_forces_values = torch.matmul(cur_joint_rot.transpose(1, 0), penetration_forces_values.transpose(1, 0)).transpose(1, 0) + local_frame_penetration_forces_points = torch.matmul(cur_joint_rot.transpose(1, 0), (penetration_forces_points - cur_joint_trans.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + + joint_pos_to_forces_points = local_frame_penetration_forces_points - self.axis_xyz.unsqueeze(0) + forces_torques = torch.cross(joint_pos_to_forces_points, local_frame_penetration_forces_values) # forces values of the local frame # + forces_torques = torch.sum(forces_torques, dim=0) + + forces_torques_dot_axis = torch.sum(self.axis_xyz * forces_torques) + penetration_delta_state = forces_torques_dot_axis + else: + penetration_delta_state = 0.0 + else: + penetration_delta_state = 0.0 + + + torque = self.action * self.axis_xyz + + # # Compute inertia matrix # + # inertial = torch.zeros((3, 3), dtype=torch.float32).cuda() + # for i_pts in range(self.visual_pts.size(0)): + # cur_pts = 
self.visual_pts[i_pts] + # cur_pts_mass = self.visual_pts_mass[i_pts] + # cur_r = cur_pts - cur_joint_pos # r_i + # # cur_vert = init_passive_mesh[i_v] + # # cur_r = cur_vert - init_passive_mesh_center + # dot_r_r = torch.sum(cur_r * cur_r) + # cur_eye_mtx = torch.eye(3, dtype=torch.float32).cuda() + # r_mult_rT = torch.matmul(cur_r.unsqueeze(-1), cur_r.unsqueeze(0)) + # inertial += (dot_r_r * cur_eye_mtx - r_mult_rT) * cur_pts_mass + # m = torch.sum(self.visual_pts_mass) + # # Use torque to update angular velocity -> state # + # inertia_inv = torch.linalg.inv(inertial) + + # axis-angle of + # inertia_inv = self.cur_inertia_inv + # print(f"updating actions and states for the joint {self.name} with type {self.type}") + + + # inertia_inv = torch.linalg.inv(cur_inertia).detach() + + inertia_inv = torch.eye(n=3, dtype=torch.float32).cuda() + + + + delta_omega = torch.matmul(inertia_inv, torque.unsqueeze(-1)).squeeze(-1) + + # delta_omega = torque / 400 + + + # TODO: dt should be an optim#izable constant? should it be the same value as that optimized for the passive object? # + delta_angular_vel = delta_omega * time_cons # * self.args.dt + delta_angular_vel = delta_angular_vel.squeeze(0) + if cur_timestep > 0: ## cur_timestep - 1 ## + prev_angular_vel = self.timestep_to_vels[cur_timestep - 1].detach() + # cur_angular_vel = prev_angular_vel + delta_angular_vel * DAMPING + cur_angular_vel = prev_angular_vel * DAMPING + delta_angular_vel # p + # cur_angular_vel = prev_angular_vel + delta_angular_vel # p + else: + cur_angular_vel = delta_angular_vel # angular vel # + + self.timestep_to_vels[cur_timestep] = cur_angular_vel.detach() + + cur_delta_angle = cur_angular_vel * time_cons # * self.args.dt + # cur_delta_quat = cur_delta_angle.squeeze(0) # delta quat # + # cur_state = self.timestep_to_states[cur_timestep].detach() # quaternion # + # # print(f"cur_delta_quat: {cur_delta_quat.size()}, cur_state: {cur_state.size()}") + # nex_state = cur_state + update_quaternion(cur_delta_quat, cur_state) + + ### strategy 2 ### + dot_cur_delta_angle_w_axis = torch.sum( ## delta angle with axises ## + cur_delta_angle * self.axis_xyz, dim=-1 + ) + ## dot cur deltawith the + delta_state = dot_cur_delta_angle_w_axis ## delta angle w axieses ## + + # if cur_timestep + if cur_timestep == 0: + self.timestep_to_states[cur_timestep] = torch.zeros((1,), dtype=torch.float32).cuda() + cur_state = self.timestep_to_states[cur_timestep].detach() + nex_state = cur_state + delta_state + # nex_state = nex_state + penetration_delta_state + ## state rot vector along axis ## ## get the pentrated froces -- calulaterot qj + state_rot_vec_along_axis = nex_state * self.axis_xyz + ### state in the rotation vector -> state in quaternion ### + state_rot_quat = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + update_quaternion(state_rot_vec_along_axis, torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda()) + ### state + self.state = state_rot_quat + ### get states? 
## + self.timestep_to_states[cur_timestep + 1] = nex_state # .detach() + # self.state = nex_state # set the joint state # + + + + def set_penetration_forces(self, cur_inertia, cur_joint_tot_rot=None, cur_joint_tot_trans=None, link_idx=None, penetration_forces=None, sampled_visual_pts_joint_idxes=None, joint_idx=None, joint_penetration_forces=None): + + # timestep_to_vels, timestep_to_states, state # + if self.type in ['revolute'] : + + # self.action = action ## strategy 2 + # + # visual_pts and visual_pts_mass # + # cur_joint_pos = self.joint.pos # + # TODO: check whether the following is correct # # set + + if penetration_forces is not None: + penetration_forces_values = penetration_forces['penetration_forces'].detach() + penetration_forces_points = penetration_forces['penetration_forces_points'].detach() + + ####### use a part of peentration points and forces ####### + if sampled_visual_pts_joint_idxes is not None: + selected_forces_mask = sampled_visual_pts_joint_idxes == link_idx ## select the current link's penetrated points + else: + selected_forces_mask = torch.ones_like(penetration_forces_values[:, 0]).bool() + ####### use a part of peentration points and forces ####### + + if torch.sum(selected_forces_mask.float()) > 0.5: ## has penetrated points in this link ## + + penetration_forces_values = penetration_forces_values[selected_forces_mask] + penetration_forces_points = penetration_forces_points[selected_forces_mask] + # tot_rot_mtx, tot_trans_vec + # cur_joint_rot = self.tot_rot_mtx + # cur_joint_trans = self.tot_trans_vec + cur_joint_rot = cur_joint_tot_rot.detach() + cur_joint_trans = cur_joint_tot_trans.detach() ## total rot; total trans ## + local_frame_penetration_forces_values = torch.matmul(cur_joint_rot.transpose(1, 0), penetration_forces_values.transpose(1, 0)).transpose(1, 0) + local_frame_penetration_forces_points = torch.matmul(cur_joint_rot.transpose(1, 0), (penetration_forces_points - cur_joint_trans.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + + joint_pos_to_forces_points = local_frame_penetration_forces_points - self.axis_xyz.unsqueeze(0) + forces_torques = torch.cross(joint_pos_to_forces_points, local_frame_penetration_forces_values) # forces values of the local frame # + forces_torques = torch.sum(forces_torques, dim=0) + + forces = torch.sum(local_frame_penetration_forces_values, dim=0) + + cur_joint_maximal_forces = torch.cat( + [forces, forces_torques], dim=0 + ) + cur_joint_idx = joint_idx + joint_penetration_forces[cur_joint_idx][:] = cur_joint_maximal_forces[:].clone() + + # forces_torques_dot_axis = torch.sum(self.axis_xyz * forces_torques) + # penetration_delta_state = forces_torques_dot_axis + else: + penetration_delta_state = 0.0 + cur_joint_maximal_forces = torch.zeros((6,), dtype=torch.float32).cuda() + cur_joint_idx = joint_idx + joint_penetration_forces[cur_joint_idx][:] = cur_joint_maximal_forces[:].clone() + + else: + penetration_delta_state = 0.0 + cur_joint_idx = joint_idx + joint_penetration_forces[cur_joint_idx][:] = cur_joint_maximal_forces[:].clone() + + + + + + + def get_joint_state(self, cur_ts, state_vals): + cur_joint_state = self.timestep_to_states[cur_ts + 1] + state_vals[self.joint_idx] = cur_joint_state + return state_vals + + +class Robot_urdf: + def __init__(self, links, link_name_to_link_idxes, link_name_to_link_struct, joint_name_to_joint_idx, actions_joint_name_to_joint_idx, tot_joints=None, real_actions_joint_name_to_joint_idx=None) -> None: + self.links = links + self.link_name_to_link_idxes = link_name_to_link_idxes + 
self.link_name_to_link_struct = link_name_to_link_struct + + # joint_name_to_joint_idx, actions_joint_name_to_joint_idx + self.joint_name_to_joint_idx = joint_name_to_joint_idx + self.actions_joint_name_to_joint_idx = actions_joint_name_to_joint_idx + + self.tot_joints = tot_joints + # # + # # + self.act_joint_idxes = list(self.actions_joint_name_to_joint_idx.values()) + self.act_joint_idxes = sorted(self.act_joint_idxes, reverse=False) + self.act_joint_idxes = torch.tensor(self.act_joint_idxes, dtype=torch.long).cuda()[2:] + + self.real_actions_joint_name_to_joint_idx = real_actions_joint_name_to_joint_idx + + + self.init_vertices, self.init_faces = self.get_init_visual_pts() + + joint_name_to_joint_idx_sv_fn = "mano_joint_name_to_joint_idx.npy" + np.save(joint_name_to_joint_idx_sv_fn, self.joint_name_to_joint_idx) + + actions_joint_name_to_joint_idx_sv_fn = "mano_actions_joint_name_to_joint_idx.npy" + np.save(actions_joint_name_to_joint_idx_sv_fn, self.actions_joint_name_to_joint_idx) + + tot_joints = len(self.joint_name_to_joint_idx) + tot_actions_joints = len(self.actions_joint_name_to_joint_idx) + + print(f"tot_joints: {tot_joints}, tot_actions_joints: {tot_actions_joints}") + + pass + + # robot.expande + def expand_visual_pts(self, ): + link_name_to_visited = {} + # transform the visual pts # + # action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + expanded_visual_pts = [] + # expanded the visual pts # # transformed viusal pts # or the translations of the visual pts # + expanded_visual_pts = palm_link.expand_visual_pts(expanded_visual_pts, link_name_to_visited, self.link_name_to_link_struct) + expanded_visual_pts = torch.cat(expanded_visual_pts, dim=0) + # pass + return expanded_visual_pts + + + ### samping issue? 
--- TODO` ` + def get_init_visual_pts(self, expanded_pts=False, joint_idxes=None): + init_visual_meshes = { + 'vertices': [], 'faces': [], 'link_idxes': [], 'transformed_joint_pos': [], 'link_idxes': [], 'transformed_joint_pos': [], 'joint_link_idxes': [] + } + init_parent_rot = torch.eye(3, dtype=torch.float32).cuda() + init_parent_trans = torch.zeros((3,), dtype=torch.float32).cuda() + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + ### from the palm linke ## + init_visual_meshes, joint_idxes = palm_link.get_init_visual_meshes(init_parent_rot, init_parent_trans, init_visual_meshes, self.link_name_to_link_struct, link_name_to_visited, expanded_pts=expanded_pts, joint_idxes=joint_idxes) + + self.link_idxes = torch.cat(init_visual_meshes['link_idxes'], dim=-1) + self.transformed_joint_pos = torch.cat(init_visual_meshes['transformed_joint_pos'], dim=0) + self.joint_link_idxes = torch.cat(init_visual_meshes['joint_link_idxes'], dim=-1) ### + + + if joint_idxes is not None: + joint_idxes = torch.cat(joint_idxes, dim=0) + + # for cur_link in self.links: + # init_visual_meshes = cur_link.get_init_visual_meshes(init_parent_rot, init_parent_trans, init_visual_meshes, self.link_name_to_link_struct, link_name_to_visited) + + init_vertices, init_faces = merge_meshes(init_visual_meshes['vertices'], init_visual_meshes['faces']) + + if joint_idxes is not None: + return init_vertices, init_faces, joint_idxes + else: + return init_vertices, init_faces + + def set_penetration_forces(self, penetration_forces, sampled_visual_pts_joint_idxes, joint_penetration_forces): + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + action_joint_name_to_joint_idx = self.real_actions_joint_name_to_joint_idx + # print(f"action_joint_name_to_joint_idx: {action_joint_name_to_joint_idx}") + + parent_rot = torch.eye(3, dtype=torch.float32).cuda() + parent_trans = torch.zeros((3,), dtype=torch.float32).cuda() + + # cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct, parent_rot, parent_trans, penetration_forces, sampled_visual_pts_joint_idxes, joint_penetration_forces): + + palm_link.set_penetration_forces(action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct, parent_rot, parent_trans, penetration_forces, sampled_visual_pts_joint_idxes, joint_penetration_forces) + + def set_delta_state_and_update(self, states, cur_timestep): + link_name_to_visited = {} + + action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + palm_link.set_delta_state_and_update(states, cur_timestep, link_name_to_visited, action_joint_name_to_joint_idx, self.link_name_to_link_struct) + + + def set_delta_state_and_update_v2(self, states, cur_timestep, use_real_act_joint=False): + link_name_to_visited = {} + + if use_real_act_joint: + action_joint_name_to_joint_idx = self.real_actions_joint_name_to_joint_idx + else: + action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + palm_link.set_delta_state_and_update_v2(states, cur_timestep, link_name_to_visited, action_joint_name_to_joint_idx, self.link_name_to_link_struct) + + + + # cur_joint.set_actions_and_update_states(cur_action, 
cur_timestep, time_cons, cur_child_inertia) + def set_actions_and_update_states(self, actions, cur_timestep, time_cons,): + # self.actions_joint_name_to_joint_idx as the action joint name to joint idx + link_name_to_visited = {} + ## to joint idx ## + action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + ## set actions ## + palm_link.set_actions_and_update_states(actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct) + + # for cur_joint in + + # for cur_link in self.links: + # if cur_link.joint is not None: + # for cur_joint_nm in cur_link.joint: + # if cur_link.joint[cur_joint_nm].type in ['revolute']: + # cur_link_joint_name = cur_link.joint[cur_joint_nm].name + # cur_link_joint_idx = self.actions_joint_name_to_joint_idx[cur_link_joint_name] + + + # for cur_link in self.links: + # cur_link.set_actions_and_update_states(actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct) + + + + def get_joint_state(self, cur_ts, state_vals): + # link_name_to_visited = {} + ## to joint idx ## + # action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + # parent_rot = torch.eye(3, dtype=torch.float32).cuda() + # parent_trans = torch.zeros((3,), dtype=torch.float32).cuda() + ## set actions ## # + # set_actions_and_update_states_v2(self, action, cur_timestep, time_cons, cur_inertia): + # self, actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct ## set and update states ## + state_vals = palm_link.get_joint_state(cur_ts, state_vals, link_name_to_visited, self.link_name_to_link_struct, self.actions_joint_name_to_joint_idx) + return state_vals + + + def set_actions_and_update_states_v2(self, actions, cur_timestep, time_cons, penetration_forces=None, sampled_visual_pts_joint_idxes=None): + # self.actions_joint_name_to_joint_idx as the action joint name to joint idx + link_name_to_visited = {} + ## to joint idx ## + action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + parent_rot = torch.eye(3, dtype=torch.float32).cuda() + parent_trans = torch.zeros((3,), dtype=torch.float32).cuda() + ## set actions ## # + # set_actions_and_update_states_v2(self, action, cur_timestep, time_cons, cur_inertia): + # self, actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct ## set and update states ## + palm_link.set_actions_and_update_states_v2(actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct, parent_rot=parent_rot, parent_trans=parent_trans, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes) + + + ### TODO: add the contact torque when calculating the nextstep states ### + ### TODO: not an accurate implementation since differen joints should be considered ### + ### TODO: the articulated force modle is not so easy as this one .... 
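+    # (editorial comment) set_contact_forces below approximates a per-link contact
+    # torque: every selected force f applied at a manipulating point p, on a link
+    # whose transformed joint position is p_j, contributes (p - p_j) x (-f); the
+    # contributions are summed per link and stored on link_struct.contact_torque
+    # (None for links without contacts), in line with the simplification the TODOs
+    # above point out.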
### + def set_contact_forces(self, hard_selected_forces, hard_selected_manipulating_points, hard_selected_sampled_input_pts_idxes): + # transformed_joint_pos, joint_link_idxes, link_idxes # + selected_pts_link_idxes = self.link_idxes[hard_selected_sampled_input_pts_idxes] + # use the selected link idxes # + # selected pts idxes # + + # self.joint_link_idxes, transformed_joint_pos # + self.link_idx_to_transformed_joint_pos = {} + for i_link in range(self.transformed_joint_pos.size(0)): + cur_link_idx = self.link_idxes[i_link].item() + cur_link_pos = self.transformed_joint_pos[i_link] + # if cur_link_idx not in self.link_idx_to_transformed_joint_pos: + self.link_idx_to_transformed_joint_pos[cur_link_idx] = cur_link_pos + # self.link_idx_to_transformed_joint_pos[cur_link_idx].append(cur_link_pos) + + # from the + self.link_idx_to_contact_forces = {} + for i_c_pts in range(hard_selected_forces.size(0)): + cur_contact_force = hard_selected_forces[i_c_pts] ## + cur_link_idx = selected_pts_link_idxes[i_c_pts].item() + cur_link_pos = self.link_idx_to_transformed_joint_pos[cur_link_idx] + cur_link_action_pos = hard_selected_manipulating_points[i_c_pts] + # (action_pos - link_pos) x (-contact_force) # + cur_contact_torque = torch.cross( + cur_link_action_pos - cur_link_pos, -cur_contact_force + ) + if cur_link_idx not in self.link_idx_to_contact_forces: + self.link_idx_to_contact_forces[cur_link_idx] = [cur_contact_torque] + else: + self.link_idx_to_contact_forces[cur_link_idx].append(cur_contact_torque) + for link_idx in self.link_idx_to_contact_forces: + self.link_idx_to_contact_forces[link_idx] = torch.stack(self.link_idx_to_contact_forces[link_idx], dim=0) + self.link_idx_to_contact_forces[link_idx] = torch.sum(self.link_idx_to_contact_forces[link_idx] , dim=0) + for link_idx, link_struct in enumerate(self.links): + if link_idx in self.link_idx_to_contact_forces: + cur_link_contact_force = self.link_idx_to_contact_forces[link_idx] + link_struct.contact_torque = cur_link_contact_force + else: + link_struct.contact_torque = None + + + # def se ### from the optimizable initial states ### + def set_initial_state(self, states): + action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + link_name_to_visited = {} + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + palm_link.set_initial_state(states, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct) + + # for cur_link in self.links: + # cur_link.set_initial_state(states, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct) + + ### after each timestep -> re-calculate the inertial matrix using the current simulated states and the set the new actiosn and forward the simulation # + def calculate_inertia(self): + link_name_to_visited = {} + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + palm_link.calculate_inertia(link_name_to_visited, self.link_name_to_link_struct) + + # for cur_link in self.links: + # cur_link.calculate_inertia(link_name_to_visited, self.link_name_to_link_struct) + + ### + + + + +def parse_nparray_from_string(strr, args=None): + vals = strr.split(" ") + vals = [float(val) for val in vals] + vals = np.array(vals, dtype=np.float32) + vals = torch.from_numpy(vals).float() + ## vals ## + vals = nn.Parameter(vals.cuda(), requires_grad=True) + + return vals + + +### parse link data ### +def parse_link_data(link, args): + + 
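+    # (editorial comment) This parser handles the non-URDF XML layout in which each
+    # <link> element nests its own <joint> and <body> children (pos/quat/axis/damping
+    # plus mesh or sphere attributes) together with recursive <link> elements; the
+    # URDF layout with separate <link>/<joint> elements is handled by
+    # parse_link_data_urdf and parse_joint_data_urdf further below.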
link_name = link.attrib["name"] + # print(f"parsing link: {link_name}") ## joints body meshes # + + joint = link.find("./joint") + + joint_name = joint.attrib["name"] + joint_type = joint.attrib["type"] + if joint_type in ["revolute"]: ## a general xml parser here? + axis = joint.attrib["axis"] + axis = parse_nparray_from_string(axis, args=args) + else: + axis = None + pos = joint.attrib["pos"] # + pos = parse_nparray_from_string(pos, args=args) + quat = joint.attrib["quat"] + quat = parse_nparray_from_string(quat, args=args) + + try: + frame = joint.attrib["frame"] + except: + frame = "WORLD" + + if joint_type not in ["fixed"]: + damping = joint.attrib["damping"] + damping = float(damping) + else: + damping = 0.0 + + cur_joint = Joint(joint_name, joint_type, axis, pos, quat, frame, damping, args=args) + + body = link.find("./body") + body_name = body.attrib["name"] + body_type = body.attrib["type"] + if body_type == "mesh": + filename = body.attrib["filename"] + else: + filename = "" + + if body_type == "sphere": + radius = body.attrib["radius"] + radius = float(radius) + else: + radius = 0. + + pos = body.attrib["pos"] + pos = parse_nparray_from_string(pos, args=args) + quat = body.attrib["quat"] + quat = joint.attrib["quat"] + try: + transform_type = body.attrib["transform_type"] + except: + transform_type = "OBJ_TO_WORLD" + density = body.attrib["density"] + density = float(density) + mu = body.attrib["mu"] + mu = float(mu) + try: ## rgba ## + rgba = body.attrib["rgba"] + rgba = parse_nparray_from_string(rgba, args=args) + except: + rgba = np.zeros((4,), dtype=np.float32) + + cur_body = Body(body_name, body_type, filename, pos, quat, transform_type, density, mu, rgba, radius, args=args) + + children_link = [] + links = link.findall("./link") + for child_link in links: # + cur_child_link = parse_link_data(child_link, args=args) + children_link.append(cur_child_link) + + link_name = link.attrib["name"] + link_obj = Link(link_name, joint=cur_joint, body=cur_body, children=children_link, args=args) + return link_obj + + +### parse link data ### +def parse_link_data_urdf(link): + + link_name = link.attrib["name"] + # print(f"parsing link: {link_name}") ## joints body meshes # + + inertial = link.find("./inertial") + + origin = inertial.find("./origin") + + if origin is not None: + inertial_pos = origin.attrib["xyz"] + try: + inertial_rpy = origin.attrib["rpy"] + except: + inertial_rpy = "0.0 0.0 0.0" + else: + inertial_pos = "0.0 0.0 0.0" + inertial_rpy = "0.0 0.0 0.0" + inertial_pos = parse_nparray_from_string(inertial_pos) + + inertial_rpy = parse_nparray_from_string(inertial_rpy) + + inertial_mass = inertial.find("./mass") + inertial_mass = inertial_mass.attrib["value"] + + inertial_inertia = inertial.find("./inertia") + inertial_ixx = inertial_inertia.attrib["ixx"] + inertial_ixx = float(inertial_ixx) + inertial_ixy = inertial_inertia.attrib["ixy"] + inertial_ixy = float(inertial_ixy) + inertial_ixz = inertial_inertia.attrib["ixz"] + inertial_ixz = float(inertial_ixz) + inertial_iyy = inertial_inertia.attrib["iyy"] + inertial_iyy = float(inertial_iyy) + inertial_iyz = inertial_inertia.attrib["iyz"] + inertial_iyz = float(inertial_iyz) + inertial_izz = inertial_inertia.attrib["izz"] + inertial_izz = float(inertial_izz) + + inertial_inertia_mtx = torch.zeros((3, 3), dtype=torch.float32).cuda() + inertial_inertia_mtx[0, 0] = inertial_ixx + inertial_inertia_mtx[0, 1] = inertial_ixy + inertial_inertia_mtx[0, 2] = inertial_ixz + inertial_inertia_mtx[1, 0] = inertial_ixy + 
inertial_inertia_mtx[1, 1] = inertial_iyy + inertial_inertia_mtx[1, 2] = inertial_iyz + inertial_inertia_mtx[2, 0] = inertial_ixz + inertial_inertia_mtx[2, 1] = inertial_iyz + inertial_inertia_mtx[2, 2] = inertial_izz + + # [xx, xy, xz] # + # [0, yy, yz] # + # [0, 0, zz] # + + # a strange inertia value ... # + # TODO: how to compute the inertia matrix? # + + visual = link.find("./visual") + + if visual is not None: + origin = visual.find("./origin") + visual_pos = origin.attrib["xyz"] + visual_pos = parse_nparray_from_string(visual_pos) + visual_rpy = origin.attrib["rpy"] + visual_rpy = parse_nparray_from_string(visual_rpy) + geometry = visual.find("./geometry") + geometry_mesh = geometry.find("./mesh") + if geometry_mesh is None: + visual = None + else: + mesh_fn = geometry_mesh.attrib["filename"] + + try: + mesh_scale = geometry_mesh.attrib["scale"] + except: + mesh_scale = "1 1 1" + + mesh_scale = parse_nparray_from_string(mesh_scale) + mesh_fn = str(mesh_fn) + + + link_struct = Link_urdf(name=link_name, inertial=Inertial(origin_rpy=inertial_rpy, origin_xyz=inertial_pos, mass=inertial_mass, inertia=inertial_inertia_mtx), visual=Visual(visual_rpy=visual_rpy, visual_xyz=visual_pos, geometry_mesh_fn=mesh_fn, geometry_mesh_scale=mesh_scale) if visual is not None else None) + + return link_struct + +def parse_joint_data_urdf(joint): + joint_name = joint.attrib["name"] + joint_type = joint.attrib["type"] + + parent = joint.find("./parent") + child = joint.find("./child") + parent_name = parent.attrib["link"] + child_name = child.attrib["link"] + + joint_origin = joint.find("./origin") + # if joint_origin. + try: + origin_xyz_string = joint_origin.attrib["xyz"] + origin_xyz = parse_nparray_from_string(origin_xyz_string) + except: + origin_xyz = torch.tensor([0., 0., 0.], dtype=torch.float32).cuda() + origin_xyz_string = "" + + joint_axis = joint.find("./axis") + if joint_axis is not None: + joint_axis = joint_axis.attrib["xyz"] + joint_axis = parse_nparray_from_string(joint_axis) + else: + joint_axis = torch.tensor([1, 0., 0.], dtype=torch.float32).cuda() + + joint_limit = joint.find("./limit") + if joint_limit is not None: + joint_lower = joint_limit.attrib["lower"] + joint_lower = float(joint_lower) + joint_upper = joint_limit.attrib["upper"] + joint_upper = float(joint_upper) + joint_effort = joint_limit.attrib["effort"] + joint_effort = float(joint_effort) + if "velocity" in joint_limit.attrib: + joint_velocity = joint_limit.attrib["velocity"] + joint_velocity = float(joint_velocity) + else: + joint_velocity = 0.5 + else: + joint_lower = -0.5000 + joint_upper = 1.57 + joint_effort = 1000 + joint_velocity = 0.5 + + # cosntruct the joint data # + joint_limit = Joint_Limit(effort=joint_effort, lower=joint_lower, upper=joint_upper, velocity=joint_velocity) + cur_joint_struct = Joint_urdf(joint_name, joint_type, parent_name, child_name, origin_xyz, joint_axis, joint_limit, origin_xyz_string) + return cur_joint_struct + + + +def parse_data_from_urdf(xml_fn): + + tree = ElementTree() + tree.parse(xml_fn) + print(f"{xml_fn}") + ### get total robots ### + # robots = tree.findall("link") + cur_robot = tree + # i_robot = 0 + # tot_robots = [] + # for cur_robot in robots: + # print(f"Getting robot: {i_robot}") + # i_robot += 1 + # print(f"len(robots): {len(robots)}") + # cur_robot = robots[0] + cur_links = cur_robot.findall("./link") + # curlinks + # i_link = 0 + link_name_to_link_idxes = {} + cur_robot_links = [] + link_name_to_link_struct = {} + for i_link_idx, cur_link in enumerate(cur_links): + 
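+        # (editorial comment) each <link> element becomes a Link_urdf struct with a
+        # sequential link_idx and is registered in the name -> index and
+        # name -> struct tables that the traversal code above relies on.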
cur_link_struct = parse_link_data_urdf(cur_link) + print(f"Adding link {cur_link_struct.name}, link_idx: {i_link_idx}") + cur_link_struct.link_idx = i_link_idx + cur_robot_links.append(cur_link_struct) + + link_name_to_link_idxes[cur_link_struct.name] = i_link_idx + link_name_to_link_struct[cur_link_struct.name] = cur_link_struct + # for cur_link in cur_links: + # cur_robot_links.append(parse_link_data_urdf(cur_link, args=args)) + + print(f"link_name_to_link_struct: {len(link_name_to_link_struct)}, ") + + tot_robot_joints = [] + + joint_name_to_joint_idx = {} + + actions_joint_name_to_joint_idx = {} + + cur_joints = cur_robot.findall("./joint") + + real_actions_joint_name_to_joint_idx = {} + + act_joint_idx = 0 + for i_joint, cur_joint in enumerate(cur_joints): + cur_joint_struct = parse_joint_data_urdf(cur_joint) + cur_joint_parent_link = cur_joint_struct.parent_link + cur_joint_child_link = cur_joint_struct.child_link + + cur_joint_idx = len(tot_robot_joints) + cur_joint_name = cur_joint_struct.name + + joint_name_to_joint_idx[cur_joint_name] = cur_joint_idx + + print(f"cur_joint_name: {cur_joint_name}, cur_joint_idx: {cur_joint_idx}, axis: {cur_joint_struct.axis_xyz}, origin: {cur_joint_struct.origin_xyz}") + + cur_joint_type = cur_joint_struct.type + if cur_joint_type in ['revolute']: + actions_joint_name_to_joint_idx[cur_joint_name] = cur_joint_idx + # actions_joint_name_to_joint_idx[cur_joint_name] = act_joint_idx + # act_joint_idx = act_joint_idx + 1 + + real_actions_joint_name_to_joint_idx[cur_joint_name] = act_joint_idx + act_joint_idx = act_joint_idx + 1 + + + #### add the current joint to tot joints ### + tot_robot_joints.append(cur_joint_struct) + + parent_link_idx = link_name_to_link_idxes[cur_joint_parent_link] + cur_parent_link_struct = cur_robot_links[parent_link_idx] + + + child_link_idx = link_name_to_link_idxes[cur_joint_child_link] + cur_child_link_struct = cur_robot_links[child_link_idx] + # parent link struct # + if link_name_to_link_struct[cur_joint_parent_link].joint is not None: + link_name_to_link_struct[cur_joint_parent_link].joint[cur_joint_struct.name] = cur_joint_struct + link_name_to_link_struct[cur_joint_parent_link].children[cur_joint_struct.name] = cur_child_link_struct.name + # cur_child_link_struct + # cur_parent_link_struct.joint.append(cur_joint_struct) + # cur_parent_link_struct.children.append(cur_child_link_struct) + else: + link_name_to_link_struct[cur_joint_parent_link].joint = { + cur_joint_struct.name: cur_joint_struct + } + link_name_to_link_struct[cur_joint_parent_link].children = { + cur_joint_struct.name: cur_child_link_struct.name + # cur_child_link_struct + } + # cur_parent_link_struct.joint = [cur_joint_struct] + # cur_parent_link_struct.children.append(cur_child_link_struct) + # pass + + print(f"actions_joint_name_to_joint_idx: {len(actions_joint_name_to_joint_idx)}") + print(f"real_actions_joint_name_to_joint_idx: {len(real_actions_joint_name_to_joint_idx)}") + cur_robot_obj = Robot_urdf(cur_robot_links, link_name_to_link_idxes, link_name_to_link_struct, joint_name_to_joint_idx, actions_joint_name_to_joint_idx, tot_robot_joints, real_actions_joint_name_to_joint_idx=real_actions_joint_name_to_joint_idx) + # tot_robots.append(cur_robot_obj) + + print(f"Actions joint idxes:") + print(list(actions_joint_name_to_joint_idx.keys())) + + actions_joint_idxes = list(actions_joint_name_to_joint_idx.values()) + actions_joint_idxes = sorted(actions_joint_idxes) + print(f"joint indexes: {actions_joint_idxes}") + + # for the joint robots # + # for 
every joint + # tot_actuators = [] + # actuators = tree.findall("./actuator/motor") + # joint_nm_to_joint_idx = {} + # i_act = 0 + # for cur_act in actuators: + # cur_act_joint_nm = cur_act.attrib["joint"] + # joint_nm_to_joint_idx[cur_act_joint_nm] = i_act + # i_act += 1 ### add the act ### + + # tot_robots[0].set_joint_idx(joint_nm_to_joint_idx) ### set joint idx here ### # tot robots # + # tot_robots[0].get_nn_pts() + # tot_robots[1].get_nn_pts() + + return cur_robot_obj + + +def get_name_to_state_from_str(states_str): + tot_states = states_str.split(" ") + tot_states = [float(cur_state) for cur_state in tot_states] + joint_name_to_state = {} + for i in range(len(tot_states)): + cur_joint_name = f"joint{i + 1}" + cur_joint_state = tot_states[i] + joint_name_to_state[cur_joint_name] = cur_joint_state + return joint_name_to_state + + +def merge_meshes(verts_list, faces_list): + nn_verts = 0 + tot_verts_list = [] + tot_faces_list = [] + for i_vv, cur_verts in enumerate(verts_list): + cur_verts_nn = cur_verts.size(0) + tot_verts_list.append(cur_verts) + tot_faces_list.append(faces_list[i_vv] + nn_verts) + nn_verts = nn_verts + cur_verts_nn + tot_verts_list = torch.cat(tot_verts_list, dim=0) + tot_faces_list = torch.cat(tot_faces_list, dim=0) + return tot_verts_list, tot_faces_list + + +### get init s +class RobotAgent: # robot and the robot # + def __init__(self, xml_fn, args=None) -> None: + global urdf_fn + urdf_fn = xml_fn + self.xml_fn = xml_fn + # self.args = args + + ## + active_robot = parse_data_from_urdf(xml_fn) + + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ).cuda() + torch.nn.init.ones_(self.time_constant.weight) # + self.time_constant.weight.data = self.time_constant.weight.data * 0.2 ### time_constant data # + + self.optimizable_actions = nn.Embedding( + num_embeddings=100, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.optimizable_actions.weight) # + + self.learning_rate = 5e-4 + + self.active_robot = active_robot + + + self.set_init_states() + init_visual_pts = self.get_init_state_visual_pts() + self.init_visual_pts = init_visual_pts + + cur_verts, cur_faces = self.active_robot.get_init_visual_pts() + self.robot_pts = cur_verts + self.robot_faces = cur_faces + + + def set_init_states_target_value(self, init_states): + # glb_rot = torch.eye(n=3, dtype=torch.float32).cuda() + # glb_trans = torch.zeros((3,), dtype=torch.float32).cuda() ### glb_trans #### and the rot 3## + + # tot_init_states = {} + # tot_init_states['glb_rot'] = glb_rot; + # tot_init_states['glb_trans'] = glb_trans; + # tot_init_states['links_init_states'] = init_states + # self.active_robot.set_init_states_target_value(tot_init_states) + # init_joint_states = torch.zeros((60, ), dtype=torch.float32).cuda() + self.active_robot.set_initial_state(init_states) + + def set_init_states(self): + # glb_rot = torch.eye(n=3, dtype=torch.float32).cuda() + # glb_trans = torch.zeros((3,), dtype=torch.float32).cuda() ### glb_trans #### and the rot 3## + + # ### random rotation ### + # # glb_rot_np = R.random().as_matrix() + # # glb_rot = torch.from_numpy(glb_rot_np).float().cuda() + # ### random rotation ### + + # # glb_rot, glb_trans # + # init_states = {} + # init_states['glb_rot'] = glb_rot; + # init_states['glb_trans'] = glb_trans; + # self.active_robot.set_init_states(init_states) + + init_joint_states = torch.zeros((60, ), dtype=torch.float32).cuda() + self.active_robot.set_initial_state(init_joint_states) + + # cur_verts, joint_idxes = 
get_init_state_visual_pts(expanded_pts=False, ret_joint_idxes=True) + def get_init_state_visual_pts(self, expanded_pts=False, ret_joint_idxes=False): + # visual_pts_list = [] # compute the transformation via current state # + # visual_pts_list, visual_pts_mass_list = self.active_robot.compute_transformation_via_current_state( visual_pts_list) + + if ret_joint_idxes: + joint_idxes = [] + cur_verts, cur_faces, joint_idxes = self.active_robot.get_init_visual_pts(expanded_pts=expanded_pts, joint_idxes=joint_idxes) + else: + cur_verts, cur_faces = self.active_robot.get_init_visual_pts(expanded_pts=expanded_pts, joint_idxes=None) + self.faces = cur_faces + # joint_idxes = torch.cat() + # self.robot_pts = cur_verts + # self.robot_faces = cur_faces + # init_visual_pts = visual_pts_list + if ret_joint_idxes: + return cur_verts, joint_idxes + else: + return cur_verts + + def set_actions_and_update_states(self, actions, cur_timestep): + # + time_cons = self.time_constant(torch.zeros((1,), dtype=torch.long).cuda()) ### time constant of the system ## + self.active_robot.set_actions_and_update_states(actions, cur_timestep, time_cons) ### + pass + + def set_actions_and_update_states_v2(self, actions, cur_timestep, penetration_forces=None, sampled_visual_pts_joint_idxes=None): + # + time_cons = self.time_constant(torch.zeros((1,), dtype=torch.long).cuda()) ### time constant of the system ## + self.active_robot.set_actions_and_update_states_v2(actions, cur_timestep, time_cons, penetration_forces=penetration_forces, sampled_visual_pts_joint_idxes=sampled_visual_pts_joint_idxes) ### + pass + + # state_vals = self.robot_agent.get_joint_state( cur_ts, state_vals, link_name_to_link_struct) + def get_joint_state(self, cur_ts, state_vals): + state_vals = self.active_robot.get_joint_state(cur_ts, state_vals) + return state_vals + + def forward_stepping_test(self, ): + # delta_glb_rot; delta_glb_trans # + timestep_to_visual_pts = {} + for i_step in range(50): + actions = {} + actions['delta_glb_rot'] = torch.eye(3, dtype=torch.float32).cuda() + actions['delta_glb_trans'] = torch.zeros((3,), dtype=torch.float32).cuda() + actions_link_actions = torch.ones((22, ), dtype=torch.float32).cuda() + # actions_link_actions = actions_link_actions * 0.2 + actions_link_actions = actions_link_actions * -1. 
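+            # (editorial comment) this test rollout applies the constant action of -1
+            # set above to all 22 articulated joints for 50 steps and records the
+            # visual points per timestep; note that the call below reads the
+            # module-level name robot_agent rather than self.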
# + actions['link_actions'] = actions_link_actions + self.set_actions_and_update_states(actions=actions, cur_timestep=i_step) + + cur_visual_pts = robot_agent.get_init_state_visual_pts() + cur_visual_pts = cur_visual_pts.detach().cpu().numpy() + timestep_to_visual_pts[i_step + 1] = cur_visual_pts + return timestep_to_visual_pts + + def initialize_optimization(self, reference_pts_dict): + self.n_timesteps = 50 + # self.n_timesteps = 19 # first 19-timesteps optimization # + self.nn_tot_optimization_iters = 100 + # self.nn_tot_optimization_iters = 57 + # TODO: load reference points # + self.ts_to_reference_pts = np.load(reference_pts_dict, allow_pickle=True).item() #### + self.ts_to_reference_pts = { + ts // 2 + 1: torch.from_numpy(self.ts_to_reference_pts[ts]).float().cuda() for ts in self.ts_to_reference_pts + } + + + def forward_stepping_optimization(self, ): + nn_tot_optimization_iters = self.nn_tot_optimization_iters + params_to_train = [] + params_to_train += list(self.optimizable_actions.parameters()) + self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate) + + for i_iter in range(nn_tot_optimization_iters): + + tot_losses = [] + ts_to_robot_points = {} + for cur_ts in range(self.n_timesteps): + # print(f"iter: {i_iter}, cur_ts: {cur_ts}") + # actions = {} + # actions['delta_glb_rot'] = torch.eye(3, dtype=torch.float32).cuda() + # actions['delta_glb_trans'] = torch.zeros((3,), dtype=torch.float32).cuda() + actions_link_actions = self.optimizable_actions(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + # actions_link_actions = actions_link_actions * 0.2 + # actions_link_actions = actions_link_actions * -1. # + # actions['link_actions'] = actions_link_actions + # self.set_actions_and_update_states(actions=actions, cur_timestep=cur_ts) # update the interaction # + + with torch.no_grad(): + self.active_robot.calculate_inertia() + + self.active_robot.set_actions_and_update_states(actions_link_actions, cur_ts, 0.2) + + cur_visual_pts, cur_faces = self.active_robot.get_init_visual_pts() + ts_to_robot_points[cur_ts + 1] = cur_visual_pts.clone() + + cur_reference_pts = self.ts_to_reference_pts[cur_ts + 1] + diff = torch.sum((cur_visual_pts - cur_reference_pts) ** 2, dim=-1) + diff = diff.mean() + + # diff. + self.optimizer.zero_grad() + diff.backward(retain_graph=True) + # diff.backward(retain_graph=False) + self.optimizer.step() + + tot_losses.append(diff.item()) + + + loss = sum(tot_losses) / float(len(tot_losses)) + print(f"Iter: {i_iter}, average loss: {loss}") + # print(f"Iter: {i_iter}, average loss: {loss.item()}, start optimizing") + # self.optimizer.zero_grad() + # loss.backward() + # self.optimizer.step() + + self.ts_to_robot_points = { + ts: ts_to_robot_points[ts].detach().cpu().numpy() for ts in ts_to_robot_points + } + self.ts_to_ref_points = { + ts: self.ts_to_reference_pts[ts].detach().cpu().numpy() for ts in ts_to_robot_points + } + return self.ts_to_robot_points, self.ts_to_ref_points + + + + +def rotation_matrix_from_axis_angle(axis, angle): # rotation_matrix_from_axis_angle -> + # sin_ = np.sin(angle) # ti.math.sin(angle) + # cos_ = np.cos(angle) # ti.math.cos(angle) + sin_ = torch.sin(angle) # ti.math.sin(angle) + cos_ = torch.cos(angle) # ti.math.cos(angle) + u_x, u_y, u_z = axis[0], axis[1], axis[2] + u_xx = u_x * u_x + u_yy = u_y * u_y + u_zz = u_z * u_z + u_xy = u_x * u_y + u_xz = u_x * u_z + u_yz = u_y * u_z ## + + + row_a = torch.stack( + [cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. 
- cos_) - u_y * sin_], dim=0 + ) + # print(f"row_a: {row_a.size()}") + row_b = torch.stack( + [u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dim=0 + ) + # print(f"row_b: {row_b.size()}") + row_c = torch.stack( + [u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], dim=0 + ) + # print(f"row_c: {row_c.size()}") + + ### rot_mtx for the rot_mtx ### + rot_mtx = torch.stack( + [row_a, row_b, row_c], dim=-1 ### rot_matrix of he matrix ## + ) + + return rot_mtx + + +def calibreate_urdf_files(urdf_fn): + # active_robot = parse_data_from_urdf(xml_fn) + active_robot = parse_data_from_urdf(urdf_fn) + tot_joints = active_robot.tot_joints + + # class Joint_urdf: # + # def __init__(self, name, joint_type, parent_link, child_link, origin_xyz, axis_xyz, limit: Joint_Limit) -> None: + # self.name = name + # self.type = joint_type + # self.parent_link = parent_link + # self.child_link = child_link + # self.origin_xyz = origin_xyz + # self.axis_xyz = axis_xyz + # self.limit = limit + + with open(urdf_fn) as rf: + urdf_string = rf.read() + for cur_joint in tot_joints: + print(f"type: {cur_joint.type}, origin: {cur_joint.origin_xyz}") + cur_joint_origin = cur_joint.origin_xyz + scaled_joint_origin = cur_joint_origin * 3. + cur_joint_origin_string = cur_joint.origin_xyz_string + if len(cur_joint_origin_string) == 0 or torch.sum(cur_joint_origin).item() == 0.: + continue + # + cur_joint_origin_string_wtag = "" + scaled_joint_origin_string_wtag = "" + # scaled_joint_origin_string = f"{scaled_joint_origin[0].item()} {scaled_joint_origin[1].item()} {scaled_joint_origin[2].item()}" + # urdf_string = urdf_string.replace(cur_joint_origin_string, scaled_joint_origin_string) + urdf_string = urdf_string.replace(cur_joint_origin_string_wtag, scaled_joint_origin_string_wtag) + changed_urdf_fn = urdf_fn.replace(".urdf", "_scaled.urdf") + with open(changed_urdf_fn, "w") as wf: + wf.write(urdf_string) + print(f"changed_urdf_fn: {changed_urdf_fn}") + # exit(0) + + +def get_GT_states_data_from_ckpt(ckpt_fn): + mano_nn_substeps = 1 + num_steps = 60 + mano_robot_actions = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ) + torch.nn.init.zeros_(mano_robot_actions.weight) + # params_to_train += list(self.robot_actions.parameters()) + + mano_robot_delta_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ) + torch.nn.init.zeros_(mano_robot_delta_states.weight) + # params_to_train += list(self.robot_delta_states.parameters()) + + mano_robot_init_states = nn.Embedding( + num_embeddings=1, embedding_dim=60, + ) + torch.nn.init.zeros_(mano_robot_init_states.weight) + # params_to_train += list(self.robot_init_states.parameters()) + + mano_robot_glb_rotation = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=4 + ) + mano_robot_glb_rotation.weight.data[:, 0] = 1. + mano_robot_glb_rotation.weight.data[:, 1:] = 0. 
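+    # (editorial comment) [1, 0, 0, 0] is the identity quaternion in (w, x, y, z)
+    # order, so each per-step global rotation starts as "no rotation" before the
+    # optimized values are loaded from the checkpoint below.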
+ # params_to_train += list(self.robot_glb_rotation.parameters()) + + + mano_robot_glb_trans = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=3 + ) + torch.nn.init.zeros_(mano_robot_glb_trans.weight) + # params_to_train += list(self.robot_glb_trans.parameters()) + + mano_robot_states = nn.Embedding( + num_embeddings=num_steps * mano_nn_substeps, embedding_dim=60, + ) + torch.nn.init.zeros_(mano_robot_states.weight) + mano_robot_states.weight.data[0, :] = mano_robot_init_states.weight.data[0, :].clone() + + + ''' Load optimized MANO hand actions and states ''' + # ### laod optimized init actions #### # + # if 'model.load_optimized_init_actions' in self.conf and len(self.conf['model.load_optimized_init_actions']) > 0: + # print(f"[MANO] Loading optimized init transformations from {self.conf['model.load_optimized_init_actions']}") + cur_optimized_init_actions_fn = ckpt_fn + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location='cpu', ) + + if 'mano_robot_states' in optimized_init_actions_ckpt: + mano_robot_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_states']) + + if 'mano_robot_init_states' in optimized_init_actions_ckpt: + mano_robot_init_states.load_state_dict(optimized_init_actions_ckpt['mano_robot_init_states']) + + if 'mano_robot_glb_rotation' in optimized_init_actions_ckpt: + mano_robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_rotation']) + + if 'mano_robot_glb_trans' in optimized_init_actions_ckpt: # mano_robot_glb_trans + mano_robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_trans']) + + mano_glb_trans_np_data = mano_robot_glb_trans.weight.data.detach().cpu().numpy() + mano_glb_rotation_np_data = mano_robot_glb_rotation.weight.data.detach().cpu().numpy() + mano_states_np_data = mano_robot_states.weight.data.detach().cpu().numpy() + + if optimized_init_actions_ckpt is not None and 'object_transl' in optimized_init_actions_ckpt: + object_transl = optimized_init_actions_ckpt['object_transl'].detach().cpu().numpy() + object_global_orient = optimized_init_actions_ckpt['object_global_orient'].detach().cpu().numpy() + + print(mano_robot_states.weight.data[1]) + + #### TODO: add an arg to control where to save the gt-reference-data #### + sv_gt_refereces = { + 'mano_glb_rot': mano_glb_rotation_np_data, + 'mano_glb_trans': mano_glb_trans_np_data, + 'mano_states': mano_states_np_data, + 'obj_rot': object_global_orient, + 'obj_trans': object_transl + } + # sv_gt_refereces_fn = "/home/xueyi/diffsim/Control-VAE/Data/ReferenceData/grab_train_split_20_cube_data.npy" + # sv_gt_refereces_fn = "/home/xueyi/diffsim/Control-VAE/Data/ReferenceData/grab_train_split_25_ball_data.npy" + # sv_gt_refereces_fn = "/home/xueyi/diffsim/Control-VAE/Data/ReferenceData/grab_train_split_54_cylinder_data.npy" + sv_gt_refereces_fn = "/home/xueyi/diffsim/Control-VAE/Data/ReferenceData/grab_train_split_1_dingshuji_data.npy" + np.save(sv_gt_refereces_fn, sv_gt_refereces) + print(f'gt reference data saved to {sv_gt_refereces_fn}') + #### TODO: add an arg to control where to save the gt-reference-data #### + +def scale_and_save_meshes(meshes_folder): + minn_robo_pts = -0.1 + maxx_robo_pts = 0.2 + extent_robo_pts = maxx_robo_pts - minn_robo_pts + mult_const_after_cent = 0.437551664260203 ## should modify + + mult_const_after_cent = mult_const_after_cent / 3. 
* 0.9507 + + meshes_fn = os.listdir(meshes_folder) + meshes_fn = [fn for fn in meshes_fn if fn.endswith(".obj") and "scaled" not in fn] + for cur_fn in meshes_fn: + cur_mesh_name = cur_fn.split(".")[0] + print(f"cur_mesh_name: {cur_mesh_name}") + scaled_mesh_name = cur_mesh_name + "_scaled_bullet.obj" + full_mesh_fn = os.path.join(meshes_folder, cur_fn) + scaled_mesh_fn = os.path.join(meshes_folder, scaled_mesh_name) + try: + cur_mesh = trimesh.load_mesh(full_mesh_fn) + except: + continue + cur_mesh.vertices = cur_mesh.vertices + + if 'palm' in cur_mesh_name: + cur_mesh.vertices = (cur_mesh.vertices - minn_robo_pts) / extent_robo_pts + cur_mesh.vertices = cur_mesh.vertices * 2. -1. + cur_mesh.vertices = cur_mesh.vertices * mult_const_after_cent # mult_const # + else: + cur_mesh.vertices = (cur_mesh.vertices) / extent_robo_pts + cur_mesh.vertices = cur_mesh.vertices * 2. # -1. + cur_mesh.vertices = cur_mesh.vertices * mult_const_after_cent # mult_const # + + cur_mesh.export(scaled_mesh_fn) + print(f"scaled_mesh_fn: {scaled_mesh_fn}") + exit(0) + +def scale_and_save_meshes_v2(meshes_folder): + # /home/xueyi/diffsim/NeuS/rsc/redmax_hand/meshes/hand/body0_centered_scaled_v2.obj + for body_idx in range(0, 18): + cur_body_mesh_fn = f"body{body_idx}_centered_scaled_v2.obj" + cur_body_mesh_fn = os.path.join(meshes_folder, cur_body_mesh_fn) + cur_body_rescaled_mesh_fn = f"body{body_idx}_centered_scaled_v2_rescaled_grab.obj" + cur_body_rescaled_mesh_fn = os.path.join(meshes_folder, cur_body_rescaled_mesh_fn) + cur_mesh = trimesh.load_mesh(cur_body_mesh_fn) + + cur_mesh.vertices = cur_mesh.vertices / 4.0 + cur_mesh.export(cur_body_rescaled_mesh_fn) + + # minn_robo_pts = -0.1 + # maxx_robo_pts = 0.2 + # extent_robo_pts = maxx_robo_pts - minn_robo_pts + # mult_const_after_cent = 0.437551664260203 ## should modify + + # mult_const_after_cent = mult_const_after_cent / 3. * 0.9507 + + # meshes_fn = os.listdir(meshes_folder) + # meshes_fn = [fn for fn in meshes_fn if fn.endswith(".obj") and "scaled" not in fn] + # for cur_fn in meshes_fn: + # cur_mesh_name = cur_fn.split(".")[0] + # print(f"cur_mesh_name: {cur_mesh_name}") + # scaled_mesh_name = cur_mesh_name + "_scaled_bullet.obj" + # full_mesh_fn = os.path.join(meshes_folder, cur_fn) + # scaled_mesh_fn = os.path.join(meshes_folder, scaled_mesh_name) + # try: + # cur_mesh = trimesh.load_mesh(full_mesh_fn) + # except: + # continue + # cur_mesh.vertices = cur_mesh.vertices + + # if 'palm' in cur_mesh_name: + # cur_mesh.vertices = (cur_mesh.vertices - minn_robo_pts) / extent_robo_pts + # cur_mesh.vertices = cur_mesh.vertices * 2. -1. + # cur_mesh.vertices = cur_mesh.vertices * mult_const_after_cent # mult_const # + # else: + # cur_mesh.vertices = (cur_mesh.vertices) / extent_robo_pts + # cur_mesh.vertices = cur_mesh.vertices * 2. # -1. + # cur_mesh.vertices = cur_mesh.vertices * mult_const_after_cent # mult_const # + + # cur_mesh.export(scaled_mesh_fn) + # print(f"scaled_mesh_fn: {scaled_mesh_fn}") + exit(0) + + +def calibreate_urdf_files_v2(urdf_fn): + # active_robot = parse_data_from_urdf(xml_fn) + active_robot = parse_data_from_urdf(urdf_fn) + tot_joints = active_robot.tot_joints + tot_links = active_robot.link_name_to_link_struct + + minn_robo_pts = -0.1 + maxx_robo_pts = 0.2 + extent_robo_pts = maxx_robo_pts - minn_robo_pts + mult_const_after_cent = 0.437551664260203 ## should modify + + mult_const_after_cent = mult_const_after_cent / 3. 
* 0.9507 + + with open(urdf_fn) as rf: + urdf_string = rf.read() + for cur_joint in tot_joints: + # print(f"type: {cur_joint.type}, origin: {cur_joint.origin_xyz}") + cur_joint_origin = cur_joint.origin_xyz + + # cur_joint_origin = (cur_joint_origin / extent_robo_pts) * 2.0 * mult_const_after_cent + + # cur_joint_origin = (cur_joint_origin / extent_robo_pts) * 2.0 * mult_const_after_cent + + if cur_joint.name in ['FFJ4' , 'MFJ4' ,'RFJ4' ,'LFJ5' ,'THJ5']: + cur_joint_origin = (cur_joint_origin - minn_robo_pts) / extent_robo_pts + cur_joint_origin = cur_joint_origin * 2.0 - 1.0 + cur_joint_origin = cur_joint_origin * mult_const_after_cent + else: + cur_joint_origin = (cur_joint_origin) / extent_robo_pts + cur_joint_origin = cur_joint_origin * 2.0 # - 1.0 + cur_joint_origin = cur_joint_origin * mult_const_after_cent + + + origin_list = cur_joint_origin.detach().cpu().tolist() + origin_list = [str(cur_val) for cur_val in origin_list] + origin_str = " ".join(origin_list) + print(f"name: {cur_joint.name}, cur_joint_origin: {origin_str}") + + # scaled_joint_origin = cur_joint_origin * 3. + # cur_joint_origin_string = cur_joint.origin_xyz_string + # if len(cur_joint_origin_string) == 0 or torch.sum(cur_joint_origin).item() == 0.: + # continue + # # + # cur_joint_origin_string_wtag = "" + # scaled_joint_origin_string_wtag = "" + # # scaled_joint_origin_string = f"{scaled_joint_origin[0].item()} {scaled_joint_origin[1].item()} {scaled_joint_origin[2].item()}" + # # urdf_string = urdf_string.replace(cur_joint_origin_string, scaled_joint_origin_string) + # urdf_string = urdf_string.replace(cur_joint_origin_string_wtag, scaled_joint_origin_string_wtag) + # changed_urdf_fn = urdf_fn.replace(".urdf", "_scaled.urdf") + # with open(changed_urdf_fn, "w") as wf: + # wf.write(urdf_string) + # print(f"changed_urdf_fn: {changed_urdf_fn}") + # # exit(0) + + # for cur_link_nm in tot_links: + # cur_link = tot_links[cur_link_nm] + # if cur_link.visual is None: + # continue + # xyz_visual = cur_link.visual.visual_xyz + # xyz_visual = (xyz_visual / extent_robo_pts) * 2.0 * mult_const_after_cent + # xyz_visual_list = xyz_visual.detach().cpu().tolist() + # xyz_visual_list = [str(cur_val) for cur_val in xyz_visual_list] + # xyz_visual_str = " ".join(xyz_visual_list) + # print(f"name: {cur_link.name}, xyz_visual: {xyz_visual_str}") + +def get_shadow_GT_states_data_from_ckpt(ckpt_fn): + mano_nn_substeps = 1 + num_steps = 60 + + + # robot actions # # + # robot_actions = nn.Embedding( + # num_embeddings=num_steps, embedding_dim=22, + # ).cuda() + # torch.nn.init.zeros_(robot_actions.weight) + # # params_to_train += list(robot_actions.parameters()) + + # robot_delta_states = nn.Embedding( + # num_embeddings=num_steps, embedding_dim=60, + # ).cuda() + # torch.nn.init.zeros_(robot_delta_states.weight) + # # params_to_train += list(robot_delta_states.parameters()) + + + robot_states = nn.Embedding( + num_embeddings=num_steps, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(robot_states.weight) + # params_to_train += list(robot_states.parameters()) + + # robot_init_states = nn.Embedding( + # num_embeddings=1, embedding_dim=22, + # ).cuda() + # torch.nn.init.zeros_(robot_init_states.weight) + # # params_to_train += list(robot_init_states.parameters()) + + robot_glb_rotation = nn.Embedding( + num_embeddings=num_steps, embedding_dim=4 + ).cuda() + robot_glb_rotation.weight.data[:, 0] = 1. + robot_glb_rotation.weight.data[:, 1:] = 0. 
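+    # note: these rows are wxyz quaternions (scalar first, initialised to identity);
+    # consumers of the saved 'mano_glb_rot' array that use scipy.spatial.transform.Rotation
+    # need its scalar-last ordering, e.g. quat_xyzw = quat_wxyz[..., [1, 2, 3, 0]].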
+ + + robot_glb_trans = nn.Embedding( + num_embeddings=num_steps, embedding_dim=3 + ).cuda() + torch.nn.init.zeros_(robot_glb_trans.weight) + + ''' Load optimized MANO hand actions and states ''' + cur_optimized_init_actions_fn = ckpt_fn + optimized_init_actions_ckpt = torch.load(cur_optimized_init_actions_fn, map_location='cpu', ) + + print(f"optimized_init_actions_ckpt: {optimized_init_actions_ckpt.keys()}") + + if 'robot_glb_rotation' in optimized_init_actions_ckpt: + robot_glb_rotation.load_state_dict(optimized_init_actions_ckpt['robot_glb_rotation']) + + if 'robot_states' in optimized_init_actions_ckpt: + robot_states.load_state_dict(optimized_init_actions_ckpt['robot_states']) + + if 'robot_glb_trans' in optimized_init_actions_ckpt: + robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['robot_glb_trans']) + + # if 'mano_robot_glb_trans' in optimized_init_actions_ckpt: # mano_robot_glb_trans + # mano_robot_glb_trans.load_state_dict(optimized_init_actions_ckpt['mano_robot_glb_trans']) + + robot_glb_trans_np_data = robot_glb_trans.weight.data.detach().cpu().numpy() + robot_glb_rotation_np_data = robot_glb_rotation.weight.data.detach().cpu().numpy() + robot_states_np_data = robot_states.weight.data.detach().cpu().numpy() + + if optimized_init_actions_ckpt is not None and 'object_transl' in optimized_init_actions_ckpt: + object_transl = optimized_init_actions_ckpt['object_transl'].detach().cpu().numpy() + object_global_orient = optimized_init_actions_ckpt['object_global_orient'].detach().cpu().numpy() + + # print(mano_robot_states.weight.data[1]) + + #### TODO: add an arg to control where to save the gt-reference-data #### + sv_gt_refereces = { + 'mano_glb_rot': robot_glb_rotation_np_data, + 'mano_glb_trans': robot_glb_trans_np_data, + 'mano_states': robot_states_np_data, + 'obj_rot': object_global_orient, + 'obj_trans': object_transl + } + # sv_gt_refereces_fn = "/home/xueyi/diffsim/Control-VAE/Data/ReferenceData/grab_train_split_20_cube_data.npy" + # sv_gt_refereces_fn = "/home/xueyi/diffsim/Control-VAE/Data/ReferenceData/grab_train_split_25_ball_data.npy" + # sv_gt_refereces_fn = "/home/xueyi/diffsim/Control-VAE/Data/ReferenceData/grab_train_split_54_cylinder_data.npy" + # sv_gt_refereces_fn = "/home/xueyi/diffsim/Control-VAE/Data/ReferenceData/shadow_grab_train_split_224_tiantianquan_data.npy" + sv_gt_refereces_fn = "/home/xueyi/diffsim/Control-VAE/Data/ReferenceData/shadow_grab_train_split_54_cylinder_data.npy" + np.save(sv_gt_refereces_fn, sv_gt_refereces) + print(f'gt reference data saved to {sv_gt_refereces_fn}') + #### TODO: add an arg to control where to save the gt-reference-data #### + +## saved the robot file ## + + +def calibrate_left_shadow_hand(): + rgt_shadow_hand_des_folder = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description" + lft_shadow_hand_des_folder = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description_left" + os.makedirs(lft_shadow_hand_des_folder, exist_ok=True) + lft_shadow_hand_mesh_folder = os.path.join(lft_shadow_hand_des_folder, "meshes") + os.makedirs(lft_shadow_hand_mesh_folder, exist_ok=True) + rgt_shadow_hand_mesh_folder = os.path.join(rgt_shadow_hand_des_folder, "meshes") + tot_rgt_hand_meshes = os.listdir(rgt_shadow_hand_mesh_folder) + tot_rgt_hand_meshes = [fn for fn in tot_rgt_hand_meshes if fn.endswith(".obj")] + for cur_hand_mesh_fn in tot_rgt_hand_meshes: + full_rgt_mesh_fn = os.path.join(rgt_shadow_hand_mesh_folder, cur_hand_mesh_fn) + try: + full_rgt_mesh = trimesh.load(full_rgt_mesh_fn, force='mesh') + except: + continue + 
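+        # mirroring note: negating the y coordinate below converts the right-hand meshes
+        # into left-hand ones, but flipping a single axis also reverses the triangle
+        # winding; if downstream consumers rely on outward-facing normals, re-orienting
+        # the mirrored mesh (for example with trimesh's fix_normals) is likely needed
+        # before export (an assumption to verify for this pipeline).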
full_rgt_mesh_verts = full_rgt_mesh.vertices + full_rgt_mesh_faces = full_rgt_mesh.faces + full_rgt_mesh_verts[:, 1] = -1. * full_rgt_mesh_verts[:, 1] ## flip the y-axis + lft_mesh = trimesh.Trimesh(vertices=full_rgt_mesh_verts, faces=full_rgt_mesh_faces) + lft_mesh_fn = os.path.join(lft_shadow_hand_mesh_folder, cur_hand_mesh_fn) + lft_mesh.export(lft_mesh_fn) + print(f"lft_mesh_fn: {lft_mesh_fn}") + exit(0) + + +## urd for the left hand +def calibreate_urdf_files_left_hand(urdf_fn): + # active_robot = parse_data_from_urdf(xml_fn) + active_robot = parse_data_from_urdf(urdf_fn) + tot_joints = active_robot.tot_joints + tot_links = active_robot.link_name_to_link_struct + + minn_robo_pts = -0.1 + maxx_robo_pts = 0.2 + extent_robo_pts = maxx_robo_pts - minn_robo_pts + mult_const_after_cent = 0.437551664260203 ## should modify + + mult_const_after_cent = mult_const_after_cent / 3. * 0.9507 + + with open(urdf_fn) as rf: + urdf_string = rf.read() + for cur_joint in tot_joints: + # print(f"type: {cur_joint.type}, origin: {cur_joint.origin_xyz}") + cur_joint_origin = cur_joint.origin_xyz + + cur_joint_axis = cur_joint.axis_xyz + + cur_joint_origin = cur_joint_origin.detach() + cur_joint_axis = cur_joint_axis.detach() + + cur_joint_origin[1] = -1.0 * cur_joint_origin[1] + cur_joint_axis[1] = -1.0 * cur_joint_axis[1] + + + origin_list = cur_joint_origin.detach().cpu().tolist() + origin_list = [str(cur_val) for cur_val in origin_list] + origin_str = " ".join(origin_list) + + axis_list = cur_joint_axis.detach().cpu().tolist() + axis_list = [str(cur_val) for cur_val in axis_list] + axis_str = " ".join(axis_list) + print(f"name: {cur_joint.name}, cur_joint_origin: {origin_str}, axis_str: {axis_str}") + + # cur_joint_origin = (cur_joint_origin / extent_robo_pts) * 2.0 * mult_const_after_cent + + # cur_joint_origin = (cur_joint_origin / extent_robo_pts) * 2.0 * mult_const_after_cent + + # if cur_joint.name in ['FFJ4' , 'MFJ4' ,'RFJ4' ,'LFJ5' ,'THJ5']: + # cur_joint_origin = (cur_joint_origin - minn_robo_pts) / extent_robo_pts + # cur_joint_origin = cur_joint_origin * 2.0 - 1.0 + # cur_joint_origin = cur_joint_origin * mult_const_after_cent + # else: + # cur_joint_origin = (cur_joint_origin) / extent_robo_pts + # cur_joint_origin = cur_joint_origin * 2.0 # - 1.0 + # cur_joint_origin = cur_joint_origin * mult_const_after_cent + + + # origin_list = cur_joint_origin.detach().cpu().tolist() + # origin_list = [str(cur_val) for cur_val in origin_list] + # origin_str = " ".join(origin_list) + # print(f"name: {cur_joint.name}, cur_joint_origin: {origin_str}") + +def calibreate_urdf_files_v4(urdf_fn, dst_urdf_fn): + # active_robot = parse_data_from_urdf(xml_fn) + active_robot = parse_data_from_urdf(urdf_fn) + tot_joints = active_robot.tot_joints + tot_links = active_robot.link_name_to_link_struct + + # minn_robo_pts = -0.1 + # maxx_robo_pts = 0.2 + # extent_robo_pts = maxx_robo_pts - minn_robo_pts + # mult_const_after_cent = 0.437551664260203 ## should modify + + # mult_const_after_cent = mult_const_after_cent / 3. * 0.9507 + + with open(urdf_fn) as rf: + urdf_string = rf.read() + for cur_joint in tot_joints: + # print(f"type: {cur_joint.type}, origin: {cur_joint.origin_xyz}") + cur_joint_origin = cur_joint.origin_xyz + modified_joint_origin = cur_joint_origin / 4. 
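+        # caveat: the rescaling below works by plain string replacement, so it only fires
+        # when the space-joined str(float) form of the parsed origin matches the URDF text
+        # exactly (e.g. "0.0" vs "0"), and identical origin strings shared by several
+        # joints are all replaced at once.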
+ + origin_list = cur_joint_origin.detach().cpu().tolist() + origin_list = [str(cur_val) for cur_val in origin_list] + origin_str = " ".join(origin_list) + + dst_list = modified_joint_origin.detach().cpu().tolist() + dst_list = [str(cur_val) for cur_val in dst_list] + dst_str = " ".join(dst_list) + + urdf_string = urdf_string.replace(origin_str, dst_str) + + with open(dst_urdf_fn, "w") as wf: + wf.write(urdf_string) + wf.close() + # # cur_joint_origin = (cur_joint_origin / extent_robo_pts) * 2.0 * mult_const_after_cent + + # # cur_joint_origin = (cur_joint_origin / extent_robo_pts) * 2.0 * mult_const_after_cent + + # if cur_joint.name in ['FFJ4' , 'MFJ4' ,'RFJ4' ,'LFJ5' ,'THJ5']: + # cur_joint_origin = (cur_joint_origin - minn_robo_pts) / extent_robo_pts + # cur_joint_origin = cur_joint_origin * 2.0 - 1.0 + # cur_joint_origin = cur_joint_origin * mult_const_after_cent + # else: + # cur_joint_origin = (cur_joint_origin) / extent_robo_pts + # cur_joint_origin = cur_joint_origin * 2.0 # - 1.0 + # cur_joint_origin = cur_joint_origin * mult_const_after_cent + + + # origin_list = cur_joint_origin.detach().cpu().tolist() + # origin_list = [str(cur_val) for cur_val in origin_list] + # origin_str = " ".join(origin_list) + # print(f"name: {cur_joint.name}, cur_joint_origin: {origin_str}") + + +def test_gt_ref_data(gt_ref_data_fn): + cur_gt_ref_data = np.load(gt_ref_data_fn, allow_pickle=True).item() + print(cur_gt_ref_data.keys()) + + mano_glb_rot, glb_trans, states = cur_gt_ref_data['mano_glb_rot'], cur_gt_ref_data['mano_glb_trans'], cur_gt_ref_data['mano_states'] + return mano_glb_rot, glb_trans, states + + +def get_states(gt_ref_data_fn): + states = np.load(gt_ref_data_fn, allow_pickle=True).item() + return states['target'] + +#### Big TODO: the external contact forces from the manipulated object to the robot #### +if __name__=='__main__': # # # + + gt_ref_data_fn = "/home/xueyi/diffsim/Control-VAE/Data/ReferenceData/shadow_grab_train_split_85_bunny_wact_data.npy" + # mano_glb_rot, glb_trans, states = test_gt_ref_data(gt_ref_data_fn) + # eixt(0) + mano_states_fn = '/home/xueyi/diffsim/NeuS/raw_data/evalulated_traj_sm_l512_wana_v3_subiters1024_optim_params_shadow_85_bunny_std0d01_netv1_mass10000_new_dp1d0_wtable_gn9d8__step_2.npy' + mano_states_fn = '/home/xueyi/diffsim/NeuS/raw_data/evalulated_traj_sm_l512_wana_v3_subiters1024_optim_params_shadow_102_mouse_wact_std0d01_netv1_mass10000_new_dp1d0_dtv2tsv2ctlv2_netv3optt_lstd_langdamp_wcmase_ni4_wtable_gn_adp1d0_trwmonly_cs0d6_predtarmano_wambient__step_9.npy' + mano_states = get_states(mano_states_fn) + + blended_ratio = 0.5 + + blended_states = [] + + tot_rot_mtxes = [] + tot_trans = [] + for i_state in range(len(mano_states)): + cur_trans = mano_states[i_state][:3] + cur_rot = mano_states[i_state][3:6] + cur_states = mano_states[i_state][6:] + + cur_rot_struct = R.from_euler('zyx', cur_rot[[2, 1, 0]], degrees=False) + cur_rot_mtx = cur_rot_struct.as_matrix() + + tot_rot_mtxes.append(cur_rot_mtx) + tot_trans.append(cur_trans) + + + cur_state = cur_states # states[i_state] + cur_modified_state = mano_states[0][6:] + (cur_state - mano_states[0][6:] ) * blended_ratio + + cur_modified_state = np.concatenate([np.zeros((2,), dtype=np.float32), cur_modified_state], axis=-1) + blended_states.append(cur_modified_state) + # return blended_states + + tot_rot_mtxes = np.stack(tot_rot_mtxes, axis=0) + tot_trans = np.stack(tot_trans, axis=0) + blended_states = np.stack(blended_states, axis=0) + + # urdf_fn = 
"/home/xueyi/diffsim/NeuS/rsc/redmax_hand/redmax_hand_test_3_wcollision.urdf" + # dst_urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/redmax_hand/redmax_hand_test_3_wcollision_rescaled_grab.urdf" + # calibreate_urdf_files_v4(urdf_fn, dst_urdf_fn) + # exit(0) + + # meshes_folder = "/home/xueyi/diffsim/NeuS/rsc/redmax_hand/meshes/hand" + # scale_and_save_meshes_v2(meshes_folder) + # exit(0) + + urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description/shadowhand_new_scaled.urdf" + urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description/shadowhand_new_scaled_nroot_new.urdf" + # urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description/shadowhand_new_scaled_nroot.urdf" + # urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/redmax_hand/redmax_hand_test_3_wcollision_rescaled_grab.urdf" + robot_agent = RobotAgent(urdf_fn) + + init_vertices, init_faces = robot_agent.active_robot.init_vertices, robot_agent.active_robot.init_faces + init_vertices = init_vertices.detach().cpu().numpy() + init_faces = init_faces.detach().cpu().numpy() + + tot_transformed_pts = [] + for i_ts in range(len(blended_states)): + cur_blended_states = blended_states[i_ts] + cur_blended_states = torch.from_numpy(cur_blended_states).float().cuda() + robot_agent.active_robot.set_delta_state_and_update_v2(cur_blended_states, 0) + cur_pts = robot_agent.get_init_state_visual_pts().detach().cpu().numpy() + + cur_pts_transformed = np.matmul( + tot_rot_mtxes[i_ts], cur_pts.T + ).T + tot_trans[i_ts][None] + tot_transformed_pts.append(cur_pts_transformed) + tot_transformed_pts = np.stack(tot_transformed_pts, axis=0) + np.save("/home/xueyi/diffsim/NeuS/raw_data/transformed_pts.npy", {'tot_transformed_pts': tot_transformed_pts, 'init_faces': init_faces}) + exit(0) + + + robot_agent.active_robot.set_delta_state_and_update_v2() + + init_vertices, init_faces = robot_agent.active_robot.init_vertices, robot_agent.active_robot.init_faces + init_vertices = init_vertices.detach().cpu().numpy() + init_faces = init_faces.detach().cpu().numpy() + print(f"init_vertices: {init_vertices.shape}, init_faces: {init_faces.shape}") + shadow_hand_mesh = trimesh.Trimesh(vertices=init_vertices, faces=init_faces) + # shadow_hand_sv_fn = "/home/xueyi/diffsim/NeuS/raw_data/shadow_hand_lft.obj" + shadow_hand_sv_fn = "/home/xueyi/diffsim/NeuS/raw_data/shadow_hand_new.ply" + shadow_hand_mesh.export(shadow_hand_sv_fn) + np.save("/home/xueyi/diffsim/NeuS/raw_data/faces.npy", init_faces) + + exit(0) + + init_vertices, init_faces = robot_agent.active_robot.init_vertices, robot_agent.active_robot.init_faces + init_vertices = init_vertices.detach().cpu().numpy() + init_faces = init_faces.detach().cpu().numpy() + print(f"init_vertices: {init_vertices.shape}, init_faces: {init_faces.shape}") + shadow_hand_mesh = trimesh.Trimesh(vertices=init_vertices, faces=init_faces) + # shadow_hand_sv_fn = "/home/xueyi/diffsim/NeuS/raw_data/shadow_hand_lft.obj" + shadow_hand_sv_fn = "/home/xueyi/diffsim/NeuS/raw_data/scaled_shadow_hand.obj" + shadow_hand_sv_fn = "/home/xueyi/diffsim/NeuS/raw_data/scaled_redmax_hand_rescaled_grab.obj" + shadow_hand_mesh.export(shadow_hand_sv_fn) + + init_joint_states = torch.randn((60, ), dtype=torch.float32).cuda() + robot_agent.set_initial_state(init_joint_states) + + + cur_verts, cur_faces = robot_agent.get_init_visual_pts() + cur_mesh = trimesh.Trimesh(vertices=cur_verts.detach().cpu().numpy(), faces=cur_faces.detach().cpu().numpy()) + shadow_hand_sv_fn = "/home/xueyi/diffsim/NeuS/raw_data/scaled_redmax_hand_rescaled_grab_wstate.obj" + 
cur_mesh.export(shadow_hand_sv_fn) + exit(0) + + + + urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description/shadowhand_new_scaled.urdf" + + ## + lft_urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description_left/shadowhand_left_new_scaled.urdf" + + + urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/redmax_hand/redmax_hand_test_3_wcollision.urdf" + + ## + lft_urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/redmax_hand/redmax_hand_test_3_wcollision.urdf" + + robot_agent = RobotAgent(lft_urdf_fn) + init_vertices, init_faces = robot_agent.active_robot.init_vertices, robot_agent.active_robot.init_faces + init_vertices = init_vertices.detach().cpu().numpy() + init_faces = init_faces.detach().cpu().numpy() + print(f"init_vertices: {init_vertices.shape}, init_faces: {init_faces.shape}") + shadow_hand_mesh = trimesh.Trimesh(vertices=init_vertices, faces=init_faces) + shadow_hand_sv_fn = "/home/xueyi/diffsim/NeuS/raw_data/shadow_hand_lft.obj" + shadow_hand_sv_fn = "/home/xueyi/diffsim/NeuS/raw_data/redmax_hand.obj" + shadow_hand_mesh.export(shadow_hand_sv_fn) + exit(0) + + + rgt_urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description/shadowhand_new_scaled.urdf" + # rgt_urdf_fn + calibreate_urdf_files_left_hand(rgt_urdf_fn) + exit(0) + + calibrate_left_shadow_hand() + exit(0) + + # ckpt_fn = "/data3/datasets/xueyi/neus/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_retargeted_shadow_hand_states_/checkpoints/ckpt_320000.pth" + # ckpt_fn = "/data3/datasets/xueyi/neus/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_retargeted_shadow_hand_states_optrobot__seq_54_optrules_/checkpoints/ckpt_030000.pth" + # get_shadow_GT_states_data_from_ckpt(ckpt_fn) + # exit(0) + + # urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description/shadowhand_new_scaled.urdf" + # calibreate_urdf_files_v2(urdf_fn) + # exit(0) + + meshes_folder = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description/meshes" + # scale_and_save_meshes(meshes_folder) + # exit(0) + + # sv_ckpt_fn = "/data3/datasets/xueyi/neus/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_mano_states_grab_train_54_cylinder_tst_/checkpoints/ckpt_070000.pth" + # sv_ckpt_fn = "/data3/datasets/xueyi/neus/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_train_mano_states_grab_train_1_dingshuji_tst_/checkpoints/ckpt_070000.pth" + # get_GT_states_data_from_ckpt(sv_ckpt_fn) + # exit(0) + + + # urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/mano/mano_mean_wcollision_scaled_scaled_0_9507_nroot.urdf" + # robot_agent = RobotAgent(urdf_fn) + # exit(0) + + # urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/mano/mano_mean_nocoll_simplified.urdf" + # urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/mano/mano_mean_wcollision_scaled.urdf" + # calibreate_urdf_files(urdf_fn) + # exit(0) + + # urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/mano/mano_mean_nocoll_simplified.urdf" + urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/shadow_hand_description/shadowhand_new.urdf" + robot_agent = RobotAgent(urdf_fn) + + init_vertices, init_faces = robot_agent.active_robot.init_vertices, robot_agent.active_robot.init_faces + init_vertices = init_vertices.detach().cpu().numpy() + init_faces = init_faces.detach().cpu().numpy() + + shadow_hand_mesh = trimesh.Trimesh(vertices=init_vertices, faces=init_faces) + shadow_hand_sv_fn = "/home/xueyi/diffsim/NeuS/raw_data/shadow_hand.obj" + shadow_hand_mesh.export(shadow_hand_sv_fn) + exit(0) + + + 
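+    # minimal consumer sketch for the 'transformed_pts.npy' dump written further up:
+    # reload the per-timestep transformed hand points (the shared faces are stored under
+    # 'init_faces') and export one point cloud per frame for inspection. Output filenames
+    # are placeholders, and like its neighbours this block is guarded by the exit(0) above.
+    dump = np.load("/home/xueyi/diffsim/NeuS/raw_data/transformed_pts.npy", allow_pickle=True).item()
+    tot_transformed_pts = dump['tot_transformed_pts']
+    for i_fr in range(tot_transformed_pts.shape[0]):
+        trimesh.PointCloud(tot_transformed_pts[i_fr]).export(f"transformed_hand_{i_fr}.ply")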
ref_dict_npy = "reference_verts.npy" + robot_agent.initialize_optimization(ref_dict_npy) + ts_to_robot_points, ts_to_ref_points = robot_agent.forward_stepping_optimization() + np.save(f"ts_to_robot_points.npy", ts_to_robot_points) + np.save(f"ts_to_ref_points.npy", ts_to_ref_points) + exit(0) + + urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/mano/mano_mean_nocoll_simplified.urdf" + cur_robot = parse_data_from_urdf(urdf_fn) + # self.init_vertices, self.init_faces + init_vertices, init_faces = cur_robot.init_vertices, cur_robot.init_faces + + + + init_vertices = init_vertices.detach().cpu().numpy() + init_faces = init_faces.detach().cpu().numpy() + + + ## initial states ehre ##3 + # mesh_obj = trimesh.Trimesh(vertices=init_vertices, faces=init_faces) + # mesh_obj.export(f"hand_urdf.ply") + + ##### Test the set initial state function ##### + init_joint_states = torch.zeros((60, ), dtype=torch.float32).cuda() + cur_robot.set_initial_state(init_joint_states) + ##### Test the set initial state function ##### + + + + + cur_zeros_actions = torch.zeros((60, ), dtype=torch.float32).cuda() + cur_ones_actions = torch.ones((60, ), dtype=torch.float32).cuda() # * 100 + + ts_to_mesh_verts = {} + for i_ts in range(50): + cur_robot.calculate_inertia() + + cur_robot.set_actions_and_update_states(cur_ones_actions, i_ts, 0.2) ### + + + cur_verts, cur_faces = cur_robot.get_init_visual_pts() + cur_mesh = trimesh.Trimesh(vertices=cur_verts.detach().cpu().numpy(), faces=cur_faces.detach().cpu().numpy()) + + ts_to_mesh_verts[i_ts + i_ts] = cur_verts.detach().cpu().numpy() + # cur_mesh.export(f"stated_mano_mesh.ply") + # cur_mesh.export(f"zero_actioned_mano_mesh.ply") + cur_mesh.export(f"ones_actioned_mano_mesh_ts_{i_ts}.ply") + + np.save(f"reference_verts.npy", ts_to_mesh_verts) + + exit(0) + + xml_fn = "/home/xueyi/diffsim/DiffHand/assets/hand_sphere.xml" + robot_agent = RobotAgent(xml_fn=xml_fn, args=None) + init_visual_pts = robot_agent.init_visual_pts.detach().cpu().numpy() + exit(0) + \ No newline at end of file diff --git a/models/dyn_model_act_v2_deformable.py b/models/dyn_model_act_v2_deformable.py new file mode 100644 index 0000000000000000000000000000000000000000..ab8f113aa07aa01e11d9917fd048b75eed2ae67a --- /dev/null +++ b/models/dyn_model_act_v2_deformable.py @@ -0,0 +1,1582 @@ + +import math +# import torch +# from ..utils import Timer +import numpy as np +# import torch.nn.functional as F +import os + +import argparse + +from xml.etree.ElementTree import ElementTree + +import trimesh +import torch +import torch.nn as nn +# import List +# class link; joint; body +### + +from scipy.spatial.transform import Rotation as R +from torch.distributions.uniform import Uniform + +# deformable articulated objects with the articulated models # + +DAMPING = 1.0 +DAMPING = 0.3 + +def plane_rotation_matrix_from_angle_xz(angle): + sin_ = torch.sin(angle) + cos_ = torch.cos(angle) + zero_padding = torch.zeros_like(cos_) + one_padding = torch.ones_like(cos_) + col_a = torch.stack( + [cos_, zero_padding, sin_], dim=0 + ) + col_b = torch.stack( + [zero_padding, one_padding, zero_padding], dim=0 + ) + col_c = torch.stack( + [-1. * sin_, zero_padding, cos_], dim=0 + ) + rot_mtx = torch.stack( + [col_a, col_b, col_c], dim=-1 + ) + return rot_mtx + +def plane_rotation_matrix_from_angle(angle): + ## angle of + sin_ = torch.sin(angle) + cos_ = torch.cos(angle) + col_a = torch.stack( + [cos_, sin_], dim=0 ### col of the rotation matrix + ) + col_b = torch.stack( + [-1. 
* sin_, cos_], dim=0 ## cols of the rotation matrix + ) + rot_mtx = torch.stack( + [col_a, col_b], dim=-1 ### rotation matrix + ) + return rot_mtx + +def rotation_matrix_from_axis_angle(axis, angle): # rotation_matrix_from_axis_angle -> + # sin_ = np.sin(angle) # ti.math.sin(angle) + # cos_ = np.cos(angle) # ti.math.cos(angle) + sin_ = torch.sin(angle) # ti.math.sin(angle) + cos_ = torch.cos(angle) # ti.math.cos(angle) + u_x, u_y, u_z = axis[0], axis[1], axis[2] + u_xx = u_x * u_x + u_yy = u_y * u_y + u_zz = u_z * u_z + u_xy = u_x * u_y + u_xz = u_x * u_z + u_yz = u_y * u_z + + row_a = torch.stack( + [cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], dim=0 + ) + # print(f"row_a: {row_a.size()}") + row_b = torch.stack( + [u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dim=0 + ) + # print(f"row_b: {row_b.size()}") + row_c = torch.stack( + [u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], dim=0 + ) + # print(f"row_c: {row_c.size()}") + + ### rot_mtx for the rot_mtx ### + rot_mtx = torch.stack( + [row_a, row_b, row_c], dim=-1 ### rot_matrix of he matrix ## + ) + + return rot_mtx + + +def update_quaternion(delta_angle, prev_quat): + s1 = 0 + s2 = prev_quat[0] + v2 = prev_quat[1:] + v1 = delta_angle / 2 + new_v = s1 * v2 + s2 * v1 + torch.cross(v1, v2) + new_s = s1 * s2 - torch.sum(v1 * v2) + new_quat = torch.cat([new_s.unsqueeze(0), new_v], dim=0) + return new_quat + + +def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as quaternions to rotation matrices. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + r, i, j, k = torch.unbind(quaternions, -1) # -1 for the quaternion matrix # + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. 
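+    # two_s = 2 / |q|^2: folding the squared norm into the scale factor keeps the matrix
+    # expression below valid for unnormalised quaternions as well as unit ones.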
+ two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + + + +class Inertial: + def __init__(self, origin_rpy, origin_xyz, mass, inertia) -> None: + self.origin_rpy = origin_rpy + self.origin_xyz = origin_xyz + self.mass = mass + self.inertia = inertia + if torch.sum(self.inertia).item() < 1e-4: + self.inertia = self.inertia + torch.eye(3, dtype=torch.float32).cuda() + pass + +class Visual: + def __init__(self, visual_xyz, visual_rpy, geometry_mesh_fn, geometry_mesh_scale) -> None: + # self.visual_origin = visual_origin + self.visual_xyz = visual_xyz + self.visual_rpy = visual_rpy + self.mesh_nm = geometry_mesh_fn.split("/")[-1].split(".")[0] + mesh_root = "/home/xueyi/diffsim/NeuS/rsc/mano" ## mano models of the mesh root ## + if not os.path.exists(mesh_root): + mesh_root = "/data/xueyi/diffsim/NeuS/rsc/mano" + self.mesh_root = mesh_root + self.geometry_mesh_fn = os.path.join(mesh_root, geometry_mesh_fn) + self.geometry_mesh_scale = geometry_mesh_scale + # tranformed by xyz # + self.vertices, self.faces = self.load_geoemtry_mesh() + self.cur_expanded_visual_pts = None + pass + + def load_geoemtry_mesh(self, ): + # mesh_root = + mesh = trimesh.load_mesh(self.geometry_mesh_fn) + vertices = mesh.vertices + faces = mesh.faces + + vertices = torch.from_numpy(vertices).float().cuda() + faces =torch.from_numpy(faces).long().cuda() + + vertices = vertices * self.geometry_mesh_scale.unsqueeze(0) + self.visual_xyz.unsqueeze(0) + + return vertices, faces + + # init_visual_meshes = get_init_visual_meshes(self, parent_rot, parent_trans, init_visual_meshes) + def get_init_visual_meshes(self, parent_rot, parent_trans, init_visual_meshes): + # cur_vertices = torch.matmul(parent_rot, self.vertices.transpose(1, 0)).contiguous().transpose(1, 0).contiguous() + parent_trans.unsqueeze(0) + cur_vertices = self.vertices + # print(f"adding mesh loaded from {self.geometry_mesh_fn}") + init_visual_meshes['vertices'].append(cur_vertices) # cur vertices # trans # + init_visual_meshes['faces'].append(self.faces) + return init_visual_meshes + + def expand_visual_pts(self, ): + expand_factor = 0.2 + nn_expand_pts = 20 + + expand_factor = 0.4 + nn_expand_pts = 40 ### number of the expanded points ### ## points ## + expand_save_fn = f"{self.mesh_nm}_expanded_pts_factor_{expand_factor}_nnexp_{nn_expand_pts}.npy" + expand_save_fn = os.path.join(self.mesh_root, expand_save_fn) + + if not os.path.exists(expand_save_fn): + cur_expanded_visual_pts = [] + if self.cur_expanded_visual_pts is None: + cur_src_pts = self.vertices + else: + cur_src_pts = self.cur_expanded_visual_pts + maxx_verts, _ = torch.max(cur_src_pts, dim=0) + minn_verts, _ = torch.min(cur_src_pts, dim=0) + extent_verts = maxx_verts - minn_verts ## (3,)-dim vecotr + norm_extent_verts = torch.norm(extent_verts, dim=-1).item() ## (1,)-dim vector + expand_r = norm_extent_verts * expand_factor + # nn_expand_pts = 5 # expand the vertices to 5 times of the original vertices + for i_pts in range(self.vertices.size(0)): + cur_pts = cur_src_pts[i_pts] + # sample from the circile with cur_pts as thejcenter and the radius as expand_r + # (-r, r) # sample the offset vector in the size of (nn_expand_pts, 3) + offset_dist = Uniform(-1. 
* expand_r, expand_r) + offset_vec = offset_dist.sample((nn_expand_pts, 3)).cuda() + cur_expanded_pts = cur_pts + offset_vec + cur_expanded_visual_pts.append(cur_expanded_pts) + cur_expanded_visual_pts = torch.cat(cur_expanded_visual_pts, dim=0) + np.save(expand_save_fn, cur_expanded_visual_pts.detach().cpu().numpy()) + else: + print(f"Loading visual pts from {expand_save_fn}") # load from the fn # + cur_expanded_visual_pts = np.load(expand_save_fn, allow_pickle=True) + cur_expanded_visual_pts = torch.from_numpy(cur_expanded_visual_pts).float().cuda() + self.cur_expanded_visual_pts = cur_expanded_visual_pts # expanded visual pts # + return self.cur_expanded_visual_pts + # cur_pts # + # use r as the search direction # # expande save fn # + def get_transformed_visual_pts(self, visual_pts_list): + visual_pts_list.append(self.cur_expanded_visual_pts) # + return visual_pts_list + + + +## link urdf ## expand the visual pts to form the expanded visual grids pts # +# use get_name_to_visual_pts_faces to get the transformed visual pts and faces # +## Link_urdf ## +class Link_urdf: # get_transformed_visual_pts # + def __init__(self, name, inertial: Inertial, visual: Visual=None) -> None: + + self.name = name + self.inertial = inertial + self.visual = visual # vsiual meshes # + + # self.joint = joint + # self.body = body + # self.children = children + # self.name = name + + self.link_idx = ... + + # self.args = args + + self.joint = None # joint name to struct + # self.join + self.children = ... + self.children = {} # joint name to child sruct + + ### dyn_model_act ### + # parent_rot_mtx, parent_trans_vec # + # parent_rot_mtx, parent_trans_vec # # link urdf # + # self.parent_rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32).cuda(), requires_grad=True) + # self.parent_trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32).cuda(), requires_grad=True) + # self.curr_rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32).cuda(), requires_grad=True) + # self.curr_trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32).cuda(), requires_grad=True) + # # + # self.tot_rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32).cuda(), requires_grad=True) + # self.tot_trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32).cuda(), requires_grad=True) + + # expand visual pts # + def expand_visual_pts(self, expanded_visual_pts, link_name_to_visited, link_name_to_link_struct): + link_name_to_visited[self.name] = 1 + if self.visual is not None: + cur_expanded_visual_pts = self.visual.expand_visual_pts() + expanded_visual_pts.append(cur_expanded_visual_pts) + + for cur_link in self.children: + cur_link_struct = link_name_to_link_struct[self.children[cur_link]] + cur_link_name = cur_link_struct.name + if cur_link_name in link_name_to_visited: + continue + expanded_visual_pts = cur_link_struct.expand_visual_pts(expanded_visual_pts, link_name_to_visited, link_name_to_link_struct) + return expanded_visual_pts + + def get_transformed_visual_pts(self, visual_pts_list, link_name_to_visited, link_name_to_link_struct): + link_name_to_visited[self.name] = 1 + + if self.joint is not None: + for cur_joint_name in self.joint: + cur_joint = self.joint[cur_joint_name] + cur_child_name = self.children[cur_joint_name] + cur_child = link_name_to_link_struct[cur_child_name] # parent and the child_visual, cur_child.visual # + # parent # + # print(f"joint: {cur_joint.name}, child: {cur_child_name}, parent: {self.name}, child_visual: {cur_child.visual is not None}") + # print(f"joint: {cur_joint.name}, 
child: {cur_child_name}, parent: {self.name}, child_visual: {cur_child.visual is not None}") + # joint_origin_xyz = cur_joint.origin_xyz ## transformed_visual_pts #### + if cur_child_name in link_name_to_visited: + continue + # cur_child_visual_pts = {'vertices': [], 'faces': [], 'link_idxes': [], 'transformed_joint_pos': [], 'joint_link_idxes': []} + cur_child_visual_pts_list = [] + cur_child_visual_pts_list = cur_child.get_transformed_visual_pts(cur_child_visual_pts_list, link_name_to_visited, link_name_to_link_struct) + + if len(cur_child_visual_pts_list) > 0: + cur_child_visual_pts = torch.cat(cur_child_visual_pts_list, dim=0) + # cur_child_verts, cur_child_faces = cur_child_visual_pts['vertices'], cur_child_visual_pts['faces'] + # cur_child_link_idxes = cur_child_visual_pts['link_idxes'] + # cur_transformed_joint_pos = cur_child_visual_pts['transformed_joint_pos'] + # joint_link_idxes = cur_child_visual_pts['joint_link_idxes'] + # if len(cur_child_verts) > 0: + # cur_child_verts, cur_child_faces = merge_meshes(cur_child_verts, cur_child_faces) + cur_child_visual_pts = cur_child_visual_pts + cur_joint.origin_xyz.unsqueeze(0) + cur_joint_rot, cur_joint_trans = cur_joint.compute_transformation_from_current_state() + cur_child_visual_pts = torch.matmul(cur_joint_rot, cur_child_visual_pts.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_joint_trans.unsqueeze(0) + + # if len(cur_transformed_joint_pos) > 0: + # cur_transformed_joint_pos = torch.cat(cur_transformed_joint_pos, dim=0) + # cur_transformed_joint_pos = cur_transformed_joint_pos + cur_joint.origin_xyz.unsqueeze(0) + # cur_transformed_joint_pos = torch.matmul(cur_joint_rot, cur_transformed_joint_pos.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_joint_trans.unsqueeze(0) + # cur_joint_pos = cur_joint_trans.unsqueeze(0).clone() + # cur_transformed_joint_pos = torch.cat( + # [cur_transformed_joint_pos, cur_joint_pos], dim=0 ##### joint poses ##### + # ) + # else: + # cur_transformed_joint_pos = cur_joint_trans.unsqueeze(0).clone() + + # if len(joint_link_idxes) > 0: + # joint_link_idxes = torch.cat(joint_link_idxes, dim=-1) ### joint_link idxes ### + # cur_joint_idx = cur_child.link_idx + # joint_link_idxes = torch.cat( + # [joint_link_idxes, torch.tensor([cur_joint_idx], dtype=torch.long).cuda()], dim=-1 + # ) + # else: + # joint_link_idxes = torch.tensor([cur_child.link_idx], dtype=torch.long).cuda().view(1,) + + + visual_pts_list.append(cur_child_visual_pts) + # cur_child_verts = cur_child_verts + # transformed joint pos # + # cur_child_link_idxes = torch.cat(cur_child_link_idxes, dim=-1) + # # joint_link_idxes = torch.cat(joint_link_idxes, dim=-1) + # init_visual_meshes['vertices'].append(cur_child_verts) + # init_visual_meshes['faces'].append(cur_child_faces) + # init_visual_meshes['link_idxes'].append(cur_child_link_idxes) + # init_visual_meshes['transformed_joint_pos'].append(cur_transformed_joint_pos) + # init_visual_meshes['joint_link_idxes'].append(joint_link_idxes) + + + if self.visual is not None: + # get_transformed_visual_pts # + visual_pts_list = self.visual.get_transformed_visual_pts(visual_pts_list) + + # for cur_link in self.children: + # cur_link_name = cur_link.name + # if cur_link_name in link_name_to_visited: # link name to visited # + # continue + # visual_pts_list = cur_link.get_transformed_visual_pts(visual_pts_list, link_name_to_visited, link_name_to_link_struct) + return visual_pts_list + + # use both the articulated motion and the frre form + def set_initial_state(self, 
states, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct): + + link_name_to_visited[self.name] = 1 + + if self.joint is not None: + for cur_joint_name in self.joint: + cur_joint = self.joint[cur_joint_name] + cur_joint_name = cur_joint.name + cur_child = self.children[cur_joint_name] + cur_child_struct = link_name_to_link_struct[cur_child] + cur_child_name = cur_child_struct.name + + if cur_child_name in link_name_to_visited: + continue + if cur_joint.type in ['revolute']: + cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] # action joint name to joint idx # + # cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] # + # cur_joint = self.joint[cur_joint_name] + cur_state = states[cur_joint_idx] ### joint state ### + cur_joint.set_initial_state(cur_state) + cur_child_struct.set_initial_state(states, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct) + + + + def get_init_visual_meshes(self, parent_rot, parent_trans, init_visual_meshes, link_name_to_link_struct, link_name_to_visited): + link_name_to_visited[self.name] = 1 + + # 'transformed_joint_pos': [], 'link_idxes': [] + if self.joint is not None: + # for i_ch, (cur_joint, cur_child) in enumerate(zip(self.joint, self.children)): + # print(f"joint: {cur_joint.name}, child: {cur_child.name}, parent: {self.name}, child_visual: {cur_child.visual is not None}") + # joint_origin_xyz = cur_joint.origin_xyz + # init_visual_meshes = cur_child.get_init_visual_meshes(parent_rot, parent_trans + joint_origin_xyz, init_visual_meshes) + # print(f"name: {self.name}, keys: {self.joint.keys()}") + for cur_joint_name in self.joint: # + cur_joint = self.joint[cur_joint_name] + cur_child_name = self.children[cur_joint_name] + cur_child = link_name_to_link_struct[cur_child_name] + # print(f"joint: {cur_joint.name}, child: {cur_child_name}, parent: {self.name}, child_visual: {cur_child.visual is not None}") + # print(f"joint: {cur_joint.name}, child: {cur_child_name}, parent: {self.name}, child_visual: {cur_child.visual is not None}") + joint_origin_xyz = cur_joint.origin_xyz + if cur_child_name in link_name_to_visited: + continue + cur_child_visual_pts = {'vertices': [], 'faces': [], 'link_idxes': [], 'transformed_joint_pos': [], 'joint_link_idxes': []} + cur_child_visual_pts = cur_child.get_init_visual_meshes(parent_rot, parent_trans + joint_origin_xyz, cur_child_visual_pts, link_name_to_link_struct, link_name_to_visited) + cur_child_verts, cur_child_faces = cur_child_visual_pts['vertices'], cur_child_visual_pts['faces'] + cur_child_link_idxes = cur_child_visual_pts['link_idxes'] + cur_transformed_joint_pos = cur_child_visual_pts['transformed_joint_pos'] + joint_link_idxes = cur_child_visual_pts['joint_link_idxes'] + if len(cur_child_verts) > 0: + cur_child_verts, cur_child_faces = merge_meshes(cur_child_verts, cur_child_faces) + cur_child_verts = cur_child_verts + cur_joint.origin_xyz.unsqueeze(0) + cur_joint_rot, cur_joint_trans = cur_joint.compute_transformation_from_current_state() + cur_child_verts = torch.matmul(cur_joint_rot, cur_child_verts.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_joint_trans.unsqueeze(0) + + if len(cur_transformed_joint_pos) > 0: + cur_transformed_joint_pos = torch.cat(cur_transformed_joint_pos, dim=0) + cur_transformed_joint_pos = cur_transformed_joint_pos + cur_joint.origin_xyz.unsqueeze(0) + cur_transformed_joint_pos = torch.matmul(cur_joint_rot, cur_transformed_joint_pos.transpose(1, 0).contiguous()).transpose(1, 
0).contiguous() + cur_joint_trans.unsqueeze(0) + cur_joint_pos = cur_joint_trans.unsqueeze(0).clone() + cur_transformed_joint_pos = torch.cat( + [cur_transformed_joint_pos, cur_joint_pos], dim=0 ##### joint poses ##### + ) + else: + cur_transformed_joint_pos = cur_joint_trans.unsqueeze(0).clone() + + if len(joint_link_idxes) > 0: + joint_link_idxes = torch.cat(joint_link_idxes, dim=-1) ### joint_link idxes ### + cur_joint_idx = cur_child.link_idx + joint_link_idxes = torch.cat( + [joint_link_idxes, torch.tensor([cur_joint_idx], dtype=torch.long).cuda()], dim=-1 + ) + else: + joint_link_idxes = torch.tensor([cur_child.link_idx], dtype=torch.long).cuda().view(1,) + + + + # cur_child_verts = cur_child_verts + # transformed joint pos # + cur_child_link_idxes = torch.cat(cur_child_link_idxes, dim=-1) + # joint_link_idxes = torch.cat(joint_link_idxes, dim=-1) + init_visual_meshes['vertices'].append(cur_child_verts) + init_visual_meshes['faces'].append(cur_child_faces) + init_visual_meshes['link_idxes'].append(cur_child_link_idxes) + init_visual_meshes['transformed_joint_pos'].append(cur_transformed_joint_pos) + init_visual_meshes['joint_link_idxes'].append(joint_link_idxes) + + + # joint_origin_xyz = self.joint.origin_xyz + else: + joint_origin_xyz = torch.tensor([0., 0., 0.], dtype=torch.float32).cuda() + # self.parent_rot_mtx = parent_rot + # self.parent_trans_vec = parent_trans + joint_origin_xyz + + + if self.visual is not None: + init_visual_meshes = self.visual.get_init_visual_meshes(parent_rot, parent_trans, init_visual_meshes) + cur_visual_mesh_pts_nn = self.visual.vertices.size(0) + cur_link_idxes = torch.zeros((cur_visual_mesh_pts_nn, ), dtype=torch.long).cuda()+ self.link_idx + init_visual_meshes['link_idxes'].append(cur_link_idxes) + + # for cur_link in self.children: # + # init_visual_meshes = cur_link.get_init_visual_meshes(self.parent_rot_mtx, self.parent_trans_vec, init_visual_meshes) + return init_visual_meshes ## init visual meshes ## + + # calculate inerti + def calculate_inertia(self, link_name_to_visited, link_name_to_link_struct): + link_name_to_visited[self.name] = 1 + self.cur_inertia = torch.zeros((3, 3), dtype=torch.float32).cuda() + + if self.joint is not None: + for joint_nm in self.joint: + cur_joint = self.joint[joint_nm] + cur_child = self.children[joint_nm] + cur_child_struct = link_name_to_link_struct[cur_child] + cur_child_name = cur_child_struct.name + if cur_child_name in link_name_to_visited: + continue + joint_rot, joint_trans = cur_joint.compute_transformation_from_current_state(n_grad=True) + # cur_parent_rot = torch.matmul(parent_rot, joint_rot) # + # cur_parent_trans = torch.matmul(parent_rot, joint_trans.unsqueeze(-1)).squeeze(-1) + parent_trans # + child_inertia = cur_child_struct.calculate_inertia(link_name_to_visited, link_name_to_link_struct) + child_inertia = torch.matmul( + joint_rot.detach(), torch.matmul(child_inertia, joint_rot.detach().transpose(1, 0).contiguous()) + ).detach() + self.cur_inertia += child_inertia + # if self.visual is not None: + # self.cur_inertia += self.visual.inertia + self.cur_inertia += self.inertial.inertia.detach() + return self.cur_inertia + + + def set_delta_state_and_update(self, states, cur_timestep, link_name_to_visited, action_joint_name_to_joint_idx, link_name_to_link_struct): + + link_name_to_visited[self.name] = 1 + + if self.joint is not None: + for cur_joint_name in self.joint: + + cur_joint = self.joint[cur_joint_name] # joint model + + cur_child = self.children[cur_joint_name] # child model # + + 
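+                # self.children maps the joint name to the *name* of the child link; the struct
+                # is looked up in link_name_to_link_struct below, and link_name_to_visited keeps
+                # the recursion from revisiting a link while the kinematic graph is traversed.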
cur_child_struct = link_name_to_link_struct[cur_child] + + cur_child_name = cur_child_struct.name + + if cur_child_name in link_name_to_visited: + continue + + ## cur child inertia ## + # cur_child_inertia = cur_child_struct.cur_inertia + + + if cur_joint.type in ['revolute']: + cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] + cur_state = states[cur_joint_idx] + ### get the child struct ### + # set_actions_and_update_states(self, action, cur_timestep, time_cons, cur_inertia): + # set actions and update states # + cur_joint.set_delta_state_and_update(cur_state, cur_timestep) + + cur_child_struct.set_delta_state_and_update(states, cur_timestep, link_name_to_visited, action_joint_name_to_joint_idx, link_name_to_link_struct) + + + + # the joint # + # set_actions_and_update_states(actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct) + def set_actions_and_update_states(self, actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct): + + link_name_to_visited[self.name] = 1 + + # the current joint of the + if self.joint is not None: + for cur_joint_name in self.joint: + + cur_joint = self.joint[cur_joint_name] # joint model + + cur_child = self.children[cur_joint_name] # child model # + + cur_child_struct = link_name_to_link_struct[cur_child] + + cur_child_name = cur_child_struct.name + + if cur_child_name in link_name_to_visited: + continue + + cur_child_inertia = cur_child_struct.cur_inertia + + + if cur_joint.type in ['revolute']: + cur_joint_idx = action_joint_name_to_joint_idx[cur_joint_name] + cur_action = actions[cur_joint_idx] + ### get the child struct ### + # set_actions_and_update_states(self, action, cur_timestep, time_cons, cur_inertia): + # set actions and update states # + cur_joint.set_actions_and_update_states(cur_action, cur_timestep, time_cons, cur_child_inertia.detach()) + + cur_child_struct.set_actions_and_update_states(actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, link_name_to_link_struct) + + + def set_init_states_target_value(self, init_states): + if self.joint.type == 'revolute': + self.joint_angle = init_states[self.joint.joint_idx] + joint_axis = self.joint.axis + self.rot_vec = self.joint_angle * joint_axis + self.joint.state = torch.tensor([1, 0, 0, 0], dtype=torch.float32).cuda() + self.joint.state = self.joint.state + update_quaternion(self.rot_vec, self.joint.state) + self.joint.timestep_to_states[0] = self.joint.state.detach() + self.joint.timestep_to_vels[0] = torch.zeros((3,), dtype=torch.float32).cuda().detach() ## velocity ## + for cur_link in self.children: + cur_link.set_init_states_target_value(init_states) + + # should forward for one single step -> use the action # + def set_init_states(self, ): + self.joint.state = torch.tensor([1, 0, 0, 0], dtype=torch.float32).cuda() + self.joint.timestep_to_states[0] = self.joint.state.detach() + self.joint.timestep_to_vels[0] = torch.zeros((3,), dtype=torch.float32).cuda().detach() ## velocity ## + for cur_link in self.children: + cur_link.set_init_states() + + + def get_visual_pts(self, visual_pts_list): + visual_pts_list = self.body.get_visual_pts(visual_pts_list) + for cur_link in self.children: + visual_pts_list = cur_link.get_visual_pts(visual_pts_list) + visual_pts_list = torch.cat(visual_pts_list, dim=0) + return visual_pts_list + + def get_visual_faces_list(self, visual_faces_list): + visual_faces_list = 
self.body.get_visual_faces_list(visual_faces_list) + for cur_link in self.children: + visual_faces_list = cur_link.get_visual_faces_list(visual_faces_list) + return visual_faces_list + # pass + + + def set_state(self, name_to_state): + self.joint.set_state(name_to_state=name_to_state) + for child_link in self.children: + child_link.set_state(name_to_state) + + def set_state_via_vec(self, state_vec): + self.joint.set_state_via_vec(state_vec) + for child_link in self.children: + child_link.set_state_via_vec(state_vec) + + + + +class Joint_Limit: + def __init__(self, effort, lower, upper, velocity) -> None: + self.effort = effort + self.lower = lower + self.velocity = velocity + self.upper = upper + pass + +# Joint_urdf(name, joint_type, parent_link, child_link, origin_xyz, axis_xyz, limit: Joint_Limit) +class Joint_urdf: # + + def __init__(self, name, joint_type, parent_link, child_link, origin_xyz, axis_xyz, limit: Joint_Limit) -> None: + self.name = name + self.type = joint_type + self.parent_link = parent_link + self.child_link = child_link + self.origin_xyz = origin_xyz + self.axis_xyz = axis_xyz + self.limit = limit + + # joint angle; joint state # + self.timestep_to_vels = {} + self.timestep_to_states = {} + + self.init_pos = self.origin_xyz.clone() + + #### only for the current state #### # joint urdf # + self.state = nn.Parameter( + torch.tensor([1., 0., 0., 0.], dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True + ) + self.action = nn.Parameter( + torch.zeros((1,), dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True + ) + # self.rot_mtx = np.eye(3, dtypes=np.float32) + # self.trans_vec = np.zeros((3,), dtype=np.float32) ## rot m + self.rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True) + self.trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32, requires_grad=True).cuda(), requires_grad=True) + + def set_initial_state(self, state): + # joint angle as the state value # + self.timestep_to_vels[0] = torch.zeros((3,), dtype=torch.float32).cuda().detach() ## velocity ## + delta_rot_vec = self.axis_xyz * state + # self.timestep_to_states[0] = state.detach() + cur_state = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + init_state = cur_state + update_quaternion(delta_rot_vec, cur_state) + self.timestep_to_states[0] = init_state.detach() + self.state = init_state + + def set_delta_state_and_update(self, state, cur_timestep): + self.timestep_to_vels[cur_timestep] = torch.zeros((3,), dtype=torch.float32).cuda().detach() + delta_rot_vec = self.axis_xyz * state + if cur_timestep == 0: + prev_state = torch.tensor([1., 0., 0., 0.], dtype=torch.float32).cuda() + else: + prev_state = self.timestep_to_states[cur_timestep - 1].detach() + cur_state = prev_state + update_quaternion(delta_rot_vec, prev_state) + self.timestep_to_states[cur_timestep] = cur_state.detach() + self.state = cur_state + + + + def compute_transformation_from_current_state(self, n_grad=False): + # together with the parent rot mtx and the parent trans vec # + # cur_joint_state = self.state + if self.type == "revolute": + # rot_mtx = rotation_matrix_from_axis_angle(self.axis, cur_joint_state) + # trans_vec = self.pos - np.matmul(rot_mtx, self.pos.reshape(3, 1)).reshape(3) + if n_grad: + rot_mtx = quaternion_to_matrix(self.state.detach()) + else: + rot_mtx = quaternion_to_matrix(self.state) + # trans_vec = self.pos - torch.matmul(rot_mtx, self.pos.view(3, 1)).view(3).contiguous() + trans_vec = self.origin_xyz - 
torch.matmul(rot_mtx, self.origin_xyz.view(3, 1)).view(3).contiguous() + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec + elif self.type == "fixed": + rot_mtx = torch.eye(3, dtype=torch.float32).cuda() + trans_vec = torch.zeros((3,), dtype=torch.float32).cuda() + # trans_vec = self.origin_xyz + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec # + else: + pass + return self.rot_mtx, self.trans_vec + + + # set actions # set actions and udpate states # + def set_actions_and_update_states(self, action, cur_timestep, time_cons, cur_inertia): + + # timestep_to_vels, timestep_to_states, state # + if self.type in ['revolute']: + + self.action = action + # + # visual_pts and visual_pts_mass # + # cur_joint_pos = self.joint.pos # + # TODO: check whether the following is correct # + torque = self.action * self.axis_xyz + + # # Compute inertia matrix # + # inertial = torch.zeros((3, 3), dtype=torch.float32).cuda() + # for i_pts in range(self.visual_pts.size(0)): + # cur_pts = self.visual_pts[i_pts] + # cur_pts_mass = self.visual_pts_mass[i_pts] + # cur_r = cur_pts - cur_joint_pos # r_i + # # cur_vert = init_passive_mesh[i_v] + # # cur_r = cur_vert - init_passive_mesh_center + # dot_r_r = torch.sum(cur_r * cur_r) + # cur_eye_mtx = torch.eye(3, dtype=torch.float32).cuda() + # r_mult_rT = torch.matmul(cur_r.unsqueeze(-1), cur_r.unsqueeze(0)) + # inertial += (dot_r_r * cur_eye_mtx - r_mult_rT) * cur_pts_mass + # m = torch.sum(self.visual_pts_mass) + # # Use torque to update angular velocity -> state # + # inertia_inv = torch.linalg.inv(inertial) + + # axis-angle of + # inertia_inv = self.cur_inertia_inv + # print(f"updating actions and states for the joint {self.name} with type {self.type}") + inertia_inv = torch.linalg.inv(cur_inertia).detach() + + delta_omega = torch.matmul(inertia_inv, torque.unsqueeze(-1)).squeeze(-1) + + # delta_omega = torque / 400 # + + # timestep_to_vels, timestep_to_states, state # + + # TODO: dt should be an optimizable constant? should it be the same value as that optimized for the passive object? # + delta_angular_vel = delta_omega * time_cons # * self.args.dt + delta_angular_vel = delta_angular_vel.squeeze(0) + if cur_timestep > 0: ## cur_timestep - 1 ## + prev_angular_vel = self.timestep_to_vels[cur_timestep - 1].detach() + cur_angular_vel = prev_angular_vel + delta_angular_vel * DAMPING + else: + cur_angular_vel = delta_angular_vel + + self.timestep_to_vels[cur_timestep] = cur_angular_vel.detach() + + cur_delta_quat = cur_angular_vel * time_cons # * self.args.dt + cur_delta_quat = cur_delta_quat.squeeze(0) + cur_state = self.timestep_to_states[cur_timestep].detach() # quaternion # + # print(f"cur_delta_quat: {cur_delta_quat.size()}, cur_state: {cur_state.size()}") + nex_state = cur_state + update_quaternion(cur_delta_quat, cur_state) + self.timestep_to_states[cur_timestep + 1] = nex_state.detach() + self.state = nex_state # set the joint state # + + +# get_transformed_visual_pts() --- transformed_visual_pts ## +# use the transformed visual # the articulated motion field # +# then we should add the free motion field here # # add the free motion field # # hwo to use that? 
# +# another rules for optimizing articulation motion field # +# -> the articulated model predicted transformations # +# -> the free motion field -> the motion field predicted by the network for each timestep -> an implicit motion field # + +class Robot_urdf: + def __init__(self, links, link_name_to_link_idxes, link_name_to_link_struct, joint_name_to_joint_idx, actions_joint_name_to_joint_idx) -> None: + self.links = links + self.link_name_to_link_idxes = link_name_to_link_idxes + self.link_name_to_link_struct = link_name_to_link_struct + # joint_name_to_joint_idx, actions_joint_name_to_joint_idx + self.joint_name_to_joint_idx = joint_name_to_joint_idx + self.actions_joint_name_to_joint_idx = actions_joint_name_to_joint_idx + + # + # particles + # sample particles + # how to sample particles? + # how to expand the particles? # -> you can use weights in the model dict # + # from grids and jample from grids # + # link idx to the + # robot # + # init vertices, init faces # + # expande the aprticles # + # expanede particles # + # use particles to conduct the simulation # + + + + self.init_vertices, self.init_faces = self.get_init_visual_pts() + + + init_visual_pts_sv_fn = "robot_expanded_visual_pts.npy" + np.save(init_visual_pts_sv_fn, self.init_vertices.detach().cpu().numpy()) + + joint_name_to_joint_idx_sv_fn = "mano_joint_name_to_joint_idx.npy" + np.save(joint_name_to_joint_idx_sv_fn, self.joint_name_to_joint_idx) + + actions_joint_name_to_joint_idx_sv_fn = "mano_actions_joint_name_to_joint_idx.npy" + np.save(actions_joint_name_to_joint_idx_sv_fn, self.actions_joint_name_to_joint_idx) + + tot_joints = len(self.joint_name_to_joint_idx) + tot_actions_joints = len(self.actions_joint_name_to_joint_idx) + + print(f"tot_joints: {tot_joints}, tot_actions_joints: {tot_actions_joints}") + + pass + + + + + def expand_visual_pts(self, ): + link_name_to_visited = {} + # transform the visual pts # + # action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + expanded_visual_pts = [] + # expanded the visual pts # # transformed viusal pts # or the translations of the visual pts # + expanded_visual_pts = palm_link.expand_visual_pts(expanded_visual_pts, link_name_to_visited, self.link_name_to_link_struct) + expanded_visual_pts = torch.cat(expanded_visual_pts, dim=0) + # pass + return expanded_visual_pts + + + # get_transformed_visual_pts() # get_transformed_visual_pts of the visual pts ### get_transformed_visual_pts ## get_transformed_visual_pts ### # + def get_transformed_visual_pts(self, ): + init_visual_pts = [] + link_name_to_visited = {} + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + ### init_visual_pts # from the pal mink to get the total transformed visual pts ## + init_visual_pts = palm_link.get_transformed_visual_pts(init_visual_pts, link_name_to_visited, self.link_name_to_link_struct) + + init_visual_pts = torch.cat(init_visual_pts, dim=0) ## get the inita visual pts from the palm link ### + return init_visual_pts + + + ### samping issue? 
--- TODO` ` + def get_init_visual_pts(self, ): + init_visual_meshes = { + 'vertices': [], 'faces': [], 'link_idxes': [], 'transformed_joint_pos': [], 'link_idxes': [], 'transformed_joint_pos': [], 'joint_link_idxes': [] + } + init_parent_rot = torch.eye(3, dtype=torch.float32).cuda() + init_parent_trans = torch.zeros((3,), dtype=torch.float32).cuda() + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + init_visual_meshes = palm_link.get_init_visual_meshes(init_parent_rot, init_parent_trans, init_visual_meshes, self.link_name_to_link_struct, link_name_to_visited) + + self.link_idxes = torch.cat(init_visual_meshes['link_idxes'], dim=-1) + self.transformed_joint_pos = torch.cat(init_visual_meshes['transformed_joint_pos'], dim=0) + self.joint_link_idxes = torch.cat(init_visual_meshes['joint_link_idxes'], dim=-1) ### + + + + # for cur_link in self.links: + # init_visual_meshes = cur_link.get_init_visual_meshes(init_parent_rot, init_parent_trans, init_visual_meshes, self.link_name_to_link_struct, link_name_to_visited) + + init_vertices, init_faces = merge_meshes(init_visual_meshes['vertices'], init_visual_meshes['faces']) + return init_vertices, init_faces + + + def set_delta_state_and_update(self, states, cur_timestep): + link_name_to_visited = {} + + action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + palm_link.set_delta_state_and_update(states, cur_timestep, link_name_to_visited, action_joint_name_to_joint_idx, self.link_name_to_link_struct) + + # cur_joint.set_actions_and_update_states(cur_action, cur_timestep, time_cons, cur_child_inertia) + def set_actions_and_update_states(self, actions, cur_timestep, time_cons,): + # actions + # self.actions_joint_name_to_joint_idx as the action joint name to joint idx + link_name_to_visited = {} + + action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + palm_link.set_actions_and_update_states(actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct) + + # for cur_joint in + + # for cur_link in self.links: + # if cur_link.joint is not None: + # for cur_joint_nm in cur_link.joint: + # if cur_link.joint[cur_joint_nm].type in ['revolute']: + # cur_link_joint_name = cur_link.joint[cur_joint_nm].name + # cur_link_joint_idx = self.actions_joint_name_to_joint_idx[cur_link_joint_name] + + + # for cur_link in self.links: + # cur_link.set_actions_and_update_states(actions, cur_timestep, time_cons, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct) + + ### TODO: add the contact torque when calculating the nextstep states ### + ### TODO: not an accurate implementation since differen joints should be jconsidered for one single link ### + ### TODO: the articulated force modle is not so easy as this one .... 
### + def set_contact_forces(self, hard_selected_forces, hard_selected_manipulating_points, hard_selected_sampled_input_pts_idxes): + # transformed_joint_pos, joint_link_idxes, link_idxes # + selected_pts_link_idxes = self.link_idxes[hard_selected_sampled_input_pts_idxes] + # use the selected link idxes # + # selected pts idxes # + + # self.joint_link_idxes, transformed_joint_pos # + self.link_idx_to_transformed_joint_pos = {} + for i_link in range(self.transformed_joint_pos.size(0)): + cur_link_idx = self.link_idxes[i_link].item() + cur_link_pos = self.transformed_joint_pos[i_link] + # if cur_link_idx not in self.link_idx_to_transformed_joint_pos: + self.link_idx_to_transformed_joint_pos[cur_link_idx] = cur_link_pos + # self.link_idx_to_transformed_joint_pos[cur_link_idx].append(cur_link_pos) + + # from the + self.link_idx_to_contact_forces = {} + for i_c_pts in range(hard_selected_forces.size(0)): + cur_contact_force = hard_selected_forces[i_c_pts] ## + cur_link_idx = selected_pts_link_idxes[i_c_pts].item() + cur_link_pos = self.link_idx_to_transformed_joint_pos[cur_link_idx] + cur_link_action_pos = hard_selected_manipulating_points[i_c_pts] + # (action_pos - link_pos) x (-contact_force) # + cur_contact_torque = torch.cross( + cur_link_action_pos - cur_link_pos, -cur_contact_force + ) + if cur_link_idx not in self.link_idx_to_contact_forces: + self.link_idx_to_contact_forces[cur_link_idx] = [cur_contact_torque] + else: + self.link_idx_to_contact_forces[cur_link_idx].append(cur_contact_torque) + for link_idx in self.link_idx_to_contact_forces: + self.link_idx_to_contact_forces[link_idx] = torch.stack(self.link_idx_to_contact_forces[link_idx], dim=0) + self.link_idx_to_contact_forces[link_idx] = torch.sum(self.link_idx_to_contact_forces[link_idx] , dim=0) + for link_idx, link_struct in enumerate(self.links): + if link_idx in self.link_idx_to_contact_forces: + cur_link_contact_force = self.link_idx_to_contact_forces[link_idx] + link_struct.contact_torque = cur_link_contact_force + else: + link_struct.contact_torque = None + + + # def se ### from the optimizable initial states ### + def set_initial_state(self, states): + action_joint_name_to_joint_idx = self.actions_joint_name_to_joint_idx + link_name_to_visited = {} + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + palm_link.set_initial_state(states, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct) + + # for cur_link in self.links: + # cur_link.set_initial_state(states, action_joint_name_to_joint_idx, link_name_to_visited, self.link_name_to_link_struct) + + ### after each timestep -> re-calculate the inertial matrix using the current simulated states and the set the new actiosn and forward the simulation # + def calculate_inertia(self): + link_name_to_visited = {} + + palm_idx = self.link_name_to_link_idxes["palm"] + palm_link = self.links[palm_idx] + + link_name_to_visited = {} + + palm_link.calculate_inertia(link_name_to_visited, self.link_name_to_link_struct) + + # for cur_link in self.links: + # cur_link.calculate_inertia(link_name_to_visited, self.link_name_to_link_struct) + + + + + +def parse_nparray_from_string(strr, args=None): + vals = strr.split(" ") + vals = [float(val) for val in vals] + vals = np.array(vals, dtype=np.float32) + vals = torch.from_numpy(vals).float() + ## vals ## + vals = nn.Parameter(vals.cuda(), requires_grad=True) + + return vals + + +### parse link data ### +def parse_link_data(link, args): + + link_name 
= link.attrib["name"] + # print(f"parsing link: {link_name}") ## joints body meshes # + + joint = link.find("./joint") + + joint_name = joint.attrib["name"] + joint_type = joint.attrib["type"] + if joint_type in ["revolute"]: ## a general xml parser here? + axis = joint.attrib["axis"] + axis = parse_nparray_from_string(axis, args=args) + else: + axis = None + pos = joint.attrib["pos"] # + pos = parse_nparray_from_string(pos, args=args) + quat = joint.attrib["quat"] + quat = parse_nparray_from_string(quat, args=args) + + try: + frame = joint.attrib["frame"] + except: + frame = "WORLD" + + if joint_type not in ["fixed"]: + damping = joint.attrib["damping"] + damping = float(damping) + else: + damping = 0.0 + + cur_joint = Joint(joint_name, joint_type, axis, pos, quat, frame, damping, args=args) + + body = link.find("./body") + body_name = body.attrib["name"] + body_type = body.attrib["type"] + if body_type == "mesh": + filename = body.attrib["filename"] + else: + filename = "" + + if body_type == "sphere": + radius = body.attrib["radius"] + radius = float(radius) + else: + radius = 0. + + pos = body.attrib["pos"] + pos = parse_nparray_from_string(pos, args=args) + quat = body.attrib["quat"] + quat = joint.attrib["quat"] + try: + transform_type = body.attrib["transform_type"] + except: + transform_type = "OBJ_TO_WORLD" + density = body.attrib["density"] + density = float(density) + mu = body.attrib["mu"] + mu = float(mu) + try: ## rgba ## + rgba = body.attrib["rgba"] + rgba = parse_nparray_from_string(rgba, args=args) + except: + rgba = np.zeros((4,), dtype=np.float32) + + cur_body = Body(body_name, body_type, filename, pos, quat, transform_type, density, mu, rgba, radius, args=args) + + children_link = [] + links = link.findall("./link") + for child_link in links: # + cur_child_link = parse_link_data(child_link, args=args) + children_link.append(cur_child_link) + + link_name = link.attrib["name"] + link_obj = Link(link_name, joint=cur_joint, body=cur_body, children=children_link, args=args) + return link_obj + + +### parse link data ### +def parse_link_data_urdf(link): + + link_name = link.attrib["name"] + # print(f"parsing link: {link_name}") ## joints body meshes # + + inertial = link.find("./inertial") + + origin = inertial.find("./origin") + inertial_pos = origin.attrib["xyz"] + inertial_pos = parse_nparray_from_string(inertial_pos) + + inertial_rpy = origin.attrib["rpy"] + inertial_rpy = parse_nparray_from_string(inertial_rpy) + + inertial_mass = inertial.find("./mass") + inertial_mass = inertial_mass.attrib["value"] + + inertial_inertia = inertial.find("./inertia") + inertial_ixx = inertial_inertia.attrib["ixx"] + inertial_ixx = float(inertial_ixx) + inertial_ixy = inertial_inertia.attrib["ixy"] + inertial_ixy = float(inertial_ixy) + inertial_ixz = inertial_inertia.attrib["ixz"] + inertial_ixz = float(inertial_ixz) + inertial_iyy = inertial_inertia.attrib["iyy"] + inertial_iyy = float(inertial_iyy) + inertial_iyz = inertial_inertia.attrib["iyz"] + inertial_iyz = float(inertial_iyz) + inertial_izz = inertial_inertia.attrib["izz"] + inertial_izz = float(inertial_izz) + + inertial_inertia_mtx = torch.zeros((3, 3), dtype=torch.float32).cuda() + inertial_inertia_mtx[0, 0] = inertial_ixx + inertial_inertia_mtx[0, 1] = inertial_ixy + inertial_inertia_mtx[0, 2] = inertial_ixz + inertial_inertia_mtx[1, 0] = inertial_ixy + inertial_inertia_mtx[1, 1] = inertial_iyy + inertial_inertia_mtx[1, 2] = inertial_iyz + inertial_inertia_mtx[2, 0] = inertial_ixz + inertial_inertia_mtx[2, 1] = inertial_iyz + 
inertial_inertia_mtx[2, 2] = inertial_izz + + # [xx, xy, xz] # + # [0, yy, yz] # + # [0, 0, zz] # + + # a strange inertia value ... # + # TODO: how to compute the inertia matrix? # + + visual = link.find("./visual") + + if visual is not None: + origin = visual.find("./origin") + visual_pos = origin.attrib["xyz"] + visual_pos = parse_nparray_from_string(visual_pos) + visual_rpy = origin.attrib["rpy"] + visual_rpy = parse_nparray_from_string(visual_rpy) + geometry = visual.find("./geometry") + geometry_mesh = geometry.find("./mesh") + mesh_fn = geometry_mesh.attrib["filename"] + mesh_scale = geometry_mesh.attrib["scale"] + + mesh_scale = parse_nparray_from_string(mesh_scale) + mesh_fn = str(mesh_fn) + + + link_struct = Link_urdf(name=link_name, inertial=Inertial(origin_rpy=inertial_rpy, origin_xyz=inertial_pos, mass=inertial_mass, inertia=inertial_inertia_mtx), visual=Visual(visual_rpy=visual_rpy, visual_xyz=visual_pos, geometry_mesh_fn=mesh_fn, geometry_mesh_scale=mesh_scale) if visual is not None else None) + + return link_struct + +def parse_joint_data_urdf(joint): + joint_name = joint.attrib["name"] + joint_type = joint.attrib["type"] + + parent = joint.find("./parent") + child = joint.find("./child") + parent_name = parent.attrib["link"] + child_name = child.attrib["link"] + + joint_origin = joint.find("./origin") + # if joint_origin. + try: + origin_xyz = joint_origin.attrib["xyz"] + origin_xyz = parse_nparray_from_string(origin_xyz) + except: + origin_xyz = torch.tensor([0., 0., 0.], dtype=torch.float32).cuda() + + joint_axis = joint.find("./axis") + if joint_axis is not None: + joint_axis = joint_axis.attrib["xyz"] + joint_axis = parse_nparray_from_string(joint_axis) + else: + joint_axis = torch.tensor([1, 0., 0.], dtype=torch.float32).cuda() + + joint_limit = joint.find("./limit") + if joint_limit is not None: + joint_lower = joint_limit.attrib["lower"] + joint_lower = float(joint_lower) + joint_upper = joint_limit.attrib["upper"] + joint_upper = float(joint_upper) + joint_effort = joint_limit.attrib["effort"] + joint_effort = float(joint_effort) + joint_velocity = joint_limit.attrib["velocity"] + joint_velocity = float(joint_velocity) + else: + joint_lower = -0.5000 + joint_upper = 1.57 + joint_effort = 1000 + joint_velocity = 0.5 + + # cosntruct the joint data # + joint_limit = Joint_Limit(effort=joint_effort, lower=joint_lower, upper=joint_upper, velocity=joint_velocity) + cur_joint_struct = Joint_urdf(joint_name, joint_type, parent_name, child_name, origin_xyz, joint_axis, joint_limit) + return cur_joint_struct + + + +def parse_data_from_urdf(xml_fn): + + tree = ElementTree() + tree.parse(xml_fn) + print(f"{xml_fn}") + ### get total robots ### + # robots = tree.findall("link") + cur_robot = tree + # i_robot = 0 # + # tot_robots = [] # + # for cur_robot in robots: # + # print(f"Getting robot: {i_robot}") # + # i_robot += 1 # + # print(f"len(robots): {len(robots)}") # + # cur_robot = robots[0] # + cur_links = cur_robot.findall("./link") + # i_link = 0 + link_name_to_link_idxes = {} + cur_robot_links = [] + link_name_to_link_struct = {} + for i_link_idx, cur_link in enumerate(cur_links): + cur_link_struct = parse_link_data_urdf(cur_link) + print(f"Adding link {cur_link_struct.name}") + cur_link_struct.link_idx = i_link_idx + cur_robot_links.append(cur_link_struct) + + link_name_to_link_idxes[cur_link_struct.name] = i_link_idx + link_name_to_link_struct[cur_link_struct.name] = cur_link_struct + # for cur_link in cur_links: + # cur_robot_links.append(parse_link_data_urdf(cur_link, 
args=args)) + + print(f"link_name_to_link_struct: {len(link_name_to_link_struct)}, ") + + tot_robot_joints = [] + + joint_name_to_joint_idx = {} + + actions_joint_name_to_joint_idx = {} + + cur_joints = cur_robot.findall("./joint") + for i_joint, cur_joint in enumerate(cur_joints): + cur_joint_struct = parse_joint_data_urdf(cur_joint) + cur_joint_parent_link = cur_joint_struct.parent_link + cur_joint_child_link = cur_joint_struct.child_link + + cur_joint_idx = len(tot_robot_joints) + cur_joint_name = cur_joint_struct.name + + joint_name_to_joint_idx[cur_joint_name] = cur_joint_idx + + cur_joint_type = cur_joint_struct.type + if cur_joint_type in ['revolute']: + actions_joint_name_to_joint_idx[cur_joint_name] = cur_joint_idx + + + #### add the current joint to tot joints ### + tot_robot_joints.append(cur_joint_struct) + + parent_link_idx = link_name_to_link_idxes[cur_joint_parent_link] + cur_parent_link_struct = cur_robot_links[parent_link_idx] + + + child_link_idx = link_name_to_link_idxes[cur_joint_child_link] + cur_child_link_struct = cur_robot_links[child_link_idx] + # parent link struct # + if link_name_to_link_struct[cur_joint_parent_link].joint is not None: + link_name_to_link_struct[cur_joint_parent_link].joint[cur_joint_struct.name] = cur_joint_struct + link_name_to_link_struct[cur_joint_parent_link].children[cur_joint_struct.name] = cur_child_link_struct.name + # cur_child_link_struct + # cur_parent_link_struct.joint.append(cur_joint_struct) + # cur_parent_link_struct.children.append(cur_child_link_struct) + else: + link_name_to_link_struct[cur_joint_parent_link].joint = { + cur_joint_struct.name: cur_joint_struct + } + link_name_to_link_struct[cur_joint_parent_link].children = { + cur_joint_struct.name: cur_child_link_struct.name + # cur_child_link_struct + } + # cur_parent_link_struct.joint = [cur_joint_struct] + # cur_parent_link_struct.children.append(cur_child_link_struct) + # pass + + + cur_robot_obj = Robot_urdf(cur_robot_links, link_name_to_link_idxes, link_name_to_link_struct, joint_name_to_joint_idx, actions_joint_name_to_joint_idx) + # tot_robots.append(cur_robot_obj) + + # for the joint robots # + # for every joint + # tot_actuators = [] + # actuators = tree.findall("./actuator/motor") + # joint_nm_to_joint_idx = {} + # i_act = 0 + # for cur_act in actuators: + # cur_act_joint_nm = cur_act.attrib["joint"] + # joint_nm_to_joint_idx[cur_act_joint_nm] = i_act + # i_act += 1 ### add the act ### + + # tot_robots[0].set_joint_idx(joint_nm_to_joint_idx) ### set joint idx here ### # tot robots # + # tot_robots[0].get_nn_pts() + # tot_robots[1].get_nn_pts() + + return cur_robot_obj + + +def get_name_to_state_from_str(states_str): + tot_states = states_str.split(" ") + tot_states = [float(cur_state) for cur_state in tot_states] + joint_name_to_state = {} + for i in range(len(tot_states)): + cur_joint_name = f"joint{i + 1}" + cur_joint_state = tot_states[i] + joint_name_to_state[cur_joint_name] = cur_joint_state + return joint_name_to_state + + +def merge_meshes(verts_list, faces_list): + nn_verts = 0 + tot_verts_list = [] + tot_faces_list = [] + for i_vv, cur_verts in enumerate(verts_list): + cur_verts_nn = cur_verts.size(0) + tot_verts_list.append(cur_verts) + tot_faces_list.append(faces_list[i_vv] + nn_verts) + nn_verts = nn_verts + cur_verts_nn + tot_verts_list = torch.cat(tot_verts_list, dim=0) + tot_faces_list = torch.cat(tot_faces_list, dim=0) + return tot_verts_list, tot_faces_list + + + +class RobotAgent: # robot and the robot # + def __init__(self, xml_fn) -> None: + 
self.xml_fn = xml_fn + # self.args = args + + ## + active_robot = parse_data_from_urdf(xml_fn) + + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ).cuda() + torch.nn.init.ones_(self.time_constant.weight) # + self.time_constant.weight.data = self.time_constant.weight.data * 0.2 ### time_constant data # + + self.optimizable_actions = nn.Embedding( + num_embeddings=100, embedding_dim=60, + ).cuda() + torch.nn.init.zeros_(self.optimizable_actions.weight) # + + self.learning_rate = 5e-4 + + self.active_robot = active_robot + + # # get init states # # + self.set_init_states() + init_visual_pts = self.get_init_state_visual_pts() + self.init_visual_pts = init_visual_pts + + + def set_init_states_target_value(self, init_states): + # glb_rot = torch.eye(n=3, dtype=torch.float32).cuda() + # glb_trans = torch.zeros((3,), dtype=torch.float32).cuda() ### glb_trans #### and the rot 3## + + # tot_init_states = {} + # tot_init_states['glb_rot'] = glb_rot; + # tot_init_states['glb_trans'] = glb_trans; + # tot_init_states['links_init_states'] = init_states + # self.active_robot.set_init_states_target_value(tot_init_states) + # init_joint_states = torch.zeros((60, ), dtype=torch.float32).cuda() + self.active_robot.set_initial_state(init_states) + + def set_init_states(self): + # glb_rot = torch.eye(n=3, dtype=torch.float32).cuda() + # glb_trans = torch.zeros((3,), dtype=torch.float32).cuda() ### glb_trans #### and the rot 3## + + # ### random rotation ### + # # glb_rot_np = R.random().as_matrix() + # # glb_rot = torch.from_numpy(glb_rot_np).float().cuda() + # ### random rotation ### + + # # glb_rot, glb_trans # + # init_states = {} # init states # + # init_states['glb_rot'] = glb_rot; # + # init_states['glb_trans'] = glb_trans; + # self.active_robot.set_init_states(init_states) + + init_joint_states = torch.zeros((60, ), dtype=torch.float32).cuda() + self.active_robot.set_initial_state(init_joint_states) + + def get_init_state_visual_pts(self, ): + # visual_pts_list = [] # compute the transformation via current state # + # visual_pts_list, visual_pts_mass_list = self.active_robot.compute_transformation_via_current_state( visual_pts_list) + cur_verts, cur_faces = self.active_robot.get_init_visual_pts() + self.faces = cur_faces + # init_visual_pts = visual_pts_list + return cur_verts + + def set_actions_and_update_states(self, actions, cur_timestep): + # + time_cons = self.time_constant(torch.zeros((1,), dtype=torch.long).cuda()) ### time constant of the system ## + self.active_robot.set_actions_and_update_states(actions, cur_timestep, time_cons) ### + pass + + def forward_stepping_test(self, ): + # delta_glb_rot; delta_glb_trans # + timestep_to_visual_pts = {} + for i_step in range(50): + actions = {} + actions['delta_glb_rot'] = torch.eye(3, dtype=torch.float32).cuda() + actions['delta_glb_trans'] = torch.zeros((3,), dtype=torch.float32).cuda() + actions_link_actions = torch.ones((22, ), dtype=torch.float32).cuda() + # actions_link_actions = actions_link_actions * 0.2 + actions_link_actions = actions_link_actions * -1. 
# + actions['link_actions'] = actions_link_actions + self.set_actions_and_update_states(actions=actions, cur_timestep=i_step) + + cur_visual_pts = robot_agent.get_init_state_visual_pts() + cur_visual_pts = cur_visual_pts.detach().cpu().numpy() + timestep_to_visual_pts[i_step + 1] = cur_visual_pts + return timestep_to_visual_pts + + def initialize_optimization(self, reference_pts_dict): + self.n_timesteps = 50 + # self.n_timesteps = 19 # first 19-timesteps optimization # + self.nn_tot_optimization_iters = 100 + # self.nn_tot_optimization_iters = 57 + # TODO: load reference points # + self.ts_to_reference_pts = np.load(reference_pts_dict, allow_pickle=True).item() #### + self.ts_to_reference_pts = { + ts // 2 + 1: torch.from_numpy(self.ts_to_reference_pts[ts]).float().cuda() for ts in self.ts_to_reference_pts + } + + + def forward_stepping_optimization(self, ): + nn_tot_optimization_iters = self.nn_tot_optimization_iters + params_to_train = [] + params_to_train += list(self.optimizable_actions.parameters()) + self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate) + + for i_iter in range(nn_tot_optimization_iters): + + tot_losses = [] + ts_to_robot_points = {} + for cur_ts in range(self.n_timesteps): + # print(f"iter: {i_iter}, cur_ts: {cur_ts}") + # actions = {} + # actions['delta_glb_rot'] = torch.eye(3, dtype=torch.float32).cuda() + # actions['delta_glb_trans'] = torch.zeros((3,), dtype=torch.float32).cuda() + actions_link_actions = self.optimizable_actions(torch.zeros((1,), dtype=torch.long).cuda() + cur_ts).squeeze(0) + # actions_link_actions = actions_link_actions * 0.2 + # actions_link_actions = actions_link_actions * -1. # + # actions['link_actions'] = actions_link_actions + # self.set_actions_and_update_states(actions=actions, cur_timestep=cur_ts) # update the interaction # + + with torch.no_grad(): + self.active_robot.calculate_inertia() + + self.active_robot.set_actions_and_update_states(actions_link_actions, cur_ts, 0.2) + + cur_visual_pts, cur_faces = self.active_robot.get_init_visual_pts() + ts_to_robot_points[cur_ts + 1] = cur_visual_pts.clone() + + cur_reference_pts = self.ts_to_reference_pts[cur_ts + 1] + diff = torch.sum((cur_visual_pts - cur_reference_pts) ** 2, dim=-1) + diff = diff.mean() + + # diff. + self.optimizer.zero_grad() + diff.backward(retain_graph=True) + # diff.backward(retain_graph=False) + self.optimizer.step() + + tot_losses.append(diff.item()) + + + loss = sum(tot_losses) / float(len(tot_losses)) + print(f"Iter: {i_iter}, average loss: {loss}") + # print(f"Iter: {i_iter}, average loss: {loss.item()}, start optimizing") + # self.optimizer.zero_grad() + # loss.backward() + # self.optimizer.step() + + self.ts_to_robot_points = { + ts: ts_to_robot_points[ts].detach().cpu().numpy() for ts in ts_to_robot_points + } + self.ts_to_ref_points = { + ts: self.ts_to_reference_pts[ts].detach().cpu().numpy() for ts in ts_to_robot_points + } + return self.ts_to_robot_points, self.ts_to_ref_points + + + + +def rotation_matrix_from_axis_angle(axis, angle): # rotation_matrix_from_axis_angle -> + # sin_ = np.sin(angle) # ti.math.sin(angle) + # cos_ = np.cos(angle) # ti.math.cos(angle) + sin_ = torch.sin(angle) # ti.math.sin(angle) + cos_ = torch.cos(angle) # ti.math.cos(angle) + u_x, u_y, u_z = axis[0], axis[1], axis[2] + u_xx = u_x * u_x + u_yy = u_y * u_y + u_zz = u_z * u_z + u_xy = u_x * u_y + u_xz = u_x * u_z + u_yz = u_y * u_z ## + + + row_a = torch.stack( + [cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. 
- cos_) - u_y * sin_], dim=0 + ) + # print(f"row_a: {row_a.size()}") + row_b = torch.stack( + [u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dim=0 + ) + # print(f"row_b: {row_b.size()}") + row_c = torch.stack( + [u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], dim=0 + ) + # print(f"row_c: {row_c.size()}") + + ### rot_mtx for the rot_mtx ### + rot_mtx = torch.stack( + [row_a, row_b, row_c], dim=-1 ### rot_matrix of he matrix ## + ) + + return rot_mtx + + + +#### Big TODO: the external contact forces from the manipulated object to the robot #### +if __name__=='__main__': + + urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/mano/mano_mean_nocoll_simplified.urdf" + robot_agent = RobotAgent(urdf_fn) + + ref_dict_npy = "reference_verts.npy" + robot_agent.initialize_optimization(ref_dict_npy) + ts_to_robot_points, ts_to_ref_points = robot_agent.forward_stepping_optimization() + np.save(f"ts_to_robot_points.npy", ts_to_robot_points) + np.save(f"ts_to_ref_points.npy", ts_to_ref_points) + exit(0) + + urdf_fn = "/home/xueyi/diffsim/NeuS/rsc/mano/mano_mean_nocoll_simplified.urdf" + cur_robot = parse_data_from_urdf(urdf_fn) + # self.init_vertices, self.init_faces + init_vertices, init_faces = cur_robot.init_vertices, cur_robot.init_faces + + + + init_vertices = init_vertices.detach().cpu().numpy() + init_faces = init_faces.detach().cpu().numpy() + + + ## initial states ehre ##3 + # mesh_obj = trimesh.Trimesh(vertices=init_vertices, faces=init_faces) + # mesh_obj.export(f"hand_urdf.ply") + + ##### Test the set initial state function ##### + init_joint_states = torch.zeros((60, ), dtype=torch.float32).cuda() + cur_robot.set_initial_state(init_joint_states) + ##### Test the set initial state function ##### + + + + + cur_zeros_actions = torch.zeros((60, ), dtype=torch.float32).cuda() + cur_ones_actions = torch.ones((60, ), dtype=torch.float32).cuda() # * 100 + + ts_to_mesh_verts = {} + for i_ts in range(50): + cur_robot.calculate_inertia() + + cur_robot.set_actions_and_update_states(cur_ones_actions, i_ts, 0.2) ### + + + cur_verts, cur_faces = cur_robot.get_init_visual_pts() + cur_mesh = trimesh.Trimesh(vertices=cur_verts.detach().cpu().numpy(), faces=cur_faces.detach().cpu().numpy()) + + ts_to_mesh_verts[i_ts + i_ts] = cur_verts.detach().cpu().numpy() + # cur_mesh.export(f"stated_mano_mesh.ply") + # cur_mesh.export(f"zero_actioned_mano_mesh.ply") + cur_mesh.export(f"ones_actioned_mano_mesh_ts_{i_ts}.ply") + + np.save(f"reference_verts.npy", ts_to_mesh_verts) + + exit(0) + + xml_fn = "/home/xueyi/diffsim/DiffHand/assets/hand_sphere.xml" + robot_agent = RobotAgent(xml_fn=xml_fn, args=None) + init_visual_pts = robot_agent.init_visual_pts.detach().cpu().numpy() + exit(0) + \ No newline at end of file diff --git a/models/dyn_model_utils.py b/models/dyn_model_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..82182a5c32e7fb0b3b6c9e51e71907b914e694b2 --- /dev/null +++ b/models/dyn_model_utils.py @@ -0,0 +1,1369 @@ + +import math +# import torch +# from ..utils import Timer +import numpy as np +# import torch.nn.functional as F +import os + +import argparse + +from xml.etree.ElementTree import ElementTree + +import trimesh +import torch +import torch.nn as nn +# import List +# class link; joint; body +### + +## calculate transformation to the frame ## + +## the joint idx ## +##### th_cuda_idx ##### + +# name to the main axis? 
# + +# def get_body_name_to_main_axis() +# another one is just getting the joint offset positions? +# and after the revolute transformation # # all revolute joint points #### +def get_body_name_to_main_axis(): + # negative y; positive x # + body_name_to_main_axis = { + "body2": -2, "body6": 1, "body10": 1, "body14": 1, "body17": 1 + } + return body_name_to_main_axis ## get the body name to main axis ## + +## insert one +def plane_rotation_matrix_from_angle_xz(angle): + ## angle of + sin_ = torch.sin(angle) + cos_ = torch.cos(angle) + zero_padding = torch.zeros_like(cos_) + one_padding = torch.ones_like(cos_) + col_a = torch.stack( + [cos_, zero_padding, sin_], dim=0 + ) + col_b = torch.stack( + [zero_padding, one_padding, zero_padding], dim=0 + ) + col_c = torch.stack( + [-1. * sin_, zero_padding, cos_], dim=0 + ) + rot_mtx = torch.stack( + [col_a, col_b, col_c], dim=-1 + ) + # col_a = torch.stack( + # [cos_, sin_], dim=0 ### col of the rotation matrix + # ) + # col_b = torch.stack( + # [-1. * sin_, cos_], dim=0 ## cols of the rotation matrix + # ) + # rot_mtx = torch.stack( + # [col_a, col_b], dim=-1 ### rotation matrix + # ) + return rot_mtx + +def plane_rotation_matrix_from_angle(angle): + ## angle of + sin_ = torch.sin(angle) + cos_ = torch.cos(angle) + col_a = torch.stack( + [cos_, sin_], dim=0 ### col of the rotation matrix + ) + col_b = torch.stack( + [-1. * sin_, cos_], dim=0 ## cols of the rotation matrix + ) + rot_mtx = torch.stack( + [col_a, col_b], dim=-1 ### rotation matrix + ) + return rot_mtx + +def rotation_matrix_from_axis_angle(axis, angle): # rotation_matrix_from_axis_angle -> + # sin_ = np.sin(angle) # ti.math.sin(angle) + # cos_ = np.cos(angle) # ti.math.cos(angle) + sin_ = torch.sin(angle) # ti.math.sin(angle) + cos_ = torch.cos(angle) # ti.math.cos(angle) + u_x, u_y, u_z = axis[0], axis[1], axis[2] + u_xx = u_x * u_x + u_yy = u_y * u_y + u_zz = u_z * u_z + u_xy = u_x * u_y + u_xz = u_x * u_z + u_yz = u_y * u_z ## + # rot_mtx = np.stack( + # [np.array([cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], dtype=np.float32), + # np.array([u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dtype=np.float32), + # np.array([u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], dtype=np.float32) + # ], axis=-1 ### np stack + # ) ## a single + + # rot_mtx = torch.stack( + # [ + # torch.tensor([cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], dtype=torch.float32), + # torch.tensor([u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dtype=torch.float32), + # torch.tensor([u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], dtype=torch.float32) + # ], dim=-1 ## stack those torch tensors ## + # ) + + row_a = torch.stack( + [cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], dim=0 + ) + # print(f"row_a: {row_a.size()}") + row_b = torch.stack( + [u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dim=0 + ) + # print(f"row_b: {row_b.size()}") + row_c = torch.stack( + [u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. 
- cos_)], dim=0 + ) + # print(f"row_c: {row_c.size()}") + + ### rot_mtx for the rot_mtx ### + rot_mtx = torch.stack( + [row_a, row_b, row_c], dim=-1 ### rot_matrix of he matrix ## + ) + + # rot_mtx = torch.stack( + # [ + + # torch.tensor([cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], dtype=torch.float32), + # torch.tensor([u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dtype=torch.float32), + # torch.tensor([u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], dtype=torch.float32) + # ], dim=-1 ## stack those torch tensors ## + # ) + + # rot_mtx_numpy = rot_mtx.to_numpy() + # rot_mtx_at_rot_mtx = rot_mtx @ rot_mtx.transpose() + # print(rot_mtx_at_rot_mtx) + return rot_mtx + +## joint name = "joint3" ## +# +class Joint: + def __init__(self, name, joint_type, axis, pos, quat, frame, damping, args) -> None: + self.name = name + self.type = joint_type + self.axis = axis + self.pos = pos + self.quat = quat + self.frame = frame + self.damping = damping + + self.args = args + + #### TODO: the dimension of the state vector ? #### + # self.state = 0. ## parameter + self.state = nn.Parameter( + torch.zeros((1,), dtype=torch.float32, requires_grad=True).cuda(self.args.th_cuda_idx), requires_grad=True + ) + # self.rot_mtx = np.eye(3, dtypes=np.float32) + # self.trans_vec = np.zeros((3,), dtype=np.float32) ## rot m + self.rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32, requires_grad=True).cuda(self.args.th_cuda_idx), requires_grad=True) + self.trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32, requires_grad=True).cuda(self.args.th_cuda_idx), requires_grad=True) + # self.rot_mtx = np.eye(3, dtype=np.float32) + # self.trans_vec = np.zeros((3,), dtype=np.float32) + + self.axis_rot_mtx = torch.tensor( + [ + [1, 0, 0], [0, -1, 0], [0, 0, -1] + ], dtype=torch.float32 + ).cuda(self.args.th_cuda_idx) + + self.joint_idx = -1 + + self.transformed_joint_pts = self.pos.clone() + + def print_grads(self, ): + print(f"rot_mtx: {self.rot_mtx.grad}") + print(f"trans_vec: {self.trans_vec.grad}") + + def clear_grads(self,): + if self.rot_mtx.grad is not None: + self.rot_mtx.grad.data = self.rot_mtx.grad.data * 0. + if self.trans_vec.grad is not None: + self.trans_vec.grad.data = self.trans_vec.grad.data * 0. 
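+
+    # Hedged sketch (kept entirely in comments so the added module still parses):
+    # compute_transformation() below realizes a rotation about the joint anchor
+    # `self.pos`. With R = rotation_matrix_from_axis_angle(self.axis, self.state)
+    # and t = pos - R @ pos, a point p maps to R @ p + t = R @ (p - pos) + pos,
+    # so the anchor itself stays fixed under the joint transform. Illustrative
+    # check with made-up values (the module otherwise assumes CUDA tensors):
+    #
+    #   axis = torch.tensor([0., 0., 1.])
+    #   theta = torch.tensor(0.5)
+    #   pos = torch.tensor([1., 0., 0.])
+    #   R = rotation_matrix_from_axis_angle(axis, theta)
+    #   t = pos - torch.matmul(R, pos.view(3, 1)).view(3)
+    #   assert torch.allclose(torch.matmul(R, pos.view(3, 1)).view(3) + t, pos)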
+ + def compute_transformation(self,): + # use the state to transform them # # transform # ## transform the state ## + # use the state to transform them # # transform them for the state # + if self.type == "revolute": + # print(f"computing transformation matrices with axis: {self.axis}, state: {self.state}") + # rotation matrix from the axis angle # + rot_mtx = rotation_matrix_from_axis_angle(self.axis, self.state) + # rot_mtx(p - p_v) + p_v -> rot_mtx p - rot_mtx p_v + p_v + # trans_vec = self.pos - np.matmul(rot_mtx, self.pos.reshape(3, 1)).reshape(3) + # self.rot_mtx = np.copy(rot_mtx) + # self.trans_vec = np.copy(trans_vec) + trans_vec = self.pos - torch.matmul(rot_mtx, self.pos.view(3, 1)).view(3).contiguous() + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec + else: + ### TODO: implement transformations for joints in other types ### + pass + + def set_state(self, name_to_state): + if self.name in name_to_state: + # self.state = name_to_state["name"] + self.state = name_to_state[self.name] ## + + def set_state_via_vec(self, state_vec): ### transform points via the state vectors here ### + if self.joint_idx >= 0: + self.state = state_vec[self.joint_idx] ## give the parameter to the parameters ## + + def set_joint_idx(self, joint_name_to_idx): + if self.name in joint_name_to_idx: + self.joint_idx = joint_name_to_idx[self.name] + + + def set_args(self, args): + self.args = args + + + def compute_transformation_via_state_vals(self, state_vals): + if self.joint_idx >= 0: + cur_joint_state = state_vals[self.joint_idx] + else: + cur_joint_state = self.state + # use the state to transform them # # transform # ## transform the state ## + # use the state to transform them # # transform them for the state # + if self.type == "revolute": + # print(f"computing transformation matrices with axis: {self.axis}, state: {self.state}") + # rotation matrix from the axis angle # + rot_mtx = rotation_matrix_from_axis_angle(self.axis, cur_joint_state) + # rot_mtx(p - p_v) + p_v -> rot_mtx p - rot_mtx p_v + p_v + # trans_vec = self.pos - np.matmul(rot_mtx, self.pos.reshape(3, 1)).reshape(3) + # self.rot_mtx = np.copy(rot_mtx) + # self.trans_vec = np.copy(trans_vec) + trans_vec = self.pos - torch.matmul(rot_mtx, self.pos.view(3, 1)).view(3).contiguous() + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec + elif self.type == "free2d": + cur_joint_state = state_vals # still only for the current scene # + # cur_joint_state + cur_joint_rot_val = state_vals[2] + ### rot_mtx ### ### rot_mtx ### + rot_mtx = plane_rotation_matrix_from_angle_xz(cur_joint_rot_val) + # rot_mtx = plane_rotation_matrix_from_angle(cur_joint_rot_val) ### 2 x 2 rot matrix # + # cur joint rot val # + # rot mtx of the rotation + # xy_val = + # axis_rot_mtx + # R_axis^T ( R R_axis (p) + trans (with the y-axis padded) ) + cur_trans_vec = torch.stack( + [state_vals[0], torch.zeros_like(state_vals[0]), state_vals[1]], dim=0 + ) + # cur_trans_vec # + rot_mtx = torch.matmul(self.axis_rot_mtx.transpose(1, 0), torch.matmul(rot_mtx, self.axis_rot_mtx)) + trans_vec = torch.matmul(self.axis_rot_mtx.transpose(1, 0), cur_trans_vec.unsqueeze(-1).contiguous()).squeeze(-1).contiguous() + self.pos + + self.rot_mtx = rot_mtx + self.trans_vec = trans_vec ## rot_mtx and trans_vec # + else: + ### TODO: implement transformations for joints in other types ### + pass + return self.rot_mtx, self.trans_vec + + def transform_joints_via_parent_rot_trans_infos(self, parent_rot_mtx, parent_trans_vec): + # + # if self.type == "revolute" or self.type == "free2d": + 
transformed_joint_pts = torch.matmul(parent_rot_mtx, self.pos.view(3 ,1).contiguous()).view(3).contiguous() + parent_trans_vec + # else: + self.transformed_joint_pts = transformed_joint_pts ### get self transformed joint pts here ### + return transformed_joint_pts + # if self.joint_idx >= 0: + # cur_joint_state = state_vals[self.joint_idx] + # else: + # cur_joint_state = self.state # state # + # # use the state to transform them # # transform ### transform the state ## + # # use the state to transform them # # transform them for the state # transform for the state # + # if self.type == "revolute": + # # print(f"computing transformation matrices with axis: {self.axis}, state: {self.state}") + # # rotation matrix from the axis angle # + # rot_mtx = rotation_matrix_from_axis_angle(self.axis, cur_joint_state) + # # rot_mtx(p - p_v) + p_v -> rot_mtx p - rot_mtx p_v + p_v + # # trans_vec = self.pos - np.matmul(rot_mtx, self.pos.reshape(3, 1)).reshape(3) + # # self.rot_mtx = np.copy(rot_mtx) + # # self.trans_vec = np.copy(trans_vec) + # trans_vec = self.pos - torch.matmul(rot_mtx, self.pos.view(3, 1)).view(3).contiguous() + # self.rot_mtx = rot_mtx + # self.trans_vec = trans_vec + # elif self.type == "free2d": + # cur_joint_state = state_vals # still only for the current scene # + # # cur_joint_state + # cur_joint_rot_val = state_vals[2] + # ### rot_mtx ### ### rot_mtx ### + # rot_mtx = plane_rotation_matrix_from_angle_xz(cur_joint_rot_val) + # # rot_mtx = plane_rotation_matrix_from_angle(cur_joint_rot_val) ### 2 x 2 rot matrix # + # # cur joint rot val # + # # rot mtx of the rotation + # # xy_val = + # # axis_rot_mtx + # # R_axis^T ( R R_axis (p) + trans (with the y-axis padded) ) + # cur_trans_vec = torch.stack( + # [state_vals[0], torch.zeros_like(state_vals[0]), state_vals[1]], dim=0 + # ) + # # cur_trans_vec # + # rot_mtx = torch.matmul(self.axis_rot_mtx.transpose(1, 0), torch.matmul(rot_mtx, self.axis_rot_mtx)) + # trans_vec = torch.matmul(self.axis_rot_mtx.transpose(1, 0), cur_trans_vec.unsqueeze(-1).contiguous()).squeeze(-1).contiguous() + self.pos + + # self.rot_mtx = rot_mtx + # self.trans_vec = trans_vec ## rot_mtx and trans_vec # + # else: + # ### TODO: implement transformations for joints in other types ### + # pass + # return self.rot_mtx, self.trans_vec + +## fixed joint -> translation and rotation ## +## revolute joint -> can be actuated ## +## set states and compute the transfromations in a top-to-down manner ## + +## trnasform the robot -> a list of qs ## +## a list of qs ## +## transform from the root of the robot; pass qs from the root to the leaf node ## +## visual meshes or visual meshes from the basic description of robots ## +## visual meshes; or visual points ## +## visual meshes -> transform them into the visual density values here ## +## visual meshes -> transform them into the ## into the visual counterparts ## +## ## visual meshes -> ## ## ## +# +class Body: + def __init__(self, name, body_type, filename, pos, quat, transform_type, density, mu, rgba, radius, args) -> None: + self.name = name + self.body_type = body_type + ### for mesh object ### + self.filename = filename + self.args = args + + self.pos = pos + self.quat = quat + self.transform_type = transform_type + self.density = density + self.mu = mu + self.rgba = rgba + + ### for sphere object ### + self.radius = radius + ## or vertices here # + ## pass them to the child and treat them as the parent transformation ## + + self.visual_pts_ref = None + self.visual_faces_ref = None + + self.visual_pts = None ## 
visual pts and + + self.body_name_to_main_axis = get_body_name_to_main_axis() ### get the body name to main axis here # + + self.get_visual_counterparts() + + + def update_radius(self,): + self.radius.data = self.radius.data - self.radius.grad.data + + self.radius.grad.data = self.radius.grad.data * 0. + + + def update_xml_file(self,): + xml_content_with_flexible_radius = f""" + + + +""" + xml_loading_fn = "/home/xueyi/diffsim/DiffHand/assets/hand_sphere_free_sphere_geo_test.xml" + with open(xml_loading_fn, "w") as wf: + wf.write(xml_content_with_flexible_radius) + wf.close() + + ### get visual pts colorrs ### ### + def get_visual_pts_colors(self, ): + tot_visual_pts_nn = self.visual_pts_ref.size(0) + # self.pts_rgba = [torch.from_numpy(self.rgba).float().cuda(self.args.th_cuda_idx) for _ in range(tot_visual_pts_nn)] # total visual pts nn + self.pts_rgba = [torch.tensor(self.rgba.data).cuda(self.args.th_cuda_idx) for _ in range(tot_visual_pts_nn)] # total visual pts nn skeletong + self.pts_rgba = torch.stack(self.pts_rgba, dim=0) # + return self.pts_rgba + + def get_visual_counterparts(self,): + ### TODO: implement this for visual counterparts ### mid line regression and name to body mapping relations --- for each body, how to calculate the midline and other properties? + ######## get body type ########## get visual midline of the input mesh and the mesh vertices? ######## # skeleton of the hand -> 21 points ? retarget from this hand to the mano hand and use the mano hand priors? + if self.body_type == "sphere": + filename = "/home/xueyi/diffsim/DiffHand/examples/save_res/hand_sphere_demo/meshes/18.obj" + if not os.path.exists(filename): + filename = "/data/xueyi/diffsim/DiffHand/assets/18.obj" + else: + filename = self.filename + rt_asset_path = "/home/xueyi/diffsim/DiffHand/assets" ### assets folder ### + if not os.path.exists(rt_asset_path): + rt_asset_path = "/data/xueyi/diffsim/DiffHand/assets" + filename = os.path.join(rt_asset_path, filename) + body_mesh = trimesh.load(filename, process=False) + # verts = np.array(body_mesh.vertices) + # faces = np.array(body_mesh.faces, dtype=np.long) + + # self.visual_pts_ref = np.copy(verts) ## verts ## + # self.visual_faces_ref = np.copy(faces) ## faces + # self.visual_pts_ref # + + #### body_mesh.vertices #### + # verts = torch.tensor(body_mesh.vertices, dtype=torch.float32).cuda(self.args.th_cuda_idx) + # faces = torch.tensor(body_mesh.faces, dtype=torch.long).cuda(self.args.th_cuda_idx) + #### body_mesh.vertices #### + + # self.pos = nn.Parameter( + # torch.tensor([0., 0., 0.], dtype=torch.float32, requires_grad=True).cuda(self.args.th_cuda_idx), requires_grad=True + # ) + + self.pos = nn.Parameter( + torch.tensor(self.pos.detach().cpu().tolist(), dtype=torch.float32, requires_grad=True).cuda(self.args.th_cuda_idx), requires_grad=True + ) + + ### Step 1 ### -> set the pos to the correct initial pose ### + + self.radius = nn.Parameter( + torch.tensor([self.args.initial_radius], dtype=torch.float32, requires_grad=True).cuda(self.args.th_cuda_idx), requires_grad=True + ) + ### visual pts ref ### ## body_mesh.vertices -> # + self.visual_pts_ref = torch.tensor(body_mesh.vertices, dtype=torch.float32).cuda(self.args.th_cuda_idx) + + # if self.name == "sphere": + # self.visual_pts_ref = self.visual_pts_ref / 2. 
# the initial radius + # self.visual_pts_ref = self.visual_pts_ref * self.radius ## multiple the initla radius # + + # self.visual_pts_ref = nn.Parameter( + # torch.tensor(body_mesh.vertices, dtype=torch.float32, requires_grad=True).cuda(self.args.th_cuda_idx), requires_grad=True + # ) + # self.visual_faces_ref = nn.Parameter( + # torch.tensor(body_mesh.faces, dtype=torch.long, requires_grad=True).cuda(self.args.th_cuda_idx), requires_grad=True + # ) + self.visual_faces_ref = torch.tensor(body_mesh.faces, dtype=torch.long).cuda(self.args.th_cuda_idx) + + # body_name_to_main_axis + # body_name_to_main_axis for the body_name_to_main_axis # + # visual_faces_ref # + # visual_pts_ref # + + minn_pts, _ = torch.min(self.visual_pts_ref, dim=0) ### get the visual pts minn ### + maxx_pts, _ = torch.max(self.visual_pts_ref, dim=0) ### visual pts maxx ### + mean_pts = torch.mean(self.visual_pts_ref, dim=0) ### mean_pts of the mean_pts ### + + if self.name in self.body_name_to_main_axis: + cur_main_axis = self.body_name_to_main_axis[self.name] ## get the body name ## + + if cur_main_axis == -2: + main_axis_pts = minn_pts[1] # the main axis pts + full_main_axis_pts = torch.tensor([mean_pts[0], main_axis_pts, mean_pts[2]], dtype=torch.float32).cuda(self.args.th_cuda_idx) + elif cur_main_axis == 1: + main_axis_pts = maxx_pts[0] # the maxx axis pts + full_main_axis_pts = torch.tensor([main_axis_pts, mean_pts[1], mean_pts[2]], dtype=torch.float32).cuda(self.args.th_cuda_idx) + self.full_main_axis_pts_ref = full_main_axis_pts + else: + self.full_main_axis_pts_ref = mean_pts.clone() ### get the mean pts ### + # mean_pts + # main_axis_pts = + + + # self.visual_pts_ref = verts # + # self.visual_faces_ref = faces # + # get visual points colors # the color should be an optimizable property # # # or init visual point colors here ## # or init visual point colors here # + # simulatoable assets ## for the + + def transform_visual_pts_ref(self,): + if self.name == "sphere": + visual_pts_ref = self.visual_pts_ref / 2. # + visual_pts_ref = visual_pts_ref * self.radius + else: + visual_pts_ref = self.visual_pts_ref + return visual_pts_ref + + def transform_visual_pts(self, rot_mtx, trans_vec): + visual_pts_ref = self.transform_visual_pts_ref() + # rot_mtx: 3 x 3 numpy array + # trans_vec: 3 numpy array + # print(f"transforming body with rot_mtx: {rot_mtx} and trans_vec: {trans_vec}") + # self.visual_pts = np.matmul(rot_mtx, self.visual_pts_ref.T).T + trans_vec.reshape(1, 3) # reshape # + # print(f"rot_mtx: {rot_mtx}, trans_vec: {trans_vec}") + self.visual_pts = torch.matmul(rot_mtx, visual_pts_ref.transpose(1, 0)).transpose(1, 0) + trans_vec.unsqueeze(0) + + # full_main_axis_pts -> + self.full_main_axis_pts = torch.matmul(rot_mtx, self.full_main_axis_pts_ref.unsqueeze(-1)).contiguous().squeeze(-1) + trans_vec + self.full_main_axis_pts = self.full_main_axis_pts.unsqueeze(0) + + return self.visual_pts + + def get_tot_transformed_joints(self, transformed_joints): + if self.name in self.body_name_to_main_axis: + transformed_joints.append(self.full_main_axis_pts) + return transformed_joints + + def get_nn_pts(self,): + self.nn_pts = self.visual_pts_ref.size(0) + return self.nn_pts + + def set_args(self, args): + self.args = args + + def clear_grad(self, ): + if self.pos.grad is not None: + self.pos.grad.data = self.pos.grad.data * 0. + if self.radius.grad is not None: + self.radius.grad.data = self.radius.grad.data * 0. 
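+
+    # Hedged usage sketch (comments only; names below are illustrative stand-ins):
+    # transform_visual_pts() above maps the reference vertices into the link frame
+    # as  pts_world = (R @ pts_ref^T)^T + t,  i.e. pts_ref @ R^T + t.
+    #
+    #   R = torch.eye(3)                     # link's accumulated rotation
+    #   t = torch.tensor([0., 0.1, 0.])      # link's accumulated translation
+    #   pts_ref = torch.rand(128, 3)         # stand-in for visual_pts_ref
+    #   pts_world = torch.matmul(R, pts_ref.transpose(1, 0)).transpose(1, 0) + t.unsqueeze(0)
+    #
+    # For a "sphere" body, transform_visual_pts_ref() first rescales the template
+    # vertices by the optimizable radius before this rigid transform is applied.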
+ + + # get the visual counterparts of the boyd mesh or elements # + + # xyz attribute ## ## xyz attribute # + +# use get_name_to_visual_pts +# use get_name_to_visual_pts_faces to get the transformed visual pts and faces # +class Link: + def __init__(self, name, joint: Joint, body: Body, children, args) -> None: + + self.joint = joint + self.body = body + self.children = children + self.name = name + + self.args = args + + # joint # parent_rot_mtx, parent_trans_vec + self.parent_rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32).cuda(self.args.th_cuda_idx), requires_grad=True) + self.parent_trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32).cuda(self.args.th_cuda_idx), requires_grad=True) + self.curr_rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32).cuda(self.args.th_cuda_idx), requires_grad=True) + self.curr_trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32).cuda(self.args.th_cuda_idx), requires_grad=True) + # + self.tot_rot_mtx = nn.Parameter(torch.eye(n=3, dtype=torch.float32).cuda(self.args.th_cuda_idx), requires_grad=True) + self.tot_trans_vec = nn.Parameter(torch.zeros((3,), dtype=torch.float32).cuda(self.args.th_cuda_idx), requires_grad=True) ## torch zeros # + + def print_grads(self, ): ### print grads here ### + print(f"parent_rot_mtx: {self.parent_rot_mtx.grad}") + print(f"parent_trans_vec: {self.parent_trans_vec.grad}") + print(f"curr_rot_mtx: {self.curr_rot_mtx.grad}") + print(f"curr_trans_vec: {self.curr_trans_vec.grad}") + print(f"tot_rot_mtx: {self.tot_rot_mtx.grad}") + print(f"tot_trans_vec: {self.tot_trans_vec.grad}") + print(f"Joint") + self.joint.print_grads() + for cur_link in self.children: + cur_link.print_grads() + + + def set_state(self, name_to_state): + self.joint.set_state(name_to_state=name_to_state) + for child_link in self.children: + child_link.set_state(name_to_state) + + + def set_state_via_vec(self, state_vec): # + self.joint.set_state_via_vec(state_vec) + for child_link in self.children: + child_link.set_state_via_vec(state_vec) + # if self.joint_idx >= 0: + # self.state = state_vec[self.joint_idx] + + ## + def get_tot_transformed_joints(self, transformed_joints): + cur_joint_transformed_pts = self.joint.transformed_joint_pts.unsqueeze(0) ### 3 pts + transformed_joints.append(cur_joint_transformed_pts) + transformed_joints = self.body.get_tot_transformed_joints(transformed_joints) + # if self.joint.name + for cur_link in self.children: + transformed_joints = cur_link.get_tot_transformed_joints(transformed_joints) + return transformed_joints + + def compute_transformation_via_state_vecs(self, state_vals, parent_rot_mtx, parent_trans_vec, visual_pts_list): + # state vecs and rot mtx # state vecs ##### + joint_rot_mtx, joint_trans_vec = self.joint.compute_transformation_via_state_vals(state_vals=state_vals) + + self.curr_rot_mtx = joint_rot_mtx + self.curr_trans_vec = joint_trans_vec + + self.joint.transform_joints_via_parent_rot_trans_infos(parent_rot_mtx=parent_rot_mtx, parent_trans_vec=parent_trans_vec) ## get rot and trans mtx and vecs ### + + tot_parent_rot_mtx = torch.matmul(parent_rot_mtx, joint_rot_mtx) + tot_parent_trans_vec = torch.matmul(parent_rot_mtx, joint_trans_vec.unsqueeze(-1)).view(3) + parent_trans_vec + + self.tot_rot_mtx = tot_parent_rot_mtx + self.tot_trans_vec = tot_parent_trans_vec + + # self.tot_rot_mtx = np.copy(tot_parent_rot_mtx) + # self.tot_trans_vec = np.copy(tot_parent_trans_vec) + + ### visual_pts_list for recording visual pts ### + + cur_body_visual_pts = 
self.body.transform_visual_pts(rot_mtx=self.tot_rot_mtx, trans_vec=self.tot_trans_vec) + visual_pts_list.append(cur_body_visual_pts) + + for cur_link in self.children: + # cur_link.parent_rot_mtx = np.copy(tot_parent_rot_mtx) ### set children parent rot mtx and the trans vec + # cur_link.parent_trans_vec = np.copy(tot_parent_trans_vec) ## + cur_link.parent_rot_mtx = tot_parent_rot_mtx ### set children parent rot mtx and the trans vec # + cur_link.parent_trans_vec = tot_parent_trans_vec ## + # cur_link.compute_transformation() ## compute self's transformations + cur_link.compute_transformation_via_state_vecs(state_vals, tot_parent_rot_mtx, tot_parent_trans_vec, visual_pts_list) + + def get_visual_pts_rgba_values(self, pts_rgba_vals_list): + + cur_body_visual_rgba_vals = self.body.get_visual_pts_colors() + pts_rgba_vals_list.append(cur_body_visual_rgba_vals) + + for cur_link in self.children: + cur_link.get_visual_pts_rgba_values(pts_rgba_vals_list) + + + + def compute_transformation(self,): + self.joint.compute_transformation() + # self.curr_rot_mtx = np.copy(self.joint.rot_mtx) + # self.curr_trans_vec = np.copy(self.joint.trans_vec) + + self.curr_rot_mtx = self.joint.rot_mtx + self.curr_trans_vec = self.joint.trans_vec + # rot_p (rot_c p + trans_c) + trans_p # + # rot_p rot_c p + rot_p trans_c + trans_p # + #### matmul #### + # tot_parent_rot_mtx = np.matmul(self.parent_rot_mtx, self.curr_rot_mtx) + # tot_parent_trans_vec = np.matmul(self.parent_rot_mtx, self.curr_trans_vec.reshape(3, 1)).reshape(3) + self.parent_trans_vec + + tot_parent_rot_mtx = torch.matmul(self.parent_rot_mtx, self.curr_rot_mtx) + tot_parent_trans_vec = torch.matmul(self.parent_rot_mtx, self.curr_trans_vec.unsqueeze(-1)).view(3) + self.parent_trans_vec + + self.tot_rot_mtx = tot_parent_rot_mtx + self.tot_trans_vec = tot_parent_trans_vec + + # self.tot_rot_mtx = np.copy(tot_parent_rot_mtx) + # self.tot_trans_vec = np.copy(tot_parent_trans_vec) + + for cur_link in self.children: + # cur_link.parent_rot_mtx = np.copy(tot_parent_rot_mtx) ### set children parent rot mtx and the trans vec + # cur_link.parent_trans_vec = np.copy(tot_parent_trans_vec) ## + cur_link.parent_rot_mtx = tot_parent_rot_mtx ### set children parent rot mtx and the trans vec # + cur_link.parent_trans_vec = tot_parent_trans_vec ## + cur_link.compute_transformation() ## compute self's transformations + + def get_name_to_visual_pts_faces(self, name_to_visual_pts_faces): + # transform_visual_pts # ## rot_mt + self.body.transform_visual_pts(rot_mtx=self.tot_rot_mtx, trans_vec=self.tot_trans_vec) + name_to_visual_pts_faces[self.body.name] = {"pts": self.body.visual_pts, "faces": self.body.visual_faces_ref} + for cur_link in self.children: + cur_link.get_name_to_visual_pts_faces(name_to_visual_pts_faces) ## transform the pts faces + + def get_visual_pts_list(self, visual_pts_list): + # transform_visual_pts # ## rot_mt + self.body.transform_visual_pts(rot_mtx=self.tot_rot_mtx, trans_vec=self.tot_trans_vec) + visual_pts_list.append(self.body.visual_pts) # body template # + # name_to_visual_pts_faces[self.body.name] = {"pts": self.body.visual_pts, "faces": self.body.visual_faces_ref} + for cur_link in self.children: + # cur_link.get_name_to_visual_pts_faces(name_to_visual_pts_faces) ## transform the pts faces + cur_link.get_visual_pts_list(visual_pts_list) + + + + def set_joint_idx(self, joint_name_to_idx): + self.joint.set_joint_idx(joint_name_to_idx) + for cur_link in self.children: + cur_link.set_joint_idx(joint_name_to_idx) + # if self.name in 
joint_name_to_idx: + # self.joint_idx = joint_name_to_idx[self.name] + + def get_nn_pts(self,): + nn_pts = 0 + nn_pts += self.body.get_nn_pts() + for cur_link in self.children: + nn_pts += cur_link.get_nn_pts() + self.nn_pts = nn_pts + return self.nn_pts + + def clear_grads(self,): + + if self.parent_rot_mtx.grad is not None: + self.parent_rot_mtx.grad.data = self.parent_rot_mtx.grad.data * 0. + if self.parent_trans_vec.grad is not None: + self.parent_trans_vec.grad.data = self.parent_trans_vec.grad.data * 0. + if self.curr_rot_mtx.grad is not None: + self.curr_rot_mtx.grad.data = self.curr_rot_mtx.grad.data * 0. + if self.curr_trans_vec.grad is not None: + self.curr_trans_vec.grad.data = self.curr_trans_vec.grad.data * 0. + if self.tot_rot_mtx.grad is not None: + self.tot_rot_mtx.grad.data = self.tot_rot_mtx.grad.data * 0. + if self.tot_trans_vec.grad is not None: + self.tot_trans_vec.grad.data = self.tot_trans_vec.grad.data * 0. + # print(f"parent_rot_mtx: {self.parent_rot_mtx.grad}") + # print(f"parent_trans_vec: {self.parent_trans_vec.grad}") + # print(f"curr_rot_mtx: {self.curr_rot_mtx.grad}") + # print(f"curr_trans_vec: {self.curr_trans_vec.grad}") + # print(f"tot_rot_mtx: {self.tot_rot_mtx.grad}") + # print(f"tot_trans_vec: {self.tot_trans_vec.grad}") + # print(f"Joint") + self.joint.clear_grads() + self.body.clear_grad() + for cur_link in self.children: + cur_link.clear_grads() + + def set_args(self, args): + self.args = args + for cur_link in self.children: + cur_link.set_args(args) + + + + +class Robot: # robot and the robot # + def __init__(self, children_links, args) -> None: + self.children = children_links + self.args = args + + def set_state(self, name_to_state): + for cur_link in self.children: + cur_link.set_state(name_to_state) + + def compute_transformation(self,): + for cur_link in self.children: + cur_link.compute_transformation() + + def get_name_to_visual_pts_faces(self, name_to_visual_pts_faces): + for cur_link in self.children: + cur_link.get_name_to_visual_pts_faces(name_to_visual_pts_faces) + + def get_visual_pts_list(self, visual_pts_list): + for cur_link in self.children: + cur_link.get_visual_pts_list(visual_pts_list) + + def set_joint_idx(self, joint_name_to_idx): + for cur_link in self.children: + cur_link.set_joint_idx(joint_name_to_idx) ### set joint idx ### + + def set_state_via_vec(self, state_vec): ### set the state vec for the state vec ### + for cur_link in self.children: ### set the state vec for the state vec ### + cur_link.set_state_via_vec(state_vec) + # self.joint.set_state_via_vec(state_vec) + # for child_link in self.children: + # child_link.set_state_via_vec(state_vec) + + # get_tot_transformed_joints + def get_tot_transformed_joints(self, transformed_joints): + for cur_link in self.children: # + transformed_joints = cur_link.get_tot_transformed_joints(transformed_joints) + return transformed_joints + + def get_nn_pts(self): + nn_pts = 0 + for cur_link in self.children: + nn_pts += cur_link.get_nn_pts() + self.nn_pts = nn_pts + return self.nn_pts + + def set_args(self, args): + self.args = args + for cur_link in self.children: ## args ## + cur_link.set_args(args) + + def print_grads(self): + for cur_link in self.children: + cur_link.print_grads() + + def clear_grads(self,): ## clear grads ## + for cur_link in self.children: + cur_link.clear_grads() + + def compute_transformation_via_state_vecs(self, state_vals, visual_pts_list): + # parent_rot_mtx, parent_trans_vec + for cur_link in self.children: + 
cur_link.compute_transformation_via_state_vecs(state_vals, cur_link.parent_rot_mtx, cur_link.parent_trans_vec, visual_pts_list) + return visual_pts_list + + # get_visual_pts_rgba_values(self, pts_rgba_vals_list): + def get_visual_pts_rgba_values(self, pts_rgba_vals_list): + for cur_link in self.children: + cur_link.get_visual_pts_rgba_values(pts_rgba_vals_list) + return pts_rgba_vals_list ## compute pts rgba vals list ## + +def parse_nparray_from_string(strr, args): + vals = strr.split(" ") + vals = [float(val) for val in vals] + vals = np.array(vals, dtype=np.float32) + vals = torch.from_numpy(vals).float() + ## vals ## + vals = nn.Parameter(vals.cuda(args.th_cuda_idx), requires_grad=True) + + return vals + + +### parse link data ### +def parse_link_data(link, args): + + link_name = link.attrib["name"] + # print(f"parsing link: {link_name}") ## joints body meshes # + + joint = link.find("./joint") + + joint_name = joint.attrib["name"] + joint_type = joint.attrib["type"] + if joint_type in ["revolute"]: ## a general xml parser here? + axis = joint.attrib["axis"] + axis = parse_nparray_from_string(axis, args=args) + else: + axis = None + pos = joint.attrib["pos"] # + pos = parse_nparray_from_string(pos, args=args) + quat = joint.attrib["quat"] + quat = parse_nparray_from_string(quat, args=args) + + try: + frame = joint.attrib["frame"] + except: + frame = "WORLD" + + if joint_type not in ["fixed"]: + damping = joint.attrib["damping"] + damping = float(damping) + else: + damping = 0.0 + + cur_joint = Joint(joint_name, joint_type, axis, pos, quat, frame, damping, args=args) + + body = link.find("./body") + body_name = body.attrib["name"] + body_type = body.attrib["type"] + if body_type == "mesh": + filename = body.attrib["filename"] + else: + filename = "" + + if body_type == "sphere": + radius = body.attrib["radius"] + radius = float(radius) + else: + radius = 0. 
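+    # The attributes read in this parser assume a DiffHand-style XML in which every
+    # <link> carries exactly one <joint> and one <body>, plus optional nested <link>
+    # children. A minimal sketch of the assumed layout (attribute values here are
+    # placeholders, not taken from the real asset files):
+    #   <link name="link0">
+    #     <joint name="joint0" type="revolute" axis="0 0 1" pos="0 0 0"
+    #            quat="1 0 0 0" frame="WORLD" damping="0.1"/>
+    #     <body name="body0" type="mesh" filename="body0.obj" pos="0 0 0"
+    #           quat="1 0 0 0" density="1000" mu="0.5" rgba="1 1 1 1"/>
+    #     <link> ... nested child links ... </link>
+    #   </link>
+    # Bodies that are neither "mesh" nor "sphere" fall back to filename="" and radius=0.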
+ + pos = body.attrib["pos"] + pos = parse_nparray_from_string(pos, args=args) + quat = body.attrib["quat"] + quat = joint.attrib["quat"] + try: + transform_type = body.attrib["transform_type"] + except: + transform_type = "OBJ_TO_WORLD" + density = body.attrib["density"] + density = float(density) + mu = body.attrib["mu"] + mu = float(mu) + try: ## rgba ## + rgba = body.attrib["rgba"] + rgba = parse_nparray_from_string(rgba, args=args) + except: + rgba = np.zeros((4,), dtype=np.float32) + + cur_body = Body(body_name, body_type, filename, pos, quat, transform_type, density, mu, rgba, radius, args=args) + + children_link = [] + links = link.findall("./link") + for child_link in links: # + cur_child_link = parse_link_data(child_link, args=args) + children_link.append(cur_child_link) + + link_name = link.attrib["name"] + link_obj = Link(link_name, joint=cur_joint, body=cur_body, children=children_link, args=args) + return link_obj + + + + +def parse_data_from_xml(xml_fn, args): + + tree = ElementTree() + tree.parse(xml_fn) + + ### get total robots ### + robots = tree.findall("./robot") + i_robot = 0 + tot_robots = [] + for cur_robot in robots: + print(f"Getting robot: {i_robot}") + i_robot += 1 + cur_links = cur_robot.findall("./link") + # i_link = 0 + cur_robot_links = [] + for cur_link in cur_links: ## child of the link ## + ### a parse link util -> the child of the link is composed of (the joint; body; and children links (with children or with no child here)) + # cur_link_name = cur_link.attrib["name"] + # print(f"Getting link: {i_link} with name: {cur_link_name}") + # i_link += 1 ## + cur_robot_links.append(parse_link_data(cur_link, args=args)) + cur_robot_obj = Robot(cur_robot_links, args=args) + tot_robots.append(cur_robot_obj) + + + tot_actuators = [] + actuators = tree.findall("./actuator/motor") + joint_nm_to_joint_idx = {} + i_act = 0 + for cur_act in actuators: + cur_act_joint_nm = cur_act.attrib["joint"] + joint_nm_to_joint_idx[cur_act_joint_nm] = i_act + i_act += 1 ### add the act ### + + tot_robots[0].set_joint_idx(joint_nm_to_joint_idx) ### set joint idx here ### # tot robots # + tot_robots[0].get_nn_pts() + tot_robots[1].get_nn_pts() + + return tot_robots + +def get_name_to_state_from_str(states_str): + tot_states = states_str.split(" ") + tot_states = [float(cur_state) for cur_state in tot_states] + joint_name_to_state = {} + for i in range(len(tot_states)): + cur_joint_name = f"joint{i + 1}" + cur_joint_state = tot_states[i] + joint_name_to_state[cur_joint_name] = cur_joint_state + return joint_name_to_state + +def create_zero_states(): + nn_joints = 17 + joint_name_to_state = {} + for i_j in range(nn_joints): + cur_joint_name = f"joint{i_j + 1}" + joint_name_to_state[cur_joint_name] = 0. 
+ return joint_name_to_state + +# [6.96331033e-17 3.54807679e-06 1.74046190e-15 2.66367417e-05 +# 1.22444894e-05 3.38976792e-06 1.46917635e-15 2.66367383e-05 +# 1.22444882e-05 3.38976786e-06 1.97778813e-15 2.66367383e-05 +# 1.22444882e-05 3.38976786e-06 4.76033293e-16 1.26279884e-05 +# 3.51189993e-06 0.00000000e+00 4.89999978e-03 0.00000000e+00] + + +def rotation_matrix_from_axis_angle_np(axis, angle): # rotation_matrix_from_axis_angle -> + sin_ = np.sin(angle) # ti.math.sin(angle) + cos_ = np.cos(angle) # ti.math.cos(angle) + # sin_ = torch.sin(angle) # ti.math.sin(angle) + # cos_ = torch.cos(angle) # ti.math.cos(angle) + u_x, u_y, u_z = axis[0], axis[1], axis[2] + u_xx = u_x * u_x + u_yy = u_y * u_y + u_zz = u_z * u_z + u_xy = u_x * u_y + u_xz = u_x * u_z + u_yz = u_y * u_z ## + + + row_a = np.stack( + [cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], axis=0 + ) + # print(f"row_a: {row_a.size()}") + row_b = np.stack( + [u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], axis=0 + ) + # print(f"row_b: {row_b.size()}") + row_c = np.stack( + [u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], axis=0 + ) + # print(f"row_c: {row_c.size()}") + + ### rot_mtx for the rot_mtx ### + rot_mtx = np.stack( + [row_a, row_b, row_c], axis=-1 ### rot_matrix of he matrix ## + ) + + return rot_mtx + + + + +def rotation_matrix_from_axis_angle(axis, angle): # rotation_matrix_from_axis_angle -> + # sin_ = np.sin(angle) # ti.math.sin(angle) + # cos_ = np.cos(angle) # ti.math.cos(angle) + sin_ = torch.sin(angle) # ti.math.sin(angle) + cos_ = torch.cos(angle) # ti.math.cos(angle) + u_x, u_y, u_z = axis[0], axis[1], axis[2] + u_xx = u_x * u_x + u_yy = u_y * u_y + u_zz = u_z * u_z + u_xy = u_x * u_y + u_xz = u_x * u_z + u_yz = u_y * u_z ## + + + row_a = torch.stack( + [cos_ + u_xx * (1 - cos_), u_xy * (1. - cos_) + u_z * sin_, u_xz * (1. - cos_) - u_y * sin_], dim=0 + ) + # print(f"row_a: {row_a.size()}") + row_b = torch.stack( + [u_xy * (1. - cos_) - u_z * sin_, cos_ + u_yy * (1. - cos_), u_yz * (1. - cos_) + u_x * sin_], dim=0 + ) + # print(f"row_b: {row_b.size()}") + row_c = torch.stack( + [u_xz * (1. - cos_) + u_y * sin_, u_yz * (1. - cos_) - u_x * sin_, cos_ + u_zz * (1. - cos_)], dim=0 + ) + # print(f"row_c: {row_c.size()}") + + ### rot_mtx for the rot_mtx ### + rot_mtx = torch.stack( + [row_a, row_b, row_c], dim=-1 ### rot_matrix of he matrix ## + ) + + return rot_mtx + + +def get_camera_to_world_poses(n=10, ): + ## sample from the upper half sphere ## + # theta and phi for the + theta = np.random.uniform(low=0.0, high=1.0, size=(n,)) * np.pi * 2. 
# xz palne # + phi = np.random.uniform(low=-1.0, high=0.0, size=(n,)) * np.pi ## [-0.5 \pi, 0.5 \pi] ## negative pi to the original pi + # theta = torch.from_numpy(theta).float().cuda() + tot_c2w_matrix = [] + for i_n in range(n): + # y_rot_vec = torch.tensor([0., 1., 0.]).float().cuda(th_cuda_idx) + # y_rot_mtx = load_utils.rotation_matrix_from_axis_angle(rot_vec, rot_angle) + + + z_axis_rot_axis = np.array([0, 0, 1.], dtype=np.float32) + z_axis_rot_angle = np.pi - theta[i_n] + z_axis_rot_matrix = rotation_matrix_from_axis_angle_np(z_axis_rot_axis, z_axis_rot_angle) + rotated_plane_rot_axis_ori = np.array([1, -1, 0], dtype=np.float32) + rotated_plane_rot_axis_ori = rotated_plane_rot_axis_ori / np.sqrt(np.sum(rotated_plane_rot_axis_ori ** 2)) + rotated_plane_rot_axis = np.matmul(z_axis_rot_matrix, rotated_plane_rot_axis_ori) + + plane_rot_angle = phi[i_n] + plane_rot_matrix = rotation_matrix_from_axis_angle_np(rotated_plane_rot_axis, plane_rot_angle) + + c2w_matrix = np.matmul(plane_rot_matrix, z_axis_rot_matrix) + c2w_trans_matrix = np.array( + [np.cos(theta[i_n]) * np.sin(phi[i_n]), np.sin(theta[i_n]) * np.sin(phi[i_n]), np.cos(phi[i_n])], dtype=np.float32 + ) + c2w_matrix = np.concatenate( + [c2w_matrix, c2w_trans_matrix.reshape(3, 1)], axis=-1 + ) ##c2w matrix + tot_c2w_matrix.append(c2w_matrix) + tot_c2w_matrix = np.stack(tot_c2w_matrix, axis=0) + return tot_c2w_matrix + + +def get_camera_to_world_poses_th(n=10, th_cuda_idx=0): + ## sample from the upper half sphere ## + # theta and phi for the + theta = np.random.uniform(low=0.0, high=1.0, size=(n,)) * np.pi * 2. # xz palne # + phi = np.random.uniform(low=-1.0, high=0.0, size=(n,)) * np.pi ## [-0.5 \pi, 0.5 \pi] ## negative pi to the original pi + + # n_total = 14 + # n_xz = 14 + # n_y = 7 + # theta = [i_xz * 1.0 / float(n_xz) * np.pi * 2. 
for i_xz in range(n_xz)] + # phi = [i_y * (-1.0) / float(n_y) * np.pi for i_y in range(n_y)] + + + theta = torch.from_numpy(theta).float().cuda(th_cuda_idx) + phi = torch.from_numpy(phi).float().cuda(th_cuda_idx) + + tot_c2w_matrix = [] + for i_n in range(n): # if use veyr dense views like those + y_rot_angle = theta[i_n] + y_rot_vec = torch.tensor([0., 1., 0.]).float().cuda(th_cuda_idx) + y_rot_mtx = rotation_matrix_from_axis_angle(y_rot_vec, y_rot_angle) + + x_axis = torch.tensor([1., 0., 0.]).float().cuda(th_cuda_idx) + y_rot_x_axis = torch.matmul(y_rot_mtx, x_axis.unsqueeze(-1)).squeeze(-1) ### y_rot_x_axis # + + x_rot_angle = phi[i_n] + x_rot_mtx = rotation_matrix_from_axis_angle(y_rot_x_axis, x_rot_angle) + + rot_mtx = torch.matmul(x_rot_mtx, y_rot_mtx) + xyz_offset = torch.tensor([0., 0., 1.5]).float().cuda(th_cuda_idx) + rot_xyz_offset = torch.matmul(rot_mtx, xyz_offset.unsqueeze(-1)).squeeze(-1).contiguous() + 0.5 ### 3 for the xyz offset + + c2w_matrix = torch.cat( + [rot_mtx, rot_xyz_offset.unsqueeze(-1)], dim=-1 + ) + tot_c2w_matrix.append(c2w_matrix) + + + # z_axis_rot_axis = np.array([0, 0, 1.], dtype=np.float32) + # z_axis_rot_angle = np.pi - theta[i_n] + # z_axis_rot_matrix = rotation_matrix_from_axis_angle_np(z_axis_rot_axis, z_axis_rot_angle) + # rotated_plane_rot_axis_ori = np.array([1, -1, 0], dtype=np.float32) + # rotated_plane_rot_axis_ori = rotated_plane_rot_axis_ori / np.sqrt(np.sum(rotated_plane_rot_axis_ori ** 2)) + # rotated_plane_rot_axis = np.matmul(z_axis_rot_matrix, rotated_plane_rot_axis_ori) + + # plane_rot_angle = phi[i_n] + # plane_rot_matrix = rotation_matrix_from_axis_angle_np(rotated_plane_rot_axis, plane_rot_angle) + + # c2w_matrix = np.matmul(plane_rot_matrix, z_axis_rot_matrix) + # c2w_trans_matrix = np.array( + # [np.cos(theta[i_n]) * np.sin(phi[i_n]), np.sin(theta[i_n]) * np.sin(phi[i_n]), np.cos(phi[i_n])], dtype=np.float32 + # ) + # c2w_matrix = np.concatenate( + # [c2w_matrix, c2w_trans_matrix.reshape(3, 1)], axis=-1 + # ) ##c2w matrix + # tot_c2w_matrix.append(c2w_matrix) + # tot_c2w_matrix = np.stack(tot_c2w_matrix, axis=0) + tot_c2w_matrix = torch.stack(tot_c2w_matrix, dim=0) + return tot_c2w_matrix + + +def get_camera_to_world_poses_th_routine_1(n=7, th_cuda_idx=0): + ## sample from the upper half sphere ## + # theta and phi for the + + # theta = np.random.uniform(low=0.0, high=1.0, size=(n,)) * np.pi * 2. # xz palne # + # phi = np.random.uniform(low=-1.0, high=0.0, size=(n,)) * np.pi ## [-0.5 \pi, 0.5 \pi] ## negative pi to the original pi + + # n_total = 14 + n_xz = 2 * n # 14 + n_y = n # 7 + theta = [i_xz * 1.0 / float(n_xz) * np.pi * 2. 
for i_xz in range(n_xz)] + phi = [i_y * (-1.0) / float(n_y) * np.pi for i_y in range(n_y)] + + theta = torch.tensor(theta).float().cuda(th_cuda_idx) + phi = torch.tensor(phi).float().cuda(th_cuda_idx) + # theta = torch.from_numpy(theta).float().cuda(th_cuda_idx) + # phi = torch.from_numpy(phi).float().cuda(th_cuda_idx) + + tot_c2w_matrix = [] + + for i_theta in range(theta.size(0)): + for i_phi in range(phi.size(0)): + y_rot_angle = theta[i_theta] + y_rot_vec = torch.tensor([0., 1., 0.]).float().cuda(th_cuda_idx) + y_rot_mtx = rotation_matrix_from_axis_angle(y_rot_vec, y_rot_angle) + + x_axis = torch.tensor([1., 0., 0.]).float().cuda(th_cuda_idx) + y_rot_x_axis = torch.matmul(y_rot_mtx, x_axis.unsqueeze(-1)).squeeze(-1) ### y_rot_x_axis # + + x_rot_angle = phi[i_phi] + x_rot_mtx = rotation_matrix_from_axis_angle(y_rot_x_axis, x_rot_angle) + + rot_mtx = torch.matmul(x_rot_mtx, y_rot_mtx) + xyz_offset = torch.tensor([0., 0., 1.5]).float().cuda(th_cuda_idx) + rot_xyz_offset = torch.matmul(rot_mtx, xyz_offset.unsqueeze(-1)).squeeze(-1).contiguous() + 0.5 ### 3 for the xyz offset + + c2w_matrix = torch.cat( + [rot_mtx, rot_xyz_offset.unsqueeze(-1)], dim=-1 + ) + tot_c2w_matrix.append(c2w_matrix) + + tot_c2w_matrix = torch.stack(tot_c2w_matrix, dim=0) + return tot_c2w_matrix + + +def get_camera_to_world_poses_th_routine_2(n=7, th_cuda_idx=0): + ## sample from the upper half sphere ## + # theta and phi for the + + # theta = np.random.uniform(low=0.0, high=1.0, size=(n,)) * np.pi * 2. # xz palne # + # phi = np.random.uniform(low=-1.0, high=0.0, size=(n,)) * np.pi ## [-0.5 \pi, 0.5 \pi] ## negative pi to the original pi + + # n_total = 14 + n_xz = 2 * n # 14 + n_y = 2 * n # 7 + theta = [i_xz * 1.0 / float(n_xz) * np.pi * 2. for i_xz in range(n_xz)] + # phi = [i_y * (-1.0) / float(n_y) * np.pi for i_y in range(n_y)] + phi = [i_y * (-1.0) / float(n_y) * np.pi * 2. for i_y in range(n_y)] + + theta = torch.tensor(theta).float().cuda(th_cuda_idx) + phi = torch.tensor(phi).float().cuda(th_cuda_idx) + # theta = torch.from_numpy(theta).float().cuda(th_cuda_idx) + # phi = torch.from_numpy(phi).float().cuda(th_cuda_idx) + + tot_c2w_matrix = [] + + for i_theta in range(theta.size(0)): + for i_phi in range(phi.size(0)): + y_rot_angle = theta[i_theta] + y_rot_vec = torch.tensor([0., 1., 0.]).float().cuda(th_cuda_idx) + y_rot_mtx = rotation_matrix_from_axis_angle(y_rot_vec, y_rot_angle) + + x_axis = torch.tensor([1., 0., 0.]).float().cuda(th_cuda_idx) + y_rot_x_axis = torch.matmul(y_rot_mtx, x_axis.unsqueeze(-1)).squeeze(-1) ### y_rot_x_axis # + + x_rot_angle = phi[i_phi] + x_rot_mtx = rotation_matrix_from_axis_angle(y_rot_x_axis, x_rot_angle) + + rot_mtx = torch.matmul(x_rot_mtx, y_rot_mtx) + xyz_offset = torch.tensor([0., 0., 1.5]).float().cuda(th_cuda_idx) + rot_xyz_offset = torch.matmul(rot_mtx, xyz_offset.unsqueeze(-1)).squeeze(-1).contiguous() + 0.5 ### 3 for the xyz offset + + c2w_matrix = torch.cat( + [rot_mtx, rot_xyz_offset.unsqueeze(-1)], dim=-1 + ) + tot_c2w_matrix.append(c2w_matrix) + + tot_c2w_matrix = torch.stack(tot_c2w_matrix, dim=0) + return tot_c2w_matrix + + + + + + +if __name__=='__main__': + xml_fn = "/home/xueyi/diffsim/DiffHand/assets/hand_sphere.xml" + tot_robots = parse_data_from_xml(xml_fn=xml_fn) + # tot_robots = + + active_optimized_states = """-0.00025872 -0.00025599 -0.00025296 -0.00022881 -0.00024449 -0.0002549 -0.00025296 -0.00022881 -0.00024449 -0.0002549 -0.00025296 -0.00022881 -0.00024449 -0.0002549 -0.00025694 -0.00024656 -0.00025556 0. 
0.0049 0.""" + active_optimized_states = """-1.10617972 -1.10742263 -1.06198363 -1.03212746 -1.05429142 -1.08617289 -1.05868192 -1.01624365 -1.04478191 -1.08260959 -1.06719107 -1.04082455 -1.05995886 -1.08674006 -1.09396691 -1.08965532 -1.10036577 -10.7117466 -3.62511998 1.49450353""" + # active_goal_optimized_states = """-1.10617972 -1.10742263 -1.0614858 -1.03189609 -1.05404354 -1.08610468 -1.05863293 -1.0174248 -1.04576456 -1.08297396 -1.06719107 -1.04082455 -1.05995886 -1.08674006 -1.09396691 -1.08965532 -1.10036577 -10.73396897 -3.68095432 1.50679285""" + active_optimized_states = """-0.42455298 -0.42570447 -0.40567708 -0.39798589 -0.40953955 -0.42025055 -0.37910662 -0.496165 -0.37664644 -0.41942727 -0.40596508 -0.3982109 -0.40959847 -0.42024905 -0.41835001 -0.41929961 -0.42365131 -1.18756073 -2.90337822 0.4224685""" + active_optimized_states = """-0.42442816 -0.42557961 -0.40366201 -0.3977891 -0.40947627 -0.4201424 -0.3799285 -0.3808375 -0.37953552 -0.42039598 -0.4058405 -0.39808804 -0.40947487 -0.42012458 -0.41822534 -0.41917521 -0.4235266 -0.87189658 -1.42093761 0.21977979""" + + active_robot = tot_robots[0] + zero_states = create_zero_states() + active_robot.set_state(zero_states) + active_robot.compute_transformation() + name_to_visual_pts_surfaces = {} + active_robot.get_name_to_visual_pts_faces(name_to_visual_pts_surfaces) + print(len(name_to_visual_pts_surfaces)) + + sv_res_rt = "/home/xueyi/diffsim/DiffHand/examples/save_res" + sv_res_rt = os.path.join(sv_res_rt, "load_utils_test") + os.makedirs(sv_res_rt, exist_ok=True) + + tmp_visual_res_sv_fn = os.path.join(sv_res_rt, f"res_with_zero_states.npy") + np.save(tmp_visual_res_sv_fn, name_to_visual_pts_surfaces) + print(f"tmp visual res saved to {tmp_visual_res_sv_fn}") + + + optimized_states = get_name_to_state_from_str(active_optimized_states) + active_robot.set_state(optimized_states) + active_robot.compute_transformation() + name_to_visual_pts_surfaces = {} + active_robot.get_name_to_visual_pts_faces(name_to_visual_pts_surfaces) + print(len(name_to_visual_pts_surfaces)) + # sv_res_rt = "/home/xueyi/diffsim/DiffHand/examples/save_res" + # sv_res_rt = os.path.join(sv_res_rt, "load_utils_test") + # os.makedirs(sv_res_rt, exist_ok=True) + + # tmp_visual_res_sv_fn = os.path.join(sv_res_rt, f"res_with_optimized_states.npy") + tmp_visual_res_sv_fn = os.path.join(sv_res_rt, f"active_ngoal_res_with_optimized_states_goal_n3.npy") + np.save(tmp_visual_res_sv_fn, name_to_visual_pts_surfaces) + print(f"tmp visual res with optimized states saved to {tmp_visual_res_sv_fn}") + diff --git a/models/embedder.py b/models/embedder.py new file mode 100644 index 0000000000000000000000000000000000000000..c74f16bd03f6425bf9e9e8db814171d6a7b04bfd --- /dev/null +++ b/models/embedder.py @@ -0,0 +1,51 @@ +import torch +import torch.nn as nn + + +# Positional encoding embedding. Code was taken from https://github.com/bmild/nerf. +class Embedder: + def __init__(self, **kwargs): + self.kwargs = kwargs + self.create_embedding_fn() + + def create_embedding_fn(self): + embed_fns = [] + d = self.kwargs['input_dims'] + out_dim = 0 + if self.kwargs['include_input']: + embed_fns.append(lambda x: x) + out_dim += d + + max_freq = self.kwargs['max_freq_log2'] + N_freqs = self.kwargs['num_freqs'] + + if self.kwargs['log_sampling']: + freq_bands = 2. 
** torch.linspace(0., max_freq, N_freqs) + else: + freq_bands = torch.linspace(2.**0., 2.**max_freq, N_freqs) + + for freq in freq_bands: + for p_fn in self.kwargs['periodic_fns']: + embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq)) + out_dim += d + + self.embed_fns = embed_fns + self.out_dim = out_dim + + def embed(self, inputs): + return torch.cat([fn(inputs) for fn in self.embed_fns], -1) + + +def get_embedder(multires, input_dims=3): + embed_kwargs = { + 'include_input': True, + 'input_dims': input_dims, + 'max_freq_log2': multires-1, + 'num_freqs': multires, + 'log_sampling': True, + 'periodic_fns': [torch.sin, torch.cos], + } + + embedder_obj = Embedder(**embed_kwargs) + def embed(x, eo=embedder_obj): return eo.embed(x) + return embed, embedder_obj.out_dim diff --git a/models/fields.py b/models/fields.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc6b7a1637a76641d99a0f44841e491dbb5f249 --- /dev/null +++ b/models/fields.py @@ -0,0 +1,9373 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from models.embedder import get_embedder + +from scipy.spatial import KDTree +from torch.utils.data.sampler import WeightedRandomSampler +from torch.distributions.categorical import Categorical +from torch.distributions.uniform import Uniform + +def batched_index_select(values, indices, dim = 1): + value_dims = values.shape[(dim + 1):] + values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices)) + indices = indices[(..., *((None,) * len(value_dims)))] + indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims) + value_expand_len = len(indices_shape) - (dim + 1) + values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)] + + value_expand_shape = [-1] * len(values.shape) + expand_slice = slice(dim, (dim + value_expand_len)) + value_expand_shape[expand_slice] = indices.shape[expand_slice] + values = values.expand(*value_expand_shape) + + dim += value_expand_len + return values.gather(dim, indices) + +def update_quaternion(delta_angle, prev_quat): + s1 = 0 + s2 = prev_quat[0] + v2 = prev_quat[1:] + v1 = delta_angle / 2 + new_v = s1 * v2 + s2 * v1 + torch.cross(v1, v2) + new_s = s1 * s2 - torch.sum(v1 * v2) + new_quat = torch.cat([new_s.unsqueeze(0), new_v], dim=0) + return new_quat + +# def euler_to_quaternion(yaw, pitch, roll): +def euler_to_quaternion(roll, pitch, yaw): + qx = torch.sin(roll/2) * torch.cos(pitch/2) * torch.cos(yaw/2) - torch.cos(roll/2) * torch.sin(pitch/2) * torch.sin(yaw/2) + qy = torch.cos(roll/2) * torch.sin(pitch/2) * torch.cos(yaw/2) + torch.sin(roll/2) * torch.cos(pitch/2) * torch.sin(yaw/2) + qz = torch.cos(roll/2) * torch.cos(pitch/2) * torch.sin(yaw/2) - torch.sin(roll/2) * torch.sin(pitch/2) * torch.cos(yaw/2) + qw = torch.cos(roll/2) * torch.cos(pitch/2) * torch.cos(yaw/2) + torch.sin(roll/2) * torch.sin(pitch/2) * torch.sin(yaw/2) + + # qx = torch.sin() + return [qw, qx, qy, qz] + # return [qx, qy, qz, qw] + +def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as quaternions to rotation matrices. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + r, i, j, k = torch.unbind(quaternions, -1) # -1 for the quaternion matrix # + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. 
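+    # two_s computed below is 2 / |q|^2: with this scaling the matrix entries give the
+    # standard quaternion rotation matrix even for unnormalized quaternions, and two_s
+    # reduces to exactly 2 when |q| = 1.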
+ two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + +# This implementation is borrowed from IDR: https://github.com/lioryariv/idr +class SDFNetwork(nn.Module): + def __init__(self, + d_in, + d_out, + d_hidden, + n_layers, + skip_in=(4,), + multires=0, + bias=0.5, + scale=1, + geometric_init=True, + weight_norm=True, + inside_outside=False): + super(SDFNetwork, self).__init__() + + dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out] + + self.embed_fn_fine = None + + if multires > 0: + embed_fn, input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + dims[0] = input_ch + + self.num_layers = len(dims) + self.skip_in = skip_in + self.scale = scale + + for l in range(0, self.num_layers - 1): + if l + 1 in self.skip_in: + out_dim = dims[l + 1] - dims[0] + else: + out_dim = dims[l + 1] + + lin = nn.Linear(dims[l], out_dim) + + if geometric_init: + if l == self.num_layers - 2: + if not inside_outside: + torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001) + torch.nn.init.constant_(lin.bias, -bias) + else: + torch.nn.init.normal_(lin.weight, mean=-np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001) + torch.nn.init.constant_(lin.bias, bias) + elif multires > 0 and l == 0: + torch.nn.init.constant_(lin.bias, 0.0) + torch.nn.init.constant_(lin.weight[:, 3:], 0.0) + torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim)) + elif multires > 0 and l in self.skip_in: + torch.nn.init.constant_(lin.bias, 0.0) + torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)) + torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3):], 0.0) + else: + torch.nn.init.constant_(lin.bias, 0.0) + torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)) + + if weight_norm: + lin = nn.utils.weight_norm(lin) + + setattr(self, "lin" + str(l), lin) + + self.activation = nn.Softplus(beta=100) + + def forward(self, inputs): + inputs = inputs * self.scale + if self.embed_fn_fine is not None: # input; input fn fine # + inputs = self.embed_fn_fine(inputs) + + x = inputs + for l in range(0, self.num_layers - 1): + lin = getattr(self, "lin" + str(l)) + + if l in self.skip_in: + x = torch.cat([x, inputs], 1) / np.sqrt(2) + + x = lin(x) + + if l < self.num_layers - 2: + x = self.activation(x) + return torch.cat([x[:, :1] / self.scale, x[:, 1:]], dim=-1) + + def sdf(self, x): + return self.forward(x)[:, :1] + + def sdf_hidden_appearance(self, x): + return self.forward(x) + + def gradient(self, x): + x.requires_grad_(True) + y = self.sdf(x) + d_output = torch.ones_like(y, requires_grad=False, device=y.device) + gradients = torch.autograd.grad( + outputs=y, + inputs=x, + grad_outputs=d_output, + create_graph=True, + retain_graph=True, + only_inputs=True)[0] + return gradients.unsqueeze(1) + + +# This implementation is borrowed from IDR: https://github.com/lioryariv/idr +class RenderingNetwork(nn.Module): + def __init__(self, + d_feature, + mode, + d_in, + d_out, + d_hidden, + n_layers, + weight_norm=True, + multires_view=0, + squeeze_out=True): + super().__init__() + + self.mode = mode + self.squeeze_out = squeeze_out + dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + 
[d_out] + + self.embedview_fn = None + if multires_view > 0: + embedview_fn, input_ch = get_embedder(multires_view) + self.embedview_fn = embedview_fn + dims[0] += (input_ch - 3) + + self.num_layers = len(dims) + + for l in range(0, self.num_layers - 1): + out_dim = dims[l + 1] + lin = nn.Linear(dims[l], out_dim) + + if weight_norm: + lin = nn.utils.weight_norm(lin) + + setattr(self, "lin" + str(l), lin) + + self.relu = nn.ReLU() + + def forward(self, points, normals, view_dirs, feature_vectors): + if self.embedview_fn is not None: + view_dirs = self.embedview_fn(view_dirs) + + rendering_input = None + + if self.mode == 'idr': + rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1) + elif self.mode == 'no_view_dir': + rendering_input = torch.cat([points, normals, feature_vectors], dim=-1) + elif self.mode == 'no_normal': + rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1) + + x = rendering_input + + for l in range(0, self.num_layers - 1): + lin = getattr(self, "lin" + str(l)) + + x = lin(x) + + if l < self.num_layers - 2: + x = self.relu(x) + + if self.squeeze_out: + x = torch.sigmoid(x) + return x + + +# This implementation is borrowed from nerf-pytorch: https://github.com/yenchenlin/nerf-pytorch +class NeRF(nn.Module): + def __init__(self, + D=8, + W=256, + d_in=3, + d_in_view=3, + multires=0, + multires_view=0, + output_ch=4, + skips=[4], + use_viewdirs=False): + super(NeRF, self).__init__() + self.D = D + self.W = W + self.d_in = d_in + self.d_in_view = d_in_view + self.input_ch = 3 + self.input_ch_view = 3 + self.embed_fn = None + self.embed_fn_view = None + + if multires > 0: + embed_fn, input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn = embed_fn + self.input_ch = input_ch + + if multires_view > 0: + embed_fn_view, input_ch_view = get_embedder(multires_view, input_dims=d_in_view) + self.embed_fn_view = embed_fn_view + self.input_ch_view = input_ch_view + + self.skips = skips + self.use_viewdirs = use_viewdirs + + self.pts_linears = nn.ModuleList( + [nn.Linear(self.input_ch, W)] + + [nn.Linear(W, W) if i not in self.skips else nn.Linear(W + self.input_ch, W) for i in range(D - 1)]) + + ### Implementation according to the official code release + ### (https://github.com/bmild/nerf/blob/master/run_nerf_helpers.py#L104-L105) + self.views_linears = nn.ModuleList([nn.Linear(self.input_ch_view + W, W // 2)]) + + ### Implementation according to the paper + # self.views_linears = nn.ModuleList( + # [nn.Linear(input_ch_views + W, W//2)] + [nn.Linear(W//2, W//2) for i in range(D//2)]) + + if use_viewdirs: + self.feature_linear = nn.Linear(W, W) + self.alpha_linear = nn.Linear(W, 1) + self.rgb_linear = nn.Linear(W // 2, 3) + else: + self.output_linear = nn.Linear(W, output_ch) + + def forward(self, input_pts, input_views): + if self.embed_fn is not None: + input_pts = self.embed_fn(input_pts) + if self.embed_fn_view is not None: + input_views = self.embed_fn_view(input_views) + + h = input_pts + for i, l in enumerate(self.pts_linears): + h = self.pts_linears[i](h) + h = F.relu(h) + if i in self.skips: + h = torch.cat([input_pts, h], -1) + + if self.use_viewdirs: + alpha = self.alpha_linear(h) + feature = self.feature_linear(h) + h = torch.cat([feature, input_views], -1) + + for i, l in enumerate(self.views_linears): + h = self.views_linears[i](h) + h = F.relu(h) + + rgb = self.rgb_linear(h) + return alpha, rgb + else: + assert False + + +class SingleVarianceNetwork(nn.Module): + def __init__(self, init_val): + 
super(SingleVarianceNetwork, self).__init__() + self.register_parameter('variance', nn.Parameter(torch.tensor(init_val))) + + def forward(self, x): + return torch.ones([len(x), 1]) * torch.exp(self.variance * 10.0) + + + + + +class BendingNetworkActiveForceFieldForwardLagV18(nn.Module): + def __init__(self, # self # + d_in, # + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False, + nn_instances=1, + minn_dist_threshold=0.05, + ): # contact + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagV18, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.static_friction_mu = 1. + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 # get + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.nn_instances = nn_instances + + self.contact_spring_rest_length = 2. 
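+        # contact_spring_rest_length above, together with the spring_contact_ks /
+        # spring_friction_ks embeddings defined below, parameterizes a penalty-spring
+        # style contact model. The usual reading (illustrative only; the exact usage is
+        # defined in the forward pass further below) is
+        #   f_contact  ~= k_contact  * (rest_length - d) * contact_normal
+        #   f_friction ~= k_friction * tangential_relative_displacement
+        # where d is the distance from a sampled active point to the passive object.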
+ + # self.minn_dist_sampled_pts_passive_obj_thres = 0.05 # minn_dist_threshold ### + self.minn_dist_sampled_pts_passive_obj_thres = minn_dist_threshold + + self.spring_contact_ks_values = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_contact_ks_values.weight) + self.spring_contact_ks_values.weight.data = self.spring_contact_ks_values.weight.data * 0.01 + + self.spring_friction_ks_values = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_friction_ks_values.weight) + self.spring_friction_ks_values.weight.data = self.spring_friction_ks_values.weight.data * 0.001 + + if self.nn_instances == 1: + self.spring_ks_values = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_ks_values.weight) + self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.01 + self.spring_ks_values.weight.data[:, :] = 0.1395 + else: + self.spring_ks_values = nn.ModuleList( + [ + nn.Embedding(num_embeddings=5, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_ks_values in self.spring_ks_values: + torch.nn.init.ones_(cur_ks_values.weight) + cur_ks_values.weight.data = cur_ks_values.weight.data * 0.01 + + self.inertia_div_factor = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.inertia_div_factor.weight) + # self.inertia_div_factor.weight.data[:, :] = 30.0 + self.inertia_div_factor.weight.data[:, :] = 20.0 + + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + + ## [ \alpha, \beta ] ## + if self.nn_instances == 1: + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. / (778 * 2)) + else: + self.ks_weights = nn.ModuleList( + [ + nn.Embedding(num_embeddings=2, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_ks_weights in self.ks_weights: + torch.nn.init.ones_(cur_ks_weights.weight) # + cur_ks_weights.weight.data[1] = cur_ks_weights.weight.data[1] * (1. 
/ (778 * 2)) + + + # sep_time_constant, sep_torque_time_constant, sep_damping_constant, sep_angular_damping_constant + self.sep_time_constant = self.time_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_time_constant.weight) # + + self.sep_torque_time_constant = self.time_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_torque_time_constant.weight) # + + self.sep_damping_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_damping_constant.weight) # # # # + # self.sep_damping_constant.weight.data = self.sep_damping_constant.weight.data * 0.9 + self.sep_damping_constant.weight.data = self.sep_damping_constant.weight.data * 0.2 + + + self.sep_angular_damping_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_angular_damping_constant.weight) # # # # + # self.sep_angular_damping_constant.weight.data = self.sep_angular_damping_constant.weight.data * 0.9 + self.sep_angular_damping_constant.weight.data = self.sep_angular_damping_constant.weight.data * 0.2 + + + + + if self.nn_instances == 1: + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + else: + self.time_constant = nn.ModuleList( + [ + nn.Embedding(num_embeddings=3, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_time_constant in self.time_constant: + torch.nn.init.ones_(cur_time_constant.weight) # + + if self.nn_instances == 1: + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + else: + self.damping_constant = nn.ModuleList( + [ + nn.Embedding(num_embeddings=3, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_damping_constant in self.damping_constant: + torch.nn.init.ones_(cur_damping_constant.weight) # # # # + cur_damping_constant.weight.data = cur_damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + + + # self.actuator_friction_forces = nn.Embedding( # actuator's forces # + # num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + + + if nn_instances == 1: + self.actuator_friction_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + else: + self.actuator_friction_forces = nn.ModuleList( + [nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) for _ in range(self.nn_instances) ] + ) + for cur_friction_force_net in self.actuator_friction_forces: + torch.nn.init.zeros_(cur_friction_force_net.weight) # + + # simulator # + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. 
/ (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + ''' the bending network ''' + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + ''' the bending network ''' + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + self.friction_input_dim = 3 + 3 + 1 + 3 ### + self.friction_network = [ + nn.Linear(self.friction_input_dim, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, 3) + ] + for i_sub_net, sub_net in enumerate(self.friction_network): + if isinstance(sub_net, nn.Linear): + if i_sub_net < len(self.friction_network) - 1: + torch.nn.init.kaiming_uniform_( + sub_net.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(sub_net.bias) + else: + torch.nn.init.zeros_(sub_net.weight) + torch.nn.init.zeros_(sub_net.bias) + self.friction_network = nn.Sequential( + *self.friction_network + ) + + + self.contact_normal_force_network = [ + nn.Linear(self.friction_input_dim, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, 3) + ] + for i_sub_net, sub_net in enumerate(self.contact_normal_force_network): + if isinstance(sub_net, nn.Linear): + if i_sub_net < len(self.contact_normal_force_network) - 1: + torch.nn.init.kaiming_uniform_( + sub_net.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(sub_net.bias) + else: + torch.nn.init.zeros_(sub_net.weight) + torch.nn.init.zeros_(sub_net.bias) + self.contact_normal_force_network = nn.Sequential( + *self.contact_normal_force_network + ) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + 
[nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": # periodict activation functions # + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + + self.obj_inertia = nn.Embedding( + num_embeddings=1, embedding_dim=3 + ) + torch.nn.init.ones_(self.obj_inertia.weight) + + self.optimizable_obj_mass = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.optimizable_obj_mass.weight) + import math + self.optimizable_obj_mass.weight.data *= math.sqrt(30) + + + self.optimizable_spring_ks = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.optimizable_spring_ks.weight) + # 400000000, 100000000 # optimizabale spring forces an the + self.optimizable_spring_ks.weight.data[0, :] = math.sqrt(1.0) + self.optimizable_spring_ks.weight.data[1, :] = math.sqrt(1.0) + + + + # optimizable_spring_ks_normal, optimizable_spring_ks_friction # + self.optimizable_spring_ks_normal = nn.Embedding( + num_embeddings=200, embedding_dim=1 + ) + torch.nn.init.ones_(self.optimizable_spring_ks_normal.weight) + # # 400000000, 100000000 # optimizabale spring forces an the + # self.optimizable_spring_ks.weight.data[0, :] = math.sqrt(1.0) + # self.optimizable_spring_ks.weight.data[1, :] = math.sqrt(1.0) + + self.optimizable_spring_ks_friction = nn.Embedding( + num_embeddings=200, embedding_dim=1 + ) + torch.nn.init.ones_(self.optimizable_spring_ks_friction.weight) + + + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? # + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} # record the optimizable offset # + self.save_values = {} + # ws_normed, defed_input_pts_sdf, # + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... 
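+        # The timestep_to_* dictionaries above and below act as per-frame caches of the
+        # simulation state (velocities, accumulated rigid translations and quaternions,
+        # contact-related quantities), keyed by the integer timestep, so that later
+        # frames can read earlier states inside the optimization window.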
## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # # active mesh # active mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + self.timestep_to_accum_acc = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. + # torch.nn.init.ones_(self.time_quaternions.weight) # # actuators + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + self.obj_sdf_th = None + self.obj_sdf_grad_th = None + + self.normal_plane_max_y = torch.tensor([0, 1., 0], dtype=torch.float32).cuda() ## 0, 1, 0 + self.normal_plane_min_y = torch.tensor([0, -1., 0.], dtype=torch.float32).cuda() # + + self.normal_plane_max_x = torch.tensor([1, 0, 0], dtype=torch.float32).cuda() ## 0, 1, 0 + self.normal_plane_min_x = torch.tensor([-1, 0., 0.], dtype=torch.float32).cuda() # + + self.normal_plane_max_z = torch.tensor([0, 0, 1.], dtype=torch.float32).cuda() ## 0, 1, 0 + self.normal_plane_min_z = torch.tensor([0, 0, -1.], dtype=torch.float32).cuda() # + + ## set the initial passive object verts and normals ### + ## the default scene is the box scene ## + self.penetration_determining = "plane_primitives" + self.canon_passive_obj_verts = None + self.canon_passive_obj_normals = None + self.train_residual_normal_forces = False + + self.lin_damping_coefs = nn.Embedding( # tim + num_embeddings=150, embedding_dim=1 + ) + torch.nn.init.ones_(self.lin_damping_coefs.weight) # (1.0 - damping) * prev_ts_vel + cur_ts_delta_vel + + self.ang_damping_coefs = nn.Embedding( # tim + num_embeddings=150, embedding_dim=1 + ) + torch.nn.init.ones_(self.ang_damping_coefs.weight) # (1.0 - samping_coef) * prev_ts_vel + cur_ts_delta_vel # + + + self.contact_damping_coef = 5e3 ## contact damping coef -- damping coef contact ## ## damping coef contact ## + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, 
self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): # no_grad() + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays # + # + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + # ### + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + + def query_for_sdf(self, cur_pts, cur_frame_transformations): + # + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + # cur_pts: nn_pts x 3 # + # print(f"cur_pts: {cur_pts.size()}, cur_frame_translation: {cur_frame_translation.size()}, cur_frame_rotation: {cur_frame_rotation.size()}") + ### transformed pts ### + # cur_transformed_pts = torch.matmul( + # cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0)).transpose(1, 0) + # ).transpose(1, 0) + # center_init_passive_obj_verts # + cur_transformed_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0) - self.center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + self.center_init_passive_obj_verts.unsqueeze(0) + # v = (v - center) * scale # + # sdf_space_center # + cur_transformed_pts_np = cur_transformed_pts.detach().cpu().numpy() + cur_transformed_pts_np = (cur_transformed_pts_np - np.reshape(self.sdf_space_center, (1, 3))) * self.sdf_space_scale + cur_transformed_pts_np = (cur_transformed_pts_np + 1.) / 2. 
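The indexing that follows rounds each normalized query point to its nearest voxel and gathers the stored SDF value (and, when available, its gradient). For reference, a trilinearly interpolated variant can be written with `torch.nn.functional.grid_sample`; the snippet below is only a sketch under the normalization used above (`sdf_space_center` / `sdf_space_scale` mapping canonical-frame points into [-1, 1]^3, grid stored as a `(res, res, res)` tensor indexed `[x, y, z]`, and the center/scale assumed here to be torch tensors), not the lookup the diff actually performs.

```python
import torch
import torch.nn.functional as F

def trilinear_sdf_lookup(pts_canon, sdf_grid, space_center, space_scale):
    """Trilinear SDF lookup sketch (the diff uses nearest-voxel integer indexing instead).

    pts_canon:   (N, 3) points already expressed in the canonical object frame.
    sdf_grid:    (res, res, res) tensor indexed as [x, y, z].
    space_center / space_scale: analogous to self.sdf_space_center / self.sdf_space_scale.
    """
    # Map points into [-1, 1]^3, the coordinate range grid_sample expects.
    norm_pts = (pts_canon - space_center.view(1, 3)) * space_scale
    # grid_sample's last grid dim is ordered (x, y, z) and indexes (W, H, D);
    # with sdf_grid[x, y, z] the volume layout is (D=x, H=y, W=z), so flip to (z, y, x).
    grid = norm_pts[:, [2, 1, 0]].view(1, -1, 1, 1, 3)
    vol = sdf_grid.view(1, 1, *sdf_grid.shape)
    sampled = F.grid_sample(vol, grid, mode='bilinear', align_corners=True)
    return sampled.view(-1)  # (N,) interpolated signed distances
```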
+ cur_transformed_pts_xs = (cur_transformed_pts_np[:, 0] * self.sdf_res).astype(np.int32) # [x, y, z] of the transformed_pts_np # + cur_transformed_pts_ys = (cur_transformed_pts_np[:, 1] * self.sdf_res).astype(np.int32) + cur_transformed_pts_zs = (cur_transformed_pts_np[:, 2] * self.sdf_res).astype(np.int32) + + cur_transformed_pts_xs = np.clip(cur_transformed_pts_xs, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_ys = np.clip(cur_transformed_pts_ys, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_zs = np.clip(cur_transformed_pts_zs, a_min=0, a_max=self.sdf_res - 1) + + + if self.obj_sdf_th is None: + self.obj_sdf_th = torch.from_numpy(self.obj_sdf).float().cuda() + cur_transformed_pts_xs_th = torch.from_numpy(cur_transformed_pts_xs).long().cuda() + cur_transformed_pts_ys_th = torch.from_numpy(cur_transformed_pts_ys).long().cuda() + cur_transformed_pts_zs_th = torch.from_numpy(cur_transformed_pts_zs).long().cuda() + + cur_pts_sdf = batched_index_select(self.obj_sdf_th, cur_transformed_pts_xs_th, 0) + # print(f"After selecting the x-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the y-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the z-axis: {cur_pts_sdf.size()}") + + if self.obj_sdf_grad is not None: + if self.obj_sdf_grad_th is None: + self.obj_sdf_grad_th = torch.from_numpy(self.obj_sdf_grad).float().cuda() + self.obj_sdf_grad_th = self.obj_sdf_grad_th / torch.clamp(torch.norm(self.obj_sdf_grad_th, p=2, keepdim=True, dim=-1), min=1e-5) + cur_pts_sdf_grad = batched_index_select(self.obj_sdf_grad_th, cur_transformed_pts_xs_th, 0) # nn_pts x res x res x 3 + cur_pts_sdf_grad = batched_index_select(cur_pts_sdf_grad, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + cur_pts_sdf_grad = batched_index_select(cur_pts_sdf_grad, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # cur_pts_sdf_grad = cur_pts_sdf_grad / torch + else: + cur_pts_sdf_grad = None + + # cur_pts_sdf = self.obj_sdf[cur_transformed_pts_xs] + # cur_pts_sdf = cur_pts_sdf[:, cur_transformed_pts_ys] + # cur_pts_sdf = cur_pts_sdf[:, :, cur_transformed_pts_zs] + # cur_pts_sdf = np.diagonal(cur_pts_sdf) + # print(f"cur_pts_sdf: {cur_pts_sdf.shape}") + # # gradient of sdf # + # # the contact force dierection should be the negative direction of the sdf gradient? 
# + # # it seems true # + # # get the cur_pts_sdf value # + # cur_pts_sdf = torch.from_numpy(cur_pts_sdf).float().cuda() + if cur_pts_sdf_grad is None: + return cur_pts_sdf + else: + return cur_pts_sdf, cur_pts_sdf_grad # return the grad as the + + + def query_for_sdf_of_canon_obj(self, cur_pts, cur_frame_transformations): + + # + + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + + cur_transformed_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + + # cur_transformed_pts = torch.matmul( + # cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0) - self.center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0) + # ).transpose(1, 0) + self.center_init_passive_obj_verts.unsqueeze(0) + # # v = (v - center) * scale # + # sdf_space_center # + cur_transformed_pts_np = cur_transformed_pts.detach().cpu().numpy() + # + cur_transformed_pts_np = (cur_transformed_pts_np - np.reshape(self.sdf_space_center, (1, 3))) * self.sdf_space_scale + cur_transformed_pts_np = (cur_transformed_pts_np + 1.) / 2. + cur_transformed_pts_xs = (cur_transformed_pts_np[:, 0] * self.sdf_res).astype(np.int32) # [x, y, z] of the transformed_pts_np # + cur_transformed_pts_ys = (cur_transformed_pts_np[:, 1] * self.sdf_res).astype(np.int32) + cur_transformed_pts_zs = (cur_transformed_pts_np[:, 2] * self.sdf_res).astype(np.int32) + + cur_transformed_pts_xs = np.clip(cur_transformed_pts_xs, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_ys = np.clip(cur_transformed_pts_ys, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_zs = np.clip(cur_transformed_pts_zs, a_min=0, a_max=self.sdf_res - 1) + + + if self.obj_sdf_th is None: + self.obj_sdf_th = torch.from_numpy(self.obj_sdf).float().cuda() + cur_transformed_pts_xs_th = torch.from_numpy(cur_transformed_pts_xs).long().cuda() + cur_transformed_pts_ys_th = torch.from_numpy(cur_transformed_pts_ys).long().cuda() + cur_transformed_pts_zs_th = torch.from_numpy(cur_transformed_pts_zs).long().cuda() + + cur_pts_sdf = batched_index_select(self.obj_sdf_th, cur_transformed_pts_xs_th, 0) + # print(f"After selecting the x-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the y-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the z-axis: {cur_pts_sdf.size()}") + + if self.obj_sdf_grad is not None: + if self.obj_sdf_grad_th is None: + self.obj_sdf_grad_th = torch.from_numpy(self.obj_sdf_grad).float().cuda() + self.obj_sdf_grad_th = self.obj_sdf_grad_th / torch.clamp(torch.norm(self.obj_sdf_grad_th, p=2, keepdim=True, dim=-1), min=1e-5) + cur_pts_sdf_grad = batched_index_select(self.obj_sdf_grad_th, cur_transformed_pts_xs_th, 0) # nn_pts x res x res x 3 + cur_pts_sdf_grad = batched_index_select(cur_pts_sdf_grad, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + cur_pts_sdf_grad = batched_index_select(cur_pts_sdf_grad, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # cur_pts_sdf_grad = cur_pts_sdf_grad / torch + else: + cur_pts_sdf_grad = None + + # cur_pts_sdf = self.obj_sdf[cur_transformed_pts_xs] + # cur_pts_sdf = cur_pts_sdf[:, cur_transformed_pts_ys] + # cur_pts_sdf = cur_pts_sdf[:, :, cur_transformed_pts_zs] + # cur_pts_sdf = np.diagonal(cur_pts_sdf) + 
# print(f"cur_pts_sdf: {cur_pts_sdf.shape}") + # # the contact force dierection should be the negative direction of the sdf gradient? # + # # get the cur_pts_sdf value # + # cur_pts_sdf = torch.from_numpy(cur_pts_sdf).float().cuda() + if cur_pts_sdf_grad is None: + return cur_pts_sdf + else: + return cur_pts_sdf, cur_pts_sdf_grad # return the grad as the + + ## query for cotnacting + def query_for_contacting_ball_primitives(self, cur_pts, cur_frame_transformations): + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + + inv_transformed_queried_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0) - self.center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + self.center_init_passive_obj_verts.unsqueeze(0) + + # center_verts, ball_r # + center_verts = self.center_verts + ball_r = self.ball_r + dist_inv_transformed_pts_w_center_ball = torch.sum( + (inv_transformed_queried_pts - center_verts.unsqueeze(0)) ** 2, dim=-1 ## + ) + + penetration_indicators = dist_inv_transformed_pts_w_center_ball <= (ball_r ** 2) + + # maxx_dist_to_planes, projected_plane_pts_transformed, projected_plane_normals_transformed, projected_plane_pts, selected_plane_normals + dir_center_to_ball = inv_transformed_queried_pts - center_verts.unsqueeze(0) ## nn_pts x 3 ## + norm_center_to_ball = torch.norm(dir_center_to_ball, dim=-1, p=2, keepdim=True) + dir_center_to_ball = dir_center_to_ball / torch.clamp(torch.norm(dir_center_to_ball, dim=-1, p=2, keepdim=True), min=1e-6) + sd_dist = norm_center_to_ball - ball_r + projected_ball_pts = center_verts.unsqueeze(0) + dir_center_to_ball * ball_r + projected_ball_normals = dir_center_to_ball.clone() + + projected_ball_normals_transformed = torch.matmul( + cur_frame_rotation, projected_ball_normals.contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + projected_ball_pts_transformed = torch.matmul( ## center init passive obj verts + cur_frame_rotation, (projected_ball_pts - self.center_init_passive_obj_verts.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + cur_frame_translation.unsqueeze(0) + self.center_init_passive_obj_verts.unsqueeze(0) + + return penetration_indicators, sd_dist, projected_ball_pts_transformed, projected_ball_normals_transformed, projected_ball_pts, projected_ball_normals + + ## because of ## because of + def query_for_contacting_primitives(self, cur_pts, cur_frame_transformations): + # cur_frame rotation -> 3 x 3 rtoations # translation -> 3 translations # + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + # cur_pts: nn_pts x 3 # + # print(f"cur_pts: {cur_pts.size()}, cur_frame_translation: {cur_frame_translation.size()}, cur_frame_rotation: {cur_frame_rotation.size()}") + ### transformed pts ### + # inv_transformed_queried_pts = torch.matmul( + # cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0)).transpose(1, 0) + # ).transpose(1, 0) + inv_transformed_queried_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0) - self.center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + self.center_init_passive_obj_verts.unsqueeze(0) + + # maxximum # jut define the maxim # + # normal to six palnes -> + # normal to six planes -> + maxx_init_passive_mesh = self.maxx_init_passive_mesh 
+ minn_init_passive_mesh = self.minn_init_passive_mesh # + + + # max y-coordiante; min y-coordiante; max + dist_to_plane_max_y = torch.sum((inv_transformed_queried_pts - maxx_init_passive_mesh.unsqueeze(0)) * self.normal_plane_max_y.unsqueeze(0), dim=-1) ### signed distance to the upper s + # maximum distnace? # + dist_to_plane_min_y = torch.sum((inv_transformed_queried_pts - minn_init_passive_mesh.unsqueeze(0)) * self.normal_plane_min_y.unsqueeze(0), dim=-1) ### signed distance to the lower surface # + + dist_to_plane_max_z = torch.sum((inv_transformed_queried_pts - maxx_init_passive_mesh.unsqueeze(0)) * self.normal_plane_max_z.unsqueeze(0), dim=-1) ### signed distance to the upper s + # maximum distnace? # + dist_to_plane_min_z = torch.sum((inv_transformed_queried_pts - minn_init_passive_mesh.unsqueeze(0)) * self.normal_plane_min_z.unsqueeze(0), dim=-1) ### signed distance to the lower surface # + + dist_to_plane_max_x = torch.sum((inv_transformed_queried_pts - maxx_init_passive_mesh.unsqueeze(0)) * self.normal_plane_max_x.unsqueeze(0), dim=-1) ### signed distance to the upper s + # maximum distnace? # + dist_to_plane_min_x = torch.sum((inv_transformed_queried_pts - minn_init_passive_mesh.unsqueeze(0)) * self.normal_plane_min_x.unsqueeze(0), dim=-1) ### signed distance to the lower surface # + tot_dist_to_planes = torch.stack( + [dist_to_plane_max_y, dist_to_plane_min_y, dist_to_plane_max_z, dist_to_plane_min_z, dist_to_plane_max_x, dist_to_plane_min_x], dim=-1 + ) + maxx_dist_to_planes, maxx_dist_to_planes_plane_idx = torch.max(tot_dist_to_planes, dim=-1) ### maxx dist to planes ## # nn_pts # + + # selected plane normals # selected plane normals # kinematics dirven mano? # + # a much more simplified setting> # # need frictio + # contact points established and the contact information maintainacnce # + # test cases -> test such two relatively moving objects # + # assume you have the correct forces --- how to opt them # + # model the frictions # + tot_plane_normals = torch.stack( + [self.normal_plane_max_y, self.normal_plane_min_y, self.normal_plane_max_z, self.normal_plane_min_z, self.normal_plane_max_x, self.normal_plane_min_x], dim=0 ### 6 x 3 -> plane normals # + ) + # nearest plane points # # nearest plane points # # nearest plane points # # nearest plane points # + # nearest_plane_points # + selected_plane_normals = tot_plane_normals[maxx_dist_to_planes_plane_idx ] ### nn_tot_pts x 3 ### + projected_plane_pts = cur_pts - selected_plane_normals * maxx_dist_to_planes.unsqueeze(-1) ### nn_tot_pts x 3 ### + projected_plane_pts_x = projected_plane_pts[:, 0] + projected_plane_pts_y = projected_plane_pts[:, 1] + projected_plane_pts_z = projected_plane_pts[:, 2] + projected_plane_pts_x = torch.clamp(projected_plane_pts_x, min=minn_init_passive_mesh[0], max=maxx_init_passive_mesh[0]) + projected_plane_pts_y = torch.clamp(projected_plane_pts_y, min=minn_init_passive_mesh[1], max=maxx_init_passive_mesh[1]) + projected_plane_pts_z = torch.clamp(projected_plane_pts_z, min=minn_init_passive_mesh[2], max=maxx_init_passive_mesh[2]) + + projected_plane_pts = torch.stack( + [projected_plane_pts_x, projected_plane_pts_y, projected_plane_pts_z], dim=-1 + ) + + # query # + projected_plane_pts_transformed = torch.matmul( + cur_frame_rotation, (projected_plane_pts - self.center_init_passive_obj_verts.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + cur_frame_translation.unsqueeze(0) + self.center_init_passive_obj_verts.unsqueeze(0) + 
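The point transform above and the normal transform just below both follow the rigid-motion pattern used throughout this file: rotate about the initial passive-object center, then translate; normals are only rotated. A minimal sketch of that pattern (shapes assumed: points and normals `(N, 3)`, `R` a 3x3 rotation, `t` and `center` 3-vectors):

```python
import torch

def canon_to_world(pts_canon, normals_canon, R, t, center):
    """Rigid transform about `center`: p_w = R (p_c - center) + t + center, n_w = R n_c."""
    pts_world = (pts_canon - center) @ R.t() + t + center
    normals_world = normals_canon @ R.t()
    return pts_world, normals_world
```

The inverse mapping used at the top of `query_for_contacting_primitives` (world to canonical frame) is just `R.t() @ (p_w - t - center) + center`, which is what the `inv_transformed_queried_pts` expression computes.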
projected_plane_normals_transformed = torch.matmul( + cur_frame_rotation, selected_plane_normals.contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + + # ### penetration indicator, signed distance, projected points onto the plane as the contact points ### + return maxx_dist_to_planes <= 0, maxx_dist_to_planes, projected_plane_pts_transformed, projected_plane_normals_transformed, projected_plane_pts, selected_plane_normals + + + + ### forward; #### # + def forward2(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None, i_instance=0, reference_mano_pts=None, sampled_verts_idxes=None, fix_obj=False, contact_pairs_set=None, pts_frictional_forces=None): + + + nex_pts_ts = input_pts_ts + 1 + + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] + # ori_nns = sampled_input_pts.size(0) + if sampled_verts_idxes is not None: + sampled_input_pts = sampled_input_pts[sampled_verts_idxes] + # nn_sampled_input_pts = sampled_input_pts.size(0) + + if nex_pts_ts in timestep_to_active_mesh: + ### disp_sampled_input_pts = nex_sampled_input_pts - sampled_input_pts ### + nex_sampled_input_pts = timestep_to_active_mesh[nex_pts_ts].detach() + else: + nex_sampled_input_pts = timestep_to_active_mesh[input_pts_ts].detach() ## + if sampled_verts_idxes is not None: + nex_sampled_input_pts = nex_sampled_input_pts[sampled_verts_idxes] ## + disp_act_pts_cur_to_nex = nex_sampled_input_pts - sampled_input_pts ## act pts cur to nex ## # nex sampled input pts # + # disp_act_pts_cur_to_nex = disp_act_pts_cur_to_nex / torch.clamp(torch.norm(disp_act_pts_cur_to_nex, p=2, keepdim=True, dim=-1), min=1e-5) + + ### + if sampled_input_pts.size(0) > 20000: + norm_disp_act_pts = torch.clamp(torch.norm(disp_act_pts_cur_to_nex, dim=-1, p=2, keepdim=True), min=1e-5) + else: + norm_disp_act_pts = torch.clamp(torch.norm(disp_act_pts_cur_to_nex, p=2, keepdim=True), min=1e-5) + + disp_act_pts_cur_to_nex = disp_act_pts_cur_to_nex / norm_disp_act_pts + real_norm = torch.clamp(torch.norm(disp_act_pts_cur_to_nex, p=2, keepdim=True, dim=-1), min=1e-5) + real_norm = torch.mean(real_norm) + + # print(sampled_input_pts.size(), norm_disp_act_pts, real_norm) + + if self.canon_passive_obj_verts is None: + ## center init passsive obj verts ## + init_passive_obj_verts = timestep_to_passive_mesh[0] # at the timestep 0 ## + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + self.center_init_passive_obj_verts = center_init_passive_obj_verts.clone() + else: + init_passive_obj_verts = self.canon_passive_obj_verts + init_passive_obj_ns = self.canon_passive_obj_normals + + # center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + # self.center_init_passive_obj_verts = center_init_passive_obj_verts.clone() + + # direction of the normal direction has been changed # + # contact region and multiple contact points ## + center_init_passive_obj_verts = torch.zeros((3, ), dtype=torch.float32).cuda() + self.center_init_passive_obj_verts = center_init_passive_obj_verts.clone() + + + # use_same_contact_spring_k # + # cur_passive_obj_rot, cur_passive_obj_trans # ## quaternion to matrix -- quaternion for # + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() # passive obj trans # + + ''' 
Transform the passive object verts and normals ''' + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) + + + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + ### passvie obj ns ### + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center # passive obj center # + + self.cur_passive_obj_ns = cur_passive_obj_ns + self.cur_passive_obj_verts = cur_passive_obj_verts + + ## velcoti of the manipulator is enough to serve as the velocity of the peentration deo ## + + + # nn instances # # # cur passive obj ns ## + # if self.nn_instances == 1: + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + # else: + # ws_alpha = self.ks_weights[i_instance](torch.zeros((1,)).long().cuda()).view(1) + # ws_beta = self.ks_weights[i_instance](torch.ones((1,)).long().cuda()).view(1) + + # print(f"sampled_input_pts: {sampled_input_pts.size()}") + + if self.use_sqrt_dist: # use sqrt distance # + dist_sampled_pts_to_passive_obj = torch.norm( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)), dim=-1, p=2 + ) + else: + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + #### use sqrt distances #### + + # ### add the sqrt for calculate the l2 distance ### + # dist_sampled_pts_to_passive_obj = torch.sqrt(dist_sampled_pts_to_passive_obj) ### + + + # dist_sampled_pts_to_passive_obj = torch.norm( # nn_sampled_pts x nn_passive_pts + # (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)), dim=-1, p=2 + # ) + + ''' distance between sampled pts and the passive object ''' + ## get the object vert idx with the + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) # get the minn idx sampled pts to passive obj ## + + + ''' calculate the apssvie objects normals ''' + # inter obj normals at the current frame # + # inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] + + + ### use obj normals as the direction ### + # inter_obj_normals = -1 * inter_obj_normals.detach().clone() + ### use the active points displacement directions as the direction ### # the normal + + inter_obj_normals = -1 * disp_act_pts_cur_to_nex.detach().clone() + + # penetration_determining # + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + + cur_passive_obj_verts_pts_idxes = torch.arange(0, cur_passive_obj_verts.size(0), dtype=torch.long).cuda() # + + # inter_passive_obj_pts_idxes = cur_passive_obj_verts_pts_idxes[minn_idx_sampled_pts_to_passive_obj] + + # inter_obj_normals # + # inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts.detach() # sampled p + # dot_inter_obj_pts_to_sampled_pts_normals = torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) + + ###### penetration penalty strategy v1 ###### + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 + # 
penetrating_depth = -1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1) + # penetrating_depth_penalty = penetrating_depth[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): # get the penetration penalties # + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v1 ###### + + # ws_beta; 10 # # sum over the forces but not the weighted sum... # + # ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) # ws_alpha # + ####### sharp the weights ####### + + # minn_dist_sampled_pts_passive_obj_thres = 0.05 + # # minn_dist_sampled_pts_passive_obj_thres = 0.001 + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 # m + ''' get the prespecified passive obj threshold ''' + minn_dist_sampled_pts_passive_obj_thres = self.minn_dist_sampled_pts_passive_obj_thres + + + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) e + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + # penetrating # + ### penetration strategy v4 #### ## threshold of the sampled pts ## + + ''' Calculate the penetration depth / sdf from the input point to the object ''' + if input_pts_ts > 0 or (input_pts_ts == 0 and input_pts_ts in self.timestep_to_total_def): + cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + # obj_sdf_grad + if self.penetration_determining == "sdf_of_canon": ### queried sdf? 
### + if self.obj_sdf_grad is None: ## query for sdf of canon + queried_sdf = self.query_for_sdf_of_canon_obj(sampled_input_pts, (cur_rot, cur_trans)) + else: + queried_sdf, queried_sdf_grad = self.query_for_sdf_of_canon_obj(sampled_input_pts, (cur_rot, cur_trans)) + else: + if self.obj_sdf_grad is None: + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + else: + queried_sdf, queried_sdf_grad = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + # inter_obj_normals = -1.0 * queried_sdf_grad + # inter_obj_normals = queried_sdf_grad + # inter_obj_normals = torch.matmul( # 3 x 3 xxxx 3 x N -> 3 x N + # cur_rot, inter_obj_normals.contiguous().transpose(1, 0).contiguous() + # ).contiguous().transpose(1, 0).contiguous() + penetrating_indicator = queried_sdf < 0 + else: + cur_rot = torch.eye(n=3, dtype=torch.float32).cuda() + cur_trans = torch.zeros((3,), dtype=torch.float32).cuda() + if self.penetration_determining == "sdf_of_canon": + if self.obj_sdf_grad is None: + queried_sdf = self.query_for_sdf_of_canon_obj(sampled_input_pts, (cur_rot, cur_trans)) + else: + queried_sdf, queried_sdf_grad = self.query_for_sdf_of_canon_obj(sampled_input_pts, (cur_rot, cur_trans)) + else: + if self.obj_sdf_grad is None: + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + else: + queried_sdf, queried_sdf_grad = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + # inter_obj_normals = -1.0 * queried_sdf_grad + # inter_obj_normals = queried_sdf_grad + penetrating_indicator = queried_sdf < 0 + + + ''' decide forces via kinematics statistics ''' + + # rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + # dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + + ''' calculate the penetration indicator ''' + penetrating_indicator_mult_factor = torch.ones_like(penetrating_indicator).float() + penetrating_indicator_mult_factor[penetrating_indicator] = -1. + + + + dist_sampled_pts_to_passive_obj = dist_sampled_pts_to_passive_obj * penetrating_indicator_mult_factor + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + + # use contact + if self.use_contact_dist_as_sdf: + queried_sdf = dist_sampled_pts_to_passive_obj + + # + in_contact_indicator_robot_to_obj = queried_sdf <= self.minn_dist_threshold_robot_to_obj ### queried_sdf <= minn_dist_threshold_robot_to_obj + + zero_level_incontact_indicator_robot_to_obj = queried_sdf <= 0.0 + + + ## minn_dist_sampled_pts_passive_obj_thres # ## in contct indicator ## ## in contact indicator ## + in_contact_indicator = dist_sampled_pts_to_passive_obj <= minn_dist_sampled_pts_passive_obj_thres + + + # ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + # ws_unnormed = torch.ones_like(ws_unnormed) + ws_unnormed = torch.ones_like(dist_sampled_pts_to_passive_obj) + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + + # minimized motions # + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 # + # self.penetrating_indicator = penetrating_indicator # # + self.penetrating_indicator = in_contact_indicator_robot_to_obj + cur_inter_obj_normals = inter_obj_normals.clone().detach() + + ### + if self.penetration_determining == "plane_primitives": # optimize the ruels for ball case? # + in_contact_indicator_robot_to_obj, queried_sdf, inter_obj_pts, inter_obj_normals, canon_inter_obj_pts, canon_inter_obj_normals = self.query_for_contacting_primitives(sampled_input_pts, (cur_rot, cur_trans)) + + self.penetrating_indicator = in_contact_indicator_robot_to_obj + + cur_inter_obj_normals = inter_obj_normals.clone().detach() + elif self.penetration_determining == "ball_primitives": + in_contact_indicator_robot_to_obj, queried_sdf, inter_obj_pts, inter_obj_normals, canon_inter_obj_pts, canon_inter_obj_normals = self.query_for_contacting_ball_primitives(sampled_input_pts, (cur_rot, cur_trans)) + self.penetrating_indicator = in_contact_indicator_robot_to_obj + cur_inter_obj_normals = inter_obj_normals.clone().detach() + else: + # inter_obj_pts + canon_inter_obj_pts = torch.matmul( + cur_passive_obj_rot.contiguous().transpose(1, 0).contiguous(), (inter_obj_pts - cur_passive_obj_trans.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() ## + canon_inter_obj_normals = torch.matmul( # passive obj rot ## # R^T n --> R R^T n --the current inter obj normals ## + cur_passive_obj_rot.contiguous().transpose(1, 0).contiguous(), inter_obj_normals.contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() ## -> inter obj normals + + + ##### penetration depth penalty loss calculation: strategy 2 ##### + penetration_proj_ks = self.minn_dist_threshold_robot_to_obj - queried_sdf + penetration_proj_pos = sampled_input_pts + penetration_proj_ks.unsqueeze(-1) * inter_obj_normals ## nn_sampled_pts x 3 ## + dot_pos_to_proj_with_normal = torch.sum( + (penetration_proj_pos.detach() - sampled_input_pts) * inter_obj_normals.detach(), dim=-1 ### nn_sampled_pts + ) + + + # self.penetrating_depth_penalty = dot_pos_to_proj_with_normal[in_contact_indicator_robot_to_obj].mean() + self.smaller_than_zero_level_set_indicator = queried_sdf < 0.0 + self.penetrating_depth_penalty = dot_pos_to_proj_with_normal[queried_sdf < 0.0].mean() + ##### penetration depth penalty loss calculation: strategy 2 ##### + + + ##### penetration depth penalty loss calculation: strategy 1 ##### + # self.penetrating_depth_penalty = (self.minn_dist_threshold_robot_to_obj - queried_sdf[in_contact_indicator_robot_to_obj]).mean() + ##### penetration depth penalty loss calculation: strategy 1 ##### + + + ### penetration strategy v4 #### # another mophology # + + + + if self.nn_instances == 1: # spring ks values + # contact ks values # # if we set a fixed k value here # + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 1).view(1,) + # contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + # tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + else: + contact_spring_ka = 
self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + # contact_spring_kb = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + # contact_spring_kc = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + # tangential_ks = self.spring_ks_values[i_instance](torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + # optm_alltime_ks + # # optimizable_spring_ks_normal, optimizable_spring_ks_friction # + if self.optm_alltime_ks: + opt_penetration_proj_k_to_robot = self.optimizable_spring_ks_normal(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + opt_penetration_proj_k_to_robot_friction = self.optimizable_spring_ks_friction(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + else: + # optimizable_spring_ks # + opt_penetration_proj_k_to_robot = self.optimizable_spring_ks(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + opt_penetration_proj_k_to_robot_friction = self.optimizable_spring_ks(torch.zeros((1,), dtype=torch.long).cuda() + 1).view(1,) + + # self.penetration_proj_k_to_robot = opt_penetration_proj_k_to_robot ** 2 + # self.penetration_proj_k_to_robot_friction = opt_penetration_proj_k_to_robot_friction ** 2 + + ## penetration proj k to robot ## + penetration_proj_k_to_robot = self.penetration_proj_k_to_robot * opt_penetration_proj_k_to_robot ** 2 + penetration_proj_k_to_robot_friction = self.penetration_proj_k_to_robot_friction * opt_penetration_proj_k_to_robot_friction ** 2 + + # penetration_proj_k_to_robot = self.penetration_proj_k_to_robot + # penetration_proj_k_to_robot = opt_penetration_proj_k_to_robot + + # if self.use_split_params: ## + # contact_spring_ka = self.spring_contact_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + # if self.use_sqr_spring_stiffness: + # contact_spring_ka = contact_spring_ka ** 2 + + ## ks may be differnet across different timesteps ## + + # if self.train_residual_friction: + contact_spring_ka = 0.1907073 ** 2 ## contact spring ka ## + contact_spring_kb = 0.00131699 + + ''' use same contact spring k should be no ''' + if self.use_same_contact_spring_k: + # contact_spring_ka_ori = contact_spring_ka.clone() + + ''' Equal forces ''' + contact_spring_ka = penetration_proj_k_to_robot * contact_spring_ka # equal forc stiffjess + + contact_spring_kb = contact_spring_kb * penetration_proj_k_to_robot + + penetration_proj_k_to_robot = contact_spring_ka + + ''' N-Equal forces ''' + # contact_spring_ka = 30. * contact_spring_ka ## change the contact spring k ## + # penetration_proj_k_to_robot = penetration_proj_k_to_robot * contact_spring_ka_ori ### change the projection coeff to the robot + + + ''' N-Equal forces ''' + # contact_spring_ka = penetration_proj_k_to_robot * contact_spring_ka ## contact spring ka ## + # penetration_proj_k_to_robot = 30. 
* contact_spring_ka_ori + else: + contact_spring_ka = penetration_proj_k_to_robot * contact_spring_ka # + # contact_spring_kb = contact_spring_kb * self.penetration_proj_k_to_robot_friction + contact_spring_kb = contact_spring_kb * penetration_proj_k_to_robot_friction + penetration_proj_k_to_robot = contact_spring_ka + + + + ### contact spring ka ## + + if torch.isnan(self.penetrating_depth_penalty): # + self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + + # penetrating_points = sampled_input_pts[penetrating_indicator] # robot to obj # # + penetrating_points = sampled_input_pts[in_contact_indicator_robot_to_obj] # + # penetration_proj_k_to_robot = 1.0 + # penetration_proj_k_to_robot = 0.01 + + # penetration_proj_k_to_robot = 0.0 + # proj_force = dist * normal * penetration_k # # + ## penetration forces for each manipulator point ## + penetrating_forces = penetration_proj_ks.unsqueeze(-1) * cur_inter_obj_normals * penetration_proj_k_to_robot + # penetrating_forces = penetrating_forces[penetrating_indicator] + penetrating_forces = penetrating_forces[in_contact_indicator_robot_to_obj] + self.penetrating_forces = penetrating_forces # forces + self.penetrating_points = penetrating_points # penetrating points ## # incontact indicator toothers ## + + + # contact psring ka ## cotnact + + ##### the contact force decided by the theshold ###### # realted to the distance threshold and the HO distance # + contact_force_d = contact_spring_ka * (self.minn_dist_sampled_pts_passive_obj_thres - dist_sampled_pts_to_passive_obj) + ###### the contact force decided by the threshold ###### + + ## contact force_d = k^d contact_dist - contact spring_damping * d * (\dot d) + ## ---- should get ## + ## + time_cons = 0.0005 + contact_manipulator_point_vel = nex_sampled_input_pts - sampled_input_pts ### nn_ampled_pts x ij + # time_cons_rot + contact_manipulator_point_vel = contact_manipulator_point_vel / time_cons + contact_manipulator_point_vel_norm = torch.norm(contact_manipulator_point_vel, p=2, dim=-1, keepdim=True) + contact_force_d = contact_force_d + self.contact_damping_coef * (contact_manipulator_point_vel_norm * (self.minn_dist_sampled_pts_passive_obj_thres - dist_sampled_pts_to_passive_obj) ) + + + + + # contac force d contact spring ka * penetration depth # + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals) + + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + # norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts, nnsampledpts # + # penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + # penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. 
+ # penalty_friction_constraint = torch.mean(penalty_friction_constraint) # friction + self.penalty_friction_constraint = torch.zeros((1,), dtype=torch.float32).cuda().mean() # penalty friction + # contact_force_d_scalar = norm_along_normals_forces.clone() + + # friction models # + # penalty friction constraints # + penalty_friction_tangential_forces = torch.zeros_like(contact_force_d) + + + + # rotation and translatiosn # + cur_fr_rot = cur_passive_obj_rot # passive obj rot # + cur_fr_trans = cur_passive_obj_trans # + + tot_contact_active_pts = [] + tot_contact_passive_pts = [] + tot_contact_active_idxes = [] + # tot_contact_passive_idxes = [] # # + tot_canon_contact_passive_normals = [] + tot_canon_contact_passive_pts = [] + tot_contact_passive_normals = [] # tot contact passive pts; tot cotnact passive normals # + tot_contact_frictions = [] + tot_residual_normal_forces = [] + + if contact_pairs_set is not None: + # contact_active_pts = contact_pairs_set['contact_active_pts'] + # contact_passive_pts = contact_pairs_set['contact_passive_pts'] + contact_active_idxes = contact_pairs_set['contact_active_idxes'] + # contact_passive_idxes = contact_pairs_set # # app + + # contact active idxes # + # nn_contact_pts x 3 -> as the cotnact passvie normals # + canon_contact_passive_normals = contact_pairs_set['canon_contact_passive_normals'] + canon_contact_passive_pts = contact_pairs_set['canon_contact_passive_pts'] + cur_fr_contact_passive_normals = torch.matmul( ## penetration normals ## + cur_fr_rot, canon_contact_passive_normals.contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() # tranformed normals # frame passive normals # + + # not irrelevant at all # + cur_fr_contact_act_pts = sampled_input_pts[contact_active_idxes] + # cur_fr_contact_passive_pts = canon_contact_passive_pts + # + cur_fr_contact_passive_pts = torch.matmul( + cur_fr_rot, (canon_contact_passive_pts - self.center_init_passive_obj_verts.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + cur_fr_trans.unsqueeze(0) + self.center_init_passive_obj_verts.unsqueeze(0) ## passive pts + + nex_fr_contact_act_pts = nex_sampled_input_pts[contact_active_idxes] + + # cur_fr_contact_passive_to_act = cur_fr_contact_act_pts - cur_fr_contact_passive_pts # + + cur_fr_contact_passive_to_act = nex_fr_contact_act_pts - cur_fr_contact_passive_pts + + dot_rel_disp_with_passive_normals = torch.sum( + cur_fr_contact_passive_to_act * cur_fr_contact_passive_normals, dim=-1 + ) + cur_friction_forces = cur_fr_contact_passive_to_act - dot_rel_disp_with_passive_normals.unsqueeze(-1) * cur_fr_contact_passive_normals + + ## cur frame cotnct act + cur_cur_fr_contact_passive_to_act = cur_fr_contact_act_pts - cur_fr_contact_passive_pts + cur_cur_penetration_depth = torch.sum( + cur_cur_fr_contact_passive_to_act * cur_fr_contact_passive_normals, dim=-1 + ) + + + if self.train_residual_friction: + ''' add residual fictions ''' + # 3 + 3 + 3 + 3 ### active points's current relative position, active point's offset, penetration depth, normal direction + friction_net_in_feats = torch.cat( + [cur_cur_fr_contact_passive_to_act, cur_fr_contact_passive_to_act, cur_cur_penetration_depth.unsqueeze(-1), cur_fr_contact_passive_normals], dim=-1 + ) + residual_frictions = self.friction_network(friction_net_in_feats) + + residual_frictions_dot_w_normals = torch.sum( + residual_frictions * cur_fr_contact_passive_normals, dim=-1 + ) + residual_frictions = residual_frictions - 
residual_frictions_dot_w_normals.unsqueeze(-1) * cur_fr_contact_passive_normals + + cur_friction_forces = cur_friction_forces + residual_frictions + ''' add residual fictions ''' + + if self.train_residual_normal_forces: + # contact_normal_force_network + contact_normal_forces_in_feats = torch.cat( + [cur_cur_fr_contact_passive_to_act, cur_fr_contact_passive_to_act, cur_cur_penetration_depth.unsqueeze(-1), cur_fr_contact_passive_normals], dim=-1 + ) + residual_normal_forces = self.contact_normal_force_network(contact_normal_forces_in_feats) + residual_normal_forces_dot_w_normals = torch.sum( + residual_normal_forces * cur_fr_contact_passive_normals, dim=-1 + ) + residual_normal_forces = residual_normal_forces_dot_w_normals.unsqueeze(-1) * cur_fr_contact_passive_normals + tot_residual_normal_forces.append(residual_normal_forces[remaining_contact_indicators]) + + + # cur_rel_passive_to_active = cur_fr_contact_act_pts - cur_fr_contact_passive_pts + # dot_rel_disp_w_obj_normals = torch.sum( + # cur_rel_passive_to_active * cur_fr_contact_passive_normals, dim=-1 + # ) + # cur_friction_forces = cur_rel_passive_to_active - dot_rel_disp_w_obj_normals.unsqueeze(-1) * cur_fr_contact_passive_normals + + + # if the dot < 0 -> still in contact ## rremaning contacts ## + # if the dot > 0 -. not in contact and can use the points to establish new conatcts --- # maitnian the contacts # + # remaining_contact_indicators = dot_rel_disp_with_passive_normals <= 0.0 ## + + ''' Remaining penetration indicator determining -- strategy 1 ''' + # remaining_contact_indicators = cur_cur_penetration_depth <= 0.0 ## dot relative passive to active with passive normals ## + ''' Remaining penetration indicator determining -- strategy 2 ''' + remaining_contact_indicators = cur_cur_penetration_depth <= self.minn_dist_threshold_robot_to_obj + + remaining_contact_act_idxes = contact_active_idxes[remaining_contact_indicators] + + # remaining contact act idxes # + + if torch.sum(remaining_contact_indicators.float()).item() > 0.5: + # contact_active_pts, contact_passive_pts, ## remaining cotnact indicators ## + tot_contact_passive_normals.append(cur_fr_contact_passive_normals[remaining_contact_indicators]) + tot_contact_passive_pts.append(cur_fr_contact_passive_pts[remaining_contact_indicators]) ## + tot_contact_active_pts.append(cur_fr_contact_act_pts[remaining_contact_indicators]) ## contact act pts + + tot_contact_active_idxes.append(contact_active_idxes[remaining_contact_indicators]) + # tot_contact_passive_idxes.append(contact_passive_idxes[remaining_contact_indicators]) # # passive idxes # + tot_contact_frictions.append(cur_friction_forces[remaining_contact_indicators]) + tot_canon_contact_passive_pts.append(canon_contact_passive_pts[remaining_contact_indicators]) + tot_canon_contact_passive_normals.append(canon_contact_passive_normals[remaining_contact_indicators]) + + else: + remaining_contact_act_idxes = torch.empty((0,), dtype=torch.long).cuda() ## remaining contact act idxes ## + + # remaining idxes # + + new_in_contact_indicator_robot_to_obj = in_contact_indicator_robot_to_obj.clone() + new_in_contact_indicator_robot_to_obj[remaining_contact_act_idxes] = False + + tot_active_pts_idxes = torch.arange(0, sampled_input_pts.size(0), dtype=torch.long).cuda() + + + if torch.sum(new_in_contact_indicator_robot_to_obj.float()).item() > 0.5: + # + # in_contact_indicator_robot_to_obj, queried_sdf, inter_obj_pts, inter_obj_normals, canon_inter_obj_pts, canon_inter_obj_normals + new_contact_active_pts = 
sampled_input_pts[new_in_contact_indicator_robot_to_obj] + new_canon_contact_passive_pts = canon_inter_obj_pts[new_in_contact_indicator_robot_to_obj] + new_canon_contact_passive_normals = canon_inter_obj_normals[new_in_contact_indicator_robot_to_obj] ## obj normals ## + new_contact_active_idxes = tot_active_pts_idxes[new_in_contact_indicator_robot_to_obj] + + new_cur_fr_contact_passive_normals = torch.matmul( + cur_fr_rot, new_canon_contact_passive_normals.contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() # + + # new cur fr contact passive pts # + new_cur_fr_contact_passive_pts = torch.matmul( + cur_fr_rot, (new_canon_contact_passive_pts - self.center_init_passive_obj_verts.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + cur_fr_trans.unsqueeze(0) + self.center_init_passive_obj_verts.unsqueeze(0) ## passive pts + + + + new_nex_fr_contact_active_pts = nex_sampled_input_pts[new_in_contact_indicator_robot_to_obj] + + + new_cur_fr_contact_passive_to_act = new_nex_fr_contact_active_pts - new_cur_fr_contact_passive_pts + + dot_rel_disp_with_passive_normals = torch.sum( + new_cur_fr_contact_passive_to_act * new_cur_fr_contact_passive_normals, dim=-1 + ) + new_frictions = new_cur_fr_contact_passive_to_act - dot_rel_disp_with_passive_normals.unsqueeze(-1) * new_cur_fr_contact_passive_normals + + + if self.train_residual_friction: + ''' add residual fictions ''' + new_cur_cur_fr_contact_passive_to_act = new_contact_active_pts - new_cur_fr_contact_passive_pts + new_cur_cur_penetration_depth = torch.sum( + new_cur_cur_fr_contact_passive_to_act * new_cur_fr_contact_passive_normals, dim=-1 + ) + # 3 + 3 + 3 + 3 ### active points's current relative position, active point's offset, penetration depth, normal direction + new_friction_net_in_feats = torch.cat( + [new_cur_cur_fr_contact_passive_to_act, new_cur_fr_contact_passive_to_act, new_cur_cur_penetration_depth.unsqueeze(-1), new_cur_fr_contact_passive_normals], dim=-1 + ) + new_residual_frictions = self.friction_network(new_friction_net_in_feats) + + new_residual_frictions_dot_w_normals = torch.sum( + new_residual_frictions * new_cur_fr_contact_passive_normals, dim=-1 + ) + new_residual_frictions = new_residual_frictions - new_residual_frictions_dot_w_normals.unsqueeze(-1) * new_cur_fr_contact_passive_normals + new_frictions = new_frictions + new_residual_frictions + ''' add residual fictions ''' + + if self.train_residual_normal_forces: + contact_normal_forces_in_feats = torch.cat( + [new_cur_cur_fr_contact_passive_to_act, new_cur_fr_contact_passive_to_act, new_cur_cur_penetration_depth.unsqueeze(-1), new_cur_fr_contact_passive_normals], dim=-1 + ) + new_residual_normal_forces = self.contact_normal_force_network(contact_normal_forces_in_feats) + new_residual_normal_forces_dot_w_normals = torch.sum( + new_residual_normal_forces * new_cur_fr_contact_passive_normals, dim=-1 + ) + new_residual_normal_forces = new_residual_normal_forces_dot_w_normals.unsqueeze(-1) * new_cur_fr_contact_passive_normals + tot_residual_normal_forces.append(new_residual_normal_forces) + + + # new_frictions = torch.zeros_like(new_cur_fr_contact_passive_pts) + tot_contact_passive_normals.append(new_cur_fr_contact_passive_normals) + tot_contact_passive_pts.append(new_cur_fr_contact_passive_pts) + tot_contact_active_pts.append(new_contact_active_pts) + tot_contact_active_idxes.append(new_contact_active_idxes) + tot_canon_contact_passive_pts.append(new_canon_contact_passive_pts) + 
tot_canon_contact_passive_normals.append(new_canon_contact_passive_normals) + tot_contact_frictions.append(new_frictions) + + + if len(tot_contact_passive_normals) > 0: + # forces ? # not hard to compute ... # + # passive normals; passive pts # + tot_contact_passive_normals = torch.cat( + tot_contact_passive_normals, dim=0 + ) + tot_contact_passive_pts = torch.cat(tot_contact_passive_pts, dim=0) + tot_contact_active_pts = torch.cat(tot_contact_active_pts, dim=0) + tot_contact_active_idxes = torch.cat(tot_contact_active_idxes, dim=0) + tot_canon_contact_passive_pts = torch.cat(tot_canon_contact_passive_pts, dim=0) + tot_canon_contact_passive_normals = torch.cat(tot_canon_contact_passive_normals, dim=0) + tot_contact_frictions = torch.cat(tot_contact_frictions, dim=0) + if self.train_residual_normal_forces: ## the + tot_residual_normal_forces = torch.cat(tot_residual_normal_forces, dim=0) + + contact_passive_to_active = tot_contact_active_pts - tot_contact_passive_pts + # dot relative passive to active with the passive normals # ## relative + + # this depth should be adjusted according to minn_dist_threshold_robot_to_obj ## + dot_rel_passive_to_active_with_normals = torch.sum( + contact_passive_to_active * tot_contact_passive_normals, dim=-1 ### dot with the passive normals ## + ) + # Adjust the penetration depth used for contact force computing using the distance threshold # + dot_rel_passive_to_active_with_normals = dot_rel_passive_to_active_with_normals - self.minn_dist_threshold_robot_to_obj + # dot with the passive normals ## dot with passive normals ## ## passive normals ## + ### penetration depth * the passive obj normals ### # dot value and with + contact_forces_along_normals = dot_rel_passive_to_active_with_normals.unsqueeze(-1) * tot_contact_passive_normals * contact_spring_ka # dot wiht relative # negative normal directions # + + if self.train_residual_normal_forces: + contact_forces_along_normals = contact_forces_along_normals + tot_residual_normal_forces + + # return the contact pairs and return the contact dicts # + # return the contact pairs and the contact dicts # + # having got the contact pairs -> contact dicts # + # having got the contact pairs -> contact dicts # ## contact spring kb ## + tot_contact_frictions = tot_contact_frictions * contact_spring_kb # change it to spring_kb... 
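Up to the optional residual-network terms and the contact bookkeeping above, the force assembled for each contact is a simple penalty (virtual-spring) model: the penetration depth along the contact normal, shifted by the distance threshold, times a normal stiffness, plus the tangential slip of the active point times a friction stiffness. A condensed sketch of just that model (the diff additionally measures the slip against the next-frame active point and can add learned residual normal/friction forces, both omitted here):

```python
import torch

def spring_contact_forces(act_pts, contact_pts, contact_normals,
                          k_normal, k_friction, dist_threshold=0.0):
    """Penalty-style contact force sketch. All point tensors are (N, 3)."""
    rel = act_pts - contact_pts                                    # contact point -> active point
    depth = (rel * contact_normals).sum(dim=-1) - dist_threshold   # signed gap along the normal
    f_normal = depth.unsqueeze(-1) * contact_normals * k_normal
    slip = rel - (rel * contact_normals).sum(dim=-1, keepdim=True) * contact_normals
    f_friction = slip * k_friction
    return f_normal + f_friction
```

A Coulomb-style cap (||f_friction|| <= mu * ||f_normal||), which the commented-out `penalty_friction_constraint` block earlier hints at, could be layered on top; the active code zeroes that penalty out, so friction here is limited only by its stiffness.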
+ + if pts_frictional_forces is not None: + tot_contact_frictions = pts_frictional_forces[tot_contact_active_idxes] + + # contac_forces_along_normals + upd_contact_pairs_information = { + 'contact_active_idxes': tot_contact_active_idxes.clone().detach(), + 'canon_contact_passive_normals': tot_canon_contact_passive_normals.clone().detach(), + 'canon_contact_passive_pts': tot_canon_contact_passive_pts.clone().detach(), + 'contact_passive_pts': tot_contact_passive_pts.clone().detach(), + } + else: + upd_contact_pairs_information = None + + + + ''' average acitve points weights ''' + if torch.sum(cur_act_weights).item() > 0.5: + cur_act_weights = cur_act_weights / torch.sum(cur_act_weights) + + + # norm_penalty_friction_tangential_forces = torch.norm(penalty_friction_tangential_forces, dim=-1, p=2) + # maxx_norm_penalty_friction_tangential_forces, _ = torch.max(norm_penalty_friction_tangential_forces, dim=-1) + # minn_norm_penalty_friction_tangential_forces, _ = torch.min(norm_penalty_friction_tangential_forces, dim=-1) + # print(f"maxx_norm_penalty_friction_tangential_forces: {maxx_norm_penalty_friction_tangential_forces}, minn_norm_penalty_friction_tangential_forces: {minn_norm_penalty_friction_tangential_forces}") + + # tangetntial forces --- dot with normals # + if not self.use_pre_proj_frictions: # inter obj normals # # if ue proj frictions # + dot_tangential_forces_with_inter_obj_normals = torch.sum(penalty_friction_tangential_forces * inter_obj_normals, dim=-1) ### nn_active_pts x # + penalty_friction_tangential_forces = penalty_friction_tangential_forces - dot_tangential_forces_with_inter_obj_normals.unsqueeze(-1) * inter_obj_normals + + # penalty_friction_tangential_forces = torch.zeros_like(penalty_friction_tangential_forces) + penalty_friction_tangential_forces = tot_contact_frictions + + + + + if upd_contact_pairs_information is not None: + contact_force_d = contact_forces_along_normals # forces along normals # + # contact forces along normals # + self.contact_force_d = contact_force_d + + # penalty_friction_tangential_forces = torch.zeros_like(contact_force_d) + + #### penalty_frictiontangential_forces, tangential_forces #### + # self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.tangential_forces = penalty_friction_tangential_forces + + self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.contact_force_d = contact_force_d + self.penalty_based_friction_forces = penalty_friction_tangential_forces + + self.tot_contact_passive_normals = tot_contact_passive_normals + # penalty dot forces normals # + ''' Penalty dot forces normals ''' + # penalty_dot_forces_normals = dot_forces_normals ** 2 # must in the negative direction of the object normal # + # penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + # penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) # 1) must # 2) must # # + # self.penalty_dot_forces_normals = penalty_dot_forces_normals # + + forces = self.contact_force_d + self.penalty_friction_tangential_forces + + center_point_to_contact_pts = tot_contact_passive_pts - passive_center_point.unsqueeze(0) + # cneter point to contact pts # + # cneter point to contact pts # + torque = torch.cross(center_point_to_contact_pts, forces) + torque = torch.mean(torque, dim=0) + forces = torch.mean(forces, dim=0) ## get rigid acc ## # + else: + # self.contact_force_d = torch.zeros((3,), dtype=torch.float32).cuda() + torque = torch.zeros((3,), 
dtype=torch.float32).cuda() + forces = torch.zeros((3,), dtype=torch.float32).cuda() + self.contact_force_d = torch.zeros((1, 3), dtype=torch.float32).cuda() + self.penalty_friction_tangential_forces = torch.zeros((1, 3), dtype=torch.float32).cuda() + self.penalty_based_friction_forces = torch.zeros((1, 3), dtype=torch.float32).cuda() + + self.tot_contact_passive_normals = torch.zeros((1, 3), dtype=torch.float32).cuda() + + + + + ''' Forces and rigid acss: Strategy and version 1 ''' + # rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc # + + # ###### sampled input pts to center ####### + # if contact_pairs_set is not None: + # inter_obj_pts[contact_active_idxes] = cur_passive_obj_verts[contact_passive_idxes] + + # # center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + + # center_point_to_sampled_pts = inter_obj_pts - passive_center_point.unsqueeze(0) + # ###### sampled input pts to center ####### + + # ###### nearest passive object point to center ####### + # # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) # squeeze(1) # + + # # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) # + # ###### nearest passive object point to center ####### + + # sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # # torque = torch.sum( + # # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # # ) + # torque = torch.sum( + # sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + # ) + + + + + + if self.nn_instances == 1: + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + else: + time_cons = self.time_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant[i_instance](torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + + ## + if self.use_split_params: ## ## friction network should be trained? 
## + # sep_time_constant, sep_torque_time_constant, sep_damping_constant, sep_angular_damping_constant + time_cons = self.sep_time_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + time_cons_2 = self.sep_torque_time_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + # damping_cons = self.sep_damping_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + # damping_cons_2 = self.sep_angular_damping_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + + # time_cons = 0.05 + # time_cons_2 = 0.05 + # time_cons_rot = 0.05 + + + time_cons = 0.005 + time_cons_2 = 0.005 + time_cons_rot = 0.005 + + + time_cons = 0.0005 + time_cons_2 = 0.0005 + time_cons_rot = 0.0005 + + # time_cons = 0.00005 + # time_cons_2 = 0.00005 + # time_cons_rot = 0.00005 + + # time_cons = 0.0005 + # time_cons_2 = 0.0005 + # time_cons_rot = 0.0005 + + + ## not a good ## + # time_cons = 0.005 + # time_cons_2 = 0.005 + # time_cons_rot = 0.005 + + + + obj_mass = self.obj_mass + + obj_mass_value = self.optimizable_obj_mass(torch.zeros((1,), dtype=torch.long).cuda()).view(1) + + obj_mass_value = obj_mass_value ** 2 + + rigid_acc = forces / obj_mass_value # + + damping_coef = 5e2 + + damping_coef = 0.0 + damping_coef_angular = 0.0 + + + + # small clip with not very noticiable # # + + + if self.use_optimizable_params: ## + damping_coef = self.sep_damping_constant(torch.zeros((1,), dtype=torch.long).cuda()).view(1) + damping_coef_angular = self.sep_angular_damping_constant(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + + damping_coef = damping_coef ** 2 + damping_coef_angular = damping_coef_angular ** 2 ## sue the sampiing coef angular and dampoing coef here ## + + if self.use_damping_params_vel: + damping_coef_lin_vel = self.lin_damping_coefs(torch.zeros((1,), dtype=torch.long).cuda()).view(1) + damping_coef_ang_vel = self.ang_damping_coefs(torch.zeros((1,), dtype=torch.long).cuda()).view(1) + damping_coef_lin_vel = damping_coef_lin_vel ** 2 + damping_coef_ang_vel = damping_coef_ang_vel ** 2 + else: + damping_coef_lin_vel = 1.0 + damping_coef_ang_vel = self.ang_vel_damping + + + if input_pts_ts > 0: + # the sampoing for the rigid acc here ? # + rigid_acc = rigid_acc - damping_coef * self.timestep_to_vel[input_pts_ts - 1].detach() ## dam + + + #F the sampoing for the rigid acc here ? # + # rigid_acc = # + # rigid acc = forces # + + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + ##### TMP ###### + # cur_vel = delta_vel # + cur_vel = delta_vel + (1.0 - damping_coef_lin_vel) * self.timestep_to_vel[input_pts_ts - 1].detach() # * damping_cons # + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() # timestep + + + cur_inertia_div_factor = self.inertia_div_factor(torch.zeros((1,), dtype=torch.long).cuda()).view(1) + + + # cur inv inertia is a large value? # # bug free? # ### divide the inv_inertia using the factor 20.0 # + cur_inv_inertia = torch.matmul(torch.matmul(cur_passive_obj_rot, self.I_inv_ref), cur_passive_obj_rot.transpose(1, 0)) / float(20.) + # cur_inv_inertia = torch.matmul(torch.matmul(cur_passive_obj_rot, self.I_inv_ref), cur_passive_obj_rot.transpose(1, 0)) / float(10.) 
## + # cur_inv_inertia = torch.matmul(torch.matmul(cur_passive_obj_rot, self.I_inv_ref), cur_passive_obj_rot.transpose(1, 0)) / float(cur_inertia_div_factor) ## + + cur_inv_inertia = torch.eye(n=3, dtype=torch.float32).cuda() # three values for the inertia? # + + obj_inertia_value = self.obj_inertia(torch.zeros((1,), dtype=torch.long).cuda()).view(3,) + obj_inertia_value = obj_inertia_value ** 2 + # cur_inv_inertia = torch.diag(obj_inertia_value) + cur_inv_inertia = cur_inv_inertia * obj_inertia_value.unsqueeze(0) ## 3 x 3 matrix ## ### the inertia values ## + cur_inv_inertia = torch.matmul(torch.matmul(cur_passive_obj_rot, cur_inv_inertia), cur_passive_obj_rot.transpose(1, 0)) + + torque = torch.matmul(cur_inv_inertia, torque.unsqueeze(-1)).contiguous().squeeze(-1) ### get the torque of the object ### + # + # + if input_pts_ts > 0: # + torque = torque - damping_coef_angular * self.timestep_to_angular_vel[input_pts_ts - 1].detach() + delta_angular_vel = torque * time_cons_rot + + # print(f"torque: {torque}") # + + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + ##### TMP ###### + # cur_angular_vel = delta_angular_vel # + # cur_angular_vel = delta_angular_vel + (1.0 - self.ang_vel_damping) * (self.timestep_to_angular_vel[input_pts_ts - 1].detach()) + # (1.0 - damping_coef_lin_vel) * + cur_angular_vel = delta_angular_vel + (1.0 - damping_coef_ang_vel) * (self.timestep_to_angular_vel[input_pts_ts - 1].detach()) # damping coef ### + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 # / 2 # # \delta_t w^1 # + + # prev # # # ## + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # input pts ts # + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + # cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) # + cur_quaternion = cur_quaternion / torch.norm(cur_quaternion, p=2, dim=-1, keepdim=True) + # angular + # obj_mass # + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + # cur_ 3 no frictions/ # # # + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + + ## quaternion to matrix and + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) # + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion # + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion # timestep # timestep # + + + self.upd_rigid_acc = rigid_acc.clone() + self.upd_rigid_def = cur_upd_rigid_def.clone() + self.upd_optimizable_total_def = cur_optimizable_total_def.clone() + self.upd_quaternion = cur_quaternion.clone() + self.upd_rot_mtx = cur_optimizable_rot_mtx.clone() + self.upd_angular_vel = cur_angular_vel.clone() + self.upd_forces = forces.clone() ## ## + + ## + self.timestep_to_accum_acc[input_pts_ts] = rigid_acc.detach().clone() + + if not fix_obj: + if input_pts_ts == 0 and input_pts_ts not in self.timestep_to_optimizable_total_def: + self.timestep_to_total_def[input_pts_ts] = 
torch.zeros_like(cur_upd_rigid_def) + self.timestep_to_optimizable_total_def[input_pts_ts] = torch.zeros_like(cur_optimizable_total_def) + self.timestep_to_optimizable_quaternion[input_pts_ts] = torch.tensor([1., 0., 0., 0.],dtype=torch.float32).cuda() + self.timestep_to_quaternion[input_pts_ts] = torch.tensor([1., 0., 0., 0.],dtype=torch.float32).cuda() + self.timestep_to_angular_vel[input_pts_ts] = torch.zeros_like(cur_angular_vel).detach() + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, # quaternion + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + } + + return upd_contact_pairs_information + + def update_timestep_to_quantities(self, input_pts_ts, upd_quat, upd_trans): + nex_pts_ts = input_pts_ts + 1 + self.timestep_to_total_def[nex_pts_ts] = upd_trans # .detach().clone().detach() + self.timestep_to_optimizable_total_def[nex_pts_ts] = upd_trans # .detach().clone().detach() + self.timestep_to_optimizable_quaternion[nex_pts_ts] = upd_quat # .detach().clone().detach() + self.timestep_to_quaternion[nex_pts_ts] = upd_quat # .detach().clone().detach() + + # self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = quaternion_to_matrix(upd_quat.detach().clone()).clone().detach() + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = quaternion_to_matrix(upd_quat) # .clone().detach() # the upd quat # + + + + def reset_timestep_to_quantities(self, input_pts_ts): + nex_pts_ts = input_pts_ts + 1 + self.timestep_to_accum_acc[input_pts_ts] = self.upd_rigid_acc.detach() + self.timestep_to_total_def[nex_pts_ts] = self.upd_rigid_def + 
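+ # [annotation] the rotational update mirrors the translational one: the torque is mapped through a
+ # rotated, diagonal, learnable inverse inertia (obj_inertia ** 2), optionally damped by the previous
+ # angular velocity, integrated into cur_angular_vel, and the orientation is advanced with
+ # update_quaternion() followed by re-normalisation. update_timestep_to_quantities() and
+ # reset_timestep_to_quantities() let an outer loop overwrite or restore the state cached for
+ # timestep t+1 from the upd_* buffers saved above.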
self.timestep_to_optimizable_total_def[nex_pts_ts] = self.upd_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = self.upd_quaternion + self.timestep_to_quaternion[nex_pts_ts] = self.upd_quaternion.detach() + + # cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = self.upd_rot_mtx + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = self.upd_angular_vel.detach() + self.timestep_to_point_accs[input_pts_ts] = self.upd_forces.detach() + + + +# +class BendingNetworkActiveForceFieldForwardLagV16(nn.Module): + def __init__(self, + d_in, + multires, + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False, + nn_instances=1, + minn_dist_threshold=0.05, + ): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagV16, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. # + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.static_friction_mu = 1. + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.nn_instances = nn_instances + + self.contact_spring_rest_length = 2. 
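+ # [annotation] BendingNetworkActiveForceFieldForwardLagV16 appears to be another variant of the same
+ # contact / force-field module: a 60-frame window (bending_n_timesteps = window + 10), an optional
+ # positional embedder from get_embedder() when multires > 0, a default contact spring rest length of
+ # 2.0, and a contact distance threshold taken from the minn_dist_threshold argument.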
+ + # self.minn_dist_sampled_pts_passive_obj_thres = 0.05 # minn_dist_threshold ### + self.minn_dist_sampled_pts_passive_obj_thres = minn_dist_threshold + + self.spring_contact_ks_values = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_contact_ks_values.weight) + self.spring_contact_ks_values.weight.data = self.spring_contact_ks_values.weight.data * 0.01 + + self.spring_friction_ks_values = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_friction_ks_values.weight) + self.spring_friction_ks_values.weight.data = self.spring_friction_ks_values.weight.data * 0.001 + + if self.nn_instances == 1: + self.spring_ks_values = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_ks_values.weight) + self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.01 + else: + self.spring_ks_values = nn.ModuleList( + [ + nn.Embedding(num_embeddings=5, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_ks_values in self.spring_ks_values: + torch.nn.init.ones_(cur_ks_values.weight) + cur_ks_values.weight.data = cur_ks_values.weight.data * 0.01 + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + + ## [ \alpha, \beta ] ## + if self.nn_instances == 1: + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. / (778 * 2)) + else: + self.ks_weights = nn.ModuleList( + [ + nn.Embedding(num_embeddings=2, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_ks_weights in self.ks_weights: + torch.nn.init.ones_(cur_ks_weights.weight) # + cur_ks_weights.weight.data[1] = cur_ks_weights.weight.data[1] * (1. 
/ (778 * 2)) + + + # sep_time_constant, sep_torque_time_constant, sep_damping_constant, sep_angular_damping_constant + self.sep_time_constant = self.time_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_time_constant.weight) # + + self.sep_torque_time_constant = self.time_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_torque_time_constant.weight) # + + self.sep_damping_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_damping_constant.weight) # # # # + self.sep_damping_constant.weight.data = self.sep_damping_constant.weight.data * 0.9 + + + self.sep_angular_damping_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_angular_damping_constant.weight) # # # # + self.sep_angular_damping_constant.weight.data = self.sep_angular_damping_constant.weight.data * 0.9 + + + if self.nn_instances == 1: + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + else: + self.time_constant = nn.ModuleList( + [ + nn.Embedding(num_embeddings=3, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_time_constant in self.time_constant: + torch.nn.init.ones_(cur_time_constant.weight) # + + if self.nn_instances == 1: + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + else: + self.damping_constant = nn.ModuleList( + [ + nn.Embedding(num_embeddings=3, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_damping_constant in self.damping_constant: + torch.nn.init.ones_(cur_damping_constant.weight) # # # # + cur_damping_constant.weight.data = cur_damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + + + # self.actuator_friction_forces = nn.Embedding( # actuator's forces # + # num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + + + if nn_instances == 1: + self.actuator_friction_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + else: + self.actuator_friction_forces = nn.ModuleList( + [nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) for _ in range(self.nn_instances) ] + ) + for cur_friction_force_net in self.actuator_friction_forces: + torch.nn.init.zeros_(cur_friction_force_net.weight) # + + + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. 
/ (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": # periodict activation functions # + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. 
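+ # [annotation] all the MLPs here share one initialisation scheme: SIREN-style uniform bounds when the
+ # activation is sin, Kaiming fan-in otherwise, with the last layer of the main network zeroed and its
+ # bias set to 0.2 so the field starts out predicting a constant. weighting_model_ks stores the
+ # (k_a, k_b) pair of the distance weighting model, unnormed_weight = k_a * exp(-d * k_b), used to turn
+ # point-to-object distances into aggregation weights.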
+ self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? # + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} # record the optimizable offset # + self.save_values = {} + # ws_normed, defed_input_pts_sdf, # + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. 
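+ # [annotation] the per-timestep dictionaries above (timestep_to_vel / _total_def / _quaternion /
+ # _angular_vel, ...) cache the simulation state for every frame, and time_quaternions is initialised
+ # to the identity rotation (1, 0, 0, 0) for all 60 frames. spring_qd parameterises the distance
+ # dependent stiffness k_d = q_d / (x - spring_x_min) mentioned in the comments above, and the
+ # four-step recipe listed there (sample active-mesh points, compute per-point forces, weight,
+ # aggregate) is what forward() implements further down.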
+ # torch.nn.init.ones_(self.time_quaternions.weight) # + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + self.obj_sdf_th = None + + + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): # no_grad() + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays # + # + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + # ### + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + + def query_for_sdf(self, cur_pts, cur_frame_transformations): + # + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + # cur_pts: nn_pts x 3 # + # print(f"cur_pts: {cur_pts.size()}, cur_frame_translation: {cur_frame_translation.size()}, cur_frame_rotation: {cur_frame_rotation.size()}") + cur_transformed_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + # v = (v - center) * scale # + # sdf_space_center # + cur_transformed_pts_np = cur_transformed_pts.detach().cpu().numpy() + cur_transformed_pts_np = (cur_transformed_pts_np - np.reshape(self.sdf_space_center, (1, 3))) * self.sdf_space_scale + cur_transformed_pts_np = (cur_transformed_pts_np + 1.) / 2. 
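+ # [annotation] query_for_sdf() maps world-space query points back into the object's rest frame,
+ # p_local = R^T (p - t), then rescales them with sdf_space_center / sdf_space_scale and maps the
+ # result to [0, 1] so the points can be converted into voxel indices of the precomputed SDF grid in
+ # the lines that follow.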
+ cur_transformed_pts_xs = (cur_transformed_pts_np[:, 0] * self.sdf_res).astype(np.int32) # [x, y, z] of the transformed_pts_np # + cur_transformed_pts_ys = (cur_transformed_pts_np[:, 1] * self.sdf_res).astype(np.int32) + cur_transformed_pts_zs = (cur_transformed_pts_np[:, 2] * self.sdf_res).astype(np.int32) + + cur_transformed_pts_xs = np.clip(cur_transformed_pts_xs, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_ys = np.clip(cur_transformed_pts_ys, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_zs = np.clip(cur_transformed_pts_zs, a_min=0, a_max=self.sdf_res - 1) + + + if self.obj_sdf_th is None: + self.obj_sdf_th = torch.from_numpy(self.obj_sdf).float().cuda() + cur_transformed_pts_xs_th = torch.from_numpy(cur_transformed_pts_xs).long().cuda() + cur_transformed_pts_ys_th = torch.from_numpy(cur_transformed_pts_ys).long().cuda() + cur_transformed_pts_zs_th = torch.from_numpy(cur_transformed_pts_zs).long().cuda() + + cur_pts_sdf = batched_index_select(self.obj_sdf_th, cur_transformed_pts_xs_th, 0) + # print(f"After selecting the x-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the y-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the z-axis: {cur_pts_sdf.size()}") + + + # cur_pts_sdf = self.obj_sdf[cur_transformed_pts_xs] + # cur_pts_sdf = cur_pts_sdf[:, cur_transformed_pts_ys] + # cur_pts_sdf = cur_pts_sdf[:, :, cur_transformed_pts_zs] + # cur_pts_sdf = np.diagonal(cur_pts_sdf) + # print(f"cur_pts_sdf: {cur_pts_sdf.shape}") + # # gradient of sdf # + # # the contact force dierection should be the negative direction of the sdf gradient? 
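+ # [annotation] the lookup above is nearest-voxel: indices are truncated with astype(np.int32) and
+ # clamped to [0, sdf_res - 1], so there is no trilinear interpolation and points outside the grid are
+ # snapped to the boundary voxels. batched_index_select then gathers along the x, y and z axes of
+ # self.obj_sdf_th in turn.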
# + # # it seems true # + # # get the cur_pts_sdf value # + # cur_pts_sdf = torch.from_numpy(cur_pts_sdf).float().cuda() + return cur_pts_sdf # # cur_pts_sdf # + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None, i_instance=0, reference_mano_pts=None, sampled_verts_idxes=None, fix_obj=False, contact_pairs_set=None): + #### contact_pairs_set #### + ### from input_pts to new pts ### + # prev_pts_ts = input_pts_ts - 1 # + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion # # + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) # + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) # + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx # + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + # friction_qd = 0.1 # # + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> sampled points # + ori_nns = sampled_input_pts.size(0) + if sampled_verts_idxes is not None: + sampled_input_pts = sampled_input_pts[sampled_verts_idxes] + nn_sampled_input_pts = sampled_input_pts.size(0) + + if nex_pts_ts in timestep_to_active_mesh: + ### disp_sampled_input_pts = nex_sampled_input_pts - sampled_input_pts ### + nex_sampled_input_pts = timestep_to_active_mesh[nex_pts_ts].detach() + else: + nex_sampled_input_pts = timestep_to_active_mesh[input_pts_ts].detach() + nex_sampled_input_pts = nex_sampled_input_pts[sampled_verts_idxes] + + + # ws_normed = torch.ones((sampled_input_pts.size(0),), dtype=torch.float32).cuda() + # ws_normed = ws_normed / float(sampled_input_pts.size(0)) + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 20000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + + + # sampled_input_pts_normals = # + init_passive_obj_verts = timestep_to_passive_mesh[0] + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + # cur_passive_obj_rot, cur_passive_obj_trans # + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + + # + + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = 
torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() ## transform the normals ## + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + # nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh ### the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k # + # forces = friction_force + # ######## vel for frictions ######### # # maintain the contact / continuous contact -> patch contact + # coantacts in previous timesteps -> ### + + # cur actuation # + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # ######### optimize the actuator forces directly ######### + # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) # actuation embedding idxes # + # forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + if friction_forces is None: + if self.nn_instances == 1: + cur_actuation_friction_forces = self.actuator_friction_forces(cur_actuation_embedding_idxes) + else: + cur_actuation_friction_forces = self.actuator_friction_forces[i_instance](cur_actuation_embedding_idxes) + else: + if reference_mano_pts is not None: + ref_mano_pts_nn = reference_mano_pts.size(0) + cur_actuation_embedding_st_idx = ref_mano_pts_nn * input_pts_ts + cur_actuation_embedding_ed_idx = ref_mano_pts_nn * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + # nn_ref_pts x 3 # + # sampled_input_pts # + # r = 0.01 # + threshold_ball_r = 0.01 + dist_input_pts_to_reference_pts = torch.sum( + (sampled_input_pts.unsqueeze(1) - reference_mano_pts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_input_pts_to_reference_pts = torch.sqrt(dist_input_pts_to_reference_pts) + weights_input_to_reference = 0.5 - dist_input_pts_to_reference_pts + weights_input_to_reference[weights_input_to_reference < 0] = 0 + weights_input_to_reference[dist_input_pts_to_reference_pts > threshold_ball_r] = 0 + + minn_dist_input_pts_to_reference_pts, minn_idx_input_pts_to_reference_pts = torch.min(dist_input_pts_to_reference_pts, 
dim=-1) + + weights_input_to_reference[dist_input_pts_to_reference_pts == minn_dist_input_pts_to_reference_pts.unsqueeze(-1)] = 0.1 - dist_input_pts_to_reference_pts[dist_input_pts_to_reference_pts == minn_dist_input_pts_to_reference_pts.unsqueeze(-1)] + + weights_input_to_reference = weights_input_to_reference / torch.clamp(torch.sum(weights_input_to_reference, dim=-1, keepdim=True), min=1e-9) + + # cur_actuation_friction_forces = weights_input_to_reference.unsqueeze(-1) * cur_actuation_friction_forces.unsqueeze(0) # nn_input_pts x nn_ref_pts x 1 xxxx 1 x nn_ref_pts x 3 -> nn_input_pts x nn_ref_pts x 3 + # cur_actuation_friction_forces = cur_actuation_friction_forces.sum(dim=1) + + # cur_actuation_friction_forces * weights_input_to_reference.unsqueeze(-1) + cur_actuation_friction_forces = batched_index_select(cur_actuation_friction_forces, minn_idx_input_pts_to_reference_pts, dim=0) + else: + # cur_actuation_embedding_st_idx = 365428 * input_pts_ts + # cur_actuation_embedding_ed_idx = 365428 * (input_pts_ts + 1) + if sampled_verts_idxes is not None: + cur_actuation_embedding_st_idx = ori_nns * input_pts_ts + cur_actuation_embedding_ed_idx = ori_nns * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + cur_actuation_friction_forces = cur_actuation_friction_forces[sampled_verts_idxes] + else: + cur_actuation_embedding_st_idx = nn_sampled_input_pts * input_pts_ts + cur_actuation_embedding_ed_idx = nn_sampled_input_pts * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + # nn instances # # nninstances # # + if self.nn_instances == 1: + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + else: + ws_alpha = self.ks_weights[i_instance](torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + + # use_penalty_based_friction, use_disp_based_friction # + # ### get the nearest object point to the in-active object ### + # if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): + if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): # contact pairs set # # contact pairs set ## + # for each calculated contacts, calculate the current contact points reversed transformed to the contact local frame # + # use the reversed transformed active point and the previous rest contact point position to calculate the contact friction force # + # transform the force to the current contact frame # + # x_h^{cur} - x_o^{cur} --- add the frictions for the hand + # add the friction force onto the object point # # contact point position -> nn_contacts x 3 # + contact_active_point_pts, contact_point_position, (contact_active_idxes, contact_passive_idxes), 
contact_frame_pose = contact_pairs_set + # contact active pos and contact passive pos # contact_active_pos; contact_passive_pos; # + # contact_active_pos = sampled_input_pts[contact_active_idxes] # should not be inter_obj_pts... # + # contact_passive_pos = cur_passive_obj_verts[contact_passive_idxes] + # to the passive obje ###s + minn_idx_sampled_pts_to_passive_obj[contact_active_idxes] = contact_passive_idxes + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj = batched_index_select(dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1).squeeze(1) + # ### get the nearest object point to the in-active object ### + + + + # sampled_input_pts # + # inter_obj_pts # + # inter_obj_normals + + # nn_sampledjpoints # + # cur_passive_obj_ns # # inter obj normals # # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + cur_passive_obj_verts_pts_idxes = torch.arange(0, cur_passive_obj_verts.size(0), dtype=torch.long).cuda() # + inter_passive_obj_pts_idxes = cur_passive_obj_verts_pts_idxes[minn_idx_sampled_pts_to_passive_obj] + + # inter_obj_normals # + inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts.detach() + dot_inter_obj_pts_to_sampled_pts_normals = torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) + + # contact_pairs_set # + + ###### penetration penalty strategy v1 ###### + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 + # penetrating_depth = -1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1) + # penetrating_depth_penalty = penetrating_depth[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): # get the penetration penalties # + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v1 ###### + + + ###### penetration penalty strategy v2 ###### + # if input_pts_ts > 0: + # prev_active_obj = timestep_to_active_mesh[input_pts_ts - 1].detach() + # if sampled_verts_idxes is not None: + # prev_active_obj = prev_active_obj[sampled_verts_idxes] + # disp_prev_to_cur = sampled_input_pts - prev_active_obj + # disp_prev_to_cur = torch.norm(disp_prev_to_cur, dim=-1, p=2) + # penetrating_depth_penalty = disp_prev_to_cur[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + # else: + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v2 ###### + + + ###### penetration penalty strategy v3 ###### + # if input_pts_ts > 0: + # cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + # cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + # queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + # penetrating_indicator = queried_sdf < 0 + # if sampled_verts_idxes is not None: + # prev_active_obj = prev_active_obj[sampled_verts_idxes] + # disp_prev_to_cur = sampled_input_pts - prev_active_obj + # disp_prev_to_cur = torch.norm(disp_prev_to_cur, dim=-1, p=2) + # penetrating_depth_penalty = 
disp_prev_to_cur[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # else: + # # cur_rot = torch.eye(3, dtype=torch.float32).cuda() + # # cur_trans = torch.zeros((3,), dtype=torch.float32).cuda() + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v3 ###### + + # ws_beta; 10 # # sum over the forces but not the weighted sum... # + ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) # ws_alpha # + ####### sharp the weights ####### + + # minn_dist_sampled_pts_passive_obj_thres = 0.05 + # # minn_dist_sampled_pts_passive_obj_thres = 0.001 + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 + minn_dist_sampled_pts_passive_obj_thres = self.minn_dist_sampled_pts_passive_obj_thres + + + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + # penetrating # + ### penetration strategy v4 #### + + if input_pts_ts > 0: + cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + penetrating_indicator = queried_sdf < 0 + else: + penetrating_indicator = torch.zeros_like(dot_inter_obj_pts_to_sampled_pts_normals).bool() + + + # if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): + # penetrating + + ### nearest #### + ''' decide forces via kinematics statistics ''' + ### nearest #### + # rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + # dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + + + # cannot be adapted to this easily # + # what's a better realization way? # + + + # dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. * dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] # + dist_sampled_pts_to_passive_obj[penetrating_indicator] = -1. * dist_sampled_pts_to_passive_obj[penetrating_indicator] + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + + in_contact_indicator = dist_sampled_pts_to_passive_obj <= minn_dist_sampled_pts_passive_obj_thres + + # in_contact_indicator + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + + # ws_unnormed = ws_beta * torch.exp(-1. 
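+ # [annotation] penetration strategy v4 above: for t > 0 the hand points are tested against the object
+ # SDF under the current pose (queried_sdf < 0 marks penetration); penetrating points get a negated
+ # distance so they always fall below the contact threshold, in_contact_indicator keeps points whose
+ # distance is <= minn_dist_sampled_pts_passive_obj_thres, and points beyond the threshold get zero
+ # aggregation weight.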
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + + # penetrating_indicator = + + # penetrating + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 # + self.penetrating_indicator = penetrating_indicator + penetration_proj_ks = 0 - dot_inter_obj_pts_to_sampled_pts_normals + ### penetratio nproj penalty ### + penetration_proj_penalty = penetration_proj_ks * (-1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1)) + self.penetrating_depth_penalty = penetration_proj_penalty[penetrating_indicator].mean() + if torch.isnan(self.penetrating_depth_penalty): # get the penetration penalties # + self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + penetrating_points = sampled_input_pts[penetrating_indicator] + penetration_proj_k_to_robot = 1.0 # 0.7 + # penetration_proj_k_to_robot = 0.01 + penetration_proj_k_to_robot = 0.0 + penetrating_forces_allpts = penetration_proj_ks.unsqueeze(-1) * inter_obj_normals.detach() * penetration_proj_k_to_robot + + self.penetrating_forces_allpts = penetrating_forces_allpts + penetrating_forces = penetrating_forces_allpts[penetrating_indicator] + self.penetrating_forces = penetrating_forces # + self.penetrating_points = penetrating_points # + ### penetration strategy v4 #### # another mophology # + + # maintain the forces # + + # # contact_pairs_set # # + + # for contact pair in the contact_pair_set, get the contact pair -> the mesh index of the passive object and the active object # + # the orientation of the contact frame # + # original contact point position of the contact pair # + # original orientation of the contact frame # + ##### get previous contact information ###### + # for cur_contact_pair in contact_pairs_set: + # # cur_contact_pair = (contact point position, contact frame orientation) # + # # contact_point_positon -> should be the contact position transformed to the local contact frame # + # contact_point_positon, (contact_passive_idx, contact_active_idx), contact_frame_pose = cur_contact_pair # + # # contact_point_positon of the contact pair # + # cur_active_pos = sampled_input_pts[contact_active_idx] # passive_position # + # # (original passive position - current passive position) * K_f = penalty based friction force # # # # + # cur_passive_pos = inter_obj_pts[contact_passive_idx] # active_position # + # # (the transformed passive position) # + # # + # # # the continuous active and passive pos ## + # # # the continuous active and passive pos ## + # # the continuous active and passive pos ## + # contact_frame_orientation, contact_frame_translation = contact_frame_pose # # set the orientation and the contact frame translation + # # orientation, translation # + # cur_inv_transformed_active_pos = torch.matmul( + # contact_frame_orientation.contiguous().transpose(1, 0).contiguous(), (cur_active_pos - contact_frame_translation.unsqueeze(0)).transpose(1, 0) + # ) + + + + # should be the contact penalty frictions added onto the passive object verts # + # use the frictional force to mainatian the contact here # + + # maintain the contact and calculate the penetrating forces and points for each timestep and then use the displacemnet to calculate the penalty based friction forces # + + + if self.nn_instances == 1: # spring ks values + # contact ks values # # if we set a fixed k value here # + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), 
dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + else: + contact_spring_ka = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values[i_instance](torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + + + ###### the contact force decided by the rest_length ###### # not very sure ... # + # contact_force_d = contact_spring_ka * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) # + contact_spring_kb * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 2 + contact_spring_kc * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 3 # + ###### the contact force decided by the rest_length ###### + + + ##### the contact force decided by the theshold ###### # realted to the distance threshold and the HO distance # + contact_force_d = contact_spring_ka * (self.minn_dist_sampled_pts_passive_obj_thres - dist_sampled_pts_to_passive_obj) + ###### the contact force decided by the threshold ###### + + ###### Get the tangential forces via optimizable forces ###### # dot along the normals ## + cur_actuation_friction_forces_along_normals = torch.sum(cur_actuation_friction_forces * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + tangential_vel = cur_actuation_friction_forces - cur_actuation_friction_forces_along_normals + ###### Get the tangential forces via optimizable forces ###### + + # cur actuation friction forces along normals # + + ###### Get the tangential forces via tangential velocities ###### + # vel_sampled_pts_along_normals = torch.sum(vel_sampled_pts * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + # tangential_vel = vel_sampled_pts - vel_sampled_pts_along_normals + ###### Get the tangential forces via tangential velocities ###### + + tangential_forces = tangential_vel * tangential_ks # tangential forces # + contact_force_d_scalar = contact_force_d.clone() # + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals) + + norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts, nnsampledpts # + penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. 
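+ # [annotation] contact model used here: the normal force is a linear spring on the distance threshold,
+ #   f_n = contact_spring_ka * (d_thres - d) * (-n),
+ # the candidate tangential force is the optimizable per-point friction force with its normal component
+ # projected out (scaled by tangential_ks), and penalty_friction_constraint is a soft Coulomb-cone term
+ # that penalises ||f_t|| > mu * ||f_n|| with mu = static_friction_mu and is zero inside the cone.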
+ penalty_friction_constraint = torch.mean(penalty_friction_constraint) + self.penalty_friction_constraint = penalty_friction_constraint # penalty friction + contact_force_d_scalar = norm_along_normals_forces.clone() + # penalty friction constraints # + penalty_friction_tangential_forces = torch.zeros_like(contact_force_d) + ''' Get the contact information that should be maintained''' + if contact_pairs_set is not None: # contact pairs set # # contact pairs set ## + # for each calculated contacts, calculate the current contact points reversed transformed to the contact local frame # + # use the reversed transformed active point and the previous rest contact point position to calculate the contact friction force # + # transform the force to the current contact frame # + # x_h^{cur} - x_o^{cur} --- add the frictions for the hand + # add the friction force onto the object point # # contact point position -> nn_contacts x 3 # + contact_active_point_pts, contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + # contact active pos and contact passive pos # contact_active_pos; contact_passive_pos; # + contact_active_pos = sampled_input_pts[contact_active_idxes] # should not be inter_obj_pts... # + contact_passive_pos = cur_passive_obj_verts[contact_passive_idxes] + + ''' Penalty based contact force v2 ''' + contact_frame_orientations, contact_frame_translations = contact_frame_pose + transformed_prev_contact_active_pos = torch.matmul( + contact_frame_orientations.contiguous(), contact_active_point_pts.unsqueeze(-1) + ).squeeze(-1) + contact_frame_translations + transformed_prev_contact_point_position = torch.matmul( + contact_frame_orientations.contiguous(), contact_point_position.unsqueeze(-1) + ).squeeze(-1) + contact_frame_translations + diff_transformed_prev_contact_passive_to_active = transformed_prev_contact_active_pos - transformed_prev_contact_point_position + # cur_contact_passive_pos_from_active = contact_passive_pos + diff_transformed_prev_contact_passive_to_active + cur_contact_passive_pos_from_active = contact_active_pos - diff_transformed_prev_contact_passive_to_active + + friction_k = 1.0 + # penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + # penalty_based_friction_forces = friction_k * (contact_active_pos - transformed_prev_contact_active_pos) + + # + # penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + penalty_based_friction_forces = friction_k * (cur_contact_passive_pos_from_active - contact_passive_pos) + ''' Penalty based contact force v2 ''' + + ''' Penalty based contact force v1 ''' + ###### Contact frame orientations and translations ###### + # contact_frame_orientations, contact_frame_translations = contact_frame_pose # (nn_contacts x 3 x 3) # (nn_contacts x 3) # + # # cur_passive_obj_verts # + # inv_transformed_contact_active_pos = torch.matmul( + # contact_frame_orientations.contiguous().transpose(2, 1).contiguous(), (contact_active_pos - contact_frame_translations).contiguous().unsqueeze(-1) + # ).squeeze(-1) # nn_contacts x 3 # + # inv_transformed_contact_passive_pos = torch.matmul( # contact frame translations # ## nn_contacts x 3 ## # # + # contact_frame_orientations.contiguous().transpose(2, 1).contiguous(), (contact_passive_pos - contact_frame_translations).contiguous().unsqueeze(-1) + # ).squeeze(-1) + # # inversely transformed cotnact active and passive pos # + + # # inv_transformed_contact_active_pos, 
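+ # [annotation] 'Penalty based contact force v2' above appears to work as follows: the contact-frame
+ # coordinates of the recorded active point and contact point are mapped into world space with the
+ # current contact frame (R x + t), their difference is subtracted from the current active position to
+ # obtain a target position for the passive contact point, and penalty_based_friction_forces is
+ # friction_k times the gap between that target and the current passive position, i.e. a spring that
+ # tries to preserve the original contact correspondence.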
inv_transformed_contact_passive_pos # + # ### contact point position ### # + # ### use the passive point disp ### + # # disp_active_pos = (inv_transformed_contact_active_pos - contact_point_position) # nn_contacts x 3 # + # ### use the active point disp ### + # # disp_active_pos = (inv_transformed_contact_active_pos - contact_active_point_pts) + # disp_active_pos = (inv_transformed_contact_active_pos - contact_active_point_pts) + # ### friction_k is equals to 1.0 ### + # friction_k = 1. + # # use the disp_active_pose as the penalty based friction forces # # nn_contacts x 3 # + # penalty_based_friction_forces = disp_active_pos * friction_k + + # # get the penalty based friction forces # + # penalty_based_friction_forces = torch.matmul( + # contact_frame_orientations.contiguous(), penalty_based_friction_forces.unsqueeze(-1) + # ).contiguous().squeeze(-1).contiguous() + ''' Penalty based contact force v1 ''' + + #### strategy 1: implement the dynamic friction forces #### + # dyn_friction_k = 1.0 # together with the friction_k # + # # dyn_friction_k # + # dyn_friction_force = dyn_friction_k * contact_force_d # nn_sampled_pts x 3 # + # dyn_friction_force # + # dyn_friction_force = # + # tangential velocities # # tangential velocities # + #### strategy 1: implement the dynamic friction forces #### + + #### strategy 2: do not use the dynamic friction forces #### + # equalt to use a hard selector to screen the friction forces # + # + # contact_force_d # # contact_force_d # + + valid_contact_force_d_scalar = contact_force_d_scalar[contact_active_idxes] + + + # penalty_based_friction_forces # + norm_penalty_based_friction_forces = torch.norm(penalty_based_friction_forces, dim=-1, p=2) + # valid penalty friction forces # # valid contact force d scalar # + valid_penalty_friction_forces_indicator = norm_penalty_based_friction_forces <= (valid_contact_force_d_scalar * self.static_friction_mu * 500) + valid_penalty_friction_forces_indicator[:] = True + + + summ_valid_penalty_friction_forces_indicator = torch.sum(valid_penalty_friction_forces_indicator.float()) + + # print(f"summ_valid_penalty_friction_forces_indicator: {summ_valid_penalty_friction_forces_indicator}") + # print(f"penalty_based_friction_forces: {penalty_based_friction_forces.size()}, summ_valid_penalty_friction_forces_indicator: {summ_valid_penalty_friction_forces_indicator}") + # tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.005 # * 1000. + + + contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + + # penalty_friction_tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * contact_spring_kb + + penalty_friction_tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * contact_friction_spring_cur + + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * contact_spring_kb + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. 
# based friction + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.02 + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.05 # + + else: + contact_active_idxes = None + self.contact_active_idxes = contact_active_idxes + valid_penalty_friction_forces_indicator = None + # tangential forces with inter obj normals # -> + dot_tangential_forces_with_inter_obj_normals = torch.sum(penalty_friction_tangential_forces * inter_obj_normals, dim=-1) ### nn_active_pts x # + penalty_friction_tangential_forces = penalty_friction_tangential_forces - dot_tangential_forces_with_inter_obj_normals.unsqueeze(-1) * inter_obj_normals + tangential_forces_clone = tangential_forces.clone() + # tangential_forces = torch.zeros_like(tangential_forces) ### + + # if contact_active_idxes is not None: + # self.contact_active_idxes = contact_active_idxes + # self.valid_penalty_friction_forces_indicator = valid_penalty_friction_forces_indicator # + # # print(f"here {summ_valid_penalty_friction_forces_indicator}") + # # tangential_forces[self.contact_active_idxes][self.valid_penalty_friction_forces_indicator] = tangential_forces_clone[self.contact_active_idxes][self.valid_penalty_friction_forces_indicator] + # contact_active_idxes_indicators = torch.ones((tangential_forces.size(0)), dtype=torch.float).cuda().bool() + # contact_active_idxes_indicators[:] = True + # contact_active_idxes_indicators[self.contact_active_idxes] = False + + # tangential_forces[contact_active_idxes_indicators] = 0. + + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # tangential forces # + # maxx_norm_tangential, _ = torch.max(norm_tangential_forces, dim=-1) + # minn_norm_tangential, _ = torch.min(norm_tangential_forces, dim=-1) + # print(f"maxx_norm_tangential: {maxx_norm_tangential}, minn_norm_tangential: {minn_norm_tangential}") + + # two + ### ## get new contacts ## ### + tot_contact_point_position = [] + tot_contact_active_point_pts = [] + tot_contact_active_idxes = [] + tot_contact_passive_idxes = [] + tot_contact_frame_rotations = [] + tot_contact_frame_translations = [] + + if torch.sum(in_contact_indicator.float()) > 0.5: # in contact indicator # + cur_in_contact_passive_pts = inter_obj_pts[in_contact_indicator] + cur_in_contact_passive_normals = inter_obj_normals[in_contact_indicator] + cur_in_contact_active_pts = sampled_input_pts[in_contact_indicator] # in_contact_active_pts # + + # in contact active pts # + # sampled input pts # + # cur_passive_obj_rot, cur_passive_obj_trans # + # cur_passive_obj_trans # + # cur_in_contact_activE_pts # + # in_contact_passive_pts # + cur_contact_frame_rotations = cur_passive_obj_rot.unsqueeze(0).repeat(cur_in_contact_passive_pts.size(0), 1, 1).contiguous() + cur_contact_frame_translations = cur_in_contact_passive_pts.clone() # + #### contact farme active points ##### -> ## + cur_contact_frame_active_pts = torch.matmul( + cur_contact_frame_rotations.contiguous().transpose(1, 2).contiguous(), (cur_in_contact_active_pts - cur_contact_frame_translations).contiguous().unsqueeze(-1) + ).squeeze(-1) ### cur_contact_frame_active_pts ### + cur_contact_frame_passive_pts = torch.matmul( + cur_contact_frame_rotations.contiguous().transpose(1, 2).contiguous(), (cur_in_contact_passive_pts - cur_contact_frame_translations).contiguous().unsqueeze(-1) + ).squeeze(-1) ### cur_contact_frame_active_pts ### + cur_in_contact_active_pts_all = torch.arange(0, sampled_input_pts.size(0)).long().cuda() + cur_in_contact_active_pts_all = 
cur_in_contact_active_pts_all[in_contact_indicator] + cur_inter_passive_obj_pts_idxes = inter_passive_obj_pts_idxes[in_contact_indicator] + # contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose + # cur_contact_frame_pose = (cur_contact_frame_rotations, cur_contact_frame_translations) + # contact_point_positions = cur_contact_frame_passive_pts # + # contact_active_idxes, cotnact_passive_idxes # + # contact_point_position = cur_contact_frame_passive_pts + # contact_active_idxes = cur_in_contact_active_pts_all + # contact_passive_idxes = cur_inter_passive_obj_pts_idxes + tot_contact_active_point_pts.append(cur_contact_frame_active_pts) + tot_contact_point_position.append(cur_contact_frame_passive_pts) # contact frame points + tot_contact_active_idxes.append(cur_in_contact_active_pts_all) # active_pts_idxes + tot_contact_passive_idxes.append(cur_inter_passive_obj_pts_idxes) # passive_pts_idxes + tot_contact_frame_rotations.append(cur_contact_frame_rotations) # rotations + tot_contact_frame_translations.append(cur_contact_frame_translations) # translations + + + ## + # ####### if contact_pairs_set is not None and torch.sum(valid_penalty_friction_forces_indicator.float()) > 0.5: ######## + # if contact_pairs_set is not None and torch.sum(valid_penalty_friction_forces_indicator.float()) > 0.5: + # # contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + # prev_contact_active_point_pts = contact_active_point_pts[valid_penalty_friction_forces_indicator] + # prev_contact_point_position = contact_point_position[valid_penalty_friction_forces_indicator] + # prev_contact_active_idxes = contact_active_idxes[valid_penalty_friction_forces_indicator] + # prev_contact_passive_idxes = contact_passive_idxes[valid_penalty_friction_forces_indicator] + # prev_contact_frame_rotations = contact_frame_orientations[valid_penalty_friction_forces_indicator] + # prev_contact_frame_translations = contact_frame_translations[valid_penalty_friction_forces_indicator] + + # tot_contact_active_point_pts.append(prev_contact_active_point_pts) + # tot_contact_point_position.append(prev_contact_point_position) + # tot_contact_active_idxes.append(prev_contact_active_idxes) + # tot_contact_passive_idxes.append(prev_contact_passive_idxes) + # tot_contact_frame_rotations.append(prev_contact_frame_rotations) + # tot_contact_frame_translations.append(prev_contact_frame_translations) + ####### if contact_pairs_set is not None and torch.sum(valid_penalty_friction_forces_indicator.float()) > 0.5: ######## + + + + if len(tot_contact_frame_rotations) > 0: + upd_contact_active_point_pts = torch.cat(tot_contact_active_point_pts, dim=0) + upd_contact_point_position = torch.cat(tot_contact_point_position, dim=0) + upd_contact_active_idxes = torch.cat(tot_contact_active_idxes, dim=0) + upd_contact_passive_idxes = torch.cat(tot_contact_passive_idxes, dim=0) + upd_contact_frame_rotations = torch.cat(tot_contact_frame_rotations, dim=0) + upd_contact_frame_translations = torch.cat(tot_contact_frame_translations, dim=0) + upd_contact_pairs_information = [upd_contact_active_point_pts, upd_contact_point_position, (upd_contact_active_idxes, upd_contact_passive_idxes), (upd_contact_frame_rotations, upd_contact_frame_translations)] + else: + upd_contact_pairs_information = None + + + + # # previus + if self.use_penalty_based_friction and self.use_disp_based_friction: + disp_friction_tangential_forces = nex_sampled_input_pts - sampled_input_pts + + 
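+ # Displacement-based friction (used when use_penalty_based_friction and use_disp_based_friction are both set):
+ # the active-point displacement to the next frame is scaled by the per-timestep friction stiffness
+ # and then projected onto the contact tangent plane (normal component removed) below.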
contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + disp_friction_tangential_forces = disp_friction_tangential_forces * contact_friction_spring_cur + disp_friction_tangential_forces_dot_normals = torch.sum( + disp_friction_tangential_forces * inter_obj_normals, dim=-1 + ) + disp_friction_tangential_forces = disp_friction_tangential_forces - disp_friction_tangential_forces_dot_normals.unsqueeze(-1) * inter_obj_normals + + penalty_friction_tangential_forces = disp_friction_tangential_forces + + + # # tangential forces # + # tangential_forces = tangential_forces * mult_weights.unsqueeze(-1) # # + ### strict cosntraints ### + if self.use_penalty_based_friction: + forces = penalty_friction_tangential_forces + contact_force_d # tantential forces and contact force d # + else: + # print(f"not using use_penalty_based_friction...") + tangential_forces_norm = torch.sum(tangential_forces ** 2, dim=-1) + pos_tangential_forces = tangential_forces[tangential_forces_norm > 1e-5] + # print(pos_tangential_forces) + forces = tangential_forces + contact_force_d # tantential forces and contact force d # + # forces = penalty_friction_tangential_forces + contact_force_d # tantential forces and contact force d # + ''' decide forces via kinematics statistics ''' + + ''' Decompose forces and calculate penalty froces ''' # + # penalty_dot_forces_normals, penalty_friction_constraint # # contraints # # + # # get the forces -> decompose forces # + dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + # forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + # tangential_forces = forces - forces_along_normals # tangential forces # # tangential forces ### tangential forces ## + # penalty_friction_tangential_forces = force - + + + #### penalty_friction_tangential_forces, tangential_forces #### + self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.tangential_forces = tangential_forces + + + penalty_dot_forces_normals = dot_forces_normals ** 2 + penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) # 1) must # 2) must # + self.penalty_dot_forces_normals = penalty_dot_forces_normals # + + + rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc # + + ###### sampled input pts to center ####### + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) # squeeze(1) # + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) # + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + if self.nn_instances == 1: + 
time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + else: + time_cons = self.time_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant[i_instance](torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + # cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # + + if not fix_obj: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + 
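+ # The rotation matrix derived from the updated quaternion is cached under nex_pts_ts below;
+ # together with the translation and quaternion stored above, it defines the rigid state
+ # that the next timestep integrates from.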
self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + } + + return upd_contact_pairs_information + + + ### forward; + def forward2(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None, i_instance=0, reference_mano_pts=None, sampled_verts_idxes=None, fix_obj=False, contact_pairs_set=None): + #### contact_pairs_set #### + ### from input_pts to new pts ### + # prev_pts_ts = input_pts_ts - 1 # + ##### tiemstep to active mesh ##### + + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion # # + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) # + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) # + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx # + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + ''' Kinematics transformations from acc and torques ''' + + # friction_qd = 0.1 # + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> sampled points # + ori_nns = sampled_input_pts.size(0) + if sampled_verts_idxes is not None: + sampled_input_pts 
= sampled_input_pts[sampled_verts_idxes] + nn_sampled_input_pts = sampled_input_pts.size(0) + + if nex_pts_ts in timestep_to_active_mesh: ## + ### disp_sampled_input_pts = nex_sampled_input_pts - sampled_input_pts ### + nex_sampled_input_pts = timestep_to_active_mesh[nex_pts_ts].detach() + else: + nex_sampled_input_pts = timestep_to_active_mesh[input_pts_ts].detach() + nex_sampled_input_pts = nex_sampled_input_pts[sampled_verts_idxes] + + # ws_normed = torch.ones((sampled_input_pts.size(0),), dtype=torch.float32).cuda() + # ws_normed = ws_normed / float(sampled_input_pts.size(0)) + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 20000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + + + init_passive_obj_verts = timestep_to_passive_mesh[0] + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + # cur_passive_obj_rot, cur_passive_obj_trans # + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + + + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center # obj_center # + + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] # + # nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] # + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh ### the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k # + # forces = friction_force + # ######## vel for frictions ######### # # maintain the contact / continuous contact -> patch contact + # coantacts in previous timesteps -> ### + + # cur actuation # + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # ######### optimize the actuator forces directly ######### + # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) # actuation embedding idxes # + # forces = cur_actuation_forces + # ######### optimize the actuator forces directly 
######### + + + # nn instances # # nninstances # # + if self.nn_instances == 1: + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + else: + ws_alpha = self.ks_weights[i_instance](torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + if self.use_sqrt_dist: + dist_sampled_pts_to_passive_obj = torch.norm( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)), dim=-1, p=2 + ) + else: + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + + # ### add the sqrt for calculate the l2 distance ### + # dist_sampled_pts_to_passive_obj = torch.sqrt(dist_sampled_pts_to_passive_obj) ### + + + # dist_sampled_pts_to_passive_obj = torch.norm( # nn_sampled_pts x nn_passive_pts + # (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)), dim=-1, p=2 + # ) + + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + + + # inte robj normals at the current frame # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + cur_passive_obj_verts_pts_idxes = torch.arange(0, cur_passive_obj_verts.size(0), dtype=torch.long).cuda() # + inter_passive_obj_pts_idxes = cur_passive_obj_verts_pts_idxes[minn_idx_sampled_pts_to_passive_obj] + + # inter_obj_normals # + inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts.detach() + # dot_inter_obj_pts_to_sampled_pts_normals = torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) + + + ###### penetration penalty strategy v1 ###### + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 + # penetrating_depth = -1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1) + # penetrating_depth_penalty = penetrating_depth[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): # get the penetration penalties # + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v1 ###### + + # ws_beta; 10 # # sum over the forces but not the weighted sum... # + ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha * 10) # ws_alpha # + ####### sharp the weights ####### + + # minn_dist_sampled_pts_passive_obj_thres = 0.05 + # # minn_dist_sampled_pts_passive_obj_thres = 0.001 + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 + minn_dist_sampled_pts_passive_obj_thres = self.minn_dist_sampled_pts_passive_obj_thres + + + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) e + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + # penetrating # + ### penetration strategy v4 #### + + if input_pts_ts > 0: + cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + penetrating_indicator = queried_sdf < 0 + else: + # penetrating_indicator = torch.zeros_like(dot_inter_obj_pts_to_sampled_pts_normals).bool() + penetrating_indicator = torch.zeros((sampled_input_pts.size(0),), dtype=torch.bool).cuda().bool() + + # if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): + # penetrating + + ### nearest #### + ''' decide forces via kinematics statistics ''' + ### nearest #### + # rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + # dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + + penetrating_indicator_mult_factor = torch.ones_like(penetrating_indicator).float() + penetrating_indicator_mult_factor[penetrating_indicator] = -1. + + + # dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. * dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] # + # dist_sampled_pts_to_passive_obj[penetrating_indicator] = -1. * dist_sampled_pts_to_passive_obj[penetrating_indicator] + # dist_sampled_pts_to_passive_obj[penetrating_indicator] = -1. * dist_sampled_pts_to_passive_obj[penetrating_indicator] + dist_sampled_pts_to_passive_obj = dist_sampled_pts_to_passive_obj * penetrating_indicator_mult_factor + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + + ## minn_dist_sampled_pts_passive_obj_thres + in_contact_indicator = dist_sampled_pts_to_passive_obj <= minn_dist_sampled_pts_passive_obj_thres + + + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + ws_unnormed = torch.ones_like(ws_unnormed) + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + + + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 # + self.penetrating_indicator = penetrating_indicator + cur_inter_obj_normals = inter_obj_normals.clone().detach() + penetration_proj_ks = 0 - torch.sum(inter_obj_pts_to_sampled_pts * cur_inter_obj_normals, dim=-1) + ### penetratio nproj penalty ### + # inter_obj_pts_to_sampled_pts # + + penetration_proj_penalty = penetration_proj_ks * (-1 * torch.sum(inter_obj_pts_to_sampled_pts * cur_inter_obj_normals, dim=-1)) + self.penetrating_depth_penalty = penetration_proj_penalty[penetrating_indicator].mean() + if torch.isnan(self.penetrating_depth_penalty): # get the penetration penalties # + self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + penetrating_points = sampled_input_pts[penetrating_indicator] + # penetration_proj_k_to_robot = 1.0 + # penetration_proj_k_to_robot = 0.01 + penetration_proj_k_to_robot = self.penetration_proj_k_to_robot + # penetration_proj_k_to_robot = 0.0 + penetrating_forces = penetration_proj_ks.unsqueeze(-1) * cur_inter_obj_normals * penetration_proj_k_to_robot + penetrating_forces = penetrating_forces[penetrating_indicator] + self.penetrating_forces = penetrating_forces # + self.penetrating_points = penetrating_points # + ### penetration strategy v4 #### # another mophology # + + + + if self.nn_instances == 1: # spring ks values + # contact ks values # # if we set a fixed k value here # + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + else: + contact_spring_ka = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values[i_instance](torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + + contact_spring_ka = self.spring_contact_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + + ##### the contact force decided by the theshold ###### # realted to the distance threshold and the HO distance # + contact_force_d = contact_spring_ka * (self.minn_dist_sampled_pts_passive_obj_thres - dist_sampled_pts_to_passive_obj) + ###### the contact force decided by the threshold ###### + + + + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals) + + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts, nnsampledpts # + # penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + # penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. 
+ # penalty_friction_constraint = torch.mean(penalty_friction_constraint) + self.penalty_friction_constraint = torch.zeros((1,), dtype=torch.float32).cuda().mean() # penalty friction + # contact_force_d_scalar = norm_along_normals_forces.clone() + contact_force_d_scalar = norm_along_normals_forces.clone() + + + # penalty friction constraints # + penalty_friction_tangential_forces = torch.zeros_like(contact_force_d) + + + ''' Get the contact information that should be maintained''' + if contact_pairs_set is not None: + contact_active_point_pts, contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + # contact active pos and contact passive pos # contact_active_pos; contact_passive_pos; # + contact_active_pos = sampled_input_pts[contact_active_idxes] + contact_passive_pos = cur_passive_obj_verts[contact_passive_idxes] + + ''' Penalty based contact force v2 ''' + contact_frame_orientations, contact_frame_translations = contact_frame_pose + transformed_prev_contact_active_pos = torch.matmul( + contact_frame_orientations.contiguous(), contact_active_point_pts.unsqueeze(-1) + ).squeeze(-1) + contact_frame_translations + transformed_prev_contact_point_position = torch.matmul( + contact_frame_orientations.contiguous(), contact_point_position.unsqueeze(-1) + ).squeeze(-1) + contact_frame_translations + # transformed prev contact active pose # + diff_transformed_prev_contact_passive_to_active = transformed_prev_contact_active_pos - transformed_prev_contact_point_position + ### + # cur_contact_passive_pos_from_active = contact_passive_pos + diff_transformed_prev_contact_passive_to_active + cur_contact_passive_pos_from_active = contact_active_pos - diff_transformed_prev_contact_passive_to_active + + friction_k = 1.0 + friction_k = 0.01 + friction_k = 0.001 + friction_k = 0.001 + friction_k = 1.0 + # penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + # penalty_based_friction_forces = friction_k * (contact_active_pos - transformed_prev_contact_active_pos) + + # contact passive posefrom active ## + penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + # penalty_based_friction_forces = friction_k * (cur_contact_passive_pos_from_active - contact_passive_pos) + + + # a good way to optiize the actions? # + dist_contact_active_pos_to_passive_pose = torch.sum( + (contact_active_pos - contact_passive_pos) ** 2, dim=-1 + ) + dist_contact_active_pos_to_passive_pose = torch.sqrt(dist_contact_active_pos_to_passive_pose) + + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 0.1 # how many contacts to keep # + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 0.2 + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 0.3 + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 0.5 + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 1.0 + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 1000.0 # + ### contact maintaning dist thres ### # + remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= self.contact_maintaining_dist_thres + ''' Penalty based contact force v2 ''' + + ### hwo to produce the cotact force and how to produce the frictional forces # + ### optimized + # tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * 1000. 
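+ # Static/dynamic friction switch (implemented below): the spring-based friction force is kept
+ # while its magnitude stays within contact_friction_static_mu times the contact normal force;
+ # contacts exceeding this bound are treated as sliding and their friction is rescaled onto the cone boundary.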
+ # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.005 # * 1000. + + # ### spring_friction_ks_values ### # + # spring_friction_ks_values # + + ### TODO: how to check the correctness of the switching between the static friction and the dynamic friction ### + # contact friction spring cur # + contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + # use the relative scale of the friction force and thejcoantact force to decide the remaining contact indicator # + ##### contact active penalty based friction forces -> spring_k * relative displacement ##### + contact_active_penalty_based_friction_forces = penalty_based_friction_forces * contact_friction_spring_cur + contact_active_penalty_based_friction_forces_norm = torch.norm(contact_active_penalty_based_friction_forces, p=2, dim=-1) + # contact_active_penalty_based_friction_forces # + # #### contact_force_d_scalar_ #### # + contact_active_force_d_scalar = contact_force_d_scalar[contact_active_idxes] + #### # contact_friction_static_mu # #### + contact_friction_static_mu = 1000. #### + remaining_contact_indicator = contact_active_penalty_based_friction_forces_norm <= contact_friction_static_mu * contact_active_force_d_scalar # + ### not_remaining_contacts ### + not_remaining_contacts = contact_active_penalty_based_friction_forces_norm > contact_friction_static_mu * contact_active_force_d_scalar + + contact_active_penalty_based_friction_forces_dir = contact_active_penalty_based_friction_forces / torch.clamp(contact_active_penalty_based_friction_forces_norm.unsqueeze(-1), min=1e-8) + dyn_contact_active_penalty_based_friction_forces = contact_active_penalty_based_friction_forces_dir * (contact_friction_static_mu * contact_active_force_d_scalar).unsqueeze(-1) + contact_active_penalty_based_friction_forces[not_remaining_contacts] = dyn_contact_active_penalty_based_friction_forces[not_remaining_contacts] # correctnesss # + ### TODO: how to check the correctness of the switching between the static friction and the dynamic friction ### + + ### TODO: + + + penalty_friction_tangential_forces[contact_active_idxes] = penalty_based_friction_forces * contact_friction_spring_cur # * 0.1 + + + ''' update contact_force_d ''' + ##### the contact force decided by the theshold ###### # realted to the distance threshold and the HO distance + if self.use_sqrt_dist: + dist_cur_active_to_passive = torch.norm( + (contact_active_pos - contact_passive_pos), dim=-1, p=2 + ) + else: + dist_cur_active_to_passive = torch.sum( + (contact_active_pos - contact_passive_pos) ** 2, dim=-1 + ) + + # ### add the sqrt for calculate the l2 distance ### + # dist_cur_active_to_passive = torch.sqrt(dist_cur_active_to_passive) + + # dist_cur_active_to_passive = torch.norm( + # (contact_active_pos - contact_passive_pos), dim=-1, p=2 + # ) + + + penetrating_indicator_mult_factor = torch.ones_like(penetrating_indicator).float() + penetrating_indicator_mult_factor[penetrating_indicator] = -1. + + cur_penetrating_indicator_mult_factor = penetrating_indicator_mult_factor[contact_active_idxes] + + dist_cur_active_to_passive = dist_cur_active_to_passive * cur_penetrating_indicator_mult_factor + + # dist_cur_active_to_passive[penetrating_indicator[contact_active_idxes]] = -1. 
* dist_cur_active_to_passive[penetrating_indicator[contact_active_idxes]] # + + # dist -- contact_d # # spring_ka -> force scales # # spring_ka -> force scales # # + cur_contact_d = contact_spring_ka * (self.minn_dist_sampled_pts_passive_obj_thres - dist_cur_active_to_passive) + + # contact_force_d_scalar = contact_force_d.clone() # + cur_contact_d = cur_contact_d.unsqueeze(-1) * (-1. * cur_passive_obj_ns[contact_passive_idxes]) + + inter_obj_normals[contact_active_idxes] = cur_passive_obj_ns[contact_passive_idxes] + + contact_force_d[contact_active_idxes] = cur_contact_d + ''' update contact_force_d ''' + + cur_act_weights[contact_active_idxes] = 1. + ws_unnormed[contact_active_idxes] = 1. + else: + contact_active_idxes = None + self.contact_active_idxes = contact_active_idxes + valid_penalty_friction_forces_indicator = None + penalty_based_friction_forces = None + # tangential forces with inter obj normals # -> #### + if torch.sum(cur_act_weights).item() > 0.5: + cur_act_weights = cur_act_weights / torch.sum(cur_act_weights) + + # penalty based # + + # norm_penalty_friction_tangential_forces = torch.norm(penalty_friction_tangential_forces, dim=-1, p=2) + # maxx_norm_penalty_friction_tangential_forces, _ = torch.max(norm_penalty_friction_tangential_forces, dim=-1) + # minn_norm_penalty_friction_tangential_forces, _ = torch.min(norm_penalty_friction_tangential_forces, dim=-1) + # print(f"maxx_norm_penalty_friction_tangential_forces: {maxx_norm_penalty_friction_tangential_forces}, minn_norm_penalty_friction_tangential_forces: {minn_norm_penalty_friction_tangential_forces}") + + # tangetntial forces --- dot with normals # + dot_tangential_forces_with_inter_obj_normals = torch.sum(penalty_friction_tangential_forces * inter_obj_normals, dim=-1) ### nn_active_pts x # + penalty_friction_tangential_forces = penalty_friction_tangential_forces - dot_tangential_forces_with_inter_obj_normals.unsqueeze(-1) * inter_obj_normals + + + # penalty_based_friction_forces # + # norm_penalty_friction_tangential_forces = torch.norm(penalty_friction_tangential_forces, dim=-1, p=2) + # # valid penalty friction forces # # valid contact force d scalar # + # maxx_norm_penalty_friction_tangential_forces, _ = torch.max(norm_penalty_friction_tangential_forces, dim=-1) + # minn_norm_penalty_friction_tangential_forces, _ = torch.min(norm_penalty_friction_tangential_forces, dim=-1) + # print(f"[After proj.] maxx_norm_penalty_friction_tangential_forces: {maxx_norm_penalty_friction_tangential_forces}, minn_norm_penalty_friction_tangential_forces: {minn_norm_penalty_friction_tangential_forces}") + + + # tangential_forces_clone = tangential_forces.clone() + # tangential_forces = torch.zeros_like(tangential_forces) ### + + # if contact_active_idxes is not None: + # self.contact_active_idxes = contact_active_idxes + # self.valid_penalty_friction_forces_indicator = valid_penalty_friction_forces_indicator # + # # print(f"here {summ_valid_penalty_friction_forces_indicator}") + # # tangential_forces[self.contact_active_idxes][self.valid_penalty_friction_forces_indicator] = tangential_forces_clone[self.contact_active_idxes][self.valid_penalty_friction_forces_indicator] + # contact_active_idxes_indicators = torch.ones((tangential_forces.size(0)), dtype=torch.float).cuda().bool() + # contact_active_idxes_indicators[:] = True + # contact_active_idxes_indicators[self.contact_active_idxes] = False + + # tangential_forces[contact_active_idxes_indicators] = 0. 
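+ # Contact-set update (below): previously tracked contact pairs flagged as remaining are carried over,
+ # and points that newly fall within the distance threshold are added as fresh pairs, each storing its
+ # active/passive anchor points in a local frame attached to the passive object at the contact point.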
+ + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # tangential forces # + # maxx_norm_tangential, _ = torch.max(norm_tangential_forces, dim=-1) + # minn_norm_tangential, _ = torch.min(norm_tangential_forces, dim=-1) + # print(f"maxx_norm_tangential: {maxx_norm_tangential}, minn_norm_tangential: {minn_norm_tangential}") + + ### ## get new contacts ## ### + tot_contact_point_position = [] + tot_contact_active_point_pts = [] + tot_contact_active_idxes = [] + tot_contact_passive_idxes = [] + tot_contact_frame_rotations = [] + tot_contact_frame_translations = [] + + + if contact_pairs_set is not None: # contact + if torch.sum(remaining_contact_indicator.float()) > 0.5: + # contact_active_point_pts, contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + remaining_contact_active_point_pts = contact_active_point_pts[remaining_contact_indicator] + remaining_contact_point_position = contact_point_position[remaining_contact_indicator] + remaining_contact_active_idxes = contact_active_idxes[remaining_contact_indicator] + remaining_contact_passive_idxes = contact_passive_idxes[remaining_contact_indicator] + remaining_contact_frame_rotations = contact_frame_orientations[remaining_contact_indicator] + remaining_contact_frame_translations = contact_frame_translations[remaining_contact_indicator] + tot_contact_point_position.append(remaining_contact_point_position) + tot_contact_active_point_pts.append(remaining_contact_active_point_pts) + tot_contact_active_idxes.append(remaining_contact_active_idxes) + tot_contact_passive_idxes.append(remaining_contact_passive_idxes) + tot_contact_frame_rotations.append(remaining_contact_frame_rotations) + tot_contact_frame_translations.append(remaining_contact_frame_translations) + + # remaining_contact_active_idxes + in_contact_indicator[remaining_contact_active_idxes] = False + + + + if torch.sum(in_contact_indicator.float()) > 0.5: # in contact indicator # + cur_in_contact_passive_pts = inter_obj_pts[in_contact_indicator] + # cur_in_contact_passive_normals = inter_obj_normals[in_contact_indicator] + cur_in_contact_active_pts = sampled_input_pts[in_contact_indicator] # in_contact_active_pts # + + + cur_contact_frame_rotations = cur_passive_obj_rot.unsqueeze(0).repeat(cur_in_contact_passive_pts.size(0), 1, 1).contiguous() + cur_contact_frame_translations = cur_in_contact_passive_pts.clone() # + #### contact farme active points ##### -> ## + cur_contact_frame_active_pts = torch.matmul( + cur_contact_frame_rotations.contiguous().transpose(1, 2).contiguous(), (cur_in_contact_active_pts - cur_contact_frame_translations).contiguous().unsqueeze(-1) + ).squeeze(-1) ### cur_contact_frame_active_pts ### + cur_contact_frame_passive_pts = torch.matmul( + cur_contact_frame_rotations.contiguous().transpose(1, 2).contiguous(), (cur_in_contact_passive_pts - cur_contact_frame_translations).contiguous().unsqueeze(-1) + ).squeeze(-1) ### cur_contact_frame_active_pts ### + cur_in_contact_active_pts_all = torch.arange(0, sampled_input_pts.size(0)).long().cuda() + cur_in_contact_active_pts_all = cur_in_contact_active_pts_all[in_contact_indicator] + cur_inter_passive_obj_pts_idxes = inter_passive_obj_pts_idxes[in_contact_indicator] + # contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose + # cur_contact_frame_pose = (cur_contact_frame_rotations, cur_contact_frame_translations) + # contact_point_positions = cur_contact_frame_passive_pts # + # contact_active_idxes, 
cotnact_passive_idxes # + # contact_point_position = cur_contact_frame_passive_pts + # contact_active_idxes = cur_in_contact_active_pts_all + # contact_passive_idxes = cur_inter_passive_obj_pts_idxes + tot_contact_active_point_pts.append(cur_contact_frame_active_pts) + tot_contact_point_position.append(cur_contact_frame_passive_pts) # contact frame points + tot_contact_active_idxes.append(cur_in_contact_active_pts_all) # active_pts_idxes + tot_contact_passive_idxes.append(cur_inter_passive_obj_pts_idxes) # passive_pts_idxes + tot_contact_frame_rotations.append(cur_contact_frame_rotations) # rotations + tot_contact_frame_translations.append(cur_contact_frame_translations) # translations + + + + if len(tot_contact_frame_rotations) > 0: + upd_contact_active_point_pts = torch.cat(tot_contact_active_point_pts, dim=0) + upd_contact_point_position = torch.cat(tot_contact_point_position, dim=0) + upd_contact_active_idxes = torch.cat(tot_contact_active_idxes, dim=0) + upd_contact_passive_idxes = torch.cat(tot_contact_passive_idxes, dim=0) + upd_contact_frame_rotations = torch.cat(tot_contact_frame_rotations, dim=0) + upd_contact_frame_translations = torch.cat(tot_contact_frame_translations, dim=0) + upd_contact_pairs_information = [upd_contact_active_point_pts, upd_contact_point_position, (upd_contact_active_idxes, upd_contact_passive_idxes), (upd_contact_frame_rotations, upd_contact_frame_translations)] + else: + upd_contact_pairs_information = None + + + + if self.use_penalty_based_friction and self.use_disp_based_friction: + disp_friction_tangential_forces = nex_sampled_input_pts - sampled_input_pts + + contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + disp_friction_tangential_forces = disp_friction_tangential_forces * contact_friction_spring_cur + disp_friction_tangential_forces_dot_normals = torch.sum( + disp_friction_tangential_forces * inter_obj_normals, dim=-1 + ) + disp_friction_tangential_forces = disp_friction_tangential_forces - disp_friction_tangential_forces_dot_normals.unsqueeze(-1) * inter_obj_normals + + penalty_friction_tangential_forces = disp_friction_tangential_forces + + + # # tangential forces # + # tangential_forces = tangential_forces * mult_weights.unsqueeze(-1) # # + ### strict cosntraints ### + if self.use_penalty_based_friction: + forces = penalty_friction_tangential_forces + contact_force_d # tantential forces and contact force d # + else: + # print(f"not using use_penalty_based_friction...") + tangential_forces_norm = torch.sum(tangential_forces ** 2, dim=-1) + pos_tangential_forces = tangential_forces[tangential_forces_norm > 1e-5] + # print(pos_tangential_forces) + forces = tangential_forces + contact_force_d # tantential forces and contact force d # + # forces = penalty_friction_tangential_forces + contact_force_d # tantential forces and contact force d # + ''' decide forces via kinematics statistics ''' + + ''' Decompose forces and calculate penalty froces ''' # + # penalty_dot_forces_normals, penalty_friction_constraint # # contraints # # + # # get the forces -> decompose forces # + dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + # forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + # tangential_forces = forces - forces_along_normals # tangential forces # # tangential forces ### tangential forces ## + # penalty_friction_tangential_forces = force - + + + #### 
penalty_friction_tangential_forces, tangential_forces #### + self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.tangential_forces = penalty_friction_tangential_forces + + self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.contact_force_d = contact_force_d + self.penalty_based_friction_forces = penalty_based_friction_forces + + + penalty_dot_forces_normals = dot_forces_normals ** 2 + penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) # 1) must # 2) must # + self.penalty_dot_forces_normals = penalty_dot_forces_normals # + + + rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc # + + ###### sampled input pts to center ####### + if contact_pairs_set is not None: + inter_obj_pts[contact_active_idxes] = cur_passive_obj_verts[contact_passive_idxes] + + # center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + + center_point_to_sampled_pts = inter_obj_pts - passive_center_point.unsqueeze(0) + + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) # squeeze(1) # + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) # + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + if self.nn_instances == 1: + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + else: + time_cons = self.time_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant[i_instance](torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + # sep_time_constant, sep_torque_time_constant, sep_damping_constant, sep_angular_damping_constant + time_cons = self.sep_time_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + time_cons_2 = self.sep_torque_time_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + damping_cons = self.sep_damping_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + damping_cons_2 = self.sep_angular_damping_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + + + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = 
delta_vel + else: + ##### TMP ###### + # cur_vel = delta_vel + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() # + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + ##### TMP ###### + # cur_angular_vel = delta_angular_vel + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + # cur_ + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def # update the current rigid def using the offset and the cur_rigid_def ## # + # curupd + # if update_tot_def: # update rigid def # + + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # + + if not fix_obj: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': 
{cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + } + + return upd_contact_pairs_information + + + + +class BendingNetworkActiveForceFieldForwardLagRoboV13(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagRoboV13, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.static_friction_mu = 1. + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.contact_spring_rest_length = 2. 
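# A minimal standalone sketch of the spring-based contact force assembled in the
# forward pass above: the normal (contact) term is a polynomial spring in the gap
# (contact_spring_rest_length - d) pointing along the inward surface normal, and the
# tangential (friction) term scales the tangential part of the per-point friction
# direction. `k_contact`, `k_b`, `k_c`, `k_tan` stand in for the spring_ks_values
# slots; the shapes and default values are assumptions for illustration only.
import torch

def sketch_contact_forces(d, normals, friction_vel,
                          k_contact=0.01, k_b=0.01, k_c=0.01, k_tan=0.01,
                          rest_length=2.0):
    # d: (N,) signed point-to-object distances; normals: (N, 3) unit normals
    # friction_vel: (N, 3) optimizable per-point friction directions
    delta = rest_length - d                                   # spring elongation
    contact_mag = k_contact * delta + k_b * delta ** 2 + k_c * delta ** 3
    contact_force = contact_mag.unsqueeze(-1) * (-normals)    # push along -normal
    # keep only the tangential component of the friction direction
    vel_along_n = (friction_vel * normals).sum(-1, keepdim=True) * normals
    tangential_force = k_tan * (friction_vel - vel_along_n)
    return contact_force + tangential_force

# toy usage with random points
if __name__ == "__main__":
    n = torch.nn.functional.normalize(torch.randn(8, 3), dim=-1)
    f = sketch_contact_forces(torch.rand(8), n, torch.randn(8, 3))
    print(f.shape)  # torch.Size([8, 3])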
+ self.spring_ks_values = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_ks_values.weight) + self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.01 + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. / (778 * 2)) + + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + self.actuator_friction_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. 
/ (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? 
# + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. + # torch.nn.init.ones_(self.time_quaternions.weight) # + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None): + ### from input_pts to new pts ### + # prev_pts_ts = input_pts_ts - 1 # + + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) + + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 # + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + + + + friction_qd = 0.1 + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> + + + ws_normed = torch.ones((sampled_input_pts.size(0),), dtype=torch.float32).cuda() + ws_normed = ws_normed / float(sampled_input_pts.size(0)) + m = Categorical(ws_normed) + nn_sampled_input_pts = 5000 + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = sampled_input_pts[sampled_input_pts_idx] ### sampled input pts #### + + + + # sampled_input_pts_normals = # # sampled # # # + init_passive_obj_verts = timestep_to_passive_mesh[0] # get the passive object point # + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, 
init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() ## transform the normals ## + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + # passive obj center # + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + # active # + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] # active mesh # + # nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + cur_active_mesh = cur_active_mesh[sampled_input_pts_idx] + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # cur actuation # embedding st idx # + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # ######### optimize the actuator forces directly ######### + # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) + # forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + if friction_forces is None: + ###### get the friction forces ##### + cur_actuation_friction_forces = self.actuator_friction_forces(cur_actuation_embedding_idxes) + else: + cur_actuation_embedding_st_idx = 365428 * input_pts_ts + cur_actuation_embedding_ed_idx = 365428 * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + cur_actuation_friction_forces = cur_actuation_friction_forces[sampled_input_pts_idx] ## sample ## + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + # cur_passive_obj_ns # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] ### nn_sampled_pts x 3 -> the normal direction of the nearest passive object point ### + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + + ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha * 10.) + ####### sharp the weights ####### + + minn_dist_sampled_pts_passive_obj_thres = 0.05 + # minn_dist_sampled_pts_passive_obj_thres = 2. + # minn_dist_sampled_pts_passive_obj_thres = 0.001 + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + ### + ''' decide forces via kinematics statistics ''' + rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. * dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + # contact_force_d = -contact_spring_ka * (dist_sampled_pts_to_passive_obj - self.contact_spring_rest_length) # + contact_force_d = contact_spring_ka * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) # + contact_spring_kb * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 2 + contact_spring_kc * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 3 + # vel_sampled_pts = nex_active_mesh - cur_active_mesh + tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + ###### Get the tangential forces via optimizable forces ###### + cur_actuation_friction_forces_along_normals = torch.sum(cur_actuation_friction_forces * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + tangential_vel = cur_actuation_friction_forces - cur_actuation_friction_forces_along_normals + ###### Get the tangential forces via optimizable forces ###### + + ###### Get the tangential forces via tangential velocities ###### + # vel_sampled_pts_along_normals = torch.sum(vel_sampled_pts * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + # tangential_vel = vel_sampled_pts - vel_sampled_pts_along_normals + ###### Get the tangential forces via tangential velocities ###### + + tangential_forces = tangential_vel * tangential_ks + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. 
* inter_obj_normals) + + norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts ## + penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. + penalty_friction_constraint = torch.mean(penalty_friction_constraint) + # + self.penalty_friction_constraint = penalty_friction_constraint + + + ### strict cosntraints ### + # mult_weights = torch.ones_like(norm_along_normals_forces).detach() + # hard_selector = norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces + # hard_selector = hard_selector.detach() + # mult_weights[hard_selector] = self.static_friction_mu * norm_along_normals_forces.detach()[hard_selector] / norm_tangential_forces.detach()[hard_selector] + # ### change to the strict constraint ### + # # tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces] = tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces] / norm_tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces].unsqueeze(-1) * self.static_friction_mu * norm_along_normals_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces].unsqueeze(-1) + # ### change to the strict constraint ### + + # # tangential forces # + # tangential_forces = tangential_forces * mult_weights.unsqueeze(-1) + ### strict cosntraints ### + + forces = tangential_forces + contact_force_d + ''' decide forces via kinematics statistics ''' + + ''' Decompose forces and calculate penalty froces ''' + # penalty_dot_forces_normals, penalty_friction_constraint # + # # get the forces -> decompose forces # + dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + tangential_forces = forces - forces_along_normals # tangential forces # ## tangential forces ## + + penalty_dot_forces_normals = dot_forces_normals ** 2 + penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) + self.penalty_dot_forces_normals = penalty_dot_forces_normals + + + + rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc ## rigid acc ## + + + + ###### sampled input pts to center ####### + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() # ## + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = 
torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + # cur_delta_quaternion = + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = 
self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + # 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? # 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + # 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return + + + + +class BendingNetworkActiveForceFieldForwardLagV14(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagV14, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.static_friction_mu = 1. 
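# A minimal sketch of the two soft contact constraints computed in the forward pass
# above, assuming per-point forces `forces` and unit normals `normals` are given:
# 1) contact forces should push into the surface (dot(force, normal) <= 0), and
# 2) the tangential force should stay inside the Coulomb cone |f_t| <= mu * |f_n|,
#    with mu = static_friction_mu (1.0 here). Function and argument names are
#    illustrative, not part of the patch.
import torch

def sketch_contact_penalties(forces, normals, mu=1.0):
    dot_fn = (forces * normals).sum(dim=-1)                   # (N,)
    f_n = dot_fn.unsqueeze(-1) * normals                      # normal component
    f_t = forces - f_n                                        # tangential component
    # 1) penalize any component along +normal (pulling away from the object)
    penalty_normal = torch.mean(torch.clamp(dot_fn, min=0.0) ** 2)
    # 2) penalize friction outside the cone; zero inside the cone
    excess = torch.norm(f_t, dim=-1) - mu * torch.norm(f_n, dim=-1)
    penalty_friction = torch.mean(torch.clamp(excess, min=0.0) ** 2)
    return penalty_normal, penalty_friction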
+ + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.gravity_acc = 9.8 + self.gravity_dir = torch.tensor([0., 0., -1]).float().cuda() + self.passive_obj_mass = 1. + self.passive_obj_inertia = ... + self.passive_obj_inertia_inv = ... + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + + self.contact_spring_rest_length = 2. + self.spring_ks_values = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_ks_values.weight) + self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.5 + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. / (778 * 2)) + + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + self.actuator_friction_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. 
/ (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # weighting model ks # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. 
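# A minimal sketch of the distance-based aggregation used in the forward pass (the
# "unormed_weight = k_a exp{-d * k_b}" model noted above): per-point weights decay
# exponentially with the point-to-object distance, are cut off beyond a small
# threshold, and are then used to aggregate per-point forces into one rigid
# acceleration and one torque about the object center. The 0.05 threshold and the
# factor of 10 follow the forward pass; `alpha`/`beta` stand for the ks_weights slots.
import torch

def sketch_aggregate(forces, pts, dists, center, alpha=1.0, beta=1.0, thresh=0.05):
    # forces, pts: (N, 3); dists: (N,) distances to the passive object surface
    w = beta * torch.exp(-dists * alpha * 10.0)                 # unnormalized weights
    w = torch.where(dists > thresh, torch.zeros_like(w), w)     # drop far points
    rigid_acc = torch.sum(forces * w.unsqueeze(-1), dim=0)      # (3,)
    arms = pts - center.unsqueeze(0)                            # lever arms to center
    torque = torch.sum(torch.cross(arms, forces, dim=-1) * w.unsqueeze(-1), dim=0)
    return rigid_acc, torque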
+ self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? # + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. + # torch.nn.init.ones_(self.time_quaternions.weight) # + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + + + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True): + ### from input_pts to new pts ### + # wieghting force field # + # prev_pts_ts = input_pts_ts - 1 + + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) + + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 # + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + + friction_qd = 0.1 + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> + # sampled_input_pts_normals = timesteptopassivemehsn + init_passive_obj_verts = timestep_to_passive_mesh[0] + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() ## transform the normals ## + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + # active # + cur_active_mesh = 
timestep_to_active_mesh[input_pts_ts] + nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # ######### optimize the actuator forces directly ######### + # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) + # forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + + ###### get the friction forces ##### + cur_actuation_friction_forces = self.actuator_friction_forces(cur_actuation_embedding_idxes) + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + # cur_passive_obj_ns # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] ### nn_sampled_pts x 3 -> the normal direction of the nearest passive object point ### + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + + ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) + ####### sharp the weights ####### + + minn_dist_sampled_pts_passive_obj_thres = 0.05 + # minn_dist_sampled_pts_passive_obj_thres = 0.001 + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + ### + ''' decide forces via kinematics statistics ''' + rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. * dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_force_d = -contact_spring_ka * (dist_sampled_pts_to_passive_obj - self.contact_spring_rest_length) # + vel_sampled_pts = nex_active_mesh - cur_active_mesh + tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + ###### Get the tangential forces via optimizable forces ###### + cur_actuation_friction_forces_along_normals = torch.sum(cur_actuation_friction_forces * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + tangential_vel = cur_actuation_friction_forces - cur_actuation_friction_forces_along_normals + ###### Get the tangential forces via optimizable forces ###### + + ###### Get the tangential forces via tangential velocities ###### + # vel_sampled_pts_along_normals = torch.sum(vel_sampled_pts * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + # tangential_vel = vel_sampled_pts - vel_sampled_pts_along_normals + ###### Get the tangential forces via tangential velocities ###### + tangential_forces = tangential_vel * tangential_ks + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals) + forces = tangential_forces + contact_force_d + ''' decide forces via kinematics statistics ''' + # + + + ''' Decompose forces and calculate penalty froces ''' + # penalty_dot_forces_normals, penalty_friction_constraint # + # # get the forces -> decompose forces # + # dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + # forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + # tangential_forces = forces - forces_along_normals # tangential forces # ## tangential forces ## + + # penalty_dot_forces_normals = dot_forces_normals ** 2 + # penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + # penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) + + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + # norm_along_normals_forces = torch.norm(forces_along_normals, dim=-1, p=2) # nn_sampled_pts ## + # penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + # penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. 
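The block above assembles a per-point contact force from two parts: a normal component that behaves like a spring about contact_spring_rest_length and acts along the inward object normal, and a tangential component obtained by removing from the optimizable friction vectors their projection onto that normal. A minimal, self-contained sketch of this decomposition (the constants are placeholders for the learned spring_ks_values entries, not the trained values):

import torch

def contact_forces(signed_dist, normals, friction_vecs, contact_k=0.01, tangent_k=0.01, rest_len=2.0):
    # signed_dist: (S,) signed point-to-object distances; normals, friction_vecs: (S, 3)
    normal_mag = -contact_k * (signed_dist - rest_len)            # spring about the rest length
    normal_force = normal_mag.unsqueeze(-1) * (-normals)          # push along the inward normal
    along_n = (friction_vecs * normals).sum(-1, keepdim=True) * normals
    tangential_force = (friction_vecs - along_n) * tangent_k      # keep only the in-plane part
    return tangential_force + normal_force                        # (S, 3), matches `forces` above
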
+ # penalty_friction_constraint = torch.mean(penalty_friction_constraint) + # self.penalty_dot_forces_normals = penalty_dot_forces_normals + # self.penalty_friction_constraint = penalty_friction_constraint + + + ''' Integrate all rigid forces, including the contact force and the gravity force ''' + tot_contact_force = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) + tot_gravity_force = self.passive_obj_mass * self.gravity_acc * self.gravity_dir + rigid_force = tot_contact_force + tot_gravity_force + rigid_acc = rigid_force / self.passive_obj_mass + + # rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc + + + ###### sampled input pts to center ####### + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() # ## + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) + ###### nearest passive object point to center ####### + + # + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + # I^-1 = R_cur I_ref^{-1} R_cur^T + cur_inertia_inv = torch.matmul( + cur_passive_obj_rot, torch.matmul(self.passive_obj_inertia_inv, cur_passive_obj_rot.transpose(1, 0).contiguous()) ### passive obj rot transpose + ) + torque = torch.matmul(cur_inertia_inv, torque.unsqueeze(-1)).squeeze(-1) ### torque # ## + # torque # + + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + # cur_delta_quaternion = + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], 
cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + # 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + # 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return + + ''' Deform input points via the passive rigid deformations ''' + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + # defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # # self.timestep_to_ori_input_pts = {} + # # self.timestep_to_ori_input_pts_sdf = {} + # # ori_input_pts, ori_input_pts_sdf #### input_pts #### + # ori_input_pts = input_pts.clone().detach() + # ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points via the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, # + # prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. * defed_input_pts_sdf.detach() * ws_alpha) # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + ''' Calculate weights for deformed input points ''' + + + # optimizable point weights with fixed spring rules # + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = self.nn_uniformly_sampled_pts + #### uniformly_sampled_pts: nn_sampled_pts x 3 #### + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + # use weighting_network to get weights of those sampled pts # + # expanded_prev_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() + # expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) # if we do not have a kinematics observation? 
# + + expanded_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() ### get + expanded_pts_ts = expanded_pts_ts + input_pts_ts + input_latents = self.bending_latent(expanded_pts_ts) + x = torch.cat([uniformly_sampled_pts, input_latents], dim=-1) + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < self.cur_window_size // 2): + cur_network = self.weighting_network + else: + cur_network = self.split_weighting_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([uniformly_sampled_pts, x], -1) + # x: nn_uniformly_sampled_pts x 1 weights # + x = x.squeeze(-1) + ws_normed = F.softmax(x, dim=0) #### calculate the softmax as weights # + + ### total def copy ## + # prev_rigid_def = self.timestep_to_total_def_copy[prev_pts_ts] # .unsqueeze(0) + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts].detach() + # # + # prev_quaternion = self.timestep_to_quaternion[prev_pts_ts].detach() # + # prev_rot_mtx = quaternion_to_matrix(prev_quaternion) # prev_quaternion + # # + # defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + # defed_uniformly_sampled_pts = torch.matmul(defed_uniformly_sampled_pts, prev_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### inversely rotate the sampled pts # + # defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + # # defed_uniformly_sampled_pts_sdf: nn_sampled_pts # + # minn_sampled_sdf, minn_sampled_sdf_pts_idx = torch.min(defed_uniformly_sampled_pts_sdf, dim=0) ## the pts_idx ## + # passive_center_point = uniformly_sampled_pts[minn_sampled_sdf_pts_idx] ## center of the passive object ## + + + cur_passive_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() + cur_passive_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_rot_mtx = quaternion_to_matrix(cur_passive_quaternion) + + init_passive_obj_verts = timestep_to_passive_mesh[0].detach() + + cur_passive_obj_verts = torch.matmul(init_passive_obj_verts, cur_rot_mtx) + cur_passive_trans.unsqueeze(0) ## nn_pts x 3 ## + passive_center_point = cur_passive_obj_verts.mean(0) + + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. 
* defed_uniformly_sampled_pts_sdf.detach() * ws_alpha * 100) # nn_pts # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) + nn_sampled_input_pts = 20000 + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + # defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + # ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + ws_normed_sampled = ws_normed[sampled_input_pts_idx] + + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + + ''' ### Use points from passive mesh ### ''' + # sampled_input_pts = prev_passive_mesh.clone() + # # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts = sampled_input_pts - self.timestep_to_total_def_copy[prev_pts_ts].unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + # # weighting model via the distance # # defed input pts sdf # + # # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # # distances # the kappa # + # self.weighting_model_ks = nn.Embedding( # k_a and k_b # + # num_embeddings=2, embedding_dim=1 + # ) + # self.spring_rest_length = 2. # + # self.spring_x_min = -2. 
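Above, the softmaxed weighting-network outputs are treated as a categorical distribution (m = Categorical(ws_normed)) and 20000 point indices are drawn in proportion to their weights. A small illustrative sketch of that resampling step; the helper itself is not part of the repository:

import torch
from torch.distributions import Categorical

def resample_points(points, weights, n_samples=20000):
    # points: (N, 3); weights: (N,) non-negative scores (Categorical normalizes them)
    idx = Categorical(probs=weights).sample((n_samples,))   # indices drawn with replacement
    return points[idx], weights[idx]
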
+ # self.spring_qd = nn.Embedding( + # num_embeddings=1, embedding_dim=1 + # ) + # torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # # 1) sample points from the active robot's mesh; + # # 2) calculate forces from sampled points to the action point; + # # 3) use the weight model to calculate weights for each sampled point; # + # # 4) aggregate forces; + # # # + ''' Distance to previous prev meshes to optimize ''' + # to active mesh # + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] ## nn_active_pts x 3 ## # active mesh # + + ##### using the points from active meshes directly #### + ori_input_pts = cur_active_mesh.clone() + sampled_input_pts = cur_active_mesh.clone() + + # if prev_pts_ts == 0: + # prev_prev_active_mesh_vel = torch.zeros_like(prev_active_mesh) + # else: + # # prev_prev_active_mesh_vel = prev_active_mesh - timestep_to_active_mesh[prev_pts_ts - 1] + # #### prev_prev active mehs #### + # # prev_prev_active_mesh = timestep_to_active_mesh[prev_pts_ts - 1] + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + # cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + # prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + # ## distnaces from act_mesh to the prev_prev ### prev_pts_ts ### + # dist_prev_act_mesh_to_prev_prev = torch.sum( + # (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + # ) + # minn_dist_prev_act_mesh_to_cur, minn_idx_dist_prev_act_mesh_to_cur = torch.min( + # dist_prev_act_mesh_to_prev_prev, dim=-1 ## + # ) + # selected_mesh_pts = batched_index_select(values=cur_active_mesh, indices=minn_idx_dist_prev_act_mesh_to_cur, dim=0) + # prev_prev_active_mesh_vel = selected_mesh_pts - prev_active_mesh + + nex_pts_ts = input_pts_ts + 1 + nex_active_mesh = timestep_to_active_mesh[nex_pts_ts] + cur_active_mesh_vel = nex_active_mesh - cur_active_mesh + + # dist_act_mesh_to_nex_ = torch.sum( + # (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + # ) + # cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + # prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + + # dist input pts active + ##### sqrt and the ##### + dist_input_pts_active_mesh = torch.sqrt(dist_input_pts_active_mesh) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh, topk_dist_input_pts_active_mesh_idx = torch.topk(dist_input_pts_active_mesh, k=self.nn_patch_active_pts, largest=False, dim=-1) + thres_dist, _ = torch.max(topk_dist_input_pts_active_mesh, dim=-1) + weighting_ka = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) # + weighting_kb = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # + + unnormed_weight_active_pts_to_input_pts = weighting_ka * torch.exp(-1. * dist_input_pts_active_mesh * weighting_kb * 50) # + unnormed_weight_active_pts_to_input_pts[unnormed_weight_active_pts_to_input_pts > thres_dist.unsqueeze(-1) + 1e-6] = 0. 
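A sketch of the patch weighting computed just above: every sampled point keeps its k nearest active-mesh points, weighs them with an exponential falloff in distance, and zeroes contributions beyond the k-th neighbour, which appears to be the intent of the thresholding above. weighting_ka and weighting_kb stand in for the learned weighting_model_ks entries:

import torch

def patch_weights(sampled_pts, active_pts, k=50, weighting_ka=1.0, weighting_kb=1.0):
    dist = torch.cdist(sampled_pts, active_pts)                  # (S, A) Euclidean distances
    knn_dist, knn_idx = torch.topk(dist, k=k, largest=False, dim=-1)
    w = weighting_ka * torch.exp(-dist * weighting_kb * 50.0)    # exponential falloff
    thres = knn_dist[:, -1:]                                     # k-th smallest distance per row
    w = torch.where(dist > thres + 1e-6, torch.zeros_like(w), w)
    w = w / torch.clamp(w.sum(dim=-1, keepdim=True), min=1e-9)   # normalize per sampled point
    return w, knn_idx
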
+ normed_weight_active_pts_to_input_pts = unnormed_weight_active_pts_to_input_pts / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + m = Categorical(normed_weight_active_pts_to_input_pts) # + nn_sampled_input_pts = self.nn_patch_active_pts # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + # sampled_input_pts_idx = sampled_input_pts_idx.contiguous().transpose(1, 0).contiguous() + + sampled_input_pts_idx = topk_dist_input_pts_active_mesh_idx + + + rel_input_pts_active_mesh = sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0) + # print(f"rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}, sampled_input_pts_idx: {sampled_input_pts_idx.size()}") + rel_input_pts_active_mesh = batched_index_select(values=rel_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) # + + cur_active_mesh_vel_exp = cur_active_mesh_vel.unsqueeze(0).repeat(rel_input_pts_active_mesh.size(0), 1, 1).contiguous() + cur_active_mesh_vel = batched_index_select(values=cur_active_mesh_vel_exp, indices=sampled_input_pts_idx, dim=1) ## + + # prev_active_mesh_exp = prev_active_mesh.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # prev_active_mesh_exp = batched_index_select(values=prev_active_mesh_exp, indices=sampled_input_pts_idx, dim=1) ### nn_sampled_pts x nn_selected_pts x 3 + # self.timestep_to_prev_selected_active_mesh[prev_pts_ts] = prevactive + # ''' Distance to previous active meshes to optimize ''' + # prev_active_mesh_ori = self.timestep_to_prev_active_mesh_ori[prev_pts_ts] ## nn_active_pts x 3 ## + + # dist_input_pts_active_mesh_ori = torch.sum( + # (sampled_input_pts.detach().unsqueeze(1) - cur_active_mesh_vel.unsqueeze(0)) ** 2, dim=-1 + # ) + # dist_input_pts_active_mesh_ori = torch.sqrt(dist_input_pts_active_mesh_ori) # nn_sampled_pts x nn_active_pts # + # topk_dist_input_pts_active_mesh_ori, topk_dist_input_pts_active_mesh_idx_ori = torch.topk(dist_input_pts_active_mesh_ori, k=500, largest=False, dim=-1) + # thres_dist_ori, _ = torch.max(topk_dist_input_pts_active_mesh_ori, dim=-1) + # weighting_ka_ori = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + # weighting_kb_ori = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + + # unnormed_weight_active_pts_to_input_pts_ori = weighting_ka_ori * torch.exp(-1. * dist_input_pts_active_mesh_ori * weighting_kb_ori * 50) # + # unnormed_weight_active_pts_to_input_pts_ori[unnormed_weight_active_pts_to_input_pts_ori >= thres_dist_ori.unsqueeze(-1)] = 0. 
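batched_index_select is a helper defined elsewhere in this repository; the calls above appear to rely on it behaving like a per-row gather along dim=1. The sketch below is an assumption about that behaviour for 3D inputs, not the repository's implementation:

import torch

def batched_index_select_dim1(values, indices):
    # values: (B, N, C); indices: (B, K) -> (B, K, C) with out[b, k] = values[b, indices[b, k]]
    idx = indices.unsqueeze(-1).expand(-1, -1, values.size(-1))
    return torch.gather(values, 1, idx)
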
+ # normed_weight_active_pts_to_input_pts_ori = unnormed_weight_active_pts_to_input_pts_ori / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts_ori, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + # m_ori = Categorical(normed_weight_active_pts_to_input_pts_ori) # + # nn_sampled_input_pts = 500 # + # # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + # sampled_input_pts_idx_ori = m_ori.sample(sample_shape=(nn_sampled_input_pts,)) + # # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + # sampled_input_pts_idx_ori = sampled_input_pts_idx_ori.contiguous().transpose(1, 0).contiguous() + + # rel_input_pts_active_mesh_ori = sampled_input_pts.detach().unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0).detach() + + # prev_active_mesh_ori_exp = prev_active_mesh_ori.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() + # prev_active_mesh_ori_exp = batched_index_select(values=prev_active_mesh_ori_exp, indices=sampled_input_pts_idx_ori, dim=1) + # # prev_active_mesh_ori_exp: nn_sampled_pts x nn_active_pts x 3 # + # # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + # self.timestep_to_prev_selected_active_mesh_ori[prev_pts_ts] = prev_active_mesh_ori_exp.detach() + # ''' Distance to previous active meshes to optimize ''' + + + + ''' spring force v2: use the spring force as input ''' + ### determine the spring coefficient ### + spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + # spring_qd = 1. # fix the qd to 1 # spring_qd # # spring_qd # + spring_qd = 0.5 + # dist input pts to active mesh # # + # a threshold distance -(d - d_thres)^3 * k + 2.*(2 - d_thres)**3 --> use the (2 - d_thres) ** 3 * k as the maximum distances -> k sould not be larger than 2. # + #### The k_d(d) in the form of inverse functions #### + spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) ### + + #### The k_d(d) in the form of polynomial functions #### + # spring_qd = 0.01 + # spring_kd = spring_qd * ((-(dist_input_pts_active_mesh - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + # wish to use simple functions to achieve the adjustmenet of k-d relations # # k-d relations # + + # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") # + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) # tiem_constant + spring_k_val = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) + + spring_kd = spring_kd * time_cons ### get the spring_kd (nn_sampled_pts x nn_act_pts ) #### + spring_force = -1. 
* spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + spring_force = batched_index_select(values=spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0) # prev_active_mesh # + dir_spring_force = batched_index_select(values=dir_spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + spring_force = dir_spring_force * spring_force.unsqueeze(-1) * spring_k_val + ''' spring force v2: use the spring force as input ''' + + + + # ''' get the spring force of the reference motion ''' + # #### The k_d(d) in the form of inverse functions #### + # # spring_kd_ori = spring_qd / (dist_input_pts_active_mesh_ori - self.spring_x_min) + # #### The k_d(d) in the form of polynomial functions #### + # spring_kd_ori = spring_qd * ((-(dist_input_pts_active_mesh_ori - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + # spring_kd_ori = spring_kd_ori * time_cons + # spring_force_ori = -1. * spring_kd_ori * (dist_input_pts_active_mesh_ori - self.spring_rest_length) + # spring_force_ori = batched_index_select(values=spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + # dir_spring_force_ori = sampled_input_pts.unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0) + # dir_spring_force_ori = batched_index_select(values=dir_spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + # dir_spring_force_ori = dir_spring_force_ori / torch.clamp(torch.norm(dir_spring_force_ori, dim=-1, keepdim=True, p=2), min=1e-9) + # spring_force_ori = dir_spring_force_ori * spring_force_ori.unsqueeze(-1) * spring_k_val + # ''' get the spring force of the reference motion ''' + ''' spring force v2: use the spring force as input ''' + + + ''' spring force v3: use the spring force as input ''' + transformed_w = self.patch_force_scale_network[0](rel_input_pts_active_mesh) # + transformed_w = self.patch_force_scale_network[1](transformed_w) + glb_transformed_w, _ = torch.max(transformed_w, dim=1, keepdim=True) + # print(f"transformed_w: {transformed_w.size()}, glb_transformed_w: {glb_transformed_w.size()}") + glb_transformed_w = glb_transformed_w.repeat(1, transformed_w.size(1), 1) # + + transformed_w = torch.cat( + [transformed_w, glb_transformed_w], dim=-1 + ) + + force_weighting = self.patch_force_scale_network[2](transformed_w) # + # print(f"before the last step, forces: {forces.size()}") + # forces, _ = torch.max(forces, dim=1) # and force weighting # + force_weighting = self.patch_force_scale_network[3](force_weighting).squeeze(-1) # nn_sampled_pts x nn_active_pts # + force_weighting = F.softmax(force_weighting, dim=-1) ## nn_sampled_pts x nn_active_pts # + ## use the v3 force as the input to the field ## + forces = torch.sum( # # use the spring force as input # + spring_force * force_weighting.unsqueeze(-1), dim=1 ### sum over the force; sum over the force ### + ) + self.timestep_to_spring_forces[input_pts_ts] = forces + ''' spring force v3: use the spring force as input ''' + + + # ''' spring force from the reference trajectory ''' + # transformed_w_ori = self.patch_force_scale_network[0](rel_input_pts_active_mesh_ori) + # transformed_w_ori = self.patch_force_scale_network[1](transformed_w_ori) + # glb_transformed_w_ori, _ = torch.max(transformed_w_ori, dim=1, keepdim=True) + # glb_transformed_w_ori = glb_transformed_w_ori.repeat(1, 
transformed_w_ori.size(1), 1) # + # transformed_w_ori = torch.cat( + # [transformed_w_ori, glb_transformed_w_ori], dim=-1 + # ) + # force_weighting_ori = self.patch_force_scale_network[2](transformed_w_ori) + # force_weighting_ori = self.patch_force_scale_network[3](force_weighting_ori).squeeze(-1) + # force_weighting_ori = F.softmax(force_weighting_ori, dim=-1) + # forces_ori = torch.sum( + # spring_force_ori.detach() * force_weighting.unsqueeze(-1).detach(), dim=1 + # ) + # self.timestep_to_spring_forces_ori[prev_pts_ts] = forces_ori + # ''' spring force from the reference trajectory ''' + + + ''' TODO: a lot to do for this firctional model... ''' + ''' calculate the firctional force ''' + friction_qd = 0.5 + friction_qd = 0.1 + dist_input_pts_active_mesh_sel = batched_index_select(dist_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) + #### The k_d(d) in the form of inverse functions #### + friction_kd = friction_qd / (dist_input_pts_active_mesh_sel - self.spring_x_min) + + #### The k_d(d) in the form of polynomial functions #### + # friction_qd = 0.01 + # friction_kd = friction_qd * ((-(dist_input_pts_active_mesh_sel - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + friction_kd = friction_kd * time_cons + prev_prev_active_mesh_vel_norm = torch.norm(cur_active_mesh_vel, dim=-1) + friction_force = friction_kd * (self.spring_rest_length - dist_input_pts_active_mesh_sel) * prev_prev_active_mesh_vel_norm # | vel | * (dist - rest_length) * friction_kd # + friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = batched_index_select(values=friction_force, indices=sampled_input_pts_idx, dim=1) # + dir_friction_force = cur_active_mesh_vel + dir_friction_force = dir_friction_force / torch.clamp(torch.norm(dir_friction_force, dim=-1, keepdim=True, p=2), min=1e-9) # + friction_force = dir_friction_force * friction_force.unsqueeze(-1) * friction_k # k * friction_force_scale * friction_force_dir # # get the friction force and the frictionk # + friction_force = torch.sum( # friction_force: nn-pts x 3 # + friction_force * force_weighting.unsqueeze(-1), dim=1 + ) + forces = forces + friction_force + forces = friction_force + ''' calculate the firctional force ''' + + + ''' Embed sdf values ''' + # raw_input_pts = input_pts[:, :3] + # if self.embed_fn_fine is not None: # + # input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' # + + ###### [time_cons] is used when calculating buth the spring force and the frictional force ---> convert force to acc ###### + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, _ = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha * 10) + + + # ws_unnormed = ws_normed_sampled + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + + ''' get velocity and offset related constants ''' + # k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + # k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + ''' get velocity and offset related constants ''' + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() + + + ''' Compute torque, angular acc, angular vel and delta quaternion via forces and the directional offset from the center point to the sampled points ''' + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) ### center_point to the input_pts ### + # sampled_pts_torque = torch.cross(forces, center_point_to_sampled_pts, dim=-1) ## nn_sampled_pts x 3 ## + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + torque = torch.sum( + sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + ) + delta_angular_vel = torque * time_cons + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() ### (3,) + cur_delta_angle = cur_angular_vel * time_cons + cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() # angular velocity # + self.timestep_to_torque[input_pts_ts] = torque.detach() + + + + # ws_normed, defed_input_pts_sdf # + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_ws_normed[input_pts_ts] = ws_normed.detach() + # self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} # # ori input pts # + # self.timestep_to_ori_input_pts_sdf = {} # # + # ori_input_pts, ori_input_pts_sdf # + self.timestep_to_ori_input_pts[input_pts_ts] = ori_input_pts.detach() + # self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), # vel to offset # + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + 'friction_k': friction_k.detach().cpu()[0].item(), + 'spring_k_val': spring_k_val.detach().cpu()[0].item(), # spring_k + # "dist_k_b": dist_k_b.detach().cpu()[0].item(), + # "dist_k_a": dist_k_a.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + ## TODO: is it a good updating strategy? ## + # cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # curupd + if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset # get the offset # + + cur_optimizable_total_def = cur_offset + torch.matmul(cur_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_optimizable_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_optimizable_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + + + # cur_rot_mtx = quaternion_to_matrix(cur_quaternion) # 3 x 3 + + # cur_tmp_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) # 3 x 3 rotation matrix # + # np.matmul(new_pts, rot_mtx) + cur_offset # + # new_pts = np.matmul(new_pts, cur_tmp_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### # + # cur_upd_rigid_def_aa = cur_offset + prev_rigid_def.detach() + # cur_upd_rigid_def_aa = cur_offset + torch.matmul(prev_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx).squeeze(0) + + + # ori_input_pts = torch.matmul(raw_input_pts - cur_upd_rigid_def_aa.unsqueeze(0), cur_rot_mtx.contiguous().transpose(1, 0).contiguous()) + # prev_rot_mtx = quaternion_to_matrix(prev_quaternion).detach() + # prev_tot_offset = self.timestep_to_total_def[prev_pts_ts].detach() + # new_pts = torch.matmul(ori_input_pts, prev_rot_mtx) + prev_tot_offset.unsqueeze(0) + + # # + # cur_offset_with_rot = raw_input_pts - new_pts + # cur_offset_with_rot = torch.mean(cur_offset_with_rot, dim=0) + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset_with_rot + + return None + + + + +class BendingNetworkActiveForceFieldForwardLagEuV13(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + 
rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False, + + ): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagEuV13, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.time_embedding_latent_dim = self.bending_latent_size + + self.static_friction_mu = 1. + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.contact_spring_rest_length = 2. + self.spring_ks_values = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_ks_values.weight) + self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.01 + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_contact_d = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_contact_d.weight) # ks_contact_d # + # self.ks_contact_d # + + self.ks_weight_d = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weight_d.weight) # ks_weight_d # as the weights # + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # the level 1 distnace threshold # # use bending_latent to get the timelabel latnets # + self.distance_threshold = 1.0 + self.distance_threshold = 0.5 + self.distance_threshold = 0.1 + self.distance_threshold = 0.05 + self.distance_threshold = 0.005 + # self.distance_threshold = 0.01 + # self.distance_threshold = 0.001 + # self.distance_threshold = 0.0001 + self.res = 64 + self.construct_grid_points() + # ### determine the friction froce # ## # + ## Should be projected to perpendicular to the normal direction # + + # self.nn_instances = nn_instances + # if nn_instances == 1: + self.friction_net = self.construct_field_network(input_dim=3 + self.time_embedding_latent_dim, hidden_dim=self.hidden_dimensions, output_dim=3, depth=5, init_value=0.) + # else: + # # self.friction_net = [ + # # self.construct_field_network(input_dim=3 + self.time_embedding_latent_dim, hidden_dim=self.hidden_dimensions, output_dim=3, depth=5, init_value=0.) 
for _ in range(self.nn_instances) + # # ] + # self.friction_net = nn.ModuleList( + # [ + # self.construct_field_network(input_dim=3 + self.time_embedding_latent_dim, hidden_dim=self.hidden_dimensions, output_dim=3, depth=5, init_value=0.) for _ in range(self.nn_instances) + # ] + # ) + + + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. / (778 * 2)) + + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + self.actuator_friction_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. 
/ (778 * 2))
+
+
+        ''' patch force network and the patch force scale network '''
+        self.patch_force_network = nn.ModuleList(
+            [
+                nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()),
+                nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpool layers #
+                nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), #
+                nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights #
+            ]
+        )
+
+        with torch.no_grad():
+            for i, layer in enumerate(self.patch_force_network[:]):
+                for cc in layer:
+                    if isinstance(cc, nn.Linear):
+                        torch.nn.init.kaiming_uniform_(
+                            cc.weight, a=0, mode="fan_in", nonlinearity="relu"
+                        )
+                        # if i == len(self.patch_force_network) - 1:
+                        #     torch.nn.init.xavier_uniform_(cc.bias)
+                        # else:
+                        if i < len(self.patch_force_network) - 1:
+                            torch.nn.init.zeros_(cc.bias)
+                # torch.nn.init.zeros_(layer.bias)
+
+        self.patch_force_scale_network = nn.ModuleList(
+            [
+                nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()),
+                nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpool layers #
+                nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), #
+                nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights #
+            ]
+        )
+
+        with torch.no_grad():
+            for i, layer in enumerate(self.patch_force_scale_network[:]):
+                for cc in layer:
+                    if isinstance(cc, nn.Linear): ### if the linear layer # # ##
+                        torch.nn.init.kaiming_uniform_(
+                            cc.weight, a=0, mode="fan_in", nonlinearity="relu"
+                        )
+                        if i < len(self.patch_force_scale_network) - 1:
+                            torch.nn.init.zeros_(cc.bias)
+        ''' patch force network and the patch force scale network '''
+
+        # self.input_ch = 1 #
+        self.network = nn.ModuleList(
+            [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] +
+            [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions)
+             if i + 1 in self.skips
+             else nn.Linear(self.hidden_dimensions, self.hidden_dimensions)
+             for i in range(self.network_depth - 2)] +
+            [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)])
+
+        # initialize weights
+        with torch.no_grad():
+            for i, layer in enumerate(self.network[:-1]):
+                if self.activation_function.__name__ == "sin":
+                    # SIREN ( Implicit Neural Representations with Periodic Activation Functions
+                    # https://arxiv.org/pdf/2006.09661.pdf Sec.
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_rest_length_active = 2. + self.spring_x_min = -2. 
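The same initialization recipe recurs for self.network, self.dir_network and self.weighting_network above (and again for the split networks defined later): SIREN-style uniform ranges when the activation is sin, Kaiming fan-in initialization otherwise, zeroed biases, and a zeroed final layer so the predicted field starts out near a constant. A compact sketch of that recipe as a standalone helper (an illustration, not the class's own code):

import numpy as np
import torch
import torch.nn as nn

@torch.no_grad()
def init_mlp(layers, activation="relu", last_bias_value=0.2):
    for i, layer in enumerate(layers[:-1]):
        if not isinstance(layer, nn.Linear):
            continue
        if activation == "sin":
            # SIREN ranges: tighter for the first layer, sqrt(6 / fan_in) afterwards
            a = 1.0 / layer.in_features if i == 0 else np.sqrt(6.0 / layer.in_features)
            layer.weight.uniform_(-a, a)
        else:
            nn.init.kaiming_uniform_(layer.weight, a=0, mode="fan_in", nonlinearity="relu")
        nn.init.zeros_(layer.bias)
    layers[-1].weight.data.zero_()              # final layer starts as a constant output
    if layers[-1].bias is not None:
        layers[-1].bias.data.fill_(last_bias_value)
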
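The spring constants defined at the end of the block above (spring_rest_length, spring_x_min) together with the spring_qd embedding defined just below parameterize the distance-dependent stiffness used in the forward passes: k_d(d) = q_d / (d - x_min) and a Hooke-type magnitude F = -k_d(d) * (d - rest_length). A small sketch with placeholder constants:

import torch

def spring_force_magnitude(dist, q_d=0.5, x_min=-2.0, rest_length=2.0):
    # dist: point-to-active-mesh distances; with x_min = -2 the denominator stays positive
    k_d = q_d / (dist - x_min)              # farther points get softer springs
    return -k_d * (dist - rest_length)      # attraction/repulsion about the rest length
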
+ self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # timestep_to_contact_normal_forces, timestep_to_friction_forces + self.timestep_to_contact_normal_forces = {} + self.timestep_to_friction_forces = {} + # how to support frictions? # + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. 
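time_quaternions above is initialized to the identity quaternion (1, 0, 0, 0) in (w, x, y, z) order, and the forward pass advances the per-timestep orientation with prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion). Assuming that helper returns the standard small-angle increment 0.5 * (delta_angle, treated as a pure quaternion, Hamilton-multiplied by q), a sketch of the update step is:

import torch

def quat_mul(q, r):
    # Hamilton product of quaternions in (w, x, y, z) order
    w1, x1, y1, z1 = q.unbind(-1)
    w2, x2, y2, z2 = r.unbind(-1)
    return torch.stack([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ], dim=-1)

def integrate_quaternion(prev_q, delta_angle):
    # prev_q: (4,) orientation; delta_angle: (3,) = angular velocity * dt
    omega = torch.cat([torch.zeros(1, device=prev_q.device, dtype=prev_q.dtype), delta_angle])
    new_q = prev_q + 0.5 * quat_mul(omega, prev_q)
    # re-normalizing keeps the quaternion unit length; whether the repository's helper or
    # its caller does this is not shown in the diff above
    return new_q / torch.clamp(new_q.norm(), min=1e-8)
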
+ # torch.nn.init.ones_(self.time_quaternions.weight) # + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + self.timestep_to_grid_pts_forces = {} + self.timestep_to_grid_pts_weight = {} + + def construct_grid_points(self, ): + bound_min = [-1, -1, -1] + bound_max = [1, 1, 1] + X = torch.linspace(bound_min[0], bound_max[0], self.res).cuda() + Y = torch.linspace(bound_min[1], bound_max[1], self.res).cuda() # .split(N) + Z = torch.linspace(bound_min[2], bound_max[2], self.res).cuda() # .split(N) + xx, yy, zz = torch.meshgrid(X, Y, Z) + pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) + self.grid_pts = pts + + # field_network = construct_field_network(self, input_dim, hidden_dim, output_dim, depth, init_value=0.) # + def construct_field_network(self, input_dim, hidden_dim, output_dim, depth, init_value=0.): + # self.input_ch = 1 + field_network = nn.ModuleList( + [nn.Linear(input_dim, hidden_dim)] + + [nn.Linear(hidden_dim, hidden_dim) + for i in range(depth - 2)] + + [nn.Linear(hidden_dim, output_dim, bias=True)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(field_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + field_network[-1].weight.data *= 0.0 + # if use_last_layer_bias: + field_network[-1].bias.data *= 0.0 + field_network[-1].bias.data += init_value # a realvvalue + return field_network + + def apply_filed_network(self, inputs, field_net): + for cur_model in field_net: + inputs = cur_model(inputs) + return inputs + + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None, sampled_verts_idxes=None, i_instance=0): + ### from input_pts to new pts ### + # prev_pts_ts = input_pts_ts - 1 # + + ''' Kinematics rigid transformations only ''' # with a good initialization and the kinematics tracking result? # + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) + + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 # define correspondences # + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + # friction_qd = 0.1 + + # sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> + + sampled_input_pts = self.grid_pts + + # construct points # + # res x res x res + # self.grid_pts # + + + # ws_normed = torch.ones((sampled_input_pts.size(0),), dtype=torch.float32).cuda() + # ws_normed = ws_normed / float(sampled_input_pts.size(0)) + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 20000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + + + # sampled_input_pts_normals = # + init_passive_obj_verts = timestep_to_passive_mesh[0] # get the passive object point # + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() # to total def # + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + 
center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + + ##### Use the distance to the center of the passive object as the creterion for selection ##### + # dist_sampled_pts_to_center = torch.sum( + # (sampled_input_pts - passive_center_point.unsqueeze(0)) ** 2, dim=-1 + # ) + # dist_sampled_pts_to_center = torch.sqrt(dist_sampled_pts_to_center) + # sampled_input_pts = sampled_input_pts[dist_sampled_pts_to_center <= self.distance_threshold] + # ##### Use the distance to the center of the passive object as the creterion for selection ##### + # idx_pts_near_to_obj = dist_sampled_pts_to_center <= self.distance_threshold # k_f # With spring force + + # active # + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] # active mesh # + # nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] # ji + if sampled_verts_idxes is not None: + cur_active_mesh = cur_active_mesh[sampled_verts_idxes] + + dist_pts_to_obj = torch.sum( + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_pts_to_obj, minn_idx_pts_to_obj = torch.min(dist_pts_to_obj, dim=-1) + pts_normals = cur_passive_obj_ns[minn_idx_pts_to_obj] ### nn_sampled_pts x 3 -> the normal direction of the nearest passive object point ### + + # + idx_pts_near_to_obj = dist_pts_to_obj <= self.distance_threshold + sampled_input_pts = sampled_input_pts[idx_pts_near_to_obj] + minn_idx_pts_to_obj = minn_idx_pts_to_obj[idx_pts_near_to_obj] + pts_normals = pts_normals[idx_pts_near_to_obj] + dist_pts_to_obj = dist_pts_to_obj[idx_pts_near_to_obj] + + + + # idx_pts_near_to_obj -> selector # + + + contact_ka = self.ks_contact_d(torch.zeros((1,)).long().cuda()).view(1) + contact_kb = self.ks_contact_d(torch.ones((1,)).long().cuda()).view(1) + pts_contact_d = contact_ka * (self.spring_rest_length - dist_pts_to_obj) # * 0.02 + pts_contact_d = torch.softmax(pts_contact_d, dim=0) # nn_sampled_pts + pts_contact_d = contact_kb * pts_contact_d + + pts_contact_force = -1 * pts_normals * pts_contact_d.unsqueeze(-1) + + time_latents_emb_idxes = torch.zeros((sampled_input_pts.size(0), ), dtype=torch.long).cuda() + input_pts_ts + time_latents = self.bending_latent(time_latents_emb_idxes) + friction_latents_in = torch.cat( + [sampled_input_pts, time_latents], dim=-1 + ) + + # for a new active mesh, optimize the transformations and controls at each frame to produce the same force field # + # to produce the same forces and the weights # + ### decide the friction force ### # firction network # # only the weights can be optimized, right? 
# + # if self.nn_instances == 1: + pts_friction = self.apply_filed_network(friction_latents_in, self.friction_net) # ### pts_friction force + # else: + # pts_friction = self.apply_filed_network(friction_latents_in, self.friction_net[i_instance]) + pts_friction_dot_normal = torch.sum( + pts_friction * pts_normals, dim=-1 + ) + pts_friction = pts_friction - pts_friction_dot_normal.unsqueeze(-1) * pts_normals ### nn_sampled_pts x 3 ### + + # idx_pts_near_to_obj -> selector; pts_contact_force, pts_friction # + ## TODO: add soft constraints ## + pts_forces = pts_contact_force + pts_friction + + # determine weights # + dist_pts_to_active = torch.sum( + (sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + dist_pts_to_active, minn_idx_pts_to_active = torch.min(dist_pts_to_active, dim=-1) + weight_da = self.ks_weight_d(torch.zeros((1,)).long().cuda()).view(1) + weight_db = self.ks_weight_d(torch.ones((1,)).long().cuda()).view(1) + # weight = weight_da * (self.spring_rest_length - dist_pts_to_active) + + weight = weight_da * (self.spring_rest_length_active - dist_pts_to_active) + weight = torch.softmax(weight, dim=0) # nn_sampled_pts + weight = weight_db * weight # weight d #### + + # weight = weight_db * torch.exp(-1. * dist_pts_to_active * weight_da ) + + forces = pts_forces + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # # cur actuation + # cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + # cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + # cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # # ######### optimize the actuator forces directly ######### + # # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) + # # forces = cur_actuation_forces + # # ######### optimize the actuator forces directly ######### + + # if friction_forces is None: + # ###### get the friction forces ##### + # cur_actuation_friction_forces = self.actuator_friction_forces(cur_actuation_embedding_idxes) + # else: + # cur_actuation_embedding_st_idx = 365428 * input_pts_ts + # cur_actuation_embedding_ed_idx = 365428 * (input_pts_ts + 1) + # cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + # 
dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + # (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + # ) + # dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + # # cur_passive_obj_ns # + # inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] ### nn_sampled_pts x 3 -> the normal direction of the nearest passive object point ### + # inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + + # ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) + # ####### sharp the weights ####### + + # minn_dist_sampled_pts_passive_obj_thres = 0.05 + # # minn_dist_sampled_pts_passive_obj_thres = 0.001 + # # minn_dist_sampled_pts_passive_obj_thres = 0.0001 + # ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # # ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha ) + # # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # # cur_act_weights = ws_normed + # cur_act_weights = ws_unnormed + + # # # ws_unnormed = ws_normed_sampled + # # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + # #### using network weights #### + # # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + # #### using network weights #### + + ### + ''' decide forces via kinematics statistics ''' + # rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + # dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + # dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. 
* dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] + # # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + # contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + # contact_spring_kb = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + # contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + # # ### # fields # + # # contact_force_d = -contact_spring_ka * (dist_sampled_pts_to_passive_obj - self.contact_spring_rest_length) # + # contact_force_d = contact_spring_ka * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) # + contact_spring_kb * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 2 + contact_spring_kc * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 3 + # # vel_sampled_pts = nex_active_mesh - cur_active_mesh + # tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + # ###### Get the tangential forces via optimizable forces ###### + # cur_actuation_friction_forces_along_normals = torch.sum(cur_actuation_friction_forces * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + # tangential_vel = cur_actuation_friction_forces - cur_actuation_friction_forces_along_normals + # ###### Get the tangential forces via optimizable forces ###### + + # ###### Get the tangential forces via tangential velocities ###### + # # vel_sampled_pts_along_normals = torch.sum(vel_sampled_pts * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + # # tangential_vel = vel_sampled_pts - vel_sampled_pts_along_normals + # ###### Get the tangential forces via tangential velocities ###### + + # # tangential_forces = tangential_vel * tangential_ks + # # contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals) + + # # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + # # norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts ## + # # penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + # # penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. 
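+        # # (terms already inside the Coulomb cone ||f_t|| <= mu * ||f_n|| are zeroed just above,
+        # #  so the mean below would act as a soft friction-cone penalty if this block were enabled)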
+ # # penalty_friction_constraint = torch.mean(penalty_friction_constraint) + # # # + # # self.penalty_friction_constraint = penalty_friction_constraint + + + # ### strict cosntraints ### + # # mult_weights = torch.ones_like(norm_along_normals_forces).detach() + # # hard_selector = norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces + # # hard_selector = hard_selector.detach() + # # mult_weights[hard_selector] = self.static_friction_mu * norm_along_normals_forces.detach()[hard_selector] / norm_tangential_forces.detach()[hard_selector] + # # ### change to the strict constraint ### + # # # tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces] = tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces] / norm_tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces].unsqueeze(-1) * self.static_friction_mu * norm_along_normals_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces].unsqueeze(-1) + # # ### change to the strict constraint ### + + # # # tangential forces # + # # tangential_forces = tangential_forces * mult_weights.unsqueeze(-1) + # ### strict cosntraints ### + + # forces = tangential_forces + contact_force_d + ''' decide forces via kinematics statistics ''' + + ''' Decompose forces and calculate penalty froces ''' + # # penalty_dot_forces_normals, penalty_friction_constraint # + # # # get the forces -> decompose forces # + # dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + # forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + # tangential_forces = forces - forces_along_normals # tangential forces # ## tangential forces ## + + # penalty_dot_forces_normals = dot_forces_normals ** 2 + # penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + # penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) + # self.penalty_dot_forces_normals = penalty_dot_forces_normals + + + + # rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc ## rigid acc ## + rigid_acc = torch.sum(pts_forces * weight.unsqueeze(-1), dim=0) + + + + ###### sampled input pts to center ####### + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() # ## + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) + ###### nearest passive object point to center ####### + + # torque and the forces # + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, pts_forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * weight.unsqueeze(-1), dim=0 + ) + + # + + + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = 
self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + # cur_delta_quaternion = + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + # timestep_to_contact_normal_forces, 
timestep_to_friction_forces + self.timestep_to_contact_normal_forces[input_pts_ts] = pts_contact_force.detach() + self.timestep_to_friction_forces[input_pts_ts] = pts_friction.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = weight.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_pts_to_obj.detach() + + # # idx_pts_near_to_obj -> selector; pts_contact_force, pts_friction # + cur_grid_pts_forces = torch.zeros_like(self.grid_pts) + cur_grid_pts_forces = torch.cat([cur_grid_pts_forces, cur_grid_pts_forces], dim=-1) # + selected_pts_tot_forces = torch.cat([pts_contact_force, pts_friction], dim=-1) + cur_grid_pts_forces[idx_pts_near_to_obj] = selected_pts_tot_forces + self.timestep_to_grid_pts_forces[input_pts_ts] = cur_grid_pts_forces # .detach() + cur_grid_pts_weight = torch.zeros((self.grid_pts.size(0),), dtype=torch.float32).cuda() + cur_grid_pts_weight[idx_pts_near_to_obj] = weight + self.timestep_to_grid_pts_weight[input_pts_ts] = cur_grid_pts_weight + + self.save_values = { # input_pts_ts # + # 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? # 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_contact_normal_forces': {cur_ts: self.timestep_to_contact_normal_forces[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_contact_normal_forces}, + 'timestep_to_friction_forces': {cur_ts: self.timestep_to_friction_forces[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_friction_forces}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + 'timestep_to_grid_pts_forces': {cur_ts: self.timestep_to_grid_pts_forces[cur_ts].detach().cpu().numpy() for cur_ts in self.timestep_to_grid_pts_forces}, + 'timestep_to_grid_pts_weight': {cur_ts: self.timestep_to_grid_pts_weight[cur_ts].detach().cpu().numpy() for cur_ts in self.timestep_to_grid_pts_weight} # + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + # 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return + + + +class BendingNetworkActiveForceFieldForwardLagRoboManipV13(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + 
use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagRoboManipV13, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.static_friction_mu = 1. + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.contact_spring_rest_length = 2. + self.spring_ks_values = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_ks_values.weight) + self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.01 + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. 
/ (778 * 2)) + + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + self.actuator_friction_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. / (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # 
https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) # + self.spring_rest_length = 2. # + self.spring_x_min = -2. 
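+        # A rough sketch of how these learned scalars are meant to enter the model, following the
+        # surrounding comments (the concrete values are optimized during training):
+        #   unnormed_weight(d) = k_a * exp(-k_b * d)             # k_a, k_b from weighting_model_ks
+        #   spring_force(x)    = -k_d * (x - spring_rest_length)
+        # with spring_x_min bounding x from below in the k_d = q_d / (x - spring_x_min) model of
+        # spring_qd defined next.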
+ self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; # + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? # + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion # + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. + # torch.nn.init.ones_(self.time_quaternions.weight) # + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None): + ### from input_pts to new pts ### + # prev_pts_ts = input_pts_ts - 1 # + + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 # + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + friction_qd = 0.1 + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] + + + ws_normed = torch.ones((sampled_input_pts.size(0),), dtype=torch.float32).cuda() + ws_normed = ws_normed / float(sampled_input_pts.size(0)) + m = Categorical(ws_normed) + nn_sampled_input_pts = 5000 + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = sampled_input_pts[sampled_input_pts_idx] ### sampled input pts #### + + + + # sampled_input_pts_normals = # # sampled # # # + init_passive_obj_verts = timestep_to_passive_mesh[0] # get the passive object point # + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 
0).contiguous()).transpose(1, 0).contiguous() ## transform the normals ## + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + # passive obj center # + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + # active # + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] # active mesh # + # nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + cur_active_mesh = cur_active_mesh[sampled_input_pts_idx] + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # cur actuation # embedding st idx # + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # ######### optimize the actuator forces directly ######### + # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) + # forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + if friction_forces is None: + ###### get the friction forces ##### + cur_actuation_friction_forces = self.actuator_friction_forces(cur_actuation_embedding_idxes) + else: + cur_actuation_embedding_st_idx = 365428 * input_pts_ts + cur_actuation_embedding_ed_idx = 365428 * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + cur_actuation_friction_forces = cur_actuation_friction_forces[sampled_input_pts_idx] ## sample ## + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + # cur_passive_obj_ns # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] ### nn_sampled_pts x 3 -> the normal direction of the nearest passive object point ### + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + + ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10.) 
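+        # ws_unnormed gives each sampled active-mesh point an (unnormalized) influence weight that
+        # decays exponentially with its squared distance to the nearest passive-object vertex;
+        # ws_alpha and ws_beta are the learnable [alpha, beta] scalars from self.ks_weights, and the
+        # extra factor of 10 sharpens the decay. Weights of points beyond the distance threshold are
+        # zeroed below.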
+        ####### sharpen the weights #######
+
+        minn_dist_sampled_pts_passive_obj_thres = 0.05
+        # minn_dist_sampled_pts_passive_obj_thres = 2.
+        # minn_dist_sampled_pts_passive_obj_thres = 0.001
+        # minn_dist_sampled_pts_passive_obj_thres = 0.0001
+        hard_selected_manipulating_points = dist_sampled_pts_to_passive_obj <= minn_dist_sampled_pts_passive_obj_thres
+
+        ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0
+
+        # ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha )
+        # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9)
+        # cur_act_weights = ws_normed
+        cur_act_weights = ws_unnormed
+
+        # # ws_unnormed = ws_normed_sampled
+        # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9)
+        # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0)
+
+        #### using network weights ####
+        # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1)
+        #### using network weights ####
+
+        ###
+        ''' decide forces via kinematics statistics '''
+        rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts #
+        dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts
+        dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. * dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0]
+        # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj |
+        contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,)
+        contact_spring_kb = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,)
+        contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,)
+
+        # contact_force_d = -contact_spring_ka * (dist_sampled_pts_to_passive_obj - self.contact_spring_rest_length) #
+        contact_force_d = contact_spring_ka * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) # + contact_spring_kb * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 2 + contact_spring_kc * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 3
+        # vel_sampled_pts = nex_active_mesh - cur_active_mesh
+        tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,)
+
+        ###### Get the tangential forces via optimizable forces ######
+        cur_actuation_friction_forces_along_normals = torch.sum(cur_actuation_friction_forces * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals
+        tangential_vel = cur_actuation_friction_forces - cur_actuation_friction_forces_along_normals
+        ###### Get the tangential forces via optimizable forces ######
+
+        ###### Get the tangential forces via tangential velocities ######
+        # vel_sampled_pts_along_normals = torch.sum(vel_sampled_pts * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals
+        # tangential_vel = vel_sampled_pts - vel_sampled_pts_along_normals
+        ###### Get the tangential forces via tangential velocities ######
+
+        tangential_forces = tangential_vel * tangential_ks
+        contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals)
+
+        norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ##
+        norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts ##
+        penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2
+        penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0.
+        penalty_friction_constraint = torch.mean(penalty_friction_constraint)
+        #
+        self.penalty_friction_constraint = penalty_friction_constraint
+
+
+        ### strict constraints ###
+        # mult_weights = torch.ones_like(norm_along_normals_forces).detach()
+        # hard_selector = norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces
+        # hard_selector = hard_selector.detach()
+        # mult_weights[hard_selector] = self.static_friction_mu * norm_along_normals_forces.detach()[hard_selector] / norm_tangential_forces.detach()[hard_selector]
+        # ### change to the strict constraint ###
+        # # tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces] = tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces] / norm_tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces].unsqueeze(-1) * self.static_friction_mu * norm_along_normals_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces].unsqueeze(-1)
+        # ### change to the strict constraint ###
+
+        # # tangential forces #
+        # tangential_forces = tangential_forces * mult_weights.unsqueeze(-1)
+        ### strict constraints ###
+
+        forces = tangential_forces + contact_force_d
+        ''' decide forces via kinematics statistics '''
+
+        ''' Decompose forces and calculate penalty forces '''
+        # penalty_dot_forces_normals, penalty_friction_constraint #
+        # # get the forces -> decompose forces #
+        dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ###
+        forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ##
+        tangential_forces = forces - forces_along_normals # tangential forces # ## tangential forces ##
+
+        penalty_dot_forces_normals = dot_forces_normals ** 2
+        penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must be in the negative direction of the object normal #
+        penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals)
+        self.penalty_dot_forces_normals = penalty_dot_forces_normals
+
+
+        ### forces and the act_weights ###
+        rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc ## rigid acc ##
+
+
+
+        # hard_selected_forces, hard_selected_manipulating_points #
+        hard_selected_forces = forces[hard_selected_manipulating_points]
+        sampled_input_pts_idxes = torch.tensor([idxx for idxx in range(sampled_input_pts.size(0))], dtype=torch.long).cuda()
+        hard_selected_sampled_input_pts_idxes = sampled_input_pts_idxes[hard_selected_manipulating_points]
+        hard_selected_manipulating_points = sampled_input_pts[hard_selected_manipulating_points] ###
+
+
+        ###### sampled input pts to center #######
+        center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0)
+        ###### sampled input pts to center #######
+
+        ###### nearest passive object point to center #######
+        # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() # ##
+        # cur_passive_obj_verts = 
batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + # cur_delta_quaternion = + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + 
self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + # 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? # 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + # 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + # hard_selected_forces, hard_selected_manipulating_points, hard_selected_sampled_input_pts_idxes + rt_value = { + 'hard_selected_forces': hard_selected_forces, + 'hard_selected_manipulating_points': hard_selected_manipulating_points, + 'hard_selected_sampled_input_pts_idxes': hard_selected_sampled_input_pts_idxes, + } + return rt_value + + + + + + +class BendingNetworkForward(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkForward, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.output_ch = 3 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + 
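+ # The save_values dictionary assembled in the forward pass above stores the per-timestep
+ # contact state as plain numpy arrays, which makes it easy to dump and inspect offline.
+ # A minimal usage sketch (the file name below is hypothetical):
+ #   np.save("contact_states.npy", self.save_values, allow_pickle=True)
+ #   restored = np.load("contact_states.npy", allow_pickle=True).item()
+ #   forces_at_t0 = restored["timestep_to_point_accs"][0]   # (N, 3) array for timestep 0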
self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + ## timestep t ## # + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + # self.bending_latent = nn.Parameter( + # torch.zeros((self.bending_n_timesteps, self.bending_hi)) + # ) + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_val = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + # ks_val # + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + if self.use_rigidity_network: + self.rigidity_activation_function = F.relu # F.relu, torch.sin + self.rigidity_skips = [] # do not include 0 and do not include depth-1 # + use_last_layer_bias = True + self.rigidity_tanh = nn.Tanh() + + if self.rigidity_use_latent: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + else: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.rigidity_network[:-1]): + if self.rigidity_activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.rigidity_activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights + self.rigidity_network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.rigidity_network[-1].bias.data *= 0.0 + + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32 + ).cuda() + self.use_opt_rigid_translations = False # load utils and the loading .... 
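+ # The constructors in this file support two weight initializations: a SIREN-style uniform
+ # init when the activation is sin, and Kaiming init for ReLU (the default here); in both
+ # cases the final layer is zeroed so the predicted offsets start at zero. A condensed
+ # helper expressing the SIREN branch, for reference only (the constructors above inline
+ # it; relies on the torch / numpy imports at the top of this file):
+ def siren_style_init(layers):
+     # layers: nn.ModuleList of nn.Linear layers
+     with torch.no_grad():
+         for i, layer in enumerate(layers[:-1]):
+             bound = 1.0 / layer.in_features if i == 0 else np.sqrt(6.0 / layer.in_features)
+             layer.weight.uniform_(-bound, bound)
+         layers[-1].weight.data.zero_()   # zero final layer -> identity deformation at init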
## + self.use_split_network = False + + self.timestep_to_forward_deform = {} + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + # self.split_network = nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)]) + ##### split network full ##### + # self.split_network = nn.ModuleList( + # [nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + # ) for _ in range(self.bending_n_timesteps - 5)] + # ) + # for i_split in range(len(self.split_network)): + # with torch.no_grad(): + # for i, layer in enumerate(self.split_network[i_split][:-1]): + # if self.activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # # initialize final layer to zero weights to start out with straight rays + # self.split_network[i_split][-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_network[i_split][-1].bias.data *= 0.0 + ##### split network full ##### + + ##### split network single ##### + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + def forward(self, input_pts, input_pts_ts, timestep_to_mesh, timestep_to_passive_mesh, bending_net, bending_net_passive, act_sdf_net, details=None, special_loss_return=False): # special loss return # + + # query the bending_net for the active obj's deformation flow # + # query the bending_net_passive for the passive obj's deformation flow # + # input_pts_ts should goes from zero to maxx_ts - 1 # + nex_pts_ts = input_pts_ts + 1 + nex_timestep_mesh = timestep_to_mesh[nex_pts_ts] + + active_mesh_deformation = bending_net(nex_timestep_mesh, nex_pts_ts) ## get the nex_pts_ts's bending direction ## + active_mesh_deformation = torch.mean(active_mesh_deformation, dim=0) ### (3, ) deformation direction ## + # sdf value ? # + + ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + + self.ks_val_vals = ks_val.detach().cpu().numpy().item() + + # passive mesh # + cur_timestep_mesh = timestep_to_passive_mesh[input_pts_ts] + passive_mesh_sdf_value = act_sdf_net.sdf(cur_timestep_mesh) # nn_passive_pts -> the shape of the passive pts's sdf values # + maxx_dist = 2. + fs = (-1. * torch.sin(passive_mesh_sdf_value / maxx_dist * float(np.pi) / 2.) + 1) * active_mesh_deformation.unsqueeze(0) * ks_val #### + fs = torch.mean(fs, dim=0) ## (3,) ## + # fs = fs * -1. 
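+ # In this forward model the passive object is driven by the mean motion of the active
+ # hand, attenuated by each passive point's signed distance to it: points near contact
+ # receive (almost) the full motion, far points receive almost none. A compact restatement
+ # of the formula fs = k_s * (1 - sin(pi/2 * sdf / d_max)) * mean_active_motion used above
+ # (names are illustrative; relies on the torch / numpy imports at the top of this file):
+ def sdf_attenuated_force(sdf_vals, mean_active_motion, ks, max_dist=2.0):
+     # sdf_vals: (N,) signed distances of passive points; mean_active_motion: (3,)
+     weights = 1.0 - torch.sin(sdf_vals / max_dist * float(np.pi) / 2.0)
+     per_point_force = weights.unsqueeze(-1) * mean_active_motion.unsqueeze(0) * ks
+     return per_point_force.mean(dim=0)   # aggregated (3,) driving force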
+ + self.timestep_to_forward_deform[input_pts_ts] = fs.detach().cpu().numpy() # + + + raw_input_pts = input_pts[:, :3] # positional encoding includes raw 3D coordinates as first three entries # + # print(f) + if self.embed_fn_fine is not None: # embed fn # + input_pts = self.embed_fn_fine(input_pts) + + + if special_loss_return and details is None: # details is None # + details = {} + + expanded_input_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_input_pts_ts = expanded_input_pts_ts + nex_pts_ts + input_latents = self.bending_latent(expanded_input_pts_ts) + + # # print(f"input_pts: {input_pts.size()}, input_latents: {input_latents.size()}, raw_input_pts: {raw_input_pts.size()}") + # input_latents = input_latent.expand(input_pts.size()[0], -1) + # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + + x = fs.unsqueeze(0).repeat(input_pts.size(0), 1).contiguous() # + # x = self.rigid_translations(expanded_input_pts_ts) # .repeat(input_pts.size(0), 1).contiguous() + + unmasked_offsets = x + if details is not None: + details["unmasked_offsets"] = unmasked_offsets # get the unmasked offsets # + + if self.use_rigidity_network: # bending network? rigidity network... # # bending network and the bending network # # + if self.rigidity_use_latent: + x = torch.cat([input_pts, input_latents], -1) + else: + x = input_pts + + for i, layer in enumerate(self.rigidity_network): + x = layer(x) + # SIREN + if self.rigidity_activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.rigidity_network) - 1: + x = self.rigidity_activation_function(x) + if i in self.rigidity_skips: + x = torch.cat([input_pts, x], -1) + + rigidity_mask = (self.rigidity_tanh(x) + 1) / 2 # close to 1 for nonrigid, close to 0 for rigid + + if self.rigidity_test_time_cutoff is not None: + rigidity_mask[rigidity_mask <= self.rigidity_test_time_cutoff] = 0.0 + + if self.use_rigidity_network: + masked_offsets = rigidity_mask * unmasked_offsets + if self.test_time_scaling is not None: + masked_offsets *= self.test_time_scaling + new_points = raw_input_pts + masked_offsets # skip connection # rigidity + if details is not None: + details["rigidity_mask"] = rigidity_mask + details["masked_offsets"] = masked_offsets + else: + if self.test_time_scaling is not None: + unmasked_offsets *= self.test_time_scaling + new_points = raw_input_pts + unmasked_offsets # skip connection + # if input_pts_ts >= 5: + # avg_offsets_abs = torch.mean(torch.abs(unmasked_offsets), dim=0) + # print(f"input_ts: {input_pts_ts}, offset_avg: {avg_offsets_abs}") + + + if special_loss_return: + return details + else: + return new_points + + + + + + +class BendingNetwork(nn.Module): + def __init__(self, + d_in, + multires, + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetwork, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.output_ch = 3 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
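+ # The rigidity head above turns its raw output into a per-point gate in (0, 1) via
+ # (tanh(x) + 1) / 2 and scales the predicted offsets with it, so points the network deems
+ # rigid barely move. A minimal restatement of that gating (names are illustrative; relies
+ # on the torch import at the top of this file):
+ def apply_rigidity_mask(raw_pts, offsets, rigidity_logits, cutoff=None):
+     mask = (torch.tanh(rigidity_logits) + 1.0) / 2.0     # ~0 -> rigid, ~1 -> free to deform
+     if cutoff is not None:                                # optional test-time hard cutoff
+         mask = torch.where(mask <= cutoff, torch.zeros_like(mask), mask)
+     return raw_pts + mask * offsets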
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + # self.bending_latent = nn.Parameter( + # torch.zeros((self.bending_n_timesteps, self.bending_hi)) + # ) + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + if self.use_rigidity_network: + self.rigidity_activation_function = F.relu # F.relu, torch.sin + self.rigidity_skips = [] # do not include 0 and do not include depth-1 # + use_last_layer_bias = True + self.rigidity_tanh = nn.Tanh() + + if self.rigidity_use_latent: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + else: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.rigidity_network[:-1]): + if self.rigidity_activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.rigidity_activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights + self.rigidity_network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.rigidity_network[-1].bias.data *= 0.0 + + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32 + ).cuda() + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + # self.split_network = nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)]) + ##### split network full ##### + # self.split_network = nn.ModuleList( + # [nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + # ) for _ in range(self.bending_n_timesteps - 5)] + # ) + # for i_split in range(len(self.split_network)): + # with torch.no_grad(): + # for i, layer in enumerate(self.split_network[i_split][:-1]): + # if self.activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # # initialize final layer to zero weights to start out with straight rays + # self.split_network[i_split][-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_network[i_split][-1].bias.data *= 0.0 + ##### split network full ##### + + ##### split network single ##### + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + def forward(self, input_pts, input_pts_ts, details=None, special_loss_return=False): # special loss return # + raw_input_pts = input_pts[:, :3] # positional encoding includes raw 3D coordinates as first three entries # + # print(f) + if self.embed_fn_fine is not None: # embed fn # + input_pts = self.embed_fn_fine(input_pts) + + if special_loss_return and details is None: # details is None # + details = {} + + expanded_input_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_input_pts_ts = expanded_input_pts_ts + input_pts_ts + input_latents = self.bending_latent(expanded_input_pts_ts) + + # print(f"input_pts: {input_pts.size()}, input_latents: {input_latents.size()}, raw_input_pts: {raw_input_pts.size()}") + # input_latents = input_latent.expand(input_pts.size()[0], -1) + x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + cur_network = self.network + else: + cur_network = self.split_network + # cur_network = self.split_network[input_pts_ts - 5] + + # use no grad # + # n_timesteps + # if self.use_split_network and input_pts_ts < 5: + # if self.use_split_network and input_pts_ts < self.n_timesteps - 1: + # with torch.no_grad(): + # for i, layer in enumerate(cur_network): + # x = layer(x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts, x], -1) + # else: + # for i, layer in enumerate(cur_network): + # x = layer(x) + # # SIREN + # 
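+ # BendingNetwork.forward conditions the offset MLP on time through a learned latent code:
+ # the timestep index is looked up in an nn.Embedding, concatenated with the positionally
+ # encoded points, and pushed through the (possibly split) MLP below. A stripped-down
+ # sketch of that query, with `mlp` standing in for the layer loop (names illustrative):
+ #   t_idx = torch.full((pts.size(0),), t, dtype=torch.long, device=pts.device)
+ #   latent = bending_latent(t_idx)                 # (N, latent_dim), shared within a timestep
+ #   h = torch.cat([embed_fn(pts), latent], dim=-1) # time-conditioned input
+ #   offsets = mlp(h)                               # (N, 3) predicted displacements
+ #   warped = pts + offsets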
if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts, x], -1) + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([input_pts, x], -1) + ''' use the single split network without no_grad setting ''' + + # if self.use_split_network: + # with torch.no_grad(): + # for i, layer in enumerate(cur_network): + # x = layer(x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts, x], -1) + # else: + # for i, layer in enumerate(cur_network): + # x = layer(x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts, x], -1) + + unmasked_offsets = x + if details is not None: + details["unmasked_offsets"] = unmasked_offsets + + if self.use_rigidity_network: # bending network? rigidity network... # # bending network and the bending network # # + if self.rigidity_use_latent: + x = torch.cat([input_pts, input_latents], -1) + else: + x = input_pts + + for i, layer in enumerate(self.rigidity_network): + x = layer(x) + # SIREN + if self.rigidity_activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.rigidity_network) - 1: + x = self.rigidity_activation_function(x) + if i in self.rigidity_skips: + x = torch.cat([input_pts, x], -1) + + rigidity_mask = (self.rigidity_tanh(x) + 1) / 2 # close to 1 for nonrigid, close to 0 for rigid + + if self.rigidity_test_time_cutoff is not None: + rigidity_mask[rigidity_mask <= self.rigidity_test_time_cutoff] = 0.0 + + if self.use_rigidity_network: + masked_offsets = rigidity_mask * unmasked_offsets + if self.test_time_scaling is not None: + masked_offsets *= self.test_time_scaling + new_points = raw_input_pts + masked_offsets # skip connection # rigidity + if details is not None: + details["rigidity_mask"] = rigidity_mask + details["masked_offsets"] = masked_offsets + else: + if self.test_time_scaling is not None: + unmasked_offsets *= self.test_time_scaling + new_points = raw_input_pts + unmasked_offsets # skip connection + # if input_pts_ts >= 5: + # avg_offsets_abs = torch.mean(torch.abs(unmasked_offsets), dim=0) + # print(f"input_ts: {input_pts_ts}, offset_avg: {avg_offsets_abs}") + + + if self.use_opt_rigid_translations: + def_points = self.rigid_translations.unsqueeze(0).repeat(raw_input_pts.size(0), 1, 1) + def_points = batched_index_select(def_points, indices=expanded_input_pts_ts.unsqueeze(-1), dim=1).squeeze(1) + if len(def_points.size()) > 2: + def_points = def_points.squeeze(1) + minn_raw_input_pts, _ = torch.min(raw_input_pts, dim=0) + maxx_raw_input_pts, _ = torch.max(raw_input_pts, dim=0) + # print(f"minn_raw_input_pts: {minn_raw_input_pts}, maxx_raw_input_pts: {maxx_raw_input_pts}") + # get the 3D points tracking # optimize for the 3D points # + # + new_points = raw_input_pts - def_points + + if special_loss_return: # used for compute_divergence_loss() # long term motion and the tracking # long term + return details + else: + return 
new_points + + +# model 1 +# use rigid translations # +# use the rigidity network # +class BendingNetworkRigidTrans(nn.Module): + def __init__(self, + d_in, + multires, + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkRigidTrans, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.output_ch = 3 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + # self.bending_latent = nn.Parameter( + # torch.zeros((self.bending_n_timesteps, self.bending_hi)) + # ) + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.rigid_translations = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=3 # get the + ) + # self.rigid_translations.weight. + torch.nn.init.zeros_(self.rigid_translations.weight) + + + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + if self.use_rigidity_network: + self.rigidity_activation_function = F.relu # F.relu, torch.sin + self.rigidity_skips = [] # do not include 0 and do not include depth-1 # + use_last_layer_bias = True + self.rigidity_tanh = nn.Tanh() + + if self.rigidity_use_latent: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + else: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.rigidity_network[:-1]): + if self.rigidity_activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.rigidity_activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights + self.rigidity_network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.rigidity_network[-1].bias.data *= 0.0 + + # self.rigid_translations = torch.tensor( + # [[ 0.0000, 0.0000, 0.0000], + # [-0.0008, 0.0040, 0.0159], + # [-0.0566, 0.0099, 0.0173]], dtype=torch.float32 + # ).cuda() + # self.use_opt_rigid_translations = False # load utils and the loading .... 
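+ # BendingNetworkRigidTrans models the passive object's motion as a single learnable
+ # translation per timestep (the zero-initialized nn.Embedding above). A stripped-down
+ # sketch of the same idea as its own module (illustrative only; relies on the torch / nn
+ # imports at the top of this file):
+ class RigidTranslationField(nn.Module):
+     def __init__(self, n_timesteps):
+         super().__init__()
+         self.trans = nn.Embedding(n_timesteps, 3)
+         nn.init.zeros_(self.trans.weight)        # start from the identity motion
+ 
+     def forward(self, pts, t):
+         t_idx = torch.full((pts.size(0),), t, dtype=torch.long, device=pts.device)
+         return pts + self.trans(t_idx)           # the same translation for every point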
## + self.use_split_network = False + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + # self.split_network = nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)]) + self.split_network = nn.ModuleList( + [nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) for _ in range(self.bending_n_timesteps - 5)] + ) + for i_split in range(len(self.split_network)): + with torch.no_grad(): + for i, layer in enumerate(self.split_network[i_split][:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[i_split][-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[i_split][-1].bias.data *= 0.0 + + def forward(self, input_pts, input_pts_ts, details=None, special_loss_return=False): # special loss return # + raw_input_pts = input_pts[:, :3] # positional encoding includes raw 3D coordinates as first three entries # + # print(f) + if self.embed_fn_fine is not None: # embed fn # + input_pts = self.embed_fn_fine(input_pts) + + if special_loss_return and details is None: # details is None # + details = {} + + expanded_input_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_input_pts_ts = expanded_input_pts_ts + input_pts_ts + # input_latents = self.bending_latent(expanded_input_pts_ts) + + # # print(f"input_pts: {input_pts.size()}, input_latents: {input_latents.size()}, raw_input_pts: {raw_input_pts.size()}") + # # input_latents = input_latent.expand(input_pts.size()[0], -1) + # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.network + # else: + # # cur_network = self.split_network + # cur_network = self.split_network[input_pts_ts - 5] + + # # n_timesteps + # # if self.use_split_network and input_pts_ts < 5: + # if self.use_split_network and input_pts_ts < self.n_timesteps - 1: + # with torch.no_grad(): + # for i, layer in enumerate(cur_network): + # x = layer(x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts, x], -1) + # else: + # for i, layer in enumerate(cur_network): + # x = layer(x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts, x], -1) + + x = self.rigid_translations(expanded_input_pts_ts) # .repeat(input_pts.size(0), 1).contiguous() + + unmasked_offsets = x + if details is not None: + details["unmasked_offsets"] = unmasked_offsets + + if self.use_rigidity_network: # bending network? rigidity network... 
# # bending network and the bending network # # + if self.rigidity_use_latent: + x = torch.cat([input_pts, input_latents], -1) + else: + x = input_pts + + for i, layer in enumerate(self.rigidity_network): + x = layer(x) + # SIREN + if self.rigidity_activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.rigidity_network) - 1: + x = self.rigidity_activation_function(x) + if i in self.rigidity_skips: + x = torch.cat([input_pts, x], -1) + + rigidity_mask = (self.rigidity_tanh(x) + 1) / 2 # close to 1 for nonrigid, close to 0 for rigid + + if self.rigidity_test_time_cutoff is not None: + rigidity_mask[rigidity_mask <= self.rigidity_test_time_cutoff] = 0.0 + + if self.use_rigidity_network: + masked_offsets = rigidity_mask * unmasked_offsets + if self.test_time_scaling is not None: + masked_offsets *= self.test_time_scaling + new_points = raw_input_pts + masked_offsets # skip connection # rigidity + if details is not None: + details["rigidity_mask"] = rigidity_mask + details["masked_offsets"] = masked_offsets + else: + if self.test_time_scaling is not None: + unmasked_offsets *= self.test_time_scaling + new_points = raw_input_pts + unmasked_offsets # skip connection + # if input_pts_ts >= 5: + # avg_offsets_abs = torch.mean(torch.abs(unmasked_offsets), dim=0) + # print(f"input_ts: {input_pts_ts}, offset_avg: {avg_offsets_abs}") + + + # if self.use_opt_rigid_translations: + # def_points = self.rigid_translations.unsqueeze(0).repeat(raw_input_pts.size(0), 1, 1) + # def_points = batched_index_select(def_points, indices=expanded_input_pts_ts.unsqueeze(-1), dim=1).squeeze(1) + # if len(def_points.size()) > 2: + # def_points = def_points.squeeze(1) + # minn_raw_input_pts, _ = torch.min(raw_input_pts, dim=0) + # maxx_raw_input_pts, _ = torch.max(raw_input_pts, dim=0) + # # print(f"minn_raw_input_pts: {minn_raw_input_pts}, maxx_raw_input_pts: {maxx_raw_input_pts}") + # # get the 3D points tracking # optimize for the 3D points # + # # + # new_points = raw_input_pts - def_points + + if special_loss_return: # used for compute_divergence_loss() # long term motion and the tracking # long term + return details + else: + return new_points + + + def forward_delta(self, input_pts, input_pts_ts, details=None, special_loss_return=False): # special loss return # + raw_input_pts = input_pts[:, :3] # positional encoding includes raw 3D coordinates as first three entries # + # print(f) + if self.embed_fn_fine is not None: # embed fn # + input_pts = self.embed_fn_fine(input_pts) + + if special_loss_return and details is None: # details is None # + details = {} + + expanded_input_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_input_pts_ts = expanded_input_pts_ts + input_pts_ts + # input_latents = self.bending_latent(expanded_input_pts_ts) + + # # print(f"input_pts: {input_pts.size()}, input_latents: {input_latents.size()}, raw_input_pts: {raw_input_pts.size()}") + # # input_latents = input_latent.expand(input_pts.size()[0], -1) + # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.network + # else: + # # cur_network = self.split_network + # cur_network = self.split_network[input_pts_ts - 5] + + # # n_timesteps + # # if self.use_split_network and input_pts_ts < 5: + # if self.use_split_network and input_pts_ts < self.n_timesteps - 1: + # with torch.no_grad(): + # for i, layer in enumerate(cur_network): + # x = layer(x) + # # 
SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts, x], -1) + # else: + # for i, layer in enumerate(cur_network): + # x = layer(x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts, x], -1) + + if input_pts_ts > 0: + ## x_{0} = x_t + off_t + ## x_0 = x_{t-1} + off_t + ## 0 = x_t - x_{t-1} + off_t - off_{t-1} --> x_{t-1} = x_t + off_t - off_{t-1} + off_t = self.rigid_translations(expanded_input_pts_ts) + prev_expanded_input_pts_ts = expanded_input_pts_ts - 1 + prev_off_t = self.rigid_translations(prev_expanded_input_pts_ts) + x = off_t - prev_off_t + else: + off_t = self.rigid_translations(expanded_input_pts_ts) + x = off_t + + # x = self.rigid_translations(expanded_input_pts_ts) # .repeat(input_pts.size(0), 1).contiguous() + + unmasked_offsets = x + if details is not None: + details["unmasked_offsets"] = unmasked_offsets + + if self.use_rigidity_network: # bending network? rigidity network... # # bending network and the bending network # # + if self.rigidity_use_latent: + x = torch.cat([input_pts, input_latents], -1) + else: + x = input_pts + + for i, layer in enumerate(self.rigidity_network): + x = layer(x) + # SIREN + if self.rigidity_activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.rigidity_network) - 1: + x = self.rigidity_activation_function(x) + if i in self.rigidity_skips: + x = torch.cat([input_pts, x], -1) + + rigidity_mask = (self.rigidity_tanh(x) + 1) / 2 # close to 1 for nonrigid, close to 0 for rigid + + if self.rigidity_test_time_cutoff is not None: + rigidity_mask[rigidity_mask <= self.rigidity_test_time_cutoff] = 0.0 + + if self.use_rigidity_network: + masked_offsets = rigidity_mask * unmasked_offsets + if self.test_time_scaling is not None: + masked_offsets *= self.test_time_scaling + new_points = raw_input_pts + masked_offsets # skip connection # rigidity + if details is not None: + details["rigidity_mask"] = rigidity_mask + details["masked_offsets"] = masked_offsets + else: + if self.test_time_scaling is not None: + unmasked_offsets *= self.test_time_scaling + new_points = raw_input_pts + unmasked_offsets # skip connection + # if input_pts_ts >= 5: + # avg_offsets_abs = torch.mean(torch.abs(unmasked_offsets), dim=0) + # print(f"input_ts: {input_pts_ts}, offset_avg: {avg_offsets_abs}") + + + # if self.use_opt_rigid_translations: + # def_points = self.rigid_translations.unsqueeze(0).repeat(raw_input_pts.size(0), 1, 1) + # def_points = batched_index_select(def_points, indices=expanded_input_pts_ts.unsqueeze(-1), dim=1).squeeze(1) + # if len(def_points.size()) > 2: + # def_points = def_points.squeeze(1) + # minn_raw_input_pts, _ = torch.min(raw_input_pts, dim=0) + # maxx_raw_input_pts, _ = torch.max(raw_input_pts, dim=0) + # # print(f"minn_raw_input_pts: {minn_raw_input_pts}, maxx_raw_input_pts: {maxx_raw_input_pts}") + # # get the 3D points tracking # optimize for the 3D points # + # # + # new_points = raw_input_pts - def_points + + if special_loss_return: # used for compute_divergence_loss() # long term motion and the tracking # long term + return details + else: + return new_points diff --git a/models/fields_old.py b/models/fields_old.py new file mode 100644 index 
0000000000000000000000000000000000000000..17cf9b778be181c3b1099d331bb629ee31d39789 --- /dev/null +++ b/models/fields_old.py @@ -0,0 +1,18493 @@ + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +from models.embedder import get_embedder + +from scipy.spatial import KDTree +from torch.utils.data.sampler import WeightedRandomSampler +from torch.distributions.categorical import Categorical +from torch.distributions.uniform import Uniform + +def batched_index_select(values, indices, dim = 1): + value_dims = values.shape[(dim + 1):] + values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices)) + indices = indices[(..., *((None,) * len(value_dims)))] + indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims) + value_expand_len = len(indices_shape) - (dim + 1) + values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)] + + value_expand_shape = [-1] * len(values.shape) + expand_slice = slice(dim, (dim + value_expand_len)) + value_expand_shape[expand_slice] = indices.shape[expand_slice] + values = values.expand(*value_expand_shape) + + dim += value_expand_len + return values.gather(dim, indices) + +def update_quaternion(delta_angle, prev_quat): + s1 = 0 + s2 = prev_quat[0] + v2 = prev_quat[1:] + v1 = delta_angle / 2 + new_v = s1 * v2 + s2 * v1 + torch.cross(v1, v2) + new_s = s1 * s2 - torch.sum(v1 * v2) + new_quat = torch.cat([new_s.unsqueeze(0), new_v], dim=0) + return new_quat + +# def euler_to_quaternion(yaw, pitch, roll): +def euler_to_quaternion(roll, pitch, yaw): + qx = torch.sin(roll/2) * torch.cos(pitch/2) * torch.cos(yaw/2) - torch.cos(roll/2) * torch.sin(pitch/2) * torch.sin(yaw/2) + qy = torch.cos(roll/2) * torch.sin(pitch/2) * torch.cos(yaw/2) + torch.sin(roll/2) * torch.cos(pitch/2) * torch.sin(yaw/2) + qz = torch.cos(roll/2) * torch.cos(pitch/2) * torch.sin(yaw/2) - torch.sin(roll/2) * torch.sin(pitch/2) * torch.cos(yaw/2) + qw = torch.cos(roll/2) * torch.cos(pitch/2) * torch.cos(yaw/2) + torch.sin(roll/2) * torch.sin(pitch/2) * torch.sin(yaw/2) + + # qx = torch.sin() + return [qw, qx, qy, qz] + # return [qx, qy, qz, qw] + +def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as quaternions to rotation matrices. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + r, i, j, k = torch.unbind(quaternions, -1) # -1 for the quaternion matrix # + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. 
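+ # Note: the quaternion helpers in this file use the scalar-first convention (w, x, y, z),
+ # matching euler_to_quaternion above, which returns [qw, qx, qy, qz]. A quick sanity check
+ # one can run against quaternion_to_matrix (illustrative only):
+ #   eye = quaternion_to_matrix(torch.tensor([1.0, 0.0, 0.0, 0.0]))   # identity rotation
+ #   assert torch.allclose(eye, torch.eye(3))
+ #   rot_x_90 = quaternion_to_matrix(torch.tensor([0.7071, 0.7071, 0.0, 0.0]))  # ~90 deg about x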
+ two_s = 2.0 / (quaternions * quaternions).sum(-1) + + o = torch.stack( + ( + 1 - two_s * (j * j + k * k), + two_s * (i * j - k * r), + two_s * (i * k + j * r), + two_s * (i * j + k * r), + 1 - two_s * (i * i + k * k), + two_s * (j * k - i * r), + two_s * (i * k - j * r), + two_s * (j * k + i * r), + 1 - two_s * (i * i + j * j), + ), + -1, + ) + + return o.reshape(quaternions.shape[:-1] + (3, 3)) + + + + + + +class BendingNetworkJointDynModelV2(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkJointDynModelV2, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.output_ch = 3 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + ### timestep t ### + ## timestep t ## + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current # + # step 1 -> get active object's points correspondences # + # step 2 -> get passive object's points in correspondences # + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion # + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t # + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + # # + # self.bending_latent = nn.Parameter( + # torch.zeros((self.bending_n_timesteps, self.bending_hi)) + # ) + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_val = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.1 + + self.nn_hand_joints = 23 + self.dyn_input_channels = self.nn_hand_joints * (3 + 3) + 3 + self.dyn_output_channels = 3 + + ## dyn_pred_network ## + # self.dyn_pred_network = nn.ModuleList( + # [nn.Linear(self.dyn_input_channels, self.hidden_dimensions)] + + # [nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # # if i + 1 in self.skips + # # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.dyn_output_channels, bias=True)]) + dyn_pred_network = 
[nn.Linear(self.dyn_input_channels, self.hidden_dimensions)] + [nn.Linear(self.hidden_dimensions, self.hidden_dimensions) for i in range(self.network_depth - 2)] + [nn.Linear(self.hidden_dimensions, self.dyn_output_channels, bias=True)] + self.dyn_pred_network = nn.Sequential( + *dyn_pred_network + ) + + # ks_val # + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + if self.use_rigidity_network: + self.rigidity_activation_function = F.relu # F.relu, torch.sin + self.rigidity_skips = [] # do not include 0 and do not include depth-1 # + use_last_layer_bias = True + self.rigidity_tanh = nn.Tanh() + + if self.rigidity_use_latent: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + else: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.rigidity_network[:-1]): + if self.rigidity_activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.rigidity_activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights + self.rigidity_network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.rigidity_network[-1].bias.data *= 0.0 + + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32 + ).cuda() + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_forward_deform = {} + self.timestep_to_velocities = {} # timestep_to_actions # + # self. + self.mult_factor_vel_to_offset = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.mult_factor_vel_to_offset.weight) + # self.mult_factor_vel_to_offset.weight.data = self.mult_factor_vel_to_offset.weight.data * 0.1 + + self.mult_factor_force_to_vel = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.mult_factor_force_to_vel.weight) + + self.mult_factor_damping = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.mult_factor_damping.weight) + + self.dyn_type = "acc" + # self.dyn_type = "vel" + + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + # self.split_network = nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)]) + ##### split network full ##### + # self.split_network = nn.ModuleList( + # [nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + # ) for _ in range(self.bending_n_timesteps - 5)] + # ) + # for i_split in range(len(self.split_network)): + # with torch.no_grad(): + # for i, layer in enumerate(self.split_network[i_split][:-1]): + # if self.activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # 
https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # # initialize final layer to zero weights to start out with straight rays + # self.split_network[i_split][-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_network[i_split][-1].bias.data *= 0.0 + ##### split network full ##### + + ##### split network single ##### + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + def forward(self, input_pts, input_pts_ts, timestep_to_mesh, timestep_to_passive_mesh, timestep_to_joints_pts, details=None, special_loss_return=False): # special loss return # + #### the motion should be aggregated from all joints #### + #### the motion should be calculated via unsigned distances #### + + if self.dyn_type == "vel": + nex_pts_ts = input_pts_ts + 1 + act_joints = timestep_to_joints_pts[input_pts_ts] # act joints # + nex_act_joints = timestep_to_joints_pts[nex_pts_ts] ## nex pts ts ## + + cur_joints_motion = nex_act_joints - act_joints + + passive_meshes = timestep_to_passive_mesh[input_pts_ts] # passivE_mesh # + dist_cur_mesh_to_joints = torch.sum( + (passive_meshes.unsqueeze(1) - act_joints.unsqueeze(0)) ** 2,dim=-1 #### nn_passive_pts x nn_active_joints # + ) + 
dist_cur_mesh_to_joints = torch.sqrt(dist_cur_mesh_to_joints) + maxx_dist = 2. + ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + # print(f"ks_vals: {ks_val}") + self.ks_val_vals = ks_val.detach().cpu().numpy().item() + + ## cur_joints_motion ## \delta x_t ~ v_t ~ v_{t-1} + \delta t * a_{t-1}; a_{t-1} = f_{t-1} / m ## + ## cur joints motion ## + fs = (-1. * torch.sin(dist_cur_mesh_to_joints / maxx_dist * float(np.pi) / 2.) + 1).unsqueeze(-1) * cur_joints_motion.unsqueeze(0) * ks_val #### + fs = torch.sum(fs, dim=1) #### nn_passive_pts x 3 ### as the passive object's deformation # + fs = torch.mean(fs, dim=0) ### + + ### input_pts #### time to passive mesh ### + + new_pts = input_pts[:, :3] + fs.unsqueeze(0) ### the first three dimensions of the input pts + elif self.dyn_type == "acc": + nex_pts_ts = input_pts_ts + 1 + act_joints = timestep_to_joints_pts[input_pts_ts] # act joints # + nex_act_joints = timestep_to_joints_pts[nex_pts_ts] ## nex pts ts ## + + cur_joints_motion = nex_act_joints - act_joints + + passive_meshes = timestep_to_passive_mesh[input_pts_ts] # passivE_mesh # + dist_cur_mesh_to_joints = torch.sum( + (passive_meshes.unsqueeze(1) - act_joints.unsqueeze(0)) ** 2,dim=-1 #### nn_passive_pts x nn_active_joints # + ) + dist_cur_mesh_to_joints = torch.sqrt(dist_cur_mesh_to_joints) + maxx_dist = 2. + ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + # print(f"ks_vals: {ks_val}") + self.ks_val_vals = ks_val.detach().cpu().numpy().item() + + # #### nn_passive_pts x nn_joints x 3 #### # + dir_joints_to_passive_pts = passive_meshes.unsqueeze(1) - act_joints.unsqueeze(0) #### nn_passive_pts x nn_joints x 3 #### + dir_joints_to_passive_pts = dir_joints_to_passive_pts / torch.clamp(torch.norm(dir_joints_to_passive_pts, dim=-1, keepdim=True), min=1e-9) + + ## cur_joints_motion ## \delta x_t ~ v_t ~ v_{t-1} + \delta t * a_{t-1}; a_{t-1} = f_{t-1} / m ## + fs = (-1. * torch.sin(dist_cur_mesh_to_joints / maxx_dist * float(np.pi) / 2.) + 1).unsqueeze(-1) * cur_joints_motion.unsqueeze(0) * ks_val #### + # fs = dist_cur_mesh_to_joints.unsqueeze(-1) * cur_joints_motion.unsqueeze(0) * ks_val #### + # fs = (-1. * torch.sin(dist_cur_mesh_to_joints / maxx_dist * float(np.pi) / 2.) 
+ 1).unsqueeze(-1) * dir_joints_to_passive_pts * ks_val * 0.001 #### + fs = torch.sum(fs, dim=1) #### nn_passive_pts x 3 ### as the passive object's deformation # ### nn_passive_pts x nn_joints x 3 ### + fs = torch.mean(fs, dim=0) ### + force_vel_mult = self.mult_factor_force_to_vel(torch.zeros((1,)).long().cuda()).squeeze(0) #### + vel = fs * force_vel_mult + # print(f"fs: {fs.size()}") + vel_offset_mult = self.mult_factor_vel_to_offset(torch.zeros((1,)).long().cuda()).squeeze(0) + if input_pts_ts == 0: + # self.timestep_to_velocities[input_pts_ts] = fs.detach() + self.timestep_to_velocities[input_pts_ts] = vel.detach() + # offset = fs * vel_offset_mult + offset = vel * vel_offset_mult ### velity to the offset + # print(f"offset: {offset.size()}, vel_offset_mult: {vel_offset_mult.size()}") + else: + + prev_pts_ts = input_pts_ts - 1 + prev_velocity = self.timestep_to_velocities[prev_pts_ts] + mult_factor_damping = self.mult_factor_damping(torch.zeros((1,)).long().cuda()).squeeze(0) #### + cur_velocity = vel + prev_velocity * mult_factor_damping + self.timestep_to_velocities[input_pts_ts] = cur_velocity.detach() + offset = cur_velocity * vel_offset_mult + + new_pts = input_pts[:, :3] + offset.unsqueeze(0) + elif self.dyn_type == "net": + nex_pts_ts = input_pts_ts + 1 + act_joints = timestep_to_joints_pts[input_pts_ts] # act joints # + nex_act_joints = timestep_to_joints_pts[nex_pts_ts] ## nex pts ts ## + cur_joints_motion = nex_act_joints - act_joints + cur_joints_pos_motion_cat = torch.stack( + [act_joints, cur_joints_motion], dim=-1 + ) + cur_joints_pos_motion_cat = cur_joints_pos_motion_cat.contiguous().view(-1).contiguous() + + passive_meshes = timestep_to_passive_mesh[input_pts_ts] # passivE_mesh # + cur_passive_mean_pos = torch.mean(passive_meshes, dim=0) ### (3,) + dyn_model_input = torch.cat([cur_joints_pos_motion_cat, cur_passive_mean_pos], dim=-1) ## passive mean pose ## + dyn_model_output = self.dyn_pred_network(dyn_model_input.unsqueeze(0)).squeeze(0).contiguous() ## 3, + new_pts = input_pts[:, :3] + dyn_model_output.unsqueeze(0) + # print(f"new_pts: {new_pts.size()}") + else: + raise ValueError(f"Unrecognized dyn_type; {self.dyn_type}") + + return new_pts + + if special_loss_return: + return details + else: + return new_points + + + +# get the actionable underlying model # +# use the joint points -> passive object points to compute the rigid object's transformations # +# use the joint points -> +# momentum transition functjion -> from the momentum of each active object's point to the passive object # +# the addition but not the average ! # +# from the actionable underlying model # +# + +class BendingNetworkJointDynModel(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkJointDynModel, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.output_ch = 3 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
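+ # NOTE: this family of joint-dynamics bending networks supports three dyn_type modes
+ # (the default is selected near the end of __init__):
+ #   "vel" -- the distance-weighted joint motion is used directly as the per-step offset;
+ #   "acc" -- it is treated as a force, accumulated into a velocity across timesteps and then
+ #            scaled into an offset;
+ #   "net" -- a small MLP (dyn_pred_network) predicts the offset from the concatenated joint
+ #            positions/motions and the mean position of the passive mesh.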
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + ### timestep t ### + ## timestep t ## + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current # + # step 1 -> get active object's points correspondences # + # step 2 -> get passive object's points in correspondences # + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion # + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t # + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + # # + # self.bending_latent = nn.Parameter( + # torch.zeros((self.bending_n_timesteps, self.bending_hi)) + # ) + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_val = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.1 + + self.nn_hand_joints = 23 + self.dyn_input_channels = self.nn_hand_joints * (3 + 3) + 3 + self.dyn_output_channels = 3 + + ## dyn_pred_network ## + # self.dyn_pred_network = nn.ModuleList( + # [nn.Linear(self.dyn_input_channels, self.hidden_dimensions)] + + # [nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # # if i + 1 in self.skips + # # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.dyn_output_channels, bias=True)]) + dyn_pred_network = [nn.Linear(self.dyn_input_channels, self.hidden_dimensions)] + [nn.Linear(self.hidden_dimensions, self.hidden_dimensions) for i in range(self.network_depth - 2)] + [nn.Linear(self.hidden_dimensions, self.dyn_output_channels, bias=True)] + self.dyn_pred_network = nn.Sequential( + *dyn_pred_network + ) + + # ks_val # + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + if self.use_rigidity_network: + self.rigidity_activation_function = F.relu # F.relu, torch.sin + self.rigidity_skips = [] # do not include 0 and do not include depth-1 # + use_last_layer_bias = True + self.rigidity_tanh = nn.Tanh() + + if self.rigidity_use_latent: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + else: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.rigidity_network[:-1]): + if self.rigidity_activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.rigidity_activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights + self.rigidity_network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.rigidity_network[-1].bias.data *= 0.0 + + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32 + ).cuda() + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_forward_deform = {} + self.timestep_to_velocities = {} # timestep_to_actions # + # self. 
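+ # mult_factor_vel_to_offset is a single learnable scalar stored as a one-entry nn.Embedding
+ # (initialized to 1.0). In the "acc" branch of forward() it turns the accumulated velocity
+ # into the per-step offset:
+ #     v_0 = f_0,   v_t = f_t + v_{t-1},   offset_t = mult_factor_vel_to_offset * v_t
+ # Unlike BendingNetworkJointDynModelV2 above, this variant keeps no separate
+ # force-to-velocity or damping multipliers.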
+ self.mult_factor_vel_to_offset = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.mult_factor_vel_to_offset.weight) + # self.mult_factor_vel_to_offset.weight.data = self.mult_factor_vel_to_offset.weight.data * 0.1 + + self.dyn_type = "acc" + # self.dyn_type = "vel" + + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + # self.split_network = nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)]) + ##### split network full ##### + # self.split_network = nn.ModuleList( + # [nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + # ) for _ in range(self.bending_n_timesteps - 5)] + # ) + # for i_split in range(len(self.split_network)): + # with torch.no_grad(): + # for i, layer in enumerate(self.split_network[i_split][:-1]): + # if self.activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # # initialize final layer to zero weights to start out with straight rays + # self.split_network[i_split][-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_network[i_split][-1].bias.data *= 0.0 + ##### split network full ##### + + ##### split network single ##### + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + def forward(self, input_pts, input_pts_ts, timestep_to_mesh, timestep_to_passive_mesh, timestep_to_joints_pts, details=None, special_loss_return=False): # special loss return # + #### the motion should be aggregated from all joints #### + #### the motion should be calculated via unsigned distances #### + + if self.dyn_type == "vel": + nex_pts_ts = input_pts_ts + 1 + act_joints = timestep_to_joints_pts[input_pts_ts] # act joints # + nex_act_joints = timestep_to_joints_pts[nex_pts_ts] ## nex pts ts ## + + cur_joints_motion = nex_act_joints - act_joints + + passive_meshes = timestep_to_passive_mesh[input_pts_ts] # passivE_mesh # + dist_cur_mesh_to_joints = torch.sum( + (passive_meshes.unsqueeze(1) - act_joints.unsqueeze(0)) ** 2,dim=-1 #### nn_passive_pts x nn_active_joints # + ) + dist_cur_mesh_to_joints = torch.sqrt(dist_cur_mesh_to_joints) 
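+ # Each joint's frame-to-frame motion is weighted by w(d) = 1 - sin(d / maxx_dist * pi / 2),
+ # so w(0) = 1 and w(maxx_dist) = 0 (e.g. with maxx_dist = 2.0, a passive point 1.0 away from
+ # a joint gets weight 1 - sin(pi / 4) ~= 0.29). The weighted motions are scaled by the
+ # learnable ks_val, summed over joints and averaged over the passive points, giving a single
+ # translation that is applied to every query point.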
+ maxx_dist = 2. + ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + # print(f"ks_vals: {ks_val}") + self.ks_val_vals = ks_val.detach().cpu().numpy().item() + + ## cur_joints_motion ## \delta x_t ~ v_t ~ v_{t-1} + \delta t * a_{t-1}; a_{t-1} = f_{t-1} / m ## + ## cur joints motion ## + fs = (-1. * torch.sin(dist_cur_mesh_to_joints / maxx_dist * float(np.pi) / 2.) + 1).unsqueeze(-1) * cur_joints_motion.unsqueeze(0) * ks_val #### + fs = torch.sum(fs, dim=1) #### nn_passive_pts x 3 ### as the passive object's deformation # + fs = torch.mean(fs, dim=0) ### + + ### input_pts #### time to passive mesh ### + + new_pts = input_pts[:, :3] + fs.unsqueeze(0) ### the first three dimensions of the input pts + elif self.dyn_type == "acc": + nex_pts_ts = input_pts_ts + 1 + act_joints = timestep_to_joints_pts[input_pts_ts] # act joints # + nex_act_joints = timestep_to_joints_pts[nex_pts_ts] ## nex pts ts ## + + cur_joints_motion = nex_act_joints - act_joints + + passive_meshes = timestep_to_passive_mesh[input_pts_ts] # passivE_mesh # + dist_cur_mesh_to_joints = torch.sum( + (passive_meshes.unsqueeze(1) - act_joints.unsqueeze(0)) ** 2,dim=-1 #### nn_passive_pts x nn_active_joints # + ) + dist_cur_mesh_to_joints = torch.sqrt(dist_cur_mesh_to_joints) + maxx_dist = 2. + ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + # print(f"ks_vals: {ks_val}") + self.ks_val_vals = ks_val.detach().cpu().numpy().item() + + ## cur_joints_motion ## \delta x_t ~ v_t ~ v_{t-1} + \delta t * a_{t-1}; a_{t-1} = f_{t-1} / m ## + fs = (-1. * torch.sin(dist_cur_mesh_to_joints / maxx_dist * float(np.pi) / 2.) + 1).unsqueeze(-1) * cur_joints_motion.unsqueeze(0) * ks_val #### + fs = torch.sum(fs, dim=1) #### nn_passive_pts x 3 ### as the passive object's deformation # + fs = torch.mean(fs, dim=0) ### + # print(f"fs: {fs.size()}") + vel_offset_mult = self.mult_factor_vel_to_offset(torch.zeros((1,)).long().cuda()).squeeze(0) + if input_pts_ts == 0: + self.timestep_to_velocities[input_pts_ts] = fs.detach() + offset = fs * vel_offset_mult + # print(f"offset: {offset.size()}, vel_offset_mult: {vel_offset_mult.size()}") + else: + + prev_pts_ts = input_pts_ts - 1 + prev_velocity = self.timestep_to_velocities[prev_pts_ts] + cur_velocity = fs + prev_velocity + self.timestep_to_velocities[input_pts_ts] = cur_velocity.detach() + offset = cur_velocity * vel_offset_mult + + new_pts = input_pts[:, :3] + offset.unsqueeze(0) + elif self.dyn_type == "net": + nex_pts_ts = input_pts_ts + 1 + act_joints = timestep_to_joints_pts[input_pts_ts] # act joints # + nex_act_joints = timestep_to_joints_pts[nex_pts_ts] ## nex pts ts ## + cur_joints_motion = nex_act_joints - act_joints + cur_joints_pos_motion_cat = torch.stack( + [act_joints, cur_joints_motion], dim=-1 + ) + cur_joints_pos_motion_cat = cur_joints_pos_motion_cat.contiguous().view(-1).contiguous() + + passive_meshes = timestep_to_passive_mesh[input_pts_ts] # passivE_mesh # + cur_passive_mean_pos = torch.mean(passive_meshes, dim=0) ### (3,) + dyn_model_input = torch.cat([cur_joints_pos_motion_cat, cur_passive_mean_pos], dim=-1) ## passive mean pose ## + dyn_model_output = self.dyn_pred_network(dyn_model_input.unsqueeze(0)).squeeze(0).contiguous() ## 3, + new_pts = input_pts[:, :3] + dyn_model_output.unsqueeze(0) + # print(f"new_pts: {new_pts.size()}") + else: + raise ValueError(f"Unrecognized dyn_type; {self.dyn_type}") + + + + return new_pts + + # ts_joints = timestep_to_joints_pts[input_pts_ts] + + + # query the bending_net for the active obj's deformation flow # + # query the 
bending_net_passive for the passive obj's deformation flow # + # input_pts_ts should goes from zero to maxx_ts - 1 # + # nex_pts_ts = input_pts_ts - 1 + + # cur_ts_joints = timestep_to_joints_pts[input_pts_ts] + # nex_ts_joints = timestep_to_joints_pts[nex_pts_ts] + # cur_ts_joints_motion = nex_ts_joints - cur_ts_joints # nn_joints x 3 # + #### calculate the unsigned distance from each point to the joint point #### + + # cur_timestep_mesh = timestep_to_passive_mesh[nex_pts_ts] + # dist_cur_mesh_to_joints = torch.sum( + # (cur_timestep_mesh.unsqueeze(1) - nex_ts_joints.unsqueeze(0)) ** 2,dim=-1 #### nn_passive_pts x nn_active_joints # + # ) + # dist_cur_mesh_to_joints = torch.sqrt(dist_cur_mesh_to_joints) ### nn_passive_pts x nn_active_joints # + # dist_cur_mesh_to_joints = + ## get the cur + ### forces? ### + # maxx_dist = 2. + # ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + # self.ks_val_vals = ks_val.detach().cpu().numpy().item() + # fs = (-1. * torch.sin(dist_cur_mesh_to_joints / maxx_dist * float(np.pi) / 2.) + 1).unsqueeze(-1) * cur_ts_joints_motion.unsqueeze(0) * ks_val #### ## use the motion ! + # fs = torch.sum(fs, dim=1) #### nn_passive_pts x 3 ### as the passive object's deformation # + # fs = torch.mean(fs, dim=0) ### + + + + + + # nex_timestep_mesh = timestep_to_mesh[nex_pts_ts] + + # active_mesh_deformation = bending_net(nex_timestep_mesh, nex_pts_ts) ## get the nex_pts_ts's bending direction ## + # active_mesh_deformation = torch.mean(active_mesh_deformation, dim=0) ### (3, ) deformation direction ## + # # sdf value ? # + + # ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + + # self.ks_val_vals = ks_val.detach().cpu().numpy().item() + + # # passive mesh # + # cur_timestep_mesh = timestep_to_passive_mesh[input_pts_ts] + # passive_mesh_sdf_value = act_sdf_net.sdf(cur_timestep_mesh) # nn_passive_pts -> the shape of the passive pts's sdf values # + # maxx_dist = 2. + # fs = (-1. * torch.sin(passive_mesh_sdf_value / maxx_dist * float(np.pi) / 2.) + 1) * active_mesh_deformation.unsqueeze(0) * ks_val #### + # fs = torch.mean(fs, dim=0) ## (3,) ## + # fs = fs * -1. + + # self.timestep_to_forward_deform[input_pts_ts] = fs.detach().cpu().numpy() # + # self.timestep_to_forward_deform[nex_pts_ts] = -fs.detach().cpu().numpy() # + + + # raw_input_pts = input_pts[:, :3] # positional encoding includes raw 3D coordinates as first three entries # + # # print(f) + # if self.embed_fn_fine is not None: # embed fn # + # input_pts = self.embed_fn_fine(input_pts) + + + # if special_loss_return and details is None: # details is None # + # details = {} + + # expanded_input_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + # expanded_input_pts_ts = expanded_input_pts_ts + nex_pts_ts + # input_latents = self.bending_latent(expanded_input_pts_ts) + + # # print(f"input_pts: {input_pts.size()}, input_latents: {input_latents.size()}, raw_input_pts: {raw_input_pts.size()}") + # input_latents = input_latent.expand(input_pts.size()[0], -1) + # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + + # x = fs.unsqueeze(0).repeat(input_pts.size(0), 1).contiguous() # + # x = self.rigid_translations(expanded_input_pts_ts) # .repeat(input_pts.size(0), 1).contiguous() + + # unmasked_offsets = x + # if details is not None: + # details["unmasked_offsets"] = unmasked_offsets # get the unmasked offsets # + + # if self.use_rigidity_network: # bending network? rigidity network... 
# # bending network and the bending network # # + # if self.rigidity_use_latent: + # x = torch.cat([input_pts, input_latents], -1) + # else: + # x = input_pts + + # for i, layer in enumerate(self.rigidity_network): + # x = layer(x) + # # SIREN + # if self.rigidity_activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.rigidity_network) - 1: + # x = self.rigidity_activation_function(x) + # if i in self.rigidity_skips: + # x = torch.cat([input_pts, x], -1) + + # rigidity_mask = (self.rigidity_tanh(x) + 1) / 2 # close to 1 for nonrigid, close to 0 for rigid + + # if self.rigidity_test_time_cutoff is not None: + # rigidity_mask[rigidity_mask <= self.rigidity_test_time_cutoff] = 0.0 + + # if self.use_rigidity_network: + # masked_offsets = rigidity_mask * unmasked_offsets + # if self.test_time_scaling is not None: + # masked_offsets *= self.test_time_scaling + # new_points = raw_input_pts + masked_offsets # skip connection # rigidity + # if details is not None: + # details["rigidity_mask"] = rigidity_mask + # details["masked_offsets"] = masked_offsets + # else: + # if self.test_time_scaling is not None: + # unmasked_offsets *= self.test_time_scaling + # new_points = raw_input_pts + unmasked_offsets # skip connection + # # if input_pts_ts >= 5: + # # avg_offsets_abs = torch.mean(torch.abs(unmasked_offsets), dim=0) + # # print(f"input_ts: {input_pts_ts}, offset_avg: {avg_offsets_abs}") + + + if special_loss_return: + return details + else: + return new_points + + + + +class BendingNetworkForwardJointDyn(nn.Module): + def __init__(self, + d_in, + multires, # fileds # # fields # fields # fields # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkForwardJointDyn, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.output_ch = 3 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
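+ # NOTE: "forward" variant of the joint-dynamics bending network. Its forward() aggregates the
+ # joint motion between the queried timestep and the previous one with the same
+ # 1 - sin(d / maxx_dist * pi / 2) kernel, broadcasts the resulting translation to all query
+ # points, and optionally gates it with the per-point rigidity mask predicted by
+ # rigidity_network.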
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + ## timestep t ## # + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + # self.bending_latent = nn.Parameter( + # torch.zeros((self.bending_n_timesteps, self.bending_hi)) + # ) + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_val = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.1 + + # ks_val # + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + if self.use_rigidity_network: + self.rigidity_activation_function = F.relu # F.relu, torch.sin + self.rigidity_skips = [] # do not include 0 and do not include depth-1 # + use_last_layer_bias = True + self.rigidity_tanh = nn.Tanh() + + if self.rigidity_use_latent: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + else: + self.rigidity_network = nn.ModuleList( + [nn.Linear(self.input_ch, self.rigidity_hidden_dimensions)] + + [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + if i + 1 in self.rigidity_skips + else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + for i in range(self.rigidity_network_depth - 2)] + + [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.rigidity_network[:-1]): + if self.rigidity_activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.rigidity_activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights + self.rigidity_network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.rigidity_network[-1].bias.data *= 0.0 + + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32 + ).cuda() + self.use_opt_rigid_translations = False # load utils and the loading .... 
## + self.use_split_network = False + + self.timestep_to_forward_deform = {} + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + # self.split_network = nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)]) + ##### split network full ##### + # self.split_network = nn.ModuleList( + # [nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + # ) for _ in range(self.bending_n_timesteps - 5)] + # ) + # for i_split in range(len(self.split_network)): + # with torch.no_grad(): + # for i, layer in enumerate(self.split_network[i_split][:-1]): + # if self.activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # # initialize final layer to zero weights to start out with straight rays + # self.split_network[i_split][-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_network[i_split][-1].bias.data *= 0.0 + ##### split network full ##### + + ##### split network single ##### + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + def forward(self, input_pts, input_pts_ts, timestep_to_mesh, timestep_to_passive_mesh, timestep_to_joints_pts, bending_net, bending_net_passive, act_sdf_net, details=None, special_loss_return=False): # special loss return # + #### the motion should be aggregated from all joints #### + #### the motion should be calculated via unsigned distances #### + + + + # query the bending_net for the active obj's deformation flow # + # query the bending_net_passive for the passive obj's deformation flow # + # input_pts_ts should goes from zero to maxx_ts - 1 # + nex_pts_ts = input_pts_ts - 1 + + cur_ts_joints = timestep_to_joints_pts[input_pts_ts] + nex_ts_joints = timestep_to_joints_pts[nex_pts_ts] + cur_ts_joints_motion = nex_ts_joints - cur_ts_joints # nn_joints x 3 # + #### calculate the unsigned distance from each point to the joint point #### + + cur_timestep_mesh = timestep_to_passive_mesh[nex_pts_ts] + dist_cur_mesh_to_joints = torch.sum( + (cur_timestep_mesh.unsqueeze(1) - nex_ts_joints.unsqueeze(0)) ** 2,dim=-1 #### nn_passive_pts x nn_active_joints # + ) + dist_cur_mesh_to_joints = torch.sqrt(dist_cur_mesh_to_joints) ### nn_passive_pts x nn_active_joints # + # dist_cur_mesh_to_joints = + ## get the cur + ### forces? ### + maxx_dist = 2. + ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + self.ks_val_vals = ks_val.detach().cpu().numpy().item() + fs = (-1. * torch.sin(dist_cur_mesh_to_joints / maxx_dist * float(np.pi) / 2.) + 1).unsqueeze(-1) * cur_ts_joints_motion.unsqueeze(0) * ks_val #### ## use the motion ! + fs = torch.sum(fs, dim=1) #### nn_passive_pts x 3 ### as the passive object's deformation # + fs = torch.mean(fs, dim=0) ### + + + + + + # nex_timestep_mesh = timestep_to_mesh[nex_pts_ts] + + # active_mesh_deformation = bending_net(nex_timestep_mesh, nex_pts_ts) ## get the nex_pts_ts's bending direction ## + # active_mesh_deformation = torch.mean(active_mesh_deformation, dim=0) ### (3, ) deformation direction ## + # # sdf value ? 
# + + # ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + + # self.ks_val_vals = ks_val.detach().cpu().numpy().item() + + # # passive mesh # + # cur_timestep_mesh = timestep_to_passive_mesh[input_pts_ts] + # passive_mesh_sdf_value = act_sdf_net.sdf(cur_timestep_mesh) # nn_passive_pts -> the shape of the passive pts's sdf values # + # maxx_dist = 2. + # fs = (-1. * torch.sin(passive_mesh_sdf_value / maxx_dist * float(np.pi) / 2.) + 1) * active_mesh_deformation.unsqueeze(0) * ks_val #### + # fs = torch.mean(fs, dim=0) ## (3,) ## + # fs = fs * -1. + + # self.timestep_to_forward_deform[input_pts_ts] = fs.detach().cpu().numpy() # + self.timestep_to_forward_deform[nex_pts_ts] = -fs.detach().cpu().numpy() # + + + raw_input_pts = input_pts[:, :3] # positional encoding includes raw 3D coordinates as first three entries # + # print(f) + if self.embed_fn_fine is not None: # embed fn # + input_pts = self.embed_fn_fine(input_pts) + + + if special_loss_return and details is None: # details is None # + details = {} + + expanded_input_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_input_pts_ts = expanded_input_pts_ts + nex_pts_ts + input_latents = self.bending_latent(expanded_input_pts_ts) + + # # print(f"input_pts: {input_pts.size()}, input_latents: {input_latents.size()}, raw_input_pts: {raw_input_pts.size()}") + # input_latents = input_latent.expand(input_pts.size()[0], -1) + # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + + x = fs.unsqueeze(0).repeat(input_pts.size(0), 1).contiguous() # + # x = self.rigid_translations(expanded_input_pts_ts) # .repeat(input_pts.size(0), 1).contiguous() + + unmasked_offsets = x + if details is not None: + details["unmasked_offsets"] = unmasked_offsets # get the unmasked offsets # + + if self.use_rigidity_network: # bending network? rigidity network... 
# # bending network and the bending network # # + if self.rigidity_use_latent: + x = torch.cat([input_pts, input_latents], -1) + else: + x = input_pts + + for i, layer in enumerate(self.rigidity_network): + x = layer(x) + # SIREN + if self.rigidity_activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.rigidity_network) - 1: + x = self.rigidity_activation_function(x) + if i in self.rigidity_skips: + x = torch.cat([input_pts, x], -1) + + rigidity_mask = (self.rigidity_tanh(x) + 1) / 2 # close to 1 for nonrigid, close to 0 for rigid + + if self.rigidity_test_time_cutoff is not None: + rigidity_mask[rigidity_mask <= self.rigidity_test_time_cutoff] = 0.0 + + if self.use_rigidity_network: + masked_offsets = rigidity_mask * unmasked_offsets + if self.test_time_scaling is not None: + masked_offsets *= self.test_time_scaling + new_points = raw_input_pts + masked_offsets # skip connection # rigidity + if details is not None: + details["rigidity_mask"] = rigidity_mask + details["masked_offsets"] = masked_offsets + else: + if self.test_time_scaling is not None: + unmasked_offsets *= self.test_time_scaling + new_points = raw_input_pts + unmasked_offsets # skip connection + # if input_pts_ts >= 5: + # avg_offsets_abs = torch.mean(torch.abs(unmasked_offsets), dim=0) + # print(f"input_ts: {input_pts_ts}, offset_avg: {avg_offsets_abs}") + + + if special_loss_return: + return details + else: + return new_points + + + +# +class BendingNetworkRigidTransForward(nn.Module): + def __init__(self, + d_in, + multires, # fileds # # fields # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkRigidTransForward, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.output_ch = 3 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
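+ # NOTE: this variant learns one rigid translation per timestep directly: rigid_trans below is
+ # a (bending_n_timesteps x 3) nn.Embedding initialized to zero, and forward() simply looks up
+ # the translation for the previous timestep and subtracts it from the query points; the MLP /
+ # ks_val machinery kept in this class is currently bypassed (its use in forward() is
+ # commented out).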
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + ## timestep t ## + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + # self.bending_latent = nn.Parameter( + # torch.zeros((self.bending_n_timesteps, self.bending_hi)) + # ) + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.rigid_trans = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=3 + ) + torch.nn.init.zeros_(self.rigid_trans.weight) + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + # if self.activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + + + # ks_val # + # # input_channel + bending_latent_size -> the input size # + # self.network = nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + # if i + 1 in self.skips + # else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + # for i in range(self.network_depth - 2)] + + # [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # # with torch.no_grad(): + # # for i, layer in enumerate(self.network[:-1]): + # # torch.nn.init.xavier_uniform_(layer.weight) + # # torch.nn.init.zeros_(layer.bias) + # # self.network[-1].weight.data *= 0.0 + # # self.network[-1].bias.data *= 0.0 + + + # # # initialize weights + # with torch.no_grad(): + # for i, layer in enumerate(self.network[:-1]): + # if self.activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # # initialize final layer to zero weights to start out with straight rays + # self.network[-1].weight.data *= 0.0 + # if use_last_layer_bias: + # self.network[-1].bias.data *= 0.0 + + self.timestep_to_vel = {} + # if self.use_rigidity_network: + # self.rigidity_activation_function = F.relu # F.relu, torch.sin + # self.rigidity_skips = [] # do not include 0 and do not include depth-1 # + # use_last_layer_bias = True + # self.rigidity_tanh = nn.Tanh() + + # if self.rigidity_use_latent: + # self.rigidity_network = nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.rigidity_hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # if i + 1 in self.rigidity_skips + # else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # for i in range(self.rigidity_network_depth - 2)] + + # [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + # else: + # self.rigidity_network = nn.ModuleList( + # [nn.Linear(self.input_ch, self.rigidity_hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # if i + 1 in self.rigidity_skips + # else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # for i in range(self.rigidity_network_depth - 2)] + + # [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + + # # initialize weights + # 
with torch.no_grad(): + # for i, layer in enumerate(self.rigidity_network[:-1]): + # if self.rigidity_activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.rigidity_activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # # initialize final layer to zero weights + # self.rigidity_network[-1].weight.data *= 0.0 + # if use_last_layer_bias: + # self.rigidity_network[-1].bias.data *= 0.0 + + ### pre-optimized rigid translations ### + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32 + ).cuda() + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + + + self.timestep_to_forward_deform = {} + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + def forward(self, input_pts, input_pts_ts, timestep_to_passive_mesh, act_sdf_net=None, details=None, special_loss_return=False): # special loss return # + + # query the bending_net for the active obj's deformation flow # + # query the bending_net_passive for the passive obj's deformation flow # + # input_pts_ts should goes from zero to maxx_ts - 1 # + # nex_pts_ts = input_pts_ts + 1 + + prev_pts_ts = input_pts_ts - 1 # previous timestep # # rigid_trans # + prev_ts_mesh = timestep_to_passive_mesh[prev_pts_ts] # timestep to passive mesh # + + cur_passive_mesh = timestep_to_passive_mesh[input_pts_ts] + prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] ## nn_pts x 3 ## + + + raw_input_pts = input_pts[:, :3] + + if self.embed_fn_fine is not None: # embed fn # + prev_passive_mesh = self.embed_fn_fine(prev_passive_mesh) + + # + # expanded_prev_pts_ts = torch.zeros((prev_passive_mesh.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = torch.zeros((prev_passive_mesh.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + + cur_offset = self.rigid_trans(expanded_prev_pts_ts[0]).squeeze(0) + # expanded_prev_pts_ts = + + # input_latents = self.bending_latent(expanded_prev_pts_ts) # input latents + + # ### TODO: how to optimize the force network ? 
###
+        # ######### 1) only use points from passive mesh for optimization #########
+
+        # # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents #
+        # # print(prev_passive_mesh)
+        # # print(f"prev_pts_ts: {prev_pts_ts}, prev_passive_mesh: {prev_passive_mesh.size()}, input_latents: {input_latents.size()}",)
+        # # print(f"prev_pts_ts: {prev_pts_ts}, input_pts: {input_pts.size()}, input_latents: {input_latents.size()}",)
+        # # input_latents = self.bending_latent(expandedp)
+        # x = torch.cat([prev_passive_mesh, input_latents], dim=-1)
+        # # x = torch.cat([input_latents[:, :3], input_latents], dim=-1)
+        # # x = torch.cat([input_pts, input_latents], dim=-1)
+
+        # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5):
+        #     cur_network = self.network
+        # else:
+        #     cur_network = self.split_network
+        # # cur_network = self.split_network[input_pts_ts - 5]
+
+        # ''' use the single split network without no_grad setting '''
+        # for i, layer in enumerate(cur_network):
+        #     # print(f"i: {i}")
+        #     x = layer(x)
+        #     # SIREN
+        #     if self.activation_function.__name__ == "sin" and i == 0:
+        #         x *= 30.0
+        #     if i != len(self.network) - 1:
+        #         x = self.activation_function(x)
+        #         if i in self.skips:
+        #             x = torch.cat([input_pts, x], -1)
+        # ''' use the single split network without no_grad setting '''
+
+
+        # forces = x # nn_pts x 3 # # at the
+        # rigid_acc = torch.mean(forces, dim=0)
+        # k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) #
+        # k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) #
+        # delta_vel = rigid_acc * k_acc_to_vel
+        # if prev_pts_ts == 0:
+        #     cur_vel = delta_vel
+        # else:
+        #     cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1]
+        # self.timestep_to_vel[prev_pts_ts] = cur_vel.detach()
+        # cur_offset = k_vel_to_offset * cur_vel
+
+        # cur_of
+
+        new_pts = input_pts[:, :3] - cur_offset.unsqueeze(0)
+        return new_pts
+
+
+
+
+
+        nex_timestep_mesh = timestep_to_mesh[nex_pts_ts]
+        # c
+
+        active_mesh_deformation = bending_net(nex_timestep_mesh, nex_pts_ts) ## get the nex_pts_ts's bending direction ##
+        active_mesh_deformation = torch.mean(active_mesh_deformation, dim=0) ### (3, ) deformation direction ##
+        # sdf value ? #
+
+        ks_val = self.ks_val(torch.zeros((1,)).long().cuda())
+
+        self.ks_val_vals = ks_val.detach().cpu().numpy().item()
+
+        # passive mesh #
+        cur_timestep_mesh = timestep_to_passive_mesh[input_pts_ts]
+        passive_mesh_sdf_value = act_sdf_net.sdf(cur_timestep_mesh) # nn_passive_pts -> the shape of the passive pts's sdf values #
+        maxx_dist = 2.
+        fs = (-1. * torch.sin(passive_mesh_sdf_value / maxx_dist * float(np.pi) / 2.) + 1) * active_mesh_deformation.unsqueeze(0) * ks_val ####
+        fs = torch.mean(fs, dim=0) ## (3,) ##
+        # fs = fs * -1.
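# Illustrative sketch (not part of the patch above): the unreachable block ending here
# drives the passive object with the active mesh's mean deformation, attenuated by
# signed distance. `sdf_vals`, `active_def`, `ks_val`, and `maxx_dist` below are
# stand-ins for the corresponding quantities in forward(); this is only a minimal,
# self-contained restatement of that formula, not the repository's API.
import numpy as np
import torch

def sdf_attenuated_push(sdf_vals: torch.Tensor,    # (N,) signed distances of passive points to the active object
                        active_def: torch.Tensor,  # (3,) mean deformation of the active mesh
                        ks_val: torch.Tensor,      # learnable scalar gain (the ks_val embedding)
                        maxx_dist: float = 2.0) -> torch.Tensor:
    # attenuation is roughly 1 near the active surface (sdf ~ 0) and decays to 0 at maxx_dist
    atten = 1.0 - torch.sin(sdf_vals / maxx_dist * float(np.pi) / 2.0)
    # per-point contribution, then averaged into a single (3,) rigid push, as `fs` above
    fs = atten.unsqueeze(-1) * active_def.unsqueeze(0) * ks_val
    return fs.mean(dim=0)

# toy usage: a point close to the active hand contributes far more than a distant one
fs = sdf_attenuated_push(torch.tensor([0.05, 0.5, 1.9]),
                         torch.tensor([0.0, 0.0, -1.0]),
                         torch.tensor(0.2))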
+ + self.timestep_to_forward_deform[input_pts_ts] = fs.detach().cpu().numpy() # + + + raw_input_pts = input_pts[:, :3] # positional encoding includes raw 3D coordinates as first three entries # + # print(f) + if self.embed_fn_fine is not None: # embed fn # + input_pts = self.embed_fn_fine(input_pts) + + + if special_loss_return and details is None: # details is None # + details = {} + + expanded_input_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_input_pts_ts = expanded_input_pts_ts + nex_pts_ts + input_latents = self.bending_latent(expanded_input_pts_ts) + + # # print(f"input_pts: {input_pts.size()}, input_latents: {input_latents.size()}, raw_input_pts: {raw_input_pts.size()}") + # input_latents = input_latent.expand(input_pts.size()[0], -1) + # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + + x = fs.unsqueeze(0).repeat(input_pts.size(0), 1).contiguous() # + # x = self.rigid_translations(expanded_input_pts_ts) # .repeat(input_pts.size(0), 1).contiguous() + + unmasked_offsets = x + if details is not None: + details["unmasked_offsets"] = unmasked_offsets # get the unmasked offsets # + + if self.use_rigidity_network: # bending network? rigidity network... # # bending network and the bending network # # + if self.rigidity_use_latent: + x = torch.cat([input_pts, input_latents], -1) + else: + x = input_pts + + for i, layer in enumerate(self.rigidity_network): + x = layer(x) + # SIREN + if self.rigidity_activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.rigidity_network) - 1: + x = self.rigidity_activation_function(x) + if i in self.rigidity_skips: + x = torch.cat([input_pts, x], -1) + + rigidity_mask = (self.rigidity_tanh(x) + 1) / 2 # close to 1 for nonrigid, close to 0 for rigid + + if self.rigidity_test_time_cutoff is not None: + rigidity_mask[rigidity_mask <= self.rigidity_test_time_cutoff] = 0.0 + + if self.use_rigidity_network: + masked_offsets = rigidity_mask * unmasked_offsets + if self.test_time_scaling is not None: + masked_offsets *= self.test_time_scaling + new_points = raw_input_pts + masked_offsets # skip connection # rigidity + if details is not None: + details["rigidity_mask"] = rigidity_mask + details["masked_offsets"] = masked_offsets + else: + if self.test_time_scaling is not None: + unmasked_offsets *= self.test_time_scaling + new_points = raw_input_pts + unmasked_offsets # skip connection + # if input_pts_ts >= 5: + # avg_offsets_abs = torch.mean(torch.abs(unmasked_offsets), dim=0) + # print(f"input_ts: {input_pts_ts}, offset_avg: {avg_offsets_abs}") + + + if special_loss_return: + return details + else: + return new_points + + + +class BendingNetworkForceForward(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkForceForward, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.output_ch = 3 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + ## timestep t ## + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + # self.bending_latent = nn.Parameter( + # torch.zeros((self.bending_n_timesteps, self.bending_hi)) + # ) + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + # if self.activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # if self.use_rigidity_network: + # self.rigidity_activation_function = F.relu # F.relu, torch.sin + # self.rigidity_skips = [] # do not include 0 and do not include depth-1 # + # use_last_layer_bias = True + # self.rigidity_tanh = nn.Tanh() + + # if self.rigidity_use_latent: + # self.rigidity_network = nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.rigidity_hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # if i + 1 in self.rigidity_skips + # else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # for i in range(self.rigidity_network_depth - 2)] + + # [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + # else: + # self.rigidity_network = nn.ModuleList( + # [nn.Linear(self.input_ch, self.rigidity_hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # if i + 1 in self.rigidity_skips + # else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # for i in range(self.rigidity_network_depth - 2)] + + # [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + + # # initialize weights + # with torch.no_grad(): + # for i, layer in enumerate(self.rigidity_network[:-1]): + # if self.rigidity_activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.rigidity_activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # # initialize final layer to zero weights + # self.rigidity_network[-1].weight.data *= 0.0 + # if use_last_layer_bias: + # self.rigidity_network[-1].bias.data *= 0.0 + + ### pre-optimized rigid translations ### + # self.rigid_translations = torch.tensor( + # [[ 0.0000, 0.0000, 0.0000], + # [-0.0008, 0.0040, 0.0159], + # [-0.0566, 0.0099, 0.0173]], dtype=torch.float32 + # ).cuda() + self.use_opt_rigid_translations = False # load utils and the loading .... 
## + self.use_split_network = False + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + def forward(self, input_pts, input_pts_ts, timestep_to_passive_mesh, act_sdf_net=None, details=None, special_loss_return=False): # special loss return # + + # query the bending_net for the active obj's deformation flow # + # query the bending_net_passive for the passive obj's deformation flow # + # input_pts_ts should goes from zero to maxx_ts - 1 # + # nex_pts_ts = input_pts_ts + 1 + + + + prev_pts_ts = input_pts_ts - 1 # previous timestep # + prev_ts_mesh = timestep_to_passive_mesh[prev_pts_ts] # timestep to passive mesh # + + cur_passive_mesh = timestep_to_passive_mesh[input_pts_ts] + prev_passive_mesh = 
timestep_to_passive_mesh[prev_pts_ts] ## nn_pts x 3 ## + + + raw_input_pts = input_pts[:, :3] + + if self.embed_fn_fine is not None: # embed fn # + prev_passive_mesh = self.embed_fn_fine(prev_passive_mesh) + + # + # expanded_prev_pts_ts = torch.zeros((prev_passive_mesh.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = torch.zeros((prev_passive_mesh.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + + # expanded_prev_pts_ts = + + input_latents = self.bending_latent(expanded_prev_pts_ts) # input latents + + ### TODO: how to optimize the force network ? ### + ######### 1) only use points from passive mesh for optimization ######### + + # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + # print(prev_passive_mesh) + # print(f"prev_pts_ts: {prev_pts_ts}, prev_passive_mesh: {prev_passive_mesh.size()}, input_latents: {input_latents.size()}",) + # print(f"prev_pts_ts: {prev_pts_ts}, input_pts: {input_pts.size()}, input_latents: {input_latents.size()}",) + # input_latents = self.bending_latent(expandedp) + x = torch.cat([prev_passive_mesh, input_latents], dim=-1) + # x = torch.cat([input_latents[:, :3], input_latents], dim=-1) + # x = torch.cat([input_pts, input_latents], dim=-1) + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + cur_network = self.network + else: + cur_network = self.split_network + # cur_network = self.split_network[input_pts_ts - 5] + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + # print(f"i: {i}") + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([input_pts, x], -1) + ''' use the single split network without no_grad setting ''' + + + forces = x # nn_pts x 3 # # at the + rigid_acc = torch.mean(forces, dim=0) + k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1] + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = x.detach() + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item() + } + cur_offset = k_vel_to_offset * cur_vel + + + new_pts = input_pts[:, :3] - cur_offset.unsqueeze(0) + return new_pts + + + + + + nex_timestep_mesh = timestep_to_mesh[nex_pts_ts] + # c + + active_mesh_deformation = bending_net(nex_timestep_mesh, nex_pts_ts) ## get the nex_pts_ts's bending direction ## + active_mesh_deformation = torch.mean(active_mesh_deformation, dim=0) ### (3, ) deformation direction ## + # sdf value ? # + + ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + + self.ks_val_vals = ks_val.detach().cpu().numpy().item() + + # passive mesh # + cur_timestep_mesh = timestep_to_passive_mesh[input_pts_ts] + passive_mesh_sdf_value = act_sdf_net.sdf(cur_timestep_mesh) # nn_passive_pts -> the shape of the passive pts's sdf values # + maxx_dist = 2. + fs = (-1. * torch.sin(passive_mesh_sdf_value / maxx_dist * float(np.pi) / 2.) + 1) * active_mesh_deformation.unsqueeze(0) * ks_val #### + fs = torch.mean(fs, dim=0) ## (3,) ## + # fs = fs * -1. 
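# Illustrative sketch (not part of the patch above): how the per-point forces predicted
# in the forward pass just above are reduced to a single rigid offset. `forces`,
# `sdf_vals`, the two gains, and `prev_vel` are stand-ins for the network output, the
# passive-object signed distances, the ks_val / ks_weights embeddings, and
# self.timestep_to_vel[prev_pts_ts - 1]; only a minimal sketch under those assumptions.
from typing import Optional, Tuple
import torch

def rigid_offset_from_forces(forces: torch.Tensor,              # (N, 3) per-point forces
                             k_acc_to_vel: torch.Tensor,        # scalar gain: acceleration -> velocity
                             k_vel_to_offset: torch.Tensor,     # scalar gain: velocity -> offset
                             prev_vel: Optional[torch.Tensor],  # (3,) velocity at the previous step, None at t = 0
                             sdf_vals: Optional[torch.Tensor] = None,  # (N,) distances for the weighted variant
                             ws_alpha: float = 1.0,
                             ws_beta: float = 1.0) -> Tuple[torch.Tensor, torch.Tensor]:
    if sdf_vals is None:
        # plain variant (BendingNetworkForceForward above): uniform mean over all points
        rigid_acc = forces.mean(dim=0)
    else:
        # weighted variant (BendingNetworkForceFieldForward below): w_i = beta * exp(-alpha * d_i),
        # normalised so that points close to the passive surface dominate the aggregate
        ws = ws_beta * torch.exp(-ws_alpha * sdf_vals)
        ws = ws / torch.clamp(ws.sum(), min=1e-9)
        rigid_acc = (forces * ws.unsqueeze(-1)).sum(dim=0)
    # explicit Euler-style accumulation: acceleration -> velocity -> offset
    delta_vel = rigid_acc * k_acc_to_vel
    cur_vel = delta_vel if prev_vel is None else delta_vel + prev_vel
    cur_offset = k_vel_to_offset * cur_vel
    # the caller then maps query points back to the canonical frame: new_pts = raw_pts - cur_offset
    return cur_offset, cur_vel

# toy usage at the first simulated step (no previous velocity yet)
offset, vel = rigid_offset_from_forces(torch.randn(128, 3),
                                       torch.tensor(0.2), torch.tensor(0.2),
                                       prev_vel=None, sdf_vals=torch.rand(128))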
+ + self.timestep_to_forward_deform[input_pts_ts] = fs.detach().cpu().numpy() # + + + raw_input_pts = input_pts[:, :3] # positional encoding includes raw 3D coordinates as first three entries # + # print(f) + if self.embed_fn_fine is not None: # embed fn # + input_pts = self.embed_fn_fine(input_pts) + + + if special_loss_return and details is None: # details is None # + details = {} + + expanded_input_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_input_pts_ts = expanded_input_pts_ts + nex_pts_ts + input_latents = self.bending_latent(expanded_input_pts_ts) + + # # print(f"input_pts: {input_pts.size()}, input_latents: {input_latents.size()}, raw_input_pts: {raw_input_pts.size()}") + # input_latents = input_latent.expand(input_pts.size()[0], -1) + # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + + x = fs.unsqueeze(0).repeat(input_pts.size(0), 1).contiguous() # + # x = self.rigid_translations(expanded_input_pts_ts) # .repeat(input_pts.size(0), 1).contiguous() + + unmasked_offsets = x + if details is not None: + details["unmasked_offsets"] = unmasked_offsets # get the unmasked offsets # + + if self.use_rigidity_network: # bending network? rigidity network... # # bending network and the bending network # # + if self.rigidity_use_latent: + x = torch.cat([input_pts, input_latents], -1) + else: + x = input_pts + + for i, layer in enumerate(self.rigidity_network): + x = layer(x) + # SIREN + if self.rigidity_activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.rigidity_network) - 1: + x = self.rigidity_activation_function(x) + if i in self.rigidity_skips: + x = torch.cat([input_pts, x], -1) + + rigidity_mask = (self.rigidity_tanh(x) + 1) / 2 # close to 1 for nonrigid, close to 0 for rigid + + if self.rigidity_test_time_cutoff is not None: + rigidity_mask[rigidity_mask <= self.rigidity_test_time_cutoff] = 0.0 + + if self.use_rigidity_network: + masked_offsets = rigidity_mask * unmasked_offsets + if self.test_time_scaling is not None: + masked_offsets *= self.test_time_scaling + new_points = raw_input_pts + masked_offsets # skip connection # rigidity + if details is not None: + details["rigidity_mask"] = rigidity_mask + details["masked_offsets"] = masked_offsets + else: + if self.test_time_scaling is not None: + unmasked_offsets *= self.test_time_scaling + new_points = raw_input_pts + unmasked_offsets # skip connection + # if input_pts_ts >= 5: + # avg_offsets_abs = torch.mean(torch.abs(unmasked_offsets), dim=0) + # print(f"input_ts: {input_pts_ts}, offset_avg: {avg_offsets_abs}") + + + if special_loss_return: + return details + else: + return new_points + + + +class BendingNetworkForceFieldForward(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkForceFieldForward, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.output_ch = 3 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + ## timestep t ## + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + # self.bending_latent = nn.Parameter( + # torch.zeros((self.bending_n_timesteps, self.bending_hi)) + # ) + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) + + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + + ### TODO: initialize the t_to_total_def variable ### + self.timestep_to_total_def = {} + # if self.use_rigidity_network: + # self.rigidity_activation_function = F.relu # F.relu, torch.sin + # self.rigidity_skips = [] # do not include 0 and do not include depth-1 # + # use_last_layer_bias = True + # self.rigidity_tanh = nn.Tanh() + + # if self.rigidity_use_latent: + # self.rigidity_network = nn.ModuleList( + # [nn.Linear(self.input_ch + self.bending_latent_size, self.rigidity_hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # if i + 1 in self.rigidity_skips + # else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # for i in range(self.rigidity_network_depth - 2)] + + # [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + # else: + # self.rigidity_network = nn.ModuleList( + # [nn.Linear(self.input_ch, self.rigidity_hidden_dimensions)] + + # [nn.Linear(self.input_ch + self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # if i + 1 in self.rigidity_skips + # else nn.Linear(self.rigidity_hidden_dimensions, self.rigidity_hidden_dimensions) + # for i in range(self.rigidity_network_depth - 2)] + + # [nn.Linear(self.rigidity_hidden_dimensions, 1, bias=use_last_layer_bias)]) + + # # initialize weights + # with torch.no_grad(): + # for i, layer in enumerate(self.rigidity_network[:-1]): + # if self.rigidity_activation_function.__name__ == "sin": + # # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # layer.weight.uniform_(-a, a) + # elif self.rigidity_activation_function.__name__ == "relu": + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # # initialize final layer to zero weights + # self.rigidity_network[-1].weight.data *= 0.0 + # if use_last_layer_bias: + # self.rigidity_network[-1].bias.data *= 0.0 + + ### pre-optimized rigid translations ### + # self.rigid_translations = torch.tensor( + # [[ 0.0000, 0.0000, 0.0000], + # [-0.0008, 0.0040, 0.0159], + # [-0.0566, 0.0099, 0.0173]], dtype=torch.float32 + # ).cuda() + self.use_opt_rigid_translations = False # load utils and the loading .... 
## + self.use_split_network = False + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + ### sdf net ### should deform + def forward(self, input_pts, input_pts_ts, timestep_to_passive_mesh, passive_sdf_net, details=None, special_loss_return=False): # special loss # + + # query the bending_net for the active obj's deformation flow # + # query the bending_net_passive for the passive obj's deformation flow # + # input_pts_ts should goes from zero to maxx_ts - 1 # + # nex_pts_ts = input_pts_ts + 1 + + + prev_pts_ts = input_pts_ts - 1 # previous timestep # + + prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) # + defed_input_pts_sdf = 
passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + + expanded_prev_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + + raw_input_pts = input_pts[:, :3] + if self.embed_fn_fine is not None: # embed fn # + input_pts = self.embed_fn_fine(input_pts) + input_latents = self.bending_latent(expanded_prev_pts_ts) # + x = torch.cat([input_pts, input_latents], dim=-1) + + + # prev_ts_mesh = timestep_to_passive_mesh[prev_pts_ts] # timestep to passive mesh # + + # cur_passive_mesh = timestep_to_passive_mesh[input_pts_ts] + # prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] ## nn_pts x 3 ## + + + # raw_input_pts = input_pts[:, :3] + + # if self.embed_fn_fine is not None: # embed fn # + # prev_passive_mesh = self.embed_fn_fine(prev_passive_mesh) + + # # + # # expanded_prev_pts_ts = torch.zeros((prev_passive_mesh.size(0)), dtype=torch.long).cuda() + # expanded_prev_pts_ts = torch.zeros((prev_passive_mesh.size(0)), dtype=torch.long).cuda() + # expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + + # # expanded_prev_pts_ts = + + # input_latents = self.bending_latent(expanded_prev_pts_ts) # input latents + + # ### TODO: how to optimize the force network ? ### + # ######### 1) only use points from passive mesh for optimization ######### + + # # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + # # print(prev_passive_mesh) + # # print(f"prev_pts_ts: {prev_pts_ts}, prev_passive_mesh: {prev_passive_mesh.size()}, input_latents: {input_latents.size()}",) + # # print(f"prev_pts_ts: {prev_pts_ts}, input_pts: {input_pts.size()}, input_latents: {input_latents.size()}",) + # # input_latents = self.bending_latent(expandedp) + # x = torch.cat([prev_passive_mesh, input_latents], dim=-1) + # # x = torch.cat([input_latents[:, :3], input_latents], dim=-1) + # # x = torch.cat([input_pts, input_latents], dim=-1) + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + cur_network = self.network + else: + cur_network = self.split_network + # cur_network = self.split_network[input_pts_ts - 5] + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + # print(f"i: {i}") + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([input_pts, x], -1) + ''' use the single split network without no_grad setting ''' + + + forces = x # nn_pts x 3 # # at the + # defed_input_pts_sdf: nn_pts sdf values # + # wi = \beta exp(-d_i \alpha) # TODO: the power of the d in the exp? + ## [\alpha, \beta] ## + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + ws_unnormed = ws_beta * torch.exp(-1. 
* defed_input_pts_sdf * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + # print(f"forces: {forces.size()}, ws_normed: {ws_normed.size()}") + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) ### (3,) rigid acc # + + # rigid_acc = torch.mean(forces, dim=0) + k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1] + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = x.detach() + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + } + cur_offset = k_vel_to_offset * cur_vel + + # prev_rigid_def.unsqueeze(0) # + ## TODO: is it a good updating strategy? ## + cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + self.timestep_to_total_def[input_pts_ts] = cur_upd_rigid_def + + ## update raw input pts ## + new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return new_pts + + + + + + nex_timestep_mesh = timestep_to_mesh[nex_pts_ts] + # c + + active_mesh_deformation = bending_net(nex_timestep_mesh, nex_pts_ts) ## get the nex_pts_ts's bending direction ## + active_mesh_deformation = torch.mean(active_mesh_deformation, dim=0) ### (3, ) deformation direction ## + # sdf value ? # + + ks_val = self.ks_val(torch.zeros((1,)).long().cuda()) + + self.ks_val_vals = ks_val.detach().cpu().numpy().item() + + # passive mesh # + cur_timestep_mesh = timestep_to_passive_mesh[input_pts_ts] + passive_mesh_sdf_value = act_sdf_net.sdf(cur_timestep_mesh) # nn_passive_pts -> the shape of the passive pts's sdf values # + maxx_dist = 2. + fs = (-1. * torch.sin(passive_mesh_sdf_value / maxx_dist * float(np.pi) / 2.) + 1) * active_mesh_deformation.unsqueeze(0) * ks_val #### + fs = torch.mean(fs, dim=0) ## (3,) ## + # fs = fs * -1. + + self.timestep_to_forward_deform[input_pts_ts] = fs.detach().cpu().numpy() # + + + raw_input_pts = input_pts[:, :3] # positional encoding includes raw 3D coordinates as first three entries # + # print(f) + if self.embed_fn_fine is not None: # embed fn # + input_pts = self.embed_fn_fine(input_pts) + + + if special_loss_return and details is None: # details is None # + details = {} + + expanded_input_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_input_pts_ts = expanded_input_pts_ts + nex_pts_ts + input_latents = self.bending_latent(expanded_input_pts_ts) + + # # print(f"input_pts: {input_pts.size()}, input_latents: {input_latents.size()}, raw_input_pts: {raw_input_pts.size()}") + # input_latents = input_latent.expand(input_pts.size()[0], -1) + # x = torch.cat([input_pts, input_latents], -1) # input pts with bending latents # + + x = fs.unsqueeze(0).repeat(input_pts.size(0), 1).contiguous() # + # x = self.rigid_translations(expanded_input_pts_ts) # .repeat(input_pts.size(0), 1).contiguous() + + unmasked_offsets = x + if details is not None: + details["unmasked_offsets"] = unmasked_offsets # get the unmasked offsets # + + if self.use_rigidity_network: # bending network? rigidity network... 
# # bending network and the bending network # # + if self.rigidity_use_latent: + x = torch.cat([input_pts, input_latents], -1) + else: + x = input_pts + + for i, layer in enumerate(self.rigidity_network): + x = layer(x) + # SIREN + if self.rigidity_activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.rigidity_network) - 1: + x = self.rigidity_activation_function(x) + if i in self.rigidity_skips: + x = torch.cat([input_pts, x], -1) + + rigidity_mask = (self.rigidity_tanh(x) + 1) / 2 # close to 1 for nonrigid, close to 0 for rigid + + if self.rigidity_test_time_cutoff is not None: + rigidity_mask[rigidity_mask <= self.rigidity_test_time_cutoff] = 0.0 + + if self.use_rigidity_network: + masked_offsets = rigidity_mask * unmasked_offsets + if self.test_time_scaling is not None: + masked_offsets *= self.test_time_scaling + new_points = raw_input_pts + masked_offsets # skip connection # rigidity + if details is not None: + details["rigidity_mask"] = rigidity_mask + details["masked_offsets"] = masked_offsets + else: + if self.test_time_scaling is not None: + unmasked_offsets *= self.test_time_scaling + new_points = raw_input_pts + unmasked_offsets # skip connection + # if input_pts_ts >= 5: + # avg_offsets_abs = torch.mean(torch.abs(unmasked_offsets), dim=0) + # print(f"input_ts: {input_pts_ts}, offset_avg: {avg_offsets_abs}") + + + if special_loss_return: + return details + else: + return new_points + + + +class BendingNetworkActiveForceFieldForward(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkActiveForceFieldForward, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + ## timestep t ## + # the bending latent # # passive and the active object at the current state # + # extract meshes for the passive and the active object atthe current + # step 1 -> get active object's points correspondences + # step 2 -> get passive object's points in correspondences + # step 3 -> for t-1, get the deformation for each point at t in the active robot mesh and average them as the averaged motion + # step 4 -> sample points from the passive mesh at t-1 and calculate their forces using the active robot's actions, signed distances, and the parameter K # + # step 5 -> aggregate the translation motion (the most simple translation models) and use that as the deformation direction for the passive object at time t + # step 6 -> an additional rigidity mask should be optimized for the passive object # + + # self.bending_latent = nn.Parameter( + # torch.zeros((self.bending_n_timesteps, self.bending_hi)) + # ) + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + + ### TODO: initialize the t_to_total_def variable ### + self.timestep_to_total_def = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... 
## + self.use_split_network = False + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + ### sdf net ### should deform + def forward(self, input_pts, input_pts_ts, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False): # special loss # + + + prev_pts_ts = input_pts_ts - 1 # previous timestep # + ''' Deform input points vai the passive rigid deformations ''' + prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) # + defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + ''' Deform input points vai the passive rigid deformations ''' + + ''' Deform input points via the active deformation ''' + input_pts_to_active = input_pts.clone() + with torch.no_grad(): + for cur_pts_ts in range(prev_pts_ts, -1, -1): + input_pts_to_active = active_bending_net(input_pts_to_active, input_pts_ts=cur_pts_ts) + input_pts_to_active_sdf = active_sdf_net.sdf(input_pts_to_active) # nn_pts x 1 # + input_pts_to_active_sdf = input_pts_to_active_sdf.detach() + ''' Deform input points via the active deformation ''' + + ''' Embed prev_pts_ts timestamp ''' + expanded_prev_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + ''' Embed prev_pts_ts timestamp ''' + + ''' Embed sdf values ''' + raw_input_pts = input_pts[:, :3] + if self.embed_fn_fine is not None: # embed fn # + 
input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' + + ''' Acquire the bending latents ''' + input_latents = self.bending_latent(expanded_prev_pts_ts) # + ''' Acquire the bending latents ''' + + # x = torch.cat([input_pts, input_latents], dim=-1) + x = torch.cat([input_pts_to_active_sdf, input_latents], dim=-1) + + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + cur_network = self.network + else: + cur_network = self.split_network + # cur_network = self.split_network[input_pts_ts - 5] + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + # print(f"i: {i}") + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([input_pts, x], -1) + ''' use the single split network without no_grad setting ''' + + + forces = x # nn_pts x 3 # # at the + # defed_input_pts_sdf: nn_pts sdf values # + # wi = \beta exp(-d_i \alpha) # TODO: the power of the d in the exp? + ## [\alpha, \beta] ## + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + ws_unnormed = ws_beta * torch.exp(-1. * defed_input_pts_sdf * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + # print(f"forces: {forces.size()}, ws_normed: {ws_normed.size()}") + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) ### (3,) rigid acc # # + + # rigid_acc = torch.mean(forces, dim=0) + k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1] + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = x.detach() + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + } + cur_offset = k_vel_to_offset * cur_vel + + # prev_rigid_def.unsqueeze(0) # + ## TODO: is it a good updating strategy? ## + cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + self.timestep_to_total_def[input_pts_ts] = cur_upd_rigid_def + + ## update raw input pts ## + new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return new_pts + + + +class BendingNetworkActiveForceFieldForwardV2(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkActiveForceFieldForwardV2, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. 
set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + + ### TODO: initialize the t_to_total_def variable ### + self.timestep_to_total_def = {} + self.timestep_to_input_pts = {} + self.save_values = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... 
## + self.use_split_network = False + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + ### sdf net ### should deform + def forward(self, input_pts, input_pts_ts, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False): # special loss # + + + prev_pts_ts = input_pts_ts - 1 # previous timestep # + ''' Deform input points vai the passive rigid deformations ''' + prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) # + defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + ''' Deform input points vai the passive rigid deformations ''' + + ''' Deform input points via the active deformation ''' # size direction # + # input_pts_to_active = input_pts.clone() # + input_pts_to_active = torch.from_numpy(input_pts.detach().cpu().numpy()).float().cuda() + input_pts_to_active.requires_grad_ = True + input_pts_to_active.requires_grad = True + + # with torch.no_grad(): + for cur_pts_ts in range(prev_pts_ts, -1, -1): + input_pts_to_active = active_bending_net(input_pts_to_active, input_pts_ts=cur_pts_ts) + # get the deformed active points # + ## get the signed distance of the sampled points to the active robot ## + input_pts_to_active_sdf = active_sdf_net.sdf(input_pts_to_active) # nn_pts x 1 # + + + with torch.enable_grad(): + # input_pts_to_active_sdf_nn_out = active_sdf_net(input_pts_to_active) + # input_pts_to_active = 
torch.from_numpy(input_pts_to_active.detach().cpu().numpy()).float().cuda() + # # input_pts_to_active.requires_grad_ = True + # input_pts_to_active.requires_grad = True + input_pts_to_active_sdf_gradient = active_sdf_net.gradient(input_pts_to_active).squeeze() # nn_pts x 3 -> the spatial graient? # + + input_pts_to_active_sdf = input_pts_to_active_sdf.detach() + input_pts_to_active_sdf_gradient = input_pts_to_active_sdf_gradient.detach() + + # input_pts_force_dir = -1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + input_pts_force_dir = 1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + input_pts_force_dir = input_pts_force_dir / torch.clamp(torch.norm(input_pts_force_dir, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + + ''' Deform input points via the active deformation ''' + + ''' Embed prev_pts_ts timestamp ''' + expanded_prev_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + ''' Embed prev_pts_ts timestamp ''' + + ''' Embed sdf values ''' + raw_input_pts = input_pts[:, :3] + if self.embed_fn_fine is not None: # embed fn # + input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' + + + + ''' Acquire the bending latents ''' + input_latents = self.bending_latent(expanded_prev_pts_ts) # + ''' Acquire the bending latents ''' + + # x = torch.cat([input_pts, input_latents], dim=-1) + x = torch.cat([input_pts_to_active_sdf, input_latents], dim=-1) + + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + cur_network = self.network + else: + cur_network = self.split_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + # print(f"i: {i}") + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([input_pts, x], -1) + ''' use the single split network without no_grad setting ''' + + ### TODO: how to adjust the direction? ### + ### the size of the force are only related to the signed distance ### + x = x * input_pts_force_dir # with the direction -> + + forces = x # nn_pts x 3 # # at the + # defed_input_pts_sdf: nn_pts sdf values # + # wi = \beta exp(-d_i \alpha) # TODO: the power of the d in the exp? + ## [\alpha, \beta] ## + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + ws_unnormed = ws_beta * torch.exp(-1. 
* defed_input_pts_sdf * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + # print(f"forces: {forces.size()}, ws_normed: {ws_normed.size()}") + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) ### (3,) rigid acc # # + + # rigid_acc = torch.mean(forces, dim=0) + k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1] + self.timestep_to_input_pts[prev_pts_ts] = raw_input_pts.detach() + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = x.detach() + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + } + self.save_values = { + 'ks_vals_dict': self.ks_vals_dict, # save values ## + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts} # + } + cur_offset = k_vel_to_offset * cur_vel + + # prev_rigid_def.unsqueeze(0) # + ## TODO: is it a good updating strategy? ## + cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + self.timestep_to_total_def[input_pts_ts] = cur_upd_rigid_def + + ## update raw input pts ## + new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return new_pts + + + +class BendingNetworkActiveForceFieldForwardV3(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkActiveForceFieldForwardV3, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = False + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.dir_network[-1].weight.data *= 0.0 + # if use_last_layer_bias: + # self.dir_network[-1].bias.data *= 0.0 + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + + ### TODO: initialize the t_to_total_def variable ### + self.timestep_to_total_def = {} + self.timestep_to_input_pts = {} + self.save_values = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + ### sdf net ### should deform + def forward(self, input_pts, input_pts_ts, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False): # special loss # + + + prev_pts_ts = input_pts_ts - 1 # previous timestep # + ''' Deform input points vai the passive rigid deformations ''' + prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) # + defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + ''' Deform input points vai the passive rigid deformations ''' + + ''' Deform input points via the active deformation ''' # size direction # + # input_pts_to_active = input_pts.clone() # + input_pts_to_active = torch.from_numpy(input_pts.detach().cpu().numpy()).float().cuda() + input_pts_to_active.requires_grad_ = True + input_pts_to_active.requires_grad = True + + # with torch.no_grad(): + for cur_pts_ts in range(prev_pts_ts, -1, -1): + input_pts_to_active = active_bending_net(input_pts_to_active, input_pts_ts=cur_pts_ts) + # get the deformed active points # + ## get the signed distance of the sampled points to the active robot ## + input_pts_to_active_sdf = active_sdf_net.sdf(input_pts_to_active) # nn_pts x 1 # + + + with torch.enable_grad(): + # input_pts_to_active_sdf_nn_out = active_sdf_net(input_pts_to_active) + # input_pts_to_active = torch.from_numpy(input_pts_to_active.detach().cpu().numpy()).float().cuda() + # # input_pts_to_active.requires_grad_ = True + # input_pts_to_active.requires_grad = True + input_pts_to_active_sdf_gradient = active_sdf_net.gradient(input_pts_to_active).squeeze() # nn_pts x 3 -> the spatial graient? 
# + + input_pts_to_active_sdf = input_pts_to_active_sdf.detach() + input_pts_to_active_sdf_gradient = input_pts_to_active_sdf_gradient.detach() + + # input_pts_force_dir = -1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + input_pts_force_dir = 1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + input_pts_force_dir = input_pts_force_dir / torch.clamp(torch.norm(input_pts_force_dir, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + + ''' Deform input points via the active deformation ''' + + ''' Embed prev_pts_ts timestamp ''' + expanded_prev_pts_ts = torch.zeros((input_pts.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + ''' Embed prev_pts_ts timestamp ''' + + ''' Embed sdf values ''' + raw_input_pts = input_pts[:, :3] + if self.embed_fn_fine is not None: # embed fn # + input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' + + + + ''' Acquire the bending latents ''' + input_latents = self.bending_latent(expanded_prev_pts_ts) # + ''' Acquire the bending latents ''' + + # x = torch.cat([input_pts, input_latents], dim=-1) + x = torch.cat([input_pts_to_active_sdf, input_latents], dim=-1) + + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + cur_network = self.network + else: + cur_network = self.split_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + # print(f"i: {i}") + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([input_pts_to_active_sdf, x], -1) + ''' use the single split network without no_grad setting ''' + + + input_dir_latents = self.bending_dir_latent(expanded_prev_pts_ts) + dir_x = torch.cat([input_pts_to_active_sdf, input_dir_latents], dim=-1) + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + cur_network = self.dir_network + else: + cur_network = self.split_dir_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + dir_x = layer(dir_x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + dir_x *= 30.0 + if i != len(self.network) - 1: + dir_x = self.activation_function(dir_x) + if i in self.skips: + dir_x = torch.cat([input_pts_to_active_sdf, dir_x], -1) + ''' use the single split network without no_grad setting ''' + + dir_x = dir_x / torch.clamp(torch.norm(dir_x, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + + + ### TODO: how to adjust the direction? ### + ### the size of the force are only related to the signed distance ### + # x = x * input_pts_force_dir # with the direction -> + x = x * dir_x # with the direction -> + + forces = x # nn_pts x 3 # # at the + # defed_input_pts_sdf: nn_pts sdf values # + # wi = \beta exp(-d_i \alpha) # TODO: the power of the d in the exp? + ## [\alpha, \beta] ## + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + ws_unnormed = ws_beta * torch.exp(-1. 
* defed_input_pts_sdf * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + # print(f"forces: {forces.size()}, ws_normed: {ws_normed.size()}") + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) ### (3,) rigid acc # # + + # rigid_acc = torch.mean(forces, dim=0) + k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1] + self.timestep_to_input_pts[prev_pts_ts] = raw_input_pts.detach() + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = x.detach() + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + } + self.save_values = { + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? # 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts} # + } + cur_offset = k_vel_to_offset * cur_vel + + # prev_rigid_def.unsqueeze(0) # + ## TODO: is it a good updating strategy? ## + cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + self.timestep_to_total_def[input_pts_ts] = cur_upd_rigid_def + + ## update raw input pts ## + new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return new_pts + + + +### the points forces ### +class BendingNetworkActiveForceFieldForwardV4(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkActiveForceFieldForwardV4, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.dir_network[-1].weight.data *= 0.0 + # if use_last_layer_bias: + # self.dir_network[-1].bias.data *= 0.0 + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + + ### TODO: initialize the t_to_total_def variable ### + self.timestep_to_total_def = {} + self.timestep_to_input_pts = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + ### sdf net ### should deform + def forward(self, input_pts, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False): # special loss # + + + prev_pts_ts = input_pts_ts - 1 # previous timestep # + + + + ''' Deform input points vai the passive rigid deformations ''' + prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) # + defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + + # self.timestep_to_ori_input_pts = {} + # self.timestep_to_ori_input_pts_sdf = {} + # ori_input_pts, ori_input_pts_sdf + ori_input_pts = input_pts.clone().detach() + ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points vai the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, + prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + ws_unnormed = ws_beta * torch.exp(-1. 
* defed_input_pts_sdf.detach() * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + ''' Calculate weights for deformed input points ''' + + + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = input_pts.size(0) + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + ws_unnormed = ws_beta * torch.exp(-1. * defed_uniformly_sampled_pts_sdf.detach() * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) + nn_sampled_input_pts = 5000 + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + + # + ''' ### Use points from passive mesh ### ''' + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) # + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + # sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + prev_active_mesh = timestep_to_active_mesh[prev_pts_ts] ## nn_active_pts x 3 ## + + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + nearest_dist, nearest_pts_idx = torch.min(dist_input_pts_active_mesh, dim=-1) + #### KDTree #### + # active_mesh_ktree = KDTree(prev_active_mesh.detach().cpu().numpy()) + # nearest_dist, nearest_pts_idx = active_mesh_ktree.query(input_pts.detach().cpu().numpy(), k=1) + # nearest_pts_idx = torch.from_numpy(nearest_pts_idx).long().cuda() ### nn_input_pts ## + #### KDTree #### + + nearest_active_pts = prev_active_mesh[nearest_pts_idx] ### nn_input_pts x 3 + + active_pts_to_input_pts = sampled_input_pts - nearest_active_pts ## nn_input_pts x 3 + + + + ''' Deform input points via the active deformation ''' # size direction # + input_pts_to_active = sampled_input_pts.clone() # + # input_pts_to_active = torch.from_numpy(input_pts.detach().cpu().numpy()).float().cuda() + # input_pts_to_active.requires_grad_ = True + # input_pts_to_active.requires_grad = True + + # with torch.no_grad(): + for cur_pts_ts in range(prev_pts_ts, -1, -1): + input_pts_to_active = active_bending_net(input_pts_to_active, input_pts_ts=cur_pts_ts) + # get the deformed active points # + ## get the signed distance of the sampled points to the active robot ## + 
input_pts_to_active_sdf = active_sdf_net.sdf(input_pts_to_active) # nn_pts x 1 # + + + # with torch.enable_grad(): + # # input_pts_to_active_sdf_nn_out = active_sdf_net(input_pts_to_active) + # # input_pts_to_active = torch.from_numpy(input_pts_to_active.detach().cpu().numpy()).float().cuda() + # # # input_pts_to_active.requires_grad_ = True + # # input_pts_to_active.requires_grad = True + # input_pts_to_active_sdf_gradient = active_sdf_net.gradient(input_pts_to_active).squeeze() # nn_pts x 3 -> the spatial graient? # + + # input_pts_to_active_sdf = input_pts_to_active_sdf.detach() + # input_pts_to_active_sdf_gradient = input_pts_to_active_sdf_gradient.detach() + + # # input_pts_force_dir = -1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + # input_pts_force_dir = 1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + # input_pts_force_dir = input_pts_force_dir / torch.clamp(torch.norm(input_pts_force_dir, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + ''' Deform input points via the active deformation ''' + + ''' Embed prev_pts_ts timestamp ''' + expanded_prev_pts_ts = torch.zeros((sampled_input_pts.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + ''' Embed prev_pts_ts timestamp ''' + + ''' Embed sdf values ''' + raw_input_pts = input_pts[:, :3] + if self.embed_fn_fine is not None: # embed fn # + input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' + + + + ''' Acquire the bending latents ''' + input_latents = self.bending_latent(expanded_prev_pts_ts) # + ''' Acquire the bending latents ''' + + # x = torch.cat([input_pts, input_latents], dim=-1) + x = torch.cat([input_pts_to_active_sdf, input_latents], dim=-1) + + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + cur_network = self.network + else: + cur_network = self.split_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + # print(f"i: {i}") + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([input_pts_to_active_sdf, x], -1) + ''' use the single split network without no_grad setting ''' + + + # input_dir_latents = self.bending_dir_latent(expanded_prev_pts_ts) + # dir_x = torch.cat([input_pts_to_active_sdf, input_dir_latents], dim=-1) + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.dir_network + # else: + # cur_network = self.split_dir_network + + # ''' use the single split network without no_grad setting ''' + # for i, layer in enumerate(cur_network): + # dir_x = layer(dir_x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # dir_x *= 30.0 + # if i != len(self.network) - 1: + # dir_x = self.activation_function(dir_x) + # if i in self.skips: + # dir_x = torch.cat([input_pts_to_active_sdf, dir_x], -1) + # ''' use the single split network without no_grad setting ''' + + dir_x = active_pts_to_input_pts / torch.clamp(torch.norm(active_pts_to_input_pts, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + + + ### TODO: how to adjust the direction? 
### + ### the size of the force are only related to the signed distance ### + # x = x * input_pts_force_dir # with the direction -> + x = x * dir_x # with the direction -> + + forces = x # nn_pts x 3 # # at the + # defed_input_pts_sdf: nn_pts sdf values # + # wi = \beta exp(-d_i \alpha) # TODO: the power of the d in the exp? + ## [\alpha, \beta] ## + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + ws_unnormed = ws_beta * torch.exp(-1. * sampled_defed_input_pts_sdf * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + # print(f"forces: {forces.size()}, ws_normed: {ws_normed.size()}") + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) ### (3,) rigid acc # # + + # rigid_acc = torch.mean(forces, dim=0) + k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1] + # ws_normed, defed_input_pts_sdf, + self.timestep_to_input_pts[prev_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = x.detach() + self.timestep_to_ws_normed[prev_pts_ts] = ws_normed.detach() + self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} + # self.timestep_to_ori_input_pts_sdf = {} + # ori_input_pts, ori_input_pts_sdf + self.timestep_to_ori_input_pts[prev_pts_ts] = ori_input_pts.detach() + self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? # 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + + # prev_rigid_def.unsqueeze(0) # + ## TODO: is it a good updating strategy? 
## + cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + self.timestep_to_total_def[input_pts_ts] = cur_upd_rigid_def + + ## update raw input pts ## + new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return new_pts + + + +### the points forces ### # the point forces with the distance force scales ### +class BendingNetworkActiveForceFieldForwardV5(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + + super(BendingNetworkActiveForceFieldForwardV5, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.dir_network[-1].weight.data *= 0.0 + # if use_last_layer_bias: + # self.dir_network[-1].bias.data *= 0.0 + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + + ### TODO: initialize the t_to_total_def variable ### + self.timestep_to_total_def = {} + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... 
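+ # Notes on this V5 variant (descriptive summary of forward() below):
+ #   * the per-point force magnitude is not taken from the MLP here; it follows the
+ #     analytic falloff |f_i| = dist_k_a * exp(-dist_k_b * ||p_i - a_i||^power_k)
+ #     parameterized by self.distance_ks_val, with the direction given by the offset
+ #     from the nearest active-mesh vertex a_i to the sampled point p_i.
+ #   * timestep_to_optimizable_offset keeps the non-detached rigid offset per timestep,
+ #     presumably so downstream losses can backpropagate into these stiffness scalars.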
## + self.use_split_network = False + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + ### sdf net ### should deform + def forward(self, input_pts, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False): # special loss # + + + prev_pts_ts = input_pts_ts - 1 # previous timestep # + + + + ''' Deform input points vai the passive rigid deformations ''' + prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) # + defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + # self.timestep_to_ori_input_pts = {} + # self.timestep_to_ori_input_pts_sdf = {} + # ori_input_pts, ori_input_pts_sdf + ori_input_pts = input_pts.clone().detach() + ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points vai the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, + prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + ws_unnormed = ws_beta * torch.exp(-1. * defed_input_pts_sdf.detach() * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + ''' Calculate weights for deformed input points ''' + + + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = input_pts.size(0) + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + ws_unnormed = ws_beta * torch.exp(-1. 
* defed_uniformly_sampled_pts_sdf.detach() * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) + nn_sampled_input_pts = 2000 + # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + + # + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + + ''' ### Use points from passive mesh ### ''' + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) # + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + # sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + # prev + prev_active_mesh = timestep_to_active_mesh[prev_pts_ts] ## nn_active_pts x 3 ## + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + nearest_dist, nearest_pts_idx = torch.min(dist_input_pts_active_mesh, dim=-1) + #### KDTree #### + # active_mesh_ktree = KDTree(prev_active_mesh.detach().cpu().numpy()) + # nearest_dist, nearest_pts_idx = active_mesh_ktree.query(input_pts.detach().cpu().numpy(), k=1) + # nearest_pts_idx = torch.from_numpy(nearest_pts_idx).long().cuda() ### nn_input_pts ## + #### KDTree #### + + nearest_active_pts = prev_active_mesh[nearest_pts_idx] ### nn_input_pts x 3 + + active_pts_to_input_pts = sampled_input_pts - nearest_active_pts ## nn_input_pts x 3 # + + + + ''' Deform input points via the active deformation ''' # size direction # + input_pts_to_active = sampled_input_pts.clone() # + # input_pts_to_active = torch.from_numpy(input_pts.detach().cpu().numpy()).float().cuda() + # input_pts_to_active.requires_grad_ = True + # input_pts_to_active.requires_grad = True + + # with torch.no_grad(): + for cur_pts_ts in range(prev_pts_ts, -1, -1): + input_pts_to_active = active_bending_net(input_pts_to_active, input_pts_ts=cur_pts_ts) + # get the deformed active points # + ## get the signed distance of the sampled points to the active robot ## + input_pts_to_active_sdf = active_sdf_net.sdf(input_pts_to_active) # nn_pts x 1 # + + + # with torch.enable_grad(): + # # input_pts_to_active_sdf_nn_out = active_sdf_net(input_pts_to_active) + # # input_pts_to_active = torch.from_numpy(input_pts_to_active.detach().cpu().numpy()).float().cuda() + # # # input_pts_to_active.requires_grad_ = True + # # input_pts_to_active.requires_grad = True + # input_pts_to_active_sdf_gradient = active_sdf_net.gradient(input_pts_to_active).squeeze() # nn_pts x 3 -> the spatial graient? 
# + + # input_pts_to_active_sdf = input_pts_to_active_sdf.detach() + # input_pts_to_active_sdf_gradient = input_pts_to_active_sdf_gradient.detach() + + # # input_pts_force_dir = -1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + # input_pts_force_dir = 1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + # input_pts_force_dir = input_pts_force_dir / torch.clamp(torch.norm(input_pts_force_dir, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + ''' Deform input points via the active deformation ''' + + ''' Embed prev_pts_ts timestamp ''' + expanded_prev_pts_ts = torch.zeros((sampled_input_pts.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + ''' Embed prev_pts_ts timestamp ''' + + ''' Embed sdf values ''' + raw_input_pts = input_pts[:, :3] + if self.embed_fn_fine is not None: # embed fn # + input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' + + + + ''' Acquire the bending latents ''' + # input_latents = self.bending_latent(expanded_prev_pts_ts) # + ''' Acquire the bending latents ''' + + + ''' Use the network to acquire the force scales ''' ### TODO: should not be a time-varying determining process + # x = torch.cat([input_pts, input_latents], dim=-1) + # x = torch.cat([input_pts_to_active_sdf, input_latents], dim=-1) + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.network + # else: + # cur_network = self.split_network + + # ''' use the single split network without no_grad setting ''' + # for i, layer in enumerate(cur_network): + # # print(f"i: {i}") + # x = layer(x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts_to_active_sdf, x], -1) + ''' Use the network to acquire the force scales ''' + + + # input_dir_latents = self.bending_dir_latent(expanded_prev_pts_ts) + # dir_x = torch.cat([input_pts_to_active_sdf, input_dir_latents], dim=-1) + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.dir_network + # else: + # cur_network = self.split_dir_network + + # ''' use the single split network without no_grad setting ''' + # for i, layer in enumerate(cur_network): + # dir_x = layer(dir_x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # dir_x *= 30.0 + # if i != len(self.network) - 1: + # dir_x = self.activation_function(dir_x) + # if i in self.skips: + # dir_x = torch.cat([input_pts_to_active_sdf, dir_x], -1) + # ''' use the single split network without no_grad setting ''' + + dir_x = active_pts_to_input_pts / torch.clamp(torch.norm(active_pts_to_input_pts, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + + + # |f| = k_a * exp(-dist^1 * k_b) + dist_active_pts_to_input_pts = torch.norm(active_pts_to_input_pts, dim=-1, p=2, keepdim=True) ### nn_input_pts + # + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + power_k = 0.5 + power_k = 1. 
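+ # Force model used in the two lines below (descriptive sketch):
+ #   magnitude: |f_i| = dist_k_a * exp(-dist_k_b * ||p_i - a_i||^power_k), where
+ #              ||p_i - a_i|| is the distance from sampled point p_i to its nearest
+ #              active-mesh vertex, so closer points receive larger forces;
+ #   direction: dir_x, the unit vector from the nearest active vertex toward p_i.
+ #   Example with the initial values (dist_k_a = 1, dist_k_b = 1 * 5 = 5, power_k = 1)
+ #   and a distance of 0.1: |f_i| = exp(-0.5) ~= 0.61.
+ #   The per-point forces are then combined with the SDF-based weights
+ #   w_i = ws_beta * exp(-ws_alpha * d_i) into a single rigid acceleration, which is
+ #   integrated to a velocity via k_acc_to_vel and to an offset via k_vel_to_offset.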
+ # x = dist_k_a * torch.exp(-1 * dist_active_pts_to_input_pts * dist_k_b) ### dist_k_b; dist_k_a; distance -> the scale of the functional force ### + x = dist_k_a * torch.exp(-1 * torch.pow(dist_active_pts_to_input_pts, power_k) * dist_k_b) + + + + ### TODO: how to adjust the direction? ### + ### the size of the force are only related to the signed distance ### + # x = x * input_pts_force_dir # with the direction -> + x = x * dir_x # with the direction -> + + forces = x # nn_pts x 3 # # at the + # defed_input_pts_sdf: nn_pts sdf values # + # wi = \beta exp(-d_i \alpha) # TODO: the power of the d in the exp? + ## [\alpha, \beta] ## + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + ws_unnormed = ws_beta * torch.exp(-1. * sampled_defed_input_pts_sdf * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + # print(f"forces: {forces.size()}, ws_normed: {ws_normed.size()}") + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) ### (3,) rigid acc # # + + # rigid_acc = torch.mean(forces, dim=0) + k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1] + # ws_normed, defed_input_pts_sdf # points as the supervision # + self.timestep_to_input_pts[prev_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = x.detach() + self.timestep_to_ws_normed[prev_pts_ts] = ws_normed.detach() + self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} + # self.timestep_to_ori_input_pts_sdf = {} + # ori_input_pts, ori_input_pts_sdf # + self.timestep_to_ori_input_pts[prev_pts_ts] = ori_input_pts.detach() + self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), # vel to offset # + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + "dist_k_b": dist_k_b.detach().cpu()[0].item(), + "dist_k_a": dist_k_a.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + + # prev_rigid_def.unsqueeze(0) # + ## TODO: is it a good updating strategy? ## + cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + self.timestep_to_total_def[input_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset # get the offset # + ## update raw input pts ## + new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return new_pts + + + +### the points forces ### +class BendingNetworkActiveForceFieldForwardV6(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardV6, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + + # + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.dir_network[-1].weight.data *= 0.0 + # if use_last_layer_bias: + # self.dir_network[-1].bias.data *= 0.0 + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + + ### TODO: initialize the t_to_total_def variable ### + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... 
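+        # Per-timestep caches (defined above) that forward() fills in: timestep_to_vel stores the
+        # detached rigid velocity, timestep_to_point_accs the detached per-point forces,
+        # timestep_to_total_def the accumulated rigid translation, timestep_to_input_pts the
+        # sampled query points, timestep_to_optimizable_offset the non-detached per-step offset,
+        # and save_values gathers numpy copies of these (plus the ks_* scalars) for saving.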
## + self.use_split_network = False + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + ### sdf net ### should deform + def forward(self, input_pts, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False): # special loss # + + + prev_pts_ts = input_pts_ts - 1 # previous timestep # + + + # deform input points + ''' Deform input points via the passive rigid deformations ''' + prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) # + defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + # self.timestep_to_ori_input_pts = {} + # self.timestep_to_ori_input_pts_sdf = {} + # ori_input_pts, ori_input_pts_sdf + ori_input_pts = input_pts.clone().detach() + ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points via the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, + prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + ws_unnormed = ws_beta * torch.exp(-1. * defed_input_pts_sdf.detach() * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + ''' Calculate weights for deformed input points ''' + + + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = input_pts.size(0) + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + prev_rigid_def = self.timestep_to_total_def_copy[prev_pts_ts] # .unsqueeze(0) + defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + ws_unnormed = ws_beta * torch.exp(-1. 
* defed_uniformly_sampled_pts_sdf.detach() * ws_alpha * 100) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) + nn_sampled_input_pts = 2000 + # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + + # + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) # prev + + ''' ### Use points from passive mesh ### ''' + # sampled_input_pts = prev_passive_mesh.clone() ## 3 passive mesh # + # # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) # + # defed_input_pts = sampled_input_pts - self.timestep_to_total_def_copy[prev_pts_ts].unsqueeze(0) # + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + # sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + # # weighting model via the distance # + # # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # # distances # the kappa # + # self.weighting_model_ks = nn.Embedding( # k_a and k_b # + # num_embeddings=2, embedding_dim=1 + # ) + # self.spring_rest_length = 2. # + # self.spring_x_min = -2. + # self.spring_qd = nn.Embedding( + # num_embeddings=1, embedding_dim=1 + # ) + # torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # # 1) sample points from the active robot's mesh; + # # 2) calculate forces from sampled points to the action point; + # # 3) use the weight model to calculate weights for each sampled point; # + # # 4) aggregate forces; + # # + # prev # + prev_active_mesh = timestep_to_active_mesh[prev_pts_ts] ## nn_active_pts x 3 ## + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + dist_input_pts_active_mesh = torch.sqrt(dist_input_pts_active_mesh) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh, topk_dist_input_pts_active_mesh_idx = torch.topk(dist_input_pts_active_mesh, k=500, largest=False, dim=-1) + thres_dist, _ = torch.max(topk_dist_input_pts_active_mesh, dim=-1) + weighting_ka = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + weighting_kb = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + unnormed_weight_active_pts_to_input_pts = weighting_ka * torch.exp(-1. * dist_input_pts_active_mesh * weighting_kb * 50) # + unnormed_weight_active_pts_to_input_pts[unnormed_weight_active_pts_to_input_pts >= thres_dist.unsqueeze(-1)] = 0. 
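+        # A minimal sketch of the spring model evaluated just below (helper defined for
+        # illustration only and never called; argument names are hypothetical). The spring
+        # stiffness is distance dependent, k_d(d) = q_d / (d - x_min), and the force acts along
+        # the direction from each active point towards the sampled passive point:
+        #     f_ij = -k_d(d_ij) * (d_ij - rest_len) * dir_ij
+        def _sketch_spring_forces(sampled_pts, active_pts, q_d, x_min, rest_len):
+            rel = sampled_pts.unsqueeze(1) - active_pts.unsqueeze(0)      # nn_sampled x nn_active x 3
+            d = torch.norm(rel, dim=-1, p=2)                              # nn_sampled x nn_active
+            direction = rel / torch.clamp(d.unsqueeze(-1), min=1e-9)
+            mag = -1. * (q_d / (d - x_min)) * (d - rest_len)
+            return mag.unsqueeze(-1) * direction                          # per-pair spring forces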
+ normed_weight_active_pts_to_input_pts = unnormed_weight_active_pts_to_input_pts / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + # m = Categorical(normed_weight_active_pts_to_input_pts) # + # nn_sampled_input_pts = 500 # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + + #### spring_qd #### + spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) # get spring_kd + # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") + spring_force = -1. * spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + dir_spring_force = sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0) + dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + spring_force = dir_spring_force * spring_force.unsqueeze(-1) ## + spring_force = torch.sum( + spring_force * normed_weight_active_pts_to_input_pts.unsqueeze(-1), dim=1 + ) + + # nearest_dist, nearest_pts_idx = torch.min(dist_input_pts_active_mesh, dim=-1) + #### KDTree #### + # active_mesh_ktree = KDTree(prev_active_mesh.detach().cpu().numpy()) + # nearest_dist, nearest_pts_idx = active_mesh_ktree.query(input_pts.detach().cpu().numpy(), k=1) + # nearest_pts_idx = torch.from_numpy(nearest_pts_idx).long().cuda() ### nn_input_pts ## + #### KDTree #### + + # nearest_active_pts = prev_active_mesh[nearest_pts_idx] ### nn_input_pts x 3 + + # active_pts_to_input_pts = sampled_input_pts - nearest_active_pts ## nn_input_pts x 3 # + + + + ''' Deform input points via the active deformation ''' # size direction # + # input_pts_to_active = sampled_input_pts.clone() # + + # input_pts_to_active = torch.from_numpy(input_pts.detach().cpu().numpy()).float().cuda() + # input_pts_to_active.requires_grad_ = True + # input_pts_to_active.requires_grad = True + + # with torch.no_grad(): + # for cur_pts_ts in range(prev_pts_ts, -1, -1): + # input_pts_to_active = active_bending_net(input_pts_to_active, input_pts_ts=cur_pts_ts) + # get the deformed active points # + ## get the signed distance of the sampled points to the active robot ## + # input_pts_to_active_sdf = active_sdf_net.sdf(input_pts_to_active) # nn_pts x 1 # + + + # with torch.enable_grad(): + # # input_pts_to_active_sdf_nn_out = active_sdf_net(input_pts_to_active) + # # input_pts_to_active = torch.from_numpy(input_pts_to_active.detach().cpu().numpy()).float().cuda() + # # # input_pts_to_active.requires_grad_ = True + # # input_pts_to_active.requires_grad = True + # input_pts_to_active_sdf_gradient = active_sdf_net.gradient(input_pts_to_active).squeeze() # nn_pts x 3 -> the spatial graient? # + + # input_pts_to_active_sdf = input_pts_to_active_sdf.detach() + # input_pts_to_active_sdf_gradient = input_pts_to_active_sdf_gradient.detach() + + # # input_pts_force_dir = -1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + # input_pts_force_dir = 1. 
* input_pts_to_active_sdf_gradient # nn_pts x 3 # + # input_pts_force_dir = input_pts_force_dir / torch.clamp(torch.norm(input_pts_force_dir, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + ''' Deform input points via the active deformation ''' + + ''' Embed prev_pts_ts timestamp ''' + # expanded_prev_pts_ts = torch.zeros((sampled_input_pts.size(0)), dtype=torch.long).cuda() + # expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + ''' Embed prev_pts_ts timestamp ''' + + ''' Embed sdf values ''' + raw_input_pts = input_pts[:, :3] + # if self.embed_fn_fine is not None: # embed fn # + # input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' + + + + ''' Acquire the bending latents ''' + # input_latents = self.bending_latent(expanded_prev_pts_ts) # + ''' Acquire the bending latents ''' + + + ''' Use the network to acquire the force scales ''' ### TODO: should not be a time-varying determining process + # x = torch.cat([input_pts, input_latents], dim=-1) + # x = torch.cat([input_pts_to_active_sdf, input_latents], dim=-1) + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.network + # else: + # cur_network = self.split_network + + # ''' use the single split network without no_grad setting ''' + # for i, layer in enumerate(cur_network): + # # print(f"i: {i}") + # x = layer(x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts_to_active_sdf, x], -1) + ''' Use the network to acquire the force scales ''' + + + # input_dir_latents = self.bending_dir_latent(expanded_prev_pts_ts) + # dir_x = torch.cat([input_pts_to_active_sdf, input_dir_latents], dim=-1) + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.dir_network + # else: + # cur_network = self.split_dir_network + + # ''' use the single split network without no_grad setting ''' + # for i, layer in enumerate(cur_network): + # dir_x = layer(dir_x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # dir_x *= 30.0 + # if i != len(self.network) - 1: + # dir_x = self.activation_function(dir_x) + # if i in self.skips: + # dir_x = torch.cat([input_pts_to_active_sdf, dir_x], -1) + # ''' use the single split network without no_grad setting ''' + + # dir_x = active_pts_to_input_pts / torch.clamp(torch.norm(active_pts_to_input_pts, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + + + # |f| = k_a * exp(-dist^1 * k_b) + # dist_active_pts_to_input_pts = torch.norm(active_pts_to_input_pts, dim=-1, p=2, keepdim=True) ### nn_input_pts + # + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + # power_k = 0.5 + # power_k = 1. + # x = dist_k_a * torch.exp(-1 * dist_active_pts_to_input_pts * dist_k_b) ### dist_k_b; dist_k_a; distance -> the scale of the functional force ### + # x = dist_k_a * torch.exp(-1 * torch.pow(dist_active_pts_to_input_pts, power_k) * dist_k_b) + + + + ### TODO: how to adjust the direction? 
### + ### the size of the force are only related to the signed distance ### + # x = x * input_pts_force_dir # with the direction -> + # x = x * dir_x # with the direction -> + + x = spring_force # + forces = x # nn_pts x 3 # # at the + # defed_input_pts_sdf: nn_pts sdf values # + # wi = \beta exp(-d_i \alpha) # TODO: the power of the d in the exp? + ## [\alpha, \beta] ## + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + ws_unnormed = ws_beta * torch.exp(-1. * sampled_defed_input_pts_sdf * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + # print(f"forces: {forces.size()}, ws_normed: {ws_normed.size()}") + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) ### (3,) rigid acc # # + + # rigid_acc = torch.mean(forces, dim=0) + k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1] + # ws_normed, defed_input_pts_sdf # points as the supervision # + self.timestep_to_input_pts[prev_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = x.detach() + self.timestep_to_ws_normed[prev_pts_ts] = ws_normed.detach() + self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} # + # self.timestep_to_ori_input_pts_sdf = {} # + # ori_input_pts, ori_input_pts_sdf # + self.timestep_to_ori_input_pts[prev_pts_ts] = ori_input_pts.detach() + self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), # vel to offset # + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + # "dist_k_b": dist_k_b.detach().cpu()[0].item(), + # "dist_k_a": dist_k_a.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? # 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + + # prev_rigid_def.unsqueeze(0) # + ## TODO: is it a good updating strategy? 
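+        # Note on the update below: the per-step offset is detached before being accumulated into
+        # timestep_to_total_def, so the stored total deformation does not backpropagate through
+        # the whole history, while timestep_to_optimizable_offset keeps the non-detached offset so
+        # it can still be optimized directly; the returned points are the raw query points shifted
+        # back by this offset.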
## + cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + self.timestep_to_total_def[input_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset # get the offset # + ## update raw input pts ## + new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return new_pts + + + +### the points forces ### +class BendingNetworkActiveForceFieldForwardV7(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardV7, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + # if self.activation_function.__name__ == "sin": + # if type(layer) == nn.Linear: + # a = ( + # 1.0 / layer.in_features + # if i == 0 + # else np.sqrt(6.0 / layer.in_features) + # ) + # 
layer.weight.uniform_(-a, a) + # elif self.activation_function.__name__ == "relu": + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # torch.nn.init.kaiming_uniform_( + # layer.weight, a=0, mode="fan_in", nonlinearity="relu" + # ) + # torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.dir_network[-1].weight.data *= 0.0 + # if use_last_layer_bias: + # self.dir_network[-1].bias.data *= 0.0 + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + + ### TODO: initialize the t_to_total_def variable ### + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + ### sdf net ### should deform + def forward(self, input_pts, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False): # special loss # + + + prev_pts_ts = input_pts_ts - 1 # previous timestep # + + + # deform input points + ''' Deform input points via the passive rigid deformations ''' + prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) # + defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + # self.timestep_to_ori_input_pts = {} + # self.timestep_to_ori_input_pts_sdf = {} + # ori_input_pts, ori_input_pts_sdf + ori_input_pts = input_pts.clone().detach() + ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points via the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, + prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + ws_unnormed = ws_beta * torch.exp(-1. 
* defed_input_pts_sdf.detach() * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + ''' Calculate weights for deformed input points ''' + + + # optimizable point weights # + # optimizable point weights with fixed spring rules # # ws_normed -> ws_normed # ws_normed # + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = input_pts.size(0) + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + prev_rigid_def = self.timestep_to_total_def_copy[prev_pts_ts] # .unsqueeze(0) + defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + ws_unnormed = ws_beta * torch.exp(-1. * defed_uniformly_sampled_pts_sdf.detach() * ws_alpha * 100) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) # sample points # + nn_sampled_input_pts = 2000 + # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + + # + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) # prev + + ''' ### Use points from passive mesh ### ''' + sampled_input_pts = prev_passive_mesh.clone() ## 3 passive mesh # + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) # + defed_input_pts = sampled_input_pts - self.timestep_to_total_def_copy[prev_pts_ts].unsqueeze(0) # + defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) ## nn_pts # + sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + # # weighting model via the distance # # defed input pts sdf # + # # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # # distances # the kappa # + # self.weighting_model_ks = nn.Embedding( # k_a and k_b # + # num_embeddings=2, embedding_dim=1 + # ) + # self.spring_rest_length = 2. # + # self.spring_x_min = -2. 
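+        # Overview of the patch-based force aggregation used further below (summary comment;
+        # pairwise quantities have shape nn_sampled_pts x nn_active_pts):
+        #   1) distances d_ij between sampled passive points and the previous active mesh;
+        #   2) truncated-exponential weights w_ij = k_a * exp(-50 * d_ij * k_b) (far-away active
+        #      points suppressed via a top-k distance threshold), normalized over j and used as a
+        #      Categorical to draw ~500 active indices per sampled point;
+        #   3) per-pair spring forces f_ij = -k_d(d_ij) * (d_ij - rest_length) * dir_ij with
+        #      k_d(d) = q_d / (d - x_min), gathered for the drawn indices via batched_index_select;
+        #   4) patch_force_scale_network (shared per-point MLP -> max-pool global feature ->
+        #      concat -> MLP -> softmax over the selected active points) predicts the aggregation
+        #      weights, and the final per-point force is the softmax-weighted sum of f_ij.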
+ # self.spring_qd = nn.Embedding( + # num_embeddings=1, embedding_dim=1 + # ) + # torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # # 1) sample points from the active robot's mesh; + # # 2) calculate forces from sampled points to the action point; + # # 3) use the weight model to calculate weights for each sampled point; # + # # 4) aggregate forces; + prev_active_mesh = timestep_to_active_mesh[prev_pts_ts] ## nn_active_pts x 3 ## + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + + + + + ##### sqrt and the ##### + dist_input_pts_active_mesh = torch.sqrt(dist_input_pts_active_mesh) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh, topk_dist_input_pts_active_mesh_idx = torch.topk(dist_input_pts_active_mesh, k=500, largest=False, dim=-1) + thres_dist, _ = torch.max(topk_dist_input_pts_active_mesh, dim=-1) + weighting_ka = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + weighting_kb = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + + + + unnormed_weight_active_pts_to_input_pts = weighting_ka * torch.exp(-1. * dist_input_pts_active_mesh * weighting_kb * 50) # + unnormed_weight_active_pts_to_input_pts[unnormed_weight_active_pts_to_input_pts >= thres_dist.unsqueeze(-1)] = 0. + normed_weight_active_pts_to_input_pts = unnormed_weight_active_pts_to_input_pts / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + m = Categorical(normed_weight_active_pts_to_input_pts) # + nn_sampled_input_pts = 500 # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + sampled_input_pts_idx = sampled_input_pts_idx.contiguous().transpose(1, 0).contiguous() + rel_input_pts_active_mesh = sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0) + # print(f"rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}, sampled_input_pts_idx: {sampled_input_pts_idx.size()}") + rel_input_pts_active_mesh = batched_index_select(values=rel_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) # + # print(f"after selection --- rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}") + # dist_...: nn_input_pts x nn_active_mesh_pts x 3 # rel input pts + # self.patch_force_network = nn.ModuleList( + # [ + # nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + # nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + # nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + # nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + # ] + # ) + + ''' spring force v2: use the spring force as input ''' + spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) # get spring_kd + # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") + spring_force = -1. 
* spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + spring_force = batched_index_select(values=spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0) + dir_spring_force = batched_index_select(values=dir_spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + spring_force = dir_spring_force * spring_force.unsqueeze(-1) ## + # spring_force = torch.sum( + # spring_force * normed_weight_active_pts_to_input_pts.unsqueeze(-1), dim=1 + # ) + # rel_input_pts_active_mesh = spring_force + ''' spring force v2: use the spring force as input ''' + + # patch_force_scale_network # + + # transformed_w = self.patch_force_network[0](rel_input_pts_active_mesh) # + # transformed_w = self.patch_force_network[1](transformed_w) + # glb_transformed_w, _ = torch.max(transformed_w, dim=1, keepdim=True) + # # print(f"transformed_w: {transformed_w.size()}, glb_transformed_w: {glb_transformed_w.size()}") + # glb_transformed_w = glb_transformed_w.repeat(1, transformed_w.size(1), 1) # + + # transformed_w = torch.cat( + # [transformed_w, glb_transformed_w], dim=-1 + # ) + + # forces = self.patch_force_network[2](transformed_w) + # # print(f"before the last step, forces: {forces.size()}") + # forces, _ = torch.max(forces, dim=1) + # forces = self.patch_force_network[3](forces) + ''' spring force v2: use the spring force as input ''' + + # spring # + ''' spring force v3: use the spring force as input ''' + transformed_w = self.patch_force_scale_network[0](rel_input_pts_active_mesh) # + transformed_w = self.patch_force_scale_network[1](transformed_w) + glb_transformed_w, _ = torch.max(transformed_w, dim=1, keepdim=True) + # print(f"transformed_w: {transformed_w.size()}, glb_transformed_w: {glb_transformed_w.size()}") + glb_transformed_w = glb_transformed_w.repeat(1, transformed_w.size(1), 1) # + + transformed_w = torch.cat( + [transformed_w, glb_transformed_w], dim=-1 + ) + + force_weighting = self.patch_force_scale_network[2](transformed_w) # + # print(f"before the last step, forces: {forces.size()}") + # forces, _ = torch.max(forces, dim=1) + force_weighting = self.patch_force_scale_network[3](force_weighting).squeeze(-1) # nn_sampled_pts x nn_active_pts # + force_weighting = F.softmax(force_weighting, dim=-1) ## nn_sampled_pts x nn_active_pts # + ## use the v3 force as the input to the field ## + forces = torch.sum( + spring_force * force_weighting.unsqueeze(-1), dim=1 ### sum over the force; sum over the force ### + ) + ''' spring force v3: use the spring force as input ''' + + #### use the #### + # #### spring_qd #### + # spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + # spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) # get spring_kd + # # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") + # spring_force = -1. 
* spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + # dir_spring_force = sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0) + # dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + # spring_force = dir_spring_force * spring_force.unsqueeze(-1) ## + # spring_force = torch.sum( + # spring_force * normed_weight_active_pts_to_input_pts.unsqueeze(-1), dim=1 + # ) + + # nearest_dist, nearest_pts_idx = torch.min(dist_input_pts_active_mesh, dim=-1) # patch calculations # ---> how to caclulate + #### KDTree #### + # active_mesh_ktree = KDTree(prev_active_mesh.detach().cpu().numpy()) + # nearest_dist, nearest_pts_idx = active_mesh_ktree.query(input_pts.detach().cpu().numpy(), k=1) + # nearest_pts_idx = torch.from_numpy(nearest_pts_idx).long().cuda() ### nn_input_pts ## + #### KDTree #### + + # nearest_active_pts = prev_active_mesh[nearest_pts_idx] ### nn_input_pts x 3 + + # active_pts_to_input_pts = sampled_input_pts - nearest_active_pts ## nn_input_pts x 3 # + + + + ''' Deform input points via the active deformation ''' # size direction # + # input_pts_to_active = sampled_input_pts.clone() # + + # input_pts_to_active = torch.from_numpy(input_pts.detach().cpu().numpy()).float().cuda() + # input_pts_to_active.requires_grad_ = True + # input_pts_to_active.requires_grad = True + + # with torch.no_grad(): + # for cur_pts_ts in range(prev_pts_ts, -1, -1): + # input_pts_to_active = active_bending_net(input_pts_to_active, input_pts_ts=cur_pts_ts) + # get the deformed active points # + ## get the signed distance of the sampled points to the active robot ## + # input_pts_to_active_sdf = active_sdf_net.sdf(input_pts_to_active) # nn_pts x 1 # + + + # with torch.enable_grad(): + # # input_pts_to_active_sdf_nn_out = active_sdf_net(input_pts_to_active) + # # input_pts_to_active = torch.from_numpy(input_pts_to_active.detach().cpu().numpy()).float().cuda() + # # # input_pts_to_active.requires_grad_ = True + # # input_pts_to_active.requires_grad = True + # input_pts_to_active_sdf_gradient = active_sdf_net.gradient(input_pts_to_active).squeeze() # nn_pts x 3 -> the spatial graient? # + + # input_pts_to_active_sdf = input_pts_to_active_sdf.detach() + # input_pts_to_active_sdf_gradient = input_pts_to_active_sdf_gradient.detach() + + # # input_pts_force_dir = -1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + # input_pts_force_dir = 1. 
* input_pts_to_active_sdf_gradient # nn_pts x 3 # + # input_pts_force_dir = input_pts_force_dir / torch.clamp(torch.norm(input_pts_force_dir, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + ''' Deform input points via the active deformation ''' + + ''' Embed prev_pts_ts timestamp ''' + # expanded_prev_pts_ts = torch.zeros((sampled_input_pts.size(0)), dtype=torch.long).cuda() + # expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + ''' Embed prev_pts_ts timestamp ''' + + ''' Embed sdf values ''' + raw_input_pts = input_pts[:, :3] + # if self.embed_fn_fine is not None: # embed fn # + # input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' + + + + ''' Acquire the bending latents ''' + # input_latents = self.bending_latent(expanded_prev_pts_ts) # + ''' Acquire the bending latents ''' + + + ''' Use the network to acquire the force scales ''' ### TODO: should not be a time-varying determining process + # x = torch.cat([input_pts, input_latents], dim=-1) + # x = torch.cat([input_pts_to_active_sdf, input_latents], dim=-1) + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.network + # else: + # cur_network = self.split_network + + # ''' use the single split network without no_grad setting ''' + # for i, layer in enumerate(cur_network): + # # print(f"i: {i}") + # x = layer(x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts_to_active_sdf, x], -1) + ''' Use the network to acquire the force scales ''' + + + # input_dir_latents = self.bending_dir_latent(expanded_prev_pts_ts) + # dir_x = torch.cat([input_pts_to_active_sdf, input_dir_latents], dim=-1) + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.dir_network + # else: + # cur_network = self.split_dir_network + + # ''' use the single split network without no_grad setting ''' + # for i, layer in enumerate(cur_network): + # dir_x = layer(dir_x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # dir_x *= 30.0 + # if i != len(self.network) - 1: + # dir_x = self.activation_function(dir_x) + # if i in self.skips: + # dir_x = torch.cat([input_pts_to_active_sdf, dir_x], -1) + # ''' use the single split network without no_grad setting ''' + + # dir_x = active_pts_to_input_pts / torch.clamp(torch.norm(active_pts_to_input_pts, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + + + # |f| = k_a * exp(-dist^1 * k_b) + # dist_active_pts_to_input_pts = torch.norm(active_pts_to_input_pts, dim=-1, p=2, keepdim=True) ### nn_input_pts + # + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + # power_k = 0.5 + # power_k = 1. + # x = dist_k_a * torch.exp(-1 * dist_active_pts_to_input_pts * dist_k_b) ### dist_k_b; dist_k_a; distance -> the scale of the functional force ### + # x = dist_k_a * torch.exp(-1 * torch.pow(dist_active_pts_to_input_pts, power_k) * dist_k_b) + + + + ### TODO: how to adjust the direction? 
### + ### the size of the force are only related to the signed distance ### + # x = x * input_pts_force_dir # with the direction -> + # x = x * dir_x # with the direction -> + + # x = spring_force # + # forces = x # nn_pts x 3 # # at the + + # defed_input_pts_sdf: nn_pts sdf values # + # wi = \beta exp(-d_i \alpha) # TODO: the power of the d in the exp? + ## [\alpha, \beta] ## + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + ws_unnormed = ws_beta * torch.exp(-1. * sampled_defed_input_pts_sdf * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + # print(f"forces: {forces.size()}, ws_normed: {ws_normed.size()}") + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) ### (3,) rigid acc # # + + # rigid_acc = torch.mean(forces, dim=0) + k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1] + # ws_normed, defed_input_pts_sdf # points as the supervision # + self.timestep_to_input_pts[prev_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = forces.detach() + self.timestep_to_ws_normed[prev_pts_ts] = ws_normed.detach() + self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} # + # self.timestep_to_ori_input_pts_sdf = {} # + # ori_input_pts, ori_input_pts_sdf # + self.timestep_to_ori_input_pts[prev_pts_ts] = ori_input_pts.detach() + self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), # vel to offset # + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + # "dist_k_b": dist_k_b.detach().cpu()[0].item(), + # "dist_k_a": dist_k_a.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? # 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + + # prev_rigid_def.unsqueeze(0) # + ## TODO: is it a good updating strategy? 
## + cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + self.timestep_to_total_def[input_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset # get the offset # + ## update raw input pts ## + new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return new_pts + + + + + + +### the points forces ### +class BendingNetworkActiveForceFieldForwardV8(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardV8, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + + self.time_constant = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in 
enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpool layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### if the linear layer ### + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + + ### TODO: initialize the t_to_total_def variable ### + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... 
## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + ## set splitting bending network # + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + ### sdf net ### + def forward(self, input_pts, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + + # wieghting force field # + prev_pts_ts = input_pts_ts - 1 + + + ''' Deform input points via the passive rigid deformations ''' + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + # defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # # self.timestep_to_ori_input_pts = {} # + # # self.timestep_to_ori_input_pts_sdf = {} # + # # ori_input_pts, ori_input_pts_sdf #### input_pts #### + # ori_input_pts = input_pts.clone().detach() + # ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points via the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, # + # prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. 
* defed_input_pts_sdf.detach() * ws_alpha) # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + ''' Calculate weights for deformed input points ''' + + # optimizable point weights with fixed spring rules # + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = input_pts.size(0) + #### uniformly_sampled_pts: nn_sampled_pts x 3 #### + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + # use weighting_network to get weights of those sampled pts # + expanded_prev_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) + input_latents = self.bending_latent(expanded_prev_pts_ts) + x = torch.cat([uniformly_sampled_pts, input_latents], dim=-1) + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + cur_network = self.weighting_network + else: + cur_network = self.split_weighting_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([uniformly_sampled_pts, x], -1) + # x: nn_uniformly_sampled_pts x 1 weights # + x = x.squeeze(-1) + ws_normed = F.softmax(x, dim=0) #### calculate the softmax as weights # + + ### total def copy ## + # prev_rigid_def = self.timestep_to_total_def_copy[prev_pts_ts] # .unsqueeze(0) + prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] # .un + defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. 
* defed_uniformly_sampled_pts_sdf.detach() * ws_alpha * 100) # nn_pts # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) + nn_sampled_input_pts = 2000 + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + + + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + + ''' ### Use points from passive mesh ### ''' + # sampled_input_pts = prev_passive_mesh.clone() + # # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts = sampled_input_pts - self.timestep_to_total_def_copy[prev_pts_ts].unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + # # weighting model via the distance # # defed input pts sdf # + # # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # # distances # the kappa # + # self.weighting_model_ks = nn.Embedding( # k_a and k_b # + # num_embeddings=2, embedding_dim=1 + # ) + # self.spring_rest_length = 2. # + # self.spring_x_min = -2. + # self.spring_qd = nn.Embedding( + # num_embeddings=1, embedding_dim=1 + # ) + # torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # # 1) sample points from the active robot's mesh; + # # 2) calculate forces from sampled points to the action point; + # # 3) use the weight model to calculate weights for each sampled point; # + # # 4) aggregate forces; + # # # + ''' Distance to previous prev meshes to optimize ''' + prev_active_mesh = timestep_to_active_mesh[prev_pts_ts] ## nn_active_pts x 3 ## + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + + ##### sqrt and the ##### + dist_input_pts_active_mesh = torch.sqrt(dist_input_pts_active_mesh) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh, topk_dist_input_pts_active_mesh_idx = torch.topk(dist_input_pts_active_mesh, k=500, largest=False, dim=-1) + thres_dist, _ = torch.max(topk_dist_input_pts_active_mesh, dim=-1) + weighting_ka = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + weighting_kb = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + + unnormed_weight_active_pts_to_input_pts = weighting_ka * torch.exp(-1. * dist_input_pts_active_mesh * weighting_kb * 50) # + unnormed_weight_active_pts_to_input_pts[unnormed_weight_active_pts_to_input_pts >= thres_dist.unsqueeze(-1)] = 0. 
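+ # Weighting sketch (a reading of the code above and below; k_a and k_b are the two self.weighting_model_ks embeddings, both initialized to 1):
+ #   for a sampled passive point i and an active-mesh vertex j at distance d_ij, the unnormalized weight is w_ij = k_a * exp(-50 * d_ij * k_b),
+ #   e.g. with k_a = k_b = 1 and d_ij = 0.02 this gives w_ij = exp(-1.0) ~= 0.368.
+ # Entries whose weight reaches thres_dist (the distance to the 500th-nearest active vertex for that point) are zeroed above; the remaining weights are normalized below and used as a per-point categorical distribution to draw candidate active vertices.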
+ normed_weight_active_pts_to_input_pts = unnormed_weight_active_pts_to_input_pts / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + m = Categorical(normed_weight_active_pts_to_input_pts) # + nn_sampled_input_pts = 500 # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + sampled_input_pts_idx = sampled_input_pts_idx.contiguous().transpose(1, 0).contiguous() + rel_input_pts_active_mesh = sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0) + # print(f"rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}, sampled_input_pts_idx: {sampled_input_pts_idx.size()}") + rel_input_pts_active_mesh = batched_index_select(values=rel_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) # + + + # prev_active_mesh_exp = prev_active_mesh.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # prev_active_mesh_exp = batched_index_select(values=prev_active_mesh_exp, indices=sampled_input_pts_idx, dim=1) ### nn_sampled_pts x nn_selected_pts x 3 + # self.timestep_to_prev_selected_active_mesh[prev_pts_ts] = prevactive + ''' Distance to previous active meshes to optimize ''' + # timestep_to_prev_active_mesh_ori + prev_active_mesh_ori = self.timestep_to_prev_active_mesh_ori[prev_pts_ts] ## nn_active_pts x 3 ## + + dist_input_pts_active_mesh_ori = torch.sum( + (sampled_input_pts.detach().unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0)) ** 2, dim=-1 + ) + dist_input_pts_active_mesh_ori = torch.sqrt(dist_input_pts_active_mesh_ori) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh_ori, topk_dist_input_pts_active_mesh_idx_ori = torch.topk(dist_input_pts_active_mesh_ori, k=500, largest=False, dim=-1) + thres_dist_ori, _ = torch.max(topk_dist_input_pts_active_mesh_ori, dim=-1) + weighting_ka_ori = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + weighting_kb_ori = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + + unnormed_weight_active_pts_to_input_pts_ori = weighting_ka_ori * torch.exp(-1. * dist_input_pts_active_mesh_ori * weighting_kb_ori * 50) # + unnormed_weight_active_pts_to_input_pts_ori[unnormed_weight_active_pts_to_input_pts_ori >= thres_dist_ori.unsqueeze(-1)] = 0. 
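+ # The *_ori quantities here and below mirror the weighting above, but measured against the reference (unoptimized) active mesh prev_active_mesh_ori with sampled_input_pts detached, presumably so the reference spring forces stored in timestep_to_spring_forces_ori can be compared with the optimized ones without backpropagating into the passive samples.
+ # Minimal sketch of the spring law applied a few lines below (constants from this class: spring_qd is overridden to 0.5, spring_x_min = -2.0, spring_rest_length = 2.0, time_constant initialized to 1):
+ #   k_d = spring_qd / (d - spring_x_min)      # distance-dependent stiffness
+ #   f   = -k_d * (d - spring_rest_length)     # scalar magnitude along the normalized (passive point - active vertex) direction
+ #   e.g. d = 0.1  ->  k_d = 0.5 / 2.1 ~= 0.238 and f ~= +0.452, i.e. a compressed spring (d < rest length) pushes the passive point away from the active vertex.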
+ normed_weight_active_pts_to_input_pts_ori = unnormed_weight_active_pts_to_input_pts_ori / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts_ori, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + m_ori = Categorical(normed_weight_active_pts_to_input_pts_ori) # + nn_sampled_input_pts = 500 # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx_ori = m_ori.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + sampled_input_pts_idx_ori = sampled_input_pts_idx_ori.contiguous().transpose(1, 0).contiguous() + + rel_input_pts_active_mesh_ori = sampled_input_pts.detach().unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0).detach() + + prev_active_mesh_ori_exp = prev_active_mesh_ori.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() + prev_active_mesh_ori_exp = batched_index_select(values=prev_active_mesh_ori_exp, indices=sampled_input_pts_idx_ori, dim=1) + # prev_active_mesh_ori_exp: nn_sampled_pts x nn_active_pts x 3 # + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori[prev_pts_ts] = prev_active_mesh_ori_exp.detach() + ''' Distance to previous active meshes to optimize ''' + + # rel_input_pts_active_mesh = sampled_input_pts.detach().unsqueeze(1) - prev_active_mesh.unsqueeze(0) + # # print(f"rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}, sampled_input_pts_idx: {sampled_input_pts_idx.size()}") + # rel_input_pts_active_mesh = batched_index_select(values=rel_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) # + + ### printf ### + # print(f"after selection --- rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}") + # dist_...: nn_input_pts x nn_active_mesh_pts x 3 # rel input pts + # self.patch_force_network = nn.ModuleList( + # [ + # nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + # nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + # nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + # nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + # ] + # ) + + # active mesh -> active points at each timestep -> the relative offset from each active point to the passive object # + # the force scale and the force direction # + # the spring force from each active point -> the spring force to the passive object (spring force transformer network for the spring force) -> the acc -> the velocity -> the offset # + + + ''' spring force v2: use the spring force as input ''' + ### determine the spring coefficient ### + spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + # spring_qd = 1. # fix the qd to 1 # spring_qd # # spring_qd # + spring_qd = 0.5 + # dist input pts to active mesh # + spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) # spring kd for each point # + # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") # + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) # tiem_constant + spring_kd = spring_kd * time_cons ### get the spring_kd (nn_sampled_pts x nn_act_pts ) #### + spring_force = -1. 
* spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + spring_force = batched_index_select(values=spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0) # prev_active_mesh # + dir_spring_force = batched_index_select(values=dir_spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + spring_force = dir_spring_force * spring_force.unsqueeze(-1) ## + # spring_force = torch.sum( + # spring_force * normed_weight_active_pts_to_input_pts.unsqueeze(-1), dim=1 + # ) + # rel_input_pts_active_mesh = spring_force + ''' spring force v2: use the spring force as input ''' + + + ''' get the spring force of the reference motion ''' + spring_kd_ori = spring_qd / (dist_input_pts_active_mesh_ori - self.spring_x_min) + spring_kd_ori = spring_kd_ori * time_cons + spring_force_ori = -1. * spring_kd_ori * (dist_input_pts_active_mesh_ori - self.spring_rest_length) + spring_force_ori = batched_index_select(values=spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + dir_spring_force_ori = sampled_input_pts.unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0) + dir_spring_force_ori = batched_index_select(values=dir_spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + dir_spring_force_ori = dir_spring_force_ori / torch.clamp(torch.norm(dir_spring_force_ori, dim=-1, keepdim=True, p=2), min=1e-9) + spring_force_ori = dir_spring_force_ori * spring_force_ori.unsqueeze(-1) ## spring_force_ori + ''' get the spring force of the reference motion ''' + + + + # transformed_w = self.patch_force_network[0](rel_input_pts_active_mesh) # + # transformed_w = self.patch_force_network[1](transformed_w) + # glb_transformed_w, _ = torch.max(transformed_w, dim=1, keepdim=True) + # # print(f"transformed_w: {transformed_w.size()}, glb_transformed_w: {glb_transformed_w.size()}") + # glb_transformed_w = glb_transformed_w.repeat(1, transformed_w.size(1), 1) # + + # transformed_w = torch.cat( + # [transformed_w, glb_transformed_w], dim=-1 + # ) + + # forces = self.patch_force_network[2](transformed_w) + # # print(f"before the last step, forces: {forces.size()}") + # forces, _ = torch.max(forces, dim=1) + # forces = self.patch_force_network[3](forces) + ''' spring force v2: use the spring force as input ''' + + + ''' spring force v3: use the spring force as input ''' + transformed_w = self.patch_force_scale_network[0](rel_input_pts_active_mesh) # + transformed_w = self.patch_force_scale_network[1](transformed_w) + glb_transformed_w, _ = torch.max(transformed_w, dim=1, keepdim=True) + # print(f"transformed_w: {transformed_w.size()}, glb_transformed_w: {glb_transformed_w.size()}") + glb_transformed_w = glb_transformed_w.repeat(1, transformed_w.size(1), 1) # + + transformed_w = torch.cat( + [transformed_w, glb_transformed_w], dim=-1 + ) + + force_weighting = self.patch_force_scale_network[2](transformed_w) # + # print(f"before the last step, forces: {forces.size()}") + # forces, _ = torch.max(forces, dim=1) # and force weighting # + force_weighting = self.patch_force_scale_network[3](force_weighting).squeeze(-1) # nn_sampled_pts x nn_active_pts # + force_weighting = F.softmax(force_weighting, dim=-1) ## nn_sampled_pts x nn_active_pts # + ## use the v3 force as the input to the field ## + forces = torch.sum( # # use the spring force as input # + spring_force * 
force_weighting.unsqueeze(-1), dim=1 ### sum over the force; sum over the force ### + ) + self.timestep_to_spring_forces[prev_pts_ts] = forces + ''' spring force v3: use the spring force as input ''' + + + + # timestep_to_spring_forces, timestep_to_spring_forces_ori # + transformed_w_ori = self.patch_force_scale_network[0](rel_input_pts_active_mesh_ori) + transformed_w_ori = self.patch_force_scale_network[1](transformed_w_ori) + glb_transformed_w_ori, _ = torch.max(transformed_w_ori, dim=1, keepdim=True) + glb_transformed_w_ori = glb_transformed_w_ori.repeat(1, transformed_w_ori.size(1), 1) # + transformed_w_ori = torch.cat( + [transformed_w_ori, glb_transformed_w_ori], dim=-1 + ) + force_weighting_ori = self.patch_force_scale_network[2](transformed_w_ori) + force_weighting_ori = self.patch_force_scale_network[3](force_weighting_ori).squeeze(-1) + force_weighting_ori = F.softmax(force_weighting_ori, dim=-1) + forces_ori = torch.sum( + spring_force_ori.detach() * force_weighting.unsqueeze(-1).detach(), dim=1 + ) + self.timestep_to_spring_forces_ori[prev_pts_ts] = forces_ori # forces ori and the forces # + + + #### use the #### + # #### spring_qd #### + # spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + # spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) # get spring_kd + # # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") + # spring_force = -1. * spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + # dir_spring_force = sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0) + # dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + # spring_force = dir_spring_force * spring_force.unsqueeze(-1) ## + # spring_force = torch.sum( + # spring_force * normed_weight_active_pts_to_input_pts.unsqueeze(-1), dim=1 + # ) + + # nearest_dist, nearest_pts_idx = torch.min(dist_input_pts_active_mesh, dim=-1) # patch calculations # ---> how to caclulate + #### KDTree #### + # active_mesh_ktree = KDTree(prev_active_mesh.detach().cpu().numpy()) + # nearest_dist, nearest_pts_idx = active_mesh_ktree.query(input_pts.detach().cpu().numpy(), k=1) + # nearest_pts_idx = torch.from_numpy(nearest_pts_idx).long().cuda() ### nn_input_pts ## + #### KDTree #### + + # nearest_active_pts = prev_active_mesh[nearest_pts_idx] ### nn_input_pts x 3 + + # active_pts_to_input_pts = sampled_input_pts - nearest_active_pts ## nn_input_pts x 3 # + + + + ''' Deform input points via the active deformation ''' # size direction # + # input_pts_to_active = sampled_input_pts.clone() # + + # input_pts_to_active = torch.from_numpy(input_pts.detach().cpu().numpy()).float().cuda() + # input_pts_to_active.requires_grad_ = True + # input_pts_to_active.requires_grad = True + + # with torch.no_grad(): + # for cur_pts_ts in range(prev_pts_ts, -1, -1): + # input_pts_to_active = active_bending_net(input_pts_to_active, input_pts_ts=cur_pts_ts) + # get the deformed active points # + ## get the signed distance of the sampled points to the active robot ## + # input_pts_to_active_sdf = active_sdf_net.sdf(input_pts_to_active) # nn_pts x 1 # + + + # with torch.enable_grad(): + # # input_pts_to_active_sdf_nn_out = active_sdf_net(input_pts_to_active) + # # input_pts_to_active = torch.from_numpy(input_pts_to_active.detach().cpu().numpy()).float().cuda() + # # # input_pts_to_active.requires_grad_ = True + # # 
input_pts_to_active.requires_grad = True + # input_pts_to_active_sdf_gradient = active_sdf_net.gradient(input_pts_to_active).squeeze() # nn_pts x 3 -> the spatial graient? # + + # input_pts_to_active_sdf = input_pts_to_active_sdf.detach() + # input_pts_to_active_sdf_gradient = input_pts_to_active_sdf_gradient.detach() + + # # input_pts_force_dir = -1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + # input_pts_force_dir = 1. * input_pts_to_active_sdf_gradient # nn_pts x 3 # + # input_pts_force_dir = input_pts_force_dir / torch.clamp(torch.norm(input_pts_force_dir, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + ''' Deform input points via the active deformation ''' + + ''' Embed prev_pts_ts timestamp ''' + # expanded_prev_pts_ts = torch.zeros((sampled_input_pts.size(0)), dtype=torch.long).cuda() + # expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) size timestep # + ''' Embed prev_pts_ts timestamp ''' + + ''' Embed sdf values ''' + raw_input_pts = input_pts[:, :3] + # if self.embed_fn_fine is not None: # embed fn # + # input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' + + + + ''' Acquire the bending latents ''' + # input_latents = self.bending_latent(expanded_prev_pts_ts) # + ''' Acquire the bending latents ''' + + + ''' Use the network to acquire the force scales ''' ### TODO: should not be a time-varying determining process + # x = torch.cat([input_pts, input_latents], dim=-1) + # x = torch.cat([input_pts_to_active_sdf, input_latents], dim=-1) + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.network + # else: + # cur_network = self.split_network + + # ''' use the single split network without no_grad setting ''' + # for i, layer in enumerate(cur_network): + # # print(f"i: {i}") + # x = layer(x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # x *= 30.0 + # if i != len(self.network) - 1: + # x = self.activation_function(x) + # if i in self.skips: + # x = torch.cat([input_pts_to_active_sdf, x], -1) + ''' Use the network to acquire the force scales ''' + + + # input_dir_latents = self.bending_dir_latent(expanded_prev_pts_ts) + # dir_x = torch.cat([input_pts_to_active_sdf, input_dir_latents], dim=-1) + + # if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + # cur_network = self.dir_network + # else: + # cur_network = self.split_dir_network + + # ''' use the single split network without no_grad setting ''' + # for i, layer in enumerate(cur_network): + # dir_x = layer(dir_x) + # # SIREN + # if self.activation_function.__name__ == "sin" and i == 0: + # dir_x *= 30.0 + # if i != len(self.network) - 1: + # dir_x = self.activation_function(dir_x) + # if i in self.skips: + # dir_x = torch.cat([input_pts_to_active_sdf, dir_x], -1) + # ''' use the single split network without no_grad setting ''' + + # dir_x = active_pts_to_input_pts / torch.clamp(torch.norm(active_pts_to_input_pts, dim=-1, keepdim=True, p=2), min=1e-9) ##### force direction ##### + + + # |f| = k_a * exp(-dist^1 * k_b) + # dist_active_pts_to_input_pts = torch.norm(active_pts_to_input_pts, dim=-1, p=2, keepdim=True) ### nn_input_pts + # + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = 
self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + # power_k = 0.5 + # power_k = 1. + # x = dist_k_a * torch.exp(-1 * dist_active_pts_to_input_pts * dist_k_b) ### dist_k_b; dist_k_a; distance -> the scale of the functional force ### + # x = dist_k_a * torch.exp(-1 * torch.pow(dist_active_pts_to_input_pts, power_k) * dist_k_b) + + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + ws_unnormed = ws_beta * torch.exp(-1. * sampled_defed_input_pts_sdf * ws_alpha) # nn_pts # + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + # print(f"forces: {forces.size()}, ws_normed: {ws_normed.size()}") + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) ### (3,) rigid acc # # + + # rigid_acc = torch.mean(forces, dim=0) + ''' get velocity and offset related constants ''' + # k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + # k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + ''' get velocity and offset related constants ''' + + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1] + # ws_normed, defed_input_pts_sdf # + self.timestep_to_input_pts[prev_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = forces.detach() + self.timestep_to_ws_normed[prev_pts_ts] = ws_normed.detach() + self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} # # ori input pts # + # self.timestep_to_ori_input_pts_sdf = {} # # + # ori_input_pts, ori_input_pts_sdf # + self.timestep_to_ori_input_pts[prev_pts_ts] = ori_input_pts.detach() + self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), # vel to offset # + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + # "dist_k_b": dist_k_b.detach().cpu()[0].item(), + # "dist_k_a": dist_k_a.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + ## TODO: is it a good updating strategy? ## + cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + if update_tot_def: + self.timestep_to_total_def[input_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset # get the offset # + ## update raw input pts ## + new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return new_pts + + + +### the points forces ### +class BendingNetworkActiveForceFieldForwardV9(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardV9, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. 
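+ # Compared with BendingNetworkActiveForceFieldForwardV8, this variant additionally keeps a friction stiffness embedding (ks_friction_val), a contact distance threshold (contact_dist_thres), per-timestep angular velocity / quaternion / torque buffers for rigid rotation, and a uniformly_sample_pts helper; its forward pass also undoes the previous rotation (via quaternion_to_matrix) before querying the passive SDF.
+ # (rigidity_test_time_cutoff and test_time_scaling below are the scene-editing fields referred to by the comment above.)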
+ self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + + self.time_constant = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", 
nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? # + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with 
torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + def forward(self, input_pts, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + ### from input_pts to new pts ### + # wieghting force field # + prev_pts_ts = input_pts_ts - 1 + + ''' Deform input points via the passive rigid deformations ''' + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + # defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # # self.timestep_to_ori_input_pts = {} # + # # self.timestep_to_ori_input_pts_sdf = {} # + # # ori_input_pts, ori_input_pts_sdf #### input_pts #### + # ori_input_pts = input_pts.clone().detach() + # ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points via the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, # + # prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. * defed_input_pts_sdf.detach() * ws_alpha) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed + ''' Calculate weights for deformed input points ''' + + # optimizable point weights with fixed spring rules # + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = input_pts.size(0) + #### uniformly_sampled_pts: nn_sampled_pts x 3 #### + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + # use weighting_network to get weights of those sampled pts # + expanded_prev_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() + expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) # if we do not have a kinematics observation? 
# + input_latents = self.bending_latent(expanded_prev_pts_ts) + x = torch.cat([uniformly_sampled_pts, input_latents], dim=-1) + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < 5): + cur_network = self.weighting_network + else: + cur_network = self.split_weighting_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([uniformly_sampled_pts, x], -1) + # x: nn_uniformly_sampled_pts x 1 weights # + x = x.squeeze(-1) + ws_normed = F.softmax(x, dim=0) #### calculate the softmax as weights # + + ### total def copy ## + # prev_rigid_def = self.timestep_to_total_def_copy[prev_pts_ts] # .unsqueeze(0) + prev_rigid_def = self.timestep_to_total_def[prev_pts_ts].detach() + + # + prev_quaternion = self.timestep_to_quaternion[prev_pts_ts].detach() # + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) # prev_quaternion + + # + defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + + defed_uniformly_sampled_pts = torch.matmul(defed_uniformly_sampled_pts, prev_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### inversely rotate the sampled pts # + + defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + # defed_uniformly_sampled_pts_sdf: nn_sampled_pts # + minn_sampled_sdf, minn_sampled_sdf_pts_idx = torch.min(defed_uniformly_sampled_pts_sdf, dim=0) ## the pts_idx ## + passive_center_point = uniformly_sampled_pts[minn_sampled_sdf_pts_idx] ## center of the passive object ## + + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. 
* defed_uniformly_sampled_pts_sdf.detach() * ws_alpha * 100) # nn_pts # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) + nn_sampled_input_pts = 2000 + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + + + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + + ''' ### Use points from passive mesh ### ''' + # sampled_input_pts = prev_passive_mesh.clone() + # # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts = sampled_input_pts - self.timestep_to_total_def_copy[prev_pts_ts].unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + # # weighting model via the distance # # defed input pts sdf # + # # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # # distances # the kappa # + # self.weighting_model_ks = nn.Embedding( # k_a and k_b # + # num_embeddings=2, embedding_dim=1 + # ) + # self.spring_rest_length = 2. # + # self.spring_x_min = -2. 
+ # self.spring_qd = nn.Embedding( + # num_embeddings=1, embedding_dim=1 + # ) + # torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # # 1) sample points from the active robot's mesh; + # # 2) calculate forces from sampled points to the action point; + # # 3) use the weight model to calculate weights for each sampled point; # + # # 4) aggregate forces; + # # # + ''' Distance to previous prev meshes to optimize ''' + # to active mesh # + prev_active_mesh = timestep_to_active_mesh[prev_pts_ts] ## nn_active_pts x 3 ## + if prev_pts_ts == 0: + prev_prev_active_mesh_vel = torch.zeros_like(prev_active_mesh) + else: + # prev_prev_active_mesh_vel = prev_active_mesh - timestep_to_active_mesh[prev_pts_ts - 1] + #### prev_prev active mehs #### + # prev_prev_active_mesh = timestep_to_active_mesh[prev_pts_ts - 1] + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + ## distnaces from act_mesh to the prev_prev ### prev_pts_ts ### + dist_prev_act_mesh_to_prev_prev = torch.sum( + (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + ) + minn_dist_prev_act_mesh_to_cur, minn_idx_dist_prev_act_mesh_to_cur = torch.min( + dist_prev_act_mesh_to_prev_prev, dim=-1 ## + ) + selected_mesh_pts = batched_index_select(values=cur_active_mesh, indices=minn_idx_dist_prev_act_mesh_to_cur, dim=0) + prev_prev_active_mesh_vel = selected_mesh_pts - prev_active_mesh + + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + + ##### sqrt and the ##### + dist_input_pts_active_mesh = torch.sqrt(dist_input_pts_active_mesh) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh, topk_dist_input_pts_active_mesh_idx = torch.topk(dist_input_pts_active_mesh, k=500, largest=False, dim=-1) + thres_dist, _ = torch.max(topk_dist_input_pts_active_mesh, dim=-1) + weighting_ka = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + weighting_kb = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + + unnormed_weight_active_pts_to_input_pts = weighting_ka * torch.exp(-1. * dist_input_pts_active_mesh * weighting_kb * 50) # + unnormed_weight_active_pts_to_input_pts[unnormed_weight_active_pts_to_input_pts >= thres_dist.unsqueeze(-1)] = 0. 
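+        # --- Illustrative sketch (added note, not part of the original model code) ---
+        # The block above weights active-mesh points by their distance to each sampled field point
+        # with w = k_a * exp(-k_b * d) and keeps only the k nearest neighbours before normalising.
+        # The helper below restates that rule in isolation. `query_pts`/`surface_pts` are placeholder
+        # names, torch is the module-level import used throughout this file, and the mask here is a
+        # distance mask (the code above compares the weight itself against the distance threshold).
+        def _sketch_contact_weights(query_pts, surface_pts, k_a, k_b, topk=500, scale=50.0):
+            # query_pts: (N, 3) sampled field points; surface_pts: (M, 3) active-mesh points
+            d = torch.cdist(query_pts, surface_pts)                        # (N, M) pairwise distances
+            topk_d, _ = torch.topk(d, k=min(topk, d.size(-1)), largest=False, dim=-1)
+            thres = topk_d.max(dim=-1, keepdim=True).values                # per-point distance cut-off
+            w = k_a * torch.exp(-scale * k_b * d)                          # soft inverse-distance weight
+            w = torch.where(d <= thres, w, torch.zeros_like(w))            # keep only the k nearest
+            return w / torch.clamp(w.sum(dim=-1, keepdim=True), min=1e-9)  # normalised per query point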
+ normed_weight_active_pts_to_input_pts = unnormed_weight_active_pts_to_input_pts / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + m = Categorical(normed_weight_active_pts_to_input_pts) # + nn_sampled_input_pts = 500 # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + sampled_input_pts_idx = sampled_input_pts_idx.contiguous().transpose(1, 0).contiguous() + rel_input_pts_active_mesh = sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0) + # print(f"rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}, sampled_input_pts_idx: {sampled_input_pts_idx.size()}") + rel_input_pts_active_mesh = batched_index_select(values=rel_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) # + + prev_prev_active_mesh_vel_exp = prev_prev_active_mesh_vel.unsqueeze(0).repeat(rel_input_pts_active_mesh.size(0), 1, 1).contiguous() + prev_prev_active_mesh_vel = batched_index_select(values=prev_prev_active_mesh_vel_exp, indices=sampled_input_pts_idx, dim=1) ## + + # prev_active_mesh_exp = prev_active_mesh.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # prev_active_mesh_exp = batched_index_select(values=prev_active_mesh_exp, indices=sampled_input_pts_idx, dim=1) ### nn_sampled_pts x nn_selected_pts x 3 + # self.timestep_to_prev_selected_active_mesh[prev_pts_ts] = prevactive + ''' Distance to previous active meshes to optimize ''' + # timestep_to_prev_active_mesh_ori + prev_active_mesh_ori = self.timestep_to_prev_active_mesh_ori[prev_pts_ts] ## nn_active_pts x 3 ## + + dist_input_pts_active_mesh_ori = torch.sum( + (sampled_input_pts.detach().unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0)) ** 2, dim=-1 + ) + dist_input_pts_active_mesh_ori = torch.sqrt(dist_input_pts_active_mesh_ori) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh_ori, topk_dist_input_pts_active_mesh_idx_ori = torch.topk(dist_input_pts_active_mesh_ori, k=500, largest=False, dim=-1) + thres_dist_ori, _ = torch.max(topk_dist_input_pts_active_mesh_ori, dim=-1) + weighting_ka_ori = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + weighting_kb_ori = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + + unnormed_weight_active_pts_to_input_pts_ori = weighting_ka_ori * torch.exp(-1. * dist_input_pts_active_mesh_ori * weighting_kb_ori * 50) # + unnormed_weight_active_pts_to_input_pts_ori[unnormed_weight_active_pts_to_input_pts_ori >= thres_dist_ori.unsqueeze(-1)] = 0. 
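+        # --- Illustrative sketch (added note, not part of the original model code) ---
+        # `batched_index_select(values, indices, dim=1)` is used heavily above to pick, for every
+        # sampled point, its own set of active-mesh neighbours: `values` is (N, M, C) and `indices`
+        # is (N, K), giving an (N, K, C) result. The real helper lives elsewhere in this repo; a
+        # minimal gather-based stand-in (assumed equivalent for dim=1) would be:
+        def _sketch_batched_index_select(values, indices):
+            # values: (N, M, C), indices: (N, K) -> (N, K, C) with out[n, k] = values[n, indices[n, k]]
+            idx = indices.unsqueeze(-1).expand(-1, -1, values.size(-1))    # broadcast indices over C
+            return torch.gather(values, 1, idx)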
+ normed_weight_active_pts_to_input_pts_ori = unnormed_weight_active_pts_to_input_pts_ori / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts_ori, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + m_ori = Categorical(normed_weight_active_pts_to_input_pts_ori) # + nn_sampled_input_pts = 500 # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx_ori = m_ori.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + sampled_input_pts_idx_ori = sampled_input_pts_idx_ori.contiguous().transpose(1, 0).contiguous() + + rel_input_pts_active_mesh_ori = sampled_input_pts.detach().unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0).detach() + + prev_active_mesh_ori_exp = prev_active_mesh_ori.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() + prev_active_mesh_ori_exp = batched_index_select(values=prev_active_mesh_ori_exp, indices=sampled_input_pts_idx_ori, dim=1) + # prev_active_mesh_ori_exp: nn_sampled_pts x nn_active_pts x 3 # + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori[prev_pts_ts] = prev_active_mesh_ori_exp.detach() + ''' Distance to previous active meshes to optimize ''' + + + + ''' spring force v2: use the spring force as input ''' + ### determine the spring coefficient ### + spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + # spring_qd = 1. # fix the qd to 1 # spring_qd # # spring_qd # + spring_qd = 0.5 + spring_qd = 0.01 + # dist input pts to active mesh # # + # a threshold distance -(d - d_thres)^3 * k + 2.*(2 - d_thres)**3 --> use the (2 - d_thres) ** 3 * k as the maximum distances -> k sould not be larger than 2. # + #### The k_d(d) in the form of inverse functions #### + # spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) + #### The k_d(d) in the form of polynomial functions #### + spring_kd = spring_qd * ((-(dist_input_pts_active_mesh - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + # wish to use simple functions to achieve the adjustmenet of k-d relations # + + # dist_input_pts_active_mesh # # # # # # + # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") # + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) # tiem_constant + spring_k_val = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) + + spring_kd = spring_kd * time_cons ### get the spring_kd (nn_sampled_pts x nn_act_pts ) #### + spring_force = -1. 
* spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + spring_force = batched_index_select(values=spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = sampled_input_pts.unsqueeze(1) - prev_active_mesh.unsqueeze(0) # prev_active_mesh # + dir_spring_force = batched_index_select(values=dir_spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + spring_force = dir_spring_force * spring_force.unsqueeze(-1) * spring_k_val + ''' spring force v2: use the spring force as input ''' + + + + ''' get the spring force of the reference motion ''' + #### The k_d(d) in the form of inverse functions #### + # spring_kd_ori = spring_qd / (dist_input_pts_active_mesh_ori - self.spring_x_min) + #### The k_d(d) in the form of polynomial functions #### + spring_kd_ori = spring_qd * ((-(dist_input_pts_active_mesh_ori - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + spring_kd_ori = spring_kd_ori * time_cons + spring_force_ori = -1. * spring_kd_ori * (dist_input_pts_active_mesh_ori - self.spring_rest_length) + spring_force_ori = batched_index_select(values=spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + dir_spring_force_ori = sampled_input_pts.unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0) + dir_spring_force_ori = batched_index_select(values=dir_spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + dir_spring_force_ori = dir_spring_force_ori / torch.clamp(torch.norm(dir_spring_force_ori, dim=-1, keepdim=True, p=2), min=1e-9) + spring_force_ori = dir_spring_force_ori * spring_force_ori.unsqueeze(-1) * spring_k_val + ''' get the spring force of the reference motion ''' + + + + ''' spring force v2: use the spring force as input ''' + + + ''' spring force v3: use the spring force as input ''' + transformed_w = self.patch_force_scale_network[0](rel_input_pts_active_mesh) # + transformed_w = self.patch_force_scale_network[1](transformed_w) + glb_transformed_w, _ = torch.max(transformed_w, dim=1, keepdim=True) + # print(f"transformed_w: {transformed_w.size()}, glb_transformed_w: {glb_transformed_w.size()}") + glb_transformed_w = glb_transformed_w.repeat(1, transformed_w.size(1), 1) # + + transformed_w = torch.cat( + [transformed_w, glb_transformed_w], dim=-1 + ) + + force_weighting = self.patch_force_scale_network[2](transformed_w) # + # print(f"before the last step, forces: {forces.size()}") + # forces, _ = torch.max(forces, dim=1) # and force weighting # + force_weighting = self.patch_force_scale_network[3](force_weighting).squeeze(-1) # nn_sampled_pts x nn_active_pts # + force_weighting = F.softmax(force_weighting, dim=-1) ## nn_sampled_pts x nn_active_pts # + ## use the v3 force as the input to the field ## + forces = torch.sum( # # use the spring force as input # + spring_force * force_weighting.unsqueeze(-1), dim=1 ### sum over the force; sum over the force ### + ) + self.timestep_to_spring_forces[prev_pts_ts] = forces + ''' spring force v3: use the spring force as input ''' + + + ''' spring force from the reference trajectory ''' + transformed_w_ori = self.patch_force_scale_network[0](rel_input_pts_active_mesh_ori) + transformed_w_ori = self.patch_force_scale_network[1](transformed_w_ori) + glb_transformed_w_ori, _ = torch.max(transformed_w_ori, dim=1, keepdim=True) + glb_transformed_w_ori = glb_transformed_w_ori.repeat(1, transformed_w_ori.size(1), 1) # + transformed_w_ori 
= torch.cat( + [transformed_w_ori, glb_transformed_w_ori], dim=-1 + ) + force_weighting_ori = self.patch_force_scale_network[2](transformed_w_ori) + force_weighting_ori = self.patch_force_scale_network[3](force_weighting_ori).squeeze(-1) + force_weighting_ori = F.softmax(force_weighting_ori, dim=-1) + forces_ori = torch.sum( + spring_force_ori.detach() * force_weighting.unsqueeze(-1).detach(), dim=1 + ) + self.timestep_to_spring_forces_ori[prev_pts_ts] = forces_ori + ''' spring force from the reference trajectory ''' + + + ''' TODO: a lot to do for this firctional model... ''' + ''' calculate the firctional force ''' + friction_qd = 0.5 + dist_input_pts_active_mesh_sel = batched_index_select(dist_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) + #### The k_d(d) in the form of inverse functions #### + # friction_kd = friction_qd / (dist_input_pts_active_mesh_sel - self.spring_x_min) + #### The k_d(d) in the form of polynomial functions #### + friction_qd = 0.01 + friction_kd = friction_qd * ((-(dist_input_pts_active_mesh_sel - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + friction_kd = friction_kd * time_cons + prev_prev_active_mesh_vel_norm = torch.norm(prev_prev_active_mesh_vel, dim=-1) + friction_force = friction_kd * (self.spring_rest_length - dist_input_pts_active_mesh_sel) * prev_prev_active_mesh_vel_norm # | vel | * (dist - rest_length) * friction_kd # + friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = batched_index_select(values=friction_force, indices=sampled_input_pts_idx, dim=1) # + dir_friction_force = prev_prev_active_mesh_vel + dir_friction_force = dir_friction_force / torch.clamp(torch.norm(dir_friction_force, dim=-1, keepdim=True, p=2), min=1e-9) # + friction_force = dir_friction_force * friction_force.unsqueeze(-1) * friction_k # k * friction_force_scale * friction_force_dir # + friction_force = torch.sum( # friction_force: nn-pts x 3 # + friction_force * force_weighting.unsqueeze(-1), dim=1 + ) + forces = forces + friction_force + ''' calculate the firctional force ''' + + + ''' Embed sdf values ''' + raw_input_pts = input_pts[:, :3] + # if self.embed_fn_fine is not None: # + # input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' # + + ###### [time_cons] is used when calculating buth the spring force and the frictional force ---> convert force to acc ###### + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + ws_unnormed = ws_beta * torch.exp(-1. 
* sampled_defed_input_pts_sdf * ws_alpha) + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + + ''' get velocity and offset related constants ''' + # k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + # k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + ''' get velocity and offset related constants ''' + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons + delta_vel = rigid_acc * k_acc_to_vel + if prev_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[prev_pts_ts - 1].detach() + + + ''' Compute torque, angular acc, angular vel and delta quaternion via forces and the directional offset from the center point to the sampled points ''' + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) ### center_point to the input_pts ### + sampled_pts_torque = torch.cross(forces, center_point_to_sampled_pts, dim=-1) ## nn_sampled_pts x 3 ## + torque = torch.sum( + sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + ) + delta_angular_vel = torque * time_cons + if prev_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[prev_pts_ts - 1].detach() ### (3,) + cur_delta_angle = cur_angular_vel * time_cons + cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + prev_quaternion = self.timestep_to_quaternion[prev_pts_ts].detach() # + cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + self.timestep_to_quaternion[input_pts_ts] = cur_quaternion.detach() + self.timestep_to_angular_vel[prev_pts_ts] = cur_angular_vel.detach() # angular velocity # + self.timestep_to_torque[prev_pts_ts] = torque.detach() + + + + # ws_normed, defed_input_pts_sdf # + self.timestep_to_input_pts[prev_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[prev_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[prev_pts_ts] = forces.detach() + self.timestep_to_ws_normed[prev_pts_ts] = ws_normed.detach() + self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} # # ori input pts # + # self.timestep_to_ori_input_pts_sdf = {} # # + # ori_input_pts, ori_input_pts_sdf # + self.timestep_to_ori_input_pts[prev_pts_ts] = ori_input_pts.detach() + self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), # vel to offset # + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + # "dist_k_b": dist_k_b.detach().cpu()[0].item(), + # "dist_k_a": dist_k_a.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + ## TODO: is it a good updating strategy? ## + # cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + cur_upd_rigid_def = cur_offset.detach() + torch.matmul(prev_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # curupd + if update_tot_def: + self.timestep_to_total_def[input_pts_ts] = cur_upd_rigid_def + + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset # get the offset # + + ## update raw input pts ## + new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + cur_rot_mtx = quaternion_to_matrix(cur_quaternion) # 3 x 3 + # cur_tmp_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) # 3 x 3 rotation matrix # + # np.matmul(new_pts, rot_mtx) + cur_offset # + # new_pts = np.matmul(new_pts, cur_tmp_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### # + # cur_upd_rigid_def_aa = cur_offset + prev_rigid_def.detach() + cur_upd_rigid_def_aa = cur_offset + torch.matmul(prev_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx).squeeze(0) + + + ori_input_pts = torch.matmul(raw_input_pts - cur_upd_rigid_def_aa.unsqueeze(0), cur_rot_mtx.contiguous().transpose(1, 0).contiguous()) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion).detach() + prev_tot_offset = self.timestep_to_total_def[prev_pts_ts].detach() + new_pts = torch.matmul(ori_input_pts, prev_rot_mtx) + prev_tot_offset.unsqueeze(0) + + # # + cur_offset_with_rot = raw_input_pts - new_pts + cur_offset_with_rot = torch.mean(cur_offset_with_rot, dim=0) + self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset_with_rot + + return new_pts + + + + + + +### the points forces ### +class BendingNetworkActiveForceFieldForwardLagV9(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagV9, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene 
editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + # self.actuator_weights = nn.Embedding( # actuator's forces # + # num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + # ) + # torch.nn.init.ones(self.actuator_weights.weight) # + # self.actuator_weights.weight.data = self.actuator_weights.weight.data * 0.2 + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc 
in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? # + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... 
## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. + # torch.nn.init.ones_(self.time_quaternions.weight) # + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, details=None, special_loss_return=False, update_tot_def=True): + ### from input_pts to new pts ### + # wieghting force field # + # prev_pts_ts = input_pts_ts - 1 + + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) + + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 # + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + + friction_qd = 0.1 + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] + init_passive_obj_verts = timestep_to_passive_mesh[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + + # ######## vel for frictions ######### + vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + friction_force = vel_active_mesh * friction_k + forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh 
- cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######### optimize the actuator forces directly ######### + # cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + # cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + # cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) + # forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + # ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) + ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 20) + + + # ws_unnormed = ws_normed_sampled + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + + + ###### sampled input pts to center ####### + # center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() # ## + cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) + + center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + torque = torch.sum( + sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + ) + + + + + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = 
self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() + # cur_delta_quaternion = + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.save_values = { + # 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + # 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return + + ''' Deform input points via the passive rigid deformations ''' + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + # defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # # self.timestep_to_ori_input_pts = {} + # # self.timestep_to_ori_input_pts_sdf = {} + # # ori_input_pts, ori_input_pts_sdf #### input_pts #### + # ori_input_pts = input_pts.clone().detach() + # ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points via the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, # + # prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. * defed_input_pts_sdf.detach() * ws_alpha) # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + ''' Calculate weights for deformed input points ''' + + + # optimizable point weights with fixed spring rules # + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = self.nn_uniformly_sampled_pts + #### uniformly_sampled_pts: nn_sampled_pts x 3 #### + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + # use weighting_network to get weights of those sampled pts # + # expanded_prev_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() + # expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) # if we do not have a kinematics observation? 
# + + expanded_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() ### get + expanded_pts_ts = expanded_pts_ts + input_pts_ts + input_latents = self.bending_latent(expanded_pts_ts) + x = torch.cat([uniformly_sampled_pts, input_latents], dim=-1) + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < self.cur_window_size // 2): + cur_network = self.weighting_network + else: + cur_network = self.split_weighting_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([uniformly_sampled_pts, x], -1) + # x: nn_uniformly_sampled_pts x 1 weights # + x = x.squeeze(-1) + ws_normed = F.softmax(x, dim=0) #### calculate the softmax as weights # + + ### total def copy ## + # prev_rigid_def = self.timestep_to_total_def_copy[prev_pts_ts] # .unsqueeze(0) + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts].detach() + # # + # prev_quaternion = self.timestep_to_quaternion[prev_pts_ts].detach() # + # prev_rot_mtx = quaternion_to_matrix(prev_quaternion) # prev_quaternion + # # + # defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + # defed_uniformly_sampled_pts = torch.matmul(defed_uniformly_sampled_pts, prev_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### inversely rotate the sampled pts # + # defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + # # defed_uniformly_sampled_pts_sdf: nn_sampled_pts # + # minn_sampled_sdf, minn_sampled_sdf_pts_idx = torch.min(defed_uniformly_sampled_pts_sdf, dim=0) ## the pts_idx ## + # passive_center_point = uniformly_sampled_pts[minn_sampled_sdf_pts_idx] ## center of the passive object ## + + + cur_passive_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() + cur_passive_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_rot_mtx = quaternion_to_matrix(cur_passive_quaternion) + + init_passive_obj_verts = timestep_to_passive_mesh[0].detach() + + cur_passive_obj_verts = torch.matmul(init_passive_obj_verts, cur_rot_mtx) + cur_passive_trans.unsqueeze(0) ## nn_pts x 3 ## + passive_center_point = cur_passive_obj_verts.mean(0) + + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. 
* defed_uniformly_sampled_pts_sdf.detach() * ws_alpha * 100) # nn_pts # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) + nn_sampled_input_pts = 20000 + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + # defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + # ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + ws_normed_sampled = ws_normed[sampled_input_pts_idx] + + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + + ''' ### Use points from passive mesh ### ''' + # sampled_input_pts = prev_passive_mesh.clone() + # # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts = sampled_input_pts - self.timestep_to_total_def_copy[prev_pts_ts].unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + # # weighting model via the distance # # defed input pts sdf # + # # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # # distances # the kappa # + # self.weighting_model_ks = nn.Embedding( # k_a and k_b # + # num_embeddings=2, embedding_dim=1 + # ) + # self.spring_rest_length = 2. # + # self.spring_x_min = -2. 
+ # self.spring_qd = nn.Embedding( + # num_embeddings=1, embedding_dim=1 + # ) + # torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # # 1) sample points from the active robot's mesh; + # # 2) calculate forces from sampled points to the action point; + # # 3) use the weight model to calculate weights for each sampled point; # + # # 4) aggregate forces; + # # # + ''' Distance to previous prev meshes to optimize ''' + # to active mesh # + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] ## nn_active_pts x 3 ## # active mesh # + + ##### using the points from active meshes directly #### + ori_input_pts = cur_active_mesh.clone() + sampled_input_pts = cur_active_mesh.clone() + + # if prev_pts_ts == 0: + # prev_prev_active_mesh_vel = torch.zeros_like(prev_active_mesh) + # else: + # # prev_prev_active_mesh_vel = prev_active_mesh - timestep_to_active_mesh[prev_pts_ts - 1] + # #### prev_prev active mehs #### + # # prev_prev_active_mesh = timestep_to_active_mesh[prev_pts_ts - 1] + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + # cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + # prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + # ## distnaces from act_mesh to the prev_prev ### prev_pts_ts ### + # dist_prev_act_mesh_to_prev_prev = torch.sum( + # (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + # ) + # minn_dist_prev_act_mesh_to_cur, minn_idx_dist_prev_act_mesh_to_cur = torch.min( + # dist_prev_act_mesh_to_prev_prev, dim=-1 ## + # ) + # selected_mesh_pts = batched_index_select(values=cur_active_mesh, indices=minn_idx_dist_prev_act_mesh_to_cur, dim=0) + # prev_prev_active_mesh_vel = selected_mesh_pts - prev_active_mesh + + nex_pts_ts = input_pts_ts + 1 + nex_active_mesh = timestep_to_active_mesh[nex_pts_ts] + cur_active_mesh_vel = nex_active_mesh - cur_active_mesh + + # dist_act_mesh_to_nex_ = torch.sum( + # (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + # ) + # cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + # prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + + # dist input pts active + ##### sqrt and the ##### + dist_input_pts_active_mesh = torch.sqrt(dist_input_pts_active_mesh) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh, topk_dist_input_pts_active_mesh_idx = torch.topk(dist_input_pts_active_mesh, k=self.nn_patch_active_pts, largest=False, dim=-1) + thres_dist, _ = torch.max(topk_dist_input_pts_active_mesh, dim=-1) + weighting_ka = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) # + weighting_kb = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # + + unnormed_weight_active_pts_to_input_pts = weighting_ka * torch.exp(-1. * dist_input_pts_active_mesh * weighting_kb * 50) # + unnormed_weight_active_pts_to_input_pts[unnormed_weight_active_pts_to_input_pts > thres_dist.unsqueeze(-1) + 1e-6] = 0. 
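+        # A minimal sketch of the exponential patch weighting computed above, assuming
+        # d is the sampled-point-to-active-mesh distance and k_a, k_b are the learnable
+        # scalars from self.weighting_model_ks (the tensor names below are illustrative only):
+        #   d   = torch.cdist(sampled_pts, active_pts)                    # (N, M) Euclidean distances
+        #   w   = k_a * torch.exp(-50. * k_b * d)                         # unnormalised weights
+        #   w   = w / w.sum(dim=-1, keepdim=True).clamp(min=1e-9)         # normalise per sampled point
+        #   idx = torch.distributions.Categorical(probs=w).sample((K,))   # draw K active-mesh contacts
+        # which mirrors the normalisation and Categorical sampling performed just below.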
+ normed_weight_active_pts_to_input_pts = unnormed_weight_active_pts_to_input_pts / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + m = Categorical(normed_weight_active_pts_to_input_pts) # + nn_sampled_input_pts = self.nn_patch_active_pts # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + # sampled_input_pts_idx = sampled_input_pts_idx.contiguous().transpose(1, 0).contiguous() + + sampled_input_pts_idx = topk_dist_input_pts_active_mesh_idx + + + rel_input_pts_active_mesh = sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0) + # print(f"rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}, sampled_input_pts_idx: {sampled_input_pts_idx.size()}") + rel_input_pts_active_mesh = batched_index_select(values=rel_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) # + + cur_active_mesh_vel_exp = cur_active_mesh_vel.unsqueeze(0).repeat(rel_input_pts_active_mesh.size(0), 1, 1).contiguous() + cur_active_mesh_vel = batched_index_select(values=cur_active_mesh_vel_exp, indices=sampled_input_pts_idx, dim=1) ## + + # prev_active_mesh_exp = prev_active_mesh.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # prev_active_mesh_exp = batched_index_select(values=prev_active_mesh_exp, indices=sampled_input_pts_idx, dim=1) ### nn_sampled_pts x nn_selected_pts x 3 + # self.timestep_to_prev_selected_active_mesh[prev_pts_ts] = prevactive + # ''' Distance to previous active meshes to optimize ''' + # prev_active_mesh_ori = self.timestep_to_prev_active_mesh_ori[prev_pts_ts] ## nn_active_pts x 3 ## + + # dist_input_pts_active_mesh_ori = torch.sum( + # (sampled_input_pts.detach().unsqueeze(1) - cur_active_mesh_vel.unsqueeze(0)) ** 2, dim=-1 + # ) + # dist_input_pts_active_mesh_ori = torch.sqrt(dist_input_pts_active_mesh_ori) # nn_sampled_pts x nn_active_pts # + # topk_dist_input_pts_active_mesh_ori, topk_dist_input_pts_active_mesh_idx_ori = torch.topk(dist_input_pts_active_mesh_ori, k=500, largest=False, dim=-1) + # thres_dist_ori, _ = torch.max(topk_dist_input_pts_active_mesh_ori, dim=-1) + # weighting_ka_ori = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + # weighting_kb_ori = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + + # unnormed_weight_active_pts_to_input_pts_ori = weighting_ka_ori * torch.exp(-1. * dist_input_pts_active_mesh_ori * weighting_kb_ori * 50) # + # unnormed_weight_active_pts_to_input_pts_ori[unnormed_weight_active_pts_to_input_pts_ori >= thres_dist_ori.unsqueeze(-1)] = 0. 
+ # normed_weight_active_pts_to_input_pts_ori = unnormed_weight_active_pts_to_input_pts_ori / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts_ori, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + # m_ori = Categorical(normed_weight_active_pts_to_input_pts_ori) # + # nn_sampled_input_pts = 500 # + # # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + # sampled_input_pts_idx_ori = m_ori.sample(sample_shape=(nn_sampled_input_pts,)) + # # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + # sampled_input_pts_idx_ori = sampled_input_pts_idx_ori.contiguous().transpose(1, 0).contiguous() + + # rel_input_pts_active_mesh_ori = sampled_input_pts.detach().unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0).detach() + + # prev_active_mesh_ori_exp = prev_active_mesh_ori.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() + # prev_active_mesh_ori_exp = batched_index_select(values=prev_active_mesh_ori_exp, indices=sampled_input_pts_idx_ori, dim=1) + # # prev_active_mesh_ori_exp: nn_sampled_pts x nn_active_pts x 3 # + # # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + # self.timestep_to_prev_selected_active_mesh_ori[prev_pts_ts] = prev_active_mesh_ori_exp.detach() + # ''' Distance to previous active meshes to optimize ''' + + + + ''' spring force v2: use the spring force as input ''' + ### determine the spring coefficient ### + spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + # spring_qd = 1. # fix the qd to 1 # spring_qd # # spring_qd # + spring_qd = 0.5 + # dist input pts to active mesh # # + # a threshold distance -(d - d_thres)^3 * k + 2.*(2 - d_thres)**3 --> use the (2 - d_thres) ** 3 * k as the maximum distances -> k should not be larger than 2. # + #### The k_d(d) in the form of inverse functions #### + spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) ### + + #### The k_d(d) in the form of polynomial functions #### + # spring_qd = 0.01 + # spring_kd = spring_qd * ((-(dist_input_pts_active_mesh - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + # wish to use simple functions to achieve the adjustment of k-d relations # # k-d relations # + + # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") # + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) # time_constant + spring_k_val = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) + + spring_kd = spring_kd * time_cons ### get the spring_kd (nn_sampled_pts x nn_act_pts ) #### + spring_force = -1.
* spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + spring_force = batched_index_select(values=spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0) # prev_active_mesh # + dir_spring_force = batched_index_select(values=dir_spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + spring_force = dir_spring_force * spring_force.unsqueeze(-1) * spring_k_val + ''' spring force v2: use the spring force as input ''' + + + + # ''' get the spring force of the reference motion ''' + # #### The k_d(d) in the form of inverse functions #### + # # spring_kd_ori = spring_qd / (dist_input_pts_active_mesh_ori - self.spring_x_min) + # #### The k_d(d) in the form of polynomial functions #### + # spring_kd_ori = spring_qd * ((-(dist_input_pts_active_mesh_ori - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + # spring_kd_ori = spring_kd_ori * time_cons + # spring_force_ori = -1. * spring_kd_ori * (dist_input_pts_active_mesh_ori - self.spring_rest_length) + # spring_force_ori = batched_index_select(values=spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + # dir_spring_force_ori = sampled_input_pts.unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0) + # dir_spring_force_ori = batched_index_select(values=dir_spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + # dir_spring_force_ori = dir_spring_force_ori / torch.clamp(torch.norm(dir_spring_force_ori, dim=-1, keepdim=True, p=2), min=1e-9) + # spring_force_ori = dir_spring_force_ori * spring_force_ori.unsqueeze(-1) * spring_k_val + # ''' get the spring force of the reference motion ''' + ''' spring force v2: use the spring force as input ''' + + + ''' spring force v3: use the spring force as input ''' + transformed_w = self.patch_force_scale_network[0](rel_input_pts_active_mesh) # + transformed_w = self.patch_force_scale_network[1](transformed_w) + glb_transformed_w, _ = torch.max(transformed_w, dim=1, keepdim=True) + # print(f"transformed_w: {transformed_w.size()}, glb_transformed_w: {glb_transformed_w.size()}") + glb_transformed_w = glb_transformed_w.repeat(1, transformed_w.size(1), 1) # + + transformed_w = torch.cat( + [transformed_w, glb_transformed_w], dim=-1 + ) + + force_weighting = self.patch_force_scale_network[2](transformed_w) # + # print(f"before the last step, forces: {forces.size()}") + # forces, _ = torch.max(forces, dim=1) # and force weighting # + force_weighting = self.patch_force_scale_network[3](force_weighting).squeeze(-1) # nn_sampled_pts x nn_active_pts # + force_weighting = F.softmax(force_weighting, dim=-1) ## nn_sampled_pts x nn_active_pts # + ## use the v3 force as the input to the field ## + forces = torch.sum( # # use the spring force as input # + spring_force * force_weighting.unsqueeze(-1), dim=1 ### sum over the force; sum over the force ### + ) + self.timestep_to_spring_forces[input_pts_ts] = forces + ''' spring force v3: use the spring force as input ''' + + + # ''' spring force from the reference trajectory ''' + # transformed_w_ori = self.patch_force_scale_network[0](rel_input_pts_active_mesh_ori) + # transformed_w_ori = self.patch_force_scale_network[1](transformed_w_ori) + # glb_transformed_w_ori, _ = torch.max(transformed_w_ori, dim=1, keepdim=True) + # glb_transformed_w_ori = glb_transformed_w_ori.repeat(1, 
transformed_w_ori.size(1), 1) # + # transformed_w_ori = torch.cat( + # [transformed_w_ori, glb_transformed_w_ori], dim=-1 + # ) + # force_weighting_ori = self.patch_force_scale_network[2](transformed_w_ori) + # force_weighting_ori = self.patch_force_scale_network[3](force_weighting_ori).squeeze(-1) + # force_weighting_ori = F.softmax(force_weighting_ori, dim=-1) + # forces_ori = torch.sum( + # spring_force_ori.detach() * force_weighting.unsqueeze(-1).detach(), dim=1 + # ) + # self.timestep_to_spring_forces_ori[prev_pts_ts] = forces_ori + # ''' spring force from the reference trajectory ''' + + + ''' TODO: a lot to do for this frictional model... ''' + ''' calculate the frictional force ''' + friction_qd = 0.5 + friction_qd = 0.1 + dist_input_pts_active_mesh_sel = batched_index_select(dist_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) + #### The k_d(d) in the form of inverse functions #### + friction_kd = friction_qd / (dist_input_pts_active_mesh_sel - self.spring_x_min) + + #### The k_d(d) in the form of polynomial functions #### + # friction_qd = 0.01 + # friction_kd = friction_qd * ((-(dist_input_pts_active_mesh_sel - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + friction_kd = friction_kd * time_cons + prev_prev_active_mesh_vel_norm = torch.norm(cur_active_mesh_vel, dim=-1) + friction_force = friction_kd * (self.spring_rest_length - dist_input_pts_active_mesh_sel) * prev_prev_active_mesh_vel_norm # | vel | * (rest_length - dist) * friction_kd # + friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = batched_index_select(values=friction_force, indices=sampled_input_pts_idx, dim=1) # + dir_friction_force = cur_active_mesh_vel + dir_friction_force = dir_friction_force / torch.clamp(torch.norm(dir_friction_force, dim=-1, keepdim=True, p=2), min=1e-9) # + friction_force = dir_friction_force * friction_force.unsqueeze(-1) * friction_k # k * friction_force_scale * friction_force_dir # # get the friction force and the friction_k # + friction_force = torch.sum( # friction_force: nn-pts x 3 # + friction_force * force_weighting.unsqueeze(-1), dim=1 + ) + forces = forces + friction_force + forces = friction_force + ''' calculate the frictional force ''' + + + ''' Embed sdf values ''' + # raw_input_pts = input_pts[:, :3] + # if self.embed_fn_fine is not None: # + # input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' # + + ###### [time_cons] is used when calculating both the spring force and the frictional force ---> convert force to acc ###### + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, _ = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + ws_unnormed = ws_beta * torch.exp(-1.
* dist_sampled_pts_to_passive_obj * ws_alpha * 10) + + + # ws_unnormed = ws_normed_sampled + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + + ''' get velocity and offset related constants ''' + # k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + # k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + ''' get velocity and offset related constants ''' + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() + + + ''' Compute torque, angular acc, angular vel and delta quaternion via forces and the directional offset from the center point to the sampled points ''' + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) ### center_point to the input_pts ### + # sampled_pts_torque = torch.cross(forces, center_point_to_sampled_pts, dim=-1) ## nn_sampled_pts x 3 ## + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + torque = torch.sum( + sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + ) + delta_angular_vel = torque * time_cons + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() ### (3,) + cur_delta_angle = cur_angular_vel * time_cons + cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() # angular velocity # + self.timestep_to_torque[input_pts_ts] = torque.detach() + + + + # ws_normed, defed_input_pts_sdf # + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_ws_normed[input_pts_ts] = ws_normed.detach() + # self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} # # ori input pts # + # self.timestep_to_ori_input_pts_sdf = {} # # + # ori_input_pts, ori_input_pts_sdf # + self.timestep_to_ori_input_pts[input_pts_ts] = ori_input_pts.detach() + # self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), # vel to offset # + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + 'friction_k': friction_k.detach().cpu()[0].item(), + 'spring_k_val': spring_k_val.detach().cpu()[0].item(), # spring_k + # "dist_k_b": dist_k_b.detach().cpu()[0].item(), + # "dist_k_a": dist_k_a.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + ## TODO: is it a good updating strategy? ## + # cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # curupd + if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset # get the offset # + + cur_optimizable_total_def = cur_offset + torch.matmul(cur_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_optimizable_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_optimizable_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + + + # cur_rot_mtx = quaternion_to_matrix(cur_quaternion) # 3 x 3 + + # cur_tmp_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) # 3 x 3 rotation matrix # + # np.matmul(new_pts, rot_mtx) + cur_offset # + # new_pts = np.matmul(new_pts, cur_tmp_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### # + # cur_upd_rigid_def_aa = cur_offset + prev_rigid_def.detach() + # cur_upd_rigid_def_aa = cur_offset + torch.matmul(prev_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx).squeeze(0) + + + # ori_input_pts = torch.matmul(raw_input_pts - cur_upd_rigid_def_aa.unsqueeze(0), cur_rot_mtx.contiguous().transpose(1, 0).contiguous()) + # prev_rot_mtx = quaternion_to_matrix(prev_quaternion).detach() + # prev_tot_offset = self.timestep_to_total_def[prev_pts_ts].detach() + # new_pts = torch.matmul(ori_input_pts, prev_rot_mtx) + prev_tot_offset.unsqueeze(0) + + # # + # cur_offset_with_rot = raw_input_pts - new_pts + # cur_offset_with_rot = torch.mean(cur_offset_with_rot, dim=0) + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset_with_rot + + return None + + + + +class BendingNetworkActiveForceFieldForwardLagV10(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + 
rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagV10, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. 
/ (778 * 2)) + + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. / (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? 
# + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. + # torch.nn.init.ones_(self.time_quaternions.weight) # + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + + + + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == 
"sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, details=None, special_loss_return=False, update_tot_def=True): + ### from input_pts to new pts ### + # wieghting force field # + # prev_pts_ts = input_pts_ts - 1 + + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) + + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 # + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + + friction_qd = 0.1 + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> + init_passive_obj_verts = timestep_to_passive_mesh[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # 
vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######### optimize the actuator forces directly ######### + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) + forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) + ####### sharp the weights ####### + + minn_dist_sampled_pts_passive_obj_thres = 0.05 + # minn_dist_sampled_pts_passive_obj_thres = 0.001 + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) + + + + ###### sampled input pts to center ####### + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() # ## + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + # cur_delta_quaternion = + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { 
prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + # 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + # 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return + + ''' Deform input points via the passive rigid deformations ''' + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + # defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # # self.timestep_to_ori_input_pts = {} + # # self.timestep_to_ori_input_pts_sdf = {} + # # ori_input_pts, ori_input_pts_sdf #### input_pts #### + # ori_input_pts = input_pts.clone().detach() + # ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points via the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, # + # prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. * defed_input_pts_sdf.detach() * ws_alpha) # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + ''' Calculate weights for deformed input points ''' + + + # optimizable point weights with fixed spring rules # + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = self.nn_uniformly_sampled_pts + #### uniformly_sampled_pts: nn_sampled_pts x 3 #### + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + # use weighting_network to get weights of those sampled pts # + # expanded_prev_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() + # expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) # if we do not have a kinematics observation? 
# + + expanded_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() ### get + expanded_pts_ts = expanded_pts_ts + input_pts_ts + input_latents = self.bending_latent(expanded_pts_ts) + x = torch.cat([uniformly_sampled_pts, input_latents], dim=-1) + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < self.cur_window_size // 2): + cur_network = self.weighting_network + else: + cur_network = self.split_weighting_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([uniformly_sampled_pts, x], -1) + # x: nn_uniformly_sampled_pts x 1 weights # + x = x.squeeze(-1) + ws_normed = F.softmax(x, dim=0) #### calculate the softmax as weights # + + ### total def copy ## + # prev_rigid_def = self.timestep_to_total_def_copy[prev_pts_ts] # .unsqueeze(0) + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts].detach() + # # + # prev_quaternion = self.timestep_to_quaternion[prev_pts_ts].detach() # + # prev_rot_mtx = quaternion_to_matrix(prev_quaternion) # prev_quaternion + # # + # defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + # defed_uniformly_sampled_pts = torch.matmul(defed_uniformly_sampled_pts, prev_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### inversely rotate the sampled pts # + # defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + # # defed_uniformly_sampled_pts_sdf: nn_sampled_pts # + # minn_sampled_sdf, minn_sampled_sdf_pts_idx = torch.min(defed_uniformly_sampled_pts_sdf, dim=0) ## the pts_idx ## + # passive_center_point = uniformly_sampled_pts[minn_sampled_sdf_pts_idx] ## center of the passive object ## + + + cur_passive_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() + cur_passive_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_rot_mtx = quaternion_to_matrix(cur_passive_quaternion) + + init_passive_obj_verts = timestep_to_passive_mesh[0].detach() + + cur_passive_obj_verts = torch.matmul(init_passive_obj_verts, cur_rot_mtx) + cur_passive_trans.unsqueeze(0) ## nn_pts x 3 ## + passive_center_point = cur_passive_obj_verts.mean(0) + + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. 
* defed_uniformly_sampled_pts_sdf.detach() * ws_alpha * 100) # nn_pts # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) + nn_sampled_input_pts = 20000 + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + # defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + # ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + ws_normed_sampled = ws_normed[sampled_input_pts_idx] + + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + + ''' ### Use points from passive mesh ### ''' + # sampled_input_pts = prev_passive_mesh.clone() + # # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts = sampled_input_pts - self.timestep_to_total_def_copy[prev_pts_ts].unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + # # weighting model via the distance # # defed input pts sdf # + # # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # # distances # the kappa # + # self.weighting_model_ks = nn.Embedding( # k_a and k_b # + # num_embeddings=2, embedding_dim=1 + # ) + # self.spring_rest_length = 2. # + # self.spring_x_min = -2. 
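The commented-out block above and the active-patch weighting computed a few lines below both rely on a distance kernel of the form w = k_a * exp(-d * k_b), normalized over the neighbourhood. The snippet below is a minimal standalone sketch of that kernel only; the gains and tensor shapes are hypothetical stand-ins for the learned weighting_model_ks embeddings and are not part of this patch.

import torch

k_a, k_b = 1.0, 1.0                               # hypothetical gains; learned scalars in the model above
dists = torch.rand(16, 8)                         # e.g. nn_sampled_pts x nn_active_pts distances
w_unnormed = k_a * torch.exp(-dists * k_b)        # closer point pairs receive larger weights
w_normed = w_unnormed / torch.clamp(w_unnormed.sum(dim=-1, keepdim=True), min=1e-9)
print(w_normed.sum(dim=-1))                       # each row now sums to ~1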
+ # self.spring_qd = nn.Embedding( + # num_embeddings=1, embedding_dim=1 + # ) + # torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # # 1) sample points from the active robot's mesh; + # # 2) calculate forces from sampled points to the action point; + # # 3) use the weight model to calculate weights for each sampled point; # + # # 4) aggregate forces; + # # # + ''' Distance to previous prev meshes to optimize ''' + # to active mesh # + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] ## nn_active_pts x 3 ## # active mesh # + + ##### using the points from active meshes directly #### + ori_input_pts = cur_active_mesh.clone() + sampled_input_pts = cur_active_mesh.clone() + + # if prev_pts_ts == 0: + # prev_prev_active_mesh_vel = torch.zeros_like(prev_active_mesh) + # else: + # # prev_prev_active_mesh_vel = prev_active_mesh - timestep_to_active_mesh[prev_pts_ts - 1] + # #### prev_prev active mehs #### + # # prev_prev_active_mesh = timestep_to_active_mesh[prev_pts_ts - 1] + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + # cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + # prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + # ## distnaces from act_mesh to the prev_prev ### prev_pts_ts ### + # dist_prev_act_mesh_to_prev_prev = torch.sum( + # (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + # ) + # minn_dist_prev_act_mesh_to_cur, minn_idx_dist_prev_act_mesh_to_cur = torch.min( + # dist_prev_act_mesh_to_prev_prev, dim=-1 ## + # ) + # selected_mesh_pts = batched_index_select(values=cur_active_mesh, indices=minn_idx_dist_prev_act_mesh_to_cur, dim=0) + # prev_prev_active_mesh_vel = selected_mesh_pts - prev_active_mesh + + nex_pts_ts = input_pts_ts + 1 + nex_active_mesh = timestep_to_active_mesh[nex_pts_ts] + cur_active_mesh_vel = nex_active_mesh - cur_active_mesh + + # dist_act_mesh_to_nex_ = torch.sum( + # (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + # ) + # cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + # prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + + # dist input pts active + ##### sqrt and the ##### + dist_input_pts_active_mesh = torch.sqrt(dist_input_pts_active_mesh) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh, topk_dist_input_pts_active_mesh_idx = torch.topk(dist_input_pts_active_mesh, k=self.nn_patch_active_pts, largest=False, dim=-1) + thres_dist, _ = torch.max(topk_dist_input_pts_active_mesh, dim=-1) + weighting_ka = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) # + weighting_kb = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # + + unnormed_weight_active_pts_to_input_pts = weighting_ka * torch.exp(-1. * dist_input_pts_active_mesh * weighting_kb * 50) # + unnormed_weight_active_pts_to_input_pts[unnormed_weight_active_pts_to_input_pts > thres_dist.unsqueeze(-1) + 1e-6] = 0. 
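The selection above keeps the nn_patch_active_pts nearest active-mesh points per sampled point via torch.topk(..., largest=False), and the per-row values are later gathered with the batched_index_select helper used elsewhere in this file. A minimal sketch of the same selection-and-gather pattern, with hypothetical shapes and torch.gather as a stand-in for that helper:

import torch

pts = torch.rand(16, 3)                                        # hypothetical sampled input points
active = torch.rand(32, 3)                                     # hypothetical active-mesh points
d = torch.cdist(pts, active)                                   # 16 x 32 pairwise distances
topk_d, topk_idx = torch.topk(d, k=4, largest=False, dim=-1)   # 4 nearest active points per row
rel = pts.unsqueeze(1) - active.unsqueeze(0)                   # 16 x 32 x 3 relative offsets
gather_idx = topk_idx.unsqueeze(-1).expand(-1, -1, 3)          # align index with the last dim of rel
patch_rel = torch.gather(rel, dim=1, index=gather_idx)         # 16 x 4 x 3 offsets of the selected patch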
+ normed_weight_active_pts_to_input_pts = unnormed_weight_active_pts_to_input_pts / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + m = Categorical(normed_weight_active_pts_to_input_pts) # + nn_sampled_input_pts = self.nn_patch_active_pts # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + # sampled_input_pts_idx = sampled_input_pts_idx.contiguous().transpose(1, 0).contiguous() + + sampled_input_pts_idx = topk_dist_input_pts_active_mesh_idx + + + rel_input_pts_active_mesh = sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0) + # print(f"rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}, sampled_input_pts_idx: {sampled_input_pts_idx.size()}") + rel_input_pts_active_mesh = batched_index_select(values=rel_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) # + + cur_active_mesh_vel_exp = cur_active_mesh_vel.unsqueeze(0).repeat(rel_input_pts_active_mesh.size(0), 1, 1).contiguous() + cur_active_mesh_vel = batched_index_select(values=cur_active_mesh_vel_exp, indices=sampled_input_pts_idx, dim=1) ## + + # prev_active_mesh_exp = prev_active_mesh.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # prev_active_mesh_exp = batched_index_select(values=prev_active_mesh_exp, indices=sampled_input_pts_idx, dim=1) ### nn_sampled_pts x nn_selected_pts x 3 + # self.timestep_to_prev_selected_active_mesh[prev_pts_ts] = prevactive + # ''' Distance to previous active meshes to optimize ''' + # prev_active_mesh_ori = self.timestep_to_prev_active_mesh_ori[prev_pts_ts] ## nn_active_pts x 3 ## + + # dist_input_pts_active_mesh_ori = torch.sum( + # (sampled_input_pts.detach().unsqueeze(1) - cur_active_mesh_vel.unsqueeze(0)) ** 2, dim=-1 + # ) + # dist_input_pts_active_mesh_ori = torch.sqrt(dist_input_pts_active_mesh_ori) # nn_sampled_pts x nn_active_pts # + # topk_dist_input_pts_active_mesh_ori, topk_dist_input_pts_active_mesh_idx_ori = torch.topk(dist_input_pts_active_mesh_ori, k=500, largest=False, dim=-1) + # thres_dist_ori, _ = torch.max(topk_dist_input_pts_active_mesh_ori, dim=-1) + # weighting_ka_ori = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + # weighting_kb_ori = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + + # unnormed_weight_active_pts_to_input_pts_ori = weighting_ka_ori * torch.exp(-1. * dist_input_pts_active_mesh_ori * weighting_kb_ori * 50) # + # unnormed_weight_active_pts_to_input_pts_ori[unnormed_weight_active_pts_to_input_pts_ori >= thres_dist_ori.unsqueeze(-1)] = 0. 
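The ''' spring force v2 ''' block a few lines below builds an inverse-distance stiffness k_d(d) = q_d / (d - spring_x_min) and a force magnitude -k_d(d) * (d - spring_rest_length) applied along the sampled-point-to-active-point direction. The self-contained sketch below only illustrates that formula; the constants stand in for the learned spring_qd / time_constant / ks_val embeddings and the shapes are made up.

import torch

spring_qd, spring_x_min, rest_len = 0.5, -2.0, 2.0   # hypothetical stand-ins for the learned embeddings
d = torch.rand(16, 4) * 2.0                          # distances from sampled pts to their active patch
k_d = spring_qd / (d - spring_x_min)                 # inverse-distance stiffness: stiffer when closer
f_mag = -k_d * (d - rest_len)                        # repulsive (positive) while d < rest_len
rel_dir = torch.randn(16, 4, 3)                      # hypothetical sampled-pt minus active-pt directions
rel_dir = rel_dir / torch.clamp(rel_dir.norm(dim=-1, keepdim=True), min=1e-9)
spring_force = rel_dir * f_mag.unsqueeze(-1)         # per-pair force vectors, 16 x 4 x 3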
+ # normed_weight_active_pts_to_input_pts_ori = unnormed_weight_active_pts_to_input_pts_ori / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts_ori, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + # m_ori = Categorical(normed_weight_active_pts_to_input_pts_ori) # + # nn_sampled_input_pts = 500 # + # # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + # sampled_input_pts_idx_ori = m_ori.sample(sample_shape=(nn_sampled_input_pts,)) + # # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + # sampled_input_pts_idx_ori = sampled_input_pts_idx_ori.contiguous().transpose(1, 0).contiguous() + + # rel_input_pts_active_mesh_ori = sampled_input_pts.detach().unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0).detach() + + # prev_active_mesh_ori_exp = prev_active_mesh_ori.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() + # prev_active_mesh_ori_exp = batched_index_select(values=prev_active_mesh_ori_exp, indices=sampled_input_pts_idx_ori, dim=1) + # # prev_active_mesh_ori_exp: nn_sampled_pts x nn_active_pts x 3 # + # # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + # self.timestep_to_prev_selected_active_mesh_ori[prev_pts_ts] = prev_active_mesh_ori_exp.detach() + # ''' Distance to previous active meshes to optimize ''' + + + + ''' spring force v2: use the spring force as input ''' + ### determine the spring coefficient ### + spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + # spring_qd = 1. # fix the qd to 1 # spring_qd # # spring_qd # + spring_qd = 0.5 + # dist input pts to active mesh # # + # a threshold distance -(d - d_thres)^3 * k + 2.*(2 - d_thres)**3 --> use the (2 - d_thres) ** 3 * k as the maximum distances -> k sould not be larger than 2. # + #### The k_d(d) in the form of inverse functions #### + spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) ### + + #### The k_d(d) in the form of polynomial functions #### + # spring_qd = 0.01 + # spring_kd = spring_qd * ((-(dist_input_pts_active_mesh - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + # wish to use simple functions to achieve the adjustmenet of k-d relations # # k-d relations # + + # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") # + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) # tiem_constant + spring_k_val = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) + + spring_kd = spring_kd * time_cons ### get the spring_kd (nn_sampled_pts x nn_act_pts ) #### + spring_force = -1. 
* spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + spring_force = batched_index_select(values=spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0) # prev_active_mesh # + dir_spring_force = batched_index_select(values=dir_spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + spring_force = dir_spring_force * spring_force.unsqueeze(-1) * spring_k_val + ''' spring force v2: use the spring force as input ''' + + + + # ''' get the spring force of the reference motion ''' + # #### The k_d(d) in the form of inverse functions #### + # # spring_kd_ori = spring_qd / (dist_input_pts_active_mesh_ori - self.spring_x_min) + # #### The k_d(d) in the form of polynomial functions #### + # spring_kd_ori = spring_qd * ((-(dist_input_pts_active_mesh_ori - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + # spring_kd_ori = spring_kd_ori * time_cons + # spring_force_ori = -1. * spring_kd_ori * (dist_input_pts_active_mesh_ori - self.spring_rest_length) + # spring_force_ori = batched_index_select(values=spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + # dir_spring_force_ori = sampled_input_pts.unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0) + # dir_spring_force_ori = batched_index_select(values=dir_spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + # dir_spring_force_ori = dir_spring_force_ori / torch.clamp(torch.norm(dir_spring_force_ori, dim=-1, keepdim=True, p=2), min=1e-9) + # spring_force_ori = dir_spring_force_ori * spring_force_ori.unsqueeze(-1) * spring_k_val + # ''' get the spring force of the reference motion ''' + ''' spring force v2: use the spring force as input ''' + + + ''' spring force v3: use the spring force as input ''' + transformed_w = self.patch_force_scale_network[0](rel_input_pts_active_mesh) # + transformed_w = self.patch_force_scale_network[1](transformed_w) + glb_transformed_w, _ = torch.max(transformed_w, dim=1, keepdim=True) + # print(f"transformed_w: {transformed_w.size()}, glb_transformed_w: {glb_transformed_w.size()}") + glb_transformed_w = glb_transformed_w.repeat(1, transformed_w.size(1), 1) # + + transformed_w = torch.cat( + [transformed_w, glb_transformed_w], dim=-1 + ) + + force_weighting = self.patch_force_scale_network[2](transformed_w) # + # print(f"before the last step, forces: {forces.size()}") + # forces, _ = torch.max(forces, dim=1) # and force weighting # + force_weighting = self.patch_force_scale_network[3](force_weighting).squeeze(-1) # nn_sampled_pts x nn_active_pts # + force_weighting = F.softmax(force_weighting, dim=-1) ## nn_sampled_pts x nn_active_pts # + ## use the v3 force as the input to the field ## + forces = torch.sum( # # use the spring force as input # + spring_force * force_weighting.unsqueeze(-1), dim=1 ### sum over the force; sum over the force ### + ) + self.timestep_to_spring_forces[input_pts_ts] = forces + ''' spring force v3: use the spring force as input ''' + + + # ''' spring force from the reference trajectory ''' + # transformed_w_ori = self.patch_force_scale_network[0](rel_input_pts_active_mesh_ori) + # transformed_w_ori = self.patch_force_scale_network[1](transformed_w_ori) + # glb_transformed_w_ori, _ = torch.max(transformed_w_ori, dim=1, keepdim=True) + # glb_transformed_w_ori = glb_transformed_w_ori.repeat(1, 
transformed_w_ori.size(1), 1) # + # transformed_w_ori = torch.cat( + # [transformed_w_ori, glb_transformed_w_ori], dim=-1 + # ) + # force_weighting_ori = self.patch_force_scale_network[2](transformed_w_ori) + # force_weighting_ori = self.patch_force_scale_network[3](force_weighting_ori).squeeze(-1) + # force_weighting_ori = F.softmax(force_weighting_ori, dim=-1) + # forces_ori = torch.sum( + # spring_force_ori.detach() * force_weighting.unsqueeze(-1).detach(), dim=1 + # ) + # self.timestep_to_spring_forces_ori[prev_pts_ts] = forces_ori + # ''' spring force from the reference trajectory ''' + + + ''' TODO: a lot to do for this firctional model... ''' + ''' calculate the firctional force ''' + friction_qd = 0.5 + friction_qd = 0.1 + dist_input_pts_active_mesh_sel = batched_index_select(dist_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) + #### The k_d(d) in the form of inverse functions #### + friction_kd = friction_qd / (dist_input_pts_active_mesh_sel - self.spring_x_min) + + #### The k_d(d) in the form of polynomial functions #### + # friction_qd = 0.01 + # friction_kd = friction_qd * ((-(dist_input_pts_active_mesh_sel - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + friction_kd = friction_kd * time_cons + prev_prev_active_mesh_vel_norm = torch.norm(cur_active_mesh_vel, dim=-1) + friction_force = friction_kd * (self.spring_rest_length - dist_input_pts_active_mesh_sel) * prev_prev_active_mesh_vel_norm # | vel | * (dist - rest_length) * friction_kd # + friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = batched_index_select(values=friction_force, indices=sampled_input_pts_idx, dim=1) # + dir_friction_force = cur_active_mesh_vel + dir_friction_force = dir_friction_force / torch.clamp(torch.norm(dir_friction_force, dim=-1, keepdim=True, p=2), min=1e-9) # + friction_force = dir_friction_force * friction_force.unsqueeze(-1) * friction_k # k * friction_force_scale * friction_force_dir # # get the friction force and the frictionk # + friction_force = torch.sum( # friction_force: nn-pts x 3 # + friction_force * force_weighting.unsqueeze(-1), dim=1 + ) + forces = forces + friction_force + forces = friction_force + ''' calculate the firctional force ''' + + + ''' Embed sdf values ''' + # raw_input_pts = input_pts[:, :3] + # if self.embed_fn_fine is not None: # + # input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' # + + ###### [time_cons] is used when calculating buth the spring force and the frictional force ---> convert force to acc ###### + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, _ = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha * 10) + + + # ws_unnormed = ws_normed_sampled + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + + ''' get velocity and offset related constants ''' + # k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + # k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + ''' get velocity and offset related constants ''' + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() + + + ''' Compute torque, angular acc, angular vel and delta quaternion via forces and the directional offset from the center point to the sampled points ''' + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) ### center_point to the input_pts ### + # sampled_pts_torque = torch.cross(forces, center_point_to_sampled_pts, dim=-1) ## nn_sampled_pts x 3 ## + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + torque = torch.sum( + sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + ) + delta_angular_vel = torque * time_cons + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() ### (3,) + cur_delta_angle = cur_angular_vel * time_cons + cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() # angular velocity # + self.timestep_to_torque[input_pts_ts] = torque.detach() + + + + # ws_normed, defed_input_pts_sdf # + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_ws_normed[input_pts_ts] = ws_normed.detach() + # self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} # # ori input pts # + # self.timestep_to_ori_input_pts_sdf = {} # # + # ori_input_pts, ori_input_pts_sdf # + self.timestep_to_ori_input_pts[input_pts_ts] = ori_input_pts.detach() + # self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), # vel to offset # + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + 'friction_k': friction_k.detach().cpu()[0].item(), + 'spring_k_val': spring_k_val.detach().cpu()[0].item(), # spring_k + # "dist_k_b": dist_k_b.detach().cpu()[0].item(), + # "dist_k_a": dist_k_a.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + ## TODO: is it a good updating strategy? ## + # cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # curupd + if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset # get the offset # + + cur_optimizable_total_def = cur_offset + torch.matmul(cur_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_optimizable_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_optimizable_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + + + # cur_rot_mtx = quaternion_to_matrix(cur_quaternion) # 3 x 3 + + # cur_tmp_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) # 3 x 3 rotation matrix # + # np.matmul(new_pts, rot_mtx) + cur_offset # + # new_pts = np.matmul(new_pts, cur_tmp_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### # + # cur_upd_rigid_def_aa = cur_offset + prev_rigid_def.detach() + # cur_upd_rigid_def_aa = cur_offset + torch.matmul(prev_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx).squeeze(0) + + + # ori_input_pts = torch.matmul(raw_input_pts - cur_upd_rigid_def_aa.unsqueeze(0), cur_rot_mtx.contiguous().transpose(1, 0).contiguous()) + # prev_rot_mtx = quaternion_to_matrix(prev_quaternion).detach() + # prev_tot_offset = self.timestep_to_total_def[prev_pts_ts].detach() + # new_pts = torch.matmul(ori_input_pts, prev_rot_mtx) + prev_tot_offset.unsqueeze(0) + + # # + # cur_offset_with_rot = raw_input_pts - new_pts + # cur_offset_with_rot = torch.mean(cur_offset_with_rot, dim=0) + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset_with_rot + + return None + + + + +class BendingNetworkActiveForceFieldForwardLagV11(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + 
rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagV11, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.static_friction_mu = 1. + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + # self.contact_spring_rest_length = 2. + # self.spring_ks_values = nn.Embedding( + # num_embeddings=2, embedding_dim=1 + # ) + # torch.nn.init.ones_(self.spring_ks_values.weight) + # self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.5 + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. 
/ (778 * 2)) + + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. / (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? 
# + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. + # torch.nn.init.ones_(self.time_quaternions.weight) # + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + + + # fields # + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if 
self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True): + ### from input_pts to new pts ### + # wieghting force field # + # prev_pts_ts = input_pts_ts - 1 + + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) + + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 # + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + + friction_qd = 0.1 + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> + # sampled_input_pts_normals = timesteptopassivemehsn + init_passive_obj_verts = timestep_to_passive_mesh[0] + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() ## transform the normals ## + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + # active # + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + + # ######## vel for frictions ######### + # 
vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######### optimize the actuator forces directly ######### + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) + forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + # cur_passive_obj_ns # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] ### nn_sampled_pts x 3 -> the normal direction of the nearest passive object point ### + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + + ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) + ####### sharp the weights ####### + + minn_dist_sampled_pts_passive_obj_thres = 0.05 + # minn_dist_sampled_pts_passive_obj_thres = 0.001 + minn_dist_sampled_pts_passive_obj_thres = 0.0001 + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + ### + # rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + # dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + # dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. 
* dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] + # # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + # contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + # contact_force_d = -contact_spring_ka * (dist_sampled_pts_to_passive_obj - self.contact_spring_rest_length) # + # vel_sampled_pts = nex_active_mesh - cur_active_mesh + # tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + # tangential_forces = vel_sampled_pts * tangential_ks + # forces = tangential_forces + contact_force_d + # # + + + ''' Decompose forces and calculate penalty froces ''' + # penalty_dot_forces_normals, penalty_friction_constraint # + # get the forces -> decompose forces # + dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + tangential_forces = forces - forces_along_normals # tangential forces # ## tangential forces ## + + penalty_dot_forces_normals = dot_forces_normals ** 2 + penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) + + norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + norm_along_normals_forces = torch.norm(forces_along_normals, dim=-1, p=2) # nn_sampled_pts ## + penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. + penalty_friction_constraint = torch.mean(penalty_friction_constraint) + self.penalty_dot_forces_normals = penalty_dot_forces_normals + self.penalty_friction_constraint = penalty_friction_constraint + + + rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc + + + + ###### sampled input pts to center ####### + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() # ## + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + 
cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + # cur_delta_quaternion = + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + # 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + # 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return + + ''' Deform input points via the passive rigid deformations ''' + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + # defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # # self.timestep_to_ori_input_pts = {} + # # self.timestep_to_ori_input_pts_sdf = {} + # # ori_input_pts, ori_input_pts_sdf #### input_pts #### + # ori_input_pts = input_pts.clone().detach() + # ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points via the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, # + # prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. * defed_input_pts_sdf.detach() * ws_alpha) # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + ''' Calculate weights for deformed input points ''' + + + # optimizable point weights with fixed spring rules # + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = self.nn_uniformly_sampled_pts + #### uniformly_sampled_pts: nn_sampled_pts x 3 #### + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + # use weighting_network to get weights of those sampled pts # + # expanded_prev_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() + # expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) # if we do not have a kinematics observation? 
# + + expanded_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() ### get + expanded_pts_ts = expanded_pts_ts + input_pts_ts + input_latents = self.bending_latent(expanded_pts_ts) + x = torch.cat([uniformly_sampled_pts, input_latents], dim=-1) + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < self.cur_window_size // 2): + cur_network = self.weighting_network + else: + cur_network = self.split_weighting_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([uniformly_sampled_pts, x], -1) + # x: nn_uniformly_sampled_pts x 1 weights # + x = x.squeeze(-1) + ws_normed = F.softmax(x, dim=0) #### calculate the softmax as weights # + + ### total def copy ## + # prev_rigid_def = self.timestep_to_total_def_copy[prev_pts_ts] # .unsqueeze(0) + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts].detach() + # # + # prev_quaternion = self.timestep_to_quaternion[prev_pts_ts].detach() # + # prev_rot_mtx = quaternion_to_matrix(prev_quaternion) # prev_quaternion + # # + # defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + # defed_uniformly_sampled_pts = torch.matmul(defed_uniformly_sampled_pts, prev_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### inversely rotate the sampled pts # + # defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + # # defed_uniformly_sampled_pts_sdf: nn_sampled_pts # + # minn_sampled_sdf, minn_sampled_sdf_pts_idx = torch.min(defed_uniformly_sampled_pts_sdf, dim=0) ## the pts_idx ## + # passive_center_point = uniformly_sampled_pts[minn_sampled_sdf_pts_idx] ## center of the passive object ## + + + cur_passive_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() + cur_passive_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_rot_mtx = quaternion_to_matrix(cur_passive_quaternion) + + init_passive_obj_verts = timestep_to_passive_mesh[0].detach() + + cur_passive_obj_verts = torch.matmul(init_passive_obj_verts, cur_rot_mtx) + cur_passive_trans.unsqueeze(0) ## nn_pts x 3 ## + passive_center_point = cur_passive_obj_verts.mean(0) + + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. 
* defed_uniformly_sampled_pts_sdf.detach() * ws_alpha * 100) # nn_pts # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) + nn_sampled_input_pts = 20000 + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + # defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + # ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + ws_normed_sampled = ws_normed[sampled_input_pts_idx] + + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + + ''' ### Use points from passive mesh ### ''' + # sampled_input_pts = prev_passive_mesh.clone() + # # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts = sampled_input_pts - self.timestep_to_total_def_copy[prev_pts_ts].unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + # # weighting model via the distance # # defed input pts sdf # + # # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # # distances # the kappa # + # self.weighting_model_ks = nn.Embedding( # k_a and k_b # + # num_embeddings=2, embedding_dim=1 + # ) + # self.spring_rest_length = 2. # + # self.spring_x_min = -2. 
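+        # --- Editor's note (illustrative sketch; the numbers are assumptions, not config values) ---
+        # The commented-out block here sketches a distance-based weighting w = k_a * exp(-d * k_b):
+        # with k_a = 1. and k_b = 1., a point at d = 0.5 gets exp(-0.5) ~= 0.61 while one at
+        # d = 2.0 gets exp(-2.0) ~= 0.14, so nearer points dominate once the weights are
+        # normalized by their sum (as done for ws_normed elsewhere in this method).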
+ # self.spring_qd = nn.Embedding( + # num_embeddings=1, embedding_dim=1 + # ) + # torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # # 1) sample points from the active robot's mesh; + # # 2) calculate forces from sampled points to the action point; + # # 3) use the weight model to calculate weights for each sampled point; # + # # 4) aggregate forces; + # # # + ''' Distance to previous prev meshes to optimize ''' + # to active mesh # + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] ## nn_active_pts x 3 ## # active mesh # + + ##### using the points from active meshes directly #### + ori_input_pts = cur_active_mesh.clone() + sampled_input_pts = cur_active_mesh.clone() + + # if prev_pts_ts == 0: + # prev_prev_active_mesh_vel = torch.zeros_like(prev_active_mesh) + # else: + # # prev_prev_active_mesh_vel = prev_active_mesh - timestep_to_active_mesh[prev_pts_ts - 1] + # #### prev_prev active mehs #### + # # prev_prev_active_mesh = timestep_to_active_mesh[prev_pts_ts - 1] + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + # cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + # prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + # ## distnaces from act_mesh to the prev_prev ### prev_pts_ts ### + # dist_prev_act_mesh_to_prev_prev = torch.sum( + # (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + # ) + # minn_dist_prev_act_mesh_to_cur, minn_idx_dist_prev_act_mesh_to_cur = torch.min( + # dist_prev_act_mesh_to_prev_prev, dim=-1 ## + # ) + # selected_mesh_pts = batched_index_select(values=cur_active_mesh, indices=minn_idx_dist_prev_act_mesh_to_cur, dim=0) + # prev_prev_active_mesh_vel = selected_mesh_pts - prev_active_mesh + + nex_pts_ts = input_pts_ts + 1 + nex_active_mesh = timestep_to_active_mesh[nex_pts_ts] + cur_active_mesh_vel = nex_active_mesh - cur_active_mesh + + # dist_act_mesh_to_nex_ = torch.sum( + # (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + # ) + # cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + # prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + + # dist input pts active + ##### sqrt and the ##### + dist_input_pts_active_mesh = torch.sqrt(dist_input_pts_active_mesh) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh, topk_dist_input_pts_active_mesh_idx = torch.topk(dist_input_pts_active_mesh, k=self.nn_patch_active_pts, largest=False, dim=-1) + thres_dist, _ = torch.max(topk_dist_input_pts_active_mesh, dim=-1) + weighting_ka = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) # + weighting_kb = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # + + unnormed_weight_active_pts_to_input_pts = weighting_ka * torch.exp(-1. * dist_input_pts_active_mesh * weighting_kb * 50) # + unnormed_weight_active_pts_to_input_pts[unnormed_weight_active_pts_to_input_pts > thres_dist.unsqueeze(-1) + 1e-6] = 0. 
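+        # --- Editor's note (describes the step above; no new behavior) ---
+        # Each sampled point keeps only its k = self.nn_patch_active_pts nearest active-mesh
+        # points: the per-point top-k radius (thres_dist) zeroes out all farther entries, so the
+        # normalization below turns each row into a sparse distribution over a local patch of
+        # the active mesh, from which contact candidates are then drawn/selected.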
+ normed_weight_active_pts_to_input_pts = unnormed_weight_active_pts_to_input_pts / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + m = Categorical(normed_weight_active_pts_to_input_pts) # + nn_sampled_input_pts = self.nn_patch_active_pts # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + # sampled_input_pts_idx = sampled_input_pts_idx.contiguous().transpose(1, 0).contiguous() + + sampled_input_pts_idx = topk_dist_input_pts_active_mesh_idx + + + rel_input_pts_active_mesh = sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0) + # print(f"rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}, sampled_input_pts_idx: {sampled_input_pts_idx.size()}") + rel_input_pts_active_mesh = batched_index_select(values=rel_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) # + + cur_active_mesh_vel_exp = cur_active_mesh_vel.unsqueeze(0).repeat(rel_input_pts_active_mesh.size(0), 1, 1).contiguous() + cur_active_mesh_vel = batched_index_select(values=cur_active_mesh_vel_exp, indices=sampled_input_pts_idx, dim=1) ## + + # prev_active_mesh_exp = prev_active_mesh.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # prev_active_mesh_exp = batched_index_select(values=prev_active_mesh_exp, indices=sampled_input_pts_idx, dim=1) ### nn_sampled_pts x nn_selected_pts x 3 + # self.timestep_to_prev_selected_active_mesh[prev_pts_ts] = prevactive + # ''' Distance to previous active meshes to optimize ''' + # prev_active_mesh_ori = self.timestep_to_prev_active_mesh_ori[prev_pts_ts] ## nn_active_pts x 3 ## + + # dist_input_pts_active_mesh_ori = torch.sum( + # (sampled_input_pts.detach().unsqueeze(1) - cur_active_mesh_vel.unsqueeze(0)) ** 2, dim=-1 + # ) + # dist_input_pts_active_mesh_ori = torch.sqrt(dist_input_pts_active_mesh_ori) # nn_sampled_pts x nn_active_pts # + # topk_dist_input_pts_active_mesh_ori, topk_dist_input_pts_active_mesh_idx_ori = torch.topk(dist_input_pts_active_mesh_ori, k=500, largest=False, dim=-1) + # thres_dist_ori, _ = torch.max(topk_dist_input_pts_active_mesh_ori, dim=-1) + # weighting_ka_ori = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + # weighting_kb_ori = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + + # unnormed_weight_active_pts_to_input_pts_ori = weighting_ka_ori * torch.exp(-1. * dist_input_pts_active_mesh_ori * weighting_kb_ori * 50) # + # unnormed_weight_active_pts_to_input_pts_ori[unnormed_weight_active_pts_to_input_pts_ori >= thres_dist_ori.unsqueeze(-1)] = 0. 
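+        # --- Editor's note ---
+        # The commented-out "_ori" lines around this point mirror the live weighting above for the
+        # reference trajectory; they are currently disabled and kept only for reference.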
+ # normed_weight_active_pts_to_input_pts_ori = unnormed_weight_active_pts_to_input_pts_ori / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts_ori, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + # m_ori = Categorical(normed_weight_active_pts_to_input_pts_ori) # + # nn_sampled_input_pts = 500 # + # # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + # sampled_input_pts_idx_ori = m_ori.sample(sample_shape=(nn_sampled_input_pts,)) + # # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + # sampled_input_pts_idx_ori = sampled_input_pts_idx_ori.contiguous().transpose(1, 0).contiguous() + + # rel_input_pts_active_mesh_ori = sampled_input_pts.detach().unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0).detach() + + # prev_active_mesh_ori_exp = prev_active_mesh_ori.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() + # prev_active_mesh_ori_exp = batched_index_select(values=prev_active_mesh_ori_exp, indices=sampled_input_pts_idx_ori, dim=1) + # # prev_active_mesh_ori_exp: nn_sampled_pts x nn_active_pts x 3 # + # # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + # self.timestep_to_prev_selected_active_mesh_ori[prev_pts_ts] = prev_active_mesh_ori_exp.detach() + # ''' Distance to previous active meshes to optimize ''' + + + + ''' spring force v2: use the spring force as input ''' + ### determine the spring coefficient ### + spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + # spring_qd = 1. # fix the qd to 1 # spring_qd # # spring_qd # + spring_qd = 0.5 + # dist input pts to active mesh # # + # a threshold distance -(d - d_thres)^3 * k + 2.*(2 - d_thres)**3 --> use the (2 - d_thres) ** 3 * k as the maximum distances -> k sould not be larger than 2. # + #### The k_d(d) in the form of inverse functions #### + spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) ### + + #### The k_d(d) in the form of polynomial functions #### + # spring_qd = 0.01 + # spring_kd = spring_qd * ((-(dist_input_pts_active_mesh - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + # wish to use simple functions to achieve the adjustmenet of k-d relations # # k-d relations # + + # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") # + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) # tiem_constant + spring_k_val = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) + + spring_kd = spring_kd * time_cons ### get the spring_kd (nn_sampled_pts x nn_act_pts ) #### + spring_force = -1. 
* spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + spring_force = batched_index_select(values=spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0) # prev_active_mesh # + dir_spring_force = batched_index_select(values=dir_spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + spring_force = dir_spring_force * spring_force.unsqueeze(-1) * spring_k_val + ''' spring force v2: use the spring force as input ''' + + + + # ''' get the spring force of the reference motion ''' + # #### The k_d(d) in the form of inverse functions #### + # # spring_kd_ori = spring_qd / (dist_input_pts_active_mesh_ori - self.spring_x_min) + # #### The k_d(d) in the form of polynomial functions #### + # spring_kd_ori = spring_qd * ((-(dist_input_pts_active_mesh_ori - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + # spring_kd_ori = spring_kd_ori * time_cons + # spring_force_ori = -1. * spring_kd_ori * (dist_input_pts_active_mesh_ori - self.spring_rest_length) + # spring_force_ori = batched_index_select(values=spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + # dir_spring_force_ori = sampled_input_pts.unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0) + # dir_spring_force_ori = batched_index_select(values=dir_spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + # dir_spring_force_ori = dir_spring_force_ori / torch.clamp(torch.norm(dir_spring_force_ori, dim=-1, keepdim=True, p=2), min=1e-9) + # spring_force_ori = dir_spring_force_ori * spring_force_ori.unsqueeze(-1) * spring_k_val + # ''' get the spring force of the reference motion ''' + ''' spring force v2: use the spring force as input ''' + + + ''' spring force v3: use the spring force as input ''' + transformed_w = self.patch_force_scale_network[0](rel_input_pts_active_mesh) # + transformed_w = self.patch_force_scale_network[1](transformed_w) + glb_transformed_w, _ = torch.max(transformed_w, dim=1, keepdim=True) + # print(f"transformed_w: {transformed_w.size()}, glb_transformed_w: {glb_transformed_w.size()}") + glb_transformed_w = glb_transformed_w.repeat(1, transformed_w.size(1), 1) # + + transformed_w = torch.cat( + [transformed_w, glb_transformed_w], dim=-1 + ) + + force_weighting = self.patch_force_scale_network[2](transformed_w) # + # print(f"before the last step, forces: {forces.size()}") + # forces, _ = torch.max(forces, dim=1) # and force weighting # + force_weighting = self.patch_force_scale_network[3](force_weighting).squeeze(-1) # nn_sampled_pts x nn_active_pts # + force_weighting = F.softmax(force_weighting, dim=-1) ## nn_sampled_pts x nn_active_pts # + ## use the v3 force as the input to the field ## + forces = torch.sum( # # use the spring force as input # + spring_force * force_weighting.unsqueeze(-1), dim=1 ### sum over the force; sum over the force ### + ) + self.timestep_to_spring_forces[input_pts_ts] = forces + ''' spring force v3: use the spring force as input ''' + + + # ''' spring force from the reference trajectory ''' + # transformed_w_ori = self.patch_force_scale_network[0](rel_input_pts_active_mesh_ori) + # transformed_w_ori = self.patch_force_scale_network[1](transformed_w_ori) + # glb_transformed_w_ori, _ = torch.max(transformed_w_ori, dim=1, keepdim=True) + # glb_transformed_w_ori = glb_transformed_w_ori.repeat(1, 
transformed_w_ori.size(1), 1) # + # transformed_w_ori = torch.cat( + # [transformed_w_ori, glb_transformed_w_ori], dim=-1 + # ) + # force_weighting_ori = self.patch_force_scale_network[2](transformed_w_ori) + # force_weighting_ori = self.patch_force_scale_network[3](force_weighting_ori).squeeze(-1) + # force_weighting_ori = F.softmax(force_weighting_ori, dim=-1) + # forces_ori = torch.sum( + # spring_force_ori.detach() * force_weighting.unsqueeze(-1).detach(), dim=1 + # ) + # self.timestep_to_spring_forces_ori[prev_pts_ts] = forces_ori + # ''' spring force from the reference trajectory ''' + + + ''' TODO: a lot to do for this frictional model... ''' + ''' calculate the frictional force ''' + friction_qd = 0.5 + friction_qd = 0.1 + dist_input_pts_active_mesh_sel = batched_index_select(dist_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) + #### The k_d(d) in the form of inverse functions #### + friction_kd = friction_qd / (dist_input_pts_active_mesh_sel - self.spring_x_min) + + #### The k_d(d) in the form of polynomial functions #### + # friction_qd = 0.01 + # friction_kd = friction_qd * ((-(dist_input_pts_active_mesh_sel - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + friction_kd = friction_kd * time_cons + prev_prev_active_mesh_vel_norm = torch.norm(cur_active_mesh_vel, dim=-1) + friction_force = friction_kd * (self.spring_rest_length - dist_input_pts_active_mesh_sel) * prev_prev_active_mesh_vel_norm # | vel | * (dist - rest_length) * friction_kd # + friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = batched_index_select(values=friction_force, indices=sampled_input_pts_idx, dim=1) # + dir_friction_force = cur_active_mesh_vel + dir_friction_force = dir_friction_force / torch.clamp(torch.norm(dir_friction_force, dim=-1, keepdim=True, p=2), min=1e-9) # + friction_force = dir_friction_force * friction_force.unsqueeze(-1) * friction_k # k * friction_force_scale * friction_force_dir # # get the friction force and the friction_k # + friction_force = torch.sum( # friction_force: nn-pts x 3 # + friction_force * force_weighting.unsqueeze(-1), dim=1 + ) + forces = forces + friction_force + forces = friction_force + ''' calculate the frictional force ''' + + + ''' Embed sdf values ''' + # raw_input_pts = input_pts[:, :3] + # if self.embed_fn_fine is not None: # + # input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' # + + ###### [time_cons] is used when calculating both the spring force and the frictional force ---> convert force to acc ###### + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, _ = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + ws_unnormed = ws_beta * torch.exp(-1.
* dist_sampled_pts_to_passive_obj * ws_alpha * 10) + + + # ws_unnormed = ws_normed_sampled + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + + ''' get velocity and offset related constants ''' + # k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + # k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + ''' get velocity and offset related constants ''' + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() + + + ''' Compute torque, angular acc, angular vel and delta quaternion via forces and the directional offset from the center point to the sampled points ''' + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) ### center_point to the input_pts ### + # sampled_pts_torque = torch.cross(forces, center_point_to_sampled_pts, dim=-1) ## nn_sampled_pts x 3 ## + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + torque = torch.sum( + sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + ) + delta_angular_vel = torque * time_cons + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() ### (3,) + cur_delta_angle = cur_angular_vel * time_cons + cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() # angular velocity # + self.timestep_to_torque[input_pts_ts] = torque.detach() + + + + # ws_normed, defed_input_pts_sdf # + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_ws_normed[input_pts_ts] = ws_normed.detach() + # self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} # # ori input pts # + # self.timestep_to_ori_input_pts_sdf = {} # # + # ori_input_pts, ori_input_pts_sdf # + self.timestep_to_ori_input_pts[input_pts_ts] = ori_input_pts.detach() + # self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), # vel to offset # + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + 'friction_k': friction_k.detach().cpu()[0].item(), + 'spring_k_val': spring_k_val.detach().cpu()[0].item(), # spring_k + # "dist_k_b": dist_k_b.detach().cpu()[0].item(), + # "dist_k_a": dist_k_a.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + ## TODO: is it a good updating strategy? ## + # cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # curupd + if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset # get the offset # + + cur_optimizable_total_def = cur_offset + torch.matmul(cur_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_optimizable_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_optimizable_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + + + # cur_rot_mtx = quaternion_to_matrix(cur_quaternion) # 3 x 3 + + # cur_tmp_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) # 3 x 3 rotation matrix # + # np.matmul(new_pts, rot_mtx) + cur_offset # + # new_pts = np.matmul(new_pts, cur_tmp_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### # + # cur_upd_rigid_def_aa = cur_offset + prev_rigid_def.detach() + # cur_upd_rigid_def_aa = cur_offset + torch.matmul(prev_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx).squeeze(0) + + + # ori_input_pts = torch.matmul(raw_input_pts - cur_upd_rigid_def_aa.unsqueeze(0), cur_rot_mtx.contiguous().transpose(1, 0).contiguous()) + # prev_rot_mtx = quaternion_to_matrix(prev_quaternion).detach() + # prev_tot_offset = self.timestep_to_total_def[prev_pts_ts].detach() + # new_pts = torch.matmul(ori_input_pts, prev_rot_mtx) + prev_tot_offset.unsqueeze(0) + + # # + # cur_offset_with_rot = raw_input_pts - new_pts + # cur_offset_with_rot = torch.mean(cur_offset_with_rot, dim=0) + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset_with_rot + + return None + + + +class BendingNetworkActiveForceFieldForwardLagV12(nn.Module): + def __init__(self, + d_in, + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + 
rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False): + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagV12, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.static_friction_mu = 1. + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.contact_spring_rest_length = 2. + self.spring_ks_values = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_ks_values.weight) + self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.5 + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + ## [\alpha, \beta] ## + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. 
/ (778 * 2)) + + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. / (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? 
# + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} + self.save_values = {} + # ws_normed, defed_input_pts_sdf, + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. + # torch.nn.init.ones_(self.time_quaternions.weight) # + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + + + # fields # + def set_rigid_translations_optimizable(self, n_ts): + if n_ts == 3: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0008, 0.0040, 0.0159], + [-0.0566, 0.0099, 0.0173]], dtype=torch.float32, requires_grad=True + ).cuda() + elif n_ts == 5: + self.rigid_translations = torch.tensor( + [[ 0.0000, 0.0000, 0.0000], + [-0.0097, 0.0305, 0.0342], + [-0.1211, 0.1123, 0.0565], + [-0.2700, 0.1271, 0.0412], + [-0.3081, 0.1174, 0.0529]], dtype=torch.float32, requires_grad=False + ).cuda() + # self.rigid_translations.requires_grad = True + # self.rigid_translations.requires_grad_ = True + # self.rigid_translations = nn.Parameter( + # self.rigid_translations, requires_grad=True + # ) + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if 
self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True): + ### from input_pts to new pts ### + # wieghting force field # + # prev_pts_ts = input_pts_ts - 1 + + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) + + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 # + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + + friction_qd = 0.1 + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> + # sampled_input_pts_normals = timesteptopassivemehsn + init_passive_obj_verts = timestep_to_passive_mesh[0] + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() ## transform the normals ## + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + # active # + cur_active_mesh = 
timestep_to_active_mesh[input_pts_ts] + nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######### optimize the actuator forces directly ######### + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) + forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + # cur_passive_obj_ns # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] ### nn_sampled_pts x 3 -> the normal direction of the nearest passive object point ### + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + + ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) + ####### sharp the weights ####### + + minn_dist_sampled_pts_passive_obj_thres = 0.05 + # minn_dist_sampled_pts_passive_obj_thres = 0.001 + minn_dist_sampled_pts_passive_obj_thres = 0.0001 + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + ### + ''' decide forces via kinematics statistics ''' + rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. * dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_force_d = -contact_spring_ka * (dist_sampled_pts_to_passive_obj - self.contact_spring_rest_length) # + vel_sampled_pts = nex_active_mesh - cur_active_mesh + tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + vel_sampled_pts_along_normals = torch.sum(vel_sampled_pts * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + tangential_vel = vel_sampled_pts - vel_sampled_pts_along_normals + tangential_forces = tangential_vel * tangential_ks + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals) + forces = tangential_forces + contact_force_d + ''' decide forces via kinematics statistics ''' + # + + + ''' Decompose forces and calculate penalty froces ''' + # penalty_dot_forces_normals, penalty_friction_constraint # + # # get the forces -> decompose forces # + # dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + # forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + # tangential_forces = forces - forces_along_normals # tangential forces # ## tangential forces ## + + # penalty_dot_forces_normals = dot_forces_normals ** 2 + # penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + # penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) + + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + # norm_along_normals_forces = torch.norm(forces_along_normals, dim=-1, p=2) # nn_sampled_pts ## + # penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + # penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. 
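+        # --- Editor's note (illustrative; assumes the commented-out penalty here were enabled) ---
+        # That penalty encodes a Coulomb friction cone ||F_t|| <= mu * ||F_n||: with
+        # self.static_friction_mu = 1., a point with tangential norm 0.3 and normal norm 0.2
+        # would contribute (0.3 - 1. * 0.2)**2 = 0.01, while points inside the cone contribute 0.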
+ # penalty_friction_constraint = torch.mean(penalty_friction_constraint) + # self.penalty_dot_forces_normals = penalty_dot_forces_normals + # self.penalty_friction_constraint = penalty_friction_constraint + + + rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc + + + + ###### sampled input pts to center ####### + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() # ## + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + # cur_delta_quaternion = + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = 
cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + # 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? # 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + # 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + return + + ''' Deform input points via the passive rigid deformations ''' + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts] + # defed_input_pts = input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # # self.timestep_to_ori_input_pts = {} + # # self.timestep_to_ori_input_pts_sdf = {} + # # 
ori_input_pts, ori_input_pts_sdf #### input_pts #### + # ori_input_pts = input_pts.clone().detach() + # ori_input_pts_sdf = passive_sdf_net.sdf(ori_input_pts).squeeze(-1).detach() + ''' Deform input points via the passive rigid deformations ''' + + ''' Calculate weights for deformed input points ''' + # ws_normed, defed_input_pts_sdf, # + # prev_passive_mesh = timestep_to_passive_mesh[prev_pts_ts] + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. * defed_input_pts_sdf.detach() * ws_alpha) # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + ''' Calculate weights for deformed input points ''' + + + # optimizable point weights with fixed spring rules # + uniformly_dist = Uniform(low=-1.0, high=1.0) + nn_uniformly_sampled_pts = self.nn_uniformly_sampled_pts + #### uniformly_sampled_pts: nn_sampled_pts x 3 #### + uniformly_sampled_pts = uniformly_dist.sample(sample_shape=(nn_uniformly_sampled_pts, 3)) + # use weighting_network to get weights of those sampled pts # + # expanded_prev_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() + # expanded_prev_pts_ts = expanded_prev_pts_ts + prev_pts_ts # (nn_pts,) # if we do not have a kinematics observation? # + + expanded_pts_ts = torch.zeros((uniformly_sampled_pts.size(0)), dtype=torch.long).cuda() ### get + expanded_pts_ts = expanded_pts_ts + input_pts_ts + input_latents = self.bending_latent(expanded_pts_ts) + x = torch.cat([uniformly_sampled_pts, input_latents], dim=-1) + + if (not self.use_split_network) or (self.use_split_network and input_pts_ts < self.cur_window_size // 2): + cur_network = self.weighting_network + else: + cur_network = self.split_weighting_network + + ''' use the single split network without no_grad setting ''' + for i, layer in enumerate(cur_network): + x = layer(x) + # SIREN + if self.activation_function.__name__ == "sin" and i == 0: + x *= 30.0 + if i != len(self.network) - 1: + x = self.activation_function(x) + if i in self.skips: + x = torch.cat([uniformly_sampled_pts, x], -1) + # x: nn_uniformly_sampled_pts x 1 weights # + x = x.squeeze(-1) + ws_normed = F.softmax(x, dim=0) #### calculate the softmax as weights # + + ### total def copy ## + # prev_rigid_def = self.timestep_to_total_def_copy[prev_pts_ts] # .unsqueeze(0) + # prev_rigid_def = self.timestep_to_total_def[prev_pts_ts].detach() + # # + # prev_quaternion = self.timestep_to_quaternion[prev_pts_ts].detach() # + # prev_rot_mtx = quaternion_to_matrix(prev_quaternion) # prev_quaternion + # # + # defed_uniformly_sampled_pts = uniformly_sampled_pts - prev_rigid_def.unsqueeze(0) + # defed_uniformly_sampled_pts = torch.matmul(defed_uniformly_sampled_pts, prev_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### inversely rotate the sampled pts # + # defed_uniformly_sampled_pts_sdf = passive_sdf_net.sdf(defed_uniformly_sampled_pts).squeeze(-1) + # # defed_uniformly_sampled_pts_sdf: nn_sampled_pts # + # minn_sampled_sdf, minn_sampled_sdf_pts_idx = torch.min(defed_uniformly_sampled_pts_sdf, dim=0) ## the pts_idx ## + # passive_center_point = uniformly_sampled_pts[minn_sampled_sdf_pts_idx] ## center of the passive object ## + + + cur_passive_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() + cur_passive_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_rot_mtx = quaternion_to_matrix(cur_passive_quaternion) + + 
init_passive_obj_verts = timestep_to_passive_mesh[0].detach() + + cur_passive_obj_verts = torch.matmul(init_passive_obj_verts, cur_rot_mtx) + cur_passive_trans.unsqueeze(0) ## nn_pts x 3 ## + passive_center_point = cur_passive_obj_verts.mean(0) + + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1).detach() + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1).detach() + # ws_unnormed = ws_beta * torch.exp(-1. * defed_uniformly_sampled_pts_sdf.detach() * ws_alpha * 100) # nn_pts # + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) ## ws_normed ## + m = Categorical(ws_normed) + nn_sampled_input_pts = 20000 + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + sampled_input_pts = uniformly_sampled_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + # defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf + # defed_input_pts_sdf = sampled_defed_input_pts_sdf + ori_input_pts = uniformly_sampled_pts.clone().detach() + # ori_input_pts_sdf = defed_uniformly_sampled_pts_sdf.detach() + ws_normed_sampled = ws_normed[sampled_input_pts_idx] + + # sampled_input_pts = prev_passive_mesh.clone() + # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + + ''' ### Use points from passive mesh ### ''' + # sampled_input_pts = prev_passive_mesh.clone() + # # defed_input_pts = sampled_input_pts - prev_rigid_def.unsqueeze(0) + # defed_input_pts = sampled_input_pts - self.timestep_to_total_def_copy[prev_pts_ts].unsqueeze(0) + # defed_input_pts_sdf = passive_sdf_net.sdf(defed_input_pts).squeeze(-1) + # sampled_defed_input_pts_sdf = defed_input_pts_sdf + ''' ### Use points from passive mesh ### ''' + + + ''' ### Use points from weighted sampled input_pts ### ''' + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 5000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_input_pts_sdf[sampled_input_pts_idx] + ''' ### Use points from weighted sampled input_pts ### ''' + + # # weighting model via the distance # # defed input pts sdf # + # # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # # distances # the kappa # + # self.weighting_model_ks = nn.Embedding( # k_a and k_b # + # num_embeddings=2, embedding_dim=1 + # ) + # self.spring_rest_length = 2. # + # self.spring_x_min = -2. 
+ # self.spring_qd = nn.Embedding( + # num_embeddings=1, embedding_dim=1 + # ) + # torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # # 1) sample points from the active robot's mesh; + # # 2) calculate forces from sampled points to the action point; + # # 3) use the weight model to calculate weights for each sampled point; # + # # 4) aggregate forces; + # # # + ''' Distance to previous prev meshes to optimize ''' + # to active mesh # + cur_active_mesh = timestep_to_active_mesh[input_pts_ts] ## nn_active_pts x 3 ## # active mesh # + + ##### using the points from active meshes directly #### + ori_input_pts = cur_active_mesh.clone() + sampled_input_pts = cur_active_mesh.clone() + + # if prev_pts_ts == 0: + # prev_prev_active_mesh_vel = torch.zeros_like(prev_active_mesh) + # else: + # # prev_prev_active_mesh_vel = prev_active_mesh - timestep_to_active_mesh[prev_pts_ts - 1] + # #### prev_prev active mehs #### + # # prev_prev_active_mesh = timestep_to_active_mesh[prev_pts_ts - 1] + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + # cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + # prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + # ## distnaces from act_mesh to the prev_prev ### prev_pts_ts ### + # dist_prev_act_mesh_to_prev_prev = torch.sum( + # (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + # ) + # minn_dist_prev_act_mesh_to_cur, minn_idx_dist_prev_act_mesh_to_cur = torch.min( + # dist_prev_act_mesh_to_prev_prev, dim=-1 ## + # ) + # selected_mesh_pts = batched_index_select(values=cur_active_mesh, indices=minn_idx_dist_prev_act_mesh_to_cur, dim=0) + # prev_prev_active_mesh_vel = selected_mesh_pts - prev_active_mesh + + nex_pts_ts = input_pts_ts + 1 + nex_active_mesh = timestep_to_active_mesh[nex_pts_ts] + cur_active_mesh_vel = nex_active_mesh - cur_active_mesh + + # dist_act_mesh_to_nex_ = torch.sum( + # (prev_active_mesh.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 ### + # ) + # cur_active_mesh = self.uniformly_sample_pts(cur_active_mesh, nn_samples=2000) + # prev_active_mesh = self.uniformly_sample_pts(prev_active_mesh, nn_samples=2000) + + dist_input_pts_active_mesh = torch.sum( + (sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0)) ** 2, dim=-1 + ) + + # dist input pts active + ##### sqrt and the ##### + dist_input_pts_active_mesh = torch.sqrt(dist_input_pts_active_mesh) # nn_sampled_pts x nn_active_pts # + topk_dist_input_pts_active_mesh, topk_dist_input_pts_active_mesh_idx = torch.topk(dist_input_pts_active_mesh, k=self.nn_patch_active_pts, largest=False, dim=-1) + thres_dist, _ = torch.max(topk_dist_input_pts_active_mesh, dim=-1) + weighting_ka = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) # + weighting_kb = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # + + unnormed_weight_active_pts_to_input_pts = weighting_ka * torch.exp(-1. * dist_input_pts_active_mesh * weighting_kb * 50) # + unnormed_weight_active_pts_to_input_pts[unnormed_weight_active_pts_to_input_pts > thres_dist.unsqueeze(-1) + 1e-6] = 0. 
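+ # Weighting sketch for the step above: each sampled point keeps only its `self.nn_patch_active_pts`
+ # nearest active-mesh points, and their unnormalized weights follow w(d) = k_a * exp(-50 * k_b * d)
+ # (the `k_a exp{-d * k_b}` form noted in the comments elsewhere in this file, with an extra factor of 50);
+ # weights beyond the per-row top-k distance threshold are zeroed. As an illustrative check with
+ # k_a = k_b = 1: d = 0.02 gives w = exp(-1) ~= 0.37 while d = 0.1 gives w = exp(-5) ~= 0.0067,
+ # so nearby active points dominate the categorical distribution normalized just below.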
+ normed_weight_active_pts_to_input_pts = unnormed_weight_active_pts_to_input_pts / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + m = Categorical(normed_weight_active_pts_to_input_pts) # + nn_sampled_input_pts = self.nn_patch_active_pts # + # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + # sampled_input_pts_idx = sampled_input_pts_idx.contiguous().transpose(1, 0).contiguous() + + sampled_input_pts_idx = topk_dist_input_pts_active_mesh_idx + + + rel_input_pts_active_mesh = sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0) + # print(f"rel_input_pts_active_mesh: {rel_input_pts_active_mesh.size()}, sampled_input_pts_idx: {sampled_input_pts_idx.size()}") + rel_input_pts_active_mesh = batched_index_select(values=rel_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) # + + cur_active_mesh_vel_exp = cur_active_mesh_vel.unsqueeze(0).repeat(rel_input_pts_active_mesh.size(0), 1, 1).contiguous() + cur_active_mesh_vel = batched_index_select(values=cur_active_mesh_vel_exp, indices=sampled_input_pts_idx, dim=1) ## + + # prev_active_mesh_exp = prev_active_mesh.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # prev_active_mesh_exp = batched_index_select(values=prev_active_mesh_exp, indices=sampled_input_pts_idx, dim=1) ### nn_sampled_pts x nn_selected_pts x 3 + # self.timestep_to_prev_selected_active_mesh[prev_pts_ts] = prevactive + # ''' Distance to previous active meshes to optimize ''' + # prev_active_mesh_ori = self.timestep_to_prev_active_mesh_ori[prev_pts_ts] ## nn_active_pts x 3 ## + + # dist_input_pts_active_mesh_ori = torch.sum( + # (sampled_input_pts.detach().unsqueeze(1) - cur_active_mesh_vel.unsqueeze(0)) ** 2, dim=-1 + # ) + # dist_input_pts_active_mesh_ori = torch.sqrt(dist_input_pts_active_mesh_ori) # nn_sampled_pts x nn_active_pts # + # topk_dist_input_pts_active_mesh_ori, topk_dist_input_pts_active_mesh_idx_ori = torch.topk(dist_input_pts_active_mesh_ori, k=500, largest=False, dim=-1) + # thres_dist_ori, _ = torch.max(topk_dist_input_pts_active_mesh_ori, dim=-1) + # weighting_ka_ori = self.weighting_model_ks(torch.zeros((1,)).long().cuda()).view(1) + # weighting_kb_ori = self.weighting_model_ks(torch.ones((1,)).long().cuda()).view(1) # weighting_kb # + + # unnormed_weight_active_pts_to_input_pts_ori = weighting_ka_ori * torch.exp(-1. * dist_input_pts_active_mesh_ori * weighting_kb_ori * 50) # + # unnormed_weight_active_pts_to_input_pts_ori[unnormed_weight_active_pts_to_input_pts_ori >= thres_dist_ori.unsqueeze(-1)] = 0. 
+ # normed_weight_active_pts_to_input_pts_ori = unnormed_weight_active_pts_to_input_pts_ori / torch.clamp(torch.sum(unnormed_weight_active_pts_to_input_pts_ori, dim=-1, keepdim=True), min=1e-9) # nn_sampled_pts # + # m_ori = Categorical(normed_weight_active_pts_to_input_pts_ori) # + # nn_sampled_input_pts = 500 # + # # # print(f"prev_passive_mesh: {prev_passive_mesh.size(), }") + # sampled_input_pts_idx_ori = m_ori.sample(sample_shape=(nn_sampled_input_pts,)) + # # sampled_input_pts = normed_weight_active_pts_to_input_pts[sampled_input_pts_idx] + # # sampled_defed_input_pts_sdf = defed_uniformly_sampled_pts_sdf[sampled_input_pts_idx] + + # sampled_input_pts_idx_ori = sampled_input_pts_idx_ori.contiguous().transpose(1, 0).contiguous() + + # rel_input_pts_active_mesh_ori = sampled_input_pts.detach().unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0).detach() + + # prev_active_mesh_ori_exp = prev_active_mesh_ori.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() + # prev_active_mesh_ori_exp = batched_index_select(values=prev_active_mesh_ori_exp, indices=sampled_input_pts_idx_ori, dim=1) + # # prev_active_mesh_ori_exp: nn_sampled_pts x nn_active_pts x 3 # + # # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + # self.timestep_to_prev_selected_active_mesh_ori[prev_pts_ts] = prev_active_mesh_ori_exp.detach() + # ''' Distance to previous active meshes to optimize ''' + + + + ''' spring force v2: use the spring force as input ''' + ### determine the spring coefficient ### + spring_qd = self.spring_qd(torch.zeros((1,)).long().cuda()).view(1) + # spring_qd = 1. # fix the qd to 1 # spring_qd # # spring_qd # + spring_qd = 0.5 + # dist input pts to active mesh # # + # a threshold distance -(d - d_thres)^3 * k + 2.*(2 - d_thres)**3 --> use the (2 - d_thres) ** 3 * k as the maximum distances -> k sould not be larger than 2. # + #### The k_d(d) in the form of inverse functions #### + spring_kd = spring_qd / (dist_input_pts_active_mesh - self.spring_x_min) ### + + #### The k_d(d) in the form of polynomial functions #### + # spring_qd = 0.01 + # spring_kd = spring_qd * ((-(dist_input_pts_active_mesh - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + # wish to use simple functions to achieve the adjustmenet of k-d relations # # k-d relations # + + # print(f"spring_qd: {spring_qd.size()}, dist_input_pts_active_mesh: {dist_input_pts_active_mesh.size()}, spring_kd: {spring_kd.size()}") # + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) # tiem_constant + spring_k_val = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) + + spring_kd = spring_kd * time_cons ### get the spring_kd (nn_sampled_pts x nn_act_pts ) #### + spring_force = -1. 
* spring_kd * (dist_input_pts_active_mesh - self.spring_rest_length) # nn_sampled_pts x nn-active_pts + spring_force = batched_index_select(values=spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = sampled_input_pts.unsqueeze(1) - cur_active_mesh.unsqueeze(0) # prev_active_mesh # + dir_spring_force = batched_index_select(values=dir_spring_force, indices=sampled_input_pts_idx, dim=1) # + dir_spring_force = dir_spring_force / torch.clamp(torch.norm(dir_spring_force, dim=-1, keepdim=True, p=2), min=1e-9) # + spring_force = dir_spring_force * spring_force.unsqueeze(-1) * spring_k_val + ''' spring force v2: use the spring force as input ''' + + + + # ''' get the spring force of the reference motion ''' + # #### The k_d(d) in the form of inverse functions #### + # # spring_kd_ori = spring_qd / (dist_input_pts_active_mesh_ori - self.spring_x_min) + # #### The k_d(d) in the form of polynomial functions #### + # spring_kd_ori = spring_qd * ((-(dist_input_pts_active_mesh_ori - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + # spring_kd_ori = spring_kd_ori * time_cons + # spring_force_ori = -1. * spring_kd_ori * (dist_input_pts_active_mesh_ori - self.spring_rest_length) + # spring_force_ori = batched_index_select(values=spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + # dir_spring_force_ori = sampled_input_pts.unsqueeze(1) - prev_active_mesh_ori.unsqueeze(0) + # dir_spring_force_ori = batched_index_select(values=dir_spring_force_ori, indices=sampled_input_pts_idx_ori, dim=1) + # dir_spring_force_ori = dir_spring_force_ori / torch.clamp(torch.norm(dir_spring_force_ori, dim=-1, keepdim=True, p=2), min=1e-9) + # spring_force_ori = dir_spring_force_ori * spring_force_ori.unsqueeze(-1) * spring_k_val + # ''' get the spring force of the reference motion ''' + ''' spring force v2: use the spring force as input ''' + + + ''' spring force v3: use the spring force as input ''' + transformed_w = self.patch_force_scale_network[0](rel_input_pts_active_mesh) # + transformed_w = self.patch_force_scale_network[1](transformed_w) + glb_transformed_w, _ = torch.max(transformed_w, dim=1, keepdim=True) + # print(f"transformed_w: {transformed_w.size()}, glb_transformed_w: {glb_transformed_w.size()}") + glb_transformed_w = glb_transformed_w.repeat(1, transformed_w.size(1), 1) # + + transformed_w = torch.cat( + [transformed_w, glb_transformed_w], dim=-1 + ) + + force_weighting = self.patch_force_scale_network[2](transformed_w) # + # print(f"before the last step, forces: {forces.size()}") + # forces, _ = torch.max(forces, dim=1) # and force weighting # + force_weighting = self.patch_force_scale_network[3](force_weighting).squeeze(-1) # nn_sampled_pts x nn_active_pts # + force_weighting = F.softmax(force_weighting, dim=-1) ## nn_sampled_pts x nn_active_pts # + ## use the v3 force as the input to the field ## + forces = torch.sum( # # use the spring force as input # + spring_force * force_weighting.unsqueeze(-1), dim=1 ### sum over the force; sum over the force ### + ) + self.timestep_to_spring_forces[input_pts_ts] = forces + ''' spring force v3: use the spring force as input ''' + + + # ''' spring force from the reference trajectory ''' + # transformed_w_ori = self.patch_force_scale_network[0](rel_input_pts_active_mesh_ori) + # transformed_w_ori = self.patch_force_scale_network[1](transformed_w_ori) + # glb_transformed_w_ori, _ = torch.max(transformed_w_ori, dim=1, keepdim=True) + # glb_transformed_w_ori = glb_transformed_w_ori.repeat(1, 
transformed_w_ori.size(1), 1) # + # transformed_w_ori = torch.cat( + # [transformed_w_ori, glb_transformed_w_ori], dim=-1 + # ) + # force_weighting_ori = self.patch_force_scale_network[2](transformed_w_ori) + # force_weighting_ori = self.patch_force_scale_network[3](force_weighting_ori).squeeze(-1) + # force_weighting_ori = F.softmax(force_weighting_ori, dim=-1) + # forces_ori = torch.sum( + # spring_force_ori.detach() * force_weighting.unsqueeze(-1).detach(), dim=1 + # ) + # self.timestep_to_spring_forces_ori[prev_pts_ts] = forces_ori + # ''' spring force from the reference trajectory ''' + + + ''' TODO: a lot to do for this firctional model... ''' + ''' calculate the firctional force ''' + friction_qd = 0.5 + friction_qd = 0.1 + dist_input_pts_active_mesh_sel = batched_index_select(dist_input_pts_active_mesh, indices=sampled_input_pts_idx, dim=1) + #### The k_d(d) in the form of inverse functions #### + friction_kd = friction_qd / (dist_input_pts_active_mesh_sel - self.spring_x_min) + + #### The k_d(d) in the form of polynomial functions #### + # friction_qd = 0.01 + # friction_kd = friction_qd * ((-(dist_input_pts_active_mesh_sel - self.contact_dist_thres) ** 3) + 2. * (2. - self.contact_dist_thres) ** 3) + + friction_kd = friction_kd * time_cons + prev_prev_active_mesh_vel_norm = torch.norm(cur_active_mesh_vel, dim=-1) + friction_force = friction_kd * (self.spring_rest_length - dist_input_pts_active_mesh_sel) * prev_prev_active_mesh_vel_norm # | vel | * (dist - rest_length) * friction_kd # + friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = batched_index_select(values=friction_force, indices=sampled_input_pts_idx, dim=1) # + dir_friction_force = cur_active_mesh_vel + dir_friction_force = dir_friction_force / torch.clamp(torch.norm(dir_friction_force, dim=-1, keepdim=True, p=2), min=1e-9) # + friction_force = dir_friction_force * friction_force.unsqueeze(-1) * friction_k # k * friction_force_scale * friction_force_dir # # get the friction force and the frictionk # + friction_force = torch.sum( # friction_force: nn-pts x 3 # + friction_force * force_weighting.unsqueeze(-1), dim=1 + ) + forces = forces + friction_force + forces = friction_force + ''' calculate the firctional force ''' + + + ''' Embed sdf values ''' + # raw_input_pts = input_pts[:, :3] + # if self.embed_fn_fine is not None: # + # input_pts_to_active_sdf = self.embed_fn_fine(input_pts_to_active_sdf) + ''' Embed sdf values ''' # + + ###### [time_cons] is used when calculating buth the spring force and the frictional force ---> convert force to acc ###### + + + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, _ = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha * 10) + + + # ws_unnormed = ws_normed_sampled + ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + + ''' get velocity and offset related constants ''' + # k_acc_to_vel = self.ks_val(torch.zeros((1,)).long().cuda()).view(1) # + # k_vel_to_offset = self.ks_val(torch.ones((1,)).long().cuda()).view(1) # + ''' get velocity and offset related constants ''' + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() + + + ''' Compute torque, angular acc, angular vel and delta quaternion via forces and the directional offset from the center point to the sampled points ''' + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) ### center_point to the input_pts ### + # sampled_pts_torque = torch.cross(forces, center_point_to_sampled_pts, dim=-1) ## nn_sampled_pts x 3 ## + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + torque = torch.sum( + sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + ) + delta_angular_vel = torque * time_cons + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() ### (3,) + cur_delta_angle = cur_angular_vel * time_cons + cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() # angular velocity # + self.timestep_to_torque[input_pts_ts] = torque.detach() + + + + # ws_normed, defed_input_pts_sdf # + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_ws_normed[input_pts_ts] = ws_normed.detach() + # self.timestep_to_defed_input_pts_sdf[prev_pts_ts] = defed_input_pts_sdf.detach() + # self.timestep_to_ori_input_pts = {} # # ori input pts # + # self.timestep_to_ori_input_pts_sdf = {} # # + # ori_input_pts, ori_input_pts_sdf # + self.timestep_to_ori_input_pts[input_pts_ts] = ori_input_pts.detach() + # self.timestep_to_ori_input_pts_sdf[prev_pts_ts] = ori_input_pts_sdf.detach() # ori input pts sdfs + + self.ks_vals_dict = { + "acc_to_vel": k_acc_to_vel.detach().cpu()[0].item(), + "vel_to_offset": k_vel_to_offset.detach().cpu()[0].item(), # vel to offset # + "ws_alpha": ws_alpha.detach().cpu()[0].item(), + "ws_beta": ws_beta.detach().cpu()[0].item(), + 'friction_k': friction_k.detach().cpu()[0].item(), + 'spring_k_val': spring_k_val.detach().cpu()[0].item(), # spring_k + # "dist_k_b": dist_k_b.detach().cpu()[0].item(), + # "dist_k_a": dist_k_a.detach().cpu()[0].item(), + } + self.save_values = { # save values # saved values # + 'ks_vals_dict': self.ks_vals_dict, # save values ## # what are good point_accs here? 
# 1) spatially and temporally continuous; 2) ambient contact force direction; # + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + 'timestep_to_ori_input_pts': {cur_ts: self.timestep_to_ori_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts}, + # 'timestep_to_ori_input_pts_sdf': {cur_ts: self.timestep_to_ori_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ori_input_pts_sdf} + } + cur_offset = k_vel_to_offset * cur_vel + ## TODO: is it a good updating strategy? ## + # cur_upd_rigid_def = cur_offset.detach() + prev_rigid_def + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # curupd + if update_tot_def: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset # get the offset # + + cur_optimizable_total_def = cur_offset + torch.matmul(cur_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_optimizable_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_optimizable_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + ## update raw input pts ## + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + + + # cur_rot_mtx = quaternion_to_matrix(cur_quaternion) # 3 x 3 + + # cur_tmp_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) # 3 x 3 rotation matrix # + # np.matmul(new_pts, rot_mtx) + cur_offset # + # new_pts = np.matmul(new_pts, cur_tmp_rot_mtx.contiguous().transpose(1, 0).contiguous()) ### # + # cur_upd_rigid_def_aa = cur_offset + prev_rigid_def.detach() + # cur_upd_rigid_def_aa = cur_offset + torch.matmul(prev_rigid_def.detach().unsqueeze(0), cur_delta_rot_mtx).squeeze(0) + + + # ori_input_pts = torch.matmul(raw_input_pts - cur_upd_rigid_def_aa.unsqueeze(0), cur_rot_mtx.contiguous().transpose(1, 0).contiguous()) + # prev_rot_mtx = quaternion_to_matrix(prev_quaternion).detach() + # prev_tot_offset = self.timestep_to_total_def[prev_pts_ts].detach() + # new_pts = torch.matmul(ori_input_pts, prev_rot_mtx) + prev_tot_offset.unsqueeze(0) + + # # + # cur_offset_with_rot = raw_input_pts - new_pts + # cur_offset_with_rot = torch.mean(cur_offset_with_rot, dim=0) + # self.timestep_to_optimizable_offset[input_pts_ts] = cur_offset_with_rot + + return None + + + + +class BendingNetworkActiveForceFieldForwardLagV13(nn.Module): + def __init__(self, # self # + d_in, # + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, # hidden dimensions # + 
rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False, + nn_instances=1, + minn_dist_threshold=0.05, + ): # contact + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagV13, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.static_friction_mu = 1. + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.nn_instances = nn_instances + + self.contact_spring_rest_length = 2. + + # self.minn_dist_sampled_pts_passive_obj_thres = 0.05 # minn_dist_threshold ### + self.minn_dist_sampled_pts_passive_obj_thres = minn_dist_threshold + + + if self.nn_instances == 1: + self.spring_ks_values = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_ks_values.weight) + self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.01 + else: + self.spring_ks_values = nn.ModuleList( + [ + nn.Embedding(num_embeddings=5, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_ks_values in self.spring_ks_values: + torch.nn.init.ones_(cur_ks_values.weight) + cur_ks_values.weight.data = cur_ks_values.weight.data * 0.01 + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + + ## [ \alpha, \beta ] ## + if self.nn_instances == 1: + self.ks_weights = nn.Embedding( 
+ num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. / (778 * 2)) + else: + self.ks_weights = nn.ModuleList( + [ + nn.Embedding(num_embeddings=2, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_ks_weights in self.ks_weights: + torch.nn.init.ones_(cur_ks_weights.weight) # + cur_ks_weights.weight.data[1] = cur_ks_weights.weight.data[1] * (1. / (778 * 2)) + + + if self.nn_instances == 1: + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + else: + self.time_constant = nn.ModuleList( + [ + nn.Embedding(num_embeddings=3, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_time_constant in self.time_constant: + torch.nn.init.ones_(cur_time_constant.weight) # + + if self.nn_instances == 1: + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + else: + self.damping_constant = nn.ModuleList( + [ + nn.Embedding(num_embeddings=3, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_damping_constant in self.damping_constant: + torch.nn.init.ones_(cur_damping_constant.weight) # # # # + cur_damping_constant.weight.data = cur_damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + + + # self.actuator_friction_forces = nn.Embedding( # actuator's forces # + # num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + + + if nn_instances == 1: + self.actuator_friction_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + else: + self.actuator_friction_forces = nn.ModuleList( + [nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) for _ in range(self.nn_instances) ] + ) + for cur_friction_force_net in self.actuator_friction_forces: + torch.nn.init.zeros_(cur_friction_force_net.weight) # + + + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. 
/ (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": # periodict activation functions # + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. 
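+ # `weighting_model_ks` holds the pair (k_a, k_b) of the unnormalized weight w(d) = k_a * exp(-d * k_b),
+ # and `spring_rest_length` / `spring_x_min`, together with `spring_qd` defined just below, parameterize
+ # the distance-dependent stiffness used in the forward pass: k_d(d) = q_d / (d - spring_x_min) and
+ # spring_force(d) = -k_d(d) * (d - spring_rest_length). As a worked example with q_d = 0.5 (the value
+ # hard-coded in the forward pass) and d = 0.5: k_d = 0.5 / 2.5 = 0.2 and
+ # spring_force = -0.2 * (0.5 - 2.0) = 0.3, i.e. a pull back toward the rest length.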
+ self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? # + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} # record the optimizable offset # + self.save_values = {} + # ws_normed, defed_input_pts_sdf, # + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. 
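+ # The per-timestep quaternion table is initialized to (w, x, y, z) = (1, 0, 0, 0), the identity rotation
+ # (quaternion_to_matrix maps it to the 3x3 identity), so every timestep starts with no rotation before
+ # optimization; the translation, force and torque embeddings below are zero-initialized in the same spirit.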
+ # torch.nn.init.ones_(self.time_quaternions.weight) # + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + self.obj_sdf_th = None + + + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): # no_grad() + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays # + # + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + # ### + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + + def query_for_sdf(self, cur_pts, cur_frame_transformations): + # + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + # cur_pts: nn_pts x 3 # + # print(f"cur_pts: {cur_pts.size()}, cur_frame_translation: {cur_frame_translation.size()}, cur_frame_rotation: {cur_frame_rotation.size()}") + cur_transformed_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + # v = (v - center) * scale # + # sdf_space_center # + cur_transformed_pts_np = cur_transformed_pts.detach().cpu().numpy() + cur_transformed_pts_np = (cur_transformed_pts_np - np.reshape(self.sdf_space_center, (1, 3))) * self.sdf_space_scale + cur_transformed_pts_np = (cur_transformed_pts_np + 1.) / 2. 
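+ # query_for_sdf first maps a query point into the object's canonical frame via R^T (p - t), then into the
+ # normalized SDF volume as q = ((p_canonical - sdf_space_center) * sdf_space_scale + 1) / 2, so q lies in
+ # [0, 1] inside the modeled box. The lines below convert q to integer voxel indices,
+ # idx = clip(int(q * sdf_res), 0, sdf_res - 1); for instance q = 0.5 with sdf_res = 128 (an illustrative
+ # resolution; sdf_space_center, sdf_space_scale and sdf_res are attributes set elsewhere) gives idx = 64.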
+ cur_transformed_pts_xs = (cur_transformed_pts_np[:, 0] * self.sdf_res).astype(np.int32) # [x, y, z] of the transformed_pts_np # + cur_transformed_pts_ys = (cur_transformed_pts_np[:, 1] * self.sdf_res).astype(np.int32) + cur_transformed_pts_zs = (cur_transformed_pts_np[:, 2] * self.sdf_res).astype(np.int32) + + cur_transformed_pts_xs = np.clip(cur_transformed_pts_xs, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_ys = np.clip(cur_transformed_pts_ys, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_zs = np.clip(cur_transformed_pts_zs, a_min=0, a_max=self.sdf_res - 1) + + + if self.obj_sdf_th is None: + self.obj_sdf_th = torch.from_numpy(self.obj_sdf).float().cuda() + cur_transformed_pts_xs_th = torch.from_numpy(cur_transformed_pts_xs).long().cuda() + cur_transformed_pts_ys_th = torch.from_numpy(cur_transformed_pts_ys).long().cuda() + cur_transformed_pts_zs_th = torch.from_numpy(cur_transformed_pts_zs).long().cuda() + + cur_pts_sdf = batched_index_select(self.obj_sdf_th, cur_transformed_pts_xs_th, 0) + # print(f"After selecting the x-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the y-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the z-axis: {cur_pts_sdf.size()}") + + + # cur_pts_sdf = self.obj_sdf[cur_transformed_pts_xs] + # cur_pts_sdf = cur_pts_sdf[:, cur_transformed_pts_ys] + # cur_pts_sdf = cur_pts_sdf[:, :, cur_transformed_pts_zs] + # cur_pts_sdf = np.diagonal(cur_pts_sdf) + # print(f"cur_pts_sdf: {cur_pts_sdf.shape}") + # # gradient of sdf # + # # the contact force dierection should be the negative direction of the sdf gradient? 
# + # # it seems true # + # # get the cur_pts_sdf value # + # cur_pts_sdf = torch.from_numpy(cur_pts_sdf).float().cuda() + return cur_pts_sdf # # cur_pts_sdf # + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None, i_instance=0, reference_mano_pts=None, sampled_verts_idxes=None, fix_obj=False): + ### from input_pts to new pts ### + # prev_pts_ts = input_pts_ts - 1 # + # maintain the contact # + + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) # + + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + + # friction_qd = 0.1 + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> # sampled points # + ori_nns = sampled_input_pts.size(0) + if sampled_verts_idxes is not None: + sampled_input_pts = sampled_input_pts[sampled_verts_idxes] + + nn_sampled_input_pts = sampled_input_pts.size(0) + + # + # ws_normed = torch.ones((sampled_input_pts.size(0),), dtype=torch.float32).cuda() + # ws_normed = ws_normed / float(sampled_input_pts.size(0)) + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 20000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + + # sampled_input_pts_normals = # + init_passive_obj_verts = timestep_to_passive_mesh[0] + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() ## transform the normals ## + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + 
# nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh ### the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k # + # forces = friction_force + # ######## vel for frictions ######### # # maintain the contact / continuous contact -> patch contact + # coantacts in previous timesteps -> + + # cur actuation # # cur actuation # + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # ######### optimize the actuator forces directly ######### + # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) # actuation embedding idxes # + # forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + if friction_forces is None: + ###### get the friction forces ##### + if self.nn_instances == 1: + cur_actuation_friction_forces = self.actuator_friction_forces(cur_actuation_embedding_idxes) + else: + cur_actuation_friction_forces = self.actuator_friction_forces[i_instance](cur_actuation_embedding_idxes) + else: # + if reference_mano_pts is not None: + ref_mano_pts_nn = reference_mano_pts.size(0) + cur_actuation_embedding_st_idx = ref_mano_pts_nn * input_pts_ts + cur_actuation_embedding_ed_idx = ref_mano_pts_nn * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + # nn_ref_pts x 3 # + # sampled_input_pts # + # r = 0.01 + threshold_ball_r = 0.01 + dist_input_pts_to_reference_pts = torch.sum( + (sampled_input_pts.unsqueeze(1) - reference_mano_pts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_input_pts_to_reference_pts = torch.sqrt(dist_input_pts_to_reference_pts) + weights_input_to_reference = 0.5 - dist_input_pts_to_reference_pts + weights_input_to_reference[weights_input_to_reference < 0] = 0 + weights_input_to_reference[dist_input_pts_to_reference_pts > threshold_ball_r] = 0 + + minn_dist_input_pts_to_reference_pts, minn_idx_input_pts_to_reference_pts = torch.min(dist_input_pts_to_reference_pts, dim=-1) + + weights_input_to_reference[dist_input_pts_to_reference_pts == minn_dist_input_pts_to_reference_pts.unsqueeze(-1)] = 0.1 - dist_input_pts_to_reference_pts[dist_input_pts_to_reference_pts == minn_dist_input_pts_to_reference_pts.unsqueeze(-1)] + + weights_input_to_reference = weights_input_to_reference / torch.clamp(torch.sum(weights_input_to_reference, dim=-1, keepdim=True), min=1e-9) + + # 
cur_actuation_friction_forces = weights_input_to_reference.unsqueeze(-1) * cur_actuation_friction_forces.unsqueeze(0) # nn_input_pts x nn_ref_pts x 1 xxxx 1 x nn_ref_pts x 3 -> nn_input_pts x nn_ref_pts x 3 + # cur_actuation_friction_forces = cur_actuation_friction_forces.sum(dim=1) + + # cur_actuation_friction_forces * weights_input_to_reference.unsqueeze(-1) + cur_actuation_friction_forces = batched_index_select(cur_actuation_friction_forces, minn_idx_input_pts_to_reference_pts, dim=0) + else: + # cur_actuation_embedding_st_idx = 365428 * input_pts_ts + # cur_actuation_embedding_ed_idx = 365428 * (input_pts_ts + 1) + if sampled_verts_idxes is not None: + cur_actuation_embedding_st_idx = ori_nns * input_pts_ts + cur_actuation_embedding_ed_idx = ori_nns * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + cur_actuation_friction_forces = cur_actuation_friction_forces[sampled_verts_idxes] + else: + cur_actuation_embedding_st_idx = nn_sampled_input_pts * input_pts_ts + cur_actuation_embedding_ed_idx = nn_sampled_input_pts * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + # nn instances # + if self.nn_instances == 1: + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + else: + ws_alpha = self.ks_weights[i_instance](torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + # cur_passive_obj_ns # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] ### nn_sampled_pts x 3 -> the normal direction of the nearest passive object point ### + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + # inter_obj_normals # + inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts.detach() # + dot_inter_obj_pts_to_sampled_pts_normals = torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + + + ###### penetration penalty strategy v1 ###### # + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 + # penetrating_depth = -1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1) + # penetrating_depth_penalty = penetrating_depth[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): # get the penetration penalties # + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v1 ###### + + + + ###### penetration penalty strategy v2 ###### + # if input_pts_ts > 0: + # prev_active_obj = timestep_to_active_mesh[input_pts_ts - 1].detach() + # if sampled_verts_idxes is not None: + # prev_active_obj = prev_active_obj[sampled_verts_idxes] + # disp_prev_to_cur = sampled_input_pts - 
prev_active_obj + # disp_prev_to_cur = torch.norm(disp_prev_to_cur, dim=-1, p=2) + # penetrating_depth_penalty = disp_prev_to_cur[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + # else: + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v2 ###### + + + ###### penetration penalty strategy v3 ###### + # if input_pts_ts > 0: + # cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + # cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + # queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + # penetrating_indicator = queried_sdf < 0 + # if sampled_verts_idxes is not None: + # prev_active_obj = prev_active_obj[sampled_verts_idxes] + # disp_prev_to_cur = sampled_input_pts - prev_active_obj + # disp_prev_to_cur = torch.norm(disp_prev_to_cur, dim=-1, p=2) + # penetrating_depth_penalty = disp_prev_to_cur[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # else: + # # cur_rot = torch.eye(3, dtype=torch.float32).cuda() + # # cur_trans = torch.zeros((3,), dtype=torch.float32).cuda() + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v3 ###### + + + # ws_beta; ws_alpha; 10 # + ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) + ####### sharp the weights ####### + + # minn_dist_sampled_pts_passive_obj_thres = 0.05 + # # minn_dist_sampled_pts_passive_obj_thres = 0.001 + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 + minn_dist_sampled_pts_passive_obj_thres = self.minn_dist_sampled_pts_passive_obj_thres + + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + ### + ''' decide forces via kinematics statistics ''' + ### neares + rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. * dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + + + + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed # + + # penetrating + ### penetration strategy v4 #### + + if input_pts_ts > 0: + cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + penetrating_indicator = queried_sdf < 0 + else: + penetrating_indicator = torch.zeros_like(dot_inter_obj_pts_to_sampled_pts_normals).bool() + # penetrating_indicator = + + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 # + self.penetrating_indicator = penetrating_indicator + penetration_proj_ks = 0 - dot_inter_obj_pts_to_sampled_pts_normals + ### penetratio nproj penalty ### + penetration_proj_penalty = penetration_proj_ks * (-1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1)) + self.penetrating_depth_penalty = penetration_proj_penalty[penetrating_indicator].mean() + if torch.isnan(self.penetrating_depth_penalty): # get the penetration penalties # + self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + penetrating_points = sampled_input_pts[penetrating_indicator] + # penetration_proj_k_to_robot = 1.0 # 0.7 + penetration_proj_k_to_robot = 0.1 + penetrating_forces = penetration_proj_ks.unsqueeze(-1) * inter_obj_normals.detach() * penetration_proj_k_to_robot + penetrating_forces = penetrating_forces[penetrating_indicator] + self.penetrating_forces = penetrating_forces + self.penetrating_points = penetrating_points + ### penetration strategy v4 #### # another mophology # + + # maintain the contact and calculate the penetrating forces and points for each timestep and then use the displacemnet to calculate the penalty based friction forces # + + + + + if self.nn_instances == 1: + # contact ks values # # fi we set a fixed k value here # + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + else: + contact_spring_ka = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values[i_instance](torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + + + ###### the contact force decided by the rest_length ###### + # contact_force_d = contact_spring_ka * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) # + contact_spring_kb * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 2 + contact_spring_kc * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 3 # + ###### the contact force decided by the rest_length ###### + + ##### the contact force decided by the theshold ###### + contact_force_d = contact_spring_ka * (self.minn_dist_sampled_pts_passive_obj_thres - dist_sampled_pts_to_passive_obj) + ###### the contact force decided by the threshold ###### + + ###### Get the 
tangential forces via optimizable forces ###### + cur_actuation_friction_forces_along_normals = torch.sum(cur_actuation_friction_forces * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + tangential_vel = cur_actuation_friction_forces - cur_actuation_friction_forces_along_normals + ###### Get the tangential forces via optimizable forces ###### + + + + ###### Get the tangential forces via tangential velocities ###### + # vel_sampled_pts_along_normals = torch.sum(vel_sampled_pts * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + # tangential_vel = vel_sampled_pts - vel_sampled_pts_along_normals + ###### Get the tangential forces via tangential velocities ###### + + tangential_forces = tangential_vel * tangential_ks # tangential forces # + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals) + + norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts ## + penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. + penalty_friction_constraint = torch.mean(penalty_friction_constraint) + # + self.penalty_friction_constraint = penalty_friction_constraint # penalty friction + + # initially the penetrations should be neglected and gradually it should be considered to generate contact forces # + # the gradient of the sdf field? + ### strict constraints ### + # mult_weights = torch.ones_like(norm_along_normals_forces).detach() + # hard_selector = norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces + # hard_selector = hard_selector.detach() + # mult_weights[hard_selector] = self.static_friction_mu * norm_along_normals_forces.detach()[hard_selector] / norm_tangential_forces.detach()[hard_selector] + # ### change to the strict constraint ### + # # tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces] = tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces] / norm_tangential_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces].unsqueeze(-1) * self.static_friction_mu * norm_along_normals_forces[norm_tangential_forces > self.static_friction_mu * norm_along_normals_forces].unsqueeze(-1) + # ### change to the strict constraint ### + + # # tangential forces # + # tangential_forces = tangential_forces * mult_weights.unsqueeze(-1) + ### strict constraints ### + + forces = tangential_forces + contact_force_d # tangential forces and contact force d # + ''' decide forces via kinematics statistics ''' + + ''' Decompose forces and calculate penalty forces ''' + # penalty_dot_forces_normals, penalty_friction_constraint # + # # get the forces -> decompose forces # + dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + tangential_forces = forces - forces_along_normals # tangential forces # ## tangential forces ## + + penalty_dot_forces_normals = dot_forces_normals ** 2 + penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must be in the negative direction of the object normal # + penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) + self.penalty_dot_forces_normals = 
penalty_dot_forces_normals + + + + rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc ## rigid acc ## + + + + ###### sampled input pts to center ####### + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() # ## + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + if self.nn_instances == 1: + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + else: + time_cons = self.time_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant[i_instance](torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + # cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { 
prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # + + if not fix_obj: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + } + + return + + + + + +# +class BendingNetworkActiveForceFieldForwardLagV15(nn.Module): + def __init__(self, # self # + d_in, # + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False, + nn_instances=1, + minn_dist_threshold=0.05, + ): # contact + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagV15, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch 
= 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.static_friction_mu = 1. + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.nn_instances = nn_instances + + self.contact_spring_rest_length = 2. + + # self.minn_dist_sampled_pts_passive_obj_thres = 0.05 # minn_dist_threshold ### + self.minn_dist_sampled_pts_passive_obj_thres = minn_dist_threshold + + self.spring_contact_ks_values = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_contact_ks_values.weight) + self.spring_contact_ks_values.weight.data = self.spring_contact_ks_values.weight.data * 0.01 + + self.spring_friction_ks_values = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_friction_ks_values.weight) + self.spring_friction_ks_values.weight.data = self.spring_friction_ks_values.weight.data * 0.001 + + if self.nn_instances == 1: + self.spring_ks_values = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_ks_values.weight) + self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.01 + else: + self.spring_ks_values = nn.ModuleList( + [ + nn.Embedding(num_embeddings=5, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_ks_values in self.spring_ks_values: + torch.nn.init.ones_(cur_ks_values.weight) + cur_ks_values.weight.data = cur_ks_values.weight.data * 0.01 + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 
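+        # NOTE (added for clarity): throughout this module, small nn.Embedding tables like the
+        # ones above and below act as containers for optimizable scalar physics constants
+        # (contact/friction stiffness, damping, time constants) rather than as lookup tables over
+        # data; each constant is read back by indexing with a constant index tensor. A minimal
+        # sketch of the pattern (illustrative only, not part of the original code):
+        #   ks = nn.Embedding(num_embeddings=2, embedding_dim=1)
+        #   torch.nn.init.ones_(ks.weight)
+        #   k_a = ks(torch.zeros((1,), dtype=torch.long).cuda()).view(1)  # row 0
+        #   k_b = ks(torch.ones((1,), dtype=torch.long).cuda()).view(1)   # row 1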
+ + + ## [ \alpha, \beta ] ## + if self.nn_instances == 1: + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. / (778 * 2)) + else: + self.ks_weights = nn.ModuleList( + [ + nn.Embedding(num_embeddings=2, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_ks_weights in self.ks_weights: + torch.nn.init.ones_(cur_ks_weights.weight) # + cur_ks_weights.weight.data[1] = cur_ks_weights.weight.data[1] * (1. / (778 * 2)) + + + # sep_time_constant, sep_torque_time_constant, sep_damping_constant, sep_angular_damping_constant + self.sep_time_constant = self.time_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_time_constant.weight) # + + self.sep_torque_time_constant = self.time_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_torque_time_constant.weight) # + + self.sep_damping_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_damping_constant.weight) # # # # + self.sep_damping_constant.weight.data = self.sep_damping_constant.weight.data * 0.9 + + + self.sep_angular_damping_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_angular_damping_constant.weight) # # # # + self.sep_angular_damping_constant.weight.data = self.sep_angular_damping_constant.weight.data * 0.9 + + + if self.nn_instances == 1: + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + else: + self.time_constant = nn.ModuleList( + [ + nn.Embedding(num_embeddings=3, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_time_constant in self.time_constant: + torch.nn.init.ones_(cur_time_constant.weight) # + + if self.nn_instances == 1: + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + else: + self.damping_constant = nn.ModuleList( + [ + nn.Embedding(num_embeddings=3, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_damping_constant in self.damping_constant: + torch.nn.init.ones_(cur_damping_constant.weight) # # # # + cur_damping_constant.weight.data = cur_damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + + + # self.actuator_friction_forces = nn.Embedding( # actuator's forces # + # num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + + + if nn_instances == 1: + self.actuator_friction_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + else: + self.actuator_friction_forces = nn.ModuleList( + [nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) for _ in range(self.nn_instances) ] + ) + for cur_friction_force_net in self.actuator_friction_forces: + 
torch.nn.init.zeros_(cur_friction_force_net.weight) # + + + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. / (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": # periodict activation functions # + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. 
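+        # NOTE (added for clarity): the distance-based weighting model referenced above assigns
+        # each sampled active-mesh point an unnormalized weight w_i = k_a * exp(-k_b * d_i),
+        # where d_i is its distance to the nearest passive-object point; in forward() this is
+        # realized as ws_unnormed = ws_beta * exp(-10 * ws_alpha * d_i), with the weight zeroed
+        # for points farther than minn_dist_sampled_pts_passive_obj_thres.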
+ self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? # + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} # record the optimizable offset # + self.save_values = {} + # ws_normed, defed_input_pts_sdf, # + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... ## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # # active mesh # active mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. 
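+        # NOTE (added for clarity): the spring_qd comments earlier in this __init__ describe a
+        # distance-dependent stiffness k_d = q_d / (x - spring_x_min) and a restoring force
+        # spring_force = -k_d * (x - spring_rest_length). A minimal sketch of that rule, where
+        # x stands for a point-to-object distance (illustrative only, not part of the original code):
+        #   q_d = self.spring_qd(torch.zeros((1,), dtype=torch.long).cuda()).view(1)
+        #   k_d = q_d / (x - self.spring_x_min)                  # stiffness grows as x nears spring_x_min
+        #   spring_force = -k_d * (x - self.spring_rest_length)  # pulls x back toward the rest length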
+ # torch.nn.init.ones_(self.time_quaternions.weight) # # actuators + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + self.obj_sdf_th = None + self.obj_sdf_grad_th = None + + + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): # no_grad() + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays # + # + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + # ### + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + + def query_for_sdf(self, cur_pts, cur_frame_transformations): + # + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + # cur_pts: nn_pts x 3 # + # print(f"cur_pts: {cur_pts.size()}, cur_frame_translation: {cur_frame_translation.size()}, cur_frame_rotation: {cur_frame_rotation.size()}") + ### transformed pts ### + cur_transformed_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + # v = (v - center) * scale # + # sdf_space_center # + cur_transformed_pts_np = cur_transformed_pts.detach().cpu().numpy() + cur_transformed_pts_np = (cur_transformed_pts_np - np.reshape(self.sdf_space_center, (1, 3))) * self.sdf_space_scale + cur_transformed_pts_np = (cur_transformed_pts_np + 1.) / 2. 
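+        # NOTE (added for clarity): the normalized coordinates above are quantized below to integer
+        # voxel indices in [0, sdf_res - 1], and the precomputed SDF grid (self.obj_sdf) is read with
+        # a nearest-voxel lookup via batched_index_select along the x, y and z axes; when an SDF
+        # gradient grid (self.obj_sdf_grad) is available it is sampled the same way and returned
+        # together with the SDF values.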
+ cur_transformed_pts_xs = (cur_transformed_pts_np[:, 0] * self.sdf_res).astype(np.int32) # [x, y, z] of the transformed_pts_np # + cur_transformed_pts_ys = (cur_transformed_pts_np[:, 1] * self.sdf_res).astype(np.int32) + cur_transformed_pts_zs = (cur_transformed_pts_np[:, 2] * self.sdf_res).astype(np.int32) + + cur_transformed_pts_xs = np.clip(cur_transformed_pts_xs, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_ys = np.clip(cur_transformed_pts_ys, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_zs = np.clip(cur_transformed_pts_zs, a_min=0, a_max=self.sdf_res - 1) + + + if self.obj_sdf_th is None: + self.obj_sdf_th = torch.from_numpy(self.obj_sdf).float().cuda() + cur_transformed_pts_xs_th = torch.from_numpy(cur_transformed_pts_xs).long().cuda() + cur_transformed_pts_ys_th = torch.from_numpy(cur_transformed_pts_ys).long().cuda() + cur_transformed_pts_zs_th = torch.from_numpy(cur_transformed_pts_zs).long().cuda() + + cur_pts_sdf = batched_index_select(self.obj_sdf_th, cur_transformed_pts_xs_th, 0) + # print(f"After selecting the x-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the y-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the z-axis: {cur_pts_sdf.size()}") + + if self.obj_sdf_grad is not None: + if self.obj_sdf_grad_th is None: + self.obj_sdf_grad_th = torch.from_numpy(self.obj_sdf_grad).float().cuda() + self.obj_sdf_grad_th = self.obj_sdf_grad_th / torch.clamp(torch.norm(self.obj_sdf_grad_th, p=2, keepdim=True), min=1e-5) + cur_pts_sdf_grad = batched_index_select(self.obj_sdf_grad_th, cur_transformed_pts_xs_th, 0) # nn_pts x res x res x 3 + cur_pts_sdf_grad = batched_index_select(cur_pts_sdf_grad, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + cur_pts_sdf_grad = batched_index_select(cur_pts_sdf_grad, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + else: + cur_pts_sdf_grad = None + + + + # cur_pts_sdf = self.obj_sdf[cur_transformed_pts_xs] + # cur_pts_sdf = cur_pts_sdf[:, cur_transformed_pts_ys] + # cur_pts_sdf = cur_pts_sdf[:, :, cur_transformed_pts_zs] + # cur_pts_sdf = np.diagonal(cur_pts_sdf) + # print(f"cur_pts_sdf: {cur_pts_sdf.shape}") + # # gradient of sdf # + # # the contact force dierection should be the negative direction of the sdf gradient? 
# + # # it seems true # + # # get the cur_pts_sdf value # + # cur_pts_sdf = torch.from_numpy(cur_pts_sdf).float().cuda() + if cur_pts_sdf_grad is None: + return cur_pts_sdf + else: + return cur_pts_sdf, cur_pts_sdf_grad # return the grad as the + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None, i_instance=0, reference_mano_pts=None, sampled_verts_idxes=None, fix_obj=False, contact_pairs_set=None): + #### contact_pairs_set #### + ### from input_pts to new pts ### + # prev_pts_ts = input_pts_ts - 1 # + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion # # + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) # + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) # + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx # + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + # friction_qd = 0.1 # # + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> sampled points # + ori_nns = sampled_input_pts.size(0) + if sampled_verts_idxes is not None: + sampled_input_pts = sampled_input_pts[sampled_verts_idxes] + nn_sampled_input_pts = sampled_input_pts.size(0) + + if nex_pts_ts in timestep_to_active_mesh: + ### disp_sampled_input_pts = nex_sampled_input_pts - sampled_input_pts ### + nex_sampled_input_pts = timestep_to_active_mesh[nex_pts_ts].detach() + else: + nex_sampled_input_pts = timestep_to_active_mesh[input_pts_ts].detach() + nex_sampled_input_pts = nex_sampled_input_pts[sampled_verts_idxes] + + + # ws_normed = torch.ones((sampled_input_pts.size(0),), dtype=torch.float32).cuda() + # ws_normed = ws_normed / float(sampled_input_pts.size(0)) + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 20000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + + + # sampled_input_pts_normals = # + init_passive_obj_verts = timestep_to_passive_mesh[0] + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + # cur_passive_obj_rot, cur_passive_obj_trans # + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + + # + + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + 
center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() ## transform the normals ## + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + # nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh ### the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k # + # forces = friction_force + # ######## vel for frictions ######### # # maintain the contact / continuous contact -> patch contact + # coantacts in previous timesteps -> ### + + # cur actuation # + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # ######### optimize the actuator forces directly ######### + # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) # actuation embedding idxes # + # forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + if friction_forces is None: + if self.nn_instances == 1: + cur_actuation_friction_forces = self.actuator_friction_forces(cur_actuation_embedding_idxes) + else: + cur_actuation_friction_forces = self.actuator_friction_forces[i_instance](cur_actuation_embedding_idxes) + else: + if reference_mano_pts is not None: + ref_mano_pts_nn = reference_mano_pts.size(0) + cur_actuation_embedding_st_idx = ref_mano_pts_nn * input_pts_ts + cur_actuation_embedding_ed_idx = ref_mano_pts_nn * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + # nn_ref_pts x 3 # + # sampled_input_pts # + # r = 0.01 # + threshold_ball_r = 0.01 + dist_input_pts_to_reference_pts = torch.sum( + (sampled_input_pts.unsqueeze(1) - reference_mano_pts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_input_pts_to_reference_pts = torch.sqrt(dist_input_pts_to_reference_pts) + weights_input_to_reference = 0.5 - dist_input_pts_to_reference_pts + weights_input_to_reference[weights_input_to_reference < 0] = 0 + weights_input_to_reference[dist_input_pts_to_reference_pts > threshold_ball_r] = 0 + + 
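+                # NOTE (added for clarity): the soft ball-query weights built above are normalized below,
+                # but the weighted-average path is commented out; the friction force actually used for
+                # each sampled point is that of its single nearest reference MANO point, selected via
+                # batched_index_select with minn_idx_input_pts_to_reference_pts.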
minn_dist_input_pts_to_reference_pts, minn_idx_input_pts_to_reference_pts = torch.min(dist_input_pts_to_reference_pts, dim=-1) + + weights_input_to_reference[dist_input_pts_to_reference_pts == minn_dist_input_pts_to_reference_pts.unsqueeze(-1)] = 0.1 - dist_input_pts_to_reference_pts[dist_input_pts_to_reference_pts == minn_dist_input_pts_to_reference_pts.unsqueeze(-1)] + + weights_input_to_reference = weights_input_to_reference / torch.clamp(torch.sum(weights_input_to_reference, dim=-1, keepdim=True), min=1e-9) + + # cur_actuation_friction_forces = weights_input_to_reference.unsqueeze(-1) * cur_actuation_friction_forces.unsqueeze(0) # nn_input_pts x nn_ref_pts x 1 xxxx 1 x nn_ref_pts x 3 -> nn_input_pts x nn_ref_pts x 3 + # cur_actuation_friction_forces = cur_actuation_friction_forces.sum(dim=1) + + # cur_actuation_friction_forces * weights_input_to_reference.unsqueeze(-1) + cur_actuation_friction_forces = batched_index_select(cur_actuation_friction_forces, minn_idx_input_pts_to_reference_pts, dim=0) + else: + # cur_actuation_embedding_st_idx = 365428 * input_pts_ts + # cur_actuation_embedding_ed_idx = 365428 * (input_pts_ts + 1) + if sampled_verts_idxes is not None: + cur_actuation_embedding_st_idx = ori_nns * input_pts_ts + cur_actuation_embedding_ed_idx = ori_nns * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + cur_actuation_friction_forces = cur_actuation_friction_forces[sampled_verts_idxes] + else: + cur_actuation_embedding_st_idx = nn_sampled_input_pts * input_pts_ts + cur_actuation_embedding_ed_idx = nn_sampled_input_pts * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + # nn instances # # nninstances # # + if self.nn_instances == 1: + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + else: + ws_alpha = self.ks_weights[i_instance](torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + + # use_penalty_based_friction, use_disp_based_friction # + # ### get the nearest object point to the in-active object ### + # if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): + if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): # contact pairs set # # contact pairs set ## + # for each calculated contacts, calculate the current contact points reversed transformed to the contact local frame # + # use the reversed transformed active point and the previous rest contact point position to calculate the contact friction force # + # transform the force to the current contact frame # + # x_h^{cur} - x_o^{cur} --- add the frictions for the hand + # add the friction force onto the object point # # contact point position -> 
nn_contacts x 3 # + contact_active_point_pts, contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + # contact active pos and contact passive pos # contact_active_pos; contact_passive_pos; # + # contact_active_pos = sampled_input_pts[contact_active_idxes] # should not be inter_obj_pts... # + # contact_passive_pos = cur_passive_obj_verts[contact_passive_idxes] + # to the passive obje ###s + minn_idx_sampled_pts_to_passive_obj[contact_active_idxes] = contact_passive_idxes + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj = batched_index_select(dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1).squeeze(1) + # ### get the nearest object point to the in-active object ### + + + + # sampled_input_pts # + # inter_obj_pts # + # inter_obj_normals + + # nn_sampledjpoints # + # cur_passive_obj_ns # # inter obj normals # # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + cur_passive_obj_verts_pts_idxes = torch.arange(0, cur_passive_obj_verts.size(0), dtype=torch.long).cuda() # + inter_passive_obj_pts_idxes = cur_passive_obj_verts_pts_idxes[minn_idx_sampled_pts_to_passive_obj] + + # inter_obj_normals # + inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts.detach() + dot_inter_obj_pts_to_sampled_pts_normals = torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) + + # contact_pairs_set # + + ###### penetration penalty strategy v1 ###### + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 + # penetrating_depth = -1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1) + # penetrating_depth_penalty = penetrating_depth[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): # get the penetration penalties # + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v1 ###### + + + ###### penetration penalty strategy v2 ###### + # if input_pts_ts > 0: + # prev_active_obj = timestep_to_active_mesh[input_pts_ts - 1].detach() + # if sampled_verts_idxes is not None: + # prev_active_obj = prev_active_obj[sampled_verts_idxes] + # disp_prev_to_cur = sampled_input_pts - prev_active_obj + # disp_prev_to_cur = torch.norm(disp_prev_to_cur, dim=-1, p=2) + # penetrating_depth_penalty = disp_prev_to_cur[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + # else: + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v2 ###### + # + # jacobian matrix calculation? 
+ # jacobian + + ###### penetration penalty strategy v3 ###### + # if input_pts_ts > 0: + # cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + # cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + # queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + # penetrating_indicator = queried_sdf < 0 + # if sampled_verts_idxes is not None: + # prev_active_obj = prev_active_obj[sampled_verts_idxes] + # disp_prev_to_cur = sampled_input_pts - prev_active_obj + # disp_prev_to_cur = torch.norm(disp_prev_to_cur, dim=-1, p=2) + # penetrating_depth_penalty = disp_prev_to_cur[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # else: + # # cur_rot = torch.eye(3, dtype=torch.float32).cuda() + # # cur_trans = torch.zeros((3,), dtype=torch.float32).cuda() + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v3 ###### + + # ws_beta; 10 # # sum over the forces but not the weighted sum... # + ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) # ws_alpha # + ####### sharp the weights ####### + + # minn_dist_sampled_pts_passive_obj_thres = 0.05 + # # minn_dist_sampled_pts_passive_obj_thres = 0.001 # + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 # + minn_dist_sampled_pts_passive_obj_thres = self.minn_dist_sampled_pts_passive_obj_thres + + + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + # penetrating # + ### penetration strategy v4 #### + + if input_pts_ts > 0: + cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + penetrating_indicator = queried_sdf < 0 + else: + penetrating_indicator = torch.zeros_like(dot_inter_obj_pts_to_sampled_pts_normals).bool() + + + # if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): + # penetrating + + ### nearest #### + ''' decide forces via kinematics statistics ''' + ### nearest #### + # rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + # dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + + + # cannot be adapted to this easily # + # what's a better realization way? # + + + # dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. * dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] # + dist_sampled_pts_to_passive_obj[penetrating_indicator] = -1. * dist_sampled_pts_to_passive_obj[penetrating_indicator] + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + + in_contact_indicator = dist_sampled_pts_to_passive_obj <= minn_dist_sampled_pts_passive_obj_thres + + # in_contact_indicator + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + + # ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + + # penetrating_indicator = + + # penetrating + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 # + self.penetrating_indicator = penetrating_indicator + penetration_proj_ks = 0 - dot_inter_obj_pts_to_sampled_pts_normals + ### penetratio nproj penalty ### + penetration_proj_penalty = penetration_proj_ks * (-1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1)) + self.penetrating_depth_penalty = penetration_proj_penalty[penetrating_indicator].mean() + if torch.isnan(self.penetrating_depth_penalty): # get the penetration penalties # + self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + penetrating_points = sampled_input_pts[penetrating_indicator] + penetration_proj_k_to_robot = 1.0 # 0.7 + # penetration_proj_k_to_robot = 0.01 + penetration_proj_k_to_robot = 0.0 + penetrating_forces = penetration_proj_ks.unsqueeze(-1) * inter_obj_normals.detach() * penetration_proj_k_to_robot + penetrating_forces = penetrating_forces[penetrating_indicator] + self.penetrating_forces = penetrating_forces # + self.penetrating_points = penetrating_points # + ### penetration strategy v4 #### # another mophology # + + # maintain the forces # + + # # contact_pairs_set # # + + # for contact pair in the contact_pair_set, get the contact pair -> the mesh index of the passive object and the active object # + # the orientation of the contact frame # + # original contact point position of the contact pair # + # original orientation of the contact frame # + ##### get previous contact information ###### + # for cur_contact_pair in contact_pairs_set: + # # cur_contact_pair = (contact point position, contact frame orientation) # + # # contact_point_positon -> should be the contact position transformed to the local contact frame # + # contact_point_positon, (contact_passive_idx, contact_active_idx), contact_frame_pose = cur_contact_pair # + # # contact_point_positon of the contact pair # + # cur_active_pos = sampled_input_pts[contact_active_idx] # passive_position # + # # (original passive position - current passive position) * K_f = penalty based friction force # # # # + # cur_passive_pos = inter_obj_pts[contact_passive_idx] # active_position # + # # (the transformed passive position) # + # # + # # # the continuous active and passive pos ## + # # # the continuous active and passive pos ## + # # the continuous active and passive pos ## + # contact_frame_orientation, contact_frame_translation = contact_frame_pose # # set the orientation and the contact frame translation + # # orientation, translation # + # cur_inv_transformed_active_pos = torch.matmul( + # contact_frame_orientation.contiguous().transpose(1, 0).contiguous(), (cur_active_pos - contact_frame_translation.unsqueeze(0)).transpose(1, 0) + # ) + + + + # should be the contact penalty frictions added onto the passive object verts # + # use the frictional force to mainatian the contact here # + + # maintain the contact and calculate the penetrating forces and points for each timestep and then use the displacemnet to calculate the penalty based friction forces # + + + if self.nn_instances == 1: # spring ks values + # contact ks values # # if we set a fixed k value here # + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = 
self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + else: + contact_spring_ka = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values[i_instance](torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + + + ###### the contact force decided by the rest_length ###### # not very sure ... # + # contact_force_d = contact_spring_ka * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) # + contact_spring_kb * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 2 + contact_spring_kc * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 3 # + ###### the contact force decided by the rest_length ###### + + + ##### the contact force decided by the theshold ###### # realted to the distance threshold and the HO distance # + contact_force_d = contact_spring_ka * (self.minn_dist_sampled_pts_passive_obj_thres - dist_sampled_pts_to_passive_obj) + ###### the contact force decided by the threshold ###### + + ###### Get the tangential forces via optimizable forces ###### # dot along the normals ## + cur_actuation_friction_forces_along_normals = torch.sum(cur_actuation_friction_forces * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + tangential_vel = cur_actuation_friction_forces - cur_actuation_friction_forces_along_normals + ###### Get the tangential forces via optimizable forces ###### + + # cur actuation friction forces along normals # + + ###### Get the tangential forces via tangential velocities ###### + # vel_sampled_pts_along_normals = torch.sum(vel_sampled_pts * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + # tangential_vel = vel_sampled_pts - vel_sampled_pts_along_normals + ###### Get the tangential forces via tangential velocities ###### + + tangential_forces = tangential_vel * tangential_ks # tangential forces # + contact_force_d_scalar = contact_force_d.clone() # + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals) + + norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts, nnsampledpts # + penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. 
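# --- Editorial sketch (not part of the patch) ---------------------------------
# A compact sketch of the two force terms assembled above: a spring-style normal
# force that grows once a point is closer than the contact threshold, and a
# friction-cone penalty that quadratically punishes tangential forces larger
# than mu * |f_n|. The constants (contact_thres, spring_ka, static_mu) are
# placeholder values, and the normal-force sign convention simply mirrors the
# surrounding code (force oriented along -normal).
import torch

def contact_and_friction_penalty(dist, normals, friction_forces,
                                 contact_thres=0.05, spring_ka=10.0, static_mu=0.5):
    # dist: (N,) point-to-object distance, normals: (N, 3), friction_forces: (N, 3)
    contact_force_scalar = spring_ka * (contact_thres - dist)       # > 0 when in contact
    f_normal = contact_force_scalar.unsqueeze(-1) * (-normals)      # normal contact force
    # keep only the tangential part of the optimizable friction force
    f_along_n = torch.sum(friction_forces * normals, dim=-1, keepdim=True) * normals
    f_tangential = friction_forces - f_along_n
    norm_t = torch.norm(f_tangential, dim=-1)
    norm_n = torch.norm(f_normal, dim=-1)
    penalty = (norm_t - static_mu * norm_n) ** 2
    penalty[norm_t <= static_mu * norm_n] = 0.                      # inside the friction cone
    return f_normal, f_tangential, penalty.mean()
# -------------------------------------------------------------------------------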
+ penalty_friction_constraint = torch.mean(penalty_friction_constraint) + self.penalty_friction_constraint = penalty_friction_constraint # penalty friction + contact_force_d_scalar = norm_along_normals_forces.clone() + # penalty friction constraints # + penalty_friction_tangential_forces = torch.zeros_like(contact_force_d) + ''' Get the contact information that should be maintained''' + if contact_pairs_set is not None: # contact pairs set # # contact pairs set ## + # for each calculated contacts, calculate the current contact points reversed transformed to the contact local frame # + # use the reversed transformed active point and the previous rest contact point position to calculate the contact friction force # + # transform the force to the current contact frame # + # x_h^{cur} - x_o^{cur} --- add the frictions for the hand + # add the friction force onto the object point # # contact point position -> nn_contacts x 3 # + contact_active_point_pts, contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + # contact active pos and contact passive pos # contact_active_pos; contact_passive_pos; # + contact_active_pos = sampled_input_pts[contact_active_idxes] # should not be inter_obj_pts... # + contact_passive_pos = cur_passive_obj_verts[contact_passive_idxes] + + ''' Penalty based contact force v2 ''' + contact_frame_orientations, contact_frame_translations = contact_frame_pose + transformed_prev_contact_active_pos = torch.matmul( + contact_frame_orientations.contiguous(), contact_active_point_pts.unsqueeze(-1) + ).squeeze(-1) + contact_frame_translations + transformed_prev_contact_point_position = torch.matmul( + contact_frame_orientations.contiguous(), contact_point_position.unsqueeze(-1) + ).squeeze(-1) + contact_frame_translations + diff_transformed_prev_contact_passive_to_active = transformed_prev_contact_active_pos - transformed_prev_contact_point_position + # cur_contact_passive_pos_from_active = contact_passive_pos + diff_transformed_prev_contact_passive_to_active + cur_contact_passive_pos_from_active = contact_active_pos - diff_transformed_prev_contact_passive_to_active + + friction_k = 1.0 + # penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + # penalty_based_friction_forces = friction_k * (contact_active_pos - transformed_prev_contact_active_pos) + + # + # penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + penalty_based_friction_forces = friction_k * (cur_contact_passive_pos_from_active - contact_passive_pos) + ''' Penalty based contact force v2 ''' + + ''' Penalty based contact force v1 ''' + ###### Contact frame orientations and translations ###### + # contact_frame_orientations, contact_frame_translations = contact_frame_pose # (nn_contacts x 3 x 3) # (nn_contacts x 3) # + # # cur_passive_obj_verts # + # inv_transformed_contact_active_pos = torch.matmul( + # contact_frame_orientations.contiguous().transpose(2, 1).contiguous(), (contact_active_pos - contact_frame_translations).contiguous().unsqueeze(-1) + # ).squeeze(-1) # nn_contacts x 3 # + # inv_transformed_contact_passive_pos = torch.matmul( # contact frame translations # ## nn_contacts x 3 ## # # + # contact_frame_orientations.contiguous().transpose(2, 1).contiguous(), (contact_passive_pos - contact_frame_translations).contiguous().unsqueeze(-1) + # ).squeeze(-1) + # # inversely transformed cotnact active and passive pos # + + # # inv_transformed_contact_active_pos, 
inv_transformed_contact_passive_pos # + # ### contact point position ### # + # ### use the passive point disp ### + # # disp_active_pos = (inv_transformed_contact_active_pos - contact_point_position) # nn_contacts x 3 # + # ### use the active point disp ### + # # disp_active_pos = (inv_transformed_contact_active_pos - contact_active_point_pts) + # disp_active_pos = (inv_transformed_contact_active_pos - contact_active_point_pts) + # ### friction_k is equals to 1.0 ### + # friction_k = 1. + # # use the disp_active_pose as the penalty based friction forces # # nn_contacts x 3 # + # penalty_based_friction_forces = disp_active_pos * friction_k + + # # get the penalty based friction forces # + # penalty_based_friction_forces = torch.matmul( + # contact_frame_orientations.contiguous(), penalty_based_friction_forces.unsqueeze(-1) + # ).contiguous().squeeze(-1).contiguous() + ''' Penalty based contact force v1 ''' + + #### strategy 1: implement the dynamic friction forces #### + # dyn_friction_k = 1.0 # together with the friction_k # + # # dyn_friction_k # + # dyn_friction_force = dyn_friction_k * contact_force_d # nn_sampled_pts x 3 # + # dyn_friction_force # + # dyn_friction_force = # + # tangential velocities # # tangential velocities # + #### strategy 1: implement the dynamic friction forces #### + + #### strategy 2: do not use the dynamic friction forces #### + # equalt to use a hard selector to screen the friction forces # + # + # contact_force_d # # contact_force_d # + + valid_contact_force_d_scalar = contact_force_d_scalar[contact_active_idxes] + + + # penalty_based_friction_forces # + norm_penalty_based_friction_forces = torch.norm(penalty_based_friction_forces, dim=-1, p=2) + # valid penalty friction forces # # valid contact force d scalar # + valid_penalty_friction_forces_indicator = norm_penalty_based_friction_forces <= (valid_contact_force_d_scalar * self.static_friction_mu * 500) + valid_penalty_friction_forces_indicator[:] = True + + + summ_valid_penalty_friction_forces_indicator = torch.sum(valid_penalty_friction_forces_indicator.float()) + + # print(f"summ_valid_penalty_friction_forces_indicator: {summ_valid_penalty_friction_forces_indicator}") + # print(f"penalty_based_friction_forces: {penalty_based_friction_forces.size()}, summ_valid_penalty_friction_forces_indicator: {summ_valid_penalty_friction_forces_indicator}") + # tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.005 # * 1000. + + + contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + + # penalty_friction_tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * contact_spring_kb + + penalty_friction_tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * contact_friction_spring_cur + + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * contact_spring_kb + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. 
# based friction + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.02 + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.05 # + + else: + contact_active_idxes = None + self.contact_active_idxes = contact_active_idxes + valid_penalty_friction_forces_indicator = None + # tangential forces with inter obj normals # -> + dot_tangential_forces_with_inter_obj_normals = torch.sum(penalty_friction_tangential_forces * inter_obj_normals, dim=-1) ### nn_active_pts x # + penalty_friction_tangential_forces = penalty_friction_tangential_forces - dot_tangential_forces_with_inter_obj_normals.unsqueeze(-1) * inter_obj_normals + tangential_forces_clone = tangential_forces.clone() + # tangential_forces = torch.zeros_like(tangential_forces) ### + + # if contact_active_idxes is not None: + # self.contact_active_idxes = contact_active_idxes + # self.valid_penalty_friction_forces_indicator = valid_penalty_friction_forces_indicator # + # # print(f"here {summ_valid_penalty_friction_forces_indicator}") + # # tangential_forces[self.contact_active_idxes][self.valid_penalty_friction_forces_indicator] = tangential_forces_clone[self.contact_active_idxes][self.valid_penalty_friction_forces_indicator] + # contact_active_idxes_indicators = torch.ones((tangential_forces.size(0)), dtype=torch.float).cuda().bool() + # contact_active_idxes_indicators[:] = True + # contact_active_idxes_indicators[self.contact_active_idxes] = False + + # tangential_forces[contact_active_idxes_indicators] = 0. + + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # tangential forces # + # maxx_norm_tangential, _ = torch.max(norm_tangential_forces, dim=-1) + # minn_norm_tangential, _ = torch.min(norm_tangential_forces, dim=-1) + # print(f"maxx_norm_tangential: {maxx_norm_tangential}, minn_norm_tangential: {minn_norm_tangential}") + + # two + ### ## get new contacts ## ### + tot_contact_point_position = [] + tot_contact_active_point_pts = [] + tot_contact_active_idxes = [] + tot_contact_passive_idxes = [] + tot_contact_frame_rotations = [] + tot_contact_frame_translations = [] + + if torch.sum(in_contact_indicator.float()) > 0.5: # in contact indicator # + cur_in_contact_passive_pts = inter_obj_pts[in_contact_indicator] + cur_in_contact_passive_normals = inter_obj_normals[in_contact_indicator] + cur_in_contact_active_pts = sampled_input_pts[in_contact_indicator] # in_contact_active_pts # + + # in contact active pts # + # sampled input pts # + # cur_passive_obj_rot, cur_passive_obj_trans # + # cur_passive_obj_trans # + # cur_in_contact_activE_pts # + # in_contact_passive_pts # + cur_contact_frame_rotations = cur_passive_obj_rot.unsqueeze(0).repeat(cur_in_contact_passive_pts.size(0), 1, 1).contiguous() + cur_contact_frame_translations = cur_in_contact_passive_pts.clone() # + #### contact farme active points ##### -> ## + cur_contact_frame_active_pts = torch.matmul( + cur_contact_frame_rotations.contiguous().transpose(1, 2).contiguous(), (cur_in_contact_active_pts - cur_contact_frame_translations).contiguous().unsqueeze(-1) + ).squeeze(-1) ### cur_contact_frame_active_pts ### + cur_contact_frame_passive_pts = torch.matmul( + cur_contact_frame_rotations.contiguous().transpose(1, 2).contiguous(), (cur_in_contact_passive_pts - cur_contact_frame_translations).contiguous().unsqueeze(-1) + ).squeeze(-1) ### cur_contact_frame_active_pts ### + cur_in_contact_active_pts_all = torch.arange(0, sampled_input_pts.size(0)).long().cuda() + cur_in_contact_active_pts_all = 
cur_in_contact_active_pts_all[in_contact_indicator] + cur_inter_passive_obj_pts_idxes = inter_passive_obj_pts_idxes[in_contact_indicator] + # contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose + # cur_contact_frame_pose = (cur_contact_frame_rotations, cur_contact_frame_translations) + # contact_point_positions = cur_contact_frame_passive_pts # + # contact_active_idxes, cotnact_passive_idxes # + # contact_point_position = cur_contact_frame_passive_pts + # contact_active_idxes = cur_in_contact_active_pts_all + # contact_passive_idxes = cur_inter_passive_obj_pts_idxes + tot_contact_active_point_pts.append(cur_contact_frame_active_pts) + tot_contact_point_position.append(cur_contact_frame_passive_pts) # contact frame points + tot_contact_active_idxes.append(cur_in_contact_active_pts_all) # active_pts_idxes + tot_contact_passive_idxes.append(cur_inter_passive_obj_pts_idxes) # passive_pts_idxes + tot_contact_frame_rotations.append(cur_contact_frame_rotations) # rotations + tot_contact_frame_translations.append(cur_contact_frame_translations) # translations + + + ## + # ####### if contact_pairs_set is not None and torch.sum(valid_penalty_friction_forces_indicator.float()) > 0.5: ######## + # if contact_pairs_set is not None and torch.sum(valid_penalty_friction_forces_indicator.float()) > 0.5: + # # contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + # prev_contact_active_point_pts = contact_active_point_pts[valid_penalty_friction_forces_indicator] + # prev_contact_point_position = contact_point_position[valid_penalty_friction_forces_indicator] + # prev_contact_active_idxes = contact_active_idxes[valid_penalty_friction_forces_indicator] + # prev_contact_passive_idxes = contact_passive_idxes[valid_penalty_friction_forces_indicator] + # prev_contact_frame_rotations = contact_frame_orientations[valid_penalty_friction_forces_indicator] + # prev_contact_frame_translations = contact_frame_translations[valid_penalty_friction_forces_indicator] + + # tot_contact_active_point_pts.append(prev_contact_active_point_pts) + # tot_contact_point_position.append(prev_contact_point_position) + # tot_contact_active_idxes.append(prev_contact_active_idxes) + # tot_contact_passive_idxes.append(prev_contact_passive_idxes) + # tot_contact_frame_rotations.append(prev_contact_frame_rotations) + # tot_contact_frame_translations.append(prev_contact_frame_translations) + ####### if contact_pairs_set is not None and torch.sum(valid_penalty_friction_forces_indicator.float()) > 0.5: ######## + + + + if len(tot_contact_frame_rotations) > 0: + upd_contact_active_point_pts = torch.cat(tot_contact_active_point_pts, dim=0) + upd_contact_point_position = torch.cat(tot_contact_point_position, dim=0) + upd_contact_active_idxes = torch.cat(tot_contact_active_idxes, dim=0) + upd_contact_passive_idxes = torch.cat(tot_contact_passive_idxes, dim=0) + upd_contact_frame_rotations = torch.cat(tot_contact_frame_rotations, dim=0) + upd_contact_frame_translations = torch.cat(tot_contact_frame_translations, dim=0) + upd_contact_pairs_information = [upd_contact_active_point_pts, upd_contact_point_position, (upd_contact_active_idxes, upd_contact_passive_idxes), (upd_contact_frame_rotations, upd_contact_frame_translations)] + else: + upd_contact_pairs_information = None + + + + # # previus + if self.use_penalty_based_friction and self.use_disp_based_friction: + disp_friction_tangential_forces = nex_sampled_input_pts - sampled_input_pts + + 
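# --- Editorial sketch (not part of the patch) ---------------------------------
# A simplified reading of the contact-frame bookkeeping built just above: each
# new contact stores the active point in a local frame anchored at the passive
# contact point (rotation = current object rotation, local = R^T (p - t)); at a
# later step the stored local point is mapped through the updated frame and the
# gap to the current active point gives a spring-like penalty friction. The
# friction_k constant is a placeholder, and this is an interpretation sketch,
# not the exact per-index update performed by the method.
import torch

def to_contact_frame(active_pt, frame_rot, frame_trans):
    # frame_rot: (3, 3) world-from-local rotation, frame_trans: (3,) contact point
    return frame_rot.t() @ (active_pt - frame_trans)

def from_contact_frame(local_pt, frame_rot, frame_trans):
    return frame_rot @ local_pt + frame_trans

def penalty_friction_from_record(prev_local_active, cur_frame_rot, cur_frame_trans,
                                 cur_active_pt, friction_k=1.0):
    # where the previously touching point would be had it stuck to the object
    stuck_pt = from_contact_frame(prev_local_active, cur_frame_rot, cur_frame_trans)
    return friction_k * (cur_active_pt - stuck_pt)
# -------------------------------------------------------------------------------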
contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + disp_friction_tangential_forces = disp_friction_tangential_forces * contact_friction_spring_cur + disp_friction_tangential_forces_dot_normals = torch.sum( + disp_friction_tangential_forces * inter_obj_normals, dim=-1 + ) + disp_friction_tangential_forces = disp_friction_tangential_forces - disp_friction_tangential_forces_dot_normals.unsqueeze(-1) * inter_obj_normals + + penalty_friction_tangential_forces = disp_friction_tangential_forces + + + # # tangential forces # + # tangential_forces = tangential_forces * mult_weights.unsqueeze(-1) # # + ### strict cosntraints ### + if self.use_penalty_based_friction: + forces = penalty_friction_tangential_forces + contact_force_d # tantential forces and contact force d # + else: + # print(f"not using use_penalty_based_friction...") + tangential_forces_norm = torch.sum(tangential_forces ** 2, dim=-1) + pos_tangential_forces = tangential_forces[tangential_forces_norm > 1e-5] + # print(pos_tangential_forces) + forces = tangential_forces + contact_force_d # tantential forces and contact force d # + # forces = penalty_friction_tangential_forces + contact_force_d # tantential forces and contact force d # + ''' decide forces via kinematics statistics ''' + + ''' Decompose forces and calculate penalty froces ''' # + # penalty_dot_forces_normals, penalty_friction_constraint # # contraints # # + # # get the forces -> decompose forces # + dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + # forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + # tangential_forces = forces - forces_along_normals # tangential forces # # tangential forces ### tangential forces ## + # penalty_friction_tangential_forces = force - + + + #### penalty_friction_tangential_forces, tangential_forces #### + self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.tangential_forces = tangential_forces + + + penalty_dot_forces_normals = dot_forces_normals ** 2 + penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) # 1) must # 2) must # + self.penalty_dot_forces_normals = penalty_dot_forces_normals # + + + rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc # + + ###### sampled input pts to center ####### + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) # squeeze(1) # + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) # + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + if self.nn_instances == 1: + 
time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + else: + time_cons = self.time_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant[i_instance](torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + # cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # + + if not fix_obj: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + 
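# --- Editorial sketch (not part of the patch) ---------------------------------
# A toy version of the damped explicit update performed above: the aggregated
# rigid acceleration and torque are scaled by learned time constants, the
# previous velocity / angular velocity are damped, and the orientation
# quaternion is advanced with the standard first-order term q <- q + 0.5*(w ⊗ q).
# `update_quaternion` / `quaternion_to_matrix` in the repo are assumed to follow
# these conventions; the constants here are placeholders, a single time constant
# is reused for simplicity, and the toy additionally renormalizes the quaternion.
import torch

def quat_mul(q, r):  # (w, x, y, z) Hamilton product
    w1, x1, y1, z1 = q
    w2, x2, y2, z2 = r
    return torch.stack([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ])

def rigid_step(prev_vel, prev_w, prev_q, rigid_acc, torque,
               k_acc_to_vel=0.05, k_vel_to_offset=0.05, damping=0.9):
    cur_vel = rigid_acc * k_acc_to_vel + prev_vel * damping
    offset = k_vel_to_offset * cur_vel                       # translation increment
    cur_w = torque * k_acc_to_vel + prev_w * damping
    delta_angle = cur_w * k_acc_to_vel
    omega_quat = torch.cat([torch.zeros(1), delta_angle])    # pure quaternion (0, w)
    cur_q = prev_q + 0.5 * quat_mul(omega_quat, prev_q)      # first-order quaternion update
    cur_q = cur_q / torch.norm(cur_q)
    return cur_vel, offset, cur_w, cur_q
# -------------------------------------------------------------------------------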
self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + } + + return upd_contact_pairs_information + + + ### forward; + def forward2(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None, i_instance=0, reference_mano_pts=None, sampled_verts_idxes=None, fix_obj=False, contact_pairs_set=None): + #### contact_pairs_set #### + + # # + # prev_pts_ts = input_pts_ts - 1 # + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion # # + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) # + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) # + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx # + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + ''' Kinematics transformations from acc and torques ''' + + # friction_qd = 0.1 # + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> sampled points # + ori_nns = sampled_input_pts.size(0) + if sampled_verts_idxes is not None: + sampled_input_pts = sampled_input_pts[sampled_verts_idxes] + nn_sampled_input_pts = 
sampled_input_pts.size(0) + + if nex_pts_ts in timestep_to_active_mesh: ## + ### disp_sampled_input_pts = nex_sampled_input_pts - sampled_input_pts ### + nex_sampled_input_pts = timestep_to_active_mesh[nex_pts_ts].detach() + else: + nex_sampled_input_pts = timestep_to_active_mesh[input_pts_ts].detach() + nex_sampled_input_pts = nex_sampled_input_pts[sampled_verts_idxes] + + # ws_normed = torch.ones((sampled_input_pts.size(0),), dtype=torch.float32).cuda() + # ws_normed = ws_normed / float(sampled_input_pts.size(0)) + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 20000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + + + init_passive_obj_verts = timestep_to_passive_mesh[0] + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + # cur_passive_obj_rot, cur_passive_obj_trans # + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + + + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center # obj_center # + + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] # + # nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] # + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh ### the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k # + # forces = friction_force + # ######## vel for frictions ######### # # maintain the contact / continuous contact -> patch contact + # coantacts in previous timesteps -> ### + + # cur actuation # + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # ######### optimize the actuator forces directly ######### + # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) # actuation embedding idxes # + # forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + + # nn instances # # nninstances # # + if 
self.nn_instances == 1: + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + else: + ws_alpha = self.ks_weights[i_instance](torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + if self.use_sqrt_dist: + dist_sampled_pts_to_passive_obj = torch.norm( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)), dim=-1, p=2 + ) + else: + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + + # ### add the sqrt for calculate the l2 distance ### + # dist_sampled_pts_to_passive_obj = torch.sqrt(dist_sampled_pts_to_passive_obj) ### + + + # dist_sampled_pts_to_passive_obj = torch.norm( # nn_sampled_pts x nn_passive_pts + # (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)), dim=-1, p=2 + # ) + + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + + + # inte robj normals at the current frame # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + cur_passive_obj_verts_pts_idxes = torch.arange(0, cur_passive_obj_verts.size(0), dtype=torch.long).cuda() # + inter_passive_obj_pts_idxes = cur_passive_obj_verts_pts_idxes[minn_idx_sampled_pts_to_passive_obj] + + # inter_obj_normals # + inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts.detach() # sampled p + # dot_inter_obj_pts_to_sampled_pts_normals = torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) + + + ###### penetration penalty strategy v1 ###### + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 + # penetrating_depth = -1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1) + # penetrating_depth_penalty = penetrating_depth[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): # get the penetration penalties # + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v1 ###### + + # ws_beta; 10 # # sum over the forces but not the weighted sum... # + ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha * 10) # ws_alpha # + ####### sharp the weights ####### + + # minn_dist_sampled_pts_passive_obj_thres = 0.05 + # # minn_dist_sampled_pts_passive_obj_thres = 0.001 + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 + minn_dist_sampled_pts_passive_obj_thres = self.minn_dist_sampled_pts_passive_obj_thres + + + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) e + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + # penetrating # + ### penetration strategy v4 #### + + if input_pts_ts > 0: + cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + # obj_sdf_grad + if self.obj_sdf_grad is None: + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + else: + queried_sdf, queried_sdf_grad = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + inter_obj_normals = -1.0 * queried_sdf_grad + penetrating_indicator = queried_sdf < 0 + else: + + cur_rot = torch.eye(n=3, dtype=torch.float32).cuda() + cur_trans = torch.zeros((3,), dtype=torch.float32).cuda() + if self.obj_sdf_grad is None: + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + else: + queried_sdf, queried_sdf_grad = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + inter_obj_normals = -1.0 * queried_sdf_grad + penetrating_indicator = queried_sdf < 0 + + # penetrating_indicator = torch.zeros_like(dot_inter_obj_pts_to_sampled_pts_normals).bool() + # penetrating_indicator = torch.zeros((sampled_input_pts.size(0),), dtype=torch.bool).cuda().bool() + + # if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): + # penetrating + + ### nearest #### + ''' decide forces via kinematics statistics ''' + ### nearest #### + # rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + # dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + + penetrating_indicator_mult_factor = torch.ones_like(penetrating_indicator).float() + penetrating_indicator_mult_factor[penetrating_indicator] = -1. + + + # dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. * dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] # + # dist_sampled_pts_to_passive_obj[penetrating_indicator] = -1. * dist_sampled_pts_to_passive_obj[penetrating_indicator] + # dist_sampled_pts_to_passive_obj[penetrating_indicator] = -1. * dist_sampled_pts_to_passive_obj[penetrating_indicator] + dist_sampled_pts_to_passive_obj = dist_sampled_pts_to_passive_obj * penetrating_indicator_mult_factor + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + + ## minn_dist_sampled_pts_passive_obj_thres + in_contact_indicator = dist_sampled_pts_to_passive_obj <= minn_dist_sampled_pts_passive_obj_thres + + + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + ws_unnormed = torch.ones_like(ws_unnormed) + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + + + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 # + self.penetrating_indicator = penetrating_indicator + cur_inter_obj_normals = inter_obj_normals.clone().detach() + penetration_proj_ks = 0 - torch.sum(inter_obj_pts_to_sampled_pts * cur_inter_obj_normals, dim=-1) + ### penetratio nproj penalty ### + # inter_obj_pts_to_sampled_pts # + + # penetration_proj_penalty = penetration_proj_ks * (-1 * torch.sum(inter_obj_pts_to_sampled_pts * cur_inter_obj_normals, dim=-1)) + + # self.penetrating_depth_penalty = penetration_proj_penalty[penetrating_indicator].mean() + # if torch.isnan(self.penetrating_depth_penalty): # get the penetration penalties # + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + + penetration_proj_ks = -1. * queried_sdf + self.penetrating_depth_penalty = -1. * queried_sdf[penetrating_indicator].mean() + if torch.isnan(self.penetrating_depth_penalty): # + self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + + penetrating_points = sampled_input_pts[penetrating_indicator] + # penetration_proj_k_to_robot = 1.0 + # penetration_proj_k_to_robot = 0.01 + penetration_proj_k_to_robot = self.penetration_proj_k_to_robot + # penetration_proj_k_to_robot = 0.0 + penetrating_forces = penetration_proj_ks.unsqueeze(-1) * cur_inter_obj_normals * penetration_proj_k_to_robot + penetrating_forces = penetrating_forces[penetrating_indicator] + self.penetrating_forces = penetrating_forces # + self.penetrating_points = penetrating_points # + ### penetration strategy v4 #### # another mophology # + + + + if self.nn_instances == 1: # spring ks values + # contact ks values # # if we set a fixed k value here # + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + else: + contact_spring_ka = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values[i_instance](torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + + if self.use_split_params: + contact_spring_ka = self.spring_contact_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + if self.use_sqr_spring_stiffness: + contact_spring_ka = contact_spring_ka ** 2 + + ##### the contact force decided by the theshold ###### # realted to the distance threshold and the HO distance # + contact_force_d = contact_spring_ka * (self.minn_dist_sampled_pts_passive_obj_thres - dist_sampled_pts_to_passive_obj) + ###### the contact force decided by the threshold ###### + + + + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. 
* inter_obj_normals) + + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + # norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts, nnsampledpts # + # penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + # penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. + # penalty_friction_constraint = torch.mean(penalty_friction_constraint) + self.penalty_friction_constraint = torch.zeros((1,), dtype=torch.float32).cuda().mean() # penalty friction + # contact_force_d_scalar = norm_along_normals_forces.clone() + + + # penalty friction constraints # + penalty_friction_tangential_forces = torch.zeros_like(contact_force_d) + + + ''' Get the contact information that should be maintained''' + if contact_pairs_set is not None: + contact_active_point_pts, contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + # contact active pos and contact passive pos # contact_active_pos; contact_passive_pos; # + contact_active_pos = sampled_input_pts[contact_active_idxes] + contact_passive_pos = cur_passive_obj_verts[contact_passive_idxes] + + ''' Penalty based contact force v2 ''' + contact_frame_orientations, contact_frame_translations = contact_frame_pose + transformed_prev_contact_active_pos = torch.matmul( + contact_frame_orientations.contiguous(), contact_active_point_pts.unsqueeze(-1) + ).squeeze(-1) + contact_frame_translations + transformed_prev_contact_point_position = torch.matmul( + contact_frame_orientations.contiguous(), contact_point_position.unsqueeze(-1) + ).squeeze(-1) + contact_frame_translations + # transformed prev contact active pose # + diff_transformed_prev_contact_passive_to_active = transformed_prev_contact_active_pos - transformed_prev_contact_point_position + ### + # cur_contact_passive_pos_from_active = contact_passive_pos + diff_transformed_prev_contact_passive_to_active + # cur_contact_passive_pos_from_active = contact_active_pos - diff_transformed_prev_contact_passive_to_active + + # friction_k = 1.0 + # friction_k = 0.01 + # friction_k = 0.001 + # friction_k = 0.001 + friction_k = 1.0 + # penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + # penalty_based_friction_forces = friction_k * (contact_active_pos - transformed_prev_contact_active_pos) + + + if self.use_pre_proj_frictions: + # cur_passive_obj_ns[contact_passive_idxes] + cur_inter_passive_obj_ns = cur_passive_obj_ns[contact_passive_idxes] + cur_contact_passive_to_active_pos = contact_active_pos - contact_passive_pos + dot_cur_contact_passive_to_active_pos_with_ns = torch.sum( ### dot produce between passive to active and the passive ns ### + cur_inter_passive_obj_ns * cur_contact_passive_to_active_pos, dim=-1 + ) + cur_contact_passive_to_active_pos = cur_contact_passive_to_active_pos - dot_cur_contact_passive_to_active_pos_with_ns.unsqueeze(-1) * cur_inter_passive_obj_ns + + # contact passive posefrom active ## + # penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + penalty_based_friction_forces = friction_k * cur_contact_passive_to_active_pos + else: + # contact passive posefrom active ## + penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + # penalty_based_friction_forces = friction_k * (cur_contact_passive_pos_from_active - contact_passive_pos) + + + # a good way to 
optiize the actions? # + dist_contact_active_pos_to_passive_pose = torch.sum( + (contact_active_pos - contact_passive_pos) ** 2, dim=-1 + ) + dist_contact_active_pos_to_passive_pose = torch.sqrt(dist_contact_active_pos_to_passive_pose) + + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 0.1 + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 0.2 + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 0.3 + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 0.5 + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 1.0 + # remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= 1000.0 # remaining contact indicator + remaining_contact_indicator = dist_contact_active_pos_to_passive_pose <= self.contact_maintaining_dist_thres + ''' Penalty based contact force v2 ''' + + # tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.005 # * 1000. + + if self.use_split_params: + contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + else: + contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + + + if self.use_sqr_spring_stiffness: + contact_friction_spring_cur = contact_friction_spring_cur ** 2 + + contact_active_penalty_based_friction_forces = penalty_based_friction_forces * contact_friction_spring_cur + # penalty_friction_tangential_forces[contact_active_idxes] = penalty_based_friction_forces * contact_friction_spring_cur + + + ''' update contact_force_d ''' + ##### the contact force decided by the theshold ###### # realted to the distance threshold and the HO distance + if self.use_sqrt_dist: + dist_cur_active_to_passive = torch.norm( + (contact_active_pos - contact_passive_pos), dim=-1, p=2 + ) + else: + dist_cur_active_to_passive = torch.sum( + (contact_active_pos - contact_passive_pos) ** 2, dim=-1 + ) + + # ### add the sqrt for calculate the l2 distance ### + # dist_cur_active_to_passive = torch.sqrt(dist_cur_active_to_passive) + + # dist_cur_active_to_passive = torch.norm( + # (contact_active_pos - contact_passive_pos), dim=-1, p=2 + # ) + + + penetrating_indicator_mult_factor = torch.ones_like(penetrating_indicator).float() + penetrating_indicator_mult_factor[penetrating_indicator] = -1. # dist active to passive # + + cur_penetrating_indicator_mult_factor = penetrating_indicator_mult_factor[contact_active_idxes] + + # indicator mult factor # + dist_cur_active_to_passive = dist_cur_active_to_passive * cur_penetrating_indicator_mult_factor + + # dist_cur_active_to_passive[penetrating_indicator[contact_active_idxes]] = -1. * dist_cur_active_to_passive[penetrating_indicator[contact_active_idxes]] # + + # update contact d and inter obj normals # + cur_contact_d = contact_spring_ka * (self.minn_dist_sampled_pts_passive_obj_thres - dist_cur_active_to_passive) + # contact_force_d_scalar = contact_force_d.clone() # + cur_contact_d = cur_contact_d.unsqueeze(-1) * (-1. 
* cur_passive_obj_ns[contact_passive_idxes]) + + # inter_obj_normals[contact_active_idxes] = cur_passive_obj_ns[contact_passive_idxes] + # contact_force_d[contact_active_idxes] = cur_contact_d + + + + ### TODO: how to check the correctness of the switching between the static friction and the dynamic friction ### + if self.use_static_mus: # use staticc + ''' Use the relative scale between the static_mu * contact force and the penalty friction to decide dynamic and static frictions ''' + ### get norm of the contact force along the normal direction ### ## # + contact_active_force_d_scalar = torch.norm(cur_contact_d, dim=-1, p=2) + contact_active_penalty_based_friction_forces_norm = torch.norm(contact_active_penalty_based_friction_forces, p=2, dim=-1) + # contact_friction_static_mu = 10. + contact_friction_static_mu = 1. + contact_friction_static_mu = self.contact_friction_static_mu + ## remaining contact indicator ## + remaining_contact_indicator = contact_active_penalty_based_friction_forces_norm <= (contact_friction_static_mu * contact_active_force_d_scalar) # r + + ### not_remaining_contacts ### + not_remaining_contacts = contact_active_penalty_based_friction_forces_norm > (contact_friction_static_mu * contact_active_force_d_scalar) + + ## contact active penalty based friction forces ## + contact_active_penalty_based_friction_forces_dir = contact_active_penalty_based_friction_forces / torch.clamp(contact_active_penalty_based_friction_forces_norm.unsqueeze(-1), min=1e-5) + dyn_contact_active_penalty_based_friction_forces = contact_active_penalty_based_friction_forces_dir * (contact_friction_static_mu * contact_active_force_d_scalar).unsqueeze(-1) + + if self.debug: + avg_contact_active_penalty_based_friction_forces_norm = torch.mean(contact_active_penalty_based_friction_forces_norm) + avg_contact_active_force_d_scalar = torch.mean(contact_active_force_d_scalar) + print(f"avg_contact_active_force_d_scalar: {avg_contact_active_force_d_scalar}, avg_contact_active_penalty_based_friction_forces_norm: {avg_contact_active_penalty_based_friction_forces_norm}, contact_friction_static_mu: {contact_friction_static_mu}") + print(f"contact_active_force_d_scalar: {contact_active_force_d_scalar[:10]}, contact_active_penalty_based_friction_forces_norm: {contact_active_penalty_based_friction_forces_norm[:10]}") + nn_remaining_contact = torch.sum(remaining_contact_indicator).item() + print(f"nn_remaining_contact / tot_contacts: {nn_remaining_contact} / {contact_active_penalty_based_friction_forces.size(0)}") + + ### reamining contact based frictions ### remaining cotnacts and not remaining contacts # + not_remaining_contacts_mask = torch.zeros((contact_active_penalty_based_friction_forces.size(0)), dtype=torch.float32).cuda() + not_remaining_contacts_mask[not_remaining_contacts] = 1 + ''' Use the relative scale between the static_mu * contact force and the penalty friction to decide dynamic and static frictions ''' + + ''' Update penalty based friction forces ''' + contact_active_penalty_based_friction_forces = contact_active_penalty_based_friction_forces * (1. 
- not_remaining_contacts_mask).unsqueeze(-1) + dyn_contact_active_penalty_based_friction_forces * not_remaining_contacts_mask.unsqueeze(-1) + # + contact_active_penalty_based_friction_forces = torch.clamp(contact_active_penalty_based_friction_forces, min=-1e2, max=1e2) + ''' Update penalty based friction forces ''' # + ### TODO: how to check the correctness of the switching between the static friction and the dynamic friction ### + + + + expanded_contact_active_idxes = contact_active_idxes.unsqueeze(-1).contiguous().repeat(1, 3).contiguous() + penalty_friction_tangential_forces = torch.scatter(penalty_friction_tangential_forces, dim=0, index=expanded_contact_active_idxes, src=contact_active_penalty_based_friction_forces) + + if self.obj_sdf_grad is None: + inter_obj_normals = torch.scatter( + inter_obj_normals, dim=0, index=expanded_contact_active_idxes, src=cur_passive_obj_ns[contact_passive_idxes] + ) + + contact_force_d = torch.scatter( + contact_force_d, dim=0, index=expanded_contact_active_idxes, src=cur_contact_d + ) + ''' update contact_force_d ''' + + cur_act_weights[contact_active_idxes] = 1. + ws_unnormed[contact_active_idxes] = 1. + else: + contact_active_idxes = None + self.contact_active_idxes = contact_active_idxes + valid_penalty_friction_forces_indicator = None + penalty_based_friction_forces = None + # tangential forces with inter obj normals # -> #### + if torch.sum(cur_act_weights).item() > 0.5: + cur_act_weights = cur_act_weights / torch.sum(cur_act_weights) + + + # norm_penalty_friction_tangential_forces = torch.norm(penalty_friction_tangential_forces, dim=-1, p=2) + # maxx_norm_penalty_friction_tangential_forces, _ = torch.max(norm_penalty_friction_tangential_forces, dim=-1) + # minn_norm_penalty_friction_tangential_forces, _ = torch.min(norm_penalty_friction_tangential_forces, dim=-1) + # print(f"maxx_norm_penalty_friction_tangential_forces: {maxx_norm_penalty_friction_tangential_forces}, minn_norm_penalty_friction_tangential_forces: {minn_norm_penalty_friction_tangential_forces}") + + # tangetntial forces --- dot with normals # + if not self.use_pre_proj_frictions: # inter obj normals # + dot_tangential_forces_with_inter_obj_normals = torch.sum(penalty_friction_tangential_forces * inter_obj_normals, dim=-1) ### nn_active_pts x # + penalty_friction_tangential_forces = penalty_friction_tangential_forces - dot_tangential_forces_with_inter_obj_normals.unsqueeze(-1) * inter_obj_normals + + + # penalty_based_friction_forces # + # norm_penalty_friction_tangential_forces = torch.norm(penalty_friction_tangential_forces, dim=-1, p=2) + # # valid penalty friction forces # # valid contact force d scalar # + # maxx_norm_penalty_friction_tangential_forces, _ = torch.max(norm_penalty_friction_tangential_forces, dim=-1) + # minn_norm_penalty_friction_tangential_forces, _ = torch.min(norm_penalty_friction_tangential_forces, dim=-1) + # print(f"[After proj.] 
maxx_norm_penalty_friction_tangential_forces: {maxx_norm_penalty_friction_tangential_forces}, minn_norm_penalty_friction_tangential_forces: {minn_norm_penalty_friction_tangential_forces}") + + + # tangential_forces_clone = tangential_forces.clone() + # tangential_forces = torch.zeros_like(tangential_forces) ### + + # if contact_active_idxes is not None: + # self.contact_active_idxes = contact_active_idxes + # self.valid_penalty_friction_forces_indicator = valid_penalty_friction_forces_indicator # + # # print(f"here {summ_valid_penalty_friction_forces_indicator}") + # # tangential_forces[self.contact_active_idxes][self.valid_penalty_friction_forces_indicator] = tangential_forces_clone[self.contact_active_idxes][self.valid_penalty_friction_forces_indicator] + # contact_active_idxes_indicators = torch.ones((tangential_forces.size(0)), dtype=torch.float).cuda().bool() + # contact_active_idxes_indicators[:] = True + # contact_active_idxes_indicators[self.contact_active_idxes] = False + + # tangential_forces[contact_active_idxes_indicators] = 0. + + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # tangential forces # + # maxx_norm_tangential, _ = torch.max(norm_tangential_forces, dim=-1) + # minn_norm_tangential, _ = torch.min(norm_tangential_forces, dim=-1) + # print(f"maxx_norm_tangential: {maxx_norm_tangential}, minn_norm_tangential: {minn_norm_tangential}") + + ### ## get new contacts ## ### + tot_contact_point_position = [] + tot_contact_active_point_pts = [] + tot_contact_active_idxes = [] + tot_contact_passive_idxes = [] + tot_contact_frame_rotations = [] + tot_contact_frame_translations = [] + + + if contact_pairs_set is not None: # contact + if torch.sum(remaining_contact_indicator.float()) > 0.5: + # contact_active_point_pts, contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + remaining_contact_active_point_pts = contact_active_point_pts[remaining_contact_indicator] + remaining_contact_point_position = contact_point_position[remaining_contact_indicator] + remaining_contact_active_idxes = contact_active_idxes[remaining_contact_indicator] + remaining_contact_passive_idxes = contact_passive_idxes[remaining_contact_indicator] + remaining_contact_frame_rotations = contact_frame_orientations[remaining_contact_indicator] + remaining_contact_frame_translations = contact_frame_translations[remaining_contact_indicator] + tot_contact_point_position.append(remaining_contact_point_position) + tot_contact_active_point_pts.append(remaining_contact_active_point_pts) + tot_contact_active_idxes.append(remaining_contact_active_idxes) + tot_contact_passive_idxes.append(remaining_contact_passive_idxes) + tot_contact_frame_rotations.append(remaining_contact_frame_rotations) + tot_contact_frame_translations.append(remaining_contact_frame_translations) + + # remaining_contact_active_idxes + in_contact_indicator[remaining_contact_active_idxes] = False + + + + if torch.sum(in_contact_indicator.float()) > 0.5: # in contact indicator # + cur_in_contact_passive_pts = inter_obj_pts[in_contact_indicator] # get inter obj pts + # cur_in_contact_passive_normals = inter_obj_normals[in_contact_indicator] + cur_in_contact_active_pts = sampled_input_pts[in_contact_indicator] # in_contact_active_pts # + + + cur_contact_frame_rotations = cur_passive_obj_rot.unsqueeze(0).repeat(cur_in_contact_passive_pts.size(0), 1, 1).contiguous() + cur_contact_frame_translations = cur_in_contact_passive_pts.clone() # cur in contact passive points + #### 
contact farme active points #### + cur_contact_frame_active_pts = torch.matmul( + cur_contact_frame_rotations.contiguous().transpose(1, 2).contiguous(), (cur_in_contact_active_pts - cur_contact_frame_translations).contiguous().unsqueeze(-1) + ).squeeze(-1) + #### cur_contact_frame_active_pts #### + cur_contact_frame_passive_pts = torch.matmul( + cur_contact_frame_rotations.contiguous().transpose(1, 2).contiguous(), (cur_in_contact_passive_pts - cur_contact_frame_translations).contiguous().unsqueeze(-1) + ).squeeze(-1) ### cur_contact_frame_active_pts ### + cur_in_contact_active_pts_all = torch.arange(0, sampled_input_pts.size(0)).long().cuda() + cur_in_contact_active_pts_all = cur_in_contact_active_pts_all[in_contact_indicator] # in contact active indicator # + cur_inter_passive_obj_pts_idxes = inter_passive_obj_pts_idxes[in_contact_indicator] + # contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose + # cur_contact_frame_pose = (cur_contact_frame_rotations, cur_contact_frame_translations) + # contact_point_positions = cur_contact_frame_passive_pts # + # contact_active_idxes, cotnact_passive_idxes # + # contact_point_position = cur_contact_frame_passive_pts + # contact_active_idxes = cur_in_contact_active_pts_all + # contact_passive_idxes = cur_inter_passive_obj_pts_idxes + tot_contact_active_point_pts.append(cur_contact_frame_active_pts) + tot_contact_point_position.append(cur_contact_frame_passive_pts) # contact frame points + tot_contact_active_idxes.append(cur_in_contact_active_pts_all) # active_pts_idxes + tot_contact_passive_idxes.append(cur_inter_passive_obj_pts_idxes) # passive_pts_idxes + tot_contact_frame_rotations.append(cur_contact_frame_rotations) # rotations + tot_contact_frame_translations.append(cur_contact_frame_translations) # translations + + + + if len(tot_contact_frame_rotations) > 0: + upd_contact_active_point_pts = torch.cat(tot_contact_active_point_pts, dim=0) + upd_contact_point_position = torch.cat(tot_contact_point_position, dim=0) + upd_contact_active_idxes = torch.cat(tot_contact_active_idxes, dim=0) + upd_contact_passive_idxes = torch.cat(tot_contact_passive_idxes, dim=0) + upd_contact_frame_rotations = torch.cat(tot_contact_frame_rotations, dim=0) + upd_contact_frame_translations = torch.cat(tot_contact_frame_translations, dim=0) + upd_contact_pairs_information = [upd_contact_active_point_pts, upd_contact_point_position, (upd_contact_active_idxes, upd_contact_passive_idxes), (upd_contact_frame_rotations, upd_contact_frame_translations)] + else: + upd_contact_pairs_information = None + + + + if self.use_penalty_based_friction and self.use_disp_based_friction: + disp_friction_tangential_forces = nex_sampled_input_pts - sampled_input_pts + + contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + disp_friction_tangential_forces = disp_friction_tangential_forces * contact_friction_spring_cur + disp_friction_tangential_forces_dot_normals = torch.sum( + disp_friction_tangential_forces * inter_obj_normals, dim=-1 + ) + disp_friction_tangential_forces = disp_friction_tangential_forces - disp_friction_tangential_forces_dot_normals.unsqueeze(-1) * inter_obj_normals + + penalty_friction_tangential_forces = disp_friction_tangential_forces + + + # # tangential forces # + # tangential_forces = tangential_forces * mult_weights.unsqueeze(-1) # # + ### strict cosntraints ### + if self.use_penalty_based_friction: + forces = penalty_friction_tangential_forces + 
contact_force_d # tantential forces and contact force d # + else: + # print(f"not using use_penalty_based_friction...") + tangential_forces_norm = torch.sum(tangential_forces ** 2, dim=-1) + pos_tangential_forces = tangential_forces[tangential_forces_norm > 1e-5] + # print(pos_tangential_forces) + forces = tangential_forces + contact_force_d # tantential forces and contact force d # + # forces = penalty_friction_tangential_forces + contact_force_d # tantential forces and contact force d # + ''' decide forces via kinematics statistics ''' + + ''' Decompose forces and calculate penalty froces ''' # + # penalty_dot_forces_normals, penalty_friction_constraint # # contraints # # + # # get the forces -> decompose forces # + dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + # forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + # tangential_forces = forces - forces_along_normals # tangential forces # # tangential forces ### tangential forces ## + # penalty_friction_tangential_forces = force - + + + #### penalty_friction_tangential_forces, tangential_forces #### + self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.tangential_forces = penalty_friction_tangential_forces + + self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.contact_force_d = contact_force_d + self.penalty_based_friction_forces = penalty_based_friction_forces + + + penalty_dot_forces_normals = dot_forces_normals ** 2 # must in the negative direction of the object normal # + penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) # 1) must # 2) must # + self.penalty_dot_forces_normals = penalty_dot_forces_normals # + + + rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc # + + ###### sampled input pts to center ####### + if contact_pairs_set is not None: + inter_obj_pts[contact_active_idxes] = cur_passive_obj_verts[contact_passive_idxes] + + # center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + + center_point_to_sampled_pts = inter_obj_pts - passive_center_point.unsqueeze(0) + + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) # squeeze(1) # + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) # + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + if self.nn_instances == 1: + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + 
damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + else: + time_cons = self.time_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant[i_instance](torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + + if self.use_split_params: + + # sep_time_constant, sep_torque_time_constant, sep_damping_constant, sep_angular_damping_constant + time_cons = self.sep_time_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + time_cons_2 = self.sep_torque_time_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + damping_cons = self.sep_damping_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + damping_cons_2 = self.sep_angular_damping_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + + + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + ##### TMP ###### + # cur_vel = delta_vel + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() # + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + ##### TMP ###### + # cur_angular_vel = delta_angular_vel + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + + # prev + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + # cur_ + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def # update the current rigid def using the offset and the cur_rigid_def ## # + + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # + + if not fix_obj: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = 
cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, # quaternion + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + } + + return upd_contact_pairs_information + + + + + +class BendingNetworkActiveForceFieldForwardLagV18(nn.Module): + def __init__(self, # self # + d_in, # + multires, # fileds # + bending_n_timesteps, + bending_latent_size, + rigidity_hidden_dimensions, + rigidity_network_depth, + rigidity_use_latent=False, + use_rigidity_network=False, + nn_instances=1, + minn_dist_threshold=0.05, + ): # contact + # bending network active force field # + super(BendingNetworkActiveForceFieldForwardLagV18, self).__init__() + self.use_positionally_encoded_input = False + self.input_ch = 3 + self.input_ch = 1 + d_in = self.input_ch + self.output_ch = 3 + self.output_ch = 1 + self.bending_n_timesteps = bending_n_timesteps + self.bending_latent_size = bending_latent_size + self.use_rigidity_network = use_rigidity_network + self.rigidity_hidden_dimensions = rigidity_hidden_dimensions + self.rigidity_network_depth = rigidity_network_depth + self.rigidity_use_latent = rigidity_use_latent + + # simple scene editing. set to None during training. + self.rigidity_test_time_cutoff = None + self.test_time_scaling = None + self.activation_function = F.relu # F.relu, torch.sin + self.hidden_dimensions = 64 + self.network_depth = 5 + self.contact_dist_thres = 0.1 + self.skips = [] # do not include 0 and do not include depth-1 + use_last_layer_bias = True + self.use_last_layer_bias = use_last_layer_bias + + self.static_friction_mu = 1. + + self.embed_fn_fine = None # embed fn and the embed fn # + if multires > 0: + embed_fn, self.input_ch = get_embedder(multires, input_dims=d_in) + self.embed_fn_fine = embed_fn + + self.nn_uniformly_sampled_pts = 50000 + + self.cur_window_size = 60 # get + self.bending_n_timesteps = self.cur_window_size + 10 + self.nn_patch_active_pts = 50 + self.nn_patch_active_pts = 1 + + self.nn_instances = nn_instances + + self.contact_spring_rest_length = 2. 
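+
+        # NOTE: the contact and friction spring stiffnesses below are stored as small
+        # nn.Embedding tables (64 scalar entries each) so a separate stiffness can be
+        # optimized per timestep. A minimal sketch of the per-timestep lookup used later
+        # in forward() (when self.use_split_params is enabled):
+        #   ts = torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts
+        #   k_fric = self.spring_friction_ks_values(ts).view(1,)
+        #   if self.use_sqr_spring_stiffness: k_fric = k_fric ** 2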
+ + # self.minn_dist_sampled_pts_passive_obj_thres = 0.05 # minn_dist_threshold ### + self.minn_dist_sampled_pts_passive_obj_thres = minn_dist_threshold + + self.spring_contact_ks_values = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_contact_ks_values.weight) + self.spring_contact_ks_values.weight.data = self.spring_contact_ks_values.weight.data * 0.01 + + self.spring_friction_ks_values = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_friction_ks_values.weight) + self.spring_friction_ks_values.weight.data = self.spring_friction_ks_values.weight.data * 0.001 + + if self.nn_instances == 1: + self.spring_ks_values = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_ks_values.weight) + self.spring_ks_values.weight.data = self.spring_ks_values.weight.data * 0.01 + self.spring_ks_values.weight.data[:, :] = 0.1395 + else: + self.spring_ks_values = nn.ModuleList( + [ + nn.Embedding(num_embeddings=5, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_ks_values in self.spring_ks_values: + torch.nn.init.ones_(cur_ks_values.weight) + cur_ks_values.weight.data = cur_ks_values.weight.data * 0.01 + + self.inertia_div_factor = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.inertia_div_factor.weight) + # self.inertia_div_factor.weight.data[:, :] = 30.0 + self.inertia_div_factor.weight.data[:, :] = 20.0 + + + + self.bending_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + + self.bending_dir_latent = nn.Embedding( + num_embeddings=self.bending_n_timesteps, embedding_dim=self.bending_latent_size + ) + # dist_k_a = self.distance_ks_val(torch.zeros((1,)).long().cuda()).view(1) + # dist_k_b = self.distance_ks_val(torch.ones((1,)).long().cuda()).view(1) * 5# *# 0.1 + + # distance + self.distance_ks_val = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.distance_ks_val.weight) # distance_ks_val # + # self.distance_ks_val.weight.data[0] = self.distance_ks_val.weight.data[0] * 0.6160 ## + # self.distance_ks_val.weight.data[1] = self.distance_ks_val.weight.data[1] * 4.0756 ## + + self.ks_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_val.weight) + self.ks_val.weight.data = self.ks_val.weight.data * 0.2 + + + self.ks_friction_val = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_friction_val.weight) + self.ks_friction_val.weight.data = self.ks_friction_val.weight.data * 0.2 + + + ## [ \alpha, \beta ] ## + if self.nn_instances == 1: + self.ks_weights = nn.Embedding( + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.ks_weights.weight) # + self.ks_weights.weight.data[1] = self.ks_weights.weight.data[1] * (1. / (778 * 2)) + else: + self.ks_weights = nn.ModuleList( + [ + nn.Embedding(num_embeddings=2, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_ks_weights in self.ks_weights: + torch.nn.init.ones_(cur_ks_weights.weight) # + cur_ks_weights.weight.data[1] = cur_ks_weights.weight.data[1] * (1. 
/ (778 * 2)) + + + # sep_time_constant, sep_torque_time_constant, sep_damping_constant, sep_angular_damping_constant + self.sep_time_constant = self.time_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_time_constant.weight) # + + self.sep_torque_time_constant = self.time_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_torque_time_constant.weight) # + + self.sep_damping_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_damping_constant.weight) # # # # + # self.sep_damping_constant.weight.data = self.sep_damping_constant.weight.data * 0.9 + self.sep_damping_constant.weight.data = self.sep_damping_constant.weight.data * 0.2 + + + self.sep_angular_damping_constant = nn.Embedding( + num_embeddings=64, embedding_dim=1 + ) + torch.nn.init.ones_(self.sep_angular_damping_constant.weight) # # # # + # self.sep_angular_damping_constant.weight.data = self.sep_angular_damping_constant.weight.data * 0.9 + self.sep_angular_damping_constant.weight.data = self.sep_angular_damping_constant.weight.data * 0.2 + + + + + if self.nn_instances == 1: + self.time_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.time_constant.weight) # + else: + self.time_constant = nn.ModuleList( + [ + nn.Embedding(num_embeddings=3, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_time_constant in self.time_constant: + torch.nn.init.ones_(cur_time_constant.weight) # + + if self.nn_instances == 1: + self.damping_constant = nn.Embedding( + num_embeddings=3, embedding_dim=1 + ) + torch.nn.init.ones_(self.damping_constant.weight) # # # # + self.damping_constant.weight.data = self.damping_constant.weight.data * 0.9 + else: + self.damping_constant = nn.ModuleList( + [ + nn.Embedding(num_embeddings=3, embedding_dim=1) for _ in range(self.nn_instances) + ] + ) + for cur_damping_constant in self.damping_constant: + torch.nn.init.ones_(cur_damping_constant.weight) # # # # + cur_damping_constant.weight.data = cur_damping_constant.weight.data * 0.9 + + self.nn_actuators = 778 * 2 # vertices # + self.nn_actuation_forces = self.nn_actuators * self.cur_window_size + self.actuator_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_forces.weight) # + + + + # self.actuator_friction_forces = nn.Embedding( # actuator's forces # + # num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + + + if nn_instances == 1: + self.actuator_friction_forces = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) + torch.nn.init.zeros_(self.actuator_friction_forces.weight) # + else: + self.actuator_friction_forces = nn.ModuleList( + [nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=3 + ) for _ in range(self.nn_instances) ] + ) + for cur_friction_force_net in self.actuator_friction_forces: + torch.nn.init.zeros_(cur_friction_force_net.weight) # + + # simulator # + + self.actuator_weights = nn.Embedding( # actuator's forces # + num_embeddings=self.nn_actuation_forces + 10, embedding_dim=1 + ) + torch.nn.init.ones_(self.actuator_weights.weight) # + self.actuator_weights.weight.data = self.actuator_weights.weight.data * (1. 
/ (778 * 2)) + + + ''' patch force network and the patch force scale network ''' + self.patch_force_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 3)), # hidden_dimension x 1 -> the weights # + ] + ) + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + # if i == len(self.patch_force_network) - 1: + # torch.nn.init.xavier_uniform_(cc.bias) + # else: + if i < len(self.patch_force_network) - 1: + torch.nn.init.zeros_(cc.bias) + # torch.nn.init.zeros_(layer.bias) + self.patch_force_scale_network = nn.ModuleList( + [ + nn.Sequential(nn.Linear(3, self.hidden_dimensions), nn.ReLU()), + nn.Sequential(nn.Linear(self.hidden_dimensions, self.hidden_dimensions)), # with maxpoll layers # + nn.Sequential(nn.Linear(self.hidden_dimensions * 2, self.hidden_dimensions), nn.ReLU()), # + nn.Sequential(nn.Linear(self.hidden_dimensions, 1)), # hidden_dimension x 1 -> the weights # + ] + ) + with torch.no_grad(): + for i, layer in enumerate(self.patch_force_scale_network[:]): + for cc in layer: + if isinstance(cc, nn.Linear): ### ifthe lienar layer # # ## + torch.nn.init.kaiming_uniform_( + cc.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.patch_force_scale_network) - 1: + torch.nn.init.zeros_(cc.bias) + ''' patch force network and the patch force scale network ''' + + ''' the bending network ''' + # self.input_ch = 1 + self.network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, bias=use_last_layer_bias)]) + + # initialize weights + with torch.no_grad(): + for i, layer in enumerate(self.network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + # initialize final layer to zero weights to start out with straight rays + self.network[-1].weight.data *= 0.0 + if use_last_layer_bias: + self.network[-1].bias.data *= 0.0 + self.network[-1].bias.data += 0.2 + ''' the bending network ''' + + self.dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)]) + + with torch.no_grad(): + for i, layer in enumerate(self.dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + self.friction_input_dim = 3 + 3 + 1 + 3 ### + self.friction_network = [ + nn.Linear(self.friction_input_dim, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, 3) + ] + for i_sub_net, sub_net in enumerate(self.friction_network): + if isinstance(sub_net, nn.Linear): + if i_sub_net < len(self.friction_network) - 1: + torch.nn.init.kaiming_uniform_( + sub_net.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(sub_net.bias) + else: + torch.nn.init.zeros_(sub_net.weight) + torch.nn.init.zeros_(sub_net.bias) + self.friction_network = nn.Sequential( + *self.friction_network + ) + + + self.contact_normal_force_network = [ + nn.Linear(self.friction_input_dim, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, self.hidden_dimensions), nn.ReLU(), + nn.Linear(self.hidden_dimensions, 3) + ] + for i_sub_net, sub_net in enumerate(self.contact_normal_force_network): + if isinstance(sub_net, nn.Linear): + if i_sub_net < len(self.contact_normal_force_network) - 1: + torch.nn.init.kaiming_uniform_( + sub_net.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(sub_net.bias) + else: + torch.nn.init.zeros_(sub_net.weight) + torch.nn.init.zeros_(sub_net.bias) + self.contact_normal_force_network = nn.Sequential( + *self.contact_normal_force_network + ) + + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + 
[nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.weighting_network[:]): + if self.activation_function.__name__ == "sin": # periodict activation functions # + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + # weighting model via the distance # + # unormed_weight = k_a exp{-d * k_b} # weights # k_a; k_b # + # distances # the kappa # + self.weighting_model_ks = nn.Embedding( # k_a and k_b # + num_embeddings=2, embedding_dim=1 + ) + torch.nn.init.ones_(self.weighting_model_ks.weight) + self.spring_rest_length = 2. # + self.spring_x_min = -2. + self.spring_qd = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.spring_qd.weight) # q_d of the spring k_d model -- k_d = q_d / (x - self.spring_x_min) # + + self.obj_inertia = nn.Embedding( + num_embeddings=1, embedding_dim=3 + ) + torch.nn.init.ones_(self.obj_inertia.weight) + + self.optimizable_obj_mass = nn.Embedding( + num_embeddings=1, embedding_dim=1 + ) + torch.nn.init.ones_(self.optimizable_obj_mass.weight) + import math + self.optimizable_obj_mass.weight.data *= math.sqrt(30) + + + self.optimizable_spring_ks = nn.Embedding( + num_embeddings=5, embedding_dim=1 + ) + torch.nn.init.ones_(self.optimizable_spring_ks.weight) + # 400000000, 100000000 # optimizabale spring forces an the + self.optimizable_spring_ks.weight.data[0, :] = math.sqrt(1.0) + self.optimizable_spring_ks.weight.data[1, :] = math.sqrt(1.0) + + + + # optimizable_spring_ks_normal, optimizable_spring_ks_friction # + self.optimizable_spring_ks_normal = nn.Embedding( + num_embeddings=200, embedding_dim=1 + ) + torch.nn.init.ones_(self.optimizable_spring_ks_normal.weight) + # # 400000000, 100000000 # optimizabale spring forces an the + # self.optimizable_spring_ks.weight.data[0, :] = math.sqrt(1.0) + # self.optimizable_spring_ks.weight.data[1, :] = math.sqrt(1.0) + + self.optimizable_spring_ks_friction = nn.Embedding( + num_embeddings=200, embedding_dim=1 + ) + torch.nn.init.ones_(self.optimizable_spring_ks_friction.weight) + + + # spring_force = -k_d * \delta_x = -k_d * (x - self.spring_rest_length) # + # 1) sample points from the active robot's mesh; + # 2) calculate forces from sampled points to the action point; + # 3) use the weight model to calculate weights for each sampled point; + # 4) aggregate forces; + + self.timestep_to_vel = {} + self.timestep_to_point_accs = {} + # how to support frictions? # + ### TODO: initialize the t_to_total_def variable ### # tangential + self.timestep_to_total_def = {} + + self.timestep_to_input_pts = {} + self.timestep_to_optimizable_offset = {} # record the optimizable offset # + self.save_values = {} + # ws_normed, defed_input_pts_sdf, # + self.timestep_to_ws_normed = {} + self.timestep_to_defed_input_pts_sdf = {} + self.timestep_to_ori_input_pts = {} + self.timestep_to_ori_input_pts_sdf = {} + + self.use_opt_rigid_translations = False # load utils and the loading .... 
## + self.use_split_network = False + + self.timestep_to_prev_active_mesh_ori = {} + # timestep_to_prev_selected_active_mesh_ori, timestep_to_prev_selected_active_mesh # # active mesh # active mesh # + self.timestep_to_prev_selected_active_mesh_ori = {} + self.timestep_to_prev_selected_active_mesh = {} + + self.timestep_to_spring_forces = {} + self.timestep_to_spring_forces_ori = {} + + # timestep_to_angular_vel, timestep_to_quaternion # + self.timestep_to_angular_vel = {} + self.timestep_to_quaternion = {} + self.timestep_to_torque = {} + + self.timestep_to_accum_acc = {} + + + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion + self.timestep_to_optimizable_total_def = {} + self.timestep_to_optimizable_quaternion = {} + self.timestep_to_optimizable_rot_mtx = {} + self.timestep_to_aggregation_weights = {} + self.timestep_to_sampled_pts_to_passive_obj_dist = {} + + self.time_quaternions = nn.Embedding( + num_embeddings=60, embedding_dim=4 + ) + self.time_quaternions.weight.data[:, 0] = 1. + self.time_quaternions.weight.data[:, 1] = 0. + self.time_quaternions.weight.data[:, 2] = 0. + self.time_quaternions.weight.data[:, 3] = 0. + # torch.nn.init.ones_(self.time_quaternions.weight) # # actuators + + self.time_translations = nn.Embedding( # tim + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_translations.weight) # + + self.time_forces = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_forces.weight) # + + # self.time_velocities = nn.Embedding( + # num_embeddings=60, embedding_dim=3 + # ) + # torch.nn.init.zeros_(self.time_velocities.weight) # + self.time_torques = nn.Embedding( + num_embeddings=60, embedding_dim=3 + ) + torch.nn.init.zeros_(self.time_torques.weight) # + + self.obj_sdf_th = None + self.obj_sdf_grad_th = None + + self.normal_plane_max_y = torch.tensor([0, 1., 0], dtype=torch.float32).cuda() ## 0, 1, 0 + self.normal_plane_min_y = torch.tensor([0, -1., 0.], dtype=torch.float32).cuda() # + + self.normal_plane_max_x = torch.tensor([1, 0, 0], dtype=torch.float32).cuda() ## 0, 1, 0 + self.normal_plane_min_x = torch.tensor([-1, 0., 0.], dtype=torch.float32).cuda() # + + self.normal_plane_max_z = torch.tensor([0, 0, 1.], dtype=torch.float32).cuda() ## 0, 1, 0 + self.normal_plane_min_z = torch.tensor([0, 0, -1.], dtype=torch.float32).cuda() # + + ## set the initial passive object verts and normals ### + ## the default scene is the box scene ## + self.penetration_determining = "plane_primitives" + self.canon_passive_obj_verts = None + self.canon_passive_obj_normals = None + self.train_residual_normal_forces = False + + self.lin_damping_coefs = nn.Embedding( # tim + num_embeddings=150, embedding_dim=1 + ) + torch.nn.init.ones_(self.lin_damping_coefs.weight) # (1.0 - damping) * prev_ts_vel + cur_ts_delta_vel + + self.ang_damping_coefs = nn.Embedding( # tim + num_embeddings=150, embedding_dim=1 + ) + torch.nn.init.ones_(self.ang_damping_coefs.weight) # (1.0 - samping_coef) * prev_ts_vel + cur_ts_delta_vel # + + + + def set_split_bending_network(self, ): + self.use_split_network = True + ##### split network single ##### ## + self.split_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, self.output_ch, 
bias=self.use_last_layer_bias)] + ) + with torch.no_grad(): + for i, layer in enumerate(self.split_network[:-1]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays + self.split_network[-1].weight.data *= 0.0 + if self.use_last_layer_bias: + self.split_network[-1].bias.data *= 0.0 + self.split_network[-1].bias.data += 0.2 + ##### split network single ##### + + + self.split_dir_network = nn.ModuleList( + [nn.Linear(self.input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 3)] + ) + with torch.no_grad(): # no_grad() + for i, layer in enumerate(self.split_dir_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + torch.nn.init.zeros_(layer.bias) + + # initialize final layer to zero weights to start out with straight rays # + # + # self.split_dir_network[-1].weight.data *= 0.0 + # if self.use_last_layer_bias: + # self.split_dir_network[-1].bias.data *= 0.0 + ##### split network single ##### + + + # ### + ## weighting_network for the network ## + self.weighting_net_input_ch = 3 + self.split_weighting_network = nn.ModuleList( + [nn.Linear(self.weighting_net_input_ch + self.bending_latent_size, self.hidden_dimensions)] + + [nn.Linear(self.weighting_net_input_ch + self.hidden_dimensions, self.hidden_dimensions) + if i + 1 in self.skips + else nn.Linear(self.hidden_dimensions, self.hidden_dimensions) + for i in range(self.network_depth - 2)] + + [nn.Linear(self.hidden_dimensions, 1)]) + + with torch.no_grad(): + for i, layer in enumerate(self.split_weighting_network[:]): + if self.activation_function.__name__ == "sin": + # SIREN ( Implicit Neural Representations with Periodic Activation Functions + # https://arxiv.org/pdf/2006.09661.pdf Sec. 
3.2) + if type(layer) == nn.Linear: + a = ( + 1.0 / layer.in_features + if i == 0 + else np.sqrt(6.0 / layer.in_features) + ) + layer.weight.uniform_(-a, a) + elif self.activation_function.__name__ == "relu": + torch.nn.init.kaiming_uniform_( + layer.weight, a=0, mode="fan_in", nonlinearity="relu" + ) + if i < len(self.split_weighting_network) - 1: + torch.nn.init.zeros_(layer.bias) + + def uniformly_sample_pts(self, tot_pts, nn_samples): + tot_pts_prob = torch.ones_like(tot_pts[:, 0]) + tot_pts_prob = tot_pts_prob / torch.sum(tot_pts_prob) + pts_dist = Categorical(tot_pts_prob) + sampled_pts_idx = pts_dist.sample((nn_samples,)) + sampled_pts_idx = sampled_pts_idx.squeeze() + sampled_pts = tot_pts[sampled_pts_idx] + return sampled_pts + + + def query_for_sdf(self, cur_pts, cur_frame_transformations): + # + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + # cur_pts: nn_pts x 3 # + # print(f"cur_pts: {cur_pts.size()}, cur_frame_translation: {cur_frame_translation.size()}, cur_frame_rotation: {cur_frame_rotation.size()}") + ### transformed pts ### + # cur_transformed_pts = torch.matmul( + # cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0)).transpose(1, 0) + # ).transpose(1, 0) + # center_init_passive_obj_verts # + cur_transformed_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0) - self.center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + self.center_init_passive_obj_verts.unsqueeze(0) + # v = (v - center) * scale # + # sdf_space_center # + cur_transformed_pts_np = cur_transformed_pts.detach().cpu().numpy() + cur_transformed_pts_np = (cur_transformed_pts_np - np.reshape(self.sdf_space_center, (1, 3))) * self.sdf_space_scale + cur_transformed_pts_np = (cur_transformed_pts_np + 1.) / 2. 
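+        # The normalized coordinates are discretized into voxel indices of the precomputed
+        # SDF grid below: scale by self.sdf_res, cast to int32, clamp to [0, self.sdf_res - 1],
+        # then gather the per-point SDF values (and, when self.obj_sdf_grad is available,
+        # the normalized SDF gradients) with batched_index_select.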
+ cur_transformed_pts_xs = (cur_transformed_pts_np[:, 0] * self.sdf_res).astype(np.int32) # [x, y, z] of the transformed_pts_np # + cur_transformed_pts_ys = (cur_transformed_pts_np[:, 1] * self.sdf_res).astype(np.int32) + cur_transformed_pts_zs = (cur_transformed_pts_np[:, 2] * self.sdf_res).astype(np.int32) + + cur_transformed_pts_xs = np.clip(cur_transformed_pts_xs, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_ys = np.clip(cur_transformed_pts_ys, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_zs = np.clip(cur_transformed_pts_zs, a_min=0, a_max=self.sdf_res - 1) + + + if self.obj_sdf_th is None: + self.obj_sdf_th = torch.from_numpy(self.obj_sdf).float().cuda() + cur_transformed_pts_xs_th = torch.from_numpy(cur_transformed_pts_xs).long().cuda() + cur_transformed_pts_ys_th = torch.from_numpy(cur_transformed_pts_ys).long().cuda() + cur_transformed_pts_zs_th = torch.from_numpy(cur_transformed_pts_zs).long().cuda() + + cur_pts_sdf = batched_index_select(self.obj_sdf_th, cur_transformed_pts_xs_th, 0) + # print(f"After selecting the x-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the y-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the z-axis: {cur_pts_sdf.size()}") + + if self.obj_sdf_grad is not None: + if self.obj_sdf_grad_th is None: + self.obj_sdf_grad_th = torch.from_numpy(self.obj_sdf_grad).float().cuda() + self.obj_sdf_grad_th = self.obj_sdf_grad_th / torch.clamp(torch.norm(self.obj_sdf_grad_th, p=2, keepdim=True, dim=-1), min=1e-5) + cur_pts_sdf_grad = batched_index_select(self.obj_sdf_grad_th, cur_transformed_pts_xs_th, 0) # nn_pts x res x res x 3 + cur_pts_sdf_grad = batched_index_select(cur_pts_sdf_grad, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + cur_pts_sdf_grad = batched_index_select(cur_pts_sdf_grad, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # cur_pts_sdf_grad = cur_pts_sdf_grad / torch + else: + cur_pts_sdf_grad = None + + # cur_pts_sdf = self.obj_sdf[cur_transformed_pts_xs] + # cur_pts_sdf = cur_pts_sdf[:, cur_transformed_pts_ys] + # cur_pts_sdf = cur_pts_sdf[:, :, cur_transformed_pts_zs] + # cur_pts_sdf = np.diagonal(cur_pts_sdf) + # print(f"cur_pts_sdf: {cur_pts_sdf.shape}") + # # gradient of sdf # + # # the contact force dierection should be the negative direction of the sdf gradient? 
# + # # it seems true # + # # get the cur_pts_sdf value # + # cur_pts_sdf = torch.from_numpy(cur_pts_sdf).float().cuda() + if cur_pts_sdf_grad is None: + return cur_pts_sdf + else: + return cur_pts_sdf, cur_pts_sdf_grad # return the grad as the + + + def query_for_sdf_of_canon_obj(self, cur_pts, cur_frame_transformations): + + # + + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + + cur_transformed_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + + # cur_transformed_pts = torch.matmul( + # cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0) - self.center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0) + # ).transpose(1, 0) + self.center_init_passive_obj_verts.unsqueeze(0) + # # v = (v - center) * scale # + # sdf_space_center # + cur_transformed_pts_np = cur_transformed_pts.detach().cpu().numpy() + # + cur_transformed_pts_np = (cur_transformed_pts_np - np.reshape(self.sdf_space_center, (1, 3))) * self.sdf_space_scale + cur_transformed_pts_np = (cur_transformed_pts_np + 1.) / 2. + cur_transformed_pts_xs = (cur_transformed_pts_np[:, 0] * self.sdf_res).astype(np.int32) # [x, y, z] of the transformed_pts_np # + cur_transformed_pts_ys = (cur_transformed_pts_np[:, 1] * self.sdf_res).astype(np.int32) + cur_transformed_pts_zs = (cur_transformed_pts_np[:, 2] * self.sdf_res).astype(np.int32) + + cur_transformed_pts_xs = np.clip(cur_transformed_pts_xs, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_ys = np.clip(cur_transformed_pts_ys, a_min=0, a_max=self.sdf_res - 1) + cur_transformed_pts_zs = np.clip(cur_transformed_pts_zs, a_min=0, a_max=self.sdf_res - 1) + + + if self.obj_sdf_th is None: + self.obj_sdf_th = torch.from_numpy(self.obj_sdf).float().cuda() + cur_transformed_pts_xs_th = torch.from_numpy(cur_transformed_pts_xs).long().cuda() + cur_transformed_pts_ys_th = torch.from_numpy(cur_transformed_pts_ys).long().cuda() + cur_transformed_pts_zs_th = torch.from_numpy(cur_transformed_pts_zs).long().cuda() + + cur_pts_sdf = batched_index_select(self.obj_sdf_th, cur_transformed_pts_xs_th, 0) + # print(f"After selecting the x-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the y-axis: {cur_pts_sdf.size()}") + cur_pts_sdf = batched_index_select(cur_pts_sdf, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # print(f"After selecting the z-axis: {cur_pts_sdf.size()}") + + if self.obj_sdf_grad is not None: + if self.obj_sdf_grad_th is None: + self.obj_sdf_grad_th = torch.from_numpy(self.obj_sdf_grad).float().cuda() + self.obj_sdf_grad_th = self.obj_sdf_grad_th / torch.clamp(torch.norm(self.obj_sdf_grad_th, p=2, keepdim=True, dim=-1), min=1e-5) + cur_pts_sdf_grad = batched_index_select(self.obj_sdf_grad_th, cur_transformed_pts_xs_th, 0) # nn_pts x res x res x 3 + cur_pts_sdf_grad = batched_index_select(cur_pts_sdf_grad, cur_transformed_pts_ys_th.unsqueeze(-1), 1).squeeze(1) + cur_pts_sdf_grad = batched_index_select(cur_pts_sdf_grad, cur_transformed_pts_zs_th.unsqueeze(-1), 1).squeeze(1) + # cur_pts_sdf_grad = cur_pts_sdf_grad / torch + else: + cur_pts_sdf_grad = None + + # cur_pts_sdf = self.obj_sdf[cur_transformed_pts_xs] + # cur_pts_sdf = cur_pts_sdf[:, cur_transformed_pts_ys] + # cur_pts_sdf = cur_pts_sdf[:, :, cur_transformed_pts_zs] + # cur_pts_sdf = np.diagonal(cur_pts_sdf) + 
# print(f"cur_pts_sdf: {cur_pts_sdf.shape}") + # # the contact force dierection should be the negative direction of the sdf gradient? # + # # get the cur_pts_sdf value # + # cur_pts_sdf = torch.from_numpy(cur_pts_sdf).float().cuda() + if cur_pts_sdf_grad is None: + return cur_pts_sdf + else: + return cur_pts_sdf, cur_pts_sdf_grad # return the grad as the + + ## query for cotnacting + def query_for_contacting_ball_primitives(self, cur_pts, cur_frame_transformations): + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + + inv_transformed_queried_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0) - self.center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + self.center_init_passive_obj_verts.unsqueeze(0) + + # center_verts, ball_r # + center_verts = self.center_verts + ball_r = self.ball_r + dist_inv_transformed_pts_w_center_ball = torch.sum( + (inv_transformed_queried_pts - center_verts.unsqueeze(0)) ** 2, dim=-1 ## + ) + + penetration_indicators = dist_inv_transformed_pts_w_center_ball <= (ball_r ** 2) + + # maxx_dist_to_planes, projected_plane_pts_transformed, projected_plane_normals_transformed, projected_plane_pts, selected_plane_normals + dir_center_to_ball = inv_transformed_queried_pts - center_verts.unsqueeze(0) ## nn_pts x 3 ## + norm_center_to_ball = torch.norm(dir_center_to_ball, dim=-1, p=2, keepdim=True) + dir_center_to_ball = dir_center_to_ball / torch.clamp(torch.norm(dir_center_to_ball, dim=-1, p=2, keepdim=True), min=1e-6) + sd_dist = norm_center_to_ball - ball_r + projected_ball_pts = center_verts.unsqueeze(0) + dir_center_to_ball * ball_r + projected_ball_normals = dir_center_to_ball.clone() + + projected_ball_normals_transformed = torch.matmul( + cur_frame_rotation, projected_ball_normals.contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + projected_ball_pts_transformed = torch.matmul( ## center init passive obj verts + cur_frame_rotation, (projected_ball_pts - self.center_init_passive_obj_verts.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + cur_frame_translation.unsqueeze(0) + self.center_init_passive_obj_verts.unsqueeze(0) + + return penetration_indicators, sd_dist, projected_ball_pts_transformed, projected_ball_normals_transformed, projected_ball_pts, projected_ball_normals + + ## because of ## because of + def query_for_contacting_primitives(self, cur_pts, cur_frame_transformations): + # cur_frame rotation -> 3 x 3 rtoations # translation -> 3 translations # + cur_frame_rotation, cur_frame_translation = cur_frame_transformations + # cur_pts: nn_pts x 3 # + # print(f"cur_pts: {cur_pts.size()}, cur_frame_translation: {cur_frame_translation.size()}, cur_frame_rotation: {cur_frame_rotation.size()}") + ### transformed pts ### + # inv_transformed_queried_pts = torch.matmul( + # cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0)).transpose(1, 0) + # ).transpose(1, 0) + inv_transformed_queried_pts = torch.matmul( + cur_frame_rotation.contiguous().transpose(1, 0).contiguous(), (cur_pts - cur_frame_translation.unsqueeze(0) - self.center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0) + ).transpose(1, 0) + self.center_init_passive_obj_verts.unsqueeze(0) + + # maxximum # jut define the maxim # + # normal to six palnes -> + # normal to six planes -> + maxx_init_passive_mesh = self.maxx_init_passive_mesh 
+ minn_init_passive_mesh = self.minn_init_passive_mesh # + + + # max y-coordiante; min y-coordiante; max + dist_to_plane_max_y = torch.sum((inv_transformed_queried_pts - maxx_init_passive_mesh.unsqueeze(0)) * self.normal_plane_max_y.unsqueeze(0), dim=-1) ### signed distance to the upper s + # maximum distnace? # + dist_to_plane_min_y = torch.sum((inv_transformed_queried_pts - minn_init_passive_mesh.unsqueeze(0)) * self.normal_plane_min_y.unsqueeze(0), dim=-1) ### signed distance to the lower surface # + + dist_to_plane_max_z = torch.sum((inv_transformed_queried_pts - maxx_init_passive_mesh.unsqueeze(0)) * self.normal_plane_max_z.unsqueeze(0), dim=-1) ### signed distance to the upper s + # maximum distnace? # + dist_to_plane_min_z = torch.sum((inv_transformed_queried_pts - minn_init_passive_mesh.unsqueeze(0)) * self.normal_plane_min_z.unsqueeze(0), dim=-1) ### signed distance to the lower surface # + + dist_to_plane_max_x = torch.sum((inv_transformed_queried_pts - maxx_init_passive_mesh.unsqueeze(0)) * self.normal_plane_max_x.unsqueeze(0), dim=-1) ### signed distance to the upper s + # maximum distnace? # + dist_to_plane_min_x = torch.sum((inv_transformed_queried_pts - minn_init_passive_mesh.unsqueeze(0)) * self.normal_plane_min_x.unsqueeze(0), dim=-1) ### signed distance to the lower surface # + tot_dist_to_planes = torch.stack( + [dist_to_plane_max_y, dist_to_plane_min_y, dist_to_plane_max_z, dist_to_plane_min_z, dist_to_plane_max_x, dist_to_plane_min_x], dim=-1 + ) + maxx_dist_to_planes, maxx_dist_to_planes_plane_idx = torch.max(tot_dist_to_planes, dim=-1) ### maxx dist to planes ## # nn_pts # + + # selected plane normals # selected plane normals # kinematics dirven mano? # + # a much more simplified setting> # # need frictio + # contact points established and the contact information maintainacnce # + # test cases -> test such two relatively moving objects # + # assume you have the correct forces --- how to opt them # + # model the frictions # + tot_plane_normals = torch.stack( + [self.normal_plane_max_y, self.normal_plane_min_y, self.normal_plane_max_z, self.normal_plane_min_z, self.normal_plane_max_x, self.normal_plane_min_x], dim=0 ### 6 x 3 -> plane normals # + ) + # nearest plane points # # nearest plane points # # nearest plane points # # nearest plane points # + # nearest_plane_points # + selected_plane_normals = tot_plane_normals[maxx_dist_to_planes_plane_idx ] ### nn_tot_pts x 3 ### + projected_plane_pts = cur_pts - selected_plane_normals * maxx_dist_to_planes.unsqueeze(-1) ### nn_tot_pts x 3 ### + projected_plane_pts_x = projected_plane_pts[:, 0] + projected_plane_pts_y = projected_plane_pts[:, 1] + projected_plane_pts_z = projected_plane_pts[:, 2] + projected_plane_pts_x = torch.clamp(projected_plane_pts_x, min=minn_init_passive_mesh[0], max=maxx_init_passive_mesh[0]) + projected_plane_pts_y = torch.clamp(projected_plane_pts_y, min=minn_init_passive_mesh[1], max=maxx_init_passive_mesh[1]) + projected_plane_pts_z = torch.clamp(projected_plane_pts_z, min=minn_init_passive_mesh[2], max=maxx_init_passive_mesh[2]) + + projected_plane_pts = torch.stack( + [projected_plane_pts_x, projected_plane_pts_y, projected_plane_pts_z], dim=-1 + ) + + # query # + projected_plane_pts_transformed = torch.matmul( + cur_frame_rotation, (projected_plane_pts - self.center_init_passive_obj_verts.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + cur_frame_translation.unsqueeze(0) + self.center_init_passive_obj_verts.unsqueeze(0) + 
projected_plane_normals_transformed = torch.matmul( + cur_frame_rotation, selected_plane_normals.contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + + # ### penetration indicator, signed distance, projected points onto the plane as the contact points ### + return maxx_dist_to_planes <= 0, maxx_dist_to_planes, projected_plane_pts_transformed, projected_plane_normals_transformed, projected_plane_pts, selected_plane_normals + + + # def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, passive_sdf_net, active_bending_net, active_sdf_net, details=None, special_loss_return=False, update_tot_def=True): + def forward(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None, i_instance=0, reference_mano_pts=None, sampled_verts_idxes=None, fix_obj=False, contact_pairs_set=None): + #### contact_pairs_set #### + ### from input_pts to new pts ### + # prev_pts_ts = input_pts_ts - 1 # + ''' Kinematics rigid transformations only ''' + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion # # + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # + # self.timestep_to_optimizable_quaternion[input_pts_ts + 1] = self.time_quaternions(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(4) # + # cur_optimizable_rot_mtx = quaternion_to_matrix(self.timestep_to_optimizable_quaternion[input_pts_ts + 1]) # + # self.timestep_to_optimizable_rot_mtx[input_pts_ts + 1] = cur_optimizable_rot_mtx # + ''' Kinematics rigid transformations only ''' + + nex_pts_ts = input_pts_ts + 1 + + ''' Kinematics transformations from acc and torques ''' + # rigid_acc = self.time_forces(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # + # torque = self.time_torques(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) # TODO: note that inertial_matrix^{-1} real_torque # + ''' Kinematics transformations from acc and torques ''' + + # friction_qd = 0.1 # # + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] # sampled points --> sampled points # + ori_nns = sampled_input_pts.size(0) + if sampled_verts_idxes is not None: + sampled_input_pts = sampled_input_pts[sampled_verts_idxes] + nn_sampled_input_pts = sampled_input_pts.size(0) + + if nex_pts_ts in timestep_to_active_mesh: + ### disp_sampled_input_pts = nex_sampled_input_pts - sampled_input_pts ### + nex_sampled_input_pts = timestep_to_active_mesh[nex_pts_ts].detach() + else: + nex_sampled_input_pts = timestep_to_active_mesh[input_pts_ts].detach() + nex_sampled_input_pts = nex_sampled_input_pts[sampled_verts_idxes] + + + # ws_normed = torch.ones((sampled_input_pts.size(0),), dtype=torch.float32).cuda() + # ws_normed = ws_normed / float(sampled_input_pts.size(0)) + # m = Categorical(ws_normed) + # nn_sampled_input_pts = 20000 + # sampled_input_pts_idx = m.sample(sample_shape=(nn_sampled_input_pts,)) + + + # sampled_input_pts_normals = # + init_passive_obj_verts = timestep_to_passive_mesh[0] + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + + # cur_passive_obj_rot, cur_passive_obj_trans # + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = 
self.timestep_to_total_def[input_pts_ts].detach() + + # + + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) # + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() ## transform the normals ## + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center + + # cur_active_mesh = timestep_to_active_mesh[input_pts_ts] + # nex_active_mesh = timestep_to_active_mesh[input_pts_ts + 1] + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh ### the active mesh velocity + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k + # forces = friction_force + # ######## vel for frictions ######### + + + # ######## vel for frictions ######### + # vel_active_mesh = nex_active_mesh - cur_active_mesh # the active mesh velocity + # if input_pts_ts > 0: + # vel_passive_mesh = self.timestep_to_vel[input_pts_ts - 1] + # else: + # vel_passive_mesh = torch.zeros((3,), dtype=torch.float32).cuda() ### zeros ### + # vel_active_mesh = vel_active_mesh - vel_passive_mesh.unsqueeze(0) ## nn_active_pts x 3 ## --> active pts ## + + # friction_k = self.ks_friction_val(torch.zeros((1,)).long().cuda()).view(1) + # friction_force = vel_active_mesh * friction_k # + # forces = friction_force + # ######## vel for frictions ######### # # maintain the contact / continuous contact -> patch contact + # coantacts in previous timesteps -> ### + + # cur actuation # + cur_actuation_embedding_st_idx = self.nn_actuators * input_pts_ts + cur_actuation_embedding_ed_idx = self.nn_actuators * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + # ######### optimize the actuator forces directly ######### + # cur_actuation_forces = self.actuator_forces(cur_actuation_embedding_idxes) # actuation embedding idxes # + # forces = cur_actuation_forces + # ######### optimize the actuator forces directly ######### + + if friction_forces is None: + if self.nn_instances == 1: + cur_actuation_friction_forces = self.actuator_friction_forces(cur_actuation_embedding_idxes) + else: + cur_actuation_friction_forces = self.actuator_friction_forces[i_instance](cur_actuation_embedding_idxes) + else: + if reference_mano_pts is not None: + ref_mano_pts_nn = reference_mano_pts.size(0) + cur_actuation_embedding_st_idx = ref_mano_pts_nn * input_pts_ts + cur_actuation_embedding_ed_idx = ref_mano_pts_nn * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + # nn_ref_pts x 3 # + # sampled_input_pts # + # r = 0.01 # + threshold_ball_r = 0.01 + dist_input_pts_to_reference_pts = torch.sum( + (sampled_input_pts.unsqueeze(1) - reference_mano_pts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_input_pts_to_reference_pts = torch.sqrt(dist_input_pts_to_reference_pts) + 
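+ # When reference MANO points are provided, each sampled hand point takes the
+ # friction force of its nearest reference point (via batched_index_select further
+ # below); the truncated-ball weights computed next, zeroed beyond threshold_ball_r
+ # and then normalized, are kept here but the weighted-sum variant stays
+ # commented out.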
weights_input_to_reference = 0.5 - dist_input_pts_to_reference_pts + weights_input_to_reference[weights_input_to_reference < 0] = 0 + weights_input_to_reference[dist_input_pts_to_reference_pts > threshold_ball_r] = 0 + + minn_dist_input_pts_to_reference_pts, minn_idx_input_pts_to_reference_pts = torch.min(dist_input_pts_to_reference_pts, dim=-1) + + weights_input_to_reference[dist_input_pts_to_reference_pts == minn_dist_input_pts_to_reference_pts.unsqueeze(-1)] = 0.1 - dist_input_pts_to_reference_pts[dist_input_pts_to_reference_pts == minn_dist_input_pts_to_reference_pts.unsqueeze(-1)] + + weights_input_to_reference = weights_input_to_reference / torch.clamp(torch.sum(weights_input_to_reference, dim=-1, keepdim=True), min=1e-9) + + # cur_actuation_friction_forces = weights_input_to_reference.unsqueeze(-1) * cur_actuation_friction_forces.unsqueeze(0) # nn_input_pts x nn_ref_pts x 1 xxxx 1 x nn_ref_pts x 3 -> nn_input_pts x nn_ref_pts x 3 + # cur_actuation_friction_forces = cur_actuation_friction_forces.sum(dim=1) + + # cur_actuation_friction_forces * weights_input_to_reference.unsqueeze(-1) + cur_actuation_friction_forces = batched_index_select(cur_actuation_friction_forces, minn_idx_input_pts_to_reference_pts, dim=0) + else: + # cur_actuation_embedding_st_idx = 365428 * input_pts_ts + # cur_actuation_embedding_ed_idx = 365428 * (input_pts_ts + 1) + if sampled_verts_idxes is not None: + cur_actuation_embedding_st_idx = ori_nns * input_pts_ts + cur_actuation_embedding_ed_idx = ori_nns * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + cur_actuation_friction_forces = cur_actuation_friction_forces[sampled_verts_idxes] + else: + cur_actuation_embedding_st_idx = nn_sampled_input_pts * input_pts_ts + cur_actuation_embedding_ed_idx = nn_sampled_input_pts * (input_pts_ts + 1) + cur_actuation_embedding_idxes = torch.tensor([idxx for idxx in range(cur_actuation_embedding_st_idx, cur_actuation_embedding_ed_idx)], dtype=torch.long).cuda() + cur_actuation_friction_forces = friction_forces(cur_actuation_embedding_idxes) + + # nn instances # # nninstances # # + if self.nn_instances == 1: + ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + else: + ws_alpha = self.ks_weights[i_instance](torch.zeros((1,)).long().cuda()).view(1) + ws_beta = self.ks_weights[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = torch.min(dist_sampled_pts_to_passive_obj, dim=-1) + + + # use_penalty_based_friction, use_disp_based_friction # + # ### get the nearest object point to the in-active object ### + # if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): + if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): # contact pairs set # # contact pairs set ## + # for each calculated contacts, calculate the current contact points reversed transformed to the contact local frame # + # use the reversed transformed active point and the previous rest contact point position to calculate the 
contact friction force # + # transform the force to the current contact frame # + # x_h^{cur} - x_o^{cur} --- add the frictions for the hand + # add the friction force onto the object point # # contact point position -> nn_contacts x 3 # + contact_active_point_pts, contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + # contact active pos and contact passive pos # contact_active_pos; contact_passive_pos; # + # contact_active_pos = sampled_input_pts[contact_active_idxes] # should not be inter_obj_pts... # + # contact_passive_pos = cur_passive_obj_verts[contact_passive_idxes] + # to the passive obje ###s + minn_idx_sampled_pts_to_passive_obj[contact_active_idxes] = contact_passive_idxes + + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + dist_sampled_pts_to_passive_obj = batched_index_select(dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1).squeeze(1) + # ### get the nearest object point to the in-active object ### + + + + # sampled_input_pts # + # inter_obj_pts # + # inter_obj_normals + + # nn_sampledjpoints # + # cur_passive_obj_ns # # inter obj normals # # + inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + cur_passive_obj_verts_pts_idxes = torch.arange(0, cur_passive_obj_verts.size(0), dtype=torch.long).cuda() # + inter_passive_obj_pts_idxes = cur_passive_obj_verts_pts_idxes[minn_idx_sampled_pts_to_passive_obj] + + # inter_obj_normals # + inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts.detach() + dot_inter_obj_pts_to_sampled_pts_normals = torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) + + # contact_pairs_set # + + ###### penetration penalty strategy v1 ###### + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 + # penetrating_depth = -1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1) + # penetrating_depth_penalty = penetrating_depth[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): # get the penetration penalties # + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v1 ###### + + + ###### penetration penalty strategy v2 ###### + # if input_pts_ts > 0: + # prev_active_obj = timestep_to_active_mesh[input_pts_ts - 1].detach() + # if sampled_verts_idxes is not None: + # prev_active_obj = prev_active_obj[sampled_verts_idxes] + # disp_prev_to_cur = sampled_input_pts - prev_active_obj + # disp_prev_to_cur = torch.norm(disp_prev_to_cur, dim=-1, p=2) + # penetrating_depth_penalty = disp_prev_to_cur[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + # else: + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v2 ###### + # + # jacobian matrix calculation? 
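+ # Penetration handling in this forward pass: strategies v1-v3 are retained only as
+ # commented-out references; the active path is strategy v4 further below, which
+ # queries the object SDF at the sampled hand points, flags points with negative
+ # SDF as penetrating, and penalizes how far those points project along the
+ # (detached) object normals past their nearest surface points.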
+ # jacobian + + ###### penetration penalty strategy v3 ###### + # if input_pts_ts > 0: + # cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + # cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + # queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + # penetrating_indicator = queried_sdf < 0 + # if sampled_verts_idxes is not None: + # prev_active_obj = prev_active_obj[sampled_verts_idxes] + # disp_prev_to_cur = sampled_input_pts - prev_active_obj + # disp_prev_to_cur = torch.norm(disp_prev_to_cur, dim=-1, p=2) + # penetrating_depth_penalty = disp_prev_to_cur[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # else: + # # cur_rot = torch.eye(3, dtype=torch.float32).cuda() + # # cur_trans = torch.zeros((3,), dtype=torch.float32).cuda() + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v3 ###### + + # ws_beta; 10 # # sum over the forces but not the weighted sum... # + ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) # ws_alpha # + ####### sharp the weights ####### + + # minn_dist_sampled_pts_passive_obj_thres = 0.05 + # # minn_dist_sampled_pts_passive_obj_thres = 0.001 # + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 # + minn_dist_sampled_pts_passive_obj_thres = self.minn_dist_sampled_pts_passive_obj_thres + + + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + # penetrating # + ### penetration strategy v4 #### + + if input_pts_ts > 0: + cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + penetrating_indicator = queried_sdf < 0 + else: + penetrating_indicator = torch.zeros_like(dot_inter_obj_pts_to_sampled_pts_normals).bool() + + + # if contact_pairs_set is not None and self.use_penalty_based_friction and (not self.use_disp_based_friction): + # penetrating + + ### nearest #### + ''' decide forces via kinematics statistics ''' + ### nearest #### + # rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + # dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + + + # cannot be adapted to this easily # + # what's a better realization way? # + + + # dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] = -1. * dist_sampled_pts_to_passive_obj[dot_rel_inter_obj_pts_normals < 0] # + dist_sampled_pts_to_passive_obj[penetrating_indicator] = -1. * dist_sampled_pts_to_passive_obj[penetrating_indicator] + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + + in_contact_indicator = dist_sampled_pts_to_passive_obj <= minn_dist_sampled_pts_passive_obj_thres + + # in_contact_indicator + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + + # ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + + # penetrating_indicator = + + # penetrating + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 # + self.penetrating_indicator = penetrating_indicator + penetration_proj_ks = 0 - dot_inter_obj_pts_to_sampled_pts_normals + ### penetratio nproj penalty ### + penetration_proj_penalty = penetration_proj_ks * (-1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1)) + self.penetrating_depth_penalty = penetration_proj_penalty[penetrating_indicator].mean() + if torch.isnan(self.penetrating_depth_penalty): # get the penetration penalties # + self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + penetrating_points = sampled_input_pts[penetrating_indicator] + penetration_proj_k_to_robot = 1.0 # 0.7 + # penetration_proj_k_to_robot = 0.01 + penetration_proj_k_to_robot = 0.0 + penetrating_forces = penetration_proj_ks.unsqueeze(-1) * inter_obj_normals.detach() * penetration_proj_k_to_robot + penetrating_forces = penetrating_forces[penetrating_indicator] + self.penetrating_forces = penetrating_forces # + self.penetrating_points = penetrating_points # + ### penetration strategy v4 #### # another mophology # + + # maintain the forces # + + # # contact_pairs_set # # + + # for contact pair in the contact_pair_set, get the contact pair -> the mesh index of the passive object and the active object # + # the orientation of the contact frame # + # original contact point position of the contact pair # + # original orientation of the contact frame # + ##### get previous contact information ###### + # for cur_contact_pair in contact_pairs_set: + # # cur_contact_pair = (contact point position, contact frame orientation) # + # # contact_point_positon -> should be the contact position transformed to the local contact frame # + # contact_point_positon, (contact_passive_idx, contact_active_idx), contact_frame_pose = cur_contact_pair # + # # contact_point_positon of the contact pair # + # cur_active_pos = sampled_input_pts[contact_active_idx] # passive_position # + # # (original passive position - current passive position) * K_f = penalty based friction force # # # # + # cur_passive_pos = inter_obj_pts[contact_passive_idx] # active_position # + # # (the transformed passive position) # + # # + # # # the continuous active and passive pos ## + # # # the continuous active and passive pos ## + # # the continuous active and passive pos ## + # contact_frame_orientation, contact_frame_translation = contact_frame_pose # # set the orientation and the contact frame translation + # # orientation, translation # + # cur_inv_transformed_active_pos = torch.matmul( + # contact_frame_orientation.contiguous().transpose(1, 0).contiguous(), (cur_active_pos - contact_frame_translation.unsqueeze(0)).transpose(1, 0) + # ) + + + + # should be the contact penalty frictions added onto the passive object verts # + # use the frictional force to mainatian the contact here # + + # maintain the contact and calculate the penetrating forces and points for each timestep and then use the displacemnet to calculate the penalty based friction forces # + + + if self.nn_instances == 1: # spring ks values + # contact ks values # # if we set a fixed k value here # + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = 
self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + else: + contact_spring_ka = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + contact_spring_kc = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + tangential_ks = self.spring_ks_values[i_instance](torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + + + ###### the contact force decided by the rest_length ###### # not very sure ... # + # contact_force_d = contact_spring_ka * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) # + contact_spring_kb * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 2 + contact_spring_kc * (self.contact_spring_rest_length - dist_sampled_pts_to_passive_obj) ** 3 # + ###### the contact force decided by the rest_length ###### + + + ##### the contact force decided by the theshold ###### # realted to the distance threshold and the HO distance # + contact_force_d = contact_spring_ka * (self.minn_dist_sampled_pts_passive_obj_thres - dist_sampled_pts_to_passive_obj) + ###### the contact force decided by the threshold ###### + + ###### Get the tangential forces via optimizable forces ###### # dot along the normals ## + cur_actuation_friction_forces_along_normals = torch.sum(cur_actuation_friction_forces * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + tangential_vel = cur_actuation_friction_forces - cur_actuation_friction_forces_along_normals + ###### Get the tangential forces via optimizable forces ###### + + # cur actuation friction forces along normals # + + ###### Get the tangential forces via tangential velocities ###### + # vel_sampled_pts_along_normals = torch.sum(vel_sampled_pts * inter_obj_normals, dim=-1).unsqueeze(-1) * inter_obj_normals + # tangential_vel = vel_sampled_pts - vel_sampled_pts_along_normals + ###### Get the tangential forces via tangential velocities ###### + + tangential_forces = tangential_vel * tangential_ks # tangential forces # + contact_force_d_scalar = contact_force_d.clone() # + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals) + + norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts, nnsampledpts # + penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. 
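+ # Soft Coulomb friction cone on the optimizable tangential forces: the squared
+ # hinge (||f_t|| - mu * ||f_n||)^2 is charged only where the tangential magnitude
+ # exceeds mu times the contact-normal force magnitude, so forces already inside
+ # the cone ||f_t|| <= mu * ||f_n|| contribute nothing before the mean is taken.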
+ penalty_friction_constraint = torch.mean(penalty_friction_constraint) + self.penalty_friction_constraint = penalty_friction_constraint # penalty friction + contact_force_d_scalar = norm_along_normals_forces.clone() + # penalty friction constraints # + penalty_friction_tangential_forces = torch.zeros_like(contact_force_d) + ''' Get the contact information that should be maintained''' + if contact_pairs_set is not None: # contact pairs set # # contact pairs set ## + # for each calculated contacts, calculate the current contact points reversed transformed to the contact local frame # + # use the reversed transformed active point and the previous rest contact point position to calculate the contact friction force # + # transform the force to the current contact frame # + # x_h^{cur} - x_o^{cur} --- add the frictions for the hand + # add the friction force onto the object point # # contact point position -> nn_contacts x 3 # + contact_active_point_pts, contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + # contact active pos and contact passive pos # contact_active_pos; contact_passive_pos; # + contact_active_pos = sampled_input_pts[contact_active_idxes] # should not be inter_obj_pts... # + contact_passive_pos = cur_passive_obj_verts[contact_passive_idxes] + + ''' Penalty based contact force v2 ''' + contact_frame_orientations, contact_frame_translations = contact_frame_pose + transformed_prev_contact_active_pos = torch.matmul( + contact_frame_orientations.contiguous(), contact_active_point_pts.unsqueeze(-1) + ).squeeze(-1) + contact_frame_translations + transformed_prev_contact_point_position = torch.matmul( + contact_frame_orientations.contiguous(), contact_point_position.unsqueeze(-1) + ).squeeze(-1) + contact_frame_translations + diff_transformed_prev_contact_passive_to_active = transformed_prev_contact_active_pos - transformed_prev_contact_point_position + # cur_contact_passive_pos_from_active = contact_passive_pos + diff_transformed_prev_contact_passive_to_active + cur_contact_passive_pos_from_active = contact_active_pos - diff_transformed_prev_contact_passive_to_active + + friction_k = 1.0 + # penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + # penalty_based_friction_forces = friction_k * (contact_active_pos - transformed_prev_contact_active_pos) + + # + # penalty_based_friction_forces = friction_k * (contact_active_pos - contact_passive_pos) + penalty_based_friction_forces = friction_k * (cur_contact_passive_pos_from_active - contact_passive_pos) + ''' Penalty based contact force v2 ''' + + ''' Penalty based contact force v1 ''' + ###### Contact frame orientations and translations ###### + # contact_frame_orientations, contact_frame_translations = contact_frame_pose # (nn_contacts x 3 x 3) # (nn_contacts x 3) # + # # cur_passive_obj_verts # + # inv_transformed_contact_active_pos = torch.matmul( + # contact_frame_orientations.contiguous().transpose(2, 1).contiguous(), (contact_active_pos - contact_frame_translations).contiguous().unsqueeze(-1) + # ).squeeze(-1) # nn_contacts x 3 # + # inv_transformed_contact_passive_pos = torch.matmul( # contact frame translations # ## nn_contacts x 3 ## # # + # contact_frame_orientations.contiguous().transpose(2, 1).contiguous(), (contact_passive_pos - contact_frame_translations).contiguous().unsqueeze(-1) + # ).squeeze(-1) + # # inversely transformed cotnact active and passive pos # + + # # inv_transformed_contact_active_pos, 
inv_transformed_contact_passive_pos # + # ### contact point position ### # + # ### use the passive point disp ### + # # disp_active_pos = (inv_transformed_contact_active_pos - contact_point_position) # nn_contacts x 3 # + # ### use the active point disp ### + # # disp_active_pos = (inv_transformed_contact_active_pos - contact_active_point_pts) + # disp_active_pos = (inv_transformed_contact_active_pos - contact_active_point_pts) + # ### friction_k is equals to 1.0 ### + # friction_k = 1. + # # use the disp_active_pose as the penalty based friction forces # # nn_contacts x 3 # + # penalty_based_friction_forces = disp_active_pos * friction_k + + # # get the penalty based friction forces # + # penalty_based_friction_forces = torch.matmul( + # contact_frame_orientations.contiguous(), penalty_based_friction_forces.unsqueeze(-1) + # ).contiguous().squeeze(-1).contiguous() + ''' Penalty based contact force v1 ''' + + #### strategy 1: implement the dynamic friction forces #### + # dyn_friction_k = 1.0 # together with the friction_k # + # # dyn_friction_k # + # dyn_friction_force = dyn_friction_k * contact_force_d # nn_sampled_pts x 3 # + # dyn_friction_force # + # dyn_friction_force = # + # tangential velocities # # tangential velocities # + #### strategy 1: implement the dynamic friction forces #### + + #### strategy 2: do not use the dynamic friction forces #### + # equalt to use a hard selector to screen the friction forces # + # + # contact_force_d # # contact_force_d # + + valid_contact_force_d_scalar = contact_force_d_scalar[contact_active_idxes] + + + # penalty_based_friction_forces # + norm_penalty_based_friction_forces = torch.norm(penalty_based_friction_forces, dim=-1, p=2) + # valid penalty friction forces # # valid contact force d scalar # + valid_penalty_friction_forces_indicator = norm_penalty_based_friction_forces <= (valid_contact_force_d_scalar * self.static_friction_mu * 500) + valid_penalty_friction_forces_indicator[:] = True + + + summ_valid_penalty_friction_forces_indicator = torch.sum(valid_penalty_friction_forces_indicator.float()) + + # print(f"summ_valid_penalty_friction_forces_indicator: {summ_valid_penalty_friction_forces_indicator}") + # print(f"penalty_based_friction_forces: {penalty_based_friction_forces.size()}, summ_valid_penalty_friction_forces_indicator: {summ_valid_penalty_friction_forces_indicator}") + # tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.005 # * 1000. + + + contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + + # penalty_friction_tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * contact_spring_kb + + penalty_friction_tangential_forces[contact_active_idxes][valid_penalty_friction_forces_indicator] = penalty_based_friction_forces[valid_penalty_friction_forces_indicator] * contact_friction_spring_cur + + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * contact_spring_kb + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.01 # * 1000. 
# based friction + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.02 + # tangential_forces[contact_active_idxes] = penalty_based_friction_forces * 0.05 # + + else: + contact_active_idxes = None + self.contact_active_idxes = contact_active_idxes + valid_penalty_friction_forces_indicator = None + # tangential forces with inter obj normals # -> + dot_tangential_forces_with_inter_obj_normals = torch.sum(penalty_friction_tangential_forces * inter_obj_normals, dim=-1) ### nn_active_pts x # + penalty_friction_tangential_forces = penalty_friction_tangential_forces - dot_tangential_forces_with_inter_obj_normals.unsqueeze(-1) * inter_obj_normals + tangential_forces_clone = tangential_forces.clone() + # tangential_forces = torch.zeros_like(tangential_forces) ### + + # if contact_active_idxes is not None: + # self.contact_active_idxes = contact_active_idxes + # self.valid_penalty_friction_forces_indicator = valid_penalty_friction_forces_indicator # + # # print(f"here {summ_valid_penalty_friction_forces_indicator}") + # # tangential_forces[self.contact_active_idxes][self.valid_penalty_friction_forces_indicator] = tangential_forces_clone[self.contact_active_idxes][self.valid_penalty_friction_forces_indicator] + # contact_active_idxes_indicators = torch.ones((tangential_forces.size(0)), dtype=torch.float).cuda().bool() + # contact_active_idxes_indicators[:] = True + # contact_active_idxes_indicators[self.contact_active_idxes] = False + + # tangential_forces[contact_active_idxes_indicators] = 0. + + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # tangential forces # + # maxx_norm_tangential, _ = torch.max(norm_tangential_forces, dim=-1) + # minn_norm_tangential, _ = torch.min(norm_tangential_forces, dim=-1) + # print(f"maxx_norm_tangential: {maxx_norm_tangential}, minn_norm_tangential: {minn_norm_tangential}") + + # two + ### ## get new contacts ## ### + tot_contact_point_position = [] + tot_contact_active_point_pts = [] + tot_contact_active_idxes = [] + tot_contact_passive_idxes = [] + tot_contact_frame_rotations = [] + tot_contact_frame_translations = [] + + if torch.sum(in_contact_indicator.float()) > 0.5: # in contact indicator # + cur_in_contact_passive_pts = inter_obj_pts[in_contact_indicator] + cur_in_contact_passive_normals = inter_obj_normals[in_contact_indicator] + cur_in_contact_active_pts = sampled_input_pts[in_contact_indicator] # in_contact_active_pts # + + # in contact active pts # + # sampled input pts # + # cur_passive_obj_rot, cur_passive_obj_trans # + # cur_passive_obj_trans # + # cur_in_contact_activE_pts # + # in_contact_passive_pts # + cur_contact_frame_rotations = cur_passive_obj_rot.unsqueeze(0).repeat(cur_in_contact_passive_pts.size(0), 1, 1).contiguous() + cur_contact_frame_translations = cur_in_contact_passive_pts.clone() # + #### contact farme active points ##### -> ## + cur_contact_frame_active_pts = torch.matmul( + cur_contact_frame_rotations.contiguous().transpose(1, 2).contiguous(), (cur_in_contact_active_pts - cur_contact_frame_translations).contiguous().unsqueeze(-1) + ).squeeze(-1) ### cur_contact_frame_active_pts ### + cur_contact_frame_passive_pts = torch.matmul( + cur_contact_frame_rotations.contiguous().transpose(1, 2).contiguous(), (cur_in_contact_passive_pts - cur_contact_frame_translations).contiguous().unsqueeze(-1) + ).squeeze(-1) ### cur_contact_frame_active_pts ### + cur_in_contact_active_pts_all = torch.arange(0, sampled_input_pts.size(0)).long().cuda() + cur_in_contact_active_pts_all = 
cur_in_contact_active_pts_all[in_contact_indicator] + cur_inter_passive_obj_pts_idxes = inter_passive_obj_pts_idxes[in_contact_indicator] + # contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose + # cur_contact_frame_pose = (cur_contact_frame_rotations, cur_contact_frame_translations) + # contact_point_positions = cur_contact_frame_passive_pts # + # contact_active_idxes, cotnact_passive_idxes # + # contact_point_position = cur_contact_frame_passive_pts + # contact_active_idxes = cur_in_contact_active_pts_all + # contact_passive_idxes = cur_inter_passive_obj_pts_idxes + tot_contact_active_point_pts.append(cur_contact_frame_active_pts) + tot_contact_point_position.append(cur_contact_frame_passive_pts) # contact frame points + tot_contact_active_idxes.append(cur_in_contact_active_pts_all) # active_pts_idxes + tot_contact_passive_idxes.append(cur_inter_passive_obj_pts_idxes) # passive_pts_idxes + tot_contact_frame_rotations.append(cur_contact_frame_rotations) # rotations + tot_contact_frame_translations.append(cur_contact_frame_translations) # translations + + + ## + # ####### if contact_pairs_set is not None and torch.sum(valid_penalty_friction_forces_indicator.float()) > 0.5: ######## + # if contact_pairs_set is not None and torch.sum(valid_penalty_friction_forces_indicator.float()) > 0.5: + # # contact_point_position, (contact_active_idxes, contact_passive_idxes), contact_frame_pose = contact_pairs_set + # prev_contact_active_point_pts = contact_active_point_pts[valid_penalty_friction_forces_indicator] + # prev_contact_point_position = contact_point_position[valid_penalty_friction_forces_indicator] + # prev_contact_active_idxes = contact_active_idxes[valid_penalty_friction_forces_indicator] + # prev_contact_passive_idxes = contact_passive_idxes[valid_penalty_friction_forces_indicator] + # prev_contact_frame_rotations = contact_frame_orientations[valid_penalty_friction_forces_indicator] + # prev_contact_frame_translations = contact_frame_translations[valid_penalty_friction_forces_indicator] + + # tot_contact_active_point_pts.append(prev_contact_active_point_pts) + # tot_contact_point_position.append(prev_contact_point_position) + # tot_contact_active_idxes.append(prev_contact_active_idxes) + # tot_contact_passive_idxes.append(prev_contact_passive_idxes) + # tot_contact_frame_rotations.append(prev_contact_frame_rotations) + # tot_contact_frame_translations.append(prev_contact_frame_translations) + ####### if contact_pairs_set is not None and torch.sum(valid_penalty_friction_forces_indicator.float()) > 0.5: ######## + + + + if len(tot_contact_frame_rotations) > 0: + upd_contact_active_point_pts = torch.cat(tot_contact_active_point_pts, dim=0) + upd_contact_point_position = torch.cat(tot_contact_point_position, dim=0) + upd_contact_active_idxes = torch.cat(tot_contact_active_idxes, dim=0) + upd_contact_passive_idxes = torch.cat(tot_contact_passive_idxes, dim=0) + upd_contact_frame_rotations = torch.cat(tot_contact_frame_rotations, dim=0) + upd_contact_frame_translations = torch.cat(tot_contact_frame_translations, dim=0) + upd_contact_pairs_information = [upd_contact_active_point_pts, upd_contact_point_position, (upd_contact_active_idxes, upd_contact_passive_idxes), (upd_contact_frame_rotations, upd_contact_frame_translations)] + else: + upd_contact_pairs_information = None + + + + # # previus + if self.use_penalty_based_friction and self.use_disp_based_friction: + disp_friction_tangential_forces = nex_sampled_input_pts - sampled_input_pts + + 
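+ # Displacement-based friction branch (use_disp_based_friction): the hand-point
+ # displacement from the current frame to the next is scaled by the per-timestep
+ # spring_friction_ks_values stiffness, its component along the contact normals is
+ # removed, and the remaining tangential part overwrites
+ # penalty_friction_tangential_forces.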
contact_friction_spring_cur = self.spring_friction_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + disp_friction_tangential_forces = disp_friction_tangential_forces * contact_friction_spring_cur + disp_friction_tangential_forces_dot_normals = torch.sum( + disp_friction_tangential_forces * inter_obj_normals, dim=-1 + ) + disp_friction_tangential_forces = disp_friction_tangential_forces - disp_friction_tangential_forces_dot_normals.unsqueeze(-1) * inter_obj_normals + + penalty_friction_tangential_forces = disp_friction_tangential_forces + + + # # tangential forces # + # tangential_forces = tangential_forces * mult_weights.unsqueeze(-1) # # + ### strict cosntraints ### + if self.use_penalty_based_friction: + forces = penalty_friction_tangential_forces + contact_force_d # tantential forces and contact force d # + else: + # print(f"not using use_penalty_based_friction...") + tangential_forces_norm = torch.sum(tangential_forces ** 2, dim=-1) + pos_tangential_forces = tangential_forces[tangential_forces_norm > 1e-5] + # print(pos_tangential_forces) + forces = tangential_forces + contact_force_d # tantential forces and contact force d # + # forces = penalty_friction_tangential_forces + contact_force_d # tantential forces and contact force d # + ''' decide forces via kinematics statistics ''' + + ''' Decompose forces and calculate penalty froces ''' # + # penalty_dot_forces_normals, penalty_friction_constraint # # contraints # # + # # get the forces -> decompose forces # + dot_forces_normals = torch.sum(inter_obj_normals * forces, dim=-1) ### nn_sampled_pts ### + # forces_along_normals = dot_forces_normals.unsqueeze(-1) * inter_obj_normals ## the forces along the normal direction ## + # tangential_forces = forces - forces_along_normals # tangential forces # # tangential forces ### tangential forces ## + # penalty_friction_tangential_forces = force - + + + #### penalty_friction_tangential_forces, tangential_forces #### + self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.tangential_forces = tangential_forces + + + penalty_dot_forces_normals = dot_forces_normals ** 2 + penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) # 1) must # 2) must # + self.penalty_dot_forces_normals = penalty_dot_forces_normals # + + + rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc # + + ###### sampled input pts to center ####### + center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + ###### sampled input pts to center ####### + + ###### nearest passive object point to center ####### + # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) # squeeze(1) # + + # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) # + ###### nearest passive object point to center ####### + + sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # torque = torch.sum( + # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # ) + torque = torch.sum( + sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + ) + + + + if self.nn_instances == 1: + 
time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + else: + time_cons = self.time_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant[i_instance](torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + + + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + cur_vel = delta_vel + self.timestep_to_vel[input_pts_ts - 1].detach() * damping_cons + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() + + + delta_angular_vel = torque * time_cons_rot + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + cur_angular_vel = delta_angular_vel + self.timestep_to_angular_vel[input_pts_ts - 1].detach() * damping_cons_rot ### (3,) + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 + + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + + + # cur_delta_rot_mtx = torch.matmul(cur_optimizable_rot_mtx, prev_rot_mtx.transpose(1, 0)) + + # cur_delta_quaternion = euler_to_quaternion(cur_delta_angle[0], cur_delta_angle[1], cur_delta_angle[2]) ### delta_quaternion ### + # cur_delta_quaternion = torch.stack(cur_delta_quaternion, dim=0) ## (4,) quaternion ## + + # cur_quaternion = prev_quaternion + cur_delta_quaternion ### (4,) + + # cur_delta_rot_mtx = quaternion_to_matrix(cur_delta_quaternion) ## (4,) -> (3, 3) + + # print(f"input_pts_ts {input_pts_ts},, prev_quaternion { prev_quaternion}") + + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + # curupd + # if update_tot_def: + + + + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternio + # + + if not fix_obj: + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + 
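+ # Unless fix_obj is set, the integrated rigid state (total deformation,
+ # optimizable total deformation, quaternion, and the rotation matrix derived from
+ # it) is written under nex_pts_ts so the next timestep integrates from this
+ # updated state.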
self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + } + + return upd_contact_pairs_information + + + ### forward; #### # + def forward2(self, input_pts_ts, timestep_to_active_mesh, timestep_to_passive_mesh, timestep_to_passive_mesh_normals, details=None, special_loss_return=False, update_tot_def=True, friction_forces=None, i_instance=0, reference_mano_pts=None, sampled_verts_idxes=None, fix_obj=False, contact_pairs_set=None, pts_frictional_forces=None): + + + nex_pts_ts = input_pts_ts + 1 + + sampled_input_pts = timestep_to_active_mesh[input_pts_ts] + # ori_nns = sampled_input_pts.size(0) + if sampled_verts_idxes is not None: + sampled_input_pts = sampled_input_pts[sampled_verts_idxes] + # nn_sampled_input_pts = sampled_input_pts.size(0) + + if nex_pts_ts in timestep_to_active_mesh: + ### disp_sampled_input_pts = nex_sampled_input_pts - sampled_input_pts ### + nex_sampled_input_pts = timestep_to_active_mesh[nex_pts_ts].detach() + else: + nex_sampled_input_pts = timestep_to_active_mesh[input_pts_ts].detach() ## + if sampled_verts_idxes is not None: + nex_sampled_input_pts = nex_sampled_input_pts[sampled_verts_idxes] ## + disp_act_pts_cur_to_nex = nex_sampled_input_pts - sampled_input_pts ## act pts cur to nex ## ## act pts cur to nex ## + # disp_act_pts_cur_to_nex = disp_act_pts_cur_to_nex / torch.clamp(torch.norm(disp_act_pts_cur_to_nex, p=2, keepdim=True, dim=-1), min=1e-5) + + ### + if sampled_input_pts.size(0) > 20000: + norm_disp_act_pts = torch.clamp(torch.norm(disp_act_pts_cur_to_nex, dim=-1, p=2, keepdim=True), min=1e-5) # * 10.0 + else: + norm_disp_act_pts = torch.clamp(torch.norm(disp_act_pts_cur_to_nex, p=2, keepdim=True), min=1e-5) + + disp_act_pts_cur_to_nex = disp_act_pts_cur_to_nex / norm_disp_act_pts + real_norm = torch.clamp(torch.norm(disp_act_pts_cur_to_nex, p=2, 
keepdim=True, dim=-1), min=1e-5) + real_norm = torch.mean(real_norm) + + # print(sampled_input_pts.size(), norm_disp_act_pts, real_norm) + + if self.canon_passive_obj_verts is None: + ## center init passsive obj verts ## + init_passive_obj_verts = timestep_to_passive_mesh[0] # at the timestep 0 ## + init_passive_obj_ns = timestep_to_passive_mesh_normals[0] + center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + self.center_init_passive_obj_verts = center_init_passive_obj_verts.clone() + else: + init_passive_obj_verts = self.canon_passive_obj_verts + init_passive_obj_ns = self.canon_passive_obj_normals + + # center_init_passive_obj_verts = init_passive_obj_verts.mean(dim=0) + # self.center_init_passive_obj_verts = center_init_passive_obj_verts.clone() + + # direction of the normal direction has been changed # + # contact region and multiple contact points ## + center_init_passive_obj_verts = torch.zeros((3, ), dtype=torch.float32).cuda() + self.center_init_passive_obj_verts = center_init_passive_obj_verts.clone() + + + + # cur_passive_obj_rot, cur_passive_obj_trans # ## quaternion to matrix -- quaternion for + cur_passive_obj_rot = quaternion_to_matrix(self.timestep_to_quaternion[input_pts_ts].detach()) + cur_passive_obj_trans = self.timestep_to_total_def[input_pts_ts].detach() + + ''' Transform the passive object verts and normals ''' + cur_passive_obj_verts = torch.matmul(cur_passive_obj_rot, (init_passive_obj_verts - center_init_passive_obj_verts.unsqueeze(0)).transpose(1, 0)).transpose(1, 0) + center_init_passive_obj_verts.squeeze(0) + cur_passive_obj_trans.unsqueeze(0) + + + cur_passive_obj_ns = torch.matmul(cur_passive_obj_rot, init_passive_obj_ns.transpose(1, 0).contiguous()).transpose(1, 0).contiguous() + ### passvie obj ns ### + cur_passive_obj_ns = cur_passive_obj_ns / torch.clamp(torch.norm(cur_passive_obj_ns, dim=-1, keepdim=True), min=1e-8) + cur_passive_obj_center = center_init_passive_obj_verts + cur_passive_obj_trans + passive_center_point = cur_passive_obj_center # passive obj center # + + self.cur_passive_obj_ns = cur_passive_obj_ns + self.cur_passive_obj_verts = cur_passive_obj_verts + + + # nn instances # # # cur passive obj ns ## + # if self.nn_instances == 1: + # ws_alpha = self.ks_weights(torch.zeros((1,)).long().cuda()).view(1) + # ws_beta = self.ks_weights(torch.ones((1,)).long().cuda()).view(1) + # else: + # ws_alpha = self.ks_weights[i_instance](torch.zeros((1,)).long().cuda()).view(1) + # ws_beta = self.ks_weights[i_instance](torch.ones((1,)).long().cuda()).view(1) + + # print(f"sampled_input_pts: {sampled_input_pts.size()}") + + if self.use_sqrt_dist: # use sqrt distance # + dist_sampled_pts_to_passive_obj = torch.norm( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)), dim=-1, p=2 + ) + else: + dist_sampled_pts_to_passive_obj = torch.sum( # nn_sampled_pts x nn_passive_pts + (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)) ** 2, dim=-1 + ) + #### use sqrt distances #### + + # ### add the sqrt for calculate the l2 distance ### + # dist_sampled_pts_to_passive_obj = torch.sqrt(dist_sampled_pts_to_passive_obj) ### + + + # dist_sampled_pts_to_passive_obj = torch.norm( # nn_sampled_pts x nn_passive_pts + # (sampled_input_pts.unsqueeze(1) - cur_passive_obj_verts.unsqueeze(0)), dim=-1, p=2 + # ) + + ''' distance between sampled pts and the passive object ''' + ## get the object vert idx with the + dist_sampled_pts_to_passive_obj, minn_idx_sampled_pts_to_passive_obj = 
torch.min(dist_sampled_pts_to_passive_obj, dim=-1) # get the minn idx sampled pts to passive obj ## + + + ''' calculate the apssvie objects normals ''' + # inter obj normals at the current frame # + # inter_obj_normals = cur_passive_obj_ns[minn_idx_sampled_pts_to_passive_obj] + + + + # disp_act_pts_cur_to_nex = disp_act_pts_cur_to_nex[mini] + + ### use obj normals as the direction ### + # inter_obj_normals = -1 * inter_obj_normals.detach().clone() + ### use the active points displacement directions as the direction ### # the normal + + inter_obj_normals = -1 * disp_act_pts_cur_to_nex.detach().clone() + + # penetration_determining # + inter_obj_pts = cur_passive_obj_verts[minn_idx_sampled_pts_to_passive_obj] + + + cur_passive_obj_verts_pts_idxes = torch.arange(0, cur_passive_obj_verts.size(0), dtype=torch.long).cuda() # + + # inter_passive_obj_pts_idxes = cur_passive_obj_verts_pts_idxes[minn_idx_sampled_pts_to_passive_obj] + + # inter_obj_normals # + # inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts.detach() # sampled p + # dot_inter_obj_pts_to_sampled_pts_normals = torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) + + ###### penetration penalty strategy v1 ###### + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 + # penetrating_depth = -1 * torch.sum(inter_obj_pts_to_sampled_pts * inter_obj_normals.detach(), dim=-1) + # penetrating_depth_penalty = penetrating_depth[penetrating_indicator].mean() + # self.penetrating_depth_penalty = penetrating_depth_penalty + # if torch.isnan(penetrating_depth_penalty): # get the penetration penalties # + # self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + ###### penetration penalty strategy v1 ###### + + # ws_beta; 10 # # sum over the forces but not the weighted sum... # + # ws_unnormed = ws_beta * torch.exp(-1. * dist_sampled_pts_to_passive_obj * ws_alpha * 10) # ws_alpha # + ####### sharp the weights ####### + + # minn_dist_sampled_pts_passive_obj_thres = 0.05 + # # minn_dist_sampled_pts_passive_obj_thres = 0.001 + # minn_dist_sampled_pts_passive_obj_thres = 0.0001 # m + minn_dist_sampled_pts_passive_obj_thres = self.minn_dist_sampled_pts_passive_obj_thres + + + + # # ws_unnormed = ws_normed_sampled + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) e + # rigid_acc = torch.sum(forces * ws_normed.unsqueeze(-1), dim=0) + + #### using network weights #### + # cur_act_weights = self.actuator_weights(cur_actuation_embedding_idxes).squeeze(-1) + #### using network weights #### + + # penetrating # + ### penetration strategy v4 #### ## threshold of the sampled pts ## + + if input_pts_ts > 0 or (input_pts_ts == 0 and input_pts_ts in self.timestep_to_total_def): + cur_rot = self.timestep_to_optimizable_rot_mtx[input_pts_ts].detach() + cur_trans = self.timestep_to_total_def[input_pts_ts].detach() + # obj_sdf_grad + if self.penetration_determining == "sdf_of_canon": ### queried sdf? 
### + if self.obj_sdf_grad is None: + queried_sdf = self.query_for_sdf_of_canon_obj(sampled_input_pts, (cur_rot, cur_trans)) + else: + queried_sdf, queried_sdf_grad = self.query_for_sdf_of_canon_obj(sampled_input_pts, (cur_rot, cur_trans)) + else: + if self.obj_sdf_grad is None: + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + else: + queried_sdf, queried_sdf_grad = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + # inter_obj_normals = -1.0 * queried_sdf_grad + # inter_obj_normals = queried_sdf_grad + # inter_obj_normals = torch.matmul( # 3 x 3 xxxx 3 x N -> 3 x N + # cur_rot, inter_obj_normals.contiguous().transpose(1, 0).contiguous() + # ).contiguous().transpose(1, 0).contiguous() + # elif self.penetration_determining == ## timestep to total def ## + penetrating_indicator = queried_sdf < 0 + else: + cur_rot = torch.eye(n=3, dtype=torch.float32).cuda() + cur_trans = torch.zeros((3,), dtype=torch.float32).cuda() + if self.penetration_determining == "sdf_of_canon": + if self.obj_sdf_grad is None: + queried_sdf = self.query_for_sdf_of_canon_obj(sampled_input_pts, (cur_rot, cur_trans)) + else: + queried_sdf, queried_sdf_grad = self.query_for_sdf_of_canon_obj(sampled_input_pts, (cur_rot, cur_trans)) + else: + if self.obj_sdf_grad is None: + queried_sdf = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + else: + queried_sdf, queried_sdf_grad = self.query_for_sdf(sampled_input_pts, (cur_rot, cur_trans)) + # inter_obj_normals = -1.0 * queried_sdf_grad + # inter_obj_normals = queried_sdf_grad + penetrating_indicator = queried_sdf < 0 + + ### nearest #### + ''' decide forces via kinematics statistics ''' + ### nearest #### + # rel_inter_obj_pts_to_sampled_pts = sampled_input_pts - inter_obj_pts # inter_obj_pts # + # dot_rel_inter_obj_pts_normals = torch.sum(rel_inter_obj_pts_to_sampled_pts * inter_obj_normals, dim=-1) ## nn_sampled_pts + + penetrating_indicator_mult_factor = torch.ones_like(penetrating_indicator).float() + penetrating_indicator_mult_factor[penetrating_indicator] = -1. ## penetration indicator ## + + + + dist_sampled_pts_to_passive_obj = dist_sampled_pts_to_passive_obj * penetrating_indicator_mult_factor + # contact_spring_ka * | minn_spring_length - dist_sampled_pts_to_passive_obj | + + # use contact + if self.use_contact_dist_as_sdf: # + queried_sdf = dist_sampled_pts_to_passive_obj # queried sdf # # queried sdf # + + # + in_contact_indicator_robot_to_obj = queried_sdf <= self.minn_dist_threshold_robot_to_obj ### queried_sdf <= minn_dist_threshold_robot_to_obj + + zero_level_incontact_indicator_robot_to_obj = queried_sdf <= 0.0 + + + ## minn_dist_sampled_pts_passive_obj_thres # ## in contct indicator ## + in_contact_indicator = dist_sampled_pts_to_passive_obj <= minn_dist_sampled_pts_passive_obj_thres + + + # ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + # ws_unnormed = torch.ones_like(ws_unnormed) + ws_unnormed = torch.ones_like(dist_sampled_pts_to_passive_obj) + ws_unnormed[dist_sampled_pts_to_passive_obj > minn_dist_sampled_pts_passive_obj_thres] = 0 + + # ws_unnormed = ws_beta * torch.exp(-1. 
* dist_sampled_pts_to_passive_obj * ws_alpha ) + # ws_normed = ws_unnormed / torch.clamp(torch.sum(ws_unnormed), min=1e-9) + # cur_act_weights = ws_normed + cur_act_weights = ws_unnormed + + + # minimized motions # + # penetrating_indicator = dot_inter_obj_pts_to_sampled_pts_normals < 0 # + # self.penetrating_indicator = penetrating_indicator # # + self.penetrating_indicator = in_contact_indicator_robot_to_obj + cur_inter_obj_normals = inter_obj_normals.clone().detach() + + ### + if self.penetration_determining == "plane_primitives": # optimize the ruels for ball case? # + in_contact_indicator_robot_to_obj, queried_sdf, inter_obj_pts, inter_obj_normals, canon_inter_obj_pts, canon_inter_obj_normals = self.query_for_contacting_primitives(sampled_input_pts, (cur_rot, cur_trans)) + + self.penetrating_indicator = in_contact_indicator_robot_to_obj + + cur_inter_obj_normals = inter_obj_normals.clone().detach() + elif self.penetration_determining == "ball_primitives": + in_contact_indicator_robot_to_obj, queried_sdf, inter_obj_pts, inter_obj_normals, canon_inter_obj_pts, canon_inter_obj_normals = self.query_for_contacting_ball_primitives(sampled_input_pts, (cur_rot, cur_trans)) + self.penetrating_indicator = in_contact_indicator_robot_to_obj + cur_inter_obj_normals = inter_obj_normals.clone().detach() + else: + # inter_obj_pts + canon_inter_obj_pts = torch.matmul( + cur_passive_obj_rot.contiguous().transpose(1, 0).contiguous(), (inter_obj_pts - cur_passive_obj_trans.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() ## + canon_inter_obj_normals = torch.matmul( # passive obj rot ## # R^T n --> R R^T n --the current inter obj normals ## + cur_passive_obj_rot.contiguous().transpose(1, 0).contiguous(), inter_obj_normals.contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() ## -> inter obj normals + + + ##### penetration depth penalty loss calculation: strategy 2 ##### + penetration_proj_ks = self.minn_dist_threshold_robot_to_obj - queried_sdf + penetration_proj_pos = sampled_input_pts + penetration_proj_ks.unsqueeze(-1) * inter_obj_normals ## nn_sampled_pts x 3 ## + dot_pos_to_proj_with_normal = torch.sum( + (penetration_proj_pos.detach() - sampled_input_pts) * inter_obj_normals.detach(), dim=-1 ### nn_sampled_pts + ) + + + # self.penetrating_depth_penalty = dot_pos_to_proj_with_normal[in_contact_indicator_robot_to_obj].mean() + self.smaller_than_zero_level_set_indicator = queried_sdf < 0.0 + self.penetrating_depth_penalty = dot_pos_to_proj_with_normal[queried_sdf < 0.0].mean() + ##### penetration depth penalty loss calculation: strategy 2 ##### + + + ##### penetration depth penalty loss calculation: strategy 1 ##### + # self.penetrating_depth_penalty = (self.minn_dist_threshold_robot_to_obj - queried_sdf[in_contact_indicator_robot_to_obj]).mean() + ##### penetration depth penalty loss calculation: strategy 1 ##### + + + ### penetration strategy v4 #### # another mophology # + + + + if self.nn_instances == 1: # spring ks values + # contact ks values # # if we set a fixed k value here # + contact_spring_ka = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + contact_spring_kb = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 1).view(1,) + # contact_spring_kc = self.spring_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + # tangential_ks = self.spring_ks_values(torch.ones((1,), dtype=torch.long).cuda()).view(1,) + else: + contact_spring_ka = 
self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + # contact_spring_kb = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 2).view(1,) + # contact_spring_kc = self.spring_ks_values[i_instance](torch.zeros((1,), dtype=torch.long).cuda() + 3).view(1,) + + # tangential_ks = self.spring_ks_values[i_instance](torch.ones((1,), dtype=torch.long).cuda()).view(1,) + + # optm_alltime_ks + # # optimizable_spring_ks_normal, optimizable_spring_ks_friction # + if self.optm_alltime_ks: + opt_penetration_proj_k_to_robot = self.optimizable_spring_ks_normal(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + opt_penetration_proj_k_to_robot_friction = self.optimizable_spring_ks_friction(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + else: + # optimizable_spring_ks # + opt_penetration_proj_k_to_robot = self.optimizable_spring_ks(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + opt_penetration_proj_k_to_robot_friction = self.optimizable_spring_ks(torch.zeros((1,), dtype=torch.long).cuda() + 1).view(1,) + + # self.penetration_proj_k_to_robot = opt_penetration_proj_k_to_robot ** 2 + # self.penetration_proj_k_to_robot_friction = opt_penetration_proj_k_to_robot_friction ** 2 + + ## penetration proj k to robot ## + penetration_proj_k_to_robot = self.penetration_proj_k_to_robot * opt_penetration_proj_k_to_robot ** 2 + penetration_proj_k_to_robot_friction = self.penetration_proj_k_to_robot_friction * opt_penetration_proj_k_to_robot_friction ** 2 + + # penetration_proj_k_to_robot = self.penetration_proj_k_to_robot + # penetration_proj_k_to_robot = opt_penetration_proj_k_to_robot + + if self.use_split_params: ## + contact_spring_ka = self.spring_contact_ks_values(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(1,) + + if self.use_sqr_spring_stiffness: + contact_spring_ka = contact_spring_ka ** 2 + + if self.train_residual_friction: + contact_spring_ka = 0.1907073 ** 2 + contact_spring_kb = 0.00131699 + + if self.use_same_contact_spring_k: + # contact_spring_ka_ori = contact_spring_ka.clone() + + ''' Equal forces ''' + contact_spring_ka = penetration_proj_k_to_robot * contact_spring_ka # equal forc stiffjess + + contact_spring_kb = contact_spring_kb * penetration_proj_k_to_robot + + penetration_proj_k_to_robot = contact_spring_ka + + ''' N-Equal forces ''' + # contact_spring_ka = 30. * contact_spring_ka ## change the contact spring k ## + # penetration_proj_k_to_robot = penetration_proj_k_to_robot * contact_spring_ka_ori ### change the projection coeff to the robot + + + ''' N-Equal forces ''' + # contact_spring_ka = penetration_proj_k_to_robot * contact_spring_ka ## contact spring ka ## + # penetration_proj_k_to_robot = 30. 
* contact_spring_ka_ori + else: + contact_spring_ka = penetration_proj_k_to_robot * contact_spring_ka # + # contact_spring_kb = contact_spring_kb * self.penetration_proj_k_to_robot_friction #proje k to robot friction + contact_spring_kb = contact_spring_kb * penetration_proj_k_to_robot_friction + penetration_proj_k_to_robot = contact_spring_ka + + + + + if torch.isnan(self.penetrating_depth_penalty): # + self.penetrating_depth_penalty = torch.tensor(0., dtype=torch.float32).cuda() + + # penetrating_points = sampled_input_pts[penetrating_indicator] # robot to obj # # + penetrating_points = sampled_input_pts[in_contact_indicator_robot_to_obj] # + # penetration_proj_k_to_robot = 1.0 + # penetration_proj_k_to_robot = 0.01 + + # penetration_proj_k_to_robot = 0.0 + # proj_force = dist * normal * penetration_k # # + penetrating_forces = penetration_proj_ks.unsqueeze(-1) * cur_inter_obj_normals * penetration_proj_k_to_robot + # penetrating_forces = penetrating_forces[penetrating_indicator] + penetrating_forces = penetrating_forces[in_contact_indicator_robot_to_obj] + self.penetrating_forces = penetrating_forces # forces + self.penetrating_points = penetrating_points # penetrating points ## # incontact indicator toothers ## + + + # contact psring ka ## cotnact + + ##### the contact force decided by the theshold ###### # realted to the distance threshold and the HO distance # + contact_force_d = contact_spring_ka * (self.minn_dist_sampled_pts_passive_obj_thres - dist_sampled_pts_to_passive_obj) + ###### the contact force decided by the threshold ###### + + + # contac force d contact spring ka * penetration depth # + contact_force_d = contact_force_d.unsqueeze(-1) * (-1. * inter_obj_normals) + + # norm_tangential_forces = torch.norm(tangential_forces, dim=-1, p=2) # nn_sampled_pts ## + # norm_along_normals_forces = torch.norm(contact_force_d, dim=-1, p=2) # nn_sampled_pts, nnsampledpts # + # penalty_friction_constraint = (norm_tangential_forces - self.static_friction_mu * norm_along_normals_forces) ** 2 + # penalty_friction_constraint[norm_tangential_forces <= self.static_friction_mu * norm_along_normals_forces] = 0. 
+ # penalty_friction_constraint = torch.mean(penalty_friction_constraint) # friction + self.penalty_friction_constraint = torch.zeros((1,), dtype=torch.float32).cuda().mean() # penalty friction + # contact_force_d_scalar = norm_along_normals_forces.clone() + + # friction models # + # penalty friction constraints # + penalty_friction_tangential_forces = torch.zeros_like(contact_force_d) + + + + # rotation and translatiosn # + cur_fr_rot = cur_passive_obj_rot # passive obj rot # + cur_fr_trans = cur_passive_obj_trans # + + tot_contact_active_pts = [] + tot_contact_passive_pts = [] + tot_contact_active_idxes = [] + # tot_contact_passive_idxes = [] # # + tot_canon_contact_passive_normals = [] + tot_canon_contact_passive_pts = [] + tot_contact_passive_normals = [] # tot contact passive pts; tot cotnact passive normals # + tot_contact_frictions = [] + tot_residual_normal_forces = [] + + if contact_pairs_set is not None: + # contact_active_pts = contact_pairs_set['contact_active_pts'] + # contact_passive_pts = contact_pairs_set['contact_passive_pts'] + contact_active_idxes = contact_pairs_set['contact_active_idxes'] + # contact_passive_idxes = contact_pairs_set # # app + + # contact active idxes # + # nn_contact_pts x 3 -> as the cotnact passvie normals # + canon_contact_passive_normals = contact_pairs_set['canon_contact_passive_normals'] + canon_contact_passive_pts = contact_pairs_set['canon_contact_passive_pts'] + cur_fr_contact_passive_normals = torch.matmul( ## penetration normals ## + cur_fr_rot, canon_contact_passive_normals.contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() # tranformed normals # frame passive normals # + + # not irrelevant at all # + cur_fr_contact_act_pts = sampled_input_pts[contact_active_idxes] + # cur_fr_contact_passive_pts = canon_contact_passive_pts + # + cur_fr_contact_passive_pts = torch.matmul( + cur_fr_rot, (canon_contact_passive_pts - self.center_init_passive_obj_verts.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + cur_fr_trans.unsqueeze(0) + self.center_init_passive_obj_verts.unsqueeze(0) ## passive pts + + nex_fr_contact_act_pts = nex_sampled_input_pts[contact_active_idxes] + + # cur_fr_contact_passive_to_act = cur_fr_contact_act_pts - cur_fr_contact_passive_pts # + + cur_fr_contact_passive_to_act = nex_fr_contact_act_pts - cur_fr_contact_passive_pts + + dot_rel_disp_with_passive_normals = torch.sum( + cur_fr_contact_passive_to_act * cur_fr_contact_passive_normals, dim=-1 + ) + cur_friction_forces = cur_fr_contact_passive_to_act - dot_rel_disp_with_passive_normals.unsqueeze(-1) * cur_fr_contact_passive_normals + + ## cur frame cotnct act + cur_cur_fr_contact_passive_to_act = cur_fr_contact_act_pts - cur_fr_contact_passive_pts + cur_cur_penetration_depth = torch.sum( + cur_cur_fr_contact_passive_to_act * cur_fr_contact_passive_normals, dim=-1 + ) + + + if self.train_residual_friction: + ''' add residual fictions ''' + # 3 + 3 + 3 + 3 ### active points's current relative position, active point's offset, penetration depth, normal direction + friction_net_in_feats = torch.cat( + [cur_cur_fr_contact_passive_to_act, cur_fr_contact_passive_to_act, cur_cur_penetration_depth.unsqueeze(-1), cur_fr_contact_passive_normals], dim=-1 + ) + residual_frictions = self.friction_network(friction_net_in_feats) + + residual_frictions_dot_w_normals = torch.sum( + residual_frictions * cur_fr_contact_passive_normals, dim=-1 + ) + residual_frictions = residual_frictions - 
residual_frictions_dot_w_normals.unsqueeze(-1) * cur_fr_contact_passive_normals + + cur_friction_forces = cur_friction_forces + residual_frictions + ''' add residual fictions ''' + + if self.train_residual_normal_forces: + # contact_normal_force_network + contact_normal_forces_in_feats = torch.cat( + [cur_cur_fr_contact_passive_to_act, cur_fr_contact_passive_to_act, cur_cur_penetration_depth.unsqueeze(-1), cur_fr_contact_passive_normals], dim=-1 + ) + residual_normal_forces = self.contact_normal_force_network(contact_normal_forces_in_feats) + residual_normal_forces_dot_w_normals = torch.sum( + residual_normal_forces * cur_fr_contact_passive_normals, dim=-1 + ) + residual_normal_forces = residual_normal_forces_dot_w_normals.unsqueeze(-1) * cur_fr_contact_passive_normals + tot_residual_normal_forces.append(residual_normal_forces[remaining_contact_indicators]) + + + # cur_rel_passive_to_active = cur_fr_contact_act_pts - cur_fr_contact_passive_pts + # dot_rel_disp_w_obj_normals = torch.sum( + # cur_rel_passive_to_active * cur_fr_contact_passive_normals, dim=-1 + # ) + # cur_friction_forces = cur_rel_passive_to_active - dot_rel_disp_w_obj_normals.unsqueeze(-1) * cur_fr_contact_passive_normals + + + # if the dot < 0 -> still in contact ## rremaning contacts ## + # if the dot > 0 -. not in contact and can use the points to establish new conatcts --- # maitnian the contacts # + # remaining_contact_indicators = dot_rel_disp_with_passive_normals <= 0.0 ## + + ''' Remaining penetration indicator determining -- strategy 1 ''' + # remaining_contact_indicators = cur_cur_penetration_depth <= 0.0 ## dot relative passive to active with passive normals ## + ''' Remaining penetration indicator determining -- strategy 2 ''' + remaining_contact_indicators = cur_cur_penetration_depth <= self.minn_dist_threshold_robot_to_obj + + remaining_contact_act_idxes = contact_active_idxes[remaining_contact_indicators] + + # remaining contact act idxes # + + if torch.sum(remaining_contact_indicators.float()).item() > 0.5: + # contact_active_pts, contact_passive_pts, ## remaining cotnact indicators ## + tot_contact_passive_normals.append(cur_fr_contact_passive_normals[remaining_contact_indicators]) + tot_contact_passive_pts.append(cur_fr_contact_passive_pts[remaining_contact_indicators]) ## + tot_contact_active_pts.append(cur_fr_contact_act_pts[remaining_contact_indicators]) ## contact act pts + + tot_contact_active_idxes.append(contact_active_idxes[remaining_contact_indicators]) + # tot_contact_passive_idxes.append(contact_passive_idxes[remaining_contact_indicators]) # # passive idxes # + tot_contact_frictions.append(cur_friction_forces[remaining_contact_indicators]) + tot_canon_contact_passive_pts.append(canon_contact_passive_pts[remaining_contact_indicators]) + tot_canon_contact_passive_normals.append(canon_contact_passive_normals[remaining_contact_indicators]) + + else: + remaining_contact_act_idxes = torch.empty((0,), dtype=torch.long).cuda() ## remaining contact act idxes ## + + # remaining idxes # + + new_in_contact_indicator_robot_to_obj = in_contact_indicator_robot_to_obj.clone() + new_in_contact_indicator_robot_to_obj[remaining_contact_act_idxes] = False + + tot_active_pts_idxes = torch.arange(0, sampled_input_pts.size(0), dtype=torch.long).cuda() + + + if torch.sum(new_in_contact_indicator_robot_to_obj.float()).item() > 0.5: + # + # in_contact_indicator_robot_to_obj, queried_sdf, inter_obj_pts, inter_obj_normals, canon_inter_obj_pts, canon_inter_obj_normals + new_contact_active_pts = 
sampled_input_pts[new_in_contact_indicator_robot_to_obj] + new_canon_contact_passive_pts = canon_inter_obj_pts[new_in_contact_indicator_robot_to_obj] + new_canon_contact_passive_normals = canon_inter_obj_normals[new_in_contact_indicator_robot_to_obj] ## obj normals ## + new_contact_active_idxes = tot_active_pts_idxes[new_in_contact_indicator_robot_to_obj] + + new_cur_fr_contact_passive_normals = torch.matmul( + cur_fr_rot, new_canon_contact_passive_normals.contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() # + + # new cur fr contact passive pts # + new_cur_fr_contact_passive_pts = torch.matmul( + cur_fr_rot, (new_canon_contact_passive_pts - self.center_init_passive_obj_verts.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + cur_fr_trans.unsqueeze(0) + self.center_init_passive_obj_verts.unsqueeze(0) ## passive pts + + + + new_nex_fr_contact_active_pts = nex_sampled_input_pts[new_in_contact_indicator_robot_to_obj] + + + new_cur_fr_contact_passive_to_act = new_nex_fr_contact_active_pts - new_cur_fr_contact_passive_pts + + dot_rel_disp_with_passive_normals = torch.sum( + new_cur_fr_contact_passive_to_act * new_cur_fr_contact_passive_normals, dim=-1 + ) + new_frictions = new_cur_fr_contact_passive_to_act - dot_rel_disp_with_passive_normals.unsqueeze(-1) * new_cur_fr_contact_passive_normals + + + if self.train_residual_friction: + ''' add residual fictions ''' + new_cur_cur_fr_contact_passive_to_act = new_contact_active_pts - new_cur_fr_contact_passive_pts + new_cur_cur_penetration_depth = torch.sum( + new_cur_cur_fr_contact_passive_to_act * new_cur_fr_contact_passive_normals, dim=-1 + ) + # 3 + 3 + 3 + 3 ### active points's current relative position, active point's offset, penetration depth, normal direction + new_friction_net_in_feats = torch.cat( + [new_cur_cur_fr_contact_passive_to_act, new_cur_fr_contact_passive_to_act, new_cur_cur_penetration_depth.unsqueeze(-1), new_cur_fr_contact_passive_normals], dim=-1 + ) + new_residual_frictions = self.friction_network(new_friction_net_in_feats) + + new_residual_frictions_dot_w_normals = torch.sum( + new_residual_frictions * new_cur_fr_contact_passive_normals, dim=-1 + ) + new_residual_frictions = new_residual_frictions - new_residual_frictions_dot_w_normals.unsqueeze(-1) * new_cur_fr_contact_passive_normals + new_frictions = new_frictions + new_residual_frictions + ''' add residual fictions ''' + + if self.train_residual_normal_forces: + contact_normal_forces_in_feats = torch.cat( + [new_cur_cur_fr_contact_passive_to_act, new_cur_fr_contact_passive_to_act, new_cur_cur_penetration_depth.unsqueeze(-1), new_cur_fr_contact_passive_normals], dim=-1 + ) + new_residual_normal_forces = self.contact_normal_force_network(contact_normal_forces_in_feats) + new_residual_normal_forces_dot_w_normals = torch.sum( + new_residual_normal_forces * new_cur_fr_contact_passive_normals, dim=-1 + ) + new_residual_normal_forces = new_residual_normal_forces_dot_w_normals.unsqueeze(-1) * new_cur_fr_contact_passive_normals + tot_residual_normal_forces.append(new_residual_normal_forces) + + + # new_frictions = torch.zeros_like(new_cur_fr_contact_passive_pts) + tot_contact_passive_normals.append(new_cur_fr_contact_passive_normals) + tot_contact_passive_pts.append(new_cur_fr_contact_passive_pts) + tot_contact_active_pts.append(new_contact_active_pts) + tot_contact_active_idxes.append(new_contact_active_idxes) + tot_canon_contact_passive_pts.append(new_canon_contact_passive_pts) + 
tot_canon_contact_passive_normals.append(new_canon_contact_passive_normals) + tot_contact_frictions.append(new_frictions) + + + if len(tot_contact_passive_normals) > 0: + # forces ? # not hard to compute ... # + # passive normals; passive pts # + tot_contact_passive_normals = torch.cat( + tot_contact_passive_normals, dim=0 + ) + tot_contact_passive_pts = torch.cat(tot_contact_passive_pts, dim=0) + tot_contact_active_pts = torch.cat(tot_contact_active_pts, dim=0) + tot_contact_active_idxes = torch.cat(tot_contact_active_idxes, dim=0) + tot_canon_contact_passive_pts = torch.cat(tot_canon_contact_passive_pts, dim=0) + tot_canon_contact_passive_normals = torch.cat(tot_canon_contact_passive_normals, dim=0) + tot_contact_frictions = torch.cat(tot_contact_frictions, dim=0) + if self.train_residual_normal_forces: ## the + tot_residual_normal_forces = torch.cat(tot_residual_normal_forces, dim=0) + + contact_passive_to_active = tot_contact_active_pts - tot_contact_passive_pts + # dot relative passive to active with the passive normals # ## relative + + # this depth should be adjusted according to minn_dist_threshold_robot_to_obj ## + dot_rel_passive_to_active_with_normals = torch.sum( + contact_passive_to_active * tot_contact_passive_normals, dim=-1 ### dot with the passive normals ## + ) + # Adjust the penetration depth used for contact force computing using the distance threshold # + dot_rel_passive_to_active_with_normals = dot_rel_passive_to_active_with_normals - self.minn_dist_threshold_robot_to_obj + # dot with the passive normals ## dot with passive normals ## ## passive normals ## + ### penetration depth * the passive obj normals ### # dot value and with + contact_forces_along_normals = dot_rel_passive_to_active_with_normals.unsqueeze(-1) * tot_contact_passive_normals * contact_spring_ka # dot wiht relative # negative normal directions # + + if self.train_residual_normal_forces: + contact_forces_along_normals = contact_forces_along_normals + tot_residual_normal_forces + + # return the contact pairs and return the contact dicts # + # return the contact pairs and the contact dicts # + # having got the contact pairs -> contact dicts # + # having got the contact pairs -> contact dicts # ## contact spring kb ## + tot_contact_frictions = tot_contact_frictions * contact_spring_kb # change it to spring_kb... 
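+ # Note (descriptive summary of the contact model assembled above): the normal force is
+ # contact_spring_ka * (dot(active_pt - passive_contact_pt, passive_normal) - minn_dist_threshold_robot_to_obj)
+ # along the passive-object normal, while the friction force scales the tangential part of the
+ # next-frame active point's offset from its paired passive contact point by contact_spring_kb;
+ # the optional residual networks add learned corrections to both terms when enabled.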
+ + if pts_frictional_forces is not None: + tot_contact_frictions = pts_frictional_forces[tot_contact_active_idxes] + + # contac_forces_along_normals + upd_contact_pairs_information = { + 'contact_active_idxes': tot_contact_active_idxes.clone().detach(), + 'canon_contact_passive_normals': tot_canon_contact_passive_normals.clone().detach(), + 'canon_contact_passive_pts': tot_canon_contact_passive_pts.clone().detach(), + 'contact_passive_pts': tot_contact_passive_pts.clone().detach(), + } + else: + upd_contact_pairs_information = None + + + + ''' average acitve points weights ''' + if torch.sum(cur_act_weights).item() > 0.5: + cur_act_weights = cur_act_weights / torch.sum(cur_act_weights) + + + # norm_penalty_friction_tangential_forces = torch.norm(penalty_friction_tangential_forces, dim=-1, p=2) + # maxx_norm_penalty_friction_tangential_forces, _ = torch.max(norm_penalty_friction_tangential_forces, dim=-1) + # minn_norm_penalty_friction_tangential_forces, _ = torch.min(norm_penalty_friction_tangential_forces, dim=-1) + # print(f"maxx_norm_penalty_friction_tangential_forces: {maxx_norm_penalty_friction_tangential_forces}, minn_norm_penalty_friction_tangential_forces: {minn_norm_penalty_friction_tangential_forces}") + + # tangetntial forces --- dot with normals # + if not self.use_pre_proj_frictions: # inter obj normals # # if ue proj frictions # + dot_tangential_forces_with_inter_obj_normals = torch.sum(penalty_friction_tangential_forces * inter_obj_normals, dim=-1) ### nn_active_pts x # + penalty_friction_tangential_forces = penalty_friction_tangential_forces - dot_tangential_forces_with_inter_obj_normals.unsqueeze(-1) * inter_obj_normals + + # penalty_friction_tangential_forces = torch.zeros_like(penalty_friction_tangential_forces) + penalty_friction_tangential_forces = tot_contact_frictions + + + + + if upd_contact_pairs_information is not None: + contact_force_d = contact_forces_along_normals # forces along normals # + # contact forces along normals # + self.contact_force_d = contact_force_d + + # penalty_friction_tangential_forces = torch.zeros_like(contact_force_d) + + #### penalty_frictiontangential_forces, tangential_forces #### + # self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.tangential_forces = penalty_friction_tangential_forces + + self.penalty_friction_tangential_forces = penalty_friction_tangential_forces + self.contact_force_d = contact_force_d + self.penalty_based_friction_forces = penalty_friction_tangential_forces + + self.tot_contact_passive_normals = tot_contact_passive_normals + # penalty dot forces normals # + ''' Penalty dot forces normals ''' + # penalty_dot_forces_normals = dot_forces_normals ** 2 # must in the negative direction of the object normal # + # penalty_dot_forces_normals[dot_forces_normals <= 0] = 0 # 1) must in the negative direction of the object normal # + # penalty_dot_forces_normals = torch.mean(penalty_dot_forces_normals) # 1) must # 2) must # # + # self.penalty_dot_forces_normals = penalty_dot_forces_normals # + + forces = self.contact_force_d + self.penalty_friction_tangential_forces + + center_point_to_contact_pts = tot_contact_passive_pts - passive_center_point.unsqueeze(0) + # cneter point to contact pts # + # cneter point to contact pts # + torque = torch.cross(center_point_to_contact_pts, forces) + torque = torch.mean(torque, dim=0) + forces = torch.mean(forces, dim=0) ## get rigid acc ## # + else: + # self.contact_force_d = torch.zeros((3,), dtype=torch.float32).cuda() + torque = torch.zeros((3,), 
dtype=torch.float32).cuda() + forces = torch.zeros((3,), dtype=torch.float32).cuda() + self.contact_force_d = torch.zeros((1, 3), dtype=torch.float32).cuda() + self.penalty_friction_tangential_forces = torch.zeros((1, 3), dtype=torch.float32).cuda() + self.penalty_based_friction_forces = torch.zeros((1, 3), dtype=torch.float32).cuda() + + self.tot_contact_passive_normals = torch.zeros((1, 3), dtype=torch.float32).cuda() + + + + + ''' Forces and rigid acss: Strategy and version 1 ''' + # rigid_acc = torch.sum(forces * cur_act_weights.unsqueeze(-1), dim=0) # rigid acc # + + # ###### sampled input pts to center ####### + # if contact_pairs_set is not None: + # inter_obj_pts[contact_active_idxes] = cur_passive_obj_verts[contact_passive_idxes] + + # # center_point_to_sampled_pts = sampled_input_pts - passive_center_point.unsqueeze(0) + + # center_point_to_sampled_pts = inter_obj_pts - passive_center_point.unsqueeze(0) + # ###### sampled input pts to center ####### + + # ###### nearest passive object point to center ####### + # # cur_passive_obj_verts_exp = cur_passive_obj_verts.unsqueeze(0).repeat(sampled_input_pts.size(0), 1, 1).contiguous() ### + # # cur_passive_obj_verts = batched_index_select(values=cur_passive_obj_verts_exp, indices=minn_idx_sampled_pts_to_passive_obj.unsqueeze(1), dim=1) + # # cur_passive_obj_verts = cur_passive_obj_verts.squeeze(1) # squeeze(1) # + + # # center_point_to_sampled_pts = cur_passive_obj_verts - passive_center_point.unsqueeze(0) # + # ###### nearest passive object point to center ####### + + # sampled_pts_torque = torch.cross(center_point_to_sampled_pts, forces, dim=-1) + # # torque = torch.sum( + # # sampled_pts_torque * ws_normed.unsqueeze(-1), dim=0 + # # ) + # torque = torch.sum( + # sampled_pts_torque * cur_act_weights.unsqueeze(-1), dim=0 + # ) + + + + + + if self.nn_instances == 1: + time_cons = self.time_constant(torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant(torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant(torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant(torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant(torch.ones((1,)).long().cuda()).view(1) + else: + time_cons = self.time_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + time_cons_2 = self.time_constant[i_instance](torch.zeros((1,)).long().cuda() + 2).view(1) + time_cons_rot = self.time_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + damping_cons = self.damping_constant[i_instance](torch.zeros((1,)).long().cuda()).view(1) + damping_cons_rot = self.damping_constant[i_instance](torch.ones((1,)).long().cuda()).view(1) + + ## + if self.use_split_params: ## ## friction network should be trained? 
## + # sep_time_constant, sep_torque_time_constant, sep_damping_constant, sep_angular_damping_constant + time_cons = self.sep_time_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + time_cons_2 = self.sep_torque_time_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + # damping_cons = self.sep_damping_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + # damping_cons_2 = self.sep_angular_damping_constant(torch.zeros((1,)).long().cuda() + input_pts_ts).view(1) + + # time_cons = 0.05 + # time_cons_2 = 0.05 + # time_cons_rot = 0.05 + + + time_cons = 0.005 + time_cons_2 = 0.005 + time_cons_rot = 0.005 + + + time_cons = 0.0005 + time_cons_2 = 0.0005 + time_cons_rot = 0.0005 + + # time_cons = 0.00005 + # time_cons_2 = 0.00005 + # time_cons_rot = 0.00005 + + # time_cons = 0.0005 + # time_cons_2 = 0.0005 + # time_cons_rot = 0.0005 + + + ## not a good ## + # time_cons = 0.005 + # time_cons_2 = 0.005 + # time_cons_rot = 0.005 + + + + obj_mass = self.obj_mass + + obj_mass_value = self.optimizable_obj_mass(torch.zeros((1,), dtype=torch.long).cuda()).view(1) + + obj_mass_value = obj_mass_value ** 2 + + rigid_acc = forces / obj_mass_value # + + damping_coef = 5e2 + + damping_coef = 0.0 + damping_coef_angular = 0.0 + + + + # small clip with not very noticiable # # + + + if self.use_optimizable_params: ## + damping_coef = self.sep_damping_constant(torch.zeros((1,), dtype=torch.long).cuda()).view(1) + damping_coef_angular = self.sep_angular_damping_constant(torch.zeros((1,), dtype=torch.long).cuda()).view(1,) + + damping_coef = damping_coef ** 2 + damping_coef_angular = damping_coef_angular ** 2 ## sue the sampiing coef angular and dampoing coef here ## + + if self.use_damping_params_vel: + damping_coef_lin_vel = self.lin_damping_coefs(torch.zeros((1,), dtype=torch.long).cuda()).view(1) + damping_coef_ang_vel = self.ang_damping_coefs(torch.zeros((1,), dtype=torch.long).cuda()).view(1) + damping_coef_lin_vel = damping_coef_lin_vel ** 2 + damping_coef_ang_vel = damping_coef_ang_vel ** 2 + else: + damping_coef_lin_vel = 1.0 + damping_coef_ang_vel = self.ang_vel_damping + + + if input_pts_ts > 0: + # the sampoing for the rigid acc here ? # + rigid_acc = rigid_acc - damping_coef * self.timestep_to_vel[input_pts_ts - 1].detach() ## dam + + + #F the sampoing for the rigid acc here ? # + # rigid_acc = # + # rigid acc = forces # + + k_acc_to_vel = time_cons + k_vel_to_offset = time_cons_2 + delta_vel = rigid_acc * k_acc_to_vel + if input_pts_ts == 0: + cur_vel = delta_vel + else: + ##### TMP ###### + # cur_vel = delta_vel # + cur_vel = delta_vel + (1.0 - damping_coef_lin_vel) * self.timestep_to_vel[input_pts_ts - 1].detach() # * damping_cons # + self.timestep_to_vel[input_pts_ts] = cur_vel.detach() + + cur_offset = k_vel_to_offset * cur_vel + cur_rigid_def = self.timestep_to_total_def[input_pts_ts].detach() # timestep + + + cur_inertia_div_factor = self.inertia_div_factor(torch.zeros((1,), dtype=torch.long).cuda()).view(1) + + + # cur inv inertia is a large value? # # bug free? # ### divide the inv_inertia using the factor 20.0 # + cur_inv_inertia = torch.matmul(torch.matmul(cur_passive_obj_rot, self.I_inv_ref), cur_passive_obj_rot.transpose(1, 0)) / float(20.) + # cur_inv_inertia = torch.matmul(torch.matmul(cur_passive_obj_rot, self.I_inv_ref), cur_passive_obj_rot.transpose(1, 0)) / float(10.) 
## + # cur_inv_inertia = torch.matmul(torch.matmul(cur_passive_obj_rot, self.I_inv_ref), cur_passive_obj_rot.transpose(1, 0)) / float(cur_inertia_div_factor) ## + + cur_inv_inertia = torch.eye(n=3, dtype=torch.float32).cuda() # three values for the inertia? # + + obj_inertia_value = self.obj_inertia(torch.zeros((1,), dtype=torch.long).cuda()).view(3,) + obj_inertia_value = obj_inertia_value ** 2 + # cur_inv_inertia = torch.diag(obj_inertia_value) + cur_inv_inertia = cur_inv_inertia * obj_inertia_value.unsqueeze(0) ## 3 x 3 matrix ## ### the inertia values ## + cur_inv_inertia = torch.matmul(torch.matmul(cur_passive_obj_rot, cur_inv_inertia), cur_passive_obj_rot.transpose(1, 0)) + + torque = torch.matmul(cur_inv_inertia, torque.unsqueeze(-1)).contiguous().squeeze(-1) ### get the torque of the object ### + # + # + if input_pts_ts > 0: # + torque = torque - damping_coef_angular * self.timestep_to_angular_vel[input_pts_ts - 1].detach() + delta_angular_vel = torque * time_cons_rot + + # print(f"torque: {torque}") # + + if input_pts_ts == 0: + cur_angular_vel = delta_angular_vel + else: + ##### TMP ###### + # cur_angular_vel = delta_angular_vel # + # cur_angular_vel = delta_angular_vel + (1.0 - self.ang_vel_damping) * (self.timestep_to_angular_vel[input_pts_ts - 1].detach()) + # (1.0 - damping_coef_lin_vel) * + cur_angular_vel = delta_angular_vel + (1.0 - damping_coef_ang_vel) * (self.timestep_to_angular_vel[input_pts_ts - 1].detach()) # damping coef ### + cur_delta_angle = cur_angular_vel * time_cons_rot # \delta_t w^1 / 2 # / 2 # # \delta_t w^1 # + + # prev # # # ## + prev_quaternion = self.timestep_to_quaternion[input_pts_ts].detach() # input pts ts # + cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) + + # cur_quaternion = prev_quaternion + update_quaternion(cur_delta_angle, prev_quaternion) # + cur_quaternion = cur_quaternion / torch.norm(cur_quaternion, p=2, dim=-1, keepdim=True) + # angular + # obj_mass # + + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + prev_rot_mtx = quaternion_to_matrix(prev_quaternion) + + # cur_ 3 no frictions/ # # # + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_rigid_def.unsqueeze(0), cur_delta_rot_mtx.detach()).squeeze(0) + # cur_upd_rigid_def = cur_offset.detach() + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + cur_upd_rigid_def = cur_offset.detach() + cur_rigid_def + + ## quaternion to matrix and + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx.detach(), cur_rigid_def.unsqueeze(-1)).squeeze(-1) + # cur_optimizable_total_def = cur_offset + torch.matmul(cur_delta_rot_mtx, cur_rigid_def.unsqueeze(-1)).squeeze(-1) # + cur_optimizable_total_def = cur_offset + cur_rigid_def + # cur_optimizable_quaternion = prev_quaternion.detach() + cur_delta_quaternion # + # timestep_to_optimizable_total_def, timestep_to_optimizable_quaternion # timestep # timestep # + + + self.upd_rigid_acc = rigid_acc.clone() + self.upd_rigid_def = cur_upd_rigid_def.clone() + self.upd_optimizable_total_def = cur_optimizable_total_def.clone() + self.upd_quaternion = cur_quaternion.clone() + self.upd_rot_mtx = cur_optimizable_rot_mtx.clone() + self.upd_angular_vel = cur_angular_vel.clone() + self.upd_forces = forces.clone() ## ## + + ## + self.timestep_to_accum_acc[input_pts_ts] = rigid_acc.detach().clone() + + if not fix_obj: + if input_pts_ts == 0 and input_pts_ts not in self.timestep_to_optimizable_total_def: + self.timestep_to_total_def[input_pts_ts] = 
torch.zeros_like(cur_upd_rigid_def) + self.timestep_to_optimizable_total_def[input_pts_ts] = torch.zeros_like(cur_optimizable_total_def) + self.timestep_to_optimizable_quaternion[input_pts_ts] = torch.tensor([1., 0., 0., 0.],dtype=torch.float32).cuda() + self.timestep_to_quaternion[input_pts_ts] = torch.tensor([1., 0., 0., 0.],dtype=torch.float32).cuda() + self.timestep_to_angular_vel[input_pts_ts] = torch.zeros_like(cur_angular_vel).detach() + self.timestep_to_total_def[nex_pts_ts] = cur_upd_rigid_def + self.timestep_to_optimizable_total_def[nex_pts_ts] = cur_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = cur_quaternion + self.timestep_to_quaternion[nex_pts_ts] = cur_quaternion.detach() + + cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = cur_optimizable_rot_mtx + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + self.timestep_to_angular_vel[input_pts_ts] = cur_angular_vel.detach() + + # self.timestep_to_optimizable_total_def[input_pts_ts + 1] = self.time_translations(torch.zeros((1,), dtype=torch.long).cuda() + input_pts_ts).view(3) + + + self.timestep_to_input_pts[input_pts_ts] = sampled_input_pts.detach() + self.timestep_to_point_accs[input_pts_ts] = forces.detach() + self.timestep_to_aggregation_weights[input_pts_ts] = cur_act_weights.detach() + self.timestep_to_sampled_pts_to_passive_obj_dist[input_pts_ts] = dist_sampled_pts_to_passive_obj.detach() + self.save_values = { + 'timestep_to_point_accs': {cur_ts: self.timestep_to_point_accs[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_point_accs}, + # 'timestep_to_vel': {cur_ts: self.timestep_to_vel[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_vel}, + 'timestep_to_input_pts': {cur_ts: self.timestep_to_input_pts[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_input_pts}, + 'timestep_to_aggregation_weights': {cur_ts: self.timestep_to_aggregation_weights[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_aggregation_weights}, + 'timestep_to_sampled_pts_to_passive_obj_dist': {cur_ts: self.timestep_to_sampled_pts_to_passive_obj_dist[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_sampled_pts_to_passive_obj_dist}, # quaternion + # 'timestep_to_ws_normed': {cur_ts: self.timestep_to_ws_normed[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_ws_normed}, + # 'timestep_to_defed_input_pts_sdf': {cur_ts: self.timestep_to_defed_input_pts_sdf[cur_ts].cpu().numpy() for cur_ts in self.timestep_to_defed_input_pts_sdf}, + } + + return upd_contact_pairs_information + + def update_timestep_to_quantities(self, input_pts_ts, upd_quat, upd_trans): + nex_pts_ts = input_pts_ts + 1 + self.timestep_to_total_def[nex_pts_ts] = upd_trans # .detach().clone().detach() + self.timestep_to_optimizable_total_def[nex_pts_ts] = upd_trans # .detach().clone().detach() + self.timestep_to_optimizable_quaternion[nex_pts_ts] = upd_quat # .detach().clone().detach() + self.timestep_to_quaternion[nex_pts_ts] = upd_quat # .detach().clone().detach() + + # self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = quaternion_to_matrix(upd_quat.detach().clone()).clone().detach() + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = quaternion_to_matrix(upd_quat) # .clone().detach() # the upd quat # + + + + def reset_timestep_to_quantities(self, input_pts_ts): + nex_pts_ts = input_pts_ts + 1 + self.timestep_to_accum_acc[input_pts_ts] = self.upd_rigid_acc.detach() + self.timestep_to_total_def[nex_pts_ts] = self.upd_rigid_def + 
self.timestep_to_optimizable_total_def[nex_pts_ts] = self.upd_optimizable_total_def + self.timestep_to_optimizable_quaternion[nex_pts_ts] = self.upd_quaternion + self.timestep_to_quaternion[nex_pts_ts] = self.upd_quaternion.detach() + + # cur_optimizable_rot_mtx = quaternion_to_matrix(cur_quaternion) + self.timestep_to_optimizable_rot_mtx[nex_pts_ts] = self.upd_rot_mtx + # new_pts = raw_input_pts - cur_offset.unsqueeze(0) + + self.timestep_to_angular_vel[input_pts_ts] = self.upd_angular_vel.detach() + self.timestep_to_point_accs[input_pts_ts] = self.upd_forces.detach() + \ No newline at end of file diff --git a/models/renderer.py b/models/renderer.py new file mode 100644 index 0000000000000000000000000000000000000000..e52f314fd72cf8fa08cda2431e861f2f8a8679c9 --- /dev/null +++ b/models/renderer.py @@ -0,0 +1,641 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +import logging +import mcubes +from icecream import ic +import os + +import trimesh +from pysdf import SDF + +from uni_rep.rep_3d.dmtet import marching_tets_tetmesh, create_tetmesh_variables + +def create_mt_variable(device): + triangle_table = torch.tensor( + [ + [-1, -1, -1, -1, -1, -1], + [1, 0, 2, -1, -1, -1], + [4, 0, 3, -1, -1, -1], + [1, 4, 2, 1, 3, 4], + [3, 1, 5, -1, -1, -1], + [2, 3, 0, 2, 5, 3], + [1, 4, 0, 1, 5, 4], + [4, 2, 5, -1, -1, -1], + [4, 5, 2, -1, -1, -1], + [4, 1, 0, 4, 5, 1], + [3, 2, 0, 3, 5, 2], + [1, 3, 5, -1, -1, -1], + [4, 1, 2, 4, 3, 1], + [3, 0, 4, -1, -1, -1], + [2, 0, 1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1] + ], dtype=torch.long, device=device) + + num_triangles_table = torch.tensor([0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long, device=device) + base_tet_edges = torch.tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long, device=device) + v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=device)) + return triangle_table, num_triangles_table, base_tet_edges, v_id + + + +def extract_fields_from_tets(bound_min, bound_max, resolution, query_func): + # load tet via resolution # + # scale them via bounds # + # extract the geometry # + # /home/xueyi/gen/DeepMetaHandles/data/tets/100_compress.npz # strange # + device = bound_min.device + # if resolution in [64, 70, 80, 90, 100]: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{resolution}_compress.npz" + # else: + tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + tets = np.load(tet_fn) + verts = torch.from_numpy(tets['vertices']).float().to(device) # verts positions + indices = torch.from_numpy(tets['tets']).long().to(device) # .to(self.device) + # split # + # verts; verts; # + minn_verts, _ = torch.min(verts, dim=0) + maxx_verts, _ = torch.max(verts, dim=0) # (3, ) # exporting the + # scale_verts = maxx_verts - minn_verts + scale_bounds = bound_max - bound_min # scale bounds # + + ### scale the vertices ### + scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + # scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + scaled_verts = scaled_verts * 2. - 1. # init the sdf filed viathe tet mesh vertices and the sdf values ## + # scaled_verts = (scaled_verts * scale_bounds.unsqueeze(0)) + bound_min.unsqueeze(0) ## the scaled verts ### + + # scaled_verts = scaled_verts - scale_bounds.unsqueeze(0) / 2. # + # scaled_verts = scaled_verts - bound_min.unsqueeze(0) - scale_bounds.unsqueeze(0) / 2. 
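+ # Query the SDF of the rescaled tet vertices through query_func in fixed-size chunks of
+ # N ** 3 points so a single call never has to hold every vertex at once; the per-chunk
+ # results are concatenated below and later fed to the marching-tets extraction.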
+ + sdf_values = [] + N = 64 + query_bundles = N ** 3 ### N^3 + query_NNs = scaled_verts.size(0) // query_bundles + if query_NNs * query_bundles < scaled_verts.size(0): + query_NNs += 1 + for i_query in range(query_NNs): + cur_bundle_st = i_query * query_bundles + cur_bundle_ed = (i_query + 1) * query_bundles + cur_bundle_ed = min(cur_bundle_ed, scaled_verts.size(0)) + cur_query_pts = scaled_verts[cur_bundle_st: cur_bundle_ed] + cur_query_vals = query_func(cur_query_pts) + sdf_values.append(cur_query_vals) + sdf_values = torch.cat(sdf_values, dim=0) + # print(f"queryed sdf values: {sdf_values.size()}") # + + GT_sdf_values = np.load("/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy", allow_pickle=True) + GT_sdf_values = torch.from_numpy(GT_sdf_values).float().to(device) + + # intrinsic, tet values, pts values, sdf network # + triangle_table, num_triangles_table, base_tet_edges, v_id = create_mt_variable(device) + tet_table, num_tets_table = create_tetmesh_variables(device) + + sdf_values = sdf_values.squeeze(-1) # how the rendering # + + # print(f"GT_sdf_values: {GT_sdf_values.size()}, sdf_values: {sdf_values.size()}, scaled_verts: {scaled_verts.size()}") + # print(f"scaled_verts: {scaled_verts.size()}, ") + # pos_nx3, sdf_n, tet_fx4, triangle_table, num_triangles_table, base_tet_edges, v_id, + # return_tet_mesh=False, ori_v=None, num_tets_table=None, tet_table=None): + # marching_tets_tetmesh ## + verts, faces, tet_verts, tets = marching_tets_tetmesh(scaled_verts, sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + ### use the GT sdf values for the marching tets ### + GT_verts, GT_faces, GT_tet_verts, GT_tets = marching_tets_tetmesh(scaled_verts, GT_sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + + # print(f"After tet marching with verts: {verts.size()}, faces: {faces.size()}") + return verts, faces, sdf_values, GT_verts, GT_faces # verts, faces # + +def extract_fields(bound_min, bound_max, resolution, query_func): + N = 64 + X = torch.linspace(bound_min[0], bound_max[0], resolution).split(N) + Y = torch.linspace(bound_min[1], bound_max[1], resolution).split(N) + Z = torch.linspace(bound_min[2], bound_max[2], resolution).split(N) + + u = np.zeros([resolution, resolution, resolution], dtype=np.float32) + with torch.no_grad(): + for xi, xs in enumerate(X): + for yi, ys in enumerate(Y): + for zi, zs in enumerate(Z): + xx, yy, zz = torch.meshgrid(xs, ys, zs) + pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) + val = query_func(pts).reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() + u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val + # should save u here # + # save_u_path = os.path.join("/data2/datasets/diffsim/neus/exp/hand_test/womask_sphere_reverse_value/other_saved", "sdf_values.npy") + # np.save(save_u_path, u) + # print(f"u saved to {save_u_path}") + return u + + +def extract_geometry(bound_min, bound_max, resolution, threshold, query_func): + print('threshold: {}'.format(threshold)) + + ## using maching cubes ### + u = extract_fields(bound_min, bound_max, resolution, query_func) + vertices, triangles = mcubes.marching_cubes(u, threshold) # grid sdf and marching cubes # + b_max_np = bound_max.detach().cpu().numpy() + b_min_np = 
bound_min.detach().cpu().numpy() + + vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + ### using maching cubes ### + + ### using marching tets ### + # vertices, triangles = extract_fields_from_tets(bound_min, bound_max, resolution, query_func) + # vertices = vertices.detach().cpu().numpy() + # triangles = triangles.detach().cpu().numpy() + ### using marching tets ### + + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles + +def extract_geometry_tets(bound_min, bound_max, resolution, threshold, query_func): + # print('threshold: {}'.format(threshold)) + + ### using maching cubes ### + # u = extract_fields(bound_min, bound_max, resolution, query_func) + # vertices, triangles = mcubes.marching_cubes(u, threshold) # grid sdf and marching cubes # + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + ### using maching cubes ### + + ## + ### using marching tets ### fiels from tets ## + vertices, triangles, tet_sdf_values, GT_verts, GT_faces = extract_fields_from_tets(bound_min, bound_max, resolution, query_func) + # vertices = vertices.detach().cpu().numpy() + # triangles = triangles.detach().cpu().numpy() + ### using marching tets ### + + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + # + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles, tet_sdf_values, GT_verts, GT_faces + + +def sample_pdf(bins, weights, n_samples, det=False): + # This implementation is from NeRF + # Get pdf + weights = weights + 1e-5 # prevent nans + pdf = weights / torch.sum(weights, -1, keepdim=True) + cdf = torch.cumsum(pdf, -1) + cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) + # Take uniform samples + if det: + u = torch.linspace(0. + 0.5 / n_samples, 1. 
- 0.5 / n_samples, steps=n_samples) + u = u.expand(list(cdf.shape[:-1]) + [n_samples]) + else: + u = torch.rand(list(cdf.shape[:-1]) + [n_samples]) + + # Invert CDF # invert cdf # + u = u.contiguous() + inds = torch.searchsorted(cdf, u, right=True) + below = torch.max(torch.zeros_like(inds - 1), inds - 1) + above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds) + inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2) + + matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] + cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) + bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) + + denom = (cdf_g[..., 1] - cdf_g[..., 0]) + denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom) + t = (u - cdf_g[..., 0]) / denom + samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) + + return samples + + +def load_GT_vertices(GT_meshes_folder): + tot_meshes_fns = os.listdir(GT_meshes_folder) + tot_meshes_fns = [fn for fn in tot_meshes_fns if fn.endswith(".obj")] + tot_mesh_verts = [] + tot_mesh_faces = [] + n_tot_verts = 0 + for fn in tot_meshes_fns: + cur_mesh_fn = os.path.join(GT_meshes_folder, fn) + obj_mesh = trimesh.load(cur_mesh_fn, process=False) + # obj_mesh.remove_degenerate_faces(height=1e-06) + + verts_obj = np.array(obj_mesh.vertices) + faces_obj = np.array(obj_mesh.faces) + + tot_mesh_verts.append(verts_obj) + tot_mesh_faces.append(faces_obj + n_tot_verts) + n_tot_verts += verts_obj.shape[0] + + # tot_mesh_faces.append(faces_obj) + tot_mesh_verts = np.concatenate(tot_mesh_verts, axis=0) + tot_mesh_faces = np.concatenate(tot_mesh_faces, axis=0) + return tot_mesh_verts, tot_mesh_faces + + +class NeuSRenderer: + def __init__(self, + nerf, + sdf_network, + deviation_network, + color_network, + n_samples, + n_importance, + n_outside, + up_sample_steps, + perturb): + self.nerf = nerf + self.sdf_network = sdf_network + self.deviation_network = deviation_network + self.color_network = color_network + self.n_samples = n_samples + self.n_importance = n_importance + self.n_outside = n_outside + self.up_sample_steps = up_sample_steps + self.perturb = perturb + + GT_meshes_folder = "/home/xueyi/diffsim/DiffHand/assets/hand" + self.mesh_vertices, self.mesh_faces = load_GT_vertices(GT_meshes_folder=GT_meshes_folder) + maxx_pts = 25. + minn_pts = -15. + self.mesh_vertices = (self.mesh_vertices - minn_pts) / (maxx_pts - minn_pts) + f = SDF(self.mesh_vertices, self.mesh_faces) + self.gt_sdf = f ## a unite sphere or box + + self.minn_pts = 0 + self.maxx_pts = 1. + + # self.minn_pts = -1.5 + # self.maxx_pts = 1.5 + self.bkg_pts = ... # TODO: the bkg pts + self.cur_fr_bkg_pts_defs = ... # TODO: set the cur_bkg_pts_defs for each frame # + self.dist_interp_thres = ... 
# TODO + + # get the pts and render the pts # + # pts and the rendering pts # + def deform_pts(self, pts): + # pts: nn_batch x nn_samples x 3 + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + + + + + def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None): + """ + Render background + """ + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + + # Section midpoints # + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # batch_size, n_samples, 3 # + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + dis_to_center = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).clip(1.0, 1e10) + pts = torch.cat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1) # batch_size, n_samples, 4 # + + dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3) + + pts = pts.reshape(-1, 3 + int(self.n_outside > 0)) + dirs = dirs.reshape(-1, 3) + + density, sampled_color = nerf(pts, dirs) + sampled_color = torch.sigmoid(sampled_color) + alpha = 1.0 - torch.exp(-F.softplus(density.reshape(batch_size, n_samples)) * dists) + alpha = alpha.reshape(batch_size, n_samples) + weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1] + sampled_color = sampled_color.reshape(batch_size, n_samples, 3) + color = (weights[:, :, None] * sampled_color).sum(dim=1) + if background_rgb is not None: + color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True)) + + return { + 'color': color, + 'sampled_color': sampled_color, + 'alpha': alpha, + 'weights': weights, + } + + def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s): + """ + Up sampling give a fixed inv_s + """ + batch_size, n_samples = z_vals.shape + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False) + inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0) + sdf = sdf.reshape(batch_size, n_samples) + prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:] + prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:] + mid_sdf = (prev_sdf + next_sdf) * 0.5 + cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5) + + # ---------------------------------------------------------------------------------------------------------- + # Use min value of [ cos, prev_cos ] + # Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more + # robust when meeting situations like below: + # + # SDF + # ^ + # |\ -----x----... 
+ # | \ / + # | x x + # |---\----/-------------> 0 level + # | \ / + # | \/ + # | + # ---------------------------------------------------------------------------------------------------------- + prev_cos_val = torch.cat([torch.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1) + cos_val = torch.stack([prev_cos_val, cos_val], dim=-1) + cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False) + cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere + + dist = (next_z_vals - prev_z_vals) + prev_esti_sdf = mid_sdf - cos_val * dist * 0.5 + next_esti_sdf = mid_sdf + cos_val * dist * 0.5 + prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s) + next_cdf = torch.sigmoid(next_esti_sdf * inv_s) + alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5) + weights = alpha * torch.cumprod( + torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1] + + z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach() + return z_samples + + def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, last=False): + batch_size, n_samples = z_vals.shape + _, n_importance = new_z_vals.shape + pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None] + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + z_vals = torch.cat([z_vals, new_z_vals], dim=-1) + z_vals, index = torch.sort(z_vals, dim=-1) + + if not last: + new_sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + sdf = torch.cat([sdf, new_sdf], dim=-1) + xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1) + index = index.reshape(-1) + sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance) + + return z_vals, sdf + + def render_core(self, + rays_o, + rays_d, + z_vals, + sample_dist, + sdf_network, + deviation_network, + color_network, + background_alpha=None, + background_sampled_color=None, + background_rgb=None, + cos_anneal_ratio=0.0): + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 # z_vals and dists * 0.5 # + + # Section midpoints + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3 + dirs = rays_d[:, None, :].expand(pts.shape) + + pts = pts.reshape(-1, 3) # pts, nn_ou + dirs = dirs.reshape(-1, 3) + + pts = (pts - self.minn_pts) / (self.maxx_pts - self.minn_pts) + + # pts = pts.flip((-1,)) * 2 - 1 # + pts = pts * 2 - 1 + + sdf_nn_output = sdf_network(pts) + sdf = sdf_nn_output[:, :1] + feature_vector = sdf_nn_output[:, 1:] + + gradients = sdf_network.gradient(pts).squeeze() + sampled_color = color_network(pts, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3) + + # deviation network # + inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6) # Single parameter + inv_s = inv_s.expand(batch_size * n_samples, 1) + + true_cos = (dirs * gradients).sum(-1, keepdim=True) + + # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes + # the cos value "not dead" at the beginning training iterations, for better convergence. 
+ iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) + + F.relu(-true_cos) * cos_anneal_ratio) # always non-positive + + # Estimate signed distances at section points + estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5 + estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5 + + prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s) + next_cdf = torch.sigmoid(estimated_next_sdf * inv_s) + + p = prev_cdf - next_cdf + c = prev_cdf + + alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0) + + pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(batch_size, n_samples) + inside_sphere = (pts_norm < 1.0).float().detach() + relax_inside_sphere = (pts_norm < 1.2).float().detach() + + # Render with background + if background_alpha is not None: + alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere) + alpha = torch.cat([alpha, background_alpha[:, n_samples:]], dim=-1) + sampled_color = sampled_color * inside_sphere[:, :, None] +\ + background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None] + sampled_color = torch.cat([sampled_color, background_sampled_color[:, n_samples:]], dim=1) + + weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1] + weights_sum = weights.sum(dim=-1, keepdim=True) + + color = (sampled_color * weights[:, :, None]).sum(dim=1) + if background_rgb is not None: # Fixed background, usually black + color = color + background_rgb * (1.0 - weights_sum) + + # Eikonal loss + gradient_error = (torch.linalg.norm(gradients.reshape(batch_size, n_samples, 3), ord=2, + dim=-1) - 1.0) ** 2 + gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5) + + return { + 'color': color, + 'sdf': sdf, + 'dists': dists, + 'gradients': gradients.reshape(batch_size, n_samples, 3), + 's_val': 1.0 / inv_s, + 'mid_z_vals': mid_z_vals, + 'weights': weights, + 'cdf': c.reshape(batch_size, n_samples), + 'gradient_error': gradient_error, + 'inside_sphere': inside_sphere + } + + def render(self, rays_o, rays_d, near, far, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0, use_gt_sdf=False): + batch_size = len(rays_o) + sample_dist = 2.0 / self.n_samples # Assuming the region of interest is a unit sphere + z_vals = torch.linspace(0.0, 1.0, self.n_samples) + z_vals = near + (far - near) * z_vals[None, :] + + z_vals_outside = None + if self.n_outside > 0: + z_vals_outside = torch.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside) + + n_samples = self.n_samples + perturb = self.perturb + + if perturb_overwrite >= 0: + perturb = perturb_overwrite + if perturb > 0: + t_rand = (torch.rand([batch_size, 1]) - 0.5) + z_vals = z_vals + t_rand * 2.0 / self.n_samples + + if self.n_outside > 0: + mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1]) + upper = torch.cat([mids, z_vals_outside[..., -1:]], -1) + lower = torch.cat([z_vals_outside[..., :1], mids], -1) + t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]]) + z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand + + if self.n_outside > 0: + z_vals_outside = far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_samples + + background_alpha = None + background_sampled_color = None + + # Up sample + if self.n_importance > 0: + with torch.no_grad(): + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] + + pts = (pts - self.minn_pts) / (self.maxx_pts - 
self.minn_pts) + # sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, self.n_samples) + # gt_sdf # + + # + # pts = ((pts - xyz_min) / (xyz_max - xyz_min)).flip((-1,)) * 2 - 1 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + pts_exp = pts.reshape(-1, 3) + # minn_pts, _ = torch.min(pts_exp, dim=0) + # maxx_pts, _ = torch.max(pts_exp, dim=0) + # print(f"minn_pts: {minn_pts}, maxx_pts: {maxx_pts}") + + # pts_to_near = pts - near.unsqueeze(1) + # maxx_pts = 1.5; minn_pts = -1.5 + # # maxx_pts = 3; minn_pts = -3 + # # maxx_pts = 1; minn_pts = -1 + # pts_exp = (pts_exp - minn_pts) / (maxx_pts - minn_pts) + + ## render and iamges #### + if use_gt_sdf: + ### use the GT sdf field #### + # print(f"Using gt sdf :") + sdf = self.gt_sdf(pts_exp.reshape(-1, 3).detach().cpu().numpy()) + sdf = torch.from_numpy(sdf).float().cuda() + sdf = sdf.reshape(batch_size, self.n_samples) + ### use the GT sdf field #### + else: + #### use the optimized sdf field #### + sdf = self.sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + #### use the optimized sdf field #### + + for i in range(self.up_sample_steps): + new_z_vals = self.up_sample(rays_o, + rays_d, + z_vals, + sdf, + self.n_importance // self.up_sample_steps, + 64 * 2**i) + z_vals, sdf = self.cat_z_vals(rays_o, + rays_d, + z_vals, + new_z_vals, + sdf, + last=(i + 1 == self.up_sample_steps)) + + n_samples = self.n_samples + self.n_importance + + # Background model + if self.n_outside > 0: + z_vals_feed = torch.cat([z_vals, z_vals_outside], dim=-1) + z_vals_feed, _ = torch.sort(z_vals_feed, dim=-1) + ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf) + + background_sampled_color = ret_outside['sampled_color'] + background_alpha = ret_outside['alpha'] + + # Render core + ret_fine = self.render_core(rays_o, # + rays_d, + z_vals, + sample_dist, + self.sdf_network, + self.deviation_network, + self.color_network, + background_rgb=background_rgb, + background_alpha=background_alpha, + background_sampled_color=background_sampled_color, + cos_anneal_ratio=cos_anneal_ratio) + + color_fine = ret_fine['color'] + weights = ret_fine['weights'] + weights_sum = weights.sum(dim=-1, keepdim=True) + gradients = ret_fine['gradients'] + s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True) + + return { + 'color_fine': color_fine, + 's_val': s_val, + 'cdf_fine': ret_fine['cdf'], + 'weight_sum': weights_sum, + 'weight_max': torch.max(weights, dim=-1, keepdim=True)[0], + 'gradients': gradients, + 'weights': weights, + 'gradient_error': ret_fine['gradient_error'], + 'inside_sphere': ret_fine['inside_sphere'] + } + + def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0): + return extract_geometry(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.sdf_network.sdf(pts)) + + def extract_geometry_tets(self, bound_min, bound_max, resolution, threshold=0.0): + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.sdf_network.sdf(pts)) diff --git a/models/renderer_def.py b/models/renderer_def.py new file mode 100644 index 0000000000000000000000000000000000000000..5ed728fabbf5bd0a4c8d7e678c73422d8a9093dc --- /dev/null +++ b/models/renderer_def.py @@ -0,0 +1,725 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +import logging +import mcubes +from icecream 
import ic +import os + +import trimesh +from pysdf import SDF + +from uni_rep.rep_3d.dmtet import marching_tets_tetmesh, create_tetmesh_variables + +def create_mt_variable(device): + triangle_table = torch.tensor( + [ + [-1, -1, -1, -1, -1, -1], + [1, 0, 2, -1, -1, -1], + [4, 0, 3, -1, -1, -1], + [1, 4, 2, 1, 3, 4], + [3, 1, 5, -1, -1, -1], + [2, 3, 0, 2, 5, 3], + [1, 4, 0, 1, 5, 4], + [4, 2, 5, -1, -1, -1], + [4, 5, 2, -1, -1, -1], + [4, 1, 0, 4, 5, 1], + [3, 2, 0, 3, 5, 2], + [1, 3, 5, -1, -1, -1], + [4, 1, 2, 4, 3, 1], + [3, 0, 4, -1, -1, -1], + [2, 0, 1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1] + ], dtype=torch.long, device=device) + + num_triangles_table = torch.tensor([0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long, device=device) + base_tet_edges = torch.tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long, device=device) + v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=device)) + return triangle_table, num_triangles_table, base_tet_edges, v_id + + + +def extract_fields_from_tets(bound_min, bound_max, resolution, query_func, def_func=None): + # load tet via resolution # + # scale them via bounds # + # extract the geometry # + # /home/xueyi/gen/DeepMetaHandles/data/tets/100_compress.npz # strange # + device = bound_min.device + # if resolution in [64, 70, 80, 90, 100]: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{resolution}_compress.npz" + # else: + tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + tets = np.load(tet_fn) + verts = torch.from_numpy(tets['vertices']).float().to(device) # verts positions + indices = torch.from_numpy(tets['tets']).long().to(device) # .to(self.device) + # split # + # verts; verts; # + minn_verts, _ = torch.min(verts, dim=0) + maxx_verts, _ = torch.max(verts, dim=0) # (3, ) # exporting the + # scale_verts = maxx_verts - minn_verts + scale_bounds = bound_max - bound_min # scale bounds # + + ### scale the vertices ### + scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + # scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + scaled_verts = scaled_verts * 2. - 1. # init the sdf filed viathe tet mesh vertices and the sdf values ## + # scaled_verts = (scaled_verts * scale_bounds.unsqueeze(0)) + bound_min.unsqueeze(0) ## the scaled verts ### + + # scaled_verts = scaled_verts - scale_bounds.unsqueeze(0) / 2. # + # scaled_verts = scaled_verts - bound_min.unsqueeze(0) - scale_bounds.unsqueeze(0) / 2. 
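+ # The scaled tet vertices are fed to query_func in bundles of N ** 3 points below, so one forward
+ # pass never has to process every tet vertex at once; when a deformation function (def_func) is
+ # supplied, each bundle is deformed before its SDF values are queried.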
+ + sdf_values = [] + N = 64 + query_bundles = N ** 3 ### N^3 + query_NNs = scaled_verts.size(0) // query_bundles + if query_NNs * query_bundles < scaled_verts.size(0): + query_NNs += 1 + for i_query in range(query_NNs): + cur_bundle_st = i_query * query_bundles + cur_bundle_ed = (i_query + 1) * query_bundles + cur_bundle_ed = min(cur_bundle_ed, scaled_verts.size(0)) + cur_query_pts = scaled_verts[cur_bundle_st: cur_bundle_ed] + if def_func is not None: + cur_query_pts = def_func(cur_query_pts) + cur_query_vals = query_func(cur_query_pts) + sdf_values.append(cur_query_vals) + sdf_values = torch.cat(sdf_values, dim=0) + # print(f"queryed sdf values: {sdf_values.size()}") # + + GT_sdf_values = np.load("/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy", allow_pickle=True) + GT_sdf_values = torch.from_numpy(GT_sdf_values).float().to(device) + + # intrinsic, tet values, pts values, sdf network # + triangle_table, num_triangles_table, base_tet_edges, v_id = create_mt_variable(device) + tet_table, num_tets_table = create_tetmesh_variables(device) + + sdf_values = sdf_values.squeeze(-1) # how the rendering # + + # print(f"GT_sdf_values: {GT_sdf_values.size()}, sdf_values: {sdf_values.size()}, scaled_verts: {scaled_verts.size()}") + # print(f"scaled_verts: {scaled_verts.size()}, ") + # pos_nx3, sdf_n, tet_fx4, triangle_table, num_triangles_table, base_tet_edges, v_id, + # return_tet_mesh=False, ori_v=None, num_tets_table=None, tet_table=None): + # marching_tets_tetmesh ## + verts, faces, tet_verts, tets = marching_tets_tetmesh(scaled_verts, sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + ### use the GT sdf values for the marching tets ### + GT_verts, GT_faces, GT_tet_verts, GT_tets = marching_tets_tetmesh(scaled_verts, GT_sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + + # print(f"After tet marching with verts: {verts.size()}, faces: {faces.size()}") + return verts, faces, sdf_values, GT_verts, GT_faces # verts, faces # + +def extract_fields(bound_min, bound_max, resolution, query_func): + N = 64 + X = torch.linspace(bound_min[0], bound_max[0], resolution).split(N) + Y = torch.linspace(bound_min[1], bound_max[1], resolution).split(N) + Z = torch.linspace(bound_min[2], bound_max[2], resolution).split(N) + + u = np.zeros([resolution, resolution, resolution], dtype=np.float32) + with torch.no_grad(): + for xi, xs in enumerate(X): + for yi, ys in enumerate(Y): + for zi, zs in enumerate(Z): + xx, yy, zz = torch.meshgrid(xs, ys, zs) + pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) + val = query_func(pts).reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() + u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val + # should save u here # + # save_u_path = os.path.join("/data2/datasets/diffsim/neus/exp/hand_test/womask_sphere_reverse_value/other_saved", "sdf_values.npy") + # np.save(save_u_path, u) + # print(f"u saved to {save_u_path}") + return u + + +def extract_geometry(bound_min, bound_max, resolution, threshold, query_func): + print('threshold: {}'.format(threshold)) + + ## using maching cubes ### + u = extract_fields(bound_min, bound_max, resolution, query_func) + vertices, triangles = mcubes.marching_cubes(u, threshold) # grid sdf and marching cubes # + b_max_np = 
bound_max.detach().cpu().numpy() + b_min_np = bound_min.detach().cpu().numpy() + + vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + ### using maching cubes ### + + ### using marching tets ### + # vertices, triangles = extract_fields_from_tets(bound_min, bound_max, resolution, query_func) + # vertices = vertices.detach().cpu().numpy() + # triangles = triangles.detach().cpu().numpy() + ### using marching tets ### + + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles + +def extract_geometry_tets(bound_min, bound_max, resolution, threshold, query_func, def_func=None): + # print('threshold: {}'.format(threshold)) + + ### using maching cubes ### + # u = extract_fields(bound_min, bound_max, resolution, query_func) + # vertices, triangles = mcubes.marching_cubes(u, threshold) # grid sdf and marching cubes # + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + ### using maching cubes ### + + ## + ### using marching tets ### fiels from tets ## + vertices, triangles, tet_sdf_values, GT_verts, GT_faces = extract_fields_from_tets(bound_min, bound_max, resolution, query_func, def_func=def_func) + # vertices = vertices.detach().cpu().numpy() + # triangles = triangles.detach().cpu().numpy() + ### using marching tets ### + + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + # + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles, tet_sdf_values, GT_verts, GT_faces + + +def sample_pdf(bins, weights, n_samples, det=False): + # This implementation is from NeRF + # Get pdf + weights = weights + 1e-5 # prevent nans + pdf = weights / torch.sum(weights, -1, keepdim=True) + cdf = torch.cumsum(pdf, -1) + cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) + # Take uniform samples + if det: + u = torch.linspace(0. + 0.5 / n_samples, 1. 
- 0.5 / n_samples, steps=n_samples) + u = u.expand(list(cdf.shape[:-1]) + [n_samples]) + else: + u = torch.rand(list(cdf.shape[:-1]) + [n_samples]) + + # Invert CDF # invert cdf # + u = u.contiguous() + inds = torch.searchsorted(cdf, u, right=True) + below = torch.max(torch.zeros_like(inds - 1), inds - 1) + above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds) + inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2) + + matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] + cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) + bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) + + denom = (cdf_g[..., 1] - cdf_g[..., 0]) + denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom) + t = (u - cdf_g[..., 0]) / denom + samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) + + return samples + + +def load_GT_vertices(GT_meshes_folder): + tot_meshes_fns = os.listdir(GT_meshes_folder) + tot_meshes_fns = [fn for fn in tot_meshes_fns if fn.endswith(".obj")] + tot_mesh_verts = [] + tot_mesh_faces = [] + n_tot_verts = 0 + for fn in tot_meshes_fns: + cur_mesh_fn = os.path.join(GT_meshes_folder, fn) + obj_mesh = trimesh.load(cur_mesh_fn, process=False) + # obj_mesh.remove_degenerate_faces(height=1e-06) + + verts_obj = np.array(obj_mesh.vertices) + faces_obj = np.array(obj_mesh.faces) + + tot_mesh_verts.append(verts_obj) + tot_mesh_faces.append(faces_obj + n_tot_verts) + n_tot_verts += verts_obj.shape[0] + + # tot_mesh_faces.append(faces_obj) + tot_mesh_verts = np.concatenate(tot_mesh_verts, axis=0) + tot_mesh_faces = np.concatenate(tot_mesh_faces, axis=0) + return tot_mesh_verts, tot_mesh_faces + + +class NeuSRenderer: + def __init__(self, + nerf, + sdf_network, + deviation_network, + color_network, + n_samples, + n_importance, + n_outside, + up_sample_steps, + perturb): + self.nerf = nerf + self.sdf_network = sdf_network + self.deviation_network = deviation_network + self.color_network = color_network + self.n_samples = n_samples + self.n_importance = n_importance + self.n_outside = n_outside + self.up_sample_steps = up_sample_steps + self.perturb = perturb + + GT_meshes_folder = "/home/xueyi/diffsim/DiffHand/assets/hand" + self.mesh_vertices, self.mesh_faces = load_GT_vertices(GT_meshes_folder=GT_meshes_folder) + maxx_pts = 25. + minn_pts = -15. + self.mesh_vertices = (self.mesh_vertices - minn_pts) / (maxx_pts - minn_pts) + f = SDF(self.mesh_vertices, self.mesh_faces) + self.gt_sdf = f ## a unite sphere or box + + self.minn_pts = 0 + self.maxx_pts = 1. + + # self.minn_pts = -1.5 # gorudn-truth states with the deformation -> update the sdf value fiedl + # self.maxx_pts = 1.5 + self.bkg_pts = ... # TODO: the bkg pts # bkg_pts; # bkg_pts_defs # + self.cur_fr_bkg_pts_defs = ... # TODO: set the cur_bkg_pts_defs for each frame # + self.dist_interp_thres = ... # TODO: set the cur_bkg_pts_defs # + + self.bending_network = ... # TODO: add the bending network # + self.use_bending_network = ... # TODO: set the property # + self.use_delta_bending = ... 
# TODO + # use bending network # + + + # get the pts and render the pts # + # pts and the rendering pts # + def deform_pts(self, pts, pts_ts=0): + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + # if pts_ts >= 5: + # pts_exp = self.bending_network(pts_exp, input_pts_ts=pts_ts) + # for cur_pts_ts in range(4, -1, -1): + # # print(f"using delta bending with pts_ts: {cur_pts_ts}") + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + # for cur_pts_ts in range(pts_ts, -1, -1): + # # print(f"using delta bending with pts_ts: {cur_pts_ts}") + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + for cur_pts_ts in range(pts_ts, -1, -1): + # print(f"using delta bending with pts_ts: {cur_pts_ts}") + pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + pts_exp = self.bending_network(pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + + + + def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None, pts_ts=0): + """ + Render background + """ + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + + # Section midpoints # + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # batch_size, n_samples, 3 # + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + dis_to_center = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).clip(1.0, 1e10) + pts = torch.cat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1) # batch_size, n_samples, 4 # + + dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3) + + pts = pts.reshape(-1, 3 + int(self.n_outside > 0)) + dirs = dirs.reshape(-1, 3) + + density, sampled_color = nerf(pts, dirs) + sampled_color = torch.sigmoid(sampled_color) + alpha = 1.0 - torch.exp(-F.softplus(density.reshape(batch_size, n_samples)) * dists) + alpha = alpha.reshape(batch_size, n_samples) + weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + sampled_color = sampled_color.reshape(batch_size, n_samples, 3) + color = (weights[:, :, None] * sampled_color).sum(dim=1) + if background_rgb is not None: + color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True)) + + return { + 'color': color, + 'sampled_color': sampled_color, + 'alpha': alpha, + 'weights': weights, + } + + def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s, pts_ts=0): + """ + Up sampling give a fixed inv_s + """ + batch_size, n_samples = z_vals.shape + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False) + inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0) + sdf = sdf.reshape(batch_size, n_samples) + prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:] + prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:] + mid_sdf = (prev_sdf + next_sdf) * 0.5 + cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5) + + # ---------------------------------------------------------------------------------------------------------- + # Use min value of [ cos, prev_cos ] + # Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more + # robust when meeting situations like below: + # + # SDF + # ^ + # |\ -----x----... + # | \ / + # | x x + # |---\----/-------------> 0 level + # | \ / + # | \/ + # | + # ---------------------------------------------------------------------------------------------------------- + prev_cos_val = torch.cat([torch.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1) + cos_val = torch.stack([prev_cos_val, cos_val], dim=-1) + cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False) + cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere + + dist = (next_z_vals - prev_z_vals) + prev_esti_sdf = mid_sdf - cos_val * dist * 0.5 + next_esti_sdf = mid_sdf + cos_val * dist * 0.5 + prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s) + next_cdf = torch.sigmoid(next_esti_sdf * inv_s) + alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5) + weights = alpha * torch.cumprod( + torch.cat([torch.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + + z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach() + return z_samples + + def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, last=False, pts_ts=0): + batch_size, n_samples = z_vals.shape + _, n_importance = new_z_vals.shape + pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None] + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + z_vals = torch.cat([z_vals, new_z_vals], dim=-1) + z_vals, index = torch.sort(z_vals, dim=-1) + + if not last: + new_sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + sdf = torch.cat([sdf, new_sdf], dim=-1) + xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1) + index = index.reshape(-1) + sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance) + + return z_vals, sdf + + def render_core(self, + rays_o, + rays_d, + z_vals, + sample_dist, + sdf_network, + deviation_network, + color_network, + background_alpha=None, + background_sampled_color=None, + background_rgb=None, + cos_anneal_ratio=0.0, + pts_ts=0): + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 # z_vals and dists * 0.5 # + + # Section midpoints + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3 + dirs = rays_d[:, None, :].expand(pts.shape) + + pts = pts.reshape(-1, 3) # pts, nn_ou + dirs = dirs.reshape(-1, 3) + + pts = (pts - self.minn_pts) / (self.maxx_pts - self.minn_pts) + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + sdf_nn_output = sdf_network(pts) + sdf = sdf_nn_output[:, :1] + feature_vector = sdf_nn_output[:, 1:] + + gradients = sdf_network.gradient(pts).squeeze() + sampled_color = color_network(pts, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3) + + # deviation network # + inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6) # Single parameter + inv_s = inv_s.expand(batch_size * n_samples, 1) + + true_cos = (dirs * gradients).sum(-1, keepdim=True) + + # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes + # the cos value "not dead" at the beginning training iterations, for better convergence. 
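+ # The block below estimates the SDF at both ends of every ray section from the annealed
+ # ray-gradient cosine, converts those estimates to sigmoid CDFs with the learned sharpness inv_s,
+ # and uses the normalized CDF drop (prev_cdf - next_cdf) / prev_cdf, with a small epsilon and a
+ # clip to [0, 1], as the per-section opacity alpha.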
+ iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) + + F.relu(-true_cos) * cos_anneal_ratio) # always non-positive + + # Estimate signed distances at section points + estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5 + estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5 + + prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s) + next_cdf = torch.sigmoid(estimated_next_sdf * inv_s) + + p = prev_cdf - next_cdf + c = prev_cdf + + alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0) + + pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(batch_size, n_samples) + inside_sphere = (pts_norm < 1.0).float().detach() + relax_inside_sphere = (pts_norm < 1.2).float().detach() + + # Render with background + if background_alpha is not None: + alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere) + alpha = torch.cat([alpha, background_alpha[:, n_samples:]], dim=-1) + sampled_color = sampled_color * inside_sphere[:, :, None] +\ + background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None] + sampled_color = torch.cat([sampled_color, background_sampled_color[:, n_samples:]], dim=1) + + weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1] + weights_sum = weights.sum(dim=-1, keepdim=True) + + color = (sampled_color * weights[:, :, None]).sum(dim=1) + if background_rgb is not None: # Fixed background, usually black + color = color + background_rgb * (1.0 - weights_sum) + + # Eikonal loss + gradient_error = (torch.linalg.norm(gradients.reshape(batch_size, n_samples, 3), ord=2, + dim=-1) - 1.0) ** 2 + gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5) + + return { + 'color': color, + 'sdf': sdf, + 'dists': dists, + 'gradients': gradients.reshape(batch_size, n_samples, 3), + 's_val': 1.0 / inv_s, + 'mid_z_vals': mid_z_vals, + 'weights': weights, + 'cdf': c.reshape(batch_size, n_samples), + 'gradient_error': gradient_error, + 'inside_sphere': inside_sphere + } + + def render(self, rays_o, rays_d, near, far, pts_ts=0, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0, use_gt_sdf=False): + batch_size = len(rays_o) + sample_dist = 2.0 / self.n_samples # Assuming the region of interest is a unit sphere + z_vals = torch.linspace(0.0, 1.0, self.n_samples) + z_vals = near + (far - near) * z_vals[None, :] + + z_vals_outside = None + if self.n_outside > 0: + z_vals_outside = torch.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside) + + n_samples = self.n_samples + perturb = self.perturb + + if perturb_overwrite >= 0: + perturb = perturb_overwrite + if perturb > 0: + t_rand = (torch.rand([batch_size, 1]) - 0.5) + z_vals = z_vals + t_rand * 2.0 / self.n_samples + + if self.n_outside > 0: + mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1]) + upper = torch.cat([mids, z_vals_outside[..., -1:]], -1) + lower = torch.cat([z_vals_outside[..., :1], mids], -1) + t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]]) + z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand + + if self.n_outside > 0: + z_vals_outside = far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_samples + + background_alpha = None + background_sampled_color = None + + # Up sample + if self.n_importance > 0: + with torch.no_grad(): + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] + + pts = (pts - self.minn_pts) / (self.maxx_pts - 
self.minn_pts) + # sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, self.n_samples) + # gt_sdf # + + # + # pts = ((pts - xyz_min) / (xyz_max - xyz_min)).flip((-1,)) * 2 - 1 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + pts_exp = pts.reshape(-1, 3) + # minn_pts, _ = torch.min(pts_exp, dim=0) + # maxx_pts, _ = torch.max(pts_exp, dim=0) # deformation field (not a rigid one) -> the meshes # + # print(f"minn_pts: {minn_pts}, maxx_pts: {maxx_pts}") + + # pts_to_near = pts - near.unsqueeze(1) + # maxx_pts = 1.5; minn_pts = -1.5 + # # maxx_pts = 3; minn_pts = -3 + # # maxx_pts = 1; minn_pts = -1 + # pts_exp = (pts_exp - minn_pts) / (maxx_pts - minn_pts) + + ## render and iamges #### + if use_gt_sdf: + ### use the GT sdf field #### + # print(f"Using gt sdf :") + sdf = self.gt_sdf(pts_exp.reshape(-1, 3).detach().cpu().numpy()) + sdf = torch.from_numpy(sdf).float().cuda() + sdf = sdf.reshape(batch_size, self.n_samples) + ### use the GT sdf field #### + else: + #### use the optimized sdf field #### + sdf = self.sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + #### use the optimized sdf field #### + + for i in range(self.up_sample_steps): + new_z_vals = self.up_sample(rays_o, + rays_d, + z_vals, + sdf, + self.n_importance // self.up_sample_steps, + 64 * 2**i, + pts_ts=pts_ts) + z_vals, sdf = self.cat_z_vals(rays_o, + rays_d, + z_vals, + new_z_vals, + sdf, + last=(i + 1 == self.up_sample_steps), + pts_ts=pts_ts) + + n_samples = self.n_samples + self.n_importance + + # Background model + if self.n_outside > 0: + z_vals_feed = torch.cat([z_vals, z_vals_outside], dim=-1) + z_vals_feed, _ = torch.sort(z_vals_feed, dim=-1) + ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf, pts_ts=pts_ts) + + background_sampled_color = ret_outside['sampled_color'] + background_alpha = ret_outside['alpha'] + + # Render core + ret_fine = self.render_core(rays_o, # + rays_d, + z_vals, + sample_dist, + self.sdf_network, + self.deviation_network, + self.color_network, + background_rgb=background_rgb, + background_alpha=background_alpha, + background_sampled_color=background_sampled_color, + cos_anneal_ratio=cos_anneal_ratio, + pts_ts=pts_ts) + + color_fine = ret_fine['color'] + weights = ret_fine['weights'] + weights_sum = weights.sum(dim=-1, keepdim=True) + gradients = ret_fine['gradients'] + s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True) + + return { + 'color_fine': color_fine, + 's_val': s_val, + 'cdf_fine': ret_fine['cdf'], + 'weight_sum': weights_sum, + 'weight_max': torch.max(weights, dim=-1, keepdim=True)[0], + 'gradients': gradients, + 'weights': weights, + 'gradient_error': ret_fine['gradient_error'], + 'inside_sphere': ret_fine['inside_sphere'] + } + + def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0): + return extract_geometry(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.sdf_network.sdf(pts)) + + def extract_geometry_tets(self, bound_min, bound_max, resolution, pts_ts=0, threshold=0.0, wdef=False): + if wdef: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.sdf_network.sdf(pts), + def_func=lambda pts: self.deform_pts(pts, pts_ts=pts_ts)) + else: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + 
resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.sdf_network.sdf(pts)) diff --git a/models/renderer_def_multi_objs.py b/models/renderer_def_multi_objs.py new file mode 100644 index 0000000000000000000000000000000000000000..21af05e8e1fee1f99c78857bc93becc33114de10 --- /dev/null +++ b/models/renderer_def_multi_objs.py @@ -0,0 +1,1088 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +import logging +import mcubes +from icecream import ic +import os + +import trimesh +from pysdf import SDF + +import models.fields as fields + +from uni_rep.rep_3d.dmtet import marching_tets_tetmesh, create_tetmesh_variables + +def batched_index_select(values, indices, dim = 1): + value_dims = values.shape[(dim + 1):] + values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices)) + indices = indices[(..., *((None,) * len(value_dims)))] + indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims) + value_expand_len = len(indices_shape) - (dim + 1) + values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)] + + value_expand_shape = [-1] * len(values.shape) + expand_slice = slice(dim, (dim + value_expand_len)) + value_expand_shape[expand_slice] = indices.shape[expand_slice] + values = values.expand(*value_expand_shape) + + dim += value_expand_len + return values.gather(dim, indices) + + +def create_mt_variable(device): + triangle_table = torch.tensor( + [ + [-1, -1, -1, -1, -1, -1], + [1, 0, 2, -1, -1, -1], + [4, 0, 3, -1, -1, -1], + [1, 4, 2, 1, 3, 4], + [3, 1, 5, -1, -1, -1], + [2, 3, 0, 2, 5, 3], + [1, 4, 0, 1, 5, 4], + [4, 2, 5, -1, -1, -1], + [4, 5, 2, -1, -1, -1], + [4, 1, 0, 4, 5, 1], + [3, 2, 0, 3, 5, 2], + [1, 3, 5, -1, -1, -1], + [4, 1, 2, 4, 3, 1], + [3, 0, 4, -1, -1, -1], + [2, 0, 1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1] + ], dtype=torch.long, device=device) + + num_triangles_table = torch.tensor([0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long, device=device) + base_tet_edges = torch.tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long, device=device) + v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=device)) + return triangle_table, num_triangles_table, base_tet_edges, v_id + + + +def extract_fields_from_tets(bound_min, bound_max, resolution, query_func, def_func=None): + # load tet via resolution # + # scale them via bounds # + # extract the geometry # + # /home/xueyi/gen/DeepMetaHandles/data/tets/100_compress.npz # strange # + device = bound_min.device + # if resolution in [64, 70, 80, 90, 100]: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{resolution}_compress.npz" + # else: + tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + tets = np.load(tet_fn) + verts = torch.from_numpy(tets['vertices']).float().to(device) # verts positions + indices = torch.from_numpy(tets['tets']).long().to(device) # .to(self.device) + # split # + # verts; verts; # + minn_verts, _ = torch.min(verts, dim=0) + maxx_verts, _ = torch.max(verts, dim=0) # (3, ) # exporting the + # scale_verts = maxx_verts - minn_verts + scale_bounds = bound_max - bound_min # scale bounds # + + ### scale the vertices ### + scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + # scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + scaled_verts = scaled_verts * 2. - 1. 
# init the sdf filed viathe tet mesh vertices and the sdf values ## + # scaled_verts = (scaled_verts * scale_bounds.unsqueeze(0)) + bound_min.unsqueeze(0) ## the scaled verts ### + + # scaled_verts = scaled_verts - scale_bounds.unsqueeze(0) / 2. # + # scaled_verts = scaled_verts - bound_min.unsqueeze(0) - scale_bounds.unsqueeze(0) / 2. + + sdf_values = [] + N = 64 + query_bundles = N ** 3 ### N^3 + query_NNs = scaled_verts.size(0) // query_bundles + if query_NNs * query_bundles < scaled_verts.size(0): + query_NNs += 1 + for i_query in range(query_NNs): + cur_bundle_st = i_query * query_bundles + cur_bundle_ed = (i_query + 1) * query_bundles + cur_bundle_ed = min(cur_bundle_ed, scaled_verts.size(0)) + cur_query_pts = scaled_verts[cur_bundle_st: cur_bundle_ed] + if def_func is not None: + cur_query_pts = def_func(cur_query_pts) + cur_query_vals = query_func(cur_query_pts) + sdf_values.append(cur_query_vals) + sdf_values = torch.cat(sdf_values, dim=0) + # print(f"queryed sdf values: {sdf_values.size()}") # + + GT_sdf_values = np.load("/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy", allow_pickle=True) + GT_sdf_values = torch.from_numpy(GT_sdf_values).float().to(device) + + # intrinsic, tet values, pts values, sdf network # + triangle_table, num_triangles_table, base_tet_edges, v_id = create_mt_variable(device) + tet_table, num_tets_table = create_tetmesh_variables(device) + + sdf_values = sdf_values.squeeze(-1) # how the rendering # + + # print(f"GT_sdf_values: {GT_sdf_values.size()}, sdf_values: {sdf_values.size()}, scaled_verts: {scaled_verts.size()}") + # print(f"scaled_verts: {scaled_verts.size()}, ") + # pos_nx3, sdf_n, tet_fx4, triangle_table, num_triangles_table, base_tet_edges, v_id, + # return_tet_mesh=False, ori_v=None, num_tets_table=None, tet_table=None): + # marching_tets_tetmesh ## + verts, faces, tet_verts, tets = marching_tets_tetmesh(scaled_verts, sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + ### use the GT sdf values for the marching tets ### + GT_verts, GT_faces, GT_tet_verts, GT_tets = marching_tets_tetmesh(scaled_verts, GT_sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + + # print(f"After tet marching with verts: {verts.size()}, faces: {faces.size()}") + return verts, faces, sdf_values, GT_verts, GT_faces # verts, faces # + +def extract_fields(bound_min, bound_max, resolution, query_func): + N = 64 + X = torch.linspace(bound_min[0], bound_max[0], resolution).split(N) + Y = torch.linspace(bound_min[1], bound_max[1], resolution).split(N) + Z = torch.linspace(bound_min[2], bound_max[2], resolution).split(N) + + u = np.zeros([resolution, resolution, resolution], dtype=np.float32) + with torch.no_grad(): + for xi, xs in enumerate(X): + for yi, ys in enumerate(Y): + for zi, zs in enumerate(Z): + xx, yy, zz = torch.meshgrid(xs, ys, zs) + pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) + val = query_func(pts).reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() + u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val + # should save u here # + # save_u_path = os.path.join("/data2/datasets/diffsim/neus/exp/hand_test/womask_sphere_reverse_value/other_saved", "sdf_values.npy") + # np.save(save_u_path, u) + # print(f"u saved to {save_u_path}") + 
return u + + +def extract_geometry(bound_min, bound_max, resolution, threshold, query_func): + print('threshold: {}'.format(threshold)) + + ## using maching cubes ### + u = extract_fields(bound_min, bound_max, resolution, query_func) + vertices, triangles = mcubes.marching_cubes(u, threshold) # grid sdf and marching cubes # + b_max_np = bound_max.detach().cpu().numpy() + b_min_np = bound_min.detach().cpu().numpy() + + vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + ### using maching cubes ### + + ### using marching tets ### + # vertices, triangles = extract_fields_from_tets(bound_min, bound_max, resolution, query_func) + # vertices = vertices.detach().cpu().numpy() + # triangles = triangles.detach().cpu().numpy() + ### using marching tets ### + + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles + +def extract_geometry_tets(bound_min, bound_max, resolution, threshold, query_func, def_func=None): + # print('threshold: {}'.format(threshold)) + + ### using maching cubes ### + # u = extract_fields(bound_min, bound_max, resolution, query_func) + # vertices, triangles = mcubes.marching_cubes(u, threshold) # grid sdf and marching cubes # + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + ### using maching cubes ### + + ## + ### using marching tets ### fiels from tets ## + vertices, triangles, tet_sdf_values, GT_verts, GT_faces = extract_fields_from_tets(bound_min, bound_max, resolution, query_func, def_func=def_func) + # vertices = vertices.detach().cpu().numpy() + # triangles = triangles.detach().cpu().numpy() + ### using marching tets ### + + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + # + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles, tet_sdf_values, GT_verts, GT_faces + + +def sample_pdf(bins, weights, n_samples, det=False): + # This implementation is from NeRF + # Get pdf + weights = weights + 1e-5 # prevent nans + pdf = weights / torch.sum(weights, -1, keepdim=True) + cdf = torch.cumsum(pdf, -1) + cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) + # Take uniform samples + if det: + u = torch.linspace(0. + 0.5 / n_samples, 1. 
- 0.5 / n_samples, steps=n_samples) + u = u.expand(list(cdf.shape[:-1]) + [n_samples]) + else: + u = torch.rand(list(cdf.shape[:-1]) + [n_samples]) + + # Invert CDF # invert cdf # + u = u.contiguous() + inds = torch.searchsorted(cdf, u, right=True) + below = torch.max(torch.zeros_like(inds - 1), inds - 1) + above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds) + inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2) + + matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] + cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) + bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) + + denom = (cdf_g[..., 1] - cdf_g[..., 0]) + denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom) + t = (u - cdf_g[..., 0]) / denom + samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) + + return samples + + +def load_GT_vertices(GT_meshes_folder): + tot_meshes_fns = os.listdir(GT_meshes_folder) + tot_meshes_fns = [fn for fn in tot_meshes_fns if fn.endswith(".obj")] + tot_mesh_verts = [] + tot_mesh_faces = [] + n_tot_verts = 0 + for fn in tot_meshes_fns: + cur_mesh_fn = os.path.join(GT_meshes_folder, fn) + obj_mesh = trimesh.load(cur_mesh_fn, process=False) + # obj_mesh.remove_degenerate_faces(height=1e-06) + + verts_obj = np.array(obj_mesh.vertices) + faces_obj = np.array(obj_mesh.faces) + + tot_mesh_verts.append(verts_obj) + tot_mesh_faces.append(faces_obj + n_tot_verts) + n_tot_verts += verts_obj.shape[0] + + # tot_mesh_faces.append(faces_obj) + tot_mesh_verts = np.concatenate(tot_mesh_verts, axis=0) + tot_mesh_faces = np.concatenate(tot_mesh_faces, axis=0) + return tot_mesh_verts, tot_mesh_faces + + +class NeuSRenderer: + def __init__(self, + nerf, + sdf_network, + deviation_network, + color_network, + n_samples, + n_importance, + n_outside, + up_sample_steps, + perturb): + self.nerf = nerf # multiple sdf networks and deviation networks and xxx # + self.sdf_network = sdf_network + self.deviation_network = deviation_network + self.color_network = color_network + self.n_samples = n_samples + self.n_importance = n_importance + self.n_outside = n_outside + self.up_sample_steps = up_sample_steps + self.perturb = perturb + + GT_meshes_folder = "/home/xueyi/diffsim/DiffHand/assets/hand" + self.mesh_vertices, self.mesh_faces = load_GT_vertices(GT_meshes_folder=GT_meshes_folder) + maxx_pts = 25. + minn_pts = -15. + self.mesh_vertices = (self.mesh_vertices - minn_pts) / (maxx_pts - minn_pts) + f = SDF(self.mesh_vertices, self.mesh_faces) + self.gt_sdf = f ## a unite sphere or box + + self.minn_pts = 0 + self.maxx_pts = 1. + + # self.minn_pts = -1.5 # gorudn-truth states with the deformation -> update the sdf value fiedl + # self.maxx_pts = 1.5 # + self.bkg_pts = ... # TODO: the bkg pts # bkg_pts; # bkg_pts_defs # + self.cur_fr_bkg_pts_defs = ... # TODO: set the cur_bkg_pts_defs for each frame # + self.dist_interp_thres = ... # TODO: set the cur_bkg_pts_defs # + + self.bending_network = ... # TODO: add the bending network # + self.use_bending_network = ... # TODO: set the property # + self.use_delta_bending = ... # TODO + self.prev_sdf_network = ... 
# TODO + self.use_selector = False + # use bending network # + # two bending netwrok + # two sdf networks + + + # get the pts and render the pts # + # pts and the rendering pts # + def deform_pts(self, pts, pts_ts=0): # deform pts # + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + + if isinstance(self.bending_network, list): + pts_offsets = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + if isinstance(cur_bending_network, fields.BendingNetwork): + for cur_pts_ts in range(pts_ts, -1, -1): + cur_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else cur_pts_exp, input_pts_ts=cur_pts_ts) + elif isinstance(cur_bending_network, fields.BendingNetworkRigidTrans): + cur_pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + raise ValueError('Encountered with unexpected bending network class...') + pts_offsets.append(cur_pts_exp - pts_exp) + pts_offsets = torch.stack(pts_offsets, dim=0) + pts_offsets = torch.sum(pts_offsets, dim=0) + pts_exp = pts_exp + pts_offsets + # for cur_pts_ts in range(pts_ts, -1, -1): + # if isinstance(self.bending_network, list): # pts ts # + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + if isinstance(self.bending_network, list): # prev sdf network # + pts_offsets = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + pts_offsets.append(bended_pts_exp - pts_exp) + pts_offsets = torch.stack(pts_offsets, dim=0) + pts_offsets = torch.sum(pts_offsets, dim=0) + pts_exp = pts_exp + pts_offsets + else: + pts_exp = self.bending_network(pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + + def deform_pts_with_selector(self, pts, pts_ts=0): # deform pts # + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + if isinstance(self.bending_network, list): 
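+ # Each object's bending network produces its own bent copy of the query points (chained over time
+ # steps pts_ts..0, or a single call when optimized rigid translations are used); a per-point
+ # selector derived from the current and previous SDF networks then picks which object's bent copy
+ # to keep via batched_index_select.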
+ bended_pts = [] + queries_sdfs_selector = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + if cur_bending_network.use_opt_rigid_translations: + bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + else: + # bended_pts_exp = pts_exp.clone() + for cur_pts_ts in range(pts_ts, -1, -1): + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts) + _, cur_bended_pts_selecotr = self.query_pts_sdf_fn_for_selector(bended_pts_exp) + bended_pts.append(bended_pts_exp) + queries_sdfs_selector.append(cur_bended_pts_selecotr) + bended_pts = torch.stack(bended_pts, dim=1) # nn_pts x 2 x 3 for bended pts # + queries_sdfs_selector = torch.stack(queries_sdfs_selector, dim=1) # nn_pts x 2 + # queries_sdfs_selector = (queries_sdfs_selector.sum(dim=1) > 0.5).float().long() + sdf_selector = queries_sdfs_selector[:, -1] + # sdf_selector = queries_sdfs_selector + # delta_sdf, sdf_selector = self.query_pts_sdf_fn_for_selector(pts_exp) + bended_pts = batched_index_select(values=bended_pts, indices=sdf_selector.unsqueeze(1), dim=1).squeeze(1) # nn_pts x 3 # + # print(f"bended_pts: {bended_pts.size()}, pts_exp: {pts_exp.size()}") + pts_exp = bended_pts.squeeze(1) + + + # for cur_pts_ts in range(pts_ts, -1, -1): + # if isinstance(self.bending_network, list): + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + if isinstance(self.bending_network, list): # prev sdf network # + # pts_offsets = [] + bended_pts = [] + queries_sdfs_selector = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + # pts_offsets.append(bended_pts_exp - pts_exp) + _, cur_bended_pts_selecotr = self.query_pts_sdf_fn_for_selector(bended_pts_exp) + bended_pts.append(bended_pts_exp) + queries_sdfs_selector.append(cur_bended_pts_selecotr) + bended_pts = torch.stack(bended_pts, dim=1) # nn_pts x 2 x 3 for bended pts # + queries_sdfs_selector = torch.stack(queries_sdfs_selector, dim=1) # nn_pts x 2 + # queries_sdfs_selector = (queries_sdfs_selector.sum(dim=1) > 0.5).float().long() + sdf_selector = queries_sdfs_selector[:, -1] + # sdf_selector = queries_sdfs_selector + + + # delta_sdf, sdf_selector = self.query_pts_sdf_fn_for_selector(pts_exp) + bended_pts = batched_index_select(values=bended_pts, indices=sdf_selector.unsqueeze(1), dim=1).squeeze(1) # nn_pts x 3 # + # print(f"bended_pts: {bended_pts.size()}, pts_exp: {pts_exp.size()}") + pts_exp = bended_pts.squeeze(1) + + # pts_offsets = torch.stack(pts_offsets, dim=0) + # pts_offsets = torch.sum(pts_offsets, dim=0) + # pts_exp = pts_exp + pts_offsets + else: + pts_exp = self.bending_network(pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + 
cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + + def deform_pts_passive(self, pts, pts_ts=0): + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + for cur_pts_ts in range(pts_ts, -1, -1): + if isinstance(self.bending_network, list): + for i_obj, cur_bending_network in enumerate(self.bending_network): + pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + # if isinstance(self.bending_network, list): + # pts_offsets = [] + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + # pts_offsets.append(bended_pts_exp - pts_exp) + # pts_offsets = torch.stack(pts_offsets, dim=0) + # pts_offsets = torch.sum(pts_offsets, dim=0) + # pts_exp = pts_exp + pts_offsets + # else: + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + + def query_pts_sdf_fn_for_selector(self, pts): + # for negative + # 1) inside the current mesh but outside the previous mesh ---> negative sdf for this field but positive for another field + # 2) negative in thie field and also negative in the previous field ---> + # 2) for positive values of this current field ---> + cur_sdf = self.sdf_network.sdf(pts) + prev_sdf = self.prev_sdf_network.sdf(pts) + neg_neg = ((cur_sdf < 0.).float() + (prev_sdf < 0.).float()) > 1.5 + neg_pos = ((cur_sdf < 0.).float() + (prev_sdf >= 0.).float()) > 1.5 + + neg_weq_pos = ((cur_sdf <= 0.).float() + (prev_sdf > 0.).float()) > 1.5 + + pos_neg = ((cur_sdf >= 0.).float() + (prev_sdf < 0.).float()) > 1.5 + pos_pos = 
((cur_sdf >= 0.).float() + (prev_sdf >= 0.).float()) > 1.5 + res_sdf = torch.zeros_like(cur_sdf) + res_sdf[neg_neg] = 1. # + res_sdf[neg_pos] = cur_sdf[neg_pos] + res_sdf[pos_neg] = cur_sdf[pos_neg] + + # inside the residual mesh -> must be neg and pos + res_sdf_selector = torch.zeros_like(cur_sdf).long() # + # res_sdf_selector[neg_pos] = 1 # is the residual mesh + res_sdf_selector[neg_weq_pos] = 1 + # res_sdf_selector[] + + cat_cur_prev_sdf = torch.stack( + [cur_sdf, prev_sdf], dim=-1 + ) + minn_cur_prev_sdf, _ = torch.min(cat_cur_prev_sdf, dim=-1) + res_sdf[pos_pos] = minn_cur_prev_sdf[pos_pos] + + return res_sdf, res_sdf_selector + + def query_func_sdf(self, pts): + if isinstance(self.sdf_network, list): + tot_sdf_values = [] + for i_obj, cur_sdf_network in enumerate(self.sdf_network): + cur_sdf_values = cur_sdf_network.sdf(pts) + tot_sdf_values.append(cur_sdf_values) + tot_sdf_values = torch.stack(tot_sdf_values, dim=-1) + tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + sdf = tot_sdf_values + else: + sdf = self.sdf_network.sdf(pts) + return sdf + + def query_func_sdf_passive(self, pts): + # if isinstance(self.sdf_network, list): + # tot_sdf_values = [] + # for i_obj, cur_sdf_network in enumerate(self.sdf_network): + # cur_sdf_values = cur_sdf_network.sdf(pts) + # tot_sdf_values.append(cur_sdf_values) + # tot_sdf_values = torch.stack(tot_sdf_values, dim=-1) + # tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + # sdf = tot_sdf_values + # else: + sdf = self.sdf_network[-1].sdf(pts) + + return sdf + + + def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None, pts_ts=0): + """ + Render background + """ + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + + # Section midpoints # + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # batch_size, n_samples, 3 # + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + dis_to_center = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).clip(1.0, 1e10) + pts = torch.cat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1) # batch_size, n_samples, 4 # + + dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3) + + pts = pts.reshape(-1, 3 + int(self.n_outside > 0)) + dirs = dirs.reshape(-1, 3) + + density, sampled_color = nerf(pts, dirs) + sampled_color = torch.sigmoid(sampled_color) + alpha = 1.0 - torch.exp(-F.softplus(density.reshape(batch_size, n_samples)) * dists) + alpha = alpha.reshape(batch_size, n_samples) + weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + sampled_color = sampled_color.reshape(batch_size, n_samples, 3) + color = (weights[:, :, None] * sampled_color).sum(dim=1) + if background_rgb is not None: + color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True)) + + return { + 'color': color, + 'sampled_color': sampled_color, + 'alpha': alpha, + 'weights': weights, + } + + def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s, pts_ts=0): + """ + Up sampling give a fixed inv_s + """ + batch_size, n_samples = z_vals.shape + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False) + inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0) + sdf = sdf.reshape(batch_size, n_samples) + prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:] + prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:] + mid_sdf = (prev_sdf + next_sdf) * 0.5 + cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5) + + # ---------------------------------------------------------------------------------------------------------- + # Use min value of [ cos, prev_cos ] + # Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more + # robust when meeting situations like below: + # + # SDF + # ^ + # |\ -----x----... + # | \ / + # | x x + # |---\----/-------------> 0 level + # | \ / + # | \/ + # | + # ---------------------------------------------------------------------------------------------------------- + prev_cos_val = torch.cat([torch.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1) + cos_val = torch.stack([prev_cos_val, cos_val], dim=-1) + cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False) + cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere + + dist = (next_z_vals - prev_z_vals) + prev_esti_sdf = mid_sdf - cos_val * dist * 0.5 + next_esti_sdf = mid_sdf + cos_val * dist * 0.5 + prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s) + next_cdf = torch.sigmoid(next_esti_sdf * inv_s) + alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5) + weights = alpha * torch.cumprod( + torch.cat([torch.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + + z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach() + return z_samples + + def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, last=False, pts_ts=0): + batch_size, n_samples = z_vals.shape + _, n_importance = new_z_vals.shape + pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None] + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + z_vals = torch.cat([z_vals, new_z_vals], dim=-1) + z_vals, index = torch.sort(z_vals, dim=-1) + + if not last: + if isinstance(self.sdf_network, list): + tot_new_sdf = [] + for i_obj, cur_sdf_network in enumerate(self.sdf_network): + cur_new_sdf = cur_sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + tot_new_sdf.append(cur_new_sdf) + tot_new_sdf = torch.stack(tot_new_sdf, dim=-1) + new_sdf, _ = torch.min(tot_new_sdf, dim=-1) # + else: + new_sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + sdf = torch.cat([sdf, new_sdf], dim=-1) + xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1) + index = index.reshape(-1) + sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance) + + return z_vals, sdf + + + + def render_core(self, + rays_o, + rays_d, + z_vals, + sample_dist, + sdf_network, + deviation_network, + color_network, + background_alpha=None, + background_sampled_color=None, + background_rgb=None, + cos_anneal_ratio=0.0, + pts_ts=0): + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 # z_vals and dists * 0.5 # + + # Section midpoints + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3 + dirs = rays_d[:, None, :].expand(pts.shape) + + pts = pts.reshape(-1, 3) # pts, nn_ou + dirs = dirs.reshape(-1, 3) + + pts = (pts - self.minn_pts) / (self.maxx_pts - self.minn_pts) + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + if isinstance(sdf_network, list): + tot_sdf = [] + tot_feature_vector = [] + tot_obj_sel = [] + tot_gradients = [] + for i_obj, cur_sdf_network in enumerate(sdf_network): + cur_sdf_nn_output = cur_sdf_network(pts) + cur_sdf, cur_feature_vector = cur_sdf_nn_output[:, :1], cur_sdf_nn_output[:, 1:] + tot_sdf.append(cur_sdf) + tot_feature_vector.append(cur_feature_vector) + + gradients = cur_sdf_network.gradient(pts).squeeze() + tot_gradients.append(gradients) + tot_sdf = torch.stack(tot_sdf, dim=-1) + sdf, obj_sel = torch.min(tot_sdf, dim=-1) + feature_vector = torch.stack(tot_feature_vector, dim=1) + + # batched_index_select + # print(f"before sel: {feature_vector.size()}, obj_sel: {obj_sel.size()}") + feature_vector = batched_index_select(values=feature_vector, indices=obj_sel, dim=1).squeeze(1) + + + # feature_vector = feature_vector[obj_sel.unsqueeze(-1), :].squeeze(1) + # print(f"after sel: {feature_vector.size()}") + tot_gradients = torch.stack(tot_gradients, dim=1) + # gradients = tot_gradients[obj_sel.unsqueeze(-1)].squeeze(1) + gradients = batched_index_select(values=tot_gradients, indices=obj_sel, dim=1).squeeze(1) + # 
print(f"gradients: {gradients.size()}, tot_gradients: {tot_gradients.size()}") + + else: + sdf_nn_output = sdf_network(pts) + sdf = sdf_nn_output[:, :1] + feature_vector = sdf_nn_output[:, 1:] + gradients = sdf_network.gradient(pts).squeeze() + + sampled_color = color_network(pts, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3) + + # deviation network # + inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6) # Single parameter + inv_s = inv_s.expand(batch_size * n_samples, 1) + + true_cos = (dirs * gradients).sum(-1, keepdim=True) + + # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes + # the cos value "not dead" at the beginning training iterations, for better convergence. + iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) + + F.relu(-true_cos) * cos_anneal_ratio) # always non-positive + + # Estimate signed distances at section points + estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5 + estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5 + + prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s) + next_cdf = torch.sigmoid(estimated_next_sdf * inv_s) + + p = prev_cdf - next_cdf + c = prev_cdf + + alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0) + + pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(batch_size, n_samples) + inside_sphere = (pts_norm < 1.0).float().detach() + relax_inside_sphere = (pts_norm < 1.2).float().detach() + + # Render with background + if background_alpha is not None: + alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere) + alpha = torch.cat([alpha, background_alpha[:, n_samples:]], dim=-1) + sampled_color = sampled_color * inside_sphere[:, :, None] +\ + background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None] + sampled_color = torch.cat([sampled_color, background_sampled_color[:, n_samples:]], dim=1) + + weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + weights_sum = weights.sum(dim=-1, keepdim=True) + + color = (sampled_color * weights[:, :, None]).sum(dim=1) + if background_rgb is not None: # Fixed background, usually black + color = color + background_rgb * (1.0 - weights_sum) + + # Eikonal loss + gradient_error = (torch.linalg.norm(gradients.reshape(batch_size, n_samples, 3), ord=2, + dim=-1) - 1.0) ** 2 + gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5) + + return { + 'color': color, + 'sdf': sdf, + 'dists': dists, + 'gradients': gradients.reshape(batch_size, n_samples, 3), + 's_val': 1.0 / inv_s, + 'mid_z_vals': mid_z_vals, + 'weights': weights, + 'cdf': c.reshape(batch_size, n_samples), + 'gradient_error': gradient_error, + 'inside_sphere': inside_sphere + } + + def render(self, rays_o, rays_d, near, far, pts_ts=0, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0, use_gt_sdf=False): + batch_size = len(rays_o) + sample_dist = 2.0 / self.n_samples # in a unit sphere # # Assuming the region of interest is a unit sphere + z_vals = torch.linspace(0.0, 1.0, self.n_samples) # linspace # + z_vals = near + (far - near) * z_vals[None, :] + + z_vals_outside = None + if self.n_outside > 0: + z_vals_outside = torch.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside) + + n_samples = self.n_samples + perturb = self.perturb + + if perturb_overwrite >= 0: + perturb = perturb_overwrite + if perturb > 0: + t_rand = (torch.rand([batch_size, 1]) - 0.5) + z_vals = z_vals + t_rand * 2.0 / self.n_samples + + if self.n_outside > 0: # z values output # n_outside # + mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1]) + upper = torch.cat([mids, z_vals_outside[..., -1:]], -1) + lower = torch.cat([z_vals_outside[..., :1], mids], -1) + t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]]) + z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand + + if self.n_outside > 0: + z_vals_outside = far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_samples + + background_alpha = None + background_sampled_color = None + + # Up sample + if self.n_importance > 0: + with torch.no_grad(): + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] + + pts = (pts - self.minn_pts) / (self.maxx_pts - self.minn_pts) + # sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, self.n_samples) + # gt_sdf # + + # + # pts = ((pts - xyz_min) / (xyz_max - xyz_min)).flip((-1,)) * 2 - 1 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) # give nthe pts + + pts_exp = pts.reshape(-1, 3) + # minn_pts, _ = torch.min(pts_exp, dim=0) + # maxx_pts, _ = torch.max(pts_exp, dim=0) # deformation field (not a rigid one) -> the meshes # + # print(f"minn_pts: {minn_pts}, maxx_pts: {maxx_pts}") + + # pts_to_near = pts - near.unsqueeze(1) + # maxx_pts = 1.5; minn_pts = -1.5 + # # maxx_pts = 3; minn_pts = -3 + # # maxx_pts = 1; minn_pts = -1 + # pts_exp = (pts_exp - minn_pts) / (maxx_pts - minn_pts) + + ## render and iamges #### + if use_gt_sdf: + ### use the GT sdf field #### + # print(f"Using gt sdf :") + sdf = self.gt_sdf(pts_exp.reshape(-1, 3).detach().cpu().numpy()) + sdf = torch.from_numpy(sdf).float().cuda() + sdf = sdf.reshape(batch_size, self.n_samples) + ### use the GT sdf field #### + else: + # pts_exp: (bsz x nn_s) x 3 -> (sdf_network) -> (bsz x nn_s) + #### use the 
optimized sdf field #### + + # sdf = self.sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + + if isinstance(self.sdf_network, list): + tot_sdf_values = [] + for i_obj, cur_sdf_network in enumerate(self.sdf_network): + cur_sdf_values = cur_sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + tot_sdf_values.append(cur_sdf_values) + tot_sdf_values = torch.stack(tot_sdf_values, dim=-1) + tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + sdf = tot_sdf_values + else: + sdf = self.sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + + #### use the optimized sdf field #### + + for i in range(self.up_sample_steps): + new_z_vals = self.up_sample(rays_o, + rays_d, + z_vals, + sdf, + self.n_importance // self.up_sample_steps, + 64 * 2**i, + pts_ts=pts_ts) + z_vals, sdf = self.cat_z_vals(rays_o, + rays_d, + z_vals, + new_z_vals, + sdf, + last=(i + 1 == self.up_sample_steps), + pts_ts=pts_ts) + + n_samples = self.n_samples + self.n_importance + + # Background model + if self.n_outside > 0: + z_vals_feed = torch.cat([z_vals, z_vals_outside], dim=-1) + z_vals_feed, _ = torch.sort(z_vals_feed, dim=-1) + ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf, pts_ts=pts_ts) + + background_sampled_color = ret_outside['sampled_color'] + background_alpha = ret_outside['alpha'] + + # Render core + ret_fine = self.render_core(rays_o, # + rays_d, + z_vals, + sample_dist, + self.sdf_network, + self.deviation_network, + self.color_network, + background_rgb=background_rgb, + background_alpha=background_alpha, + background_sampled_color=background_sampled_color, + cos_anneal_ratio=cos_anneal_ratio, + pts_ts=pts_ts) + + color_fine = ret_fine['color'] + weights = ret_fine['weights'] + weights_sum = weights.sum(dim=-1, keepdim=True) + gradients = ret_fine['gradients'] + s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True) + + return { + 'color_fine': color_fine, + 's_val': s_val, + 'cdf_fine': ret_fine['cdf'], + 'weight_sum': weights_sum, + 'weight_max': torch.max(weights, dim=-1, keepdim=True)[0], + 'gradients': gradients, + 'weights': weights, + 'gradient_error': ret_fine['gradient_error'], + 'inside_sphere': ret_fine['inside_sphere'] + } + + def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0): + return extract_geometry(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts), + query_func=lambda pts: -self.query_func_sdf(pts) + ) + + # if self.deform_pts_with_selector: + # pts = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + def extract_geometry_tets(self, bound_min, bound_max, resolution, pts_ts=0, threshold=0.0, wdef=False): + if wdef: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + def_func=lambda pts: self.deform_pts(pts, pts_ts=pts_ts) if not self.use_selector else self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts)) + else: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts) + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + ) + + def extract_geometry_tets_passive(self, bound_min, bound_max, resolution, pts_ts=0, threshold=0.0, 
wdef=False): + if wdef: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.query_func_sdf_passive(pts), # lambda pts: -self.sdf_network.sdf(pts), + def_func=lambda pts: self.deform_pts_passive(pts, pts_ts=pts_ts)) + else: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts) + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + ) diff --git a/models/renderer_def_multi_objs_compositional.py b/models/renderer_def_multi_objs_compositional.py new file mode 100644 index 0000000000000000000000000000000000000000..ad2ae1c156c4c5ceb4c6c7b17107750652bc7078 --- /dev/null +++ b/models/renderer_def_multi_objs_compositional.py @@ -0,0 +1,1510 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +import logging +import mcubes +from icecream import ic +import os + +import trimesh +from pysdf import SDF + +import models.fields as fields + +from uni_rep.rep_3d.dmtet import marching_tets_tetmesh, create_tetmesh_variables + +def batched_index_select(values, indices, dim = 1): + value_dims = values.shape[(dim + 1):] + values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices)) + indices = indices[(..., *((None,) * len(value_dims)))] + indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims) + value_expand_len = len(indices_shape) - (dim + 1) + values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)] + + value_expand_shape = [-1] * len(values.shape) + expand_slice = slice(dim, (dim + value_expand_len)) + value_expand_shape[expand_slice] = indices.shape[expand_slice] + values = values.expand(*value_expand_shape) + + dim += value_expand_len + return values.gather(dim, indices) + + +def create_mt_variable(device): + triangle_table = torch.tensor( + [ + [-1, -1, -1, -1, -1, -1], + [1, 0, 2, -1, -1, -1], + [4, 0, 3, -1, -1, -1], + [1, 4, 2, 1, 3, 4], + [3, 1, 5, -1, -1, -1], + [2, 3, 0, 2, 5, 3], + [1, 4, 0, 1, 5, 4], + [4, 2, 5, -1, -1, -1], + [4, 5, 2, -1, -1, -1], + [4, 1, 0, 4, 5, 1], + [3, 2, 0, 3, 5, 2], + [1, 3, 5, -1, -1, -1], + [4, 1, 2, 4, 3, 1], + [3, 0, 4, -1, -1, -1], + [2, 0, 1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1] + ], dtype=torch.long, device=device) + + num_triangles_table = torch.tensor([0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long, device=device) + base_tet_edges = torch.tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long, device=device) + v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=device)) + return triangle_table, num_triangles_table, base_tet_edges, v_id + + + +def extract_fields_from_tets(bound_min, bound_max, resolution, query_func, def_func=None): + # load tet via resolution # + # scale them via bounds # + # extract the geometry # + # /home/xueyi/gen/DeepMetaHandles/data/tets/100_compress.npz # strange # + device = bound_min.device + # if resolution in [64, 70, 80, 90, 100]: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{resolution}_compress.npz" + # else: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + if not os.path.exists(tet_fn): + tet_fn = f"/data/xueyi/NeuS/data/tets/{100}_compress.npz" + tets = np.load(tet_fn) + verts = 
torch.from_numpy(tets['vertices']).float().to(device) # verts positions + indices = torch.from_numpy(tets['tets']).long().to(device) # .to(self.device) + # split # + # verts; verts; # + minn_verts, _ = torch.min(verts, dim=0) + maxx_verts, _ = torch.max(verts, dim=0) # (3, ) # exporting the + # scale_verts = maxx_verts - minn_verts + scale_bounds = bound_max - bound_min # scale bounds # + + ### scale the vertices ### + scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + # scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + scaled_verts = scaled_verts * 2. - 1. # init the sdf filed viathe tet mesh vertices and the sdf values ## + # scaled_verts = (scaled_verts * scale_bounds.unsqueeze(0)) + bound_min.unsqueeze(0) ## the scaled verts ### + + # scaled_verts = scaled_verts - scale_bounds.unsqueeze(0) / 2. # + # scaled_verts = scaled_verts - bound_min.unsqueeze(0) - scale_bounds.unsqueeze(0) / 2. + + sdf_values = [] + N = 64 + query_bundles = N ** 3 ### N^3 + query_NNs = scaled_verts.size(0) // query_bundles + if query_NNs * query_bundles < scaled_verts.size(0): + query_NNs += 1 + for i_query in range(query_NNs): + cur_bundle_st = i_query * query_bundles + cur_bundle_ed = (i_query + 1) * query_bundles + cur_bundle_ed = min(cur_bundle_ed, scaled_verts.size(0)) + cur_query_pts = scaled_verts[cur_bundle_st: cur_bundle_ed] + if def_func is not None: + cur_query_pts = def_func(cur_query_pts) + cur_query_vals = query_func(cur_query_pts) + sdf_values.append(cur_query_vals) + sdf_values = torch.cat(sdf_values, dim=0) + # print(f"queryed sdf values: {sdf_values.size()}") # + + # GT_sdf_values = np.load("/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy", allow_pickle=True) + gt_sdf_fn = "/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy" + if not os.path.exists(gt_sdf_fn): + gt_sdf_fn = "/data/xueyi/NeuS/data/100_sdf_values.npy" + GT_sdf_values = np.load(gt_sdf_fn, allow_pickle=True) + GT_sdf_values = torch.from_numpy(GT_sdf_values).float().to(device) + + # intrinsic, tet values, pts values, sdf network # + triangle_table, num_triangles_table, base_tet_edges, v_id = create_mt_variable(device) + tet_table, num_tets_table = create_tetmesh_variables(device) + + sdf_values = sdf_values.squeeze(-1) # how the rendering # + + # print(f"GT_sdf_values: {GT_sdf_values.size()}, sdf_values: {sdf_values.size()}, scaled_verts: {scaled_verts.size()}") + # print(f"scaled_verts: {scaled_verts.size()}, ") + # pos_nx3, sdf_n, tet_fx4, triangle_table, num_triangles_table, base_tet_edges, v_id, + # return_tet_mesh=False, ori_v=None, num_tets_table=None, tet_table=None): + # marching_tets_tetmesh ## + verts, faces, tet_verts, tets = marching_tets_tetmesh(scaled_verts, sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + ### use the GT sdf values for the marching tets ### + GT_verts, GT_faces, GT_tet_verts, GT_tets = marching_tets_tetmesh(scaled_verts, GT_sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + + # print(f"After tet marching with verts: {verts.size()}, faces: {faces.size()}") + return verts, faces, sdf_values, GT_verts, GT_faces # verts, faces # + + +def 
extract_fields_from_tets_selector(bound_min, bound_max, resolution, query_func, def_func=None): + # load tet via resolution # + # scale them via bounds # + # extract the geometry # + # /home/xueyi/gen/DeepMetaHandles/data/tets/100_compress.npz # strange # + device = bound_min.device + # if resolution in [64, 70, 80, 90, 100]: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{resolution}_compress.npz" + # else: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + if not os.path.exists(tet_fn): + tet_fn = f"/data/xueyi/NeuS/data/tets/{100}_compress.npz" + tets = np.load(tet_fn) + verts = torch.from_numpy(tets['vertices']).float().to(device) # verts positions + indices = torch.from_numpy(tets['tets']).long().to(device) # .to(self.device) + # split # + # verts; verts; # + minn_verts, _ = torch.min(verts, dim=0) + maxx_verts, _ = torch.max(verts, dim=0) # (3, ) # exporting the + # scale_verts = maxx_verts - minn_verts + scale_bounds = bound_max - bound_min # scale bounds # + + ### scale the vertices ### + scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + # scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + scaled_verts = scaled_verts * 2. - 1. # init the sdf filed viathe tet mesh vertices and the sdf values ## + # scaled_verts = (scaled_verts * scale_bounds.unsqueeze(0)) + bound_min.unsqueeze(0) ## the scaled verts ### + + # scaled_verts = scaled_verts - scale_bounds.unsqueeze(0) / 2. # + # scaled_verts = scaled_verts - bound_min.unsqueeze(0) - scale_bounds.unsqueeze(0) / 2. + + sdf_values = [] + N = 64 + query_bundles = N ** 3 ### N^3 + query_NNs = scaled_verts.size(0) // query_bundles + if query_NNs * query_bundles < scaled_verts.size(0): + query_NNs += 1 + for i_query in range(query_NNs): + cur_bundle_st = i_query * query_bundles + cur_bundle_ed = (i_query + 1) * query_bundles + cur_bundle_ed = min(cur_bundle_ed, scaled_verts.size(0)) + cur_query_pts = scaled_verts[cur_bundle_st: cur_bundle_ed] + if def_func is not None: + cur_query_pts, _ = def_func(cur_query_pts) + cur_query_vals = query_func(cur_query_pts) + sdf_values.append(cur_query_vals) + sdf_values = torch.cat(sdf_values, dim=0) + # print(f"queryed sdf values: {sdf_values.size()}") # + + # GT_sdf_values = np.load("/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy", allow_pickle=True) + gt_sdf_fn = "/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy" + if not os.path.exists(gt_sdf_fn): + gt_sdf_fn = "/data/xueyi/NeuS/data/100_sdf_values.npy" + GT_sdf_values = np.load(gt_sdf_fn, allow_pickle=True) + GT_sdf_values = torch.from_numpy(GT_sdf_values).float().to(device) + + # intrinsic, tet values, pts values, sdf network # + triangle_table, num_triangles_table, base_tet_edges, v_id = create_mt_variable(device) + tet_table, num_tets_table = create_tetmesh_variables(device) + + sdf_values = sdf_values.squeeze(-1) # how the rendering # + + # print(f"GT_sdf_values: {GT_sdf_values.size()}, sdf_values: {sdf_values.size()}, scaled_verts: {scaled_verts.size()}") + # print(f"scaled_verts: {scaled_verts.size()}, ") + # pos_nx3, sdf_n, tet_fx4, triangle_table, num_triangles_table, base_tet_edges, v_id, + # return_tet_mesh=False, ori_v=None, num_tets_table=None, tet_table=None): + # marching_tets_tetmesh ## + verts, faces, tet_verts, tets = 
marching_tets_tetmesh(scaled_verts, sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + ### use the GT sdf values for the marching tets ### + GT_verts, GT_faces, GT_tet_verts, GT_tets = marching_tets_tetmesh(scaled_verts, GT_sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + + # print(f"After tet marching with verts: {verts.size()}, faces: {faces.size()}") + return verts, faces, sdf_values, GT_verts, GT_faces # verts, faces # + + +def extract_fields(bound_min, bound_max, resolution, query_func): + N = 64 + X = torch.linspace(bound_min[0], bound_max[0], resolution).split(N) + Y = torch.linspace(bound_min[1], bound_max[1], resolution).split(N) + Z = torch.linspace(bound_min[2], bound_max[2], resolution).split(N) + + u = np.zeros([resolution, resolution, resolution], dtype=np.float32) + with torch.no_grad(): + for xi, xs in enumerate(X): + for yi, ys in enumerate(Y): + for zi, zs in enumerate(Z): + xx, yy, zz = torch.meshgrid(xs, ys, zs) + pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) + val = query_func(pts).reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() + u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val + # should save u here # + # save_u_path = os.path.join("/data2/datasets/diffsim/neus/exp/hand_test/womask_sphere_reverse_value/other_saved", "sdf_values.npy") + # np.save(save_u_path, u) # + # print(f"u saved to {save_u_path}") + return u + + +def extract_geometry(bound_min, bound_max, resolution, threshold, query_func): + print('threshold: {}'.format(threshold)) + + ## using maching cubes ### + u = extract_fields(bound_min, bound_max, resolution, query_func) + vertices, triangles = mcubes.marching_cubes(u, threshold) # grid sdf and marching cubes # + b_max_np = bound_max.detach().cpu().numpy() + b_min_np = bound_min.detach().cpu().numpy() + + vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + ### using maching cubes ### + + ### using marching tets ### + # vertices, triangles = extract_fields_from_tets(bound_min, bound_max, resolution, query_func) + # vertices = vertices.detach().cpu().numpy() + # triangles = triangles.detach().cpu().numpy() + ### using marching tets ### + + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles + +def extract_geometry_tets(bound_min, bound_max, resolution, threshold, query_func, def_func=None, selector=False): + # print('threshold: {}'.format(threshold)) + + ### using maching cubes ### + # u = extract_fields(bound_min, bound_max, resolution, query_func) + # vertices, triangles = mcubes.marching_cubes(u, threshold) # grid sdf and marching cubes # + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + ### using maching cubes ### + + ## + ### using marching tets ### fiels from tets ## + if selector: + vertices, triangles, tet_sdf_values, GT_verts, GT_faces = extract_fields_from_tets_selector(bound_min, bound_max, resolution, query_func, def_func=def_func) + else: + vertices, 
triangles, tet_sdf_values, GT_verts, GT_faces = extract_fields_from_tets(bound_min, bound_max, resolution, query_func, def_func=def_func) + # vertices = vertices.detach().cpu().numpy() + # triangles = triangles.detach().cpu().numpy() + ### using marching tets ### + + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + # + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles, tet_sdf_values, GT_verts, GT_faces + + +### sample pdfs ### +def sample_pdf(bins, weights, n_samples, det=False): + # This implementation is from NeRF + # Get pdf + weights = weights + 1e-5 # prevent nans + pdf = weights / torch.sum(weights, -1, keepdim=True) + cdf = torch.cumsum(pdf, -1) + cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) + # Take uniform samples + if det: + u = torch.linspace(0. + 0.5 / n_samples, 1. - 0.5 / n_samples, steps=n_samples) + u = u.expand(list(cdf.shape[:-1]) + [n_samples]) + else: + u = torch.rand(list(cdf.shape[:-1]) + [n_samples]) + + # Invert CDF # invert cdf # + u = u.contiguous() + inds = torch.searchsorted(cdf, u, right=True) + below = torch.max(torch.zeros_like(inds - 1), inds - 1) + above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds) + inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2) + + matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] + cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) + bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) + + denom = (cdf_g[..., 1] - cdf_g[..., 0]) + denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom) + t = (u - cdf_g[..., 0]) / denom + samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) + + return samples + + +def load_GT_vertices(GT_meshes_folder): + tot_meshes_fns = os.listdir(GT_meshes_folder) + tot_meshes_fns = [fn for fn in tot_meshes_fns if fn.endswith(".obj")] + tot_mesh_verts = [] + tot_mesh_faces = [] + n_tot_verts = 0 + for fn in tot_meshes_fns: + cur_mesh_fn = os.path.join(GT_meshes_folder, fn) + obj_mesh = trimesh.load(cur_mesh_fn, process=False) + # obj_mesh.remove_degenerate_faces(height=1e-06) + + verts_obj = np.array(obj_mesh.vertices) + faces_obj = np.array(obj_mesh.faces) + + tot_mesh_verts.append(verts_obj) + tot_mesh_faces.append(faces_obj + n_tot_verts) + n_tot_verts += verts_obj.shape[0] + + # tot_mesh_faces.append(faces_obj) + tot_mesh_verts = np.concatenate(tot_mesh_verts, axis=0) + tot_mesh_faces = np.concatenate(tot_mesh_faces, axis=0) + return tot_mesh_verts, tot_mesh_faces + + +class NeuSRenderer: + def __init__(self, + nerf, + sdf_network, + deviation_network, + color_network, + n_samples, + n_importance, + n_outside, + up_sample_steps, + perturb): + self.nerf = nerf # + self.sdf_network = sdf_network + self.deviation_network = deviation_network + self.color_network = color_network + self.n_samples = n_samples + self.n_importance = n_importance + self.n_outside = n_outside + self.up_sample_steps = up_sample_steps + self.perturb = perturb + + GT_meshes_folder = "/home/xueyi/diffsim/DiffHand/assets/hand" + if not os.path.exists(GT_meshes_folder): + GT_meshes_folder = "/data/xueyi/diffsim/DiffHand/assets/hand" + self.mesh_vertices, self.mesh_faces = load_GT_vertices(GT_meshes_folder=GT_meshes_folder) + maxx_pts = 25. + minn_pts = -15. 
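        # NOTE (illustrative comment, not part of the original patch): the lines below
        # normalize the loaded ground-truth hand meshes into roughly the unit cube using
        # the hard-coded bounds above (minn_pts = -15, maxx_pts = 25) and then build a
        # pysdf query object so GT signed distances can be evaluated at arbitrary points.
        # A minimal standalone usage sketch of pysdf (variable names here are hypothetical):
        #     from pysdf import SDF
        #     f = SDF(mesh_vertices, mesh_faces)   # (N, 3) float vertices, (M, 3) int faces
        #     d = f(query_points)                  # signed distances for a (K, 3) point array
        # The resulting callable is stored as self.gt_sdf and is only queried later in
        # render() when use_gt_sdf=True.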
+ self.mesh_vertices = (self.mesh_vertices - minn_pts) / (maxx_pts - minn_pts) + f = SDF(self.mesh_vertices, self.mesh_faces) + self.gt_sdf = f ## a unite sphere or box + + self.minn_pts = 0 + self.maxx_pts = 1. + + # self.minn_pts = -1.5 # + # self.maxx_pts = 1.5 # + self.bkg_pts = ... # TODO + self.cur_fr_bkg_pts_defs = ... # TODO: set the cur_bkg_pts_defs for each frame # + self.dist_interp_thres = ... # TODO: set the cur_bkg_pts_defs # + + self.bending_network = ... # TODO: add the bending network # + self.use_bending_network = ... # TODO: set the property # + self.use_delta_bending = ... # TODO + self.prev_sdf_network = ... # TODO + self.use_selector = False + self.timestep_to_passive_mesh = ... # TODO + self.timestep_to_active_mesh = ... # TODO + + + + def deform_pts(self, pts, pts_ts=0, update_tot_def=True): # deform pts # + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + + if isinstance(self.bending_network, list): + pts_offsets = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + if isinstance(cur_bending_network, fields.BendingNetwork): + for cur_pts_ts in range(pts_ts, -1, -1): + cur_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else cur_pts_exp, input_pts_ts=cur_pts_ts) + elif isinstance(cur_bending_network, fields.BendingNetworkRigidTrans): + cur_pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + raise ValueError('Encountered with unexpected bending network class...') + pts_offsets.append(cur_pts_exp - pts_exp) + pts_offsets = torch.stack(pts_offsets, dim=0) + pts_offsets = torch.sum(pts_offsets, dim=0) + pts_exp = pts_exp + pts_offsets + # for cur_pts_ts in range(pts_ts, -1, -1): + # if isinstance(self.bending_network, list): # pts ts # + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + if isinstance(self.bending_network, list): # prev sdf network # + pts_offsets = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + pts_offsets.append(bended_pts_exp - pts_exp) + pts_offsets = torch.stack(pts_offsets, dim=0) + pts_offsets = torch.sum(pts_offsets, dim=0) + pts_exp = pts_exp + pts_offsets + else: + pts_exp = self.bending_network(pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, 
min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + + def deform_pts_with_selector(self, pts, pts_ts=0, update_tot_def=True): # deform pts # + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + if isinstance(self.bending_network, list): + bended_pts = [] + queries_sdfs_selector = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + if cur_bending_network.use_opt_rigid_translations: + bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + else: + # bended_pts_exp = pts_exp.clone() + if i_obj == 1 and pts_ts == 0: + bended_pts_exp = pts_exp + elif i_obj == 1: + for cur_pts_ts in range(pts_ts, 0, -1): ### before 0 ### + if isinstance(cur_bending_network, fields.BendingNetwork): + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts) + elif isinstance(cur_bending_network, fields.BendingNetworkForceForward) or isinstance(cur_bending_network, fields.BendingNetworkRigidTransForward): + # input_pts, input_pts_ts, timestep_to_passive_mesh, act_sdf_net=None, details=None, special_loss_return=False + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_passive_mesh=self.timestep_to_passive_mesh) + elif isinstance(cur_bending_network, fields.BendingNetworkForceFieldForward): + # input_pts, input_pts_ts, timestep_to_passive_mesh, passive_sdf_net, details=None, special_loss_return=False + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1]) + elif isinstance(cur_bending_network, fields.BendingNetworkActiveForceFieldForward): + # active_bending_net, active_sdf_net, + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(cur_bending_network, fields.BendingNetworkActiveForceFieldForwardV2): + # active_bending_net, active_sdf_net, + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(cur_bending_network, fields.BendingNetworkActiveForceFieldForwardV3): + # active_bending_net, active_sdf_net, + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(cur_bending_network, fields.BendingNetworkActiveForceFieldForwardV4): + # active_bending_net, active_sdf_net, + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, 
input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(cur_bending_network, fields.BendingNetworkActiveForceFieldForwardV5): + # active_bending_net, active_sdf_net, + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(cur_bending_network, fields.BendingNetworkActiveForceFieldForwardV6): + # active_bending_net, active_sdf_net, + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(cur_bending_network, fields.BendingNetworkActiveForceFieldForwardV7): + # active_bending_net, active_sdf_net, + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(cur_bending_network, fields.BendingNetworkActiveForceFieldForwardV8): + # active_bending_net, active_sdf_net, + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0], update_tot_def=update_tot_def) + elif isinstance(cur_bending_network, fields.BendingNetworkActiveForceFieldForwardV9): + # active_bending_net, active_sdf_net, + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0], update_tot_def=update_tot_def) + else: + raise ValueError(f"Unrecognized bending network type: {type(cur_bending_network)}") + else: + for cur_pts_ts in range(pts_ts, -1, -1): + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts) + # _, cur_bended_pts_selecotr = self.query_pts_sdf_fn_for_selector(bended_pts_exp) + _, cur_bended_pts_selecotr = self.query_pts_sdf_fn_for_selector_ndelta(bended_pts_exp, i_net=i_obj) + bended_pts.append(bended_pts_exp) + queries_sdfs_selector.append(cur_bended_pts_selecotr) + bended_pts = torch.stack(bended_pts, dim=1) # nn_pts x 2 x 3 for bended pts # # bended_pts # + queries_sdfs_selector = torch.stack(queries_sdfs_selector, dim=1) # nn_pts x 2 + # queries_sdfs_selector = (queries_sdfs_selector.sum(dim=1) > 0.5).float().long() + #### get the final sdf_selector from queries_sdfs_selector #### + # 
sdf_selector = queries_sdfs_selector[:, -1] + # neg_neg = ((queries_sdfs_selector[:, 0] == 0).float() + (queries_sdfs_selector[:, -1] == 1).float()) > 1.5 #### both inside of the object + sdf_selector = 1 - queries_sdfs_selector[:, 0] + # neg_neg + # sdf_selector = queries_sdfs_selector + # delta_sdf, sdf_selector = self.query_pts_sdf_fn_for_selector(pts_exp) + bended_pts = batched_index_select(values=bended_pts, indices=sdf_selector.unsqueeze(1), dim=1).squeeze(1) # nn_pts x 3 # + # print(f"bended_pts: {bended_pts.size()}, pts_exp: {pts_exp.size()}") + # pts_exp = bended_pts.squeeze(1) + pts_exp = bended_pts + # for cur_pts_ts in range(pts_ts, -1, -1): + # if isinstance(self.bending_network, list): + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + if isinstance(self.bending_network, list): # prev sdf network # + # pts_offsets = [] + bended_pts = [] + queries_sdfs_selector = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + # pts_offsets.append(bended_pts_exp - pts_exp) + _, cur_bended_pts_selecotr = self.query_pts_sdf_fn_for_selector(bended_pts_exp) + bended_pts.append(bended_pts_exp) + queries_sdfs_selector.append(cur_bended_pts_selecotr) + bended_pts = torch.stack(bended_pts, dim=1) # nn_pts x 2 x 3 for bended pts # + queries_sdfs_selector = torch.stack(queries_sdfs_selector, dim=1) # nn_pts x 2 + # queries_sdfs_selector = (queries_sdfs_selector.sum(dim=1) > 0.5).float().long() + sdf_selector = queries_sdfs_selector[:, -1] + # sdf_selector = queries_sdfs_selector + + + # delta_sdf, sdf_selector = self.query_pts_sdf_fn_for_selector(pts_exp) + bended_pts = batched_index_select(values=bended_pts, indices=sdf_selector.unsqueeze(1), dim=1).squeeze(1) # nn_pts x 3 # + # print(f"bended_pts: {bended_pts.size()}, pts_exp: {pts_exp.size()}") + pts_exp = bended_pts.squeeze(1) + + # pts_offsets = torch.stack(pts_offsets, dim=0) + # pts_offsets = torch.sum(pts_offsets, dim=0) + # pts_exp = pts_exp + pts_offsets + else: + pts_exp = self.bending_network(pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts, sdf_selector + else: + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + + def deform_pts_passive(self, pts, 
pts_ts=0): + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + if pts_ts > 0: + for cur_pts_ts in range(pts_ts, 0, -1): + # if isinstance(self.bending_network, list): + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + if isinstance(self.bending_network[-1], fields.BendingNetwork): + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts) + elif isinstance(self.bending_network[-1], fields.BendingNetworkForceForward) or isinstance(self.bending_network[-1], fields.BendingNetworkRigidTransForward): + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_passive_mesh=self.timestep_to_passive_mesh) + elif isinstance(self.bending_network[-1], fields.BendingNetworkForceFieldForward): + # input_pts, input_pts_ts, timestep_to_passive_mesh, passive_sdf_net, details=None, special_loss_return=False + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1]) + elif isinstance(self.bending_network[-1], fields.BendingNetworkActiveForceFieldForward): + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(self.bending_network[-1], fields.BendingNetworkActiveForceFieldForwardV2): + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(self.bending_network[-1], fields.BendingNetworkActiveForceFieldForwardV3): + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(self.bending_network[-1], fields.BendingNetworkActiveForceFieldForwardV4): + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(self.bending_network[-1], fields.BendingNetworkActiveForceFieldForwardV5): + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(self.bending_network[-1], fields.BendingNetworkActiveForceFieldForwardV6): + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(self.bending_network[-1], fields.BendingNetworkActiveForceFieldForwardV7): + 
pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(self.bending_network[-1], fields.BendingNetworkActiveForceFieldForwardV8): + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + elif isinstance(self.bending_network[-1], fields.BendingNetworkActiveForceFieldForwardV9): + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts, timestep_to_active_mesh=self.timestep_to_active_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, passive_sdf_net=self.sdf_network[1], active_bending_net=self.bending_network[0], active_sdf_net=self.sdf_network[0]) + else: + raise ValueError(f"Unrecognized bending network type: {type(self.bending_network[-1])}") + # pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=cur_pts_ts) + else: + # if isinstance(self.bending_network, list): + # pts_offsets = [] + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + # pts_offsets.append(bended_pts_exp - pts_exp) + # pts_offsets = torch.stack(pts_offsets, dim=0) + # pts_offsets = torch.sum(pts_offsets, dim=0) + # pts_exp = pts_exp + pts_offsets + # else: + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + # delta mesh as passive mesh # + def query_pts_sdf_fn_for_selector(self, pts): + # for negative + # 1) inside the current mesh but outside the previous mesh ---> negative sdf for this field but positive for another field + # 2) negative in thie field and also negative in the previous field ---> + # 2) for positive values of this current field ---> + cur_sdf = self.sdf_network.sdf(pts) + prev_sdf = self.prev_sdf_network.sdf(pts) + neg_neg = ((cur_sdf < 0.).float() + (prev_sdf < 0.).float()) > 1.5 + neg_pos = ((cur_sdf < 0.).float() + (prev_sdf >= 0.).float()) > 1.5 + + neg_weq_pos = ((cur_sdf <= 0.).float() + 
(prev_sdf > 0.).float()) > 1.5 + + pos_neg = ((cur_sdf >= 0.).float() + (prev_sdf < 0.).float()) > 1.5 + pos_pos = ((cur_sdf >= 0.).float() + (prev_sdf >= 0.).float()) > 1.5 + res_sdf = torch.zeros_like(cur_sdf) + res_sdf[neg_neg] = 1. # + res_sdf[neg_pos] = cur_sdf[neg_pos] + res_sdf[pos_neg] = cur_sdf[pos_neg] + + # inside the residual mesh -> must be neg and pos + res_sdf_selector = torch.zeros_like(cur_sdf).long() # + # res_sdf_selector[neg_pos] = 1 # is the residual mesh + res_sdf_selector[neg_weq_pos] = 1 + # res_sdf_selector[] + + cat_cur_prev_sdf = torch.stack( + [cur_sdf, prev_sdf], dim=-1 + ) + minn_cur_prev_sdf, _ = torch.min(cat_cur_prev_sdf, dim=-1) + res_sdf[pos_pos] = minn_cur_prev_sdf[pos_pos] + + return res_sdf, res_sdf_selector + + def query_pts_sdf_fn_for_selector_ndelta(self, pts, i_net): + # for negative + # 1) inside the current mesh but outside the previous mesh ---> negative sdf for this field but positive for another field + # 2) negative in thie field and also negative in the previous field ---> + # 2) for positive values of this current field ---> + passive_sdf = self.sdf_network[i_net].sdf(pts).squeeze(-1) + passive_sdf_selector = torch.zeros_like(passive_sdf).long() + passive_sdf_selector[passive_sdf <= 0.] = 1. + return passive_sdf, passive_sdf_selector + + cur_sdf = self.sdf_network.sdf(pts) + prev_sdf = self.prev_sdf_network.sdf(pts) + neg_neg = ((cur_sdf < 0.).float() + (prev_sdf < 0.).float()) > 1.5 + neg_pos = ((cur_sdf < 0.).float() + (prev_sdf >= 0.).float()) > 1.5 + + neg_weq_pos = ((cur_sdf <= 0.).float() + (prev_sdf > 0.).float()) > 1.5 + + pos_neg = ((cur_sdf >= 0.).float() + (prev_sdf < 0.).float()) > 1.5 + pos_pos = ((cur_sdf >= 0.).float() + (prev_sdf >= 0.).float()) > 1.5 + res_sdf = torch.zeros_like(cur_sdf) + res_sdf[neg_neg] = 1. 
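        # NOTE (descriptive comment, not part of the original patch): the masks above
        # enumerate the sign combinations of the current and previous SDF fields, and the
        # res_sdf assignments around this point compose a residual field from them:
        #   neg_neg  (inside both)            -> res_sdf set to a positive placeholder (1.)
        #   neg_pos / pos_neg (inside one)    -> keep the current field's value
        #   pos_pos  (outside both)           -> take the minimum of the two fields
        # neg_weq_pos marks points inside (or on) the current mesh but strictly outside the
        # previous one, i.e. the newly added residual part; it drives res_sdf_selector below.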
# + res_sdf[neg_pos] = cur_sdf[neg_pos] + res_sdf[pos_neg] = cur_sdf[pos_neg] + + # inside the residual mesh -> must be neg and pos + res_sdf_selector = torch.zeros_like(cur_sdf).long() # + # res_sdf_selector[neg_pos] = 1 # is the residual mesh + res_sdf_selector[neg_weq_pos] = 1 + # res_sdf_selector[] + + cat_cur_prev_sdf = torch.stack( + [cur_sdf, prev_sdf], dim=-1 + ) + minn_cur_prev_sdf, _ = torch.min(cat_cur_prev_sdf, dim=-1) + res_sdf[pos_pos] = minn_cur_prev_sdf[pos_pos] + + return res_sdf, res_sdf_selector + + + def query_func_sdf(self, pts): + if isinstance(self.sdf_network, list): + tot_sdf_values = [] + for i_obj, cur_sdf_network in enumerate(self.sdf_network): + cur_sdf_values = cur_sdf_network.sdf(pts) + tot_sdf_values.append(cur_sdf_values) + tot_sdf_values = torch.stack(tot_sdf_values, dim=-1) + tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + sdf = tot_sdf_values + else: + sdf = self.sdf_network.sdf(pts) + return sdf + + def query_func_sdf_passive(self, pts): + # if isinstance(self.sdf_network, list): + # tot_sdf_values = [] + # for i_obj, cur_sdf_network in enumerate(self.sdf_network): + # cur_sdf_values = cur_sdf_network.sdf(pts) + # tot_sdf_values.append(cur_sdf_values) + # tot_sdf_values = torch.stack(tot_sdf_values, dim=-1) + # tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + # sdf = tot_sdf_values + # else: + sdf = self.sdf_network[-1].sdf(pts) + + return sdf + + + def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None, pts_ts=0): + """ + Render background + """ + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + + # Section midpoints # + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # batch_size, n_samples, 3 # + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + dis_to_center = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).clip(1.0, 1e10) + pts = torch.cat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1) # batch_size, n_samples, 4 # + + dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3) + + pts = pts.reshape(-1, 3 + int(self.n_outside > 0)) ### deformed_pts ### + dirs = dirs.reshape(-1, 3) + + if self.use_selector: + tot_density, tot_sampled_color = [], [] + for i_nerf, cur_nerf in enumerate(nerf): + cur_density, cur_sampled_color = cur_nerf(pts, dirs) + tot_density.append(cur_density) + tot_sampled_color.append(cur_sampled_color) + tot_density = torch.stack(tot_density, dim=1) + tot_sampled_color = torch.stack(tot_sampled_color, dim=1) ### sampled colors + # print(f"tot_density: {tot_density.size()}, tot_sampled_color: {tot_sampled_color.size()}, sdf_selector: {sdf_selector.size()}") + density = batched_index_select(values=tot_density, indices=sdf_selector.unsqueeze(-1), dim=1).squeeze(1) + sampled_color = batched_index_select(values=tot_sampled_color, indices=sdf_selector.unsqueeze(-1), dim=1).squeeze(1) + else: + density, sampled_color = nerf(pts, dirs) + sampled_color = torch.sigmoid(sampled_color) + alpha = 1.0 - torch.exp(-F.softplus(density.reshape(batch_size, n_samples)) * dists) + alpha = alpha.reshape(batch_size, n_samples) + weights = alpha * 
torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1] + sampled_color = sampled_color.reshape(batch_size, n_samples, 3) + color = (weights[:, :, None] * sampled_color).sum(dim=1) + if background_rgb is not None: + color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True)) + + return { + 'color': color, + 'sampled_color': sampled_color, + 'alpha': alpha, + 'weights': weights, + } + + def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s, pts_ts=0): + """ + Up sampling give a fixed inv_s + """ + batch_size, n_samples = z_vals.shape + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False) + inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0) + sdf = sdf.reshape(batch_size, n_samples) + prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:] + prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:] + mid_sdf = (prev_sdf + next_sdf) * 0.5 + cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5) + + # ---------------------------------------------------------------------------------------------------------- + # Use min value of [ cos, prev_cos ] + # Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more + # robust when meeting situations like below: + # + # SDF + # ^ + # |\ -----x----... + # | \ / + # | x x + # |---\----/-------------> 0 level + # | \ / + # | \/ + # | + # ---------------------------------------------------------------------------------------------------------- + prev_cos_val = torch.cat([torch.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1) + cos_val = torch.stack([prev_cos_val, cos_val], dim=-1) + cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False) + cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere + + dist = (next_z_vals - prev_z_vals) + prev_esti_sdf = mid_sdf - cos_val * dist * 0.5 + next_esti_sdf = mid_sdf + cos_val * dist * 0.5 + prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s) + next_cdf = torch.sigmoid(next_esti_sdf * inv_s) + alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5) + weights = alpha * torch.cumprod( + torch.cat([torch.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + + z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach() + return z_samples + + def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, last=False, pts_ts=0): + batch_size, n_samples = z_vals.shape + _, n_importance = new_z_vals.shape + pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None] + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + z_vals = torch.cat([z_vals, new_z_vals], dim=-1) + z_vals, index = torch.sort(z_vals, dim=-1) + + if not last: + if isinstance(self.sdf_network, list): + tot_new_sdf = [] + for i_obj, cur_sdf_network in enumerate(self.sdf_network): + cur_new_sdf = cur_sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + tot_new_sdf.append(cur_new_sdf) + tot_new_sdf = torch.stack(tot_new_sdf, dim=-1) + new_sdf, _ = torch.min(tot_new_sdf, dim=-1) # + else: + new_sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + sdf = torch.cat([sdf, new_sdf], dim=-1) + xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1) + index = index.reshape(-1) + sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance) + + return z_vals, sdf + + + + def render_core(self, + rays_o, + rays_d, + z_vals, + sample_dist, + sdf_network, + deviation_network, + color_network, + background_alpha=None, + background_sampled_color=None, + background_rgb=None, + cos_anneal_ratio=0.0, + pts_ts=0): + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 # z_vals and dists * 0.5 # + + # Section midpoints + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3 + dirs = rays_d[:, None, :].expand(pts.shape) + + pts = pts.reshape(-1, 3) # pts, nn_ou + dirs = dirs.reshape(-1, 3) + + pts = (pts - self.minn_pts) / (self.maxx_pts - self.minn_pts) + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + if isinstance(sdf_network, list): + tot_sdf = [] + tot_feature_vector = [] + tot_obj_sel = [] + tot_gradients = [] + for i_obj, cur_sdf_network in enumerate(sdf_network): + cur_sdf_nn_output = cur_sdf_network(pts) + cur_sdf, cur_feature_vector = cur_sdf_nn_output[:, :1], cur_sdf_nn_output[:, 1:] + tot_sdf.append(cur_sdf) + tot_feature_vector.append(cur_feature_vector) + + gradients = cur_sdf_network.gradient(pts).squeeze() + tot_gradients.append(gradients) + tot_sdf = torch.stack(tot_sdf, dim=-1) + + # + if self.use_selector: + sdf = batched_index_select(tot_sdf, sdf_selector.unsqueeze(1).unsqueeze(1), dim=2).squeeze(-1) + obj_sel = sdf_selector.unsqueeze(1) + else: + sdf, obj_sel = torch.min(tot_sdf, dim=-1) + feature_vector = torch.stack(tot_feature_vector, dim=1) + + # batched_index_select + # print(f"before sel: {feature_vector.size()}, obj_sel: {obj_sel.size()}") + feature_vector = batched_index_select(values=feature_vector, indices=obj_sel, dim=1).squeeze(1) + + # feature_vector = feature_vector[obj_sel.unsqueeze(-1), :].squeeze(1) + # print(f"after sel: {feature_vector.size()}") + tot_gradients = 
torch.stack(tot_gradients, dim=1) + # gradients = tot_gradients[obj_sel.unsqueeze(-1)].squeeze(1) + gradients = batched_index_select(values=tot_gradients, indices=obj_sel, dim=1).squeeze(1) + # print(f"gradients: {gradients.size()}, tot_gradients: {tot_gradients.size()}") + + else: + sdf_nn_output = sdf_network(pts) + sdf = sdf_nn_output[:, :1] + feature_vector = sdf_nn_output[:, 1:] + gradients = sdf_network.gradient(pts).squeeze() + + if self.use_selector: + tot_sampled_color = [] + for i_color_net, cur_color_network in enumerate(color_network): + cur_sampled_color = cur_color_network(pts, gradients, dirs, feature_vector) # .reshape(batch_size, n_samples, 3) + tot_sampled_color.append(cur_sampled_color) + # print(f"tot_density: {tot_density.size()}, tot_sampled_color: {tot_sampled_color.size()}, sdf_selector: {sdf_selector.size()}") + tot_sampled_color = torch.stack(tot_sampled_color, dim=1) + sampled_color = batched_index_select(values=tot_sampled_color, indices=sdf_selector.unsqueeze(-1), dim=1).squeeze(1).reshape(batch_size, n_samples, 3) + else: + sampled_color = color_network(pts, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3) + + if self.use_selector and isinstance(deviation_network, list): + tot_inv_s = [] + for i_dev_net, cur_deviation_network in enumerate(deviation_network): + cur_inv_s = cur_deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6) + tot_inv_s.append(cur_inv_s) + tot_inv_s = torch.stack(tot_inv_s, dim=1) + inv_s = batched_index_select(values=tot_inv_s, indices=sdf_selector.unsqueeze(-1), dim=1).squeeze(1) + # inv_s = + else: + # deviation network # + inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6) # Single parameter + inv_s = inv_s.expand(batch_size * n_samples, 1) + + true_cos = (dirs * gradients).sum(-1, keepdim=True) + + # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes + # the cos value "not dead" at the beginning training iterations, for better convergence. + iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) + + F.relu(-true_cos) * cos_anneal_ratio) # always non-positive + + # Estimate signed distances at section points + estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5 + estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5 + + prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s) + next_cdf = torch.sigmoid(estimated_next_sdf * inv_s) + + p = prev_cdf - next_cdf + c = prev_cdf + + alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0) + + pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(batch_size, n_samples) + inside_sphere = (pts_norm < 1.0).float().detach() + relax_inside_sphere = (pts_norm < 1.2).float().detach() + + # Render with background + if background_alpha is not None: + alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere) + alpha = torch.cat([alpha, background_alpha[:, n_samples:]], dim=-1) + sampled_color = sampled_color * inside_sphere[:, :, None] +\ + background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None] + sampled_color = torch.cat([sampled_color, background_sampled_color[:, n_samples:]], dim=1) + + weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + weights_sum = weights.sum(dim=-1, keepdim=True) + + color = (sampled_color * weights[:, :, None]).sum(dim=1) + if background_rgb is not None: # Fixed background, usually black + color = color + background_rgb * (1.0 - weights_sum) + + # Eikonal loss + gradient_error = (torch.linalg.norm(gradients.reshape(batch_size, n_samples, 3), ord=2, + dim=-1) - 1.0) ** 2 + gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5) + + return { + 'color': color, + 'sdf': sdf, + 'dists': dists, + 'gradients': gradients.reshape(batch_size, n_samples, 3), + 's_val': 1.0 / inv_s, + 'mid_z_vals': mid_z_vals, + 'weights': weights, + 'cdf': c.reshape(batch_size, n_samples), + 'gradient_error': gradient_error, + 'inside_sphere': inside_sphere + } + + def per_sdf_query(self, pts): + tot_sdfs = [] + for i_sdf_net, cur_sdf_network in enumerate(self.sdf_network): + cur_sdf_value = cur_sdf_network.sdf(pts).squeeze(-1) + tot_sdfs.append(cur_sdf_value) + tot_sdfs = torch.stack(tot_sdfs, dim=1) + return tot_sdfs + + + def render(self, rays_o, rays_d, near, far, pts_ts=0, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0, use_gt_sdf=False): + batch_size = len(rays_o) + sample_dist = 2.0 / self.n_samples # in a unit sphere # # Assuming the region of interest is a unit sphere + z_vals = torch.linspace(0.0, 1.0, self.n_samples) # linspace # + z_vals = near + (far - near) * z_vals[None, :] + + z_vals_outside = None + if self.n_outside > 0: + z_vals_outside = torch.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside) + + n_samples = self.n_samples + perturb = self.perturb + + if perturb_overwrite >= 0: + perturb = perturb_overwrite + if perturb > 0: + t_rand = (torch.rand([batch_size, 1]) - 0.5) + z_vals = z_vals + t_rand * 2.0 / self.n_samples + + if self.n_outside > 0: # z values output # n_outside # + mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1]) + upper = torch.cat([mids, z_vals_outside[..., -1:]], -1) + lower = torch.cat([z_vals_outside[..., :1], mids], -1) + t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]]) + z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand + + if self.n_outside > 0: + z_vals_outside = far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_samples + + background_alpha = None + background_sampled_color = None + + # Up sample + if self.n_importance > 0: + with torch.no_grad(): + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] + + pts = (pts - self.minn_pts) / (self.maxx_pts - self.minn_pts) + # sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, self.n_samples) + # gt_sdf # + + # + # pts = ((pts - xyz_min) / (xyz_max - xyz_min)).flip((-1,)) * 2 - 1 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) # give nthe pts + + pts_exp = pts.reshape(-1, 3) + # minn_pts, _ = torch.min(pts_exp, dim=0) + # maxx_pts, _ = torch.max(pts_exp, dim=0) # deformation field (not a rigid one) -> the meshes # + # print(f"minn_pts: {minn_pts}, maxx_pts: {maxx_pts}") + + # pts_to_near = pts - near.unsqueeze(1) + # maxx_pts = 1.5; minn_pts = -1.5 + # # maxx_pts = 3; minn_pts = -3 + # # maxx_pts = 1; minn_pts = -1 + # pts_exp = (pts_exp - minn_pts) / (maxx_pts - minn_pts) + + ## render and iamges #### + # if use_gt_sdf: + # ### use the GT sdf field #### + # # print(f"Using gt sdf 
:") + # sdf = self.gt_sdf(pts_exp.reshape(-1, 3).detach().cpu().numpy()) + # sdf = torch.from_numpy(sdf).float().cuda() + # sdf = sdf.reshape(batch_size, self.n_samples) + # ### use the GT sdf field #### + # else: + # # pts_exp: (bsz x nn_s) x 3 -> (sdf_network) -> (bsz x nn_s) + # #### use the optimized sdf field #### + + # # sdf = self.sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + + if isinstance(self.sdf_network, list): + if self.use_selector: + tot_sdf_values = [] + for i_obj, cur_sdf_network in enumerate(self.sdf_network): + cur_sdf_values = cur_sdf_network.sdf(pts_exp).squeeze(-1) + tot_sdf_values.append(cur_sdf_values) + tot_sdf_values = torch.stack(tot_sdf_values, dim=1) + tot_sdf_values = batched_index_select(tot_sdf_values, indices=sdf_selector.unsqueeze(1), dim=1).squeeze(1) + sdf = tot_sdf_values.reshape(batch_size, self.n_samples) + else: + # tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + tot_sdf_values = [] + for i_obj, cur_sdf_network in enumerate(self.sdf_network): + cur_sdf_values = cur_sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + tot_sdf_values.append(cur_sdf_values) + tot_sdf_values = torch.stack(tot_sdf_values, dim=-1) + tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + sdf = tot_sdf_values + else: + sdf = self.sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + + #### use the optimized sdf field #### + + for i in range(self.up_sample_steps): + new_z_vals = self.up_sample(rays_o, + rays_d, + z_vals, + sdf, + self.n_importance // self.up_sample_steps, + 64 * 2**i, + pts_ts=pts_ts) + z_vals, sdf = self.cat_z_vals(rays_o, + rays_d, + z_vals, + new_z_vals, + sdf, + last=(i + 1 == self.up_sample_steps), + pts_ts=pts_ts) + + n_samples = self.n_samples + self.n_importance + + # Background model + if self.n_outside > 0: + z_vals_feed = torch.cat([z_vals, z_vals_outside], dim=-1) + z_vals_feed, _ = torch.sort(z_vals_feed, dim=-1) + ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf, pts_ts=pts_ts) + + background_sampled_color = ret_outside['sampled_color'] + background_alpha = ret_outside['alpha'] + + tot_sdfs = self.per_sdf_query(pts_exp) + + # Render core + ret_fine = self.render_core(rays_o, # + rays_d, + z_vals, + sample_dist, + self.sdf_network, + self.deviation_network, + self.color_network, + background_rgb=background_rgb, + background_alpha=background_alpha, + background_sampled_color=background_sampled_color, + cos_anneal_ratio=cos_anneal_ratio, + pts_ts=pts_ts) + + color_fine = ret_fine['color'] + weights = ret_fine['weights'] + weights_sum = weights.sum(dim=-1, keepdim=True) + gradients = ret_fine['gradients'] + s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True) + + return { + 'color_fine': color_fine, + 's_val': s_val, + 'cdf_fine': ret_fine['cdf'], + 'weight_sum': weights_sum, + 'weight_max': torch.max(weights, dim=-1, keepdim=True)[0], + 'gradients': gradients, + 'weights': weights, + 'gradient_error': ret_fine['gradient_error'], + 'inside_sphere': ret_fine['inside_sphere'], + 'tot_sdfs': tot_sdfs, + } + + + + def render_def(self, rays_o, rays_d, near, far, pts_ts=0, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0, use_gt_sdf=False, update_tot_def=True): + batch_size = len(rays_o) + # sample_dist = 2.0 / self.n_samples # in a unit sphere # # Assuming the region of interest is a unit sphere + z_vals = torch.linspace(0.0, 1.0, self.n_samples) + z_vals = near + (far - near) * 
z_vals[None, :] + + z_vals_outside = None + if self.n_outside > 0: + z_vals_outside = torch.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside) + + n_samples = self.n_samples + perturb = self.perturb + + if perturb_overwrite >= 0: + perturb = perturb_overwrite + if perturb > 0: + t_rand = (torch.rand([batch_size, 1]) - 0.5) + z_vals = z_vals + t_rand * 2.0 / self.n_samples + + if self.n_outside > 0: # z values output # n_outside # + mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1]) + upper = torch.cat([mids, z_vals_outside[..., -1:]], -1) + lower = torch.cat([z_vals_outside[..., :1], mids], -1) + t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]]) + z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand + + if self.n_outside > 0: + z_vals_outside = far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_samples + + background_alpha = None + background_sampled_color = None + + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] + + pts = (pts - self.minn_pts) / (self.maxx_pts - self.minn_pts) + # sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, self.n_samples) + # gt_sdf # + + # + # pts = ((pts - xyz_min) / (xyz_max - xyz_min)).flip((-1,)) * 2 - 1 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts, update_tot_def=update_tot_def) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts, update_tot_def=update_tot_def) # give nthe pts + + return { + 'defed_pts': pts + } + # + + + def extract_fields_from_tets_selector_self(self, bound_min, bound_max, resolution, i_ts, passive=False): + # load tet via resolution # + # scale them via bounds # + # extract the geometry # + # /home/xueyi/gen/DeepMetaHandles/data/tets/100_compress.npz # strange # + device = bound_min.device + # if resolution in [64, 70, 80, 90, 100]: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{resolution}_compress.npz" + # else: + tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + if not os.path.exists(tet_fn): + tet_fn = f"/data/xueyi/NeuS/data/tets/{100}_compress.npz" + tets = np.load(tet_fn) + verts = torch.from_numpy(tets['vertices']).float().to(device) # verts positions + indices = torch.from_numpy(tets['tets']).long().to(device) # .to(self.device) + # split # + # verts; verts; # + minn_verts, _ = torch.min(verts, dim=0) + maxx_verts, _ = torch.max(verts, dim=0) # (3, ) # exporting the + # scale_verts = maxx_verts - minn_verts + scale_bounds = bound_max - bound_min # scale bounds # + + ### scale the vertices ### + scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + # scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + scaled_verts = scaled_verts * 2. - 1. # init the sdf filed viathe tet mesh vertices and the sdf values ## + # scaled_verts = (scaled_verts * scale_bounds.unsqueeze(0)) + bound_min.unsqueeze(0) ## the scaled verts ### + + # scaled_verts = scaled_verts - scale_bounds.unsqueeze(0) / 2. # + # scaled_verts = scaled_verts - bound_min.unsqueeze(0) - scale_bounds.unsqueeze(0) / 2. 
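+        # Note on the step below (descriptive comment; the behaviour itself is unchanged):
+        # the SDF is queried over the tet-grid vertices in chunks of N ** 3 points rather than
+        # in one pass, presumably to bound peak memory. Each chunk is first deformed with
+        # `deform_pts_with_selector`, which also returns a per-point selector index; in the
+        # non-passive branch every per-object SDF network is evaluated and `batched_index_select`
+        # keeps, for each point, the value from the network the selector picked, while the
+        # passive branch queries `self.sdf_network[1]` directly.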
+ + sdf_values = [] + N = 64 + query_bundles = N ** 3 ### N^3 + query_NNs = scaled_verts.size(0) // query_bundles + if query_NNs * query_bundles < scaled_verts.size(0): + query_NNs += 1 + for i_query in range(query_NNs): + cur_bundle_st = i_query * query_bundles + cur_bundle_ed = (i_query + 1) * query_bundles + cur_bundle_ed = min(cur_bundle_ed, scaled_verts.size(0)) + cur_query_pts = scaled_verts[cur_bundle_st: cur_bundle_ed] + # if def_func is not None: + cur_query_pts, sdf_selector = self.deform_pts_with_selector(cur_query_pts, pts_ts=i_ts) + # cur_query_pts, _ + # cur_query_vals = query_func(cur_query_pts) + + + + if passive: + cur_query_vals = self.sdf_network[1].sdf(cur_query_pts) # .squeeze(-1) + else: + tot_sdf_values = [] + for i_obj, cur_sdf_network in enumerate(self.sdf_network): + cur_sdf_values = cur_sdf_network.sdf(cur_query_pts).squeeze(-1) + tot_sdf_values.append(cur_sdf_values) + tot_sdf_values = torch.stack(tot_sdf_values, dim=1) + tot_sdf_values = batched_index_select(tot_sdf_values, indices=sdf_selector.unsqueeze(1), dim=1).squeeze(1) + cur_query_vals = tot_sdf_values.unsqueeze(1) + # sdf = tot_sdf_values.reshape(batch_size, self.n_samples) + # for i_obj, + sdf_values.append(cur_query_vals) + sdf_values = torch.cat(sdf_values, dim=0) + # print(f"queryed sdf values: {sdf_values.size()}") # + + gt_sdf_fn = "/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy" + if not os.path.exists(gt_sdf_fn): + gt_sdf_fn = "/data/xueyi/NeuS/data/100_sdf_values.npy" + GT_sdf_values = np.load(gt_sdf_fn, allow_pickle=True) + + GT_sdf_values = torch.from_numpy(GT_sdf_values).float().to(device) + + # intrinsic, tet values, pts values, sdf network # + triangle_table, num_triangles_table, base_tet_edges, v_id = create_mt_variable(device) + tet_table, num_tets_table = create_tetmesh_variables(device) + + sdf_values = sdf_values.squeeze(-1) # how the rendering # + + # print(f"GT_sdf_values: {GT_sdf_values.size()}, sdf_values: {sdf_values.size()}, scaled_verts: {scaled_verts.size()}") + # print(f"scaled_verts: {scaled_verts.size()}, ") + # pos_nx3, sdf_n, tet_fx4, triangle_table, num_triangles_table, base_tet_edges, v_id, + # return_tet_mesh=False, ori_v=None, num_tets_table=None, tet_table=None): + # marching_tets_tetmesh ## + verts, faces, tet_verts, tets = marching_tets_tetmesh(scaled_verts, sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + ### use the GT sdf values for the marching tets ### + GT_verts, GT_faces, GT_tet_verts, GT_tets = marching_tets_tetmesh(scaled_verts, GT_sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + + # print(f"After tet marching with verts: {verts.size()}, faces: {faces.size()}") + return verts, faces, sdf_values, GT_verts, GT_faces # verts, faces # + + + def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0): + return extract_geometry(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts), + query_func=lambda pts: -self.query_func_sdf(pts) + ) + + # if self.deform_pts_with_selector: + # pts = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + def extract_geometry_tets(self, bound_min, bound_max, resolution, pts_ts=0, threshold=0.0, wdef=False): + if wdef: + return extract_geometry_tets(bound_min, 
# extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + def_func=lambda pts: self.deform_pts(pts, pts_ts=pts_ts) if not self.use_selector else self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts), + selector=True) + else: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts) + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + selector=True + ) + + def extract_geometry_tets_passive(self, bound_min, bound_max, resolution, pts_ts=0, threshold=0.0, wdef=False): + if wdef: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.query_func_sdf_passive(pts), # lambda pts: -self.sdf_network.sdf(pts), + def_func=lambda pts: self.deform_pts_passive(pts, pts_ts=pts_ts), + selector=False + ) + # return extract_geometry_tets(bound_min, # extract geometry # + # bound_max, + # resolution=resolution, + # threshold=threshold, + # query_func=lambda pts: -self.query_func_sdf_passive(pts), # lambda pts: -self.sdf_network.sdf(pts), + # def_func=lambda pts: self.deform_pts(pts, pts_ts=pts_ts) if not self.use_selector else self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts), + # selector=True) + else: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts) + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + selector=False + ) diff --git a/models/renderer_def_multi_objs_rigidtrans_forward.py b/models/renderer_def_multi_objs_rigidtrans_forward.py new file mode 100644 index 0000000000000000000000000000000000000000..19af2398bea5d0709c0edd2a3ce1c53cc99f076f --- /dev/null +++ b/models/renderer_def_multi_objs_rigidtrans_forward.py @@ -0,0 +1,1603 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +import logging +import mcubes +from icecream import ic +import os + +import trimesh +from pysdf import SDF + +import models.fields as fields + +from uni_rep.rep_3d.dmtet import marching_tets_tetmesh, create_tetmesh_variables + +def batched_index_select(values, indices, dim = 1): + value_dims = values.shape[(dim + 1):] + values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices)) + indices = indices[(..., *((None,) * len(value_dims)))] + indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims) + value_expand_len = len(indices_shape) - (dim + 1) + values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)] + + value_expand_shape = [-1] * len(values.shape) + expand_slice = slice(dim, (dim + value_expand_len)) + value_expand_shape[expand_slice] = indices.shape[expand_slice] + values = values.expand(*value_expand_shape) + + dim += value_expand_len + return values.gather(dim, indices) + + +def create_mt_variable(device): + triangle_table = torch.tensor( + [ + [-1, -1, -1, -1, -1, -1], + [1, 0, 2, -1, -1, -1], + [4, 0, 3, -1, -1, -1], + [1, 4, 2, 1, 3, 4], + [3, 1, 5, -1, -1, -1], + [2, 3, 0, 2, 5, 3], + [1, 4, 0, 1, 5, 4], + [4, 2, 5, -1, -1, -1], + [4, 5, 2, -1, -1, -1], + [4, 1, 0, 4, 5, 1], + [3, 2, 0, 3, 5, 2], + [1, 3, 5, -1, -1, -1], + [4, 1, 2, 4, 3, 1], + [3, 0, 4, -1, -1, -1], + [2, 0, 
1, -1, -1, -1], + [-1, -1, -1, -1, -1, -1] + ], dtype=torch.long, device=device) + + num_triangles_table = torch.tensor([0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long, device=device) + base_tet_edges = torch.tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long, device=device) + v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=device)) + return triangle_table, num_triangles_table, base_tet_edges, v_id + + + +def extract_fields_from_tets(bound_min, bound_max, resolution, query_func, def_func=None): + # load tet via resolution # + # scale them via bounds # + # extract the geometry # + # /home/xueyi/gen/DeepMetaHandles/data/tets/100_compress.npz # strange # + device = bound_min.device + # if resolution in [64, 70, 80, 90, 100]: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{resolution}_compress.npz" + # else: + tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + tets = np.load(tet_fn) + verts = torch.from_numpy(tets['vertices']).float().to(device) # verts positions + indices = torch.from_numpy(tets['tets']).long().to(device) # .to(self.device) + # split # + # verts; verts; # + minn_verts, _ = torch.min(verts, dim=0) + maxx_verts, _ = torch.max(verts, dim=0) # (3, ) # exporting the + # scale_verts = maxx_verts - minn_verts + scale_bounds = bound_max - bound_min # scale bounds # + + ### scale the vertices ### + scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + # scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + scaled_verts = scaled_verts * 2. - 1. # init the sdf filed viathe tet mesh vertices and the sdf values ## + # scaled_verts = (scaled_verts * scale_bounds.unsqueeze(0)) + bound_min.unsqueeze(0) ## the scaled verts ### + + # scaled_verts = scaled_verts - scale_bounds.unsqueeze(0) / 2. # + # scaled_verts = scaled_verts - bound_min.unsqueeze(0) - scale_bounds.unsqueeze(0) / 2. 
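+    # The tet-grid vertices are normalized to [-1, 1] above so that they match the coordinate
+    # range the SDF networks expect (the same `pts * 2 - 1` convention applied to ray samples in
+    # the renderer). The loop below then evaluates the SDF in chunks of N ** 3 vertices,
+    # optionally deforming each chunk with `def_func` first, so the whole grid never has to be
+    # pushed through the network in a single forward pass.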
+ + sdf_values = [] + N = 64 + query_bundles = N ** 3 ### N^3 + query_NNs = scaled_verts.size(0) // query_bundles + if query_NNs * query_bundles < scaled_verts.size(0): + query_NNs += 1 + for i_query in range(query_NNs): + cur_bundle_st = i_query * query_bundles + cur_bundle_ed = (i_query + 1) * query_bundles + cur_bundle_ed = min(cur_bundle_ed, scaled_verts.size(0)) + cur_query_pts = scaled_verts[cur_bundle_st: cur_bundle_ed] + if def_func is not None: + # cur_query_pts = def_func(cur_query_pts) + cur_query_pts, _ = def_func(cur_query_pts) + cur_query_vals = query_func(cur_query_pts) + sdf_values.append(cur_query_vals) + sdf_values = torch.cat(sdf_values, dim=0) + # print(f"queryed sdf values: {sdf_values.size()}") # + + GT_sdf_values = np.load("/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy", allow_pickle=True) + GT_sdf_values = torch.from_numpy(GT_sdf_values).float().to(device) + + # intrinsic, tet values, pts values, sdf network # + triangle_table, num_triangles_table, base_tet_edges, v_id = create_mt_variable(device) + tet_table, num_tets_table = create_tetmesh_variables(device) + + sdf_values = sdf_values.squeeze(-1) # how the rendering # + + # print(f"GT_sdf_values: {GT_sdf_values.size()}, sdf_values: {sdf_values.size()}, scaled_verts: {scaled_verts.size()}") + # print(f"scaled_verts: {scaled_verts.size()}, ") + # pos_nx3, sdf_n, tet_fx4, triangle_table, num_triangles_table, base_tet_edges, v_id, + # return_tet_mesh=False, ori_v=None, num_tets_table=None, tet_table=None): + # marching_tets_tetmesh ## + verts, faces, tet_verts, tets = marching_tets_tetmesh(scaled_verts, sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + ### use the GT sdf values for the marching tets ### + GT_verts, GT_faces, GT_tet_verts, GT_tets = marching_tets_tetmesh(scaled_verts, GT_sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + + # print(f"After tet marching with verts: {verts.size()}, faces: {faces.size()}") + return verts, faces, sdf_values, GT_verts, GT_faces # verts, faces # + + +def extract_fields_from_tets_selector(bound_min, bound_max, resolution, query_func, def_func=None): + # load tet via resolution # + # scale them via bounds # + # extract the geometry # + # /home/xueyi/gen/DeepMetaHandles/data/tets/100_compress.npz # strange # + device = bound_min.device + # if resolution in [64, 70, 80, 90, 100]: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{resolution}_compress.npz" + # else: + tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + tets = np.load(tet_fn) + verts = torch.from_numpy(tets['vertices']).float().to(device) # verts positions + indices = torch.from_numpy(tets['tets']).long().to(device) # .to(self.device) + # split # + # verts; verts; # + minn_verts, _ = torch.min(verts, dim=0) + maxx_verts, _ = torch.max(verts, dim=0) # (3, ) # exporting the + # scale_verts = maxx_verts - minn_verts + scale_bounds = bound_max - bound_min # scale bounds # + + ### scale the vertices ### + scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + # scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + scaled_verts = scaled_verts * 2. - 1. 
# init the sdf filed viathe tet mesh vertices and the sdf values ## + # scaled_verts = (scaled_verts * scale_bounds.unsqueeze(0)) + bound_min.unsqueeze(0) ## the scaled verts ### + + # scaled_verts = scaled_verts - scale_bounds.unsqueeze(0) / 2. # + # scaled_verts = scaled_verts - bound_min.unsqueeze(0) - scale_bounds.unsqueeze(0) / 2. + + sdf_values = [] + N = 64 + query_bundles = N ** 3 ### N^3 + query_NNs = scaled_verts.size(0) // query_bundles + if query_NNs * query_bundles < scaled_verts.size(0): + query_NNs += 1 + for i_query in range(query_NNs): + cur_bundle_st = i_query * query_bundles + cur_bundle_ed = (i_query + 1) * query_bundles + cur_bundle_ed = min(cur_bundle_ed, scaled_verts.size(0)) + cur_query_pts = scaled_verts[cur_bundle_st: cur_bundle_ed] + if def_func is not None: + # cur_query_pts = def_func(cur_query_pts) + cur_query_pts, _ = def_func(cur_query_pts) + cur_query_vals = query_func(cur_query_pts) + sdf_values.append(cur_query_vals) + sdf_values = torch.cat(sdf_values, dim=0) + # print(f"queryed sdf values: {sdf_values.size()}") # + + GT_sdf_values = np.load("/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy", allow_pickle=True) + GT_sdf_values = torch.from_numpy(GT_sdf_values).float().to(device) + + # intrinsic, tet values, pts values, sdf network # + triangle_table, num_triangles_table, base_tet_edges, v_id = create_mt_variable(device) + tet_table, num_tets_table = create_tetmesh_variables(device) + + sdf_values = sdf_values.squeeze(-1) # how the rendering # + + # print(f"GT_sdf_values: {GT_sdf_values.size()}, sdf_values: {sdf_values.size()}, scaled_verts: {scaled_verts.size()}") + # print(f"scaled_verts: {scaled_verts.size()}, ") + # pos_nx3, sdf_n, tet_fx4, triangle_table, num_triangles_table, base_tet_edges, v_id, + # return_tet_mesh=False, ori_v=None, num_tets_table=None, tet_table=None): + # marching_tets_tetmesh ## + verts, faces, tet_verts, tets = marching_tets_tetmesh(scaled_verts, sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + ### use the GT sdf values for the marching tets ### + GT_verts, GT_faces, GT_tet_verts, GT_tets = marching_tets_tetmesh(scaled_verts, GT_sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + + # print(f"After tet marching with verts: {verts.size()}, faces: {faces.size()}") + return verts, faces, sdf_values, GT_verts, GT_faces # verts, faces # + + +def extract_fields(bound_min, bound_max, resolution, query_func): + N = 64 + X = torch.linspace(bound_min[0], bound_max[0], resolution).split(N) + Y = torch.linspace(bound_min[1], bound_max[1], resolution).split(N) + Z = torch.linspace(bound_min[2], bound_max[2], resolution).split(N) + + u = np.zeros([resolution, resolution, resolution], dtype=np.float32) + with torch.no_grad(): + for xi, xs in enumerate(X): + for yi, ys in enumerate(Y): + for zi, zs in enumerate(Z): + xx, yy, zz = torch.meshgrid(xs, ys, zs) + pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1) + val = query_func(pts).reshape(len(xs), len(ys), len(zs)).detach().cpu().numpy() + u[xi * N: xi * N + len(xs), yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val + # should save u here # + # save_u_path = os.path.join("/data2/datasets/diffsim/neus/exp/hand_test/womask_sphere_reverse_value/other_saved", "sdf_values.npy") + # 
np.save(save_u_path, u) + # print(f"u saved to {save_u_path}") + return u + + +def extract_geometry(bound_min, bound_max, resolution, threshold, query_func): + print('threshold: {}'.format(threshold)) + + ## using maching cubes ### + u = extract_fields(bound_min, bound_max, resolution, query_func) + vertices, triangles = mcubes.marching_cubes(u, threshold) # grid sdf and marching cubes # + b_max_np = bound_max.detach().cpu().numpy() + b_min_np = bound_min.detach().cpu().numpy() + + vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + ### using maching cubes ### + + ### using marching tets ### + # vertices, triangles = extract_fields_from_tets(bound_min, bound_max, resolution, query_func) + # vertices = vertices.detach().cpu().numpy() + # triangles = triangles.detach().cpu().numpy() + ### using marching tets ### + + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles + +def extract_geometry_tets(bound_min, bound_max, resolution, threshold, query_func, def_func=None): + # print('threshold: {}'.format(threshold)) + + ### using maching cubes ### + # u = extract_fields(bound_min, bound_max, resolution, query_func) + # vertices, triangles = mcubes.marching_cubes(u, threshold) # grid sdf and marching cubes # + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + ### using maching cubes ### + + ## + ### using marching tets ### fiels from tets ## + vertices, triangles, tet_sdf_values, GT_verts, GT_faces = extract_fields_from_tets(bound_min, bound_max, resolution, query_func, def_func=def_func) + # vertices = vertices.detach().cpu().numpy() + # triangles = triangles.detach().cpu().numpy() + ### using marching tets ### + + # b_max_np = bound_max.detach().cpu().numpy() + # b_min_np = bound_min.detach().cpu().numpy() + # + + # vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] + return vertices, triangles, tet_sdf_values, GT_verts, GT_faces + + +def sample_pdf(bins, weights, n_samples, det=False): + # This implementation is from NeRF + # Get pdf + weights = weights + 1e-5 # prevent nans + pdf = weights / torch.sum(weights, -1, keepdim=True) + cdf = torch.cumsum(pdf, -1) + cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) + # Take uniform samples + if det: + u = torch.linspace(0. + 0.5 / n_samples, 1. 
- 0.5 / n_samples, steps=n_samples) + u = u.expand(list(cdf.shape[:-1]) + [n_samples]) + else: + u = torch.rand(list(cdf.shape[:-1]) + [n_samples]) + + # Invert CDF # invert cdf # + u = u.contiguous() + inds = torch.searchsorted(cdf, u, right=True) + below = torch.max(torch.zeros_like(inds - 1), inds - 1) + above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds) + inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2) + + matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]] + cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g) + bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g) + + denom = (cdf_g[..., 1] - cdf_g[..., 0]) + denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom) + t = (u - cdf_g[..., 0]) / denom + samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) + + return samples + + +def load_GT_vertices(GT_meshes_folder): + tot_meshes_fns = os.listdir(GT_meshes_folder) + tot_meshes_fns = [fn for fn in tot_meshes_fns if fn.endswith(".obj")] + tot_mesh_verts = [] + tot_mesh_faces = [] + n_tot_verts = 0 + for fn in tot_meshes_fns: + cur_mesh_fn = os.path.join(GT_meshes_folder, fn) + obj_mesh = trimesh.load(cur_mesh_fn, process=False) + # obj_mesh.remove_degenerate_faces(height=1e-06) + + verts_obj = np.array(obj_mesh.vertices) + faces_obj = np.array(obj_mesh.faces) + + tot_mesh_verts.append(verts_obj) + tot_mesh_faces.append(faces_obj + n_tot_verts) + n_tot_verts += verts_obj.shape[0] + + # tot_mesh_faces.append(faces_obj) + tot_mesh_verts = np.concatenate(tot_mesh_verts, axis=0) + tot_mesh_faces = np.concatenate(tot_mesh_faces, axis=0) + return tot_mesh_verts, tot_mesh_faces + + +class NeuSRenderer: + def __init__(self, + nerf, + sdf_network, + deviation_network, + color_network, + n_samples, + n_importance, + n_outside, + up_sample_steps, + perturb): + self.nerf = nerf # multiple sdf networks and deviation networks and xxx # + self.sdf_network = sdf_network + self.deviation_network = deviation_network + self.color_network = color_network + self.n_samples = n_samples + self.n_importance = n_importance + self.n_outside = n_outside + self.up_sample_steps = up_sample_steps + self.perturb = perturb + + GT_meshes_folder = "/home/xueyi/diffsim/DiffHand/assets/hand" + self.mesh_vertices, self.mesh_faces = load_GT_vertices(GT_meshes_folder=GT_meshes_folder) + maxx_pts = 25. + minn_pts = -15. + self.mesh_vertices = (self.mesh_vertices - minn_pts) / (maxx_pts - minn_pts) + f = SDF(self.mesh_vertices, self.mesh_faces) + self.gt_sdf = f ## a unite sphere or box + + self.minn_pts = 0 + self.maxx_pts = 1. + + # self.minn_pts = -1.5 # gorudn-truth states with the deformation -> update the sdf value fiedl + # self.maxx_pts = 1.5 # + self.bkg_pts = ... # TODO: the bkg pts # bkg_pts; # bkg_pts_defs # + self.cur_fr_bkg_pts_defs = ... # TODO: set the cur_bkg_pts_defs for each frame # + self.dist_interp_thres = ... # TODO: set the cur_bkg_pts_defs # + + self.bending_network = ... # TODO: add the bending network # + self.use_bending_network = ... # TODO: set the property # + self.use_delta_bending = ... # TODO + self.prev_sdf_network = ... # TODO + self.use_selector = False + # self.bending_network_rigidtrans_forward = ... ## TODO: set the rigidjtrans forward ### + # timestep_to_mesh, timestep_to_passive_mesh, bending_net, bending_net_passive, act_sdf_net, details=None, special_loss_return=False + self.timestep_to_mesh = ... ## TODO + self.timestep_to_passive_mesh = ... ### TODO + self.bending_net = ... 
## TODO + self.bending_net_passive = ... ### TODO + # self.act_sdf_net = ... ### TODO + self.bending_net_kinematic = ... ### TODO + self.time_to_act_joints = ... # ## TODO + # use bending network # + # two bending netwrok + # two sdf networks # deform pts kinematic # + + def deform_pts_kinematic(self, pts, pts_ts=0): # deform pts # + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + + if isinstance(self.bending_net_kinematic, list): + pts_offsets = [] # + for i_obj, cur_bending_network in enumerate(self.bending_net_kinematic): + if isinstance(cur_bending_network, fields.BendingNetwork): + for cur_pts_ts in range(pts_ts, -1, -1): + cur_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else cur_pts_exp, input_pts_ts=cur_pts_ts) + # elif isinstance(cur_bending_network, fields.BendingNetworkForward): + # cur_pts_exp = cur_bending_network(input_pts=pts_exp, input_pts_ts=cur_pts_ts, timestep_to_mesh=self.timestep_to_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, bending_net=self.bending_net, bending_net_passive=self.bending_net_passive, act_sdf_net=self.prev_sdf_network, details=None, special_loss_return=False) + elif isinstance(cur_bending_network, fields.BendingNetworkRigidTrans): + cur_pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + raise ValueError('Encountered with unexpected bending network class...') + pts_offsets.append(cur_pts_exp - pts_exp) + pts_offsets = torch.stack(pts_offsets, dim=0) + pts_offsets = torch.sum(pts_offsets, dim=0) + pts_exp = pts_exp + pts_offsets + # for cur_pts_ts in range(pts_ts, -1, -1): + # if isinstance(self.bending_network, list): # pts ts # + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + if isinstance(self.bending_net_kinematic, list): # prev sdf network # + pts_offsets = [] + for i_obj, cur_bending_network in enumerate(self.bending_net_kinematic): + bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + pts_offsets.append(bended_pts_exp - pts_exp) + pts_offsets = torch.stack(pts_offsets, dim=0) + pts_offsets = torch.sum(pts_offsets, dim=0) + pts_exp = pts_exp + pts_offsets + else: + pts_exp = self.bending_net_kinematic(pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / 
dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + + def deform_pts_kinematic_active(self, pts, pts_ts=0): # deform pts # + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + for cur_pts_ts in range(pts_ts, -1, -1): + pts_exp = self.bending_net(pts_exp, input_pts_ts=cur_pts_ts) + # if isinstance(self.bending_net_kinematic, list): + # pts_offsets = [] # + # for i_obj, cur_bending_network in enumerate(self.bending_net_kinematic): + # if isinstance(cur_bending_network, fields.BendingNetwork): + # for cur_pts_ts in range(pts_ts, -1, -1): + # cur_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else cur_pts_exp, input_pts_ts=cur_pts_ts) + # # elif isinstance(cur_bending_network, fields.BendingNetworkForward): + # # cur_pts_exp = cur_bending_network(input_pts=pts_exp, input_pts_ts=cur_pts_ts, timestep_to_mesh=self.timestep_to_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, bending_net=self.bending_net, bending_net_passive=self.bending_net_passive, act_sdf_net=self.prev_sdf_network, details=None, special_loss_return=False) + # elif isinstance(cur_bending_network, fields.BendingNetworkRigidTrans): + # cur_pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + # raise ValueError('Encountered with unexpected bending network class...') + # pts_offsets.append(cur_pts_exp - pts_exp) + # pts_offsets = torch.stack(pts_offsets, dim=0) + # pts_offsets = torch.sum(pts_offsets, dim=0) + # pts_exp = pts_exp + pts_offsets + # for cur_pts_ts in range(pts_ts, -1, -1): + # if isinstance(self.bending_network, list): # pts ts # + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + pts_exp = self.bending_net(pts_exp, input_pts_ts=pts_ts) + # if isinstance(self.bending_net_kinematic, list): # prev sdf network # + # pts_offsets = [] + # for i_obj, cur_bending_network in enumerate(self.bending_net_kinematic): + # bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + # pts_offsets.append(bended_pts_exp - pts_exp) + # pts_offsets = torch.stack(pts_offsets, dim=0) + # pts_offsets = torch.sum(pts_offsets, dim=0) + # pts_exp = pts_exp + pts_offsets + # else: + # pts_exp = self.bending_net_kinematic(pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + 
dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + # get the pts and render the pts # + # pts and the rendering pts # + def deform_pts(self, pts, pts_ts=0): # deform pts # + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + + if isinstance(self.bending_network, list): + pts_offsets = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + if isinstance(cur_bending_network, fields.BendingNetwork): + for cur_pts_ts in range(pts_ts, -1, -1): + cur_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else cur_pts_exp, input_pts_ts=cur_pts_ts) + elif isinstance(cur_bending_network, fields.BendingNetworkForward): + for cur_pts_ts in range(pts_ts-1, -1, -1): + cur_pts_exp = cur_bending_network(input_pts=pts_exp if cur_pts_ts == pts_ts else cur_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_mesh=self.timestep_to_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, bending_net=self.bending_net, bending_net_passive=self.bending_net_passive, act_sdf_net=self.prev_sdf_network, details=None, special_loss_return=False) + elif isinstance(cur_bending_network, fields.BendingNetworkForwardJointDyn): + # for cur_pts_ts in range(pts_ts-1, -1, -1): + for cur_pts_ts in range(pts_ts, 0, -1): + cur_pts_exp = cur_bending_network(input_pts=pts_exp if cur_pts_ts == pts_ts else cur_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_mesh=self.timestep_to_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_joints_pts=self.time_to_act_joints, bending_net=self.bending_net, bending_net_passive=self.bending_net_passive, act_sdf_net=self.prev_sdf_network, details=None, special_loss_return=False) + # elif isinstance(cur_bending_network, fields.BendingNetworkRigidTrans): + # cur_pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + raise ValueError('Encountered with unexpected bending network class...') + pts_offsets.append(cur_pts_exp - pts_exp) + pts_offsets = torch.stack(pts_offsets, dim=0) + pts_offsets = torch.sum(pts_offsets, dim=0) + pts_exp = pts_exp + pts_offsets + # for cur_pts_ts in range(pts_ts, -1, -1): + # if isinstance(self.bending_network, list): # pts ts # + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + if isinstance(self.bending_network, list): # prev sdf network # + pts_offsets = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + pts_offsets.append(bended_pts_exp - pts_exp) + pts_offsets = torch.stack(pts_offsets, dim=0) + pts_offsets = torch.sum(pts_offsets, dim=0) + pts_exp = pts_exp + pts_offsets + else: + pts_exp = self.bending_network(pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, 
nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + + def deform_pts_with_selector(self, pts, pts_ts=0): # deform pts # + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + if self.use_delta_bending: + if isinstance(self.bending_network, list): + bended_pts = [] + queries_sdfs_selector = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + # if cur_bending_network.use_opt_rigid_translations: + # bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + # else: + # # bended_pts_exp = pts_exp.clone() + # for cur_pts_ts in range(pts_ts, -1, -1): + # bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts) + + if isinstance(cur_bending_network, fields.BendingNetwork): + for cur_pts_ts in range(pts_ts, -1, -1): + bended_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts) + elif isinstance(cur_bending_network, fields.BendingNetworkForward): + for cur_pts_ts in range(pts_ts-1, -1, -1): + bended_pts_exp = cur_bending_network(input_pts=pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_mesh=self.timestep_to_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, bending_net=self.bending_net, bending_net_passive=self.bending_net_passive, act_sdf_net=self.prev_sdf_network, details=None, special_loss_return=False) + elif isinstance(cur_bending_network, fields.BendingNetworkForwardJointDyn): + # for cur_pts_ts in range(pts_ts-1, -1, -1): + for cur_pts_ts in range(pts_ts, 0, -1): + bended_pts_exp = cur_bending_network(input_pts=pts_exp if cur_pts_ts == pts_ts else bended_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_mesh=self.timestep_to_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_joints_pts=self.time_to_act_joints, bending_net=self.bending_net, bending_net_passive=self.bending_net_passive, act_sdf_net=self.prev_sdf_network, details=None, special_loss_return=False) + + if pts_ts == 0: + bended_pts_exp = pts_exp.clone() + _, cur_bended_pts_selecotr = self.query_pts_sdf_fn_for_selector(bended_pts_exp) + bended_pts.append(bended_pts_exp) + queries_sdfs_selector.append(cur_bended_pts_selecotr) + bended_pts = torch.stack(bended_pts, dim=1) # nn_pts x 2 x 3 for bended pts # + queries_sdfs_selector = torch.stack(queries_sdfs_selector, dim=1) # nn_pts x 2 + # queries_sdfs_selector = 
(queries_sdfs_selector.sum(dim=1) > 0.5).float().long() + sdf_selector = queries_sdfs_selector[:, -1] + # sdf_selector = queries_sdfs_selector + # delta_sdf, sdf_selector = self.query_pts_sdf_fn_for_selector(pts_exp) + # print(f"bended_pts: {bended_pts.size()}, sdf_selector: {sdf_selector.size()}, maxx_sdf_selector: {torch.max(sdf_selector)}, minn_sdf_selector: {torch.min(sdf_selector)}") + bended_pts = batched_index_select(values=bended_pts, indices=sdf_selector.unsqueeze(1), dim=1).squeeze(1) # nn_pts x 3 # + # print(f"bended_pts: {bended_pts.size()}, pts_exp: {pts_exp.size()}") + # pts_exp = bended_pts.squeeze(1) + pts_exp = bended_pts + + + # for cur_pts_ts in range(pts_ts, -1, -1): + # if isinstance(self.bending_network, list): + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + if isinstance(self.bending_network, list): # prev sdf network # + # pts_offsets = [] + bended_pts = [] + queries_sdfs_selector = [] + for i_obj, cur_bending_network in enumerate(self.bending_network): + bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + # pts_offsets.append(bended_pts_exp - pts_exp) + _, cur_bended_pts_selecotr = self.query_pts_sdf_fn_for_selector(bended_pts_exp) + bended_pts.append(bended_pts_exp) + queries_sdfs_selector.append(cur_bended_pts_selecotr) + bended_pts = torch.stack(bended_pts, dim=1) # nn_pts x 2 x 3 for bended pts # + queries_sdfs_selector = torch.stack(queries_sdfs_selector, dim=1) # nn_pts x 2 + # queries_sdfs_selector = (queries_sdfs_selector.sum(dim=1) > 0.5).float().long() + sdf_selector = queries_sdfs_selector[:, -1] + # sdf_selector = queries_sdfs_selector + + + # delta_sdf, sdf_selector = self.query_pts_sdf_fn_for_selector(pts_exp) + bended_pts = batched_index_select(values=bended_pts, indices=sdf_selector.unsqueeze(1), dim=1).squeeze(1) # nn_pts x 3 # + # print(f"bended_pts: {bended_pts.size()}, pts_exp: {pts_exp.size()}") + pts_exp = bended_pts.squeeze(1) + + # pts_offsets = torch.stack(pts_offsets, dim=0) + # pts_offsets = torch.sum(pts_offsets, dim=0) + # pts_exp = pts_exp + pts_offsets + else: + pts_exp = self.bending_network(pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts, sdf_selector + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres # + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + + def 
deform_pts_passive(self, pts, pts_ts=0): + + if self.use_bending_network: + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # pts_ts # + + + + if self.use_delta_bending: + cur_bending_network = self.bending_network[-1] + if isinstance(cur_bending_network, fields.BendingNetwork): + for cur_pts_ts in range(pts_ts, -1, -1): + cur_pts_exp = cur_bending_network(pts_exp if cur_pts_ts == pts_ts else cur_pts_exp, input_pts_ts=cur_pts_ts) + elif isinstance(cur_bending_network, fields.BendingNetworkForward): + for cur_pts_ts in range(pts_ts-1, -1, -1): + cur_pts_exp = cur_bending_network(input_pts=pts_exp if cur_pts_ts == pts_ts else cur_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_mesh=self.timestep_to_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, bending_net=self.bending_net, bending_net_passive=self.bending_net_passive, act_sdf_net=self.prev_sdf_network, details=None, special_loss_return=False) + elif isinstance(cur_bending_network, fields.BendingNetworkForwardJointDyn): + # for cur_pts_ts in range(pts_ts-1, -1, -1): + for cur_pts_ts in range(pts_ts, 0, -1): + cur_pts_exp = cur_bending_network(input_pts=pts_exp if cur_pts_ts == pts_ts else cur_pts_exp, input_pts_ts=cur_pts_ts, timestep_to_mesh=self.timestep_to_mesh, timestep_to_passive_mesh=self.timestep_to_passive_mesh, timestep_to_joints_pts=self.time_to_act_joints, bending_net=self.bending_net, bending_net_passive=self.bending_net_passive, act_sdf_net=self.prev_sdf_network, details=None, special_loss_return=False) + # elif isinstance(cur_bending_network, fields.BendingNetworkRigidTrans): + # cur_pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + else: + raise ValueError('Encountered with unexpected bending network class...') + # for cur_pts_ts in range(pts_ts, -1, -1): + # if isinstance(self.bending_network, list): + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # pts_exp = cur_bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # else: + # pts_exp = self.bending_network(pts_exp, input_pts_ts=cur_pts_ts) + # time_to_offset = { + # 0:[-0.03338804, 0.07566567, 0.0958022 ], + # 1:[-0.05909395, 0.05454276, 0.09974975], + # 2: [-0.07214502, -0.00118192, 0.09003166], + # 3: [-0.10040219, -0.01334709, 0.08493543], + # 4: [-0.10047092, -0.01264334, 0.05320398], + # 5: [-0.09152254, 0.00722668, 0.0101514 ], + # } + + if pts_ts > 0: + pts_exp = cur_pts_exp + else: + pts_exp = pts_exp + else: + # if isinstance(self.bending_network, list): + # pts_offsets = [] + # for i_obj, cur_bending_network in enumerate(self.bending_network): + # bended_pts_exp = cur_bending_network(pts_exp, input_pts_ts=pts_ts) + # pts_offsets.append(bended_pts_exp - pts_exp) + # pts_offsets = torch.stack(pts_offsets, dim=0) + # pts_offsets = torch.sum(pts_offsets, dim=0) + # pts_exp = pts_exp + pts_offsets + # else: + pts_exp = self.bending_network[-1](pts_exp, input_pts_ts=pts_ts) + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts + + # pts: nn_batch x nn_samples x 3 + if len(pts.size()) == 3: + nnb, nns = pts.size(0), pts.size(1) + pts_exp = pts.contiguous().view(nnb * nns, -1).contiguous() + else: + pts_exp = pts + # print(f"prior to deforming: {pts.size()}") + + dist_pts_to_bkg_pts = torch.sum( + (pts_exp.unsqueeze(1) - self.bkg_pts.unsqueeze(0)) ** 2, dim=-1 ## nn_pts_exp x nn_bkg_pts + ) + dist_mask = dist_pts_to_bkg_pts <= self.dist_interp_thres 
# + dist_mask_float = dist_mask.float() + + # dist_mask_float # + cur_fr_bkg_def_exp = self.cur_fr_bkg_pts_defs.unsqueeze(0).repeat(pts_exp.size(0), 1, 1).contiguous() + cur_fr_pts_def = torch.sum( + cur_fr_bkg_def_exp * dist_mask_float.unsqueeze(-1), dim=1 + ) + dist_mask_float_summ = torch.sum( + dist_mask_float, dim=1 + ) + dist_mask_float_summ = torch.clamp(dist_mask_float_summ, min=1) + cur_fr_pts_def = cur_fr_pts_def / dist_mask_float_summ.unsqueeze(-1) # bkg pts deformation # + pts_exp = pts_exp - cur_fr_pts_def + if len(pts.size()) == 3: + pts = pts_exp.contiguous().view(nnb, nns, -1).contiguous() + else: + pts = pts_exp + return pts # + + + def query_pts_sdf_fn_for_selector(self, pts): + # for negative + # 1) inside the current mesh but outside the previous mesh ---> negative sdf for this field but positive for another field + # 2) negative in thie field and also negative in the previous field ---> + # 2) for positive values of this current field ---> + # maxx_pts, _ = torch.max(pts, dim=0) + # minn_pts, _ = torch.min(pts, dim=0) + + cur_sdf = self.sdf_network.sdf(pts).squeeze(-1) + prev_sdf = self.prev_sdf_network.sdf(pts).squeeze(-1) + neg_neg = ((cur_sdf < 0.).float() + (prev_sdf < 0.).float()) > 1.5 + neg_pos = ((cur_sdf < 0.).float() + (prev_sdf >= 0.).float()) > 1.5 + + neg_weq_pos = ((cur_sdf <= 0.).float() + (prev_sdf > 0.).float()) > 1.5 + + pos_neg = ((cur_sdf >= 0.).float() + (prev_sdf < 0.).float()) > 1.5 + pos_pos = ((cur_sdf >= 0.).float() + (prev_sdf >= 0.).float()) > 1.5 + res_sdf = torch.zeros_like(cur_sdf) + + # print(f"res_sdf: {res_sdf.size()}, neg_neg: {neg_neg.size()}, pts: {pts.size()}, maxx_pts: {maxx_pts}, minn_pts: {minn_pts}") + # if torch.sum(neg_neg.float()).item() > 0.: + res_sdf[neg_neg] = 1. # + # if torch.sum(neg_pos.float()).item() > 0.: + res_sdf[neg_pos] = cur_sdf[neg_pos] + # if torch.sum(pos_neg.float()).item() > 0.: + res_sdf[pos_neg] = cur_sdf[pos_neg] + + # inside the residual mesh -> must be neg and pos + res_sdf_selector = torch.zeros_like(cur_sdf).long() # + # res_sdf_selector[neg_pos] = 1 # is the residual mesh + # if torch.sum(neg_weq_pos.float()).item() > 0.: + res_sdf_selector[neg_weq_pos] = 1 + # res_sdf_selector[] + + cat_cur_prev_sdf = torch.stack( + [cur_sdf, prev_sdf], dim=-1 + ) + minn_cur_prev_sdf, _ = torch.min(cat_cur_prev_sdf, dim=-1) + + if torch.sum(pos_pos.float()).item() > 0.: + res_sdf[pos_pos] = minn_cur_prev_sdf[pos_pos] + + return res_sdf, res_sdf_selector + + def query_func_sdf(self, pts): + # if isinstance(self.sdf_network, list): + # tot_sdf_values = [] + # for i_obj, cur_sdf_network in enumerate(self.sdf_network): + # cur_sdf_values = cur_sdf_network.sdf(pts) + # tot_sdf_values.append(cur_sdf_values) + # tot_sdf_values = torch.stack(tot_sdf_values, dim=-1) + # tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + # sdf = tot_sdf_values + # else: + # sdf = self.sdf_network.sdf(pts) + + + cur_sdf = self.sdf_network.sdf(pts) + prev_sdf = self.prev_sdf_network.sdf(pts) + neg_neg = ((cur_sdf < 0.).float() + (prev_sdf < 0.).float()) > 1.5 + neg_pos = ((cur_sdf < 0.).float() + (prev_sdf >= 0.).float()) > 1.5 + + neg_weq_pos = ((cur_sdf <= 0.).float() + (prev_sdf > 0.).float()) > 1.5 + + pos_neg = ((cur_sdf >= 0.).float() + (prev_sdf < 0.).float()) > 1.5 + pos_pos = ((cur_sdf >= 0.).float() + (prev_sdf >= 0.).float()) > 1.5 + res_sdf = torch.zeros_like(cur_sdf) + + # print(f"res_sdf: {res_sdf.size()}, neg_neg: {neg_neg.size()}, pts: {pts.size()}, maxx_pts: {maxx_pts}, minn_pts: {minn_pts}") + # if 
torch.sum(neg_neg.float()).item() > 0.: + res_sdf[neg_neg] = prev_sdf[neg_neg] + # if torch.sum(neg_pos.float()).item() > 0.: + res_sdf[neg_pos] = cur_sdf[neg_pos] + # if torch.sum(pos_neg.float()).item() > 0.: + res_sdf[pos_neg] = cur_sdf[pos_neg] + + # inside the residual mesh -> must be neg and pos + # res_sdf_selector = torch.zeros_like(cur_sdf).long() # + # res_sdf_selector[neg_pos] = 1 # is the residual mesh + # if torch.sum(neg_weq_pos.float()).item() > 0.: + # res_sdf_selector[neg_weq_pos] = 1 + # res_sdf_selector[] + + cat_cur_prev_sdf = torch.stack( + [cur_sdf, prev_sdf], dim=-1 + ) + minn_cur_prev_sdf, _ = torch.min(cat_cur_prev_sdf, dim=-1) + + # if torch.sum(pos_pos.float()).item() > 0.: + res_sdf[pos_pos] = minn_cur_prev_sdf[pos_pos] + + return res_sdf + + def query_func_active(self, pts): + # if isinstance(self.sdf_network, list): + # tot_sdf_values = [] + # for i_obj, cur_sdf_network in enumerate(self.sdf_network): + # cur_sdf_values = cur_sdf_network.sdf(pts) + # tot_sdf_values.append(cur_sdf_values) + # tot_sdf_values = torch.stack(tot_sdf_values, dim=-1) + # tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + # sdf = tot_sdf_values + # else: + sdf = self.prev_sdf_network.sdf(pts) + return sdf + + def query_func_sdf_passive(self, pts): + # if isinstance(self.sdf_network, list): + # tot_sdf_values = [] + # for i_obj, cur_sdf_network in enumerate(self.sdf_network): + # cur_sdf_values = cur_sdf_network.sdf(pts) + # tot_sdf_values.append(cur_sdf_values) + # tot_sdf_values = torch.stack(tot_sdf_values, dim=-1) + # tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + # sdf = tot_sdf_values + # else: + sdf = self.sdf_network[-1].sdf(pts) + + return sdf + + + def render_core_outside(self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None, pts_ts=0): + """ + Render background + """ + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 + + # Section midpoints # + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # batch_size, n_samples, 3 # + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + dis_to_center = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).clip(1.0, 1e10) + pts = torch.cat([pts / dis_to_center, 1.0 / dis_to_center], dim=-1) # batch_size, n_samples, 4 # + + dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3) + + pts = pts.reshape(-1, 3 + int(self.n_outside > 0)) + dirs = dirs.reshape(-1, 3) + + density, sampled_color = nerf(pts, dirs) + sampled_color = torch.sigmoid(sampled_color) + alpha = 1.0 - torch.exp(-F.softplus(density.reshape(batch_size, n_samples)) * dists) + alpha = alpha.reshape(batch_size, n_samples) + weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + sampled_color = sampled_color.reshape(batch_size, n_samples, 3) + color = (weights[:, :, None] * sampled_color).sum(dim=1) + if background_rgb is not None: + color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True)) + + return { + 'color': color, + 'sampled_color': sampled_color, + 'alpha': alpha, + 'weights': weights, + } + + def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s, pts_ts=0): + """ + Up sampling give a fixed inv_s + """ + batch_size, n_samples = z_vals.shape + pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False) + inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0) + sdf = sdf.reshape(batch_size, n_samples) + prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:] + prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:] + mid_sdf = (prev_sdf + next_sdf) * 0.5 + cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5) + + # ---------------------------------------------------------------------------------------------------------- + # Use min value of [ cos, prev_cos ] + # Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more + # robust when meeting situations like below: + # + # SDF + # ^ + # |\ -----x----... + # | \ / + # | x x + # |---\----/-------------> 0 level + # | \ / + # | \/ + # | + # ---------------------------------------------------------------------------------------------------------- + prev_cos_val = torch.cat([torch.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1) + cos_val = torch.stack([prev_cos_val, cos_val], dim=-1) + cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False) + cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere + + dist = (next_z_vals - prev_z_vals) + prev_esti_sdf = mid_sdf - cos_val * dist * 0.5 + next_esti_sdf = mid_sdf + cos_val * dist * 0.5 + prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s) + next_cdf = torch.sigmoid(next_esti_sdf * inv_s) + alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5) + weights = alpha * torch.cumprod( + torch.cat([torch.ones([batch_size, 1]), 1. 
- alpha + 1e-7], -1), -1)[:, :-1] + + z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach() + return z_samples + + def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, last=False, pts_ts=0): + batch_size, n_samples = z_vals.shape + _, n_importance = new_z_vals.shape + pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None] + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + z_vals = torch.cat([z_vals, new_z_vals], dim=-1) + z_vals, index = torch.sort(z_vals, dim=-1) + + if not last: + if isinstance(self.sdf_network, list): + tot_new_sdf = [] + for i_obj, cur_sdf_network in enumerate(self.sdf_network): + cur_new_sdf = cur_sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + tot_new_sdf.append(cur_new_sdf) + tot_new_sdf = torch.stack(tot_new_sdf, dim=-1) + new_sdf, _ = torch.min(tot_new_sdf, dim=-1) # + else: + if self.use_selector: + new_sdf_cur = self.sdf_network.sdf(pts.reshape(-1, 3)) # .reshape(batch_size, n_importance) + new_sdf_prev = self.prev_sdf_network.sdf(pts.reshape(-1, 3)) # .reshape(batch_size, n_importance) + new_sdf = torch.stack([new_sdf_prev, new_sdf_cur], dim=1) + new_sdf = batched_index_select(new_sdf, sdf_selector.unsqueeze(-1), dim=1).squeeze(1) + new_sdf = new_sdf.reshape(batch_size, n_importance) + else: + new_sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + sdf = torch.cat([sdf, new_sdf], dim=-1) + xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1) + index = index.reshape(-1) + sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance) + + return z_vals, sdf + + + + def render_core(self, + rays_o, + rays_d, + z_vals, + sample_dist, + sdf_network, + deviation_network, + color_network, + background_alpha=None, + background_sampled_color=None, + background_rgb=None, + cos_anneal_ratio=0.0, + pts_ts=0): + batch_size, n_samples = z_vals.shape + + # Section length + dists = z_vals[..., 1:] - z_vals[..., :-1] + dists = torch.cat([dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1) + mid_z_vals = z_vals + dists * 0.5 # z_vals and dists * 0.5 # + + # Section midpoints + pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None] # n_rays, n_samples, 3 + dirs = rays_d[:, None, :].expand(pts.shape) + + pts = pts.reshape(-1, 3) # pts, nn_ou + dirs = dirs.reshape(-1, 3) + + pts = (pts - self.minn_pts) / (self.maxx_pts - self.minn_pts) + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) + + if isinstance(sdf_network, list): + tot_sdf = [] + tot_feature_vector = [] + tot_obj_sel = [] + tot_gradients = [] + for i_obj, cur_sdf_network in enumerate(sdf_network): + cur_sdf_nn_output = cur_sdf_network(pts) + cur_sdf, cur_feature_vector = cur_sdf_nn_output[:, :1], cur_sdf_nn_output[:, 1:] + tot_sdf.append(cur_sdf) + tot_feature_vector.append(cur_feature_vector) + + gradients = cur_sdf_network.gradient(pts).squeeze() + tot_gradients.append(gradients) + tot_sdf = torch.stack(tot_sdf, dim=-1) + sdf, obj_sel = torch.min(tot_sdf, dim=-1) + feature_vector = torch.stack(tot_feature_vector, dim=1) + + # batched_index_select + # print(f"before sel: {feature_vector.size()}, obj_sel: 
{obj_sel.size()}") + feature_vector = batched_index_select(values=feature_vector, indices=obj_sel, dim=1).squeeze(1) + + + # feature_vector = feature_vector[obj_sel.unsqueeze(-1), :].squeeze(1) + # print(f"after sel: {feature_vector.size()}") + tot_gradients = torch.stack(tot_gradients, dim=1) + # gradients = tot_gradients[obj_sel.unsqueeze(-1)].squeeze(1) + gradients = batched_index_select(values=tot_gradients, indices=obj_sel, dim=1).squeeze(1) + # print(f"gradients: {gradients.size()}, tot_gradients: {tot_gradients.size()}") + + else: + # sdf_nn_output = sdf_network(pts) + # sdf = sdf_nn_output[:, :1] + # feature_vector = sdf_nn_output[:, 1:] + # gradients = sdf_network.gradient(pts).squeeze() + + if self.use_selector: + prev_sdf_nn_output = self.prev_sdf_network(pts) + prev_gradients = self.prev_sdf_network.gradient(pts).squeeze() + cur_sdf_nn_output = self.sdf_network(pts) + cur_gradients = self.sdf_network.gradient(pts).squeeze() + + sdf_nn_output = torch.stack([prev_sdf_nn_output, cur_sdf_nn_output], dim=1) + sdf_nn_output = batched_index_select(sdf_nn_output, sdf_selector.unsqueeze(-1), dim=1).squeeze(1) + + sdf = sdf_nn_output[:, :1] + feature_vector = sdf_nn_output[:, 1:] + + gradients = torch.stack([prev_gradients, cur_gradients], dim=1) + gradients = batched_index_select(gradients, sdf_selector.unsqueeze(-1), dim=1).squeeze(1) + else: + sdf_nn_output = sdf_network(pts) + sdf = sdf_nn_output[:, :1] + feature_vector = sdf_nn_output[:, 1:] + gradients = sdf_network.gradient(pts).squeeze() + # new_sdf_cur = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + # new_sdf_prev = self.prev_sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, n_importance) + # new_sdf = torch.stack([new_sdf_prev, new_sdf_cur], dim=1) + # new_sdf = batched_index_select(new_sdf, sdf_selector.unsqueeze(-1), dim=1).squeeze(1) + + sampled_color = color_network(pts, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3) + + # deviation network # + inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(1e-6, 1e6) # Single parameter + inv_s = inv_s.expand(batch_size * n_samples, 1) + + true_cos = (dirs * gradients).sum(-1, keepdim=True) + + # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes + # the cos value "not dead" at the beginning training iterations, for better convergence. 
+ iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) + + F.relu(-true_cos) * cos_anneal_ratio) # always non-positive + + # Estimate signed distances at section points + estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5 + estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5 + + prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s) + next_cdf = torch.sigmoid(estimated_next_sdf * inv_s) + + p = prev_cdf - next_cdf + c = prev_cdf + + alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0) + + pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(batch_size, n_samples) + inside_sphere = (pts_norm < 1.0).float().detach() + relax_inside_sphere = (pts_norm < 1.2).float().detach() + + # Render with background + if background_alpha is not None: + alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (1.0 - inside_sphere) + alpha = torch.cat([alpha, background_alpha[:, n_samples:]], dim=-1) + sampled_color = sampled_color * inside_sphere[:, :, None] +\ + background_sampled_color[:, :n_samples] * (1.0 - inside_sphere)[:, :, None] + sampled_color = torch.cat([sampled_color, background_sampled_color[:, n_samples:]], dim=1) + + weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1]), 1. - alpha + 1e-7], -1), -1)[:, :-1] + weights_sum = weights.sum(dim=-1, keepdim=True) + + color = (sampled_color * weights[:, :, None]).sum(dim=1) + if background_rgb is not None: # Fixed background, usually black + color = color + background_rgb * (1.0 - weights_sum) + + # Eikonal loss + gradient_error = (torch.linalg.norm(gradients.reshape(batch_size, n_samples, 3), ord=2, + dim=-1) - 1.0) ** 2 + gradient_error = (relax_inside_sphere * gradient_error).sum() / (relax_inside_sphere.sum() + 1e-5) + + return { + 'color': color, + 'sdf': sdf, + 'dists': dists, + 'gradients': gradients.reshape(batch_size, n_samples, 3), + 's_val': 1.0 / inv_s, + 'mid_z_vals': mid_z_vals, + 'weights': weights, + 'cdf': c.reshape(batch_size, n_samples), + 'gradient_error': gradient_error, + 'inside_sphere': inside_sphere + } + + def render(self, rays_o, rays_d, near, far, pts_ts=0, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0, use_gt_sdf=False): + batch_size = len(rays_o) + sample_dist = 2.0 / self.n_samples # in a unit sphere # # Assuming the region of interest is a unit sphere + z_vals = torch.linspace(0.0, 1.0, self.n_samples) # linspace # + z_vals = near + (far - near) * z_vals[None, :] + + z_vals_outside = None + if self.n_outside > 0: + z_vals_outside = torch.linspace(1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside) + + n_samples = self.n_samples + perturb = self.perturb + + if perturb_overwrite >= 0: + perturb = perturb_overwrite + if perturb > 0: + t_rand = (torch.rand([batch_size, 1]) - 0.5) + z_vals = z_vals + t_rand * 2.0 / self.n_samples + + if self.n_outside > 0: # z values output # n_outside # + mids = .5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1]) + upper = torch.cat([mids, z_vals_outside[..., -1:]], -1) + lower = torch.cat([z_vals_outside[..., :1], mids], -1) + t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]]) + z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand + + if self.n_outside > 0: + z_vals_outside = far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_samples + + background_alpha = None + background_sampled_color = None + + # Up sample + if self.n_importance > 0: + with torch.no_grad(): + pts = rays_o[:, None, :] + rays_d[:, None, :] * 
z_vals[..., :, None] + + pts = (pts - self.minn_pts) / (self.maxx_pts - self.minn_pts) + # sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(batch_size, self.n_samples) + # gt_sdf # + + # + # pts = ((pts - xyz_min) / (xyz_max - xyz_min)).flip((-1,)) * 2 - 1 + + # pts = pts.flip((-1,)) * 2 - 1 + pts = pts * 2 - 1 + + if self.use_selector: + pts, sdf_selector = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + else: + pts = self.deform_pts(pts=pts, pts_ts=pts_ts) # give nthe pts + + pts_exp = pts.reshape(-1, 3) + # minn_pts, _ = torch.min(pts_exp, dim=0) + # maxx_pts, _ = torch.max(pts_exp, dim=0) # deformation field (not a rigid one) -> the meshes # + # print(f"minn_pts: {minn_pts}, maxx_pts: {maxx_pts}") + + # pts_to_near = pts - near.unsqueeze(1) + # maxx_pts = 1.5; minn_pts = -1.5 + # # maxx_pts = 3; minn_pts = -3 + # # maxx_pts = 1; minn_pts = -1 + # pts_exp = (pts_exp - minn_pts) / (maxx_pts - minn_pts) + + ## render and iamges #### + if use_gt_sdf: + ### use the GT sdf field #### + # print(f"Using gt sdf :") + sdf = self.gt_sdf(pts_exp.reshape(-1, 3).detach().cpu().numpy()) + sdf = torch.from_numpy(sdf).float().cuda() + sdf = sdf.reshape(batch_size, self.n_samples) + ### use the GT sdf field #### + else: + # pts_exp: (bsz x nn_s) x 3 -> (sdf_network) -> (bsz x nn_s) + #### use the optimized sdf field #### + + # sdf = self.sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + + if isinstance(self.sdf_network, list): + tot_sdf_values = [] + for i_obj, cur_sdf_network in enumerate(self.sdf_network): + cur_sdf_values = cur_sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + tot_sdf_values.append(cur_sdf_values) + tot_sdf_values = torch.stack(tot_sdf_values, dim=-1) + tot_sdf_values, _ = torch.min(tot_sdf_values, dim=-1) # totsdf values # + sdf = tot_sdf_values + else: + # sdf = self.sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + + if self.use_selector: + prev_sdf = self.prev_sdf_network.sdf(pts_exp) # .reshape(batch_size, self.n_samples) + cur_sdf = self.sdf_network.sdf(pts_exp) # .reshape(batch_size, self.n_samples) + sdf = torch.stack([prev_sdf, cur_sdf], dim=1) + sdf = batched_index_select(sdf, indices=sdf_selector.unsqueeze(-1), dim=1).squeeze(1) + sdf = sdf.reshape(batch_size, self.n_samples) + else: + sdf = self.sdf_network.sdf(pts_exp).reshape(batch_size, self.n_samples) + + #### use the optimized sdf field #### + + for i in range(self.up_sample_steps): + new_z_vals = self.up_sample(rays_o, + rays_d, + z_vals, + sdf, + self.n_importance // self.up_sample_steps, + 64 * 2**i, + pts_ts=pts_ts) + z_vals, sdf = self.cat_z_vals(rays_o, + rays_d, + z_vals, + new_z_vals, + sdf, + last=(i + 1 == self.up_sample_steps), + pts_ts=pts_ts) + + n_samples = self.n_samples + self.n_importance + + # Background model + if self.n_outside > 0: + z_vals_feed = torch.cat([z_vals, z_vals_outside], dim=-1) + z_vals_feed, _ = torch.sort(z_vals_feed, dim=-1) + ret_outside = self.render_core_outside(rays_o, rays_d, z_vals_feed, sample_dist, self.nerf, pts_ts=pts_ts) + + background_sampled_color = ret_outside['sampled_color'] + background_alpha = ret_outside['alpha'] + + # Render core + ret_fine = self.render_core(rays_o, # + rays_d, + z_vals, + sample_dist, + self.sdf_network, + self.deviation_network, + self.color_network, + background_rgb=background_rgb, + background_alpha=background_alpha, + background_sampled_color=background_sampled_color, + cos_anneal_ratio=cos_anneal_ratio, + pts_ts=pts_ts) + + color_fine = ret_fine['color'] + weights = ret_fine['weights'] 
+ weights_sum = weights.sum(dim=-1, keepdim=True) + gradients = ret_fine['gradients'] + s_val = ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True) + + return { + 'color_fine': color_fine, + 's_val': s_val, + 'cdf_fine': ret_fine['cdf'], + 'weight_sum': weights_sum, + 'weight_max': torch.max(weights, dim=-1, keepdim=True)[0], + 'gradients': gradients, + 'weights': weights, + 'gradient_error': ret_fine['gradient_error'], + 'inside_sphere': ret_fine['inside_sphere'] + } + + # def + def extract_fields_from_tets_with_selector(self, bound_min, bound_max, resolution, pts_ts ): + # load tet via resolution # + # scale them via bounds # + # extract the geometry # + # /home/xueyi/gen/DeepMetaHandles/data/tets/100_compress.npz # strange # + device = bound_min.device + # if resolution in [64, 70, 80, 90, 100]: + # tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{resolution}_compress.npz" + # else: + tet_fn = f"/home/xueyi/gen/DeepMetaHandles/data/tets/{100}_compress.npz" + tets = np.load(tet_fn) + verts = torch.from_numpy(tets['vertices']).float().to(device) # verts positions + indices = torch.from_numpy(tets['tets']).long().to(device) # .to(self.device) + # split # + # verts; verts; # + minn_verts, _ = torch.min(verts, dim=0) + maxx_verts, _ = torch.max(verts, dim=0) # (3, ) # exporting the + # scale_verts = maxx_verts - minn_verts + scale_bounds = bound_max - bound_min # scale bounds # + + ### scale the vertices ### + scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + # scaled_verts = (verts - minn_verts.unsqueeze(0)) / (maxx_verts - minn_verts).unsqueeze(0) ### the maxx and minn verts scales ### + + scaled_verts = scaled_verts * 2. - 1. # init the sdf filed viathe tet mesh vertices and the sdf values ## + # scaled_verts = (scaled_verts * scale_bounds.unsqueeze(0)) + bound_min.unsqueeze(0) ## the scaled verts ### + + # scaled_verts = scaled_verts - scale_bounds.unsqueeze(0) / 2. # + # scaled_verts = scaled_verts - bound_min.unsqueeze(0) - scale_bounds.unsqueeze(0) / 2. 
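+ # Sketch of the per-vertex field selection used in the query loop below, assuming the
+ # batched_index_select helper picks vals[i, sel[i]] row by row along dim=1:
+ #   sel comes from deform_pts_with_selector, one 0/1 label per query point
+ #   (1 = point assigned to the current / residual SDF field by query_pts_sdf_fn_for_selector,
+ #    0 = keep the previous field), so stacking [prev_vals, cur_vals] on dim=1 and gathering
+ #   with sel yields a single SDF value per tet vertex before marching tets.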
+ + sdf_values = [] + N = 64 + query_bundles = N ** 3 ### N^3 + query_NNs = scaled_verts.size(0) // query_bundles + if query_NNs * query_bundles < scaled_verts.size(0): + query_NNs += 1 + for i_query in range(query_NNs): + cur_bundle_st = i_query * query_bundles + cur_bundle_ed = (i_query + 1) * query_bundles + cur_bundle_ed = min(cur_bundle_ed, scaled_verts.size(0)) + cur_query_pts = scaled_verts[cur_bundle_st: cur_bundle_ed] + # if def_func is not None: + cur_query_pts, sdf_selector = self.deform_pts_with_selector(pts=cur_query_pts, pts_ts=pts_ts) + + prev_query_vals = -self.prev_sdf_network.sdf(cur_query_pts) + cur_query_vals = -self.sdf_network.sdf(cur_query_pts) + cur_query_vals = torch.stack([prev_query_vals, cur_query_vals], dim=1) + cur_query_vals = batched_index_select(cur_query_vals, sdf_selector.unsqueeze(-1), dim=1).squeeze(1) + + # cur_query_vals = query_func(cur_query_pts) + sdf_values.append(cur_query_vals) + sdf_values = torch.cat(sdf_values, dim=0) + # print(f"queryed sdf values: {sdf_values.size()}") # + + GT_sdf_values = np.load("/home/xueyi/diffsim/DiffHand/assets/hand/100_sdf_values.npy", allow_pickle=True) + GT_sdf_values = torch.from_numpy(GT_sdf_values).float().to(device) + + # intrinsic, tet values, pts values, sdf network # + triangle_table, num_triangles_table, base_tet_edges, v_id = create_mt_variable(device) + tet_table, num_tets_table = create_tetmesh_variables(device) + + sdf_values = sdf_values.squeeze(-1) # how the rendering # + + # print(f"GT_sdf_values: {GT_sdf_values.size()}, sdf_values: {sdf_values.size()}, scaled_verts: {scaled_verts.size()}") + # print(f"scaled_verts: {scaled_verts.size()}, ") + # pos_nx3, sdf_n, tet_fx4, triangle_table, num_triangles_table, base_tet_edges, v_id, + # return_tet_mesh=False, ori_v=None, num_tets_table=None, tet_table=None): + # marching_tets_tetmesh ## + verts, faces, tet_verts, tets = marching_tets_tetmesh(scaled_verts, sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + ### use the GT sdf values for the marching tets ### + GT_verts, GT_faces, GT_tet_verts, GT_tets = marching_tets_tetmesh(scaled_verts, GT_sdf_values, indices, triangle_table, num_triangles_table, base_tet_edges, v_id, return_tet_mesh=True, ori_v=scaled_verts, num_tets_table=num_tets_table, tet_table=tet_table) + + # print(f"After tet marching with verts: {verts.size()}, faces: {faces.size()}") + return verts, faces, sdf_values, GT_verts, GT_faces # verts, faces # + + + def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0): + return extract_geometry(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts), + query_func=lambda pts: -self.query_func_sdf(pts) + ) + + # if self.deform_pts_with_selector: + # pts = self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts) + def extract_geometry_tets(self, bound_min, bound_max, resolution, pts_ts=0, threshold=0.0, wdef=False): + if wdef: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + def_func=lambda pts: self.deform_pts(pts, pts_ts=pts_ts) if not self.use_selector else self.deform_pts_with_selector(pts=pts, pts_ts=pts_ts)) + else: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + 
resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts) + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + ) + + + def extract_geometry_tets_kinematic(self, bound_min, bound_max, resolution, pts_ts=0, threshold=0.0, wdef=False): + if wdef: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + def_func=lambda pts: self.deform_pts_kinematic(pts, pts_ts=pts_ts)) + else: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts) + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + ) + + + def extract_geometry_tets_active(self, bound_min, bound_max, resolution, pts_ts=0, threshold=0.0, wdef=False): + if wdef: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.query_func_active(pts), # lambda pts: -self.sdf_network.sdf(pts), + def_func=lambda pts: self.deform_pts_kinematic_active(pts, pts_ts=pts_ts)) + else: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts) + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + ) + + def extract_geometry_tets_passive(self, bound_min, bound_max, resolution, pts_ts=0, threshold=0.0, wdef=False): + if wdef: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + query_func=lambda pts: -self.query_func_sdf_passive(pts), # lambda pts: -self.sdf_network.sdf(pts), + def_func=lambda pts: self.deform_pts_passive(pts, pts_ts=pts_ts)) + else: + return extract_geometry_tets(bound_min, # extract geometry # + bound_max, + resolution=resolution, + threshold=threshold, + # query_func=lambda pts: -self.sdf_network.sdf(pts) + query_func=lambda pts: -self.query_func_sdf(pts), # lambda pts: -self.sdf_network.sdf(pts), + ) diff --git a/models/test.js b/models/test.js new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pre-requirements.txt b/pre-requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..29a17c7570f2b5c75f815a79a40de3671fc821bb --- /dev/null +++ b/pre-requirements.txt @@ -0,0 +1,2 @@ +pip==23.3.2 +torch==2.2.0 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8ae7192367b668451604feac1a33d51d1eeb3ffb --- /dev/null +++ b/requirements.txt @@ -0,0 +1,27 @@ +-f https://download.pytorch.org/whl/cpu/torch_stable.html +-f https://data.pyg.org/whl/torch-2.2.0%2Bcpu.html +# pip==20.2.4 +torch==2.2.0 +# torchvision==0.13.1 +# torchaudio==0.12.1 +scipy +trimesh +icecream +tqdm +pyhocon +open3d +tensorboard + +# blobfile==2.0.1 +# manopth @ git+https://github.com/hassony2/manopth.git +# numpy==1.23.1 +# psutil==5.9.2 +# scikit-learn +# scipy==1.9.3 +# tensorboard +# tensorboardx +# tqdm +# trimesh +# clip +# chumpy +# opencv-python \ No newline at end of file diff --git a/scripts_demo/train_grab_pointset_points_dyn_s1.sh 
b/scripts_demo/train_grab_pointset_points_dyn_s1.sh new file mode 100644 index 0000000000000000000000000000000000000000..709358b0a259c0bcd9abfd3a63b533d1b9225c63 --- /dev/null +++ b/scripts_demo/train_grab_pointset_points_dyn_s1.sh @@ -0,0 +1,28 @@ + +export PYTHONPATH=. + + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + + +export mode="train_point_set" + +export conf=dyn_grab_pointset_points_dyn_s1.conf + +export conf_root="./confs_new" + + +export data_path="./data/102_grab_all_data.npy" +# bash scripts_new/train_grab_pointset_points_dyn_s1.sh + +export cuda_ids="0" + + +CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} --data_path=${data_path} + diff --git a/scripts_new/train_grab_mano.sh b/scripts_new/train_grab_mano.sh new file mode 100644 index 0000000000000000000000000000000000000000..ff5455fc555d94817b26c1950ad780f0fa824ead --- /dev/null +++ b/scripts_new/train_grab_mano.sh @@ -0,0 +1,26 @@ + +export PYTHONPATH=. + +export conf=wmask_refine_passive_rigidtrans_forward.conf +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + +export mode="train_dyn_mano_model" + + +export conf=dyn_grab_pointset_mano.conf + +export conf_root="./confs_new" + + + +# bash scripts_new/train_grab_mano.sh + +export cuda_ids="0" +# + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_mano_wreact.sh b/scripts_new/train_grab_mano_wreact.sh new file mode 100644 index 0000000000000000000000000000000000000000..9771ce9b23a73790ffddfd3e88c90a6a556410e3 --- /dev/null +++ b/scripts_new/train_grab_mano_wreact.sh @@ -0,0 +1,26 @@ + +export PYTHONPATH=. + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + +export mode="train_dyn_mano_model_wreact" ## wreact ## wreact ## + +export conf=dyn_grab_pointset_mano_dyn.conf + + +export conf_root="./confs_new" + + + +export cuda_ids="0" + + + +CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_mano_wreact_optacts.sh b/scripts_new/train_grab_mano_wreact_optacts.sh new file mode 100644 index 0000000000000000000000000000000000000000..9771ce9b23a73790ffddfd3e88c90a6a556410e3 --- /dev/null +++ b/scripts_new/train_grab_mano_wreact_optacts.sh @@ -0,0 +1,26 @@ + +export PYTHONPATH=. + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + +export mode="train_dyn_mano_model_wreact" ## wreact ## wreact ## + +export conf=dyn_grab_pointset_mano_dyn.conf + + +export conf_root="./confs_new" + + + +export cuda_ids="0" + + + +CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_pointset.sh b/scripts_new/train_grab_pointset.sh new file mode 100644 index 0000000000000000000000000000000000000000..2c40a3ccf3a76503ad59c7315a4d18321360742c --- /dev/null +++ b/scripts_new/train_grab_pointset.sh @@ -0,0 +1,99 @@ + +export PYTHONPATH=. 
+ +export cuda_ids="4" + +# export cuda_ids="5" + + +export trainer=exp_runner_arti_forward.py +export conf=wmask_refine_passive_rigidtrans_forward.conf +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + +# export trainer=exp_runner_arti_multi_objs_compositional.py +export trainer=exp_runner_arti_multi_objs_compositional_ks.py +# export trainer=exp_runner_arti_multi_objs_dyn.py +export trainer=exp_runner_arti_multi_objs_pointset.py +# /home/xueyi/diffsim/NeuS/confs/wmask_refine_passive_compositional.conf +export conf=wmask_refine_passive_compositional.conf +export conf=dyn_arctic_ks.conf + +export conf=dyn_arctic_ks_robohand.conf +# /data/xueyi/diffsim/NeuS/confs/dyn_arctic_ks_robohand_from_mano_model_rules.conf +export conf=dyn_arctic_ks_robohand_from_mano_model_rules.conf +export conf=dyn_arctic_robohand_from_mano_model_rules.conf +export conf=dyn_arctic_robohand_from_mano_model_rules_actions.conf +export conf=dyn_arctic_robohand_from_mano_model_rules_actions_f2.conf +export conf=dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand.conf +export conf=dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand_v2.conf +export conf=dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand_v4.conf + +# /home/xueyi/diffsim/NeuS/confs/dyn_arctic_robohand_from_mano_model_train_mano_dyn_model_states.conf +export conf=dyn_arctic_robohand_from_mano_model_train_mano_dyn_model_states.conf +# /home/xueyi/diffsim/NeuS/confs/dyn_arctic_robohand_from_mano_model_train_mano_dyn_model_states.conf +# export conf=dyn_arctic_robohand_from_mano_model_train_mano_dyn_model_states.conf + +export conf=dyn_arctic_robohand_from_mano_model_train_mano_dyn_model_states.conf +# export conf=dyn_arctic_ks_robohand_from_mano_model_rules_arti.conf +# a very stiff system for # +export conf=dyn_grab_pointset_mano.conf + +export mode="train_from_model_rules" +export mode="train_from_model_rules" + +export mode="train_sdf_from_model_rules" +export mode="train_actions_from_model_rules" +# export mode="train_actions_from_sim_rules" + + + +export mode="train_def" +# export mode="train_actions_from_model_rules" +export mode="train_mano_actions_from_model_rules" + +# virtual force # # ## virtual forces ## +# /data/xueyi/diffsim/NeuS/confs/dyn_arctic_ks_robohand_from_mano_model_rules.conf ## +export mode="train_actions_from_model_rules" + +export mode="train_actions_from_mano_model_rules" + +export mode="train_real_robot_actions_from_mano_model_rules" +export mode="train_real_robot_actions_from_mano_model_rules_diffhand" + +export mode="train_real_robot_actions_from_mano_model_rules_diffhand_fortest" +export mode="train_real_robot_actions_from_mano_model_rules_manohand_fortest" + + +export mode="train_real_robot_actions_from_mano_model_rules_diffhand_fortest" + + + +export mode="train_real_robot_actions_from_mano_model_rules_manohand_fortest_states" + +###### diffsim ###### +# /home/xueyi/diffsim/NeuS/confs/dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand_v3.conf + +export mode="train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_world" +export mode="train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_rl" + +export mode="train_dyn_mano_model_states" + + +## train dyn mano model states wreact ## +export mode="train_dyn_mano_model_states_wreact" +export conf=dyn_grab_pointset_mano_dyn.conf +## train dyn mano model states wreact ## + + +export conf_root="./confs_new" + + +# +# bash scripts_new/train_grab_pointset_dyn.sh + +export 
cuda_ids="2" +# + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_pointset_points_dyn.sh b/scripts_new/train_grab_pointset_points_dyn.sh new file mode 100644 index 0000000000000000000000000000000000000000..525878cbd1f82ec08f063ac167a8b9e9a045af27 --- /dev/null +++ b/scripts_new/train_grab_pointset_points_dyn.sh @@ -0,0 +1,27 @@ + +export PYTHONPATH=. + + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + + +export mode="train_point_set" + +export conf=dyn_grab_pointset_points_dyn.conf + +export conf_root="./confs_new" + + +# bash scripts_new/train_grab_pointset_points_dyn.sh + +export cuda_ids="0" + + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_pointset_points_dyn_retar.sh b/scripts_new/train_grab_pointset_points_dyn_retar.sh new file mode 100644 index 0000000000000000000000000000000000000000..f35580466cbb7f82f4908bf735bcf54f183010be --- /dev/null +++ b/scripts_new/train_grab_pointset_points_dyn_retar.sh @@ -0,0 +1,36 @@ + +export PYTHONPATH=. + + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + + + +### stage 1 -> tracking MANO expanded set using Shadow's object mesh points ### +export mode="train_point_set_retar" +export conf=dyn_grab_pointset_points_dyn_retar.conf + +# ### stage 2 -> tracking MANO expanded set using Shadow's expanded points ### +# export mode="train_expanded_set_motions_retar_pts" +# export conf=dyn_grab_pointset_points_dyn_retar_pts.conf + + + + +export conf_root="./confs_new" + + +# bash scripts_new/train_grab_pointset_points_dyn_retar.sh + +export cuda_ids="0" + + + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_pointset_points_dyn_retar_pts.sh b/scripts_new/train_grab_pointset_points_dyn_retar_pts.sh new file mode 100644 index 0000000000000000000000000000000000000000..4e86cd86ab314c640b34f522768f5c9b398f1d60 --- /dev/null +++ b/scripts_new/train_grab_pointset_points_dyn_retar_pts.sh @@ -0,0 +1,36 @@ + +export PYTHONPATH=. 
+ + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + + + +# ### stage 1 -> tracking MANO expanded set using Shadow's object mesh points ### +# export mode="train_expanded_set_motions_retar" +# export conf=dyn_grab_pointset_points_dyn_retar.conf + +### stage 2 -> tracking MANO expanded set using Shadow's expanded points ### +export mode="train_point_set_retar_pts" +export conf=dyn_grab_pointset_points_dyn_retar_pts.conf + + + + +export conf_root="./confs_new" + + +# bash scripts_new/train_grab_pointset_points_dyn_retar.sh + +export cuda_ids="0" + + + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_pointset_points_dyn_s1.sh b/scripts_new/train_grab_pointset_points_dyn_s1.sh new file mode 100644 index 0000000000000000000000000000000000000000..7d3a85975593f20924a815e2105905cd7f23eb27 --- /dev/null +++ b/scripts_new/train_grab_pointset_points_dyn_s1.sh @@ -0,0 +1,27 @@ + +export PYTHONPATH=. + + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + + +export mode="train_point_set" + +export conf=dyn_grab_pointset_points_dyn_s1.conf + +export conf_root="./confs_new" + + +# bash scripts_new/train_grab_pointset_points_dyn_s1.sh + +export cuda_ids="0" + + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_pointset_points_dyn_s2.sh b/scripts_new/train_grab_pointset_points_dyn_s2.sh new file mode 100644 index 0000000000000000000000000000000000000000..ebeb71f3adf3896abed20dbba4af6e1f61902199 --- /dev/null +++ b/scripts_new/train_grab_pointset_points_dyn_s2.sh @@ -0,0 +1,27 @@ + +export PYTHONPATH=. + + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + + +export mode="train_point_set" + +export conf=dyn_grab_pointset_points_dyn_s2.conf + +export conf_root="./confs_new" + + +# bash scripts_new/train_grab_pointset_points_dyn_s2.sh + +export cuda_ids="0" + + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_pointset_points_dyn_s3.sh b/scripts_new/train_grab_pointset_points_dyn_s3.sh new file mode 100644 index 0000000000000000000000000000000000000000..9fb48904be97cb7f03fa24299572ea7e92dd5d5f --- /dev/null +++ b/scripts_new/train_grab_pointset_points_dyn_s3.sh @@ -0,0 +1,27 @@ + +export PYTHONPATH=. 
+ + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + + +export mode="train_point_set" + +export conf=dyn_grab_pointset_points_dyn_s3.conf + +export conf_root="./confs_new" + + +# bash scripts_new/train_grab_pointset_points_dyn_s3.sh + +export cuda_ids="0" + + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_pointset_points_dyn_s4.sh b/scripts_new/train_grab_pointset_points_dyn_s4.sh new file mode 100644 index 0000000000000000000000000000000000000000..fbb10a0953289a37f8a83a1538bda93cc471a6bc --- /dev/null +++ b/scripts_new/train_grab_pointset_points_dyn_s4.sh @@ -0,0 +1,27 @@ + +export PYTHONPATH=. + + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + + +export mode="train_point_set" + +export conf=dyn_grab_pointset_points_dyn_s4.conf + +export conf_root="./confs_new" + + +# bash scripts_new/train_grab_pointset_points_dyn_s4.sh + +export cuda_ids="0" + + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_shadow_multistages.sh b/scripts_new/train_grab_shadow_multistages.sh new file mode 100644 index 0000000000000000000000000000000000000000..39faa306c2748a331ead9037f736f520a9f80bc9 --- /dev/null +++ b/scripts_new/train_grab_shadow_multistages.sh @@ -0,0 +1,102 @@ + +export PYTHONPATH=. + +export cuda_ids="3" + + + +export trainer=exp_runner_arti_forward.py +export conf=wmask_refine_passive_rigidtrans_forward.conf +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + +# export trainer=exp_runner_arti_multi_objs_compositional.py +export trainer=exp_runner_arti_multi_objs_compositional_ks.py +export trainer=exp_runner_arti_multi_objs_arti_dyn.py +export conf=dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand.conf +export conf=dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand_v2.conf +export conf=dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand_v4.conf + +# /home/xueyi/diffsim/NeuS/confs/dyn_arctic_robohand_from_mano_model_train_mano_dyn_model_states.conf +export conf=dyn_arctic_robohand_from_mano_model_train_mano_dyn_model_states.conf + +# /home/xueyi/diffsim/NeuS/confs/dyn_grab_mano_model_states.conf +export conf=dyn_grab_mano_model_states.conf + +export conf=dyn_grab_shadow_model_states.conf + + + + +export mode="train_def" +# export mode="train_actions_from_model_rules" +export mode="train_mano_actions_from_model_rules" + +# virtual force # +# /data/xueyi/diffsim/NeuS/confs/dyn_arctic_ks_robohand_from_mano_model_rules.conf ## + +export mode="train_real_robot_actions_from_mano_model_rules_diffhand_fortest" + + + +export mode="train_real_robot_actions_from_mano_model_rules_manohand_fortest_states" + + +### using the diffsim ### + +###### diffsim ###### +# /home/xueyi/diffsim/NeuS/confs/dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand_v3.conf + + +export mode="train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_world" +export mode="train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_rl" + +export mode="train_dyn_mano_model_states" + +# +export mode="train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab" +## dyn grab 
shadow + + +### optimize for the manipulatable hand actions ### +export mode="train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab_redmax_acts" + + + +# ## acts ## +# export conf=dyn_grab_shadow_model_states_224.conf +# export conf=dyn_grab_shadow_model_states_89.conf +# export conf=dyn_grab_shadow_model_states_102.conf +# export conf=dyn_grab_shadow_model_states_7.conf +# export conf=dyn_grab_shadow_model_states_47.conf +# export conf=dyn_grab_shadow_model_states_67.conf +# export conf=dyn_grab_shadow_model_states_76.conf +# export conf=dyn_grab_shadow_model_states_85.conf +# # export conf=dyn_grab_shadow_model_states_91.conf +# # export conf=dyn_grab_shadow_model_states_167.conf +# export conf=dyn_grab_shadow_model_states_107.conf +# export conf=dyn_grab_shadow_model_states_306.conf +# export conf=dyn_grab_shadow_model_states_313.conf +# export conf=dyn_grab_shadow_model_states_322.conf + + +# /home/xueyi/diffsim/NeuS/confs_new/dyn_grab_arti_shadow_multi_stages.conf +export conf=dyn_grab_arti_shadow_multi_stages.conf +# export conf=dyn_grab_shadow_model_states_398.conf +# export conf=dyn_grab_shadow_model_states_363.conf +# export conf=dyn_grab_shadow_model_states_358.conf + +# bash scripts_new/train_grab_shadow_multistages.sh + + + + + +export conf_root="./confs_new" + + +export cuda_ids="4" + + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_shadow_singlestage.sh b/scripts_new/train_grab_shadow_singlestage.sh new file mode 100644 index 0000000000000000000000000000000000000000..3197b2eb26e8181d5dcf580707eb6bec44785ee7 --- /dev/null +++ b/scripts_new/train_grab_shadow_singlestage.sh @@ -0,0 +1,101 @@ + +export PYTHONPATH=. 
+ +export cuda_ids="3" + + + +export trainer=exp_runner_arti_forward.py +export conf=wmask_refine_passive_rigidtrans_forward.conf +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + +# export trainer=exp_runner_arti_multi_objs_compositional.py +export trainer=exp_runner_arti_multi_objs_compositional_ks.py +export trainer=exp_runner_arti_multi_objs_arti_dyn.py +export conf=dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand.conf +export conf=dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand_v2.conf +export conf=dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand_v4.conf + +# /home/xueyi/diffsim/NeuS/confs/dyn_arctic_robohand_from_mano_model_train_mano_dyn_model_states.conf +export conf=dyn_arctic_robohand_from_mano_model_train_mano_dyn_model_states.conf + +# /home/xueyi/diffsim/NeuS/confs/dyn_grab_mano_model_states.conf +export conf=dyn_grab_mano_model_states.conf + +export conf=dyn_grab_shadow_model_states.conf + + + + +export mode="train_def" +# export mode="train_actions_from_model_rules" +export mode="train_mano_actions_from_model_rules" + +# virtual force # +# /data/xueyi/diffsim/NeuS/confs/dyn_arctic_ks_robohand_from_mano_model_rules.conf ## + +export mode="train_real_robot_actions_from_mano_model_rules_diffhand_fortest" + + + +export mode="train_real_robot_actions_from_mano_model_rules_manohand_fortest_states" + + +###### diffsim ###### +# /home/xueyi/diffsim/NeuS/confs/dyn_arctic_robohand_from_mano_model_rules_actions_f2_diffhand_v3.conf + + +export mode="train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_world" +export mode="train_real_robot_actions_from_mano_model_rules_v5_manohand_fortest_states_res_rl" + +export mode="train_dyn_mano_model_states" + +# +export mode="train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab" +## dyn grab shadow + +### optimze for the redmax hand actions from joint states ### +export mode="train_real_robot_actions_from_mano_model_rules_shadowhand" + +### optimize for the manipulatable hand actions ### +export mode="train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab_redmax_acts" + + + +# ## acts ## +# export conf=dyn_grab_shadow_model_states_224.conf +# export conf=dyn_grab_shadow_model_states_89.conf +# export conf=dyn_grab_shadow_model_states_102.conf +# export conf=dyn_grab_shadow_model_states_7.conf +# export conf=dyn_grab_shadow_model_states_47.conf +# export conf=dyn_grab_shadow_model_states_67.conf +# export conf=dyn_grab_shadow_model_states_76.conf +# export conf=dyn_grab_shadow_model_states_85.conf +# # export conf=dyn_grab_shadow_model_states_91.conf +# # export conf=dyn_grab_shadow_model_states_167.conf +# export conf=dyn_grab_shadow_model_states_107.conf +# export conf=dyn_grab_shadow_model_states_306.conf +# export conf=dyn_grab_shadow_model_states_313.conf +# export conf=dyn_grab_shadow_model_states_322.conf + +# /home/xueyi/diffsim/NeuS/confs_new/dyn_grab_arti_shadow_multi_stages.conf +# /home/xueyi/diffsim/NeuS/confs_new/dyn_grab_arti_shadow_single_stage.conf +export conf=dyn_grab_arti_shadow_single_stage.conf +# export conf=dyn_grab_shadow_model_states_398.conf +# export conf=dyn_grab_shadow_model_states_363.conf +# export conf=dyn_grab_shadow_model_states_358.conf + +# bash scripts_new/train_grab_shadow_singlestage.sh + + + + +export conf_root="./confs_new" + + +export cuda_ids="3" + + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} 
--conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_sparse_retar.sh b/scripts_new/train_grab_sparse_retar.sh new file mode 100644 index 0000000000000000000000000000000000000000..a30ca20fa64f6f8dd5da5f8e477fdb9fa4e61718 --- /dev/null +++ b/scripts_new/train_grab_sparse_retar.sh @@ -0,0 +1,27 @@ + +export PYTHONPATH=. + + + + +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + + +export trainer=exp_runner_stage_1.py + + +export mode="train_sparse_retar" + +export conf=dyn_grab_sparse_retar.conf + +export conf_root="./confs_new" + + +# bash scripts_new/train_grab_sparse_retar.sh + +export cuda_ids="0" + + +PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_stage_2_dm.sh b/scripts_new/train_grab_stage_2_dm.sh new file mode 100644 index 0000000000000000000000000000000000000000..b9e21bde8363aa89f2ab933f67db3191d0268562 --- /dev/null +++ b/scripts_new/train_grab_stage_2_dm.sh @@ -0,0 +1,40 @@ + +export PYTHONPATH=. + +# export cuda_ids="3" + + + +export conf=wmask_refine_passive_rigidtrans_forward.conf +export data_case=hand_test_routine_2_light_color_wtime_active_passive + +export trainer=exp_runner_stage_2.py + + + + +export mode="train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab" +## dyn grab shadow + +### optimze for the redmax hand actions from joint states ### +export mode="train_diffhand_model" + +## optimize for the manipulatable hand actions ### +# export mode="train_real_robot_actions_from_mano_model_rules_v5_shadowhand_fortest_states_grab_redmax_acts" + +### xxx ### + + +export conf=dyn_grab_arti_shadow_diffhand.conf + + + + +export conf_root="./confs_new" + + +export cuda_ids="3" + + +CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/scripts_new/train_grab_stage_2_dm_curriculum.sh b/scripts_new/train_grab_stage_2_dm_curriculum.sh new file mode 100644 index 0000000000000000000000000000000000000000..85974d345e8d867ed5cb56239096e4b94835f317 --- /dev/null +++ b/scripts_new/train_grab_stage_2_dm_curriculum.sh @@ -0,0 +1,33 @@ + +export PYTHONPATH=. 
+ +export cuda_ids="3" + + + +export conf=wmask_refine_passive_rigidtrans_forward.conf +export data_case=hand_test_routine_2_light_color_wtime_active_passive + + +export trainer=exp_runner_stage_2.py + + +### optimize for the manipulatable hand actions ### +export mode="train_manip_acts_params" + + +# export conf=dyn_grab_arti_shadow_multi_stages.conf +export conf=dyn_grab_arti_shadow_dm_curriculum.conf + + +# bash scripts_new/train_grab_stage_2_dm_curriculum.sh + + +export conf_root="./confs_new" + + +export cuda_ids="0" + + +CUDA_VISIBLE_DEVICES=${cuda_ids} python ${trainer} --mode ${mode} --conf ${conf_root}/${conf} --case ${data_case} + diff --git a/utils/arctic_preprocessing.py b/utils/arctic_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..fffb7caa0a970329518dd6a515e3b0ed4032f236 --- /dev/null +++ b/utils/arctic_preprocessing.py @@ -0,0 +1,156 @@ +import numpy as np +import torch +import trimesh +import os +import mesh2sdf +import time +from scipy.spatial.transform import Rotation as R + +QUASI_DYN_ROOT = "/home/xueyi/diffsim/NeuS" +if not os.path.exists(QUASI_DYN_ROOT): + QUASI_DYN_ROOT = "/root/diffsim/quasi-dyn" + +ARCTIC_CANON_OBJ_SV_FOLDER = os.path.join(QUASI_DYN_ROOT, "raw_data/arctic_processed_canon_obj") + + + +def export_canon_obj_file(kinematic_mano_gt_sv_fn, obj_name): + + subject_idx = kinematic_mano_gt_sv_fn.split("/")[-2] # + print(f"subject_idx: {subject_idx}, obj name: {obj_name}") + sv_dict = np.load(kinematic_mano_gt_sv_fn, allow_pickle=True).item() + + + object_global_orient = sv_dict["obj_rot"] + object_transl = sv_dict["obj_trans"] * 0.001 + obj_pcs = sv_dict["verts.object"] + + # obj_pcs = sv_dict['object_pc'] + obj_pcs = torch.from_numpy(obj_pcs).float().cuda() + + + # self.obj_verts = obj_verts + init_obj_verts = obj_pcs[0] + init_obj_rot_vec = object_global_orient[0] + init_obj_transl = object_transl[0] + + init_obj_transl = torch.from_numpy(init_obj_transl).float().cuda() + init_rot_struct = R.from_rotvec(init_obj_rot_vec) + + init_glb_rot_mtx = init_rot_struct.as_matrix() + init_glb_rot_mtx = torch.from_numpy(init_glb_rot_mtx).float().cuda() + # ## reverse the global rotation matrix ## + init_glb_rot_mtx_reversed = init_glb_rot_mtx.contiguous().transpose(1, 0).contiguous() + + + + ''' canonical object verts ''' + canon_obj_verts = torch.matmul( + init_glb_rot_mtx_reversed.transpose(1, 0).contiguous(), (init_obj_verts - init_obj_transl.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + + # ## get canon obj verts ## + + # /home/xueyi/diffsim/NeuS/raw_data/arctic_processed_canon_obj + + canon_obj_sv_folder = ARCTIC_CANON_OBJ_SV_FOLDER # "/root/diffsim/control-vae-2/assets/arctic" + canon_obj_mesh = trimesh.Trimesh(vertices=canon_obj_verts.detach().cpu().numpy(), faces=sv_dict['f'][0]) + + canon_obj_mesh_sv_fn = f"{subject_idx}_{obj_name}.obj" + canon_obj_mesh_sv_fn = os.path.join(canon_obj_sv_folder, canon_obj_mesh_sv_fn) + canon_obj_mesh.export(canon_obj_mesh_sv_fn) + + print(f"Canonical obj mesh saved to {canon_obj_mesh_sv_fn}") + return canon_obj_mesh_sv_fn + + +def compute_sdf(obj_file_name): + filename = obj_file_name + + # init_mesh_scale = 1.0 + init_mesh_scale = 0.8 + + mesh_scale = 0.8 + size = 128 + level = 2 / size + + mesh = trimesh.load(filename, force='mesh') + + # normalize mesh + vertices = mesh.vertices + vertices = vertices * init_mesh_scale + bbmin = vertices.min(0) # + bbmax = vertices.max(0) # + center = (bbmin + bbmax) * 0.5 + scale = 2.0 * 
mesh_scale / (bbmax - bbmin).max() # bounding box's max # # bbmax - bbmin # + vertices = (vertices - center) * scale # (vertices - center) * scale # + + scaled_bbmin = vertices.min(0) + scaled_bbmax = vertices.max(0) + print(f"scaled_bbmin: {scaled_bbmin}, scaled_bbmax: {scaled_bbmax}") + + + t0 = time.time() + sdf, mesh = mesh2sdf.compute( ## sdf and mesh ## + vertices, mesh.faces, size, fix=True, level=level, return_mesh=True) + t1 = time.time() + + print(f"sdf: {sdf.shape}, mesh: {mesh.vertices.shape}") + + mesh.vertices = mesh.vertices / scale + center + mesh.export(filename[:-4] + '.fixed.obj') ## .fixed.obj ## + np.save(filename[:-4] + '.npy', sdf) ## .npy ## + print('It takes %.4f seconds to process %s' % (t1-t0, filename)) + + +if __name__=='__main__': + + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s01/box_grab_01.npy' + + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s01/ketchup_grab_01.npy' + + # kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s01/mixer_grab_01.npy' + + # kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s01/laptop_grab_01.npy' + + # kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s01/box_grab_01.npy' + + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s02/waffleiron_grab_01.npy' + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s02/ketchup_grab_01.npy' + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s02/phone_grab_01.npy' + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s02/box_grab_01.npy' + + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s02" + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s04" + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s05" + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s06" + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s07" + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s10" + + kinematic_mano_gt_sv_fn_all = os.listdir(kinematic_mano_gt_sv_folder) + kinematic_mano_gt_sv_fn_all = [fn for fn in kinematic_mano_gt_sv_fn_all if fn.endswith(".npy") and "grab" in fn] + kinematic_mano_gt_sv_fn_all = [os.path.join(kinematic_mano_gt_sv_folder, fn) for fn in kinematic_mano_gt_sv_fn_all] + for kinematic_mano_gt_sv_fn in kinematic_mano_gt_sv_fn_all: + obj_name = kinematic_mano_gt_sv_fn.split("/")[-1].split(".")[0].split("_")[0] + print(f"obj_name: {obj_name}") + + ##### process and export canonical object file ##### + canon_obj_mesh_sv_fn = export_canon_obj_file(kinematic_mano_gt_sv_fn, obj_name) + + + ##### compute sdf of the canonical object mesh ##### + compute_sdf(canon_obj_mesh_sv_fn) + + + # obj_name = kinematic_mano_gt_sv_fn.split("/")[-1].split(".")[0].split("_")[0] + # print(f"obj_name: {obj_name}") + + # ##### process and export canonical object file ##### + # canon_obj_mesh_sv_fn = export_canon_obj_file(kinematic_mano_gt_sv_fn, obj_name) + + + # ##### compute sdf of the canonical object mesh ##### + # compute_sdf(canon_obj_mesh_sv_fn) + + + \ No newline at end of file diff --git a/utils/data_format_transformation_processing.py b/utils/data_format_transformation_processing.py new file mode 100644 index 
0000000000000000000000000000000000000000..7808f516772e493ef483340c6e9b7430964f2f2a --- /dev/null +++ b/utils/data_format_transformation_processing.py @@ -0,0 +1,239 @@ +import numpy as np +import torch +import trimesh +import os +import mesh2sdf +import time +from scipy.spatial.transform import Rotation as R + +QUASI_DYN_ROOT = "/home/xueyi/diffsim/NeuS" +if not os.path.exists(QUASI_DYN_ROOT): + QUASI_DYN_ROOT = "/root/diffsim/quasi-dyn" + +ARCTIC_CANON_OBJ_SV_FOLDER = os.path.join(QUASI_DYN_ROOT, "raw_data/arctic_processed_canon_obj") + + + +def export_canon_obj_file(kinematic_mano_gt_sv_fn, obj_name): + + subject_idx = kinematic_mano_gt_sv_fn.split("/")[-2] # + print(f"subject_idx: {subject_idx}, obj name: {obj_name}") + sv_dict = np.load(kinematic_mano_gt_sv_fn, allow_pickle=True).item() + + + object_global_orient = sv_dict["obj_rot"] + object_transl = sv_dict["obj_trans"] * 0.001 + obj_pcs = sv_dict["verts.object"] + + # obj_pcs = sv_dict['object_pc'] + obj_pcs = torch.from_numpy(obj_pcs).float().cuda() + + + # self.obj_verts = obj_verts + init_obj_verts = obj_pcs[0] + init_obj_rot_vec = object_global_orient[0] + init_obj_transl = object_transl[0] + + init_obj_transl = torch.from_numpy(init_obj_transl).float().cuda() + init_rot_struct = R.from_rotvec(init_obj_rot_vec) + + init_glb_rot_mtx = init_rot_struct.as_matrix() + init_glb_rot_mtx = torch.from_numpy(init_glb_rot_mtx).float().cuda() + # ## reverse the global rotation matrix ## + init_glb_rot_mtx_reversed = init_glb_rot_mtx.contiguous().transpose(1, 0).contiguous() + + + + ''' canonical object verts ''' + canon_obj_verts = torch.matmul( + init_glb_rot_mtx_reversed.transpose(1, 0).contiguous(), (init_obj_verts - init_obj_transl.unsqueeze(0)).contiguous().transpose(1, 0).contiguous() + ).contiguous().transpose(1, 0).contiguous() + + # ## get canon obj verts ## + + # /home/xueyi/diffsim/NeuS/raw_data/arctic_processed_canon_obj + + canon_obj_sv_folder = ARCTIC_CANON_OBJ_SV_FOLDER # "/root/diffsim/control-vae-2/assets/arctic" + canon_obj_mesh = trimesh.Trimesh(vertices=canon_obj_verts.detach().cpu().numpy(), faces=sv_dict['f'][0]) + + canon_obj_mesh_sv_fn = f"{subject_idx}_{obj_name}.obj" + canon_obj_mesh_sv_fn = os.path.join(canon_obj_sv_folder, canon_obj_mesh_sv_fn) + canon_obj_mesh.export(canon_obj_mesh_sv_fn) + + print(f"Canonical obj mesh saved to {canon_obj_mesh_sv_fn}") + return canon_obj_mesh_sv_fn + + +def compute_sdf(obj_file_name): + filename = obj_file_name + + # init_mesh_scale = 1.0 + init_mesh_scale = 0.8 + + mesh_scale = 0.8 + size = 128 + level = 2 / size + + mesh = trimesh.load(filename, force='mesh') + + # normalize mesh + vertices = mesh.vertices + vertices = vertices * init_mesh_scale + bbmin = vertices.min(0) # + bbmax = vertices.max(0) # + center = (bbmin + bbmax) * 0.5 + scale = 2.0 * mesh_scale / (bbmax - bbmin).max() # bounding box's max # # bbmax - bbmin # + vertices = (vertices - center) * scale # (vertices - center) * scale # + + scaled_bbmin = vertices.min(0) + scaled_bbmax = vertices.max(0) + print(f"scaled_bbmin: {scaled_bbmin}, scaled_bbmax: {scaled_bbmax}") + + + t0 = time.time() + sdf, mesh = mesh2sdf.compute( ## sdf and mesh ## + vertices, mesh.faces, size, fix=True, level=level, return_mesh=True) + t1 = time.time() + + print(f"sdf: {sdf.shape}, mesh: {mesh.vertices.shape}") + + mesh.vertices = mesh.vertices / scale + center + mesh.export(filename[:-4] + '.fixed.obj') ## .fixed.obj ## + np.save(filename[:-4] + '.npy', sdf) ## .npy ## + print('It takes %.4f seconds to process %s' % (t1-t0, filename)) + 
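+
+# A minimal, hedged illustration of the rotation bookkeeping used by
+# convert_tot_states_to_data_ref_format below: per-frame hand rotation angles are
+# stored as (x, y, z), reversed into scipy's 'zyx' Euler order, and the resulting
+# quaternion is reordered from scipy's (x, y, z, w) layout to the (w, x, y, z) layout
+# of the reference data. `angles_xyz_to_quat_wxyz` is a hypothetical helper added for
+# illustration only; the original code does not call it.
+def angles_xyz_to_quat_wxyz(angles_xyz):
+    # reverse (x, y, z) so the triple matches the 'zyx' Euler convention used below
+    angles_zyx = np.asarray(angles_xyz, dtype=np.float32)[::-1]
+    quat_xyzw = R.from_euler('zyx', angles_zyx, degrees=False).as_quat()
+    # scipy returns (x, y, z, w); the reference format stores (w, x, y, z)
+    return quat_xyzw[[3, 0, 1, 2]].astype(np.float32)
+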
+ + +def convert_tot_states_to_data_ref_format(tot_states_fn, sv_gt_ref_data_fn): + tot_states_data = np.load(tot_states_fn, allow_pickle=True).item() + tot_states = tot_states_data['tot_states'] + tot_mano_rot = [] + tot_mano_glb_trans = [] + tot_mano_states = [] + + tot_obj_rot = [] + tot_obj_trans = [] + + for i_fr in range(len(tot_states)): + cur_state = tot_states[i_fr] + cur_mano_state = cur_state[:-7] + cur_obj_state = cur_state[-7:] + + tot_mano_glb_trans.append(cur_mano_state[:3]) + cur_mano_rot_vec = cur_mano_state[3:6] + cur_mano_rot_euler_zyx = [cur_mano_rot_vec[2], cur_mano_rot_vec[1], cur_mano_rot_vec[0]] + cur_mano_rot_euler_zyx = np.array(cur_mano_rot_euler_zyx, dtype=np.float32) + cur_mano_rot_struct = R.from_euler('zyx', cur_mano_rot_euler_zyx, degrees=False) + cur_mano_rot_quat_xyzw = cur_mano_rot_struct.as_quat() + cur_mano_rot_quat_wxyz = cur_mano_rot_quat_xyzw[[3, 0, 1, 2]] + tot_mano_rot.append(cur_mano_rot_quat_wxyz.astype(np.float32)) + + tot_mano_states.append(cur_mano_state[4:]) + + tot_obj_rot.append(cur_obj_state[-4:][[3, 0, 1, 2]]) + tot_obj_trans.append(cur_obj_state[:3]) + tot_obj_rot = np.stack(tot_obj_rot, axis=0) + tot_obj_trans = np.stack(tot_obj_trans, axis=0) + tot_mano_rot = np.stack(tot_mano_rot, axis=0) + tot_mano_glb_trans = np.stack(tot_mano_glb_trans, axis=0) + tot_mano_states = np.stack(tot_mano_states, axis=0) + + gt_ref_data = { + 'obj_rot': tot_obj_rot, + 'obj_trans': tot_obj_trans, + 'mano_states': tot_mano_states, + 'mano_glb_trans': tot_mano_glb_trans, + 'mano_glb_rot': tot_mano_rot + } + + np.save(sv_gt_ref_data_fn, gt_ref_data) + print(f"gt ref data svaed to {sv_gt_ref_data_fn}") + + +# spoon --> how to sue the spoon # +# + +if __name__=='__main__': + + tot_states_fn = "/home/xueyi/diffsim/raisim/dgrasp/raisimGymTorch/bullet_env/obj_mouse_idx_102_tracking_2/2024-02-27-05-44-09/sv_info_800.npy" + sv_gt_ref_data_fn = "/home/xueyi/diffsim/Control-VAE/ReferenceData/shadow_grab_mouse_102_dgrasptracking.npy" + + tot_states_fn = "/home/xueyi/diffsim/raisim/dgrasp/raisimGymTorch/bullet_env/obj_bunny_idx_85_tracking_2/2024-02-26-08-33-58/sv_info_600.npy" + sv_gt_ref_data_fn = "/home/xueyi/diffsim/Control-VAE/ReferenceData/shadow_grab_bunny_85_dgrasptracking.npy" + + tot_states_fn = "/home/xueyi/diffsim/raisim/dgrasp/raisimGymTorch/bullet_env/obj_spoon2_idx_20231105_067_tracking_2/2024-02-29-09-49-32/sv_info_300.npy" + sv_gt_ref_data_fn = "/home/xueyi/diffsim/Control-VAE/ReferenceData/shadow_taco_spoon2_idx_20231105_067_dgrasptracking.npy" + + tot_states_fn= "/home/xueyi/diffsim/raisim/dgrasp/raisimGymTorch/bullet_env/obj_20231027_086_idx_20231027_086_tracking_2/2024-03-09-13-01-24/sv_info_best.npy" + sv_gt_ref_data_fn = "/home/xueyi/diffsim/Control-VAE/ReferenceData/shadow_taco_20231027_086_idx_20231027_086_dgrasptracking.npy" + + # /home/xueyi/diffsim/raisim/dgrasp/raisimGymTorch/bullet_env/obj_20231024_044_idx_20231024_044_tracking_2/2024-03-09-13-20-07/sv_info_best.npy + tot_states_fn= "/home/xueyi/diffsim/raisim/dgrasp/raisimGymTorch/bullet_env/obj_20231024_044_idx_20231024_044_tracking_2/2024-03-09-13-20-07/sv_info_best.npy" + sv_gt_ref_data_fn = "/home/xueyi/diffsim/Control-VAE/ReferenceData/shadow_taco_20231024_044_idx_20231024_044_dgrasptracking.npy" + + tot_states_fn = "/home/xueyi/diffsim/raisim/dgrasp/raisimGymTorch/bullet_env/obj_20231027_130_idx_20231027_130_tracking_2/2024-03-09-13-40-52/sv_info_best.npy" + sv_gt_ref_data_fn = 
"/home/xueyi/diffsim/Control-VAE/ReferenceData/shadow_taco_20231027_130_idx_20231027_130_dgrasptracking.npy" ## gt 130 ## + + tot_states_fn = "/home/xueyi/diffsim/raisim/dgrasp/raisimGymTorch/bullet_env/obj_20231020_199_idx_20231020_199_tracking_2/2024-03-09-14-01-13/sv_info_best.npy" + sv_gt_ref_data_fn = "/home/xueyi/diffsim/Control-VAE/ReferenceData/shadow_taco_20231020_199_idx_20231020_199_dgrasptracking.npy" ## gt 130 ## + + tot_states_fn = "/home/xueyi/diffsim/raisim/dgrasp/raisimGymTorch/bullet_env/obj_20231026_016_idx_20231026_016_tracking_2/2024-03-11-10-06-43/sv_info_best.npy" + + sv_gt_ref_data_fn = "/home/xueyi/diffsim/Control-VAE/ReferenceData/shadow_taco_20231026_016_idx_20231026_016_dgrasptracking.npy" ## gt 130 ## + + tot_states_fn = "/home/xueyi/diffsim/raisim/dgrasp/raisimGymTorch/bullet_env/obj_20231027_114_idx_20231027_114_tracking_2/2024-03-11-10-07-36/sv_info_best.npy" + sv_gt_ref_data_fn = "/home/xueyi/diffsim/Control-VAE/ReferenceData/shadow_taco_20231027_114_idx_20231027_114_dgrasptracking.npy" ## gt 130 ## + + convert_tot_states_to_data_ref_format(tot_states_fn, sv_gt_ref_data_fn) + exit(0) + + + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s01/box_grab_01.npy' + + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s01/ketchup_grab_01.npy' + + # kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s01/mixer_grab_01.npy' + + # kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s01/laptop_grab_01.npy' + + # kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s01/box_grab_01.npy' + + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s02/waffleiron_grab_01.npy' + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s02/ketchup_grab_01.npy' + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s02/phone_grab_01.npy' + kinematic_mano_gt_sv_fn = '/data/xueyi/sim/arctic_processed_data/processed_seqs/s02/box_grab_01.npy' + + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s02" + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s04" + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s05" + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s06" + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s07" + kinematic_mano_gt_sv_folder = "/data/xueyi/sim/arctic_processed_data/processed_seqs/s10" + + kinematic_mano_gt_sv_fn_all = os.listdir(kinematic_mano_gt_sv_folder) + kinematic_mano_gt_sv_fn_all = [fn for fn in kinematic_mano_gt_sv_fn_all if fn.endswith(".npy") and "grab" in fn] + kinematic_mano_gt_sv_fn_all = [os.path.join(kinematic_mano_gt_sv_folder, fn) for fn in kinematic_mano_gt_sv_fn_all] + for kinematic_mano_gt_sv_fn in kinematic_mano_gt_sv_fn_all: + obj_name = kinematic_mano_gt_sv_fn.split("/")[-1].split(".")[0].split("_")[0] + print(f"obj_name: {obj_name}") + + ##### process and export canonical object file ##### + canon_obj_mesh_sv_fn = export_canon_obj_file(kinematic_mano_gt_sv_fn, obj_name) + + + ##### compute sdf of the canonical object mesh ##### + compute_sdf(canon_obj_mesh_sv_fn) + + + # obj_name = kinematic_mano_gt_sv_fn.split("/")[-1].split(".")[0].split("_")[0] + # print(f"obj_name: {obj_name}") + + # ##### process and export canonical object file ##### + # 
canon_obj_mesh_sv_fn = export_canon_obj_file(kinematic_mano_gt_sv_fn, obj_name) + + + # ##### compute sdf of the canonical object mesh ##### + # compute_sdf(canon_obj_mesh_sv_fn) + + + \ No newline at end of file diff --git a/utils/denoise/cvt_format.py b/utils/denoise/cvt_format.py new file mode 100644 index 0000000000000000000000000000000000000000..ceeef95efeeb2a4cea1b77ebdfaf23f86d713f6e --- /dev/null +++ b/utils/denoise/cvt_format.py @@ -0,0 +1,160 @@ +import os +import sys +sys.path.append('.') +import pickle +import numpy as np +# import open3d as o3d +import torch +# import json +from tqdm import tqdm +import traceback + +from manopth.manolayer import ManoLayer +# from utils.hoi_io2 import get_num_frame_v2 +from utils.organize_dataset import load_sequence_names_from_organized_record, txt2intrinsic, load_simplied_nokov_objs_mesh, load_organized_mano_info, load_dates_from_organized_record + +def load_objs_orientation(root, video_id, frame_list = None): + ''' + + return: tool_pose_batch, obj_pose_batch + ''' + date = video_id[:8] + original_nokov_data_dir = os.path.join(root, date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir) + nokov_data_filenames = os.listdir(original_nokov_data_dir) + + if frame_list is not None: + frame_idx_list = [(int(frame)-1) for frame in frame_list] + + tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + tool_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{tool_id}.npy') + assert os.path.exists(tool_pose_path) + if frame_list is None: + tool_pose_batch = torch.from_numpy(np.load(tool_pose_path)) + else: + tool_pose_batch = torch.from_numpy(np.load(tool_pose_path))[frame_idx_list, ...] + + obj_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{obj_id}.npy') + assert os.path.exists(obj_pose_path) + if frame_list is None: + obj_pose_batch = torch.from_numpy(np.load(obj_pose_path)) + else: + obj_pose_batch = torch.from_numpy(np.load(obj_pose_path))[frame_idx_list, ...] 
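+    # NOTE: both arrays are loaded directly from the per-sequence objpose .npy files
+    # (optionally sliced by frame_list). Their exact shape is not checked here, but
+    # downstream consumers index the saved 'obj_pose' as pose[:, :3, :3] / pose[:, :3, 3],
+    # so these are most likely [num_frames, 4, 4] homogeneous object poses.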
+ + return tool_pose_batch, obj_pose_batch + +if __name__ == '__main__': + # date_list = ['20230930'] + device = 'cuda:0' + + root = '/data3/hlyang/results' + upload_root = '/data3/hlyang/HOI-mocap' + nokov_root = upload_root + dataset_root = os.path.join(root, 'dataset') + hand_pose_organized_record_path = os.path.join(dataset_root, 'organized_record.txt') + + date_list = load_dates_from_organized_record(hand_pose_organized_record_path) + + # save_root = '/data3/hlyang/results/test_data' + save_root = '/data3/datasets/xueyi/taco/processed_data' + + mano_path = "/data1/xueyi/mano_models/mano/models" + + for date in date_list: + video_list = load_sequence_names_from_organized_record(hand_pose_organized_record_path, date) + + for video_id in tqdm(video_list): + try: + save_dir = os.path.join(save_root, date) + os.makedirs(save_dir, exist_ok=True) + + (right_hand_pose, right_hand_trans, right_hand_shape) = load_organized_mano_info(dataset_root, date, video_id, frame_list = None, right_hand_bool=True) + (left_hand_pose, left_hand_trans, left_hand_shape) = load_organized_mano_info(dataset_root, date, video_id, frame_list = None, right_hand_bool=False) + tool_verts_batch, tool_faces, obj_verts_batch, obj_faces = load_simplied_nokov_objs_mesh(nokov_root, video_id, frame_list = None, use_cm = True) + tool_pose_batch, obj_pose_batch = load_objs_orientation(nokov_root, video_id, frame_list = None) + + batch_size = right_hand_pose.shape[0] + + if right_hand_shape is not None and left_hand_shape is not None: + use_shape = True + else: + use_shape = False + + right_hand_pose = right_hand_pose.to(device) + right_hand_trans = right_hand_trans.to(device) + left_hand_pose = left_hand_pose.to(device) + left_hand_trans = left_hand_trans.to(device) + tool_verts_batch = tool_verts_batch.to(device) + obj_verts_batch = obj_verts_batch.to(device) + + if use_shape: + right_hand_shape = right_hand_shape.to(device) + left_hand_shape = left_hand_shape.to(device) + + # get hand meshes + use_pca = False + ncomps = 45 + left_hand_mano_layer = ManoLayer(mano_root=mano_path, use_pca=use_pca, ncomps=ncomps, side='left', center_idx = 0).to(device) + right_hand_mano_layer = ManoLayer(mano_root=mano_path, use_pca=use_pca, ncomps=ncomps, side='right', center_idx = 0).to(device) + + with torch.no_grad(): + if use_shape: + right_hand_verts, _ = right_hand_mano_layer(right_hand_pose, right_hand_shape.unsqueeze(0).expand(batch_size, -1)) + else: + right_hand_verts, _ = right_hand_mano_layer(right_hand_pose) + right_hand_verts = right_hand_verts + right_hand_verts = right_hand_verts / 1000.0 + right_hand_verts += right_hand_trans.unsqueeze(1) + + if use_shape: + left_hand_verts, _ = left_hand_mano_layer(left_hand_pose, left_hand_shape.unsqueeze(0).expand(batch_size, -1)) + else: + left_hand_verts, _ = left_hand_mano_layer(left_hand_pose) + left_hand_verts = left_hand_verts + left_hand_verts = left_hand_verts / 1000.0 + left_hand_verts += left_hand_trans.unsqueeze(1) + + # left hand + hand_pose = left_hand_pose.cpu().numpy() + hand_trans = left_hand_trans.cpu().numpy() + if use_shape: + hand_shape = left_hand_shape.cpu().numpy() + else: + hand_shape = torch.zeros(10).cpu().numpy() + hand_verts = left_hand_verts.cpu().numpy() + hand_faces = left_hand_mano_layer.th_faces.cpu().numpy() + obj_verts = obj_verts_batch.cpu().numpy() + obj_faces = obj_faces.cpu().numpy() + + # + rgt_hand_pose = right_hand_pose.cpu().numpy() + rgt_hand_trans = right_hand_trans.cpu().numpy() + if use_shape: + rgt_hand_shape = right_hand_shape.cpu().numpy() + 
else: + rgt_hand_shape = torch.zeros(10).cpu().numpy() + rgt_hand_verts = right_hand_verts.cpu().numpy() + rgt_hand_faces = right_hand_mano_layer.th_faces.cpu().numpy() + rgt_obj_verts = tool_verts_batch.cpu().numpy() + rgt_obj_faces = tool_faces.cpu().numpy() + + + # print(hand_verts.shape, hand_faces.shape, obj_verts.shape, obj_faces.shape) + save_path = os.path.join(save_dir, f'right_{video_id}.pkl') + # save_data = { + # 'hand_pose': hand_pose, 'hand_trans': hand_trans, 'hand_shape': hand_shape, 'hand_verts': hand_verts, 'hand_faces': hand_faces, 'obj_verts': obj_verts, 'obj_faces': obj_faces, 'obj_pose': obj_pose_batch.cpu().numpy(), + + # } + save_data = { + 'hand_pose': rgt_hand_pose, 'hand_trans': rgt_hand_trans, 'hand_shape': rgt_hand_shape, 'hand_verts': rgt_hand_verts, 'hand_faces': rgt_hand_faces, 'obj_verts': rgt_obj_verts, 'obj_faces': rgt_obj_faces, 'obj_pose': tool_pose_batch.cpu().numpy(), + + } + # print(save_data['hand_pose'].shape, save_data['hand_trans'].shape, save_data['hand_shape'].shape, save_data['hand_verts'].shape, save_data['hand_faces'].shape, save_data['obj_verts'].shape, save_data['obj_faces'].shape, save_data['obj_pose'].shape) + pickle.dump(save_data, open(save_path, 'wb')) + + except Exception as err: + traceback.print_exc() + print(err) + continue \ No newline at end of file diff --git a/utils/denoise/cvt_mesh.py b/utils/denoise/cvt_mesh.py new file mode 100644 index 0000000000000000000000000000000000000000..637faa337fa4876df3071f2bf7de4df7fd6ec0f5 --- /dev/null +++ b/utils/denoise/cvt_mesh.py @@ -0,0 +1,93 @@ +import os +import sys +sys.path.append('.') +import pickle +import numpy as np +import open3d as o3d +import torch +import json +from tqdm import tqdm +import traceback + +from manopth.manopth.manolayer import ManoLayer +from utils.hoi_io2 import get_num_frame_v2 +from utils.organize_dataset import load_sequence_names_from_organized_record, txt2intrinsic, load_simplied_nokov_objs_mesh, load_organized_mano_info, load_dates_from_organized_record + +def load_simplied_objs_template(root, video_id, frame_list = None, use_cm = True): + ''' + + return: tool_verts_batch: [num_verts, 3] + tool_faces + obj_verts_batch: [num_verts, 3] + obj_faces + ''' + date = video_id[:8] + original_nokov_data_dir = os.path.join(root, date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir) + nokov_data_filenames = os.listdir(original_nokov_data_dir) + + tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + if use_cm: + tool_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{tool_id}_cm.obj') + else: + tool_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{tool_id}_m.obj') + tool_mesh = o3d.io.read_triangle_mesh(tool_mesh_path) + tool_verts = np.asarray(tool_mesh.vertices) + if use_cm: + tool_verts = torch.from_numpy(tool_verts / 100.).float() + else: + tool_verts = torch.from_numpy(tool_verts).float() + tool_faces = torch.from_numpy(np.asarray(tool_mesh.triangles)) + + if use_cm: + obj_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{obj_id}_cm.obj') + else: + obj_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{obj_id}_m.obj') + obj_mesh = o3d.io.read_triangle_mesh(obj_mesh_path) + obj_verts = np.asarray(obj_mesh.vertices) + if use_cm: + obj_verts = torch.from_numpy(obj_verts / 100.).float() + else: + obj_verts = torch.from_numpy(obj_verts).float() + obj_faces = torch.from_numpy(np.asarray(obj_mesh.triangles)) + + return 
tool_verts, tool_faces, obj_verts, obj_faces + +if __name__ == '__main__': + # date_list = ['20230930'] + device = 'cuda:0' + + root = '/data3/hlyang/results' + upload_root = '/data3/hlyang/HOI-mocap' + nokov_root = upload_root + dataset_root = os.path.join(root, 'dataset') + hand_pose_organized_record_path = os.path.join(dataset_root, 'organized_record.txt') + + date_list = load_dates_from_organized_record(hand_pose_organized_record_path) + + save_root = '/data3/hlyang/results/test_data' + + for date in date_list: + video_list = load_sequence_names_from_organized_record(hand_pose_organized_record_path, date) + + save_dir = os.path.join(save_root, date) + os.makedirs(save_dir, exist_ok=True) + + for video_id in tqdm(video_list): + try: + _, _, obj_verts, obj_faces = load_simplied_objs_template(nokov_root, video_id, use_cm = True) + obj_verts = obj_verts.numpy() + obj_faces = obj_faces.numpy() + obj_mesh = o3d.geometry.TriangleMesh() + obj_mesh.vertices = o3d.utility.Vector3dVector(obj_verts) + obj_mesh.triangles = o3d.utility.Vector3iVector(obj_faces) + + save_path = os.path.join(save_dir, f'{video_id}.obj') + o3d.io.write_triangle_mesh(save_path, obj_mesh) + + except Exception as err: + traceback.print_exc() + print(err) + continue \ No newline at end of file diff --git a/utils/denoise/vis_denoise.py b/utils/denoise/vis_denoise.py new file mode 100644 index 0000000000000000000000000000000000000000..616f8186b85b534e5a8479b9d32ce616e6970dc4 --- /dev/null +++ b/utils/denoise/vis_denoise.py @@ -0,0 +1,201 @@ +import os +import sys +sys.path.append('.') +import numpy as np +import trimesh +import cv2 +import torch +from tqdm import tqdm + +from prepare_2Dmask.utils.pyt3d_wrapper import Pyt3DWrapper +from prepare_2Dmask.utils.json_to_caminfo import json_to_caminfo +from prepare_2Dmask.utils.colors import FAKE_COLOR_LIST +from prepare_2Dmask.utils.visualization import render_HO_meshes + +from utils.hoi_io2 import load_bg_imgs_with_resize + +# try: +# import polyscope as ps +# ps.init() +# ps.set_ground_plane_mode("none") +# ps.look_at((0., 0.0, 1.5), (0., 0., 1.)) +# ps.set_screenshot_extension(".png") +# except: +# pass + +import sys +sys.path.append("./manopth") +from manopth.manopth.manolayer import ManoLayer + +color = [ +(0,191/255.0,255/255.0), + (186/255.0,85/255.0,211/255.0), + (255/255.0,81/255.0,81/255.0), + (92/255.0,122/255.0,234/255.0), + (255/255.0,138/255.0,174/255.0), + (77/255.0,150/255.0,255/255.0), + (192/255.0,237/255.0,166/255.0) + # +] + +def seal(v, f): + circle_v_id = np.array([108, 79, 78, 121, 214, 215, 279, 239, 234, 92, 38, 122, 118, 117, 119, 120], dtype=np.int32) + center = (v[circle_v_id, :]).mean(0) + + # sealed_mesh = copy.copy(mesh_to_seal) + v = np.vstack([v, center]) + center_v_id = v.shape[0] - 1 + + for i in range(circle_v_id.shape[0]): + new_faces = [circle_v_id[i - 1], circle_v_id[i], center_v_id] + f = np.vstack([f, new_faces]) + return v, f, center + +def get_mano_model(ncomps=45, side='right', flat_hand_mean=False,): + # ncomps = 45 # mano root # + batch_size = 1 + mano_model = ManoLayer(mano_root='manopth/mano/models', use_pca=False if ncomps == 45 else True, ncomps=ncomps, flat_hand_mean=flat_hand_mean, side=side, center_idx=0) + return mano_model + +def vis_predicted(root, nokov_root, video_id, camera_list, stg1_use_t, stg2_use_t, seed, st, predicted_info_fn, optimized_fn=None, ws=60, device=0): + date = video_id[:8] + mano_model = get_mano_model(side='right') + faces = mano_model.th_faces.squeeze(0).numpy() + + H_downsampled = 750 + W_downsampled = 
1024 + save_height = 3000 + save_width = 4096 + dowmsampled_factor = 4 + save_fps = 30 + save_height_view = save_height // dowmsampled_factor + save_width_view = save_width // dowmsampled_factor + + ws = ws + is_toch = False + # predicted_info_data = np.load(predicted_info_fn, allow_pickle=True).item() + if optimized_fn is not None: + data = np.load(optimized_fn, allow_pickle=True).item() + print(f"keys of optimized dict: {data.keys()}") + optimized_out_hand_verts_woopt = data["bf_ct_verts"] + optimized_out_hand_verts = optimized_out_hand_verts_woopt + else: + optimized_out_hand_verts = None + + data = np.load(predicted_info_fn, allow_pickle=True).item() + + try: + targets = data['targets'] + except: + targets = data['tot_gt_rhand_joints'] + + outputs = data['outputs'] + if 'obj_verts' in data: + obj_verts = data['obj_verts'] + obj_faces = data['obj_faces'] + elif 'tot_obj_pcs' in data: + obj_verts = data['tot_obj_pcs'][0] + obj_faces = data['template_obj_fs'] + tot_base_pts = data["tot_base_pts"][0] + + if 'tot_obj_rot' in data: + tot_obj_rot = data['tot_obj_rot'][0] + tot_obj_trans = data['tot_obj_transl'][0] + obj_verts = np.matmul(obj_verts, tot_obj_rot) + tot_obj_trans.reshape(tot_obj_trans.shape[0], 1, 3) # ws x nn_obj x 3 # + + outputs = np.matmul(outputs, tot_obj_rot) + tot_obj_trans.reshape(tot_obj_trans.shape[0], 1, 3) # ws x nn_obj x 3 # + + # jts_radius = 0.01787 + jts_radius = 0.03378 + gray_color = (233 / 255., 241 / 255., 148 / 255.) + + camera_info_path = os.path.join(root, date, video_id, 'src', 'calibration.json') + cam_info = json_to_caminfo(camera_info_path, camera_list=camera_list) + + device = torch.device(device) + + pyt3d_wrapper_dict = {} + for camera in camera_list: + pyt3d_wrapper_dict[camera] = Pyt3DWrapper(rasterization_image_size=(W_downsampled, H_downsampled), camera_image_size=cam_info[camera]["image_size"], use_fixed_cameras=True, intrin=cam_info[camera]["intrinsic"], extrin=cam_info[camera]["extrinsic"], device=device, colors=FAKE_COLOR_LIST, use_ambient_lights=False) + + # frame_list = [str(i).zfill(5) for i in range(1, ws+1)] + frame_list = [str(i).zfill(5) for i in range(1+int(st), ws+int(st)+1)] + + rgb_batch = load_bg_imgs_with_resize(root, video_id, frame_list, camera_list, BATCH_SIZE=20, width=W_downsampled, height=H_downsampled) + + video_save_dir = os.path.join('/data3/hlyang/results/vis_dataset_denoise_test', date) + os.makedirs(video_save_dir, exist_ok=True) + video_save_path = os.path.join(video_save_dir, f"{video_id}_st_{st}_ws_{ws}_seed_{seed}_use_t_{stg1_use_t}_{stg2_use_t}.mp4") + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + + videoWriter = cv2.VideoWriter(video_save_path, fourcc, save_fps, (save_width_view * 4, save_height_view * 3)) + + maxx_ws = ws + # skipp = 6 + skipp = 1 + iidx = 1 + tot_hand_verts_woopt = [] + for i_fr in tqdm(range(0, min(maxx_ws, optimized_out_hand_verts.shape[0]), skipp)): + cur_base_pts = tot_base_pts + + if i_fr < obj_verts.shape[0]: + cur_obj_verts = obj_verts[i_fr] + cur_obj_faces = obj_faces + + + if optimized_out_hand_verts is not None: + sealed_v, seald_f, center_wopt = seal(optimized_out_hand_verts[i_fr], faces) + + # print(sealed_v.shape, seald_f.shape) + hand_mesh = trimesh.Trimesh(vertices=sealed_v, faces=seald_f) + # hand_mesh.export('/home/hlyang/HOI/HOI/tmp/hand_denoised.obj') + # exit(1) + # hand_mesh = ps.register_surface_mesh(f"cur_hand_mesh", sealed_v, seald_f, color=color[0 % len(color)]) + + # print(cur_obj_verts.shape, cur_obj_faces.shape) + obj_mesh = trimesh.Trimesh(vertices=cur_obj_verts, 
faces=cur_obj_faces) + # obj_mesh = ps.register_surface_mesh(f"cur_object", cur_obj_verts, cur_obj_faces, color=gray_color) + + meshes = [hand_mesh, obj_mesh] + frame = str(i_fr+1).zfill(5) + saved_img = np.zeros((save_height_view * 3, save_width_view * 4, 3)).astype(np.uint8) + + for c_idx, camera in enumerate(camera_list): + bg = rgb_batch[i_fr, c_idx, ...] + bg = cv2.cvtColor(bg, cv2.COLOR_BGR2RGB) + + img = render_HO_meshes(pyt3d_wrapper_dict[camera], meshes, bg) + img =cv2.cvtColor(img, cv2.COLOR_RGB2BGR) + img = cv2.resize(img, (save_width_view, save_height_view)) + + cv2.putText(img, f'{frame} {camera}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=2) + saved_img[save_height_view*(c_idx//4):save_height_view*((c_idx//4)+1), save_width_view*(c_idx%4):save_width_view*((c_idx%4)+1)] = img + + videoWriter.write(saved_img) + iidx += 1 + + videoWriter.release() + + print(iidx-1) + +if __name__=='__main__': + root = '/data3/hlyang/results' + upload_root = '/data2/HOI-mocap' + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + cuda = 1 + + video_id = '20231104_001' + date = video_id[:8] + stg1_use_t = '200' + stg2_use_t = '200' + seed = '0' + st = '30' + n_tag = '2' + + # predicted_info_fn = "./save_res/predicted_infos_sv_dict_seq_0_seed_110_tag_jts_spatial_t_200_hho__0_jts_spatial_t_200_multi_ntag_3.npy" + # optimized_fn = "./save_res/optimized_infos_sv_dict_seq_0_seed_110_tag_jts_t_50_rep_arctic_st_100__0_jts_spatial_t_200_dist_thres_0.001_with_proj_False_wmaskanchors_multi_ntag_3.npy" + predicted_info_fn = f'/data3/hlyang/results/denoise_test/{date}/{video_id}/predicted_infos_sv_dict_seq_0_seed_{seed}_tag_{video_id}_spatial_jts_t_{stg1_use_t}_st_{st}_hho__0_jts_spatial_t_{stg2_use_t}_multi_ntag_{n_tag}.npy' + optimized_fn = f'/data3/hlyang/results/denoise_test/{date}/{video_id}/optimized_infos_sv_dict_seq_0_seed_{seed}_tag_{video_id}_spatial_jts_t_{stg1_use_t}_st_{st}_hho__0_jts_spatial_t_{stg2_use_t}_dist_thres_0.001_with_proj_False_wmaskanchors_multi_ntag_{n_tag}.npy' + # ws = 60 + ws = 30*int(n_tag) + 30 + vis_predicted(root, upload_root, video_id, camera_list, stg1_use_t, stg2_use_t, seed, st, predicted_info_fn, optimized_fn=optimized_fn, ws=ws, device=cuda) \ No newline at end of file diff --git a/utils/extract_obj_meshes.py b/utils/extract_obj_meshes.py new file mode 100644 index 0000000000000000000000000000000000000000..43320601823914eecd122d6bd1396fdf3a743acd --- /dev/null +++ b/utils/extract_obj_meshes.py @@ -0,0 +1,171 @@ + + +import numpy as np +import trimesh +import os + + +def extract_obj_meshes(sv_dict_fn): + # sv_fn = "/data2/datasets/sim/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # if not os.path.exists(sv_fn): + # sv_fn = "/data/xueyi/arctic/processed_sv_dicts/box_grab_01_extracted_dict.npy" + active_passive_sv_dict = np.load(sv_dict_fn, allow_pickle=True).item() # + + obj_verts = active_passive_sv_dict['obj_verts'] # object orientation # + obj_faces = active_passive_sv_dict['obj_faces'] + + obj_mesh = trimesh.Trimesh(obj_verts[0], obj_faces) + obj_mesh_sv_fn = os.path.join( + "/home/xueyi/diffsim/NeuS/utils", "init_box.obj" + ) + obj_mesh.export(obj_mesh_sv_fn) + + obj_verts_reversed = obj_verts[:, :, [1, 0, 2]] + obj_verts_reversed[:, :, 1] = -obj_verts_reversed[:, :, 1] + obj_mesh_reversed = trimesh.Trimesh(obj_verts_reversed[0], obj_faces) + obj_mesh_sv_fn_reversed = os.path.join( + 
"/home/xueyi/diffsim/NeuS/utils", "init_box_reversed.obj" + ) + obj_mesh_reversed.export(obj_mesh_sv_fn_reversed) + + +def extract_obj_meshes_boundingbox(sv_dict_fn): + # sv_fn = "/data2/datasets/sim/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # if not os.path.exists(sv_fn): + # sv_fn = "/data/xueyi/arctic/processed_sv_dicts/box_grab_01_extracted_dict.npy" + active_passive_sv_dict = np.load(sv_dict_fn, allow_pickle=True).item() # + + obj_verts = active_passive_sv_dict['obj_verts'] # object orientation # + obj_faces = active_passive_sv_dict['obj_faces'] + + init_obj_verts = obj_verts[0] + minn_box = np.min(init_obj_verts, axis=0, keepdims=True) + maxx_box = np.max(init_obj_verts, axis=0, keepdims=True) + # get the minn box and maxx box # + + + box_triangle_mesh_faces = np.array([ + [1, 2, 3], # Left face (triangle 1) + [2, 3, 4], # Left face (triangle 2) + [5, 6, 7], # Right face (triangle 1) + [6, 7, 8], # Right face (triangle 2) + [1, 3, 5], # Bottom face (triangle 1) + [3, 5, 7], # Bottom face (triangle 2) + [2, 4, 6], # Top face (triangle 1) + [4, 6, 8], # Top face (triangle 2) + [1, 2, 5], # Front face (triangle 1) + [2, 5, 6], # Front face (triangle 2) + [3, 4, 7], # Back face (triangle 1) + [4, 7, 8] # Back face (triangle 2) + ], dtype=np.int32) - 1 + + box_vertices = np.array([ + [-1, -1, -1], + [-1, -1, 1], + [-1, 1, -1], + [-1, 1, 1], + [1, -1, -1], + [1, -1, 1], + [1, 1, -1], + [1, 1, 1] + ], dtype=np.float32) + + box_vertices = (box_vertices - (-1)) / 2 + + + box_vertices = box_vertices * (maxx_box - minn_box) + minn_box + + box_mesh= trimesh.Trimesh(box_vertices, box_triangle_mesh_faces) + # + + # obj_mesh = trimesh.Trimesh(obj_verts[0], obj_faces) + obj_mesh_sv_fn = os.path.join( + "/home/xueyi/diffsim/NeuS/utils", "init_bounding_box.obj" + ) + box_mesh.export(obj_mesh_sv_fn) + + # obj_verts_reversed = obj_verts[:, :, [1, 0, 2]] + # obj_verts_reversed[:, :, 1] = -obj_verts_reversed[:, :, 1] + # obj_mesh_reversed = trimesh.Trimesh(obj_verts_reversed[0], obj_faces) + # obj_mesh_sv_fn_reversed = os.path.join( + # "/home/xueyi/diffsim/NeuS/utils", "init_box_reversed.obj" + # ) + # obj_mesh_reversed.export(obj_mesh_sv_fn_reversed) + +def extract_obj_meshes_taco(pkl_fn): + import pickle as pkl + + sv_dict = pkl.load(open(pkl_fn, "rb")) + + print(f"sv_dict: {sv_dict.keys()}") + + # maxx_ws = min(maxx_ws, len(sv_dict['obj_verts']) - start_idx) + + obj_pcs = sv_dict['obj_verts'] # [start_idx: start_idx + maxx_ws] + # obj_pcs = torch.from_numpy(obj_pcs).float().cuda() + + # self.obj_pcs = obj_pcs + # # obj_vertex_normals = sv_dict['obj_vertex_normals'] + # # obj_vertex_normals = torch.from_numpy(obj_vertex_normals).float().cuda() + # self.obj_normals = torch.zeros_like(obj_pcs[0]) ### get the obj naormal vectors ## + + object_pose = sv_dict['obj_pose'] # [start_idx: start_idx + maxx_ws] + # object_pose = torch.from_numpy(object_pose).float().cuda() ### nn_frames x 4 x 4 ### + object_global_orient_mtx = object_pose[:, :3, :3 ] ## nn_frames x 3 x 3 ## + object_transl = object_pose[:, :3, 3] ## nn_frmaes x 3 ## + + obj_faces = sv_dict['obj_faces'] + # obj_faces = torch.from_numpy(obj_faces).long().cuda() + # self.obj_faces = obj_faces # [0] ### obj faces ## + + # obj_verts = sv_dict['obj_verts'] + # minn_verts = np.min(obj_verts, axis=0) + # maxx_verts = np.max(obj_verts, axis=0) + # extent = maxx_verts - minn_verts + # center_ori = (maxx_verts + minn_verts) / 2 + # scale_ori = np.sqrt(np.sum(extent ** 2)) + # obj_verts = 
torch.from_numpy(obj_verts).float().cuda() + + init_obj_verts = obj_pcs[0] + init_obj_ornt_mtx = object_global_orient_mtx[0] + init_obj_transl = object_transl[0] + + canon_obj_verts = np.matmul( + init_obj_ornt_mtx.T, (init_obj_verts - init_obj_transl[None]).T + ).T + # self.obj_verts = canon_obj_verts.clone() + # obj_verts = canon_obj_verts.clone() + canon_obj_mesh = trimesh.Trimesh(vertices=canon_obj_verts, faces=obj_faces) + canon_obj_mesh_export_dir = "/".join(pkl_fn.split("/")[:-1]) + pkl_name = pkl_fn.split("/")[-1].split(".")[0] + canon_obj_mesh_sv_fn = f"{pkl_name}.obj" + canon_obj_mesh.export(os.path.join(canon_obj_mesh_export_dir, canon_obj_mesh_sv_fn)) + print(f"canon_obj_mesh_sv_fn: {canon_obj_mesh_sv_fn}") + + +if __name__=='__main__': + + pkl_fn = "/data3/datasets/xueyi/taco/processed_data/20230917/right_20230917_004.pkl" + pkl_fn = "/data3/datasets/xueyi/taco/processed_data/20230917/right_20230917_030.pkl" + pkl_fn = "/data3/datasets/xueyi/taco/processed_data/20230917/right_20230917_037.pkl" + pkl_fn = "/data/xueyi/taco/processed_data/20230917/right_20230917_037.pkl" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230917" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231010" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230919" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231104" + tot_pkl_fns = os.listdir(pkl_root_folder) + tot_pkl_fns = [fn for fn in tot_pkl_fns if fn.endswith(".pkl")] + for i_fn, cur_pkl_fn in enumerate(tot_pkl_fns): + cur_full_pkl_fn = os.path.join(pkl_root_folder, cur_pkl_fn) + extract_obj_meshes_taco(cur_full_pkl_fn) + exit(0) + + extract_obj_meshes_taco(pkl_fn) + exit(0) + + sv_dict_fn = "/data2/datasets/sim/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # extract_obj_meshes(sv_dict_fn=sv_dict_fn) + + extract_obj_meshes_boundingbox(sv_dict_fn) + + diff --git a/utils/grab_preprocessing.py b/utils/grab_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..37359a90b67c3c6a942eb31a33f1248dda084413 --- /dev/null +++ b/utils/grab_preprocessing.py @@ -0,0 +1,257 @@ + + +import numpy as np +import trimesh +import os +# try: +# import mesh2sdf +# except: +# pass +import mesh2sdf +import time +from scipy.spatial.transform import Rotation as R + +def extract_obj_meshes(sv_dict_fn): + # sv_fn = "/data2/datasets/sim/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # if not os.path.exists(sv_fn): + # sv_fn = "/data/xueyi/arctic/processed_sv_dicts/box_grab_01_extracted_dict.npy" + active_passive_sv_dict = np.load(sv_dict_fn, allow_pickle=True).item() # + + obj_verts = active_passive_sv_dict['obj_verts'] # object orientation # + obj_faces = active_passive_sv_dict['obj_faces'] + + obj_mesh = trimesh.Trimesh(obj_verts[0], obj_faces) + obj_mesh_sv_fn = os.path.join( + "/home/xueyi/diffsim/NeuS/utils", "init_box.obj" + ) + obj_mesh.export(obj_mesh_sv_fn) + + obj_verts_reversed = obj_verts[:, :, [1, 0, 2]] + obj_verts_reversed[:, :, 1] = -obj_verts_reversed[:, :, 1] + obj_mesh_reversed = trimesh.Trimesh(obj_verts_reversed[0], obj_faces) + obj_mesh_sv_fn_reversed = os.path.join( + "/home/xueyi/diffsim/NeuS/utils", "init_box_reversed.obj" + ) + obj_mesh_reversed.export(obj_mesh_sv_fn_reversed) + + +def extract_obj_meshes_boundingbox(sv_dict_fn): + # sv_fn = "/data2/datasets/sim/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # if not os.path.exists(sv_fn): + # sv_fn = 
"/data/xueyi/arctic/processed_sv_dicts/box_grab_01_extracted_dict.npy" + active_passive_sv_dict = np.load(sv_dict_fn, allow_pickle=True).item() # + + obj_verts = active_passive_sv_dict['obj_verts'] # object orientation # + obj_faces = active_passive_sv_dict['obj_faces'] + + init_obj_verts = obj_verts[0] + minn_box = np.min(init_obj_verts, axis=0, keepdims=True) + maxx_box = np.max(init_obj_verts, axis=0, keepdims=True) + # get the minn box and maxx box # + + + box_triangle_mesh_faces = np.array([ + [1, 2, 3], # Left face (triangle 1) + [2, 3, 4], # Left face (triangle 2) + [5, 6, 7], # Right face (triangle 1) + [6, 7, 8], # Right face (triangle 2) + [1, 3, 5], # Bottom face (triangle 1) + [3, 5, 7], # Bottom face (triangle 2) + [2, 4, 6], # Top face (triangle 1) + [4, 6, 8], # Top face (triangle 2) + [1, 2, 5], # Front face (triangle 1) + [2, 5, 6], # Front face (triangle 2) + [3, 4, 7], # Back face (triangle 1) + [4, 7, 8] # Back face (triangle 2) + ], dtype=np.int32) - 1 + + box_vertices = np.array([ + [-1, -1, -1], + [-1, -1, 1], + [-1, 1, -1], + [-1, 1, 1], + [1, -1, -1], + [1, -1, 1], + [1, 1, -1], + [1, 1, 1] + ], dtype=np.float32) + + box_vertices = (box_vertices - (-1)) / 2 + + + box_vertices = box_vertices * (maxx_box - minn_box) + minn_box + + box_mesh= trimesh.Trimesh(box_vertices, box_triangle_mesh_faces) + # + + # obj_mesh = trimesh.Trimesh(obj_verts[0], obj_faces) + obj_mesh_sv_fn = os.path.join( + "/home/xueyi/diffsim/NeuS/utils", "init_bounding_box.obj" + ) + box_mesh.export(obj_mesh_sv_fn) + + # obj_verts_reversed = obj_verts[:, :, [1, 0, 2]] + # obj_verts_reversed[:, :, 1] = -obj_verts_reversed[:, :, 1] + # obj_mesh_reversed = trimesh.Trimesh(obj_verts_reversed[0], obj_faces) + # obj_mesh_sv_fn_reversed = os.path.join( + # "/home/xueyi/diffsim/NeuS/utils", "init_box_reversed.obj" + # ) + # obj_mesh_reversed.export(obj_mesh_sv_fn_reversed) + +def extract_obj_meshes_taco(pkl_fn): + import pickle as pkl + + sv_dict = pkl.load(open(pkl_fn, "rb")) + + print(f"sv_dict: {sv_dict.keys()}") + + # maxx_ws = min(maxx_ws, len(sv_dict['obj_verts']) - start_idx) + + obj_pcs = sv_dict['obj_verts'] # [start_idx: start_idx + maxx_ws] + # obj_pcs = torch.from_numpy(obj_pcs).float().cuda() + + # self.obj_pcs = obj_pcs + # # obj_vertex_normals = sv_dict['obj_vertex_normals'] + # # obj_vertex_normals = torch.from_numpy(obj_vertex_normals).float().cuda() + # self.obj_normals = torch.zeros_like(obj_pcs[0]) ### get the obj naormal vectors ## + + object_pose = sv_dict['obj_pose'] # [start_idx: start_idx + maxx_ws] + # object_pose = torch.from_numpy(object_pose).float().cuda() ### nn_frames x 4 x 4 ### + object_global_orient_mtx = object_pose[:, :3, :3 ] ## nn_frames x 3 x 3 ## + object_transl = object_pose[:, :3, 3] ## nn_frmaes x 3 ## + + obj_faces = sv_dict['obj_faces'] + # obj_faces = torch.from_numpy(obj_faces).long().cuda() + # self.obj_faces = obj_faces # [0] ### obj faces ## + + # obj_verts = sv_dict['obj_verts'] + # minn_verts = np.min(obj_verts, axis=0) + # maxx_verts = np.max(obj_verts, axis=0) + # extent = maxx_verts - minn_verts + # center_ori = (maxx_verts + minn_verts) / 2 + # scale_ori = np.sqrt(np.sum(extent ** 2)) + # obj_verts = torch.from_numpy(obj_verts).float().cuda() + + init_obj_verts = obj_pcs[0] + init_obj_ornt_mtx = object_global_orient_mtx[0] + init_obj_transl = object_transl[0] + + canon_obj_verts = np.matmul( + init_obj_ornt_mtx.T, (init_obj_verts - init_obj_transl[None]).T + ).T + # self.obj_verts = canon_obj_verts.clone() + # obj_verts = canon_obj_verts.clone() + 
canon_obj_mesh = trimesh.Trimesh(vertices=canon_obj_verts, faces=obj_faces) + canon_obj_mesh_export_dir = "/".join(pkl_fn.split("/")[:-1]) + pkl_name = pkl_fn.split("/")[-1].split(".")[0] + canon_obj_mesh_sv_fn = f"{pkl_name}.obj" + canon_obj_mesh.export(os.path.join(canon_obj_mesh_export_dir, canon_obj_mesh_sv_fn)) + print(f"canon_obj_mesh_sv_fn: {canon_obj_mesh_sv_fn}") + +def compute_sdf(obj_file_name): + filename = obj_file_name + + # init_mesh_scale = 1.0 + init_mesh_scale = 0.8 + + mesh_scale = 0.8 + size = 128 + level = 2 / size + + mesh = trimesh.load(filename, force='mesh') + + # normalize mesh + vertices = mesh.vertices + vertices = vertices * init_mesh_scale + bbmin = vertices.min(0) # + bbmax = vertices.max(0) # + center = (bbmin + bbmax) * 0.5 + scale = 2.0 * mesh_scale / (bbmax - bbmin).max() # bounding box's max # # bbmax - bbmin # + vertices = (vertices - center) * scale # (vertices - center) * scale # + + scaled_bbmin = vertices.min(0) + scaled_bbmax = vertices.max(0) + print(f"scaled_bbmin: {scaled_bbmin}, scaled_bbmax: {scaled_bbmax}") + + + t0 = time.time() + sdf, mesh = mesh2sdf.compute( ## sdf and mesh ## + vertices, mesh.faces, size, fix=True, level=level, return_mesh=True) + t1 = time.time() + + print(f"sdf: {sdf.shape}, mesh: {mesh.vertices.shape}") + + mesh.vertices = mesh.vertices / scale + center + mesh.export(filename[:-4] + '.fixed.obj') ## .fixed.obj ## + np.save(filename[:-4] + '.npy', sdf) ## .npy ## + print('It takes %.4f seconds to process %s' % (t1-t0, filename)) + + + +def get_grab_data_dict(data_fn): + grab_data_dict = np.load(data_fn, allow_pickle=True).item() + print(f"grab_data_keys: { grab_data_dict.keys()}") + + +if __name__=='__main__': + + + ## data_fn ## ### that we can use in the GRAB + data_fn = "/data1/xueyi/GRAB_extracted_test/train/363_sv_dict.npy" + get_grab_data_dict(data_fn) + exit(0) + + # compute_sdf('/data/xueyi/taco/processed_data/20230917/right_20230917_032.obj') + # exit(0) + + pkl_fn = "/data3/datasets/xueyi/taco/processed_data/20230917/right_20230917_004.pkl" + pkl_fn = "/data3/datasets/xueyi/taco/processed_data/20230917/right_20230917_030.pkl" + pkl_fn = "/data3/datasets/xueyi/taco/processed_data/20230917/right_20230917_037.pkl" + pkl_fn = "/data/xueyi/taco/processed_data/20230917/right_20230917_037.pkl" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230917" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231010" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230919" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231104" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231105" + pkl_root_folder = "/data/xueyi/taco/processed_data/20230917" ## pkl folder + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231102" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230923" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230926" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230927" + pkl_root_folder = "/data/xueyi/taco/processed_data/20230930" + pkl_root_folder = "/data2/datasets/xueyi/taco/processed_data/20231031" + # pkl_root_folder = "/data2/datasets/xueyi/taco/processed_data/20230929" + # pkl_root_folder = "/data2/datasets/xueyi/taco/processed_data/20230930" + tot_pkl_fns = os.listdir(pkl_root_folder) + tot_pkl_fns = [fn for fn in tot_pkl_fns if fn.endswith(".pkl")] + # tot_pkl_fns = ['right_20230930_001.pkl'] + # tot_pkl_fns = 
['/data/xueyi/taco/processed_data/20230917/right_20230917_032.obj'] + for i_fn, cur_pkl_fn in enumerate(tot_pkl_fns): + cur_full_pkl_fn = os.path.join(pkl_root_folder, cur_pkl_fn) + extract_obj_meshes_taco(cur_full_pkl_fn) + # compute_sdf(cur_full_fn) + # exit(0) + + # pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231104" + # pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231105" + # pkl_root_folder = "/data/xueyi/taco/processed_data/20230917" + # pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231102" + tot_fns = os.listdir(pkl_root_folder) + tot_fns = [fn for fn in tot_fns if fn.endswith(".obj")] + for cur_fn in tot_fns: + cur_full_fn = os.path.join(pkl_root_folder, cur_fn) + compute_sdf(cur_full_fn) + + # obj_mesh_fn = "/data3/datasets/xueyi/taco/processed_data/20231104/right_20231104_017.obj" + # compute_sdf(obj_mesh_fn) + exit(0) + + extract_obj_meshes_taco(pkl_fn) + exit(0) + + sv_dict_fn = "/data2/datasets/sim/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # extract_obj_meshes(sv_dict_fn=sv_dict_fn) + + extract_obj_meshes_boundingbox(sv_dict_fn) + + diff --git a/utils/init_bounding_box.obj b/utils/init_bounding_box.obj new file mode 100644 index 0000000000000000000000000000000000000000..69cf6f4c5dc906ecac1ba6c3718b50b9683d4fe0 --- /dev/null +++ b/utils/init_bounding_box.obj @@ -0,0 +1,20 @@ +v -0.69829494 -0.67062342 -0.41100514 +v -0.69829494 -0.67062342 -0.00599849 +v -0.69829494 0.25977796 -0.41100514 +v -0.69829494 0.25977796 -0.00599849 +v 0.53622180 -0.67062342 -0.41100514 +v 0.53622180 -0.67062342 -0.00599849 +v 0.53622180 0.25977796 -0.41100514 +v 0.53622180 0.25977796 -0.00599849 +f 1 2 3 +f 2 3 4 +f 5 6 7 +f 6 7 8 +f 1 3 5 +f 3 5 7 +f 2 4 6 +f 4 6 8 +f 1 2 5 +f 2 5 6 +f 3 4 7 +f 4 7 8 \ No newline at end of file diff --git a/utils/init_box.obj b/utils/init_box.obj new file mode 100644 index 0000000000000000000000000000000000000000..5d8918a54d30c7041a0c237f975b376079e027fa --- /dev/null +++ b/utils/init_box.obj @@ -0,0 +1,11897 @@ +v -0.22805248 -0.64802873 -0.39543864 +v -0.63894272 -0.41075331 -0.01564734 +v -0.53797120 0.15222843 -0.01929682 +v 0.17117184 -0.00595185 -0.01511540 +v -0.03853312 -0.65378946 -0.23400235 +v -0.62572032 -0.45813864 -0.40319827 +v -0.48931840 0.06723945 -0.03140957 +v 0.40861440 -0.66878164 -0.10170428 +v -0.58482176 0.22437581 -0.12320501 +v 0.41263872 0.01486603 -0.02886097 +v 0.42774016 -0.35338861 -0.02293481 +v 0.49333760 -0.58559424 -0.37035972 +v -0.66396928 -0.20665558 -0.03344155 +v 0.49012992 -0.30806261 -0.10738873 +v -0.67114496 -0.59839165 -0.23880762 +v 0.44628224 -0.26624233 -0.38263312 +v 0.08913921 -0.62525225 -0.38002089 +v -0.21821183 -0.57933193 -0.02292545 +v 0.53416705 0.21207507 -0.11479323 +v -0.44942591 -0.61114949 -0.11310934 +v 0.30096897 -0.66269177 -0.01144719 +v 0.12608513 0.20088278 -0.03165383 +v 0.48591617 0.19608571 -0.34440333 +v 0.02394625 0.00236418 -0.02868692 +v 0.49321473 -0.09902169 -0.10919006 +v -0.28075263 -0.64778644 -0.01261404 +v -0.64971775 0.08531128 -0.11866357 +v 0.49519873 -0.38940489 -0.22516291 +v -0.67657727 -0.00590211 -0.25334430 +v 0.52210689 -0.27000064 -0.10887048 +v -0.40456191 0.24738574 -0.40630475 +v 0.22658049 0.02640445 -0.02873714 +v 0.48887041 -0.63278222 -0.14893651 +v -0.53475070 -0.64067954 -0.01413835 +v 0.25562626 0.20755221 -0.12100195 +v -0.67626750 0.05568581 -0.12842475 +v -0.32957694 -0.65239024 -0.10441884 +v 0.30023938 0.20224766 -0.11663882 +v 0.44902146 0.22637939 -0.35969830 +v 
0.49231106 0.18802884 -0.11481238 +v -0.39433214 0.24806629 -0.10903899 +v 0.50823426 -0.40588704 -0.10980966 +v 0.49907458 -0.19389606 -0.11467195 +v 0.52201730 -0.34389728 -0.29483676 +v -0.63206142 -0.46489301 -0.02855713 +v 0.02265346 -0.49183649 -0.02203138 +v 0.50309378 -0.12207241 -0.30376706 +v 0.00961026 0.24080211 -0.26258630 +v 0.18199298 -0.41279715 -0.00928031 +v 0.32945666 0.19148363 -0.03140829 +v -0.12141566 0.23362038 -0.01859807 +v 0.47890690 -0.49031484 -0.00772835 +v -0.22756350 -0.65853065 -0.11091353 +v 0.51169026 0.19334386 -0.19361801 +v 0.51367426 -0.55365640 -0.14144775 +v -0.57020158 0.22669546 -0.26116008 +v -0.12978174 -0.02775381 -0.40219951 +v -0.61669630 -0.63658512 -0.11146376 +v 0.25632259 -0.62756884 -0.01959641 +v 0.53230339 0.13592505 -0.39748296 +v 0.52314371 0.10486837 -0.11441535 +v -0.19447549 -0.26308849 -0.02507999 +v -0.16430333 -0.65056324 -0.28733373 +v 0.50615299 0.05405315 -0.11925679 +v -0.22980349 0.24438648 -0.11912104 +v -0.42636541 0.21515231 -0.03652549 +v 0.49966595 -0.28826499 -0.10740447 +v 0.51017731 -0.66528517 -0.32207659 +v 0.25072643 -0.66398275 -0.10338902 +v 0.23698179 0.20317785 -0.12069768 +v 0.49061379 -0.63532507 -0.25725973 +v -0.02904317 -0.61832672 -0.38204587 +v 0.42382595 -0.66637659 -0.25560883 +v 0.49855235 0.06858470 -0.11095183 +v 0.03263235 0.08758285 -0.01669163 +v 0.53006339 -0.01887808 -0.36265987 +v -0.39446780 -0.61701035 -0.03161000 +v 0.48635396 -0.41692981 -0.02485956 +v 0.13246724 0.23732848 -0.26061708 +v -0.18537468 -0.25793800 -0.01233318 +v -0.41802236 -0.51462436 -0.38651261 +v 0.25510404 -0.57394081 -0.00717641 +v 0.44850692 0.19619539 -0.11747885 +v 0.48586500 0.01561255 -0.01532264 +v 0.05529604 -0.25507280 -0.38586685 +v 0.03630340 -0.66275865 -0.10886793 +v -0.57171196 0.08440588 -0.01852999 +v -0.09649916 -0.36495891 -0.39855877 +v -0.66663420 -0.47968063 -0.02958355 +v -0.18690300 -0.28525478 -0.40013945 +v 0.15456004 0.20252709 -0.11967978 +v 0.49011460 -0.22121820 -0.11195186 +v 0.03595268 0.24066806 -0.21183735 +v 0.17621508 0.23712188 -0.15079275 +v 0.50458372 -0.66693032 -0.15133786 +v 0.08976644 0.23567662 -0.40076244 +v -0.24508411 -0.39621431 -0.38637963 +v -0.52557051 -0.63956970 -0.10915049 +v -0.38334459 -0.39483166 -0.40079692 +v -0.27975419 0.22278091 -0.11846995 +v -0.66967547 -0.54090685 -0.02979675 +v 0.48310789 -0.63506615 -0.37681100 +v 0.46937093 -0.63695109 -0.02402507 +v 0.45304069 -0.63826776 -0.30098143 +v 0.07956229 -0.62707460 -0.02242884 +v -0.29190651 0.24684635 -0.40194973 +v 0.48666629 -0.18773468 -0.11114545 +v 0.22978565 0.23447818 -0.26503617 +v 0.05418757 0.20719820 -0.22864558 +v -0.03751931 -0.65781564 -0.08674803 +v 0.48813573 -0.28164241 -0.02598386 +v 0.52418309 -0.19176668 -0.39438009 +v 0.36496133 0.13878255 -0.01719675 +v -0.00066811 -0.65401679 -0.11576220 +v 0.12452102 -0.66247898 -0.10596315 +v 0.29164550 -0.06424136 -0.38647324 +v -0.07464954 -0.62787670 -0.10900623 +v -0.58451450 -0.55148727 -0.40208673 +v -0.50846714 -0.61545801 -0.11173951 +v 0.50607622 0.10650832 -0.38520661 +v -0.22921722 -0.64805830 -0.01091058 +v -0.53394938 -0.53955638 -0.01345364 +v -0.47281402 0.18589169 -0.39335391 +v -0.36860666 -0.38567203 -0.01291278 +v -0.66808826 -0.60401523 -0.16762352 +v -0.32331002 -0.61854684 -0.06329235 +v 0.49852166 -0.11521968 -0.38266161 +v -0.67521274 -0.60632783 -0.11273486 +v -0.68503034 -0.63156945 -0.40028986 +v -0.43204346 0.15780139 -0.39345071 +v 0.50058246 0.15700045 -0.08159509 +v 0.49407494 0.00689183 -0.03081891 +v -0.48435962 
0.21818390 -0.12001383 +v -0.31133434 -0.64646804 -0.11395150 +v -0.68150777 0.05966483 -0.08141551 +v -0.46783993 -0.38359421 -0.02692048 +v -0.36086777 -0.07797901 -0.01570393 +v 0.27945223 0.23412146 -0.15428606 +v -0.68781817 -0.43509275 -0.26120180 +v 0.09534215 0.23214129 -0.01843722 +v 0.06914567 -0.62915105 -0.10678871 +v -0.29845753 -0.29394516 -0.02580002 +v -0.16669177 0.21372257 -0.25914532 +v 0.07187463 -0.03877764 -0.02801711 +v -0.41504249 0.25301445 -0.21533449 +v -0.34413561 0.21908994 -0.28053543 +v 0.11069191 -0.65428776 -0.10549121 +v 0.30205959 -0.28607655 -0.01064504 +v -0.68348920 -0.34713501 -0.31242499 +v -0.32301816 -0.33309069 -0.38736731 +v 0.50212616 0.19212191 -0.04577091 +v 0.33902344 -0.66506165 -0.10219876 +v -0.65449208 -0.04139799 -0.24942671 +v -0.48209912 -0.61491698 -0.06435281 +v 0.28283912 0.20140179 -0.15139319 +v 0.37462792 0.20652542 -0.11407406 +v -0.02640376 -0.38402405 -0.01050461 +v 0.09110025 0.17367175 -0.03111764 +v -0.03538423 -0.24197875 -0.01175827 +v 0.37753353 -0.63706291 -0.10405968 +v -0.57075191 -0.39251131 -0.01478901 +v 0.12460553 0.23900938 -0.12339608 +v 0.45246473 0.10120165 -0.39913368 +v 0.00879881 0.21030629 -0.11754949 +v 0.40009481 -0.13672914 -0.38474807 +v -0.68780023 -0.44416875 -0.01596011 +v -0.33976823 -0.59961098 -0.01139102 +v 0.27824649 -0.66252613 -0.24148516 +v 0.33267465 -0.06717184 -0.01421750 +v 0.15969801 -0.65814829 -0.00843603 +v -0.10698999 -0.22371536 -0.39999157 +v -0.45556471 0.24716850 -0.40725756 +v 0.09756169 -0.66358668 -0.10585463 +v 0.25288713 -0.42799065 -0.38223138 +v -0.09830390 0.20726980 -0.03433307 +v 0.04659466 -0.65738863 -0.10569931 +v -0.65396470 -0.17762375 -0.38902843 +v 0.50606346 -0.66783577 -0.31265989 +v -0.65773046 0.25434512 -0.12015086 +v 0.49576202 -0.35123158 -0.18629947 +v -0.46489334 -0.34125692 -0.40222761 +v -0.02311926 0.12689608 -0.40274444 +v 0.32037386 0.19914341 -0.34833154 +v -0.37418997 0.17593110 -0.03243684 +v 0.39261451 -0.29674771 -0.38271290 +v -0.58966517 0.21963820 -0.03623484 +v -0.25607669 0.21564621 -0.38822863 +v 0.49920267 0.07923672 -0.10567931 +v -0.00856821 -0.65480584 -0.10658317 +v 0.12849164 0.20505744 -0.22139470 +v -0.50195700 -0.65164065 -0.10929007 +v -0.06284276 -0.21773671 -0.02495233 +v -0.28561908 -0.28108564 -0.01288299 +v -0.53985268 -0.19663136 -0.01594607 +v 0.48387340 -0.61708772 -0.10561590 +v 0.52067852 -0.35371241 -0.11127184 +v 0.30251020 0.23047356 -0.02444295 +v -0.51137012 0.25483525 -0.29126087 +v -0.67058164 0.24927090 -0.02162584 +v -0.28381172 -0.62159723 -0.10999690 +v 0.25293836 -0.66068745 -0.10486226 +v -0.41542131 -0.06822884 -0.39116105 +v -0.69255155 -0.50773948 -0.11329318 +v -0.66960627 -0.34796476 -0.08060315 +v -0.26655987 0.15439005 -0.40539664 +v -0.69315571 -0.49699467 -0.01624863 +v 0.27286029 -0.66973549 -0.10533419 +v -0.08494579 -0.65287131 -0.33451360 +v 0.00680717 -0.62253916 -0.12296883 +v -0.47158259 -0.64139020 -0.24198411 +v -0.68975091 -0.45977011 -0.20063400 +v -0.24606963 -0.46809766 -0.01140209 +v 0.53121293 0.22589672 -0.21465404 +v -0.68380147 -0.04833184 -0.05989819 +v 0.09817869 0.23954397 -0.12062960 +v -0.65546995 -0.63625073 -0.11595710 +v -0.25307891 -0.65821248 -0.11067820 +v -0.04595699 0.13850525 -0.01742484 +v 0.11008269 -0.45665511 -0.02197521 +v 0.52497166 -0.22072484 -0.13734506 +v -0.01236978 -0.13279633 -0.01361833 +v 0.22688270 0.23387198 -0.30980384 +v 0.43269134 -0.64249861 -0.10734532 +v 0.37180942 0.19869176 -0.11981212 +v -0.67209202 0.11748087 -0.31261352 +v -0.13990386 
-0.62208700 -0.00977224 +v 0.53315598 0.22198834 -0.38049856 +v -0.01406962 0.20488207 -0.11492302 +v 0.12031502 -0.65799755 -0.11168461 +v 0.09282830 0.21021363 -0.11617115 +v -0.33837298 -0.24097297 -0.40199420 +v -0.21311986 -0.61694127 -0.20974962 +v -0.61354226 0.22318874 -0.39429840 +v 0.49867022 -0.01348301 -0.38550490 +v -0.17618929 -0.65208513 -0.01450432 +v 0.49295631 -0.57514691 -0.37459731 +v 0.52512527 -0.23934376 -0.25212508 +v -0.42041585 -0.63338023 -0.39936835 +v -0.44203505 -0.61100137 -0.17426333 +v 0.25659919 -0.12049794 -0.39841342 +v 0.04624399 0.22946832 -0.01813891 +v -0.30037233 0.19306226 -0.01904915 +v -0.61297649 -0.64215285 -0.11356893 +v -0.07422449 0.20624201 -0.03327134 +v 0.49595407 -0.00763616 -0.11084203 +v -0.35065073 0.05726640 -0.03043805 +v -0.13501681 -0.28195524 -0.01181061 +v -0.09233136 -0.65273100 -0.11193398 +v 0.50016272 -0.21344666 -0.27138767 +v 0.49100560 -0.40657514 -0.37955940 +v 0.49169424 -0.15125243 -0.02828095 +v -0.68138224 -0.12804714 -0.12407823 +v 0.02845712 -0.66185153 -0.10662530 +v -0.26183152 -0.33182314 -0.38716412 +v 0.13665552 -0.63271296 -0.10562059 +v 0.11175440 -0.66395009 -0.10831387 +v 0.16223504 0.23287779 -0.01918278 +v -0.67263472 -0.43465832 -0.11521792 +v 0.45405456 -0.38318792 -0.02249906 +v 0.52615952 -0.19925289 -0.20502861 +v -0.56693488 0.16409032 -0.40816993 +v 0.28179216 -0.27162758 -0.39668825 +v 0.46008593 -0.14625882 -0.38388231 +v 0.52891153 0.00671948 -0.39607570 +v -0.66901487 -0.60029811 -0.36531168 +v 0.52718097 -0.04182258 -0.39602187 +v -0.65827823 -0.23323186 -0.11682394 +v 0.18453777 -0.58595508 -0.02035601 +v 0.51100433 -0.44748151 -0.10830875 +v -0.59784943 0.23679081 -0.12304117 +v 0.06472465 -0.05970664 -0.38793734 +v -0.66204655 -0.27696088 -0.17653999 +v 0.22341649 -0.55250651 -0.02053218 +v 0.03468561 -0.65565979 -0.10631635 +v 0.05435153 -0.45093495 -0.02230075 +v 0.12644881 0.08673926 -0.40155268 +v 0.05244434 -0.62433040 -0.37917492 +v 0.18026514 -0.27970216 -0.39747339 +v 0.50994706 0.14960909 -0.38351357 +v -0.34920174 0.25030541 -0.29399437 +v -0.49534446 -0.05706112 -0.40572688 +v -0.65196782 -0.63527995 -0.24466953 +v -0.27381486 -0.61555666 -0.29020807 +v -0.37145326 -0.64435488 -0.22105001 +v -0.08239086 -0.41538203 -0.01062844 +v -0.64662766 -0.04904963 -0.39278579 +v -0.00829166 -0.62740064 -0.10841770 +v -0.51025134 -0.61438489 -0.03275812 +v 0.50755858 0.02063756 -0.29764983 +v 0.08419602 -0.45413291 -0.00933436 +v -0.67724526 -0.04116213 -0.12151303 +v 0.10787858 -0.10097378 -0.38731390 +v -0.69082862 -0.57921755 -0.30920383 +v 0.51635730 -0.46027502 -0.39048785 +v -0.60099822 -0.61179888 -0.05962882 +v 0.04656403 0.21397588 -0.11634094 +v -0.27324909 -0.13787274 -0.38928971 +v 0.07284499 -0.62813038 -0.06084673 +v 0.49621779 -0.02756455 -0.07682174 +v 0.22883347 -0.38813841 -0.00936627 +v -0.01557997 -0.62421870 -0.10774789 +v 0.38550803 0.19935034 -0.12052279 +v 0.52053267 -0.11614833 -0.10928624 +v -0.57448429 -0.60840714 -0.25468156 +v -0.54234093 -0.09001234 -0.40584880 +v -0.21332717 -0.45197570 -0.39905006 +v 0.07963667 -0.61595327 -0.00799431 +v -0.65104365 0.08641446 -0.22150491 +v -0.22112493 -0.62087429 -0.02752901 +v -0.10904557 0.24361166 -0.29656914 +v -0.13650413 -0.27049068 -0.39977497 +v 0.24232467 0.08180909 -0.02964908 +v 0.52287507 -0.29415938 -0.14278652 +v 0.03530771 -0.65953189 -0.10665637 +v -0.07936749 0.07937911 -0.02973590 +v -0.06493165 -0.65559047 -0.04563047 +v 0.50064403 0.22440103 -0.36546975 +v 0.39710739 0.08771992 -0.39945582 +v 0.49609235 
-0.03928223 -0.10928836 +v -0.55443436 -0.63783956 -0.39944303 +v -0.40170220 -0.11113720 -0.40401298 +v -0.19260652 -0.61905414 -0.02259268 +v 0.05381140 0.20883858 -0.11742481 +v 0.40909588 -0.63729072 -0.34381905 +v 0.20351508 -0.62552667 -0.37992260 +v 0.16223764 -0.62732977 -0.14413762 +v 0.46113300 -0.63405836 -0.37717271 +v -0.68836844 -0.30987698 -0.01853765 +v -0.08083180 0.02060724 -0.02901119 +v 0.35960340 0.22772039 -0.11645669 +v 0.01702932 0.23749220 -0.11745205 +v -0.60422379 0.22760943 -0.36488253 +v -0.68937707 -0.42473215 -0.11923976 +v 0.24322581 -0.60175228 -0.02000621 +v 0.48571157 0.19490567 -0.08236703 +v 0.25306645 0.20183052 -0.20314813 +v 0.50014997 -0.13393633 -0.11526261 +v 0.36004117 -0.63460988 -0.10467332 +v -0.03771371 0.20926189 -0.34849027 +v -0.22185963 0.24368732 -0.03379305 +v -0.65173995 0.07312231 -0.16466342 +v -0.47919083 -0.29816261 -0.02764434 +v -0.52039659 0.24928346 -0.02196968 +v -0.60620522 -0.63908130 -0.11535963 +v 0.51318038 -0.59532875 -0.29725832 +v -0.40759274 -0.61338800 -0.02477573 +v 0.09769750 0.22371621 -0.01813423 +v -0.04712170 -0.49548846 -0.00976033 +v -0.67690474 0.21543658 -0.06465793 +v -0.26663402 -0.64492607 -0.10889474 +v -0.08958698 0.21428797 -0.11762056 +v -0.32025322 -0.64440370 -0.10917049 +v -0.39621866 -0.61209518 -0.11401065 +v -0.04228842 0.20664468 -0.39017189 +v 0.49904406 0.19530936 -0.11744268 +v -0.02523370 0.02215915 -0.38922441 +v -0.11696362 -0.61373359 -0.39604717 +v -0.50884330 0.25396988 -0.35667649 +v 0.11638806 -0.66225302 -0.10623720 +v 0.25338390 -0.48736149 -0.39404732 +v -0.07952362 -0.13171721 -0.40085989 +v -0.36325610 -0.43739927 -0.40030882 +v 0.00131095 -0.40203479 -0.38461915 +v 0.49676567 -0.29652777 -0.14650664 +v -0.68086761 -0.22037902 -0.11735246 +v -0.11396073 -0.65193129 -0.29403713 +v -0.51173353 0.05754215 -0.40705395 +v -0.58772969 -0.64160454 -0.02678601 +v 0.51461911 -0.27652490 -0.01277362 +v 0.49241623 -0.15825401 -0.11279231 +v -0.67471081 -0.13443086 -0.40213057 +v 0.16481303 -0.09419323 -0.39945814 +v 0.50444055 -0.02876251 -0.15242088 +v 0.02014999 -0.04213786 -0.02807158 +v -0.12203497 0.21282414 -0.11902656 +v 0.49340439 -0.09190315 -0.11306722 +v -0.65532136 -0.08793025 -0.29923710 +v 0.48112664 -0.64421421 -0.10313752 +v -0.36227560 -0.63977396 -0.01126719 +v 0.49166360 -0.16490476 -0.10995349 +v 0.24267800 -0.63231540 -0.06726822 +v -0.68793064 -0.57624662 -0.11524133 +v -0.53191400 0.21221986 -0.40781546 +v 0.52129048 -0.06060470 -0.05914029 +v -0.31949288 -0.64810199 -0.01375621 +v -0.01078504 0.20868225 -0.31093028 +v 0.53192472 0.22417271 -0.31851032 +v -0.39996904 -0.61217648 -0.21012837 +v 0.50842392 0.07998062 -0.22079723 +v 0.04841752 -0.66338325 -0.10899304 +v -0.39849448 -0.24824698 -0.02715283 +v 0.52622104 -0.10335593 -0.39488479 +v 0.49558809 0.16705367 -0.11590178 +v 0.41516569 -0.05945408 -0.39782491 +v -0.69631463 -0.63792074 -0.07801370 +v 0.29984537 -0.03826890 -0.39907408 +v -0.64900839 -0.63406038 -0.40059862 +v 0.52216345 -0.01568026 -0.11027138 +v 0.50908697 0.14436822 -0.11986021 +v 0.25416729 -0.66438067 -0.10932879 +v -0.54864615 0.25299391 -0.40625709 +v -0.03024615 -0.18557742 -0.01263915 +v 0.52943385 0.04480752 -0.11347872 +v -0.21898215 -0.62182730 -0.10814194 +v -0.08198375 0.23455371 -0.40424171 +v 0.10816282 -0.62678802 -0.10658189 +v -0.62254566 0.22097895 -0.11496218 +v 0.27071258 0.02231889 -0.01552265 +v 0.52766490 -0.12631787 -0.35446557 +v 0.01649690 -0.41653600 -0.01003779 +v -0.40678886 -0.34003046 -0.01357407 +v 0.47601178 
0.19485450 -0.11621285 +v -0.37518054 0.21092451 -0.40633476 +v 0.27624986 -0.44674581 -0.02166371 +v -0.24537830 -0.00439738 -0.01607203 +v 0.47423002 -0.36301261 -0.39343008 +v -0.66934758 -0.53754699 -0.22598761 +v 0.50838810 -0.57838362 -0.10312475 +v -0.60047334 0.20986064 -0.40826696 +v -0.25966054 0.20484185 -0.03181426 +v -0.32647142 -0.61419386 -0.29076788 +v 0.01891354 -0.22412239 -0.38647261 +v 0.49015066 -0.55436373 -0.10753214 +v -0.04199398 -0.49008790 -0.38402784 +v -0.53545702 -0.38873023 -0.38855991 +v 0.07340571 -0.64794457 -0.10590314 +v -0.66384357 -0.33477953 -0.16156970 +v 0.44570395 -0.40311113 -0.00898626 +v 0.10756123 -0.62652826 -0.34002680 +v 0.52110875 -0.35301599 -0.13932045 +v 0.02108955 -0.02365702 -0.01531881 +v -0.19247077 -0.51521653 -0.02323014 +v -0.65906405 -0.38844335 -0.38687050 +v 0.45243931 -0.50443745 -0.39161682 +v -0.00693477 -0.62194514 -0.11259656 +v -0.67273957 -0.55201370 -0.03226577 +v -0.64649701 -0.06377880 -0.39263090 +v -0.13331941 -0.65122467 -0.24177177 +v -0.67594725 0.25648254 -0.37492818 +v 0.08167195 0.20833005 -0.12131473 +v -0.14573541 -0.32516241 -0.39934537 +v -0.30820069 -0.58900261 -0.39853045 +v 0.50773787 0.10567474 -0.11977553 +v -0.65748197 -0.13542834 -0.21036538 +v -0.10100197 -0.32250407 -0.01129272 +v -0.19994597 0.15337628 -0.01798401 +v -0.38594276 -0.61712599 -0.06067183 +v 0.45883164 0.18814236 -0.38841394 +v 0.11550236 0.22757018 -0.12079386 +v 0.03558172 0.20867129 -0.11825250 +v 0.48126236 -0.63707310 -0.05896411 +v -0.48403940 -0.61082733 -0.11292636 +v -0.10916068 0.19881128 -0.40382254 +v -0.04421348 -0.33108780 -0.38555557 +v -0.69430244 -0.62629008 -0.19295374 +v 0.39560220 -0.10895160 -0.02692559 +v 0.50901020 0.07135799 -0.33509320 +v -0.47333860 -0.61447740 -0.02742773 +v -0.28326884 -0.61508256 -0.20234895 +v -0.36056292 0.24670316 -0.06917212 +v 0.13494556 0.22797476 -0.01803593 +v 0.42730781 -0.66743088 -0.10355669 +v 0.33884445 -0.26647118 -0.38370144 +v 0.07747101 -0.54969203 -0.02123348 +v 0.29576221 -0.66617548 -0.10831174 +v 0.18238749 0.20210682 -0.01805635 +v 0.00087325 0.23747593 -0.11641371 +v -0.24674787 0.21301389 -0.39157575 +v -0.01998051 -0.65552473 -0.10940666 +v 0.27187485 -0.54894227 -0.39311665 +v 0.09105949 0.06444319 -0.02942440 +v 0.01572125 -0.62683213 -0.07005002 +v 0.04004382 -0.66031659 -0.09555896 +v -0.51894242 -0.48219940 -0.40198568 +v 0.31336734 0.22813605 -0.11707416 +v -0.55628002 0.25579622 -0.30574456 +v -0.25148898 0.06124627 -0.01693589 +v -0.24097762 0.06744004 -0.39153087 +v 0.13978654 0.23562843 -0.35817719 +v -0.08232674 -0.65216535 -0.14478020 +v 0.50292766 -0.05567151 -0.11657201 +v -0.67668450 0.02126492 -0.18552114 +v 0.42631966 -0.45139661 -0.39268303 +v 0.49451038 -0.06229093 -0.11296041 +v 0.21604638 0.23596109 -0.17873156 +v 0.50257438 -0.09212114 -0.14595769 +v -0.05968098 0.19915411 -0.01831083 +v 0.11334942 -0.65669698 -0.00805091 +v -0.66651618 -0.54091829 -0.11358085 +v 0.26347551 0.20090704 -0.32711911 +v -0.58384609 -0.60780472 -0.02613833 +v 0.24258079 -0.01517365 -0.38741690 +v 0.49771807 -0.03774260 -0.11063011 +v 0.03438111 0.07836185 -0.02966909 +v -0.67199969 0.14977281 -0.26178542 +v 0.48932639 -0.60993427 -0.13466454 +v -0.68237281 -0.15926909 -0.12449270 +v -0.38535649 -0.64342409 -0.10907006 +v 0.24380192 -0.08580087 -0.38652921 +v -0.31192288 0.21039842 -0.40575495 +v -0.10807776 -0.11739641 -0.02682090 +v 0.01320736 -0.29110652 -0.01104037 +v 0.48968992 0.19476216 -0.10666232 +v 0.51266336 0.21888690 -0.01824062 +v -0.65189344 
-0.40661919 -0.40360296 +v -0.43756256 -0.02575587 -0.40547472 +v -0.52991712 0.22312598 -0.39291665 +v 0.26307872 -0.63467646 -0.10605081 +v -0.67992800 -0.08797371 -0.11972446 +v 0.49783328 0.04315342 -0.11088204 +v -0.65097696 0.07773332 -0.27613932 +v -0.16600288 -0.46153334 -0.38533574 +v 0.42122528 -0.25870836 -0.01113655 +v 0.48743200 -0.43758449 -0.06888487 +v -0.63439840 -0.47599813 -0.01494306 +v 0.09764384 0.23901790 -0.20738657 +v 0.28127521 -0.66823024 -0.10110341 +v -0.53514719 0.25621966 -0.17494547 +v 0.04813089 -0.57069165 -0.39519778 +v 0.25921825 -0.65533537 -0.10473417 +v -0.20671199 0.14998899 -0.39167362 +v -0.64837599 0.18336655 -0.29186621 +v 0.35828257 0.23147210 -0.19580150 +v 0.21672481 -0.11004935 -0.01357322 +v -0.06305247 0.24965432 -0.13475007 +v 0.36888865 0.19998233 -0.11847165 +v -0.69663966 -0.63751721 -0.04253675 +v 0.52824354 -0.11356750 -0.14939524 +v 0.48566818 -0.52816856 -0.06020118 +v 0.19731490 -0.33433133 -0.38406700 +v 0.02103842 -0.66331178 -0.10935943 +v 0.36841762 0.19922571 -0.25538605 +v -0.51891166 -0.40420017 -0.02730262 +v 0.48327714 -0.63323087 -0.03580929 +v -0.11504606 -0.65356296 -0.01284086 +v 0.42966050 -0.06768212 -0.01413239 +v 0.52208930 -0.33903569 -0.23477642 +v -0.05133022 -0.62289029 -0.10816024 +v 0.51496226 -0.53358847 -0.19450869 +v -0.53554654 0.21812889 -0.03540247 +v 0.48487714 -0.46524298 -0.02386038 +v 0.12981027 0.23356527 -0.11699373 +v -0.00894941 0.17039511 -0.39015612 +v -0.21076189 -0.61781317 -0.11029436 +v 0.52718115 0.21851486 -0.09620494 +v 0.52458787 -0.25181291 -0.19651045 +v -0.57699549 -0.13534249 -0.39154106 +v 0.36811811 -0.66311717 -0.10256218 +v 0.50317603 -0.00288260 -0.38375187 +v 0.52362531 0.10424442 -0.02009302 +v 0.50791204 -0.55522853 -0.06043523 +v -0.66305244 -0.41173187 -0.11503112 +v 0.43863076 0.22306342 -0.11864868 +v -0.40736988 0.24961847 -0.12064279 +v 0.29917476 -0.23960456 -0.01135230 +v -0.00155612 -0.64943504 -0.00791771 +v -0.63197660 -0.63594544 -0.01415537 +v -0.33246428 -0.52252352 -0.01173231 +v 0.34714404 0.22895384 -0.02274928 +v -0.51203036 -0.60962552 -0.23598796 +v 0.24428068 -0.62474877 -0.38007155 +v -0.17832412 0.20929497 -0.08162020 +v 0.48889893 -0.63609046 -0.21590897 +v 0.50146341 -0.13648906 -0.18222700 +v -0.54050779 0.03842060 -0.03131595 +v 0.50746405 -0.49871388 -0.00794495 +v 0.36460325 0.19250260 -0.01875595 +v 0.28860709 -0.62346935 -0.39212322 +v 0.08617765 -0.22170694 -0.01172125 +v -0.58547419 -0.07901117 -0.01714951 +v -0.66868699 -0.30806923 -0.11595242 +v 0.27640101 -0.09453989 -0.01385749 +v 0.08466470 -0.02397474 -0.01512647 +v 0.51199526 -0.62392253 -0.34032848 +v 0.53168422 0.08596093 -0.39596739 +v -0.31436250 -0.23229374 -0.01355195 +v 0.22041382 -0.66105789 -0.25603119 +v 0.14049830 -0.66591632 -0.10857388 +v 0.51607078 -0.24037603 -0.11064288 +v -0.28473818 -0.64657462 -0.17755364 +v -0.61699802 0.22820264 -0.27575314 +v -0.16281562 0.24499352 -0.30823591 +v 0.04956710 -0.48514131 -0.38339695 +v -0.15682010 0.24645729 -0.14432700 +v -0.10895322 0.20425938 -0.03174490 +v -0.18562266 0.21612063 -0.11874869 +v 0.47382054 -0.66650701 -0.15697806 +v 0.11862311 0.01328061 -0.01546393 +v 0.41358119 0.07610358 -0.02978611 +v 0.00858151 -0.65923959 -0.10626018 +v -0.68475097 -0.08390868 -0.02564469 +v -0.29342937 0.21732143 -0.32833511 +v 0.29902375 -0.21981028 -0.02476254 +v 0.51795751 -0.28014746 -0.10736702 +v 0.05986087 -0.50686717 -0.02169860 +v 0.08812839 -0.66261315 -0.10398521 +v -0.67534041 -0.22040009 -0.40393150 +v -0.22246617 0.20861895 
-0.03263089 +v 0.52340007 0.11731789 -0.01824870 +v 0.49196839 -0.50652003 -0.16811205 +v -0.53890777 0.25594625 -0.12259605 +v 0.32628775 -0.66398001 -0.27218342 +v -0.66063833 -0.61158288 -0.11253784 +v 0.49476135 -0.48396957 -0.37634525 +v -0.63571417 -0.01500136 -0.40719947 +v 0.24248360 -0.66576385 -0.10239068 +v 0.19399464 0.13047349 -0.03030783 +v -0.66510040 0.25087309 -0.40737095 +v -0.65854424 0.11157839 -0.07867159 +v -0.56280792 0.03411748 -0.39316604 +v -0.65518552 -0.04954204 -0.15725297 +v 0.49830696 0.03452935 -0.10583378 +v -0.42900184 -0.12108860 -0.39060315 +v 0.48394024 -0.66724575 -0.10579463 +v -0.50690264 -0.61875880 -0.01258042 +v -0.65238744 -0.09939452 -0.39014593 +v -0.17260760 -0.40636328 -0.01120038 +v -0.13912536 -0.61736810 -0.38216799 +v 0.50123304 -0.66627717 -0.04368190 +v -0.25827032 -0.61525422 -0.38154137 +v 0.49212712 -0.63818181 -0.10289709 +v 0.49212712 -0.46197122 -0.10948411 +v -0.46192855 -0.20696884 -0.40367723 +v 0.26027817 -0.21291633 -0.01177444 +v -0.03719383 0.20605977 -0.11840910 +v -0.16364759 0.00998965 -0.40296486 +v -0.66367447 -0.64167720 -0.11455791 +v 0.52453417 0.22275437 -0.02335696 +v -0.65903831 -0.19703871 -0.24332373 +v 0.09136681 0.14287114 -0.40208846 +v -0.41718999 -0.64100945 -0.11025861 +v 0.53374249 0.22324513 -0.11474174 +v 0.02715945 -0.65018857 -0.10586271 +v 0.52270377 0.00340187 -0.06598010 +v 0.51619625 -0.51069772 -0.24712068 +v -0.13653463 0.21265206 -0.29312477 +v -0.39341527 -0.61251706 -0.29169685 +v 0.18337065 -0.41480610 -0.39569482 +v -0.14030039 -0.05525840 -0.01512519 +v -0.07737559 -0.07494636 -0.02762051 +v -0.18940631 0.20503277 -0.01869850 +v 0.50074154 0.17513888 -0.11224804 +v 0.52764970 -0.14543357 -0.20374049 +v -0.04235478 -0.62405258 -0.02281992 +v -0.34307286 -0.64541471 -0.29111195 +v -0.24997334 0.12288510 -0.03091764 +v -0.36916694 0.24975124 -0.36821327 +v -0.08538838 0.20750518 -0.03642846 +v -0.64565462 0.22634332 -0.17201516 +v 0.45541418 0.19830163 -0.11993213 +v 0.01920299 -0.65898800 -0.08825021 +v -0.22982869 -0.64872849 -0.28838611 +v -0.18130645 0.24167724 -0.11615753 +v 0.15163691 0.20411050 -0.27568525 +v -0.53445333 -0.10069206 -0.39172554 +v -0.48854741 0.13327359 -0.39359751 +v -0.46100181 0.22172689 -0.39123702 +v 0.43030059 -0.63476616 -0.02016876 +v 0.30643755 -0.62907350 -0.01940448 +v 0.02731563 0.23583108 -0.01937385 +v -0.51858133 -0.09614085 -0.01657928 +v 0.33535275 0.23158243 -0.25398132 +v 0.52480811 -0.22985354 -0.11382001 +v -0.05635796 0.23767349 -0.40326169 +v 0.25171500 0.20159499 -0.26965994 +v -0.27362260 -0.20236017 -0.38865671 +v -0.14844628 0.16018610 -0.03109807 +v -0.04675284 -0.65160233 -0.39482141 +v 0.13740332 0.04073169 -0.02902055 +v 0.49393964 -0.08177095 -0.11055437 +v -0.58999252 0.13764635 -0.03303558 +v -0.68302292 -0.13298805 -0.11872826 +v 0.06545196 -0.65609854 -0.11287700 +v 0.42066988 0.22958779 -0.20836404 +v 0.48358700 -0.43863443 -0.00848752 +v -0.67508692 -0.11552650 -0.39665252 +v -0.11142868 0.11755205 -0.39066616 +v 0.47975469 -0.63655841 -0.10753554 +v -0.61002707 0.03403779 -0.40766907 +v 0.17706285 0.20172472 -0.03614803 +v 0.37512749 -0.66532129 -0.28905952 +v 0.05206061 -0.65488160 -0.10653040 +v -0.41586643 -0.61196357 -0.25287512 +v 0.04095533 0.23739699 -0.02248757 +v 0.31561261 0.20482786 -0.11473451 +v -0.36725459 -0.61540890 -0.11040670 +v 0.49127725 0.08328089 -0.01635757 +v 0.51754797 -0.15503566 -0.01422474 +v 0.50249773 -0.14053586 -0.37864831 +v -0.62335443 0.20196676 -0.02041686 +v 0.19192621 -0.19790407 -0.38577577 
+v 0.49275693 -0.54647225 -0.27862513 +v 0.50981677 0.11630014 -0.38204736 +v 0.22153518 -0.36867845 -0.39596888 +v -0.28631762 -0.44112253 -0.38622898 +v -0.52552402 0.04858196 -0.01796997 +v 0.35462958 -0.67062342 -0.10449629 +v 0.50276142 -0.66696787 -0.08750593 +v -0.36549586 -0.64708817 -0.01578947 +v -0.47689426 -0.64119196 -0.15965475 +v -0.64328146 0.22889468 -0.30109718 +v -0.30498770 0.24290644 -0.02011217 +v -0.53239506 -0.63972330 -0.32237598 +v -0.08788946 0.24714072 -0.12266073 +v 0.32439342 -0.62981153 -0.37909278 +v -0.67363793 0.08087677 -0.12249051 +v -0.57531345 -0.61212397 -0.02896268 +v -0.36945105 0.21963149 -0.33490300 +v 0.53455919 0.22251043 -0.33587429 +v -0.32199121 -0.45868200 -0.39991349 +v 0.27439919 -0.66176856 -0.39104873 +v -0.24184273 0.03240599 -0.02955461 +v 0.51320112 -0.25084233 -0.11179781 +v -0.66513360 -0.52322847 -0.38538620 +v -0.68477392 -0.52337801 -0.40244401 +v 0.13967152 0.02227783 -0.38851821 +v -0.56803024 -0.63853097 -0.36750028 +v -0.66763216 -0.37472939 -0.03191299 +v -0.67805392 -0.04074515 -0.21159266 +v -0.03438032 0.23820585 -0.40286678 +v -0.67464912 -0.05006313 -0.36553466 +v 0.42248240 0.10856744 -0.01674525 +v -0.25568464 -0.64786178 -0.23745055 +v -0.67235279 -0.64354622 -0.11427023 +v 0.51977265 -0.41394019 -0.29806644 +v 0.10887217 -0.62586671 -0.15695593 +v 0.35729969 -0.63383728 -0.18042523 +v 0.51198769 -0.29072374 -0.11113098 +v -0.69122511 -0.36001709 -0.11114800 +v -0.64859855 0.16932985 -0.14089368 +v -0.30386639 -0.61463869 -0.23978873 +v -0.66675663 0.25691190 -0.27823365 +v -0.66996175 -0.60141355 -0.11338723 +v 0.51697201 -0.48035565 -0.19538021 +v 0.51895857 -0.42624813 -0.19076984 +v 0.01040689 -0.62304795 -0.17170835 +v 0.50980657 -0.43311471 -0.00877944 +v 0.20312881 -0.66274625 -0.10486651 +v -0.63373262 0.22915278 -0.19367291 +v -0.37876174 -0.59572428 -0.39899153 +v -0.63434958 -0.63592541 -0.12807113 +v -0.64548814 0.21712482 -0.11965638 +v -0.18738382 -0.39940044 -0.38595876 +v 0.46932274 -0.63429815 -0.01992918 +v -0.01628366 -0.62106341 -0.38108581 +v -0.52090830 -0.35246864 -0.40275165 +v -0.15125710 -0.61887276 -0.30119357 +v -0.34916046 0.22082202 -0.12009426 +v 0.39732274 -0.66558707 -0.05375288 +v 0.39210290 0.23058070 -0.16051818 +v 0.26241842 0.04479263 -0.38794288 +v -0.39219150 0.13824892 -0.40674710 +v 0.15178546 -0.09244242 -0.02695623 +v -0.57758158 -0.19951934 -0.39083147 +v -0.16325326 -0.21317597 -0.40059820 +v 0.34924594 0.22102718 -0.12057598 +v 0.48819506 -0.63745826 -0.32061082 +v 0.49282098 -0.46143794 -0.15397072 +v -0.03408334 -0.65545589 -0.01267021 +v -0.58850253 -0.35307312 -0.40347573 +v 0.39066419 -0.07107623 -0.38565576 +v 0.09855795 -0.65846002 -0.01192849 +v 0.49771315 0.03353437 -0.08427731 +v -0.58687949 -0.63524818 -0.40087095 +v 0.32211763 -0.63527167 -0.10282560 +v -0.17982157 -0.61802614 -0.25699377 +v -0.65626061 0.20326883 -0.11105779 +v 0.29015091 -0.65383542 -0.10344349 +v 0.46190387 -0.59331983 -0.37749508 +v 0.45345843 0.20702496 -0.11350936 +v -0.00591053 -0.62533736 -0.10759427 +v -0.24243148 -0.61591709 -0.15934835 +v 0.25969204 0.19903672 -0.03453138 +v -0.31894988 -0.47887641 -0.38612875 +v 0.47798324 -0.04489380 -0.01447793 +v 0.51741236 -0.13566889 -0.01411026 +v 0.49520180 -0.32866785 -0.11278976 +v 0.52341044 0.04377132 -0.11209229 +v -0.54414284 -0.44188496 -0.38820159 +v 0.09878836 -0.09376375 -0.02707155 +v 0.52020532 0.14196074 -0.11703118 +v 0.43896884 -0.52812159 -0.37849531 +v 0.47567156 -0.30992034 -0.39383709 +v -0.62852812 0.25146267 -0.11534261 
+v 0.51573300 -0.14648563 -0.11326339 +v -0.68549579 -0.11405635 -0.11503622 +v 0.53090101 -0.01757421 -0.20461115 +v 0.49223989 -0.62916762 -0.30898446 +v 0.52764213 0.07525812 -0.39809257 +v -0.63167691 0.22654301 -0.39370945 +v 0.11872821 -0.65673465 -0.10574441 +v -0.46841291 0.21564031 -0.03434626 +v -0.37449163 0.15826070 -0.39306581 +v 0.42442805 -0.63675386 -0.05634147 +v -0.64052171 -0.63031596 -0.01379876 +v 0.32096821 0.10836062 -0.01674354 +v -0.00604619 -0.62464350 -0.02232203 +v 0.23316789 -0.63091779 -0.37594673 +v 0.45732149 -0.35111338 -0.00979905 +v -0.55054539 -0.60899574 -0.34082639 +v 0.07009333 -0.06992002 -0.01453240 +v 0.53542197 0.19448730 -0.17046149 +v -0.16906442 -0.25643030 -0.38722923 +v -0.67754698 -0.06052145 -0.26361483 +v 0.48377398 -0.22178870 -0.02484467 +v -0.01429706 -0.65415543 -0.00900073 +v 0.16332854 0.13032205 -0.40163013 +v -0.56411338 0.25086319 -0.05835856 +v 0.52119350 -0.28826657 -0.39218217 +v 0.11093814 0.11888938 -0.38955823 +v -0.64392650 0.25832590 -0.25412899 +v -0.67002058 -0.55127162 -0.11694352 +v -0.45238218 0.08605327 -0.01815934 +v -0.69682634 -0.63848597 -0.09912419 +v -0.35334858 -0.61331075 -0.14061154 +v -0.63776714 -0.60665697 -0.35892549 +v -0.66259658 -0.27601734 -0.12117600 +v 0.49873719 -0.09092759 -0.10990456 +v -0.60159177 -0.60796183 -0.32380220 +v -0.61665225 -0.49053490 -0.38828287 +v 0.52236855 -0.32181260 -0.18344490 +v 0.12196919 -0.65786487 -0.17839666 +v 0.03156023 0.20432439 -0.07820221 +v -0.26726857 0.21495427 -0.39060488 +v 0.49691191 -0.37663978 -0.34004679 +v -0.04111305 0.23834905 -0.02073134 +v -0.58898121 0.22641811 -0.39149448 +v -0.19183049 -0.64997274 -0.34136087 +v 0.52649015 0.21781228 -0.11513367 +v -0.64259273 0.22958183 -0.18408407 +v 0.33848375 0.19677265 -0.03338155 +v -0.68776393 -0.54379177 -0.37111145 +v 0.52598071 0.08534852 -0.10626869 +v 0.51682103 -0.50163037 -0.34398946 +v -0.47997385 0.21735020 -0.08923110 +v 0.40084791 -0.63534039 -0.37750146 +v -0.65291977 -0.06632905 -0.38739645 +v -0.61095369 -0.03246189 -0.03148957 +v -0.61614537 -0.61333895 -0.11248379 +v -0.50209993 -0.63052410 -0.11097948 +v 0.22408247 -0.62979305 -0.02110029 +v -0.62392521 -0.63778526 -0.10914028 +v -0.42729416 -0.61176270 -0.37834978 +v -0.02093512 0.06188480 -0.40217632 +v 0.12792376 -0.23202415 -0.39837301 +v 0.49300024 -0.14842272 -0.10794321 +v -0.43558344 -0.30080360 -0.40232909 +v 0.48979512 0.22530073 -0.02411060 +v -0.27171272 0.02340627 -0.39131957 +v -0.53251016 -0.03176916 -0.40636986 +v 0.50167352 -0.10915544 -0.11631329 +v 0.16093752 -0.07739979 -0.38722286 +v -0.43347144 0.21794410 -0.12167857 +v -0.57960904 -0.45512012 -0.01445496 +v -0.68577480 -0.37579539 -0.26355824 +v -0.08611272 -0.62003189 -0.11113992 +v 0.25471032 0.23049623 -0.11591199 +v -0.64502984 0.22710484 -0.19023152 +v -0.66796488 -0.61101139 -0.10582867 +v 0.33157176 0.22924544 -0.07421441 +v -0.23595208 -0.27872854 -0.01248553 +v -0.08446407 -0.16449991 -0.02590683 +v -0.48285127 -0.64491510 -0.05267199 +v 0.51597369 -0.20468035 -0.01388770 +v -0.47754183 -0.24824278 -0.40338954 +v 0.51272249 0.19034530 -0.26854479 +v -0.67212999 0.16895086 -0.16831759 +v -0.11440583 -0.22210829 -0.38723242 +v -0.25257415 -0.62019700 -0.07007087 +v -0.67452359 0.06740185 -0.12247221 +v 0.11060281 0.20348191 -0.12005001 +v -0.48771527 -0.17101991 -0.40432662 +v -0.19737543 -0.08145498 -0.01483454 +v -0.05056455 0.20688064 -0.04031668 +v 0.30202937 -0.44982231 -0.39406988 +v -0.66592711 -0.63654405 -0.11235400 +v -0.46077895 0.22294353 
-0.27772638 +v 0.52030265 -0.18815422 -0.10855132 +v 0.50530618 -0.62472063 -0.06175272 +v 0.20845626 0.20251763 -0.28919634 +v 0.24068154 -0.02448843 -0.39963901 +v -0.67084998 0.19375366 -0.21352039 +v 0.43676218 0.22435741 -0.39877409 +v 0.44741178 -0.63639653 -0.37719059 +v 0.01884986 0.23886746 -0.36442739 +v -0.14559942 -0.61878532 -0.02199393 +v -0.39723718 -0.25527015 -0.38887802 +v -0.55253702 -0.13639444 -0.01644907 +v -0.06428358 0.21038848 -0.29665637 +v 0.04847674 -0.66077298 -0.10738192 +v -0.08978630 0.22513370 -0.11772992 +v 0.14488123 -0.62815624 -0.02164073 +v -0.58187717 -0.24660747 -0.01585458 +v -0.58380997 -0.49155438 -0.02732645 +v -0.40226501 0.07796992 -0.40655795 +v 0.49630779 -0.06802240 -0.11005903 +v -0.37382853 -0.64638311 -0.11159525 +v -0.65496773 0.21972474 -0.08511351 +v -0.50028741 -0.30010223 -0.40300849 +v 0.39886907 -0.01045918 -0.38624451 +v 0.41530427 0.14946190 -0.03094402 +v 0.17416251 -0.18248817 -0.02523021 +v 0.49922875 0.10706057 -0.11151780 +v -0.60893893 0.22225329 -0.12142665 +v -0.36711109 0.21432425 -0.07794220 +v 0.49144635 -0.30197236 -0.10724617 +v -0.54464197 0.22570980 -0.38615578 +v 0.50923580 -0.30178767 -0.11153525 +v 0.28445756 -0.03945617 -0.01462432 +v 0.05445436 0.15232864 -0.38988039 +v -0.17556676 0.11321185 -0.03048401 +v -0.68080580 -0.16722910 -0.23657309 +v -0.48671940 -0.60721344 -0.38570067 +v -0.13984708 -0.62236333 -0.02614812 +v 0.00564540 -0.54114306 -0.39605314 +v -0.34495172 0.09322134 -0.01775719 +v -0.30102980 -0.52965057 -0.38551086 +v -0.68044484 -0.14377065 -0.11978276 +v 0.47897148 -0.10563718 -0.01369238 +v -0.67703748 -0.24079567 -0.11544559 +v -0.42865348 -0.61153054 -0.35205889 +v 0.49804604 0.22296225 -0.01916022 +v 0.25754428 -0.67046821 -0.10490439 +v -0.25750467 0.21067344 -0.39222279 +v -0.67692995 -0.55440563 -0.11193994 +v 0.49553725 0.17090815 -0.11809760 +v 0.48584509 -0.43281162 -0.02427061 +v 0.37249085 -0.63556969 -0.06302127 +v -0.08219843 0.21135446 -0.19077495 +v 0.12887357 -0.65989262 -0.10876537 +v -0.38230211 -0.60390669 -0.38537407 +v -0.65230787 0.13149907 -0.11807206 +v -0.65165251 0.22056367 -0.03797702 +v 0.47102013 -0.63737774 -0.37695441 +v -0.54340547 -0.61169517 -0.02693410 +v -0.23012547 0.21521422 -0.35310742 +v -0.66919363 -0.63370717 -0.13023120 +v -0.61543107 -0.64321494 -0.10548484 +v -0.68883139 -0.29728144 -0.08173339 +v -0.69341379 -0.43945909 -0.02250203 +v 0.23917373 -0.62925899 -0.37880915 +v -0.68028355 -0.23276742 -0.31336802 +v -0.34730434 -0.04998429 -0.39089659 +v -0.12578498 -0.61918962 -0.37917215 +v -0.60153794 0.22075632 -0.08542629 +v 0.50736958 0.01731096 -0.34957135 +v -0.43695554 -0.61613548 -0.10662189 +v 0.50350654 0.16603002 -0.11820398 +v 0.52644670 -0.16434155 -0.37242681 +v 0.48320830 -0.63163233 -0.04715734 +v -0.68134850 -0.39608851 -0.39935112 +v 0.51022398 0.13997068 -0.21137606 +v 0.18779966 -0.06054061 -0.01434219 +v 0.28952894 -0.14129137 -0.38555917 +v -0.67122114 0.12175607 -0.12184156 +v -0.07408066 0.23933780 -0.40321678 +v 0.06619199 -0.28595141 -0.01094122 +v -0.14148033 0.24028964 -0.11702267 +v -0.66501313 -0.46229139 -0.11450344 +v 0.50152767 -0.17682309 -0.37821743 +v -0.18721217 0.24646793 -0.25149551 +v -0.16751297 -0.45294714 -0.02359058 +v -0.31822017 -0.57443827 -0.02387059 +v -0.30969793 -0.16019098 -0.40252379 +v 0.35881791 0.18362550 -0.40090054 +v 0.12945215 -0.62706417 -0.37787017 +v 0.49252927 0.03977161 -0.11532389 +v -0.25062081 -0.07883117 -0.38990143 +v -0.24602561 -0.40776753 -0.39963880 +v -0.51800001 -0.60478884 
-0.02521234 +v -0.65101761 -0.05653403 -0.39045209 +v 0.36286527 0.05219405 -0.02942099 +v -0.62014657 0.25730297 -0.31067514 +v -0.64959937 0.14374958 -0.18969235 +v 0.51826239 -0.45470697 -0.24474229 +v 0.27795008 -0.66395086 -0.11055479 +v -0.08310208 0.23299886 -0.01850062 +v -0.62176704 0.21754499 -0.39462927 +v 0.52122432 -0.13317406 -0.10554994 +v -0.65381312 -0.04906830 -0.03208406 +v -0.68788928 -0.25930130 -0.01967769 +v 0.00092224 0.23042516 -0.40361893 +v -0.69699776 -0.57689846 -0.06594435 +v 0.37139520 -0.44729769 -0.39336157 +v -0.07145664 0.20811611 -0.12071556 +v -0.05371583 -0.12293718 -0.02669282 +v -0.21097663 0.21160835 -0.12071854 +v -0.32693183 -0.64293575 -0.01131315 +v 0.19605313 -0.62903464 -0.37764740 +v -0.29282495 0.21978034 -0.11924828 +v 0.11946817 -0.66357923 -0.10362860 +v -0.06789567 0.24559014 -0.09811139 +v -0.23619007 0.21611080 -0.16962273 +v 0.07843649 0.01331492 -0.02873416 +v -0.15477695 -0.37650281 -0.39897728 +v 0.49417794 -0.51962906 -0.34013361 +v -0.06353086 0.06948388 -0.38994250 +v -0.67693758 -0.17016190 -0.36621702 +v -0.00715710 -0.66125298 -0.10674871 +v -0.29090750 0.24157415 -0.40571326 +v 0.29826114 -0.63253736 -0.02168243 +v 0.17052226 -0.02102688 -0.40017030 +v 0.19674690 -0.63895434 -0.10847132 +v 0.34289986 0.22854392 -0.39682508 +v 0.41500482 -0.63608491 -0.20177914 +v 0.49377090 -0.10328253 -0.11326339 +v -0.67139262 -0.23009589 -0.40546387 +v 0.14651202 -0.65920502 -0.01131017 +v -0.62876862 0.04316720 -0.01866744 +v -0.69225150 -0.62232482 -0.11268252 +v 0.14925122 0.20617248 -0.11762013 +v 0.52358210 -0.28019705 -0.11537027 +v -0.43651518 -0.55536443 -0.01239957 +v -0.36901054 0.21540029 -0.12014320 +v -0.02558654 0.23911791 -0.40121588 +v 0.39058754 0.05361792 -0.38699603 +v -0.04320445 -0.03993789 -0.38876778 +v -0.17755069 0.21308553 -0.38929802 +v -0.66309053 0.12222417 -0.40850079 +v 0.51006275 0.19296597 -0.32811233 +v -0.37724349 -0.39230907 -0.38712922 +v 0.35094339 -0.66398638 -0.17979032 +v -0.52198845 -0.20250204 -0.40431747 +v 0.18716227 0.23300642 -0.11466046 +v -0.32268989 -0.64603025 -0.35017648 +v 0.23243587 0.23209830 -0.06157867 +v -0.21970621 -0.36633438 -0.02451870 +v 0.32032835 0.04705462 -0.38753563 +v -0.67646909 -0.11526979 -0.35154036 +v -0.55718589 -0.64208275 -0.01855722 +v 0.05429315 0.24065402 -0.15746574 +v 0.18016067 -0.51867908 -0.02110795 +v -0.22019516 -0.64840883 -0.18127632 +v 0.50737220 0.12455063 -0.38462830 +v 0.33961028 0.22603756 -0.40102097 +v 0.36718148 0.11307472 -0.03028272 +v -0.10532028 0.24034762 -0.02071049 +v -0.26181820 -0.61727387 -0.11037394 +v -0.05266364 -0.65274334 -0.12954991 +v -0.64182460 -0.63693684 -0.11155057 +v -0.00873916 0.23369028 -0.11940870 +v -0.40127420 -0.64228904 -0.39726532 +v 0.26918980 -0.63060588 -0.37870809 +v -0.68302524 -0.07527930 -0.01998238 +v -0.68942779 -0.38178632 -0.11412385 +v 0.02755141 -0.17727146 -0.01259787 +v 0.00463429 0.23800574 -0.07464762 +v 0.29236293 0.06910478 -0.40019393 +v -0.17294011 -0.13545829 -0.40157056 +v -0.67162043 0.08412257 -0.36611319 +v -0.13356219 0.06550093 -0.39045444 +v -0.41077691 -0.48715138 -0.40058181 +v -0.66897595 0.24528864 -0.22443607 +v -0.04048571 0.23122732 -0.40404531 +v -0.05744571 -0.53788084 -0.38368380 +v -0.27829435 0.18323053 -0.03190150 +v 0.12549701 -0.62910581 -0.05918497 +v -0.67731643 -0.63580757 -0.11009478 +v -0.69563067 -0.58779269 -0.01710994 +v 0.47759429 -0.23808745 -0.39487460 +v -0.46333882 0.11059112 -0.40713671 +v -0.68219322 -0.24236327 -0.25705633 +v 0.00862534 -0.35190225 
-0.39784384 +v -0.66245306 -0.04421537 -0.03892642 +v -0.63681978 0.25868249 -0.12324160 +v -0.00753850 -0.56898046 -0.00882923 +v 0.43225926 0.22659971 -0.02263608 +v 0.28744262 -0.66243279 -0.18637905 +v 0.32678470 -0.50823808 -0.39295644 +v -0.09788346 -0.52401906 -0.00979650 +v 0.36219206 0.22999898 -0.30811146 +v 0.48671046 -0.33459634 -0.10899304 +v 0.28553286 -0.63414401 -0.10440905 +v 0.51166022 -0.62747651 -0.26196629 +v 0.37927750 -0.66545570 -0.34656104 +v 0.14990406 0.05746864 -0.01602735 +v 0.02740294 0.22695185 -0.12119175 +v -0.61153466 -0.39063582 -0.02866522 +v -0.59836602 -0.64049780 -0.01671205 +v 0.51247430 -0.42064083 -0.10789555 +v 0.37885255 -0.18254524 -0.38429528 +v -0.37087417 -0.65006572 -0.10299368 +v 0.48633927 -0.63790411 -0.26649261 +v 0.05383751 -0.62450409 -0.19739048 +v 0.47855943 -0.48667169 -0.02078581 +v 0.51070279 -0.39573893 -0.00957266 +v -0.05288633 -0.58908099 -0.38266057 +v 0.52123207 -0.36787161 -0.34738874 +v 0.36135751 -0.63757700 -0.10620783 +v -0.32079801 -0.17939945 -0.02720985 +v 0.50826055 -0.15137286 -0.11472940 +v 0.14650439 0.20122498 -0.03307431 +v 0.17876039 -0.35709140 -0.00984501 +v -0.03095481 0.23924780 -0.11589752 +v 0.46784583 0.19799007 -0.19170603 +v -0.04725945 -0.59690017 -0.00889094 +v 0.39775559 -0.63475561 -0.10531078 +v 0.07084872 -0.44988406 -0.39637655 +v -0.68665272 -0.17278300 -0.07167603 +v 0.18301256 -0.01887133 -0.38777626 +v -0.17019832 0.20925429 -0.39165106 +v -0.65577912 0.25732735 -0.34463587 +v 0.42032200 0.19847786 -0.22301348 +v -0.69504184 -0.46134460 -0.10184514 +v -0.66549432 0.25886261 -0.21795118 +v 0.07244360 0.23623791 -0.02130327 +v 0.39295304 -0.61922216 -0.39112785 +v -0.26630840 0.21762855 -0.11959382 +v -0.03820984 0.24270728 -0.21755585 +v 0.50837064 -0.66658223 -0.21827587 +v 0.30983752 -0.44422057 -0.38147265 +v 0.53471816 0.19803545 -0.36117813 +v -0.66193336 -0.46222320 -0.38662878 +v -0.66904247 0.20133780 -0.12278371 +v 0.48442185 -0.63704801 -0.10380520 +v 0.52445769 -0.17533910 -0.11052330 +v 0.45697609 -0.22891256 -0.02501659 +v 0.11628105 -0.66379815 -0.10826237 +v -0.34309047 -0.09561030 -0.40355766 +v 0.26841161 -0.47707975 -0.38137987 +v 0.35626569 -0.62988907 -0.37893683 +v -0.38883767 0.22073734 -0.22343178 +v -0.00019639 -0.62584955 -0.02600471 +v -0.46605751 -0.61078393 -0.28257677 +v 0.33090633 0.04658391 -0.01589628 +v -0.00266935 0.19767086 -0.01861637 +v -0.05635254 -0.62160045 -0.29116407 +v 0.51322186 -0.48244792 -0.10505503 +v -0.63561654 -0.17518388 -0.40559241 +v 0.49691978 0.16455619 -0.11826399 +v 0.15288138 -0.54465055 -0.38156945 +v 0.23678282 -0.27233243 -0.01083015 +v -0.28999862 -0.14184600 -0.01440644 +v 0.50106442 0.18498634 -0.10611507 +v -0.61694646 -0.07951667 -0.03106828 +v 0.31104586 -0.66284269 -0.01143698 +v -0.54482102 -0.63906407 -0.22538206 +v 0.53458506 0.15328698 -0.14997144 +v 0.45990986 -0.13431969 -0.02651323 +v -0.19866550 0.21461181 -0.30157486 +v -0.65932214 0.07430527 -0.12052406 +v 0.51510602 0.22380856 -0.11517112 +v -0.56423861 -0.49104768 -0.38799053 +v -0.15132853 0.23902014 -0.40479812 +v 0.49815115 -0.15906388 -0.38202330 +v 0.48573515 -0.48851377 -0.02618556 +v 0.25909835 -0.19032429 -0.38531959 +v -0.05739957 -0.61726254 -0.02101092 +v 0.08678219 -0.65678155 -0.10532270 +v -0.15044789 0.24474809 -0.11979340 +v 0.29913419 0.19893856 -0.07515232 +v 0.43944523 -0.17123380 -0.02600215 +v 0.51088715 0.17223102 -0.18963064 +v -0.66220725 -0.60419369 -0.38391250 +v 0.10916171 -0.05916136 -0.40016839 +v -0.67726517 -0.61958170 -0.11149227 
+v 0.25538123 -0.63073975 -0.20641376 +v 0.49211979 -0.04752554 -0.11174037 +v -0.66852021 0.25214919 -0.23618838 +v 0.19525707 0.23204234 -0.01923767 +v 0.48236620 -0.63922918 -0.37391132 +v 0.51395148 -0.52871835 -0.10883728 +v 0.51659340 -0.16054499 -0.11339446 +v 0.09978956 -0.62312490 -0.02020323 +v -0.40875444 -0.12453186 -0.01553669 +v -0.35631540 -0.64723837 -0.01586479 +v -0.21670324 0.03689784 -0.40388298 +v -0.32937396 -0.40880325 -0.02529915 +v 0.04289868 -0.58416331 -0.02145094 +v -0.66154164 -0.35595021 -0.11538091 +v -0.12347060 0.04333944 -0.40282720 +v 0.25409612 -0.15830544 -0.01271532 +v 0.49413964 -0.40679818 -0.16370551 +v 0.18482253 -0.22816455 -0.39805129 +v 0.13180749 -0.62594497 -0.11367915 +v 0.31020877 -0.66090864 -0.10277496 +v 0.23946829 -0.66124129 -0.39084169 +v 0.23363149 -0.42931134 -0.39500311 +v -0.53705907 -0.40316948 -0.40258145 +v -0.00686003 -0.65728915 -0.05497079 +v 0.04164941 0.20719044 -0.29290944 +v 0.04363853 0.23731060 -0.11401703 +v -0.44715443 -0.61077595 -0.13110271 +v -0.09915315 0.17044654 -0.03105381 +v -0.19412915 -0.02762737 -0.40292209 +v 0.10210125 0.23441316 -0.11682522 +v 0.02085453 -0.65501076 -0.13703611 +v 0.53140557 0.07372414 -0.39538544 +v 0.01630797 -0.02143503 -0.38858268 +v -0.05512627 0.23070262 -0.12018916 +v -0.01642675 -0.30112633 -0.39839557 +v 0.01313870 -0.66133636 -0.10553505 +v 0.20844622 0.04117282 -0.38832033 +v 0.05446478 -0.61655027 -0.39402243 +v -0.39113650 -0.20891353 -0.40288040 +v -0.32493746 -0.64565694 -0.11546858 +v -0.59361714 -0.42799050 -0.38865715 +v -0.02990514 -0.65443128 -0.34719574 +v -0.09215154 -0.62127113 -0.10846578 +v 0.52521294 -0.18293554 -0.39215684 +v 0.45791310 -0.60948747 -0.39048719 +v 0.27059790 -0.63019174 -0.10901942 +v 0.14351438 -0.62746829 -0.10700786 +v 0.50202703 0.10331455 -0.39874005 +v -0.66319025 -0.39980873 -0.35103673 +v 0.12200271 -0.66465950 -0.10823642 +v 0.38021967 -0.66749990 -0.10394393 +v -0.17073841 -0.18465617 -0.38806733 +v 0.10123855 -0.39165410 -0.38394380 +v -0.54294705 0.24582866 -0.02060750 +v -0.67987889 -0.39195141 -0.40339679 +v -0.67331761 -0.37219325 -0.11734438 +v 0.33951312 -0.19723032 -0.39689636 +v 0.49157456 -0.49642530 -0.10857473 +v -0.66119856 -0.01833759 -0.03614335 +v 0.49355856 -0.13743649 -0.03662762 +v -0.27582896 -0.61849964 -0.02456764 +v 0.27739984 -0.66302997 -0.36169389 +v -0.26784176 0.21348272 -0.12128877 +v 0.47998032 -0.63693345 -0.03067380 +v 0.22310480 0.12392066 -0.01694653 +v 0.50035536 -0.66444939 -0.00699768 +v -0.38618800 0.23822266 -0.11960403 +v 0.51886928 -0.18015012 -0.07681706 +v -0.17381296 0.23488520 -0.11792611 +v 0.50319952 -0.09107445 -0.24662811 +v 0.09819216 -0.28965056 -0.38516852 +v -0.14361776 -0.65800661 -0.10667253 +v 0.19393104 0.20312211 -0.23503602 +v 0.10358353 -0.23038399 -0.38601726 +v 0.48115537 -0.63729763 -0.14323887 +v 0.49436241 -0.10912076 -0.10992328 +v -0.15246767 -0.00655091 -0.38984677 +v 0.35151953 -0.40984431 -0.38152817 +v -0.04423599 0.21164012 -0.11734438 +v -0.28930479 -0.65660113 -0.11034925 +v -0.68316591 -0.02953318 -0.02429231 +v 0.49260369 -0.62002891 -0.36939543 +v -0.36000431 0.25134629 -0.23124354 +v -0.65228975 -0.11139652 -0.01819849 +v 0.51032913 -0.55009681 -0.10442905 +v -0.13907631 -0.11998883 -0.38862652 +v -0.68169647 -0.64434904 -0.11412555 +v -0.30338734 0.24552514 -0.40529132 +v -0.64414638 -0.63969672 -0.01865552 +v 0.37500498 -0.31123927 -0.02346292 +v -0.07588014 -0.50836229 -0.02245310 +v 0.32197714 0.22813657 -0.39911687 +v 0.49637970 -0.03491282 -0.11447663 
+v -0.10849454 -0.65027773 -0.10745086 +v -0.09859758 -0.45392290 -0.02311780 +v 0.32022610 0.19898969 -0.03930558 +v -0.68900526 -0.53137904 -0.33181396 +v -0.55792814 0.25282061 -0.12098280 +v 0.50835282 0.22092234 -0.11895677 +v -0.47727534 -0.19657151 -0.39021805 +v -0.66535342 0.25487342 -0.39754000 +v -0.27932078 -0.42662996 -0.01192338 +v 0.24107090 -0.32270226 -0.01015609 +v -0.62130862 0.22805211 -0.38933098 +v -0.42927790 -0.61463559 -0.02585874 +v -0.16576941 -0.64940554 -0.39491543 +v 0.25104979 -0.66770482 -0.10660615 +v -0.48947629 -0.59183556 -0.40056011 +v 0.07677011 -0.00522417 -0.38850331 +v 0.42018387 0.22814243 -0.31118602 +v 0.01792595 -0.47058415 -0.00957309 +v -0.60720301 -0.64145851 -0.05023318 +v -0.68584365 -0.34389776 -0.21049221 +v -0.31092653 0.24551855 -0.02353016 +v 0.44407123 -0.33498439 -0.38159734 +v 0.00713043 -0.54554844 -0.02161009 +v -0.67373997 0.10094633 -0.12327649 +v 0.49613908 -0.29906726 -0.11331446 +v 0.39195988 -0.63667876 -0.37482199 +v 0.00671060 -0.61668837 -0.38216820 +v -0.65379500 0.00393226 -0.15787426 +v -0.49183148 0.22169428 -0.12312373 +v -0.13922988 -0.65146321 -0.34156790 +v -0.52585900 -0.51383895 -0.02648728 +v -0.50159276 0.16728868 -0.40757012 +v 0.38184276 -0.51723927 -0.37962770 +v -0.63395756 -0.24250525 -0.40490749 +v 0.38387284 0.19821519 -0.32879597 +v -0.17771436 0.24562812 -0.12046363 +v -0.45030060 -0.65086550 -0.10895516 +v 0.35843924 -0.14553629 -0.39736041 +v -0.67571628 0.24953549 -0.04509471 +v 0.03341396 -0.30407855 -0.02355696 +v 0.49090645 -0.55844086 -0.17270242 +v -0.31976363 0.24651167 -0.40504366 +v 0.31727445 -0.13140635 -0.01320215 +v 0.26159701 -0.07185019 -0.39890748 +v -0.68537259 -0.21247563 -0.01839977 +v -0.63816363 0.22786604 -0.12585701 +v -0.68399531 -0.30986306 -0.25249958 +v -0.48146603 -0.21114515 -0.01534690 +v -0.67280811 -0.50138909 -0.11323743 +v -0.64060587 0.22932576 -0.20926322 +v -0.65566379 0.20979032 -0.12129515 +v 0.53452885 0.22452092 -0.25279489 +v -0.62463403 0.25612432 -0.36723197 +v 0.41547605 -0.63378876 -0.01984067 +v -0.03597482 -0.62768537 -0.10892835 +v -0.15523754 0.21436255 -0.11942956 +v 0.50670934 0.19742697 -0.15623166 +v 0.26430294 -0.58392316 -0.38002303 +v -0.12167338 0.24725296 -0.12361012 +v 0.21061462 -0.44861615 -0.02175052 +v -0.62691754 -0.29015681 -0.01615842 +v 0.06780502 0.23762348 -0.35831484 +v -0.09471402 0.03777239 -0.01623417 +v 0.01214038 -0.65165693 -0.00803772 +v 0.15894102 -0.57378548 -0.39402115 +v -0.66806442 0.25517696 -0.20525202 +v -0.00744874 -0.65483469 -0.11149099 +v -0.34755498 -0.01108298 -0.02952100 +v 0.12576598 -0.39761010 -0.00964415 +v 0.40955734 0.19578043 -0.38738900 +v -0.07010473 0.25381744 -0.13973024 +v 0.12597847 -0.62679332 -0.10757086 +v -0.42379177 0.22171789 -0.40671733 +v -0.06131369 0.18983305 -0.03144744 +v -0.18580137 0.02930455 -0.02935971 +v -0.60325289 -0.07279006 -0.40651029 +v 0.22644311 0.23622993 -0.12416971 +v 0.28427607 0.20462976 -0.11535453 +v 0.32491863 -0.66420883 -0.32584843 +v -0.11088041 0.21209688 -0.23536666 +v -0.52308393 -0.48496199 -0.01375748 +v -0.17439657 0.21415158 -0.20742317 +v -0.69002920 -0.48956820 -0.24419099 +v 0.53292888 0.15056588 -0.11491238 +v 0.47073880 0.22388898 -0.01980365 +v -0.08696232 0.20171307 -0.03129254 +v -0.14258856 0.12190725 -0.01737122 +v -0.27431336 -0.54697371 -0.39873472 +v -0.52918696 -0.55025584 -0.38720667 +v -0.64709800 0.14233464 -0.12002958 +v 0.21929048 0.17255986 -0.03132616 +v -0.40985256 -0.64452904 -0.10810194 +v 0.14727512 0.18522938 -0.40203908 +v 
-0.41763496 -0.61612755 -0.02779413 +v -0.28326568 0.24394137 -0.02182031 +v -0.57618600 -0.63829786 -0.11058117 +v -0.36309928 -0.64769793 -0.03546077 +v 0.35197273 -0.66590941 -0.11262422 +v -0.40294823 -0.64304501 -0.15771638 +v -0.65654439 -0.09273461 -0.03187895 +v -0.52215463 -0.60985595 -0.11329318 +v -0.28424871 -0.49840450 -0.01150039 +v 0.28526169 -0.48638555 -0.00814793 +v -0.13867943 0.21019736 -0.38905269 +v 0.51081049 0.12058550 -0.33499172 +v -0.43155879 -0.20421322 -0.02800775 +v 0.03907673 -0.66145152 -0.10929007 +v -0.69424295 -0.46878931 -0.05890156 +v 0.49267545 -0.14512005 -0.11073097 +v -0.67048359 -0.38125727 -0.11607710 +v 0.39453530 -0.20765875 -0.02519021 +v 0.42469722 0.10656375 -0.38726583 +v -0.10671014 0.21359508 -0.11820398 +v 0.23872602 -0.31749225 -0.02308716 +v -0.69056422 -0.51939076 -0.01569330 +v -0.66106790 0.09801873 -0.11727715 +v 0.50094938 0.15746467 -0.11567497 +v -0.37198758 -0.64514083 -0.01228850 +v -0.08097702 0.25175908 -0.11213697 +v 0.11763546 -0.63220966 -0.10580102 +v -0.29711014 -0.64545584 -0.39667189 +v 0.30060378 -0.15770137 -0.39769000 +v 0.08499546 0.20305121 -0.03476032 +v -0.65017766 0.08996734 -0.33743137 +v -0.13831846 0.24507533 -0.25447157 +v -0.63899302 0.25892112 -0.20230937 +v -0.29162917 0.01446948 -0.02953972 +v 0.49833563 -0.66825873 -0.10324646 +v -0.46567845 -0.48057544 -0.40130204 +v -0.67865509 -0.11891587 -0.27494437 +v 0.50497371 0.01735840 -0.11799078 +v -0.27220133 -0.16484790 -0.02696006 +v 0.50837851 0.09146468 -0.16793714 +v -0.48772261 -0.41894385 -0.38818330 +v -0.50932389 -0.00785372 -0.01736186 +v -0.10757285 0.21140857 -0.33806160 +v 0.01246300 -0.66016221 -0.10743937 +v -0.67040932 0.13701819 -0.12152877 +v -0.11391652 -0.41328323 -0.38527980 +v -0.24166052 -0.06241370 -0.02820095 +v -0.67284644 -0.60791481 -0.03103764 +v -0.15248804 0.24215332 -0.02385059 +v -0.68256420 -0.43932506 -0.40049818 +v 0.35487580 -0.66951942 -0.10827259 +v 0.17468764 -0.65652120 -0.39274493 +v 0.37857628 -0.42448762 -0.00868625 +v -0.57956004 0.25730202 -0.20626354 +v -0.47917476 -0.14029521 -0.39078850 +v -0.10561444 -0.65172470 -0.11393788 +v -0.30479780 -0.05629514 -0.40373254 +v -0.59837604 0.22225940 -0.12209561 +v 0.52244061 0.03841167 -0.01838275 +v -0.40288675 -0.44145802 -0.01289958 +v 0.28788573 0.06767255 -0.01617544 +v 0.50626141 -0.01264878 -0.38087964 +v -0.65829539 0.17223644 -0.11869762 +v -0.19555491 0.03883211 -0.39077318 +v -0.25842595 -0.54892099 -0.02349951 +v -0.66012579 -0.01700632 -0.11787929 +v -0.16332963 -0.11369314 -0.02709240 +v -0.33630115 0.24497916 -0.11659712 +v -0.08912034 -0.60316694 -0.38303781 +v -0.40107170 0.24797767 -0.11431577 +v -0.66769314 -0.60439044 -0.20962495 +v -0.65848994 0.19783182 -0.40896869 +v -0.40158626 -0.61835164 -0.11104289 +v 0.44258398 -0.29483435 -0.02396081 +v 0.20410974 0.23321621 -0.02631833 +v 0.52789086 0.21016905 -0.10651679 +v -0.67665058 -0.08066948 -0.01822359 +v -0.40465826 0.25140098 -0.32778636 +v 0.48729950 -0.37618408 -0.11008073 +v -0.59072930 0.14964670 -0.01940151 +v -0.64576930 -0.61137754 -0.03915451 +v -0.11096994 -0.62000531 -0.25416771 +v -0.67588514 -0.63872373 -0.01997557 +v 0.49726814 -0.29315835 -0.20103316 +v -0.02831778 -0.62189603 -0.37874404 +v -0.27797922 0.24012719 -0.11922529 +v -0.54927266 0.21859598 -0.39421883 +v -0.26353058 0.21340916 -0.40525302 +v -0.69464481 -0.60415953 -0.11796781 +v -0.06220449 -0.62332016 -0.02274970 +v -0.67495841 -0.63379335 -0.11497536 +v 0.17046367 -0.18023112 -0.39854792 +v -0.33938593 -0.61752385 
-0.02585236 +v 0.15086943 0.22960949 -0.01814316 +v -0.34849697 0.24524407 -0.02194797 +v 0.48252255 -0.56086332 -0.02219607 +v -0.57124513 -0.49925384 -0.40239015 +v 0.53303903 0.06383274 -0.29812494 +v 0.48587871 -0.45455676 -0.10555675 +v -0.30573985 -0.08686266 -0.39015123 +v 0.49183327 -0.61258912 -0.29756492 +v -0.67007905 0.06199204 -0.11863847 +v -0.10646689 -0.61970621 -0.15245450 +v -0.54814881 -0.01788220 -0.03078529 +v -0.67592609 0.18895046 -0.12189219 +v 0.16037983 -0.56788033 -0.00775771 +v 0.32160863 -0.66339612 -0.14087497 +v -0.67256737 -0.47701997 -0.03526417 +v -0.67908257 -0.17734113 -0.31113943 +v 0.48491359 -0.35450447 -0.10975647 +v -0.64870816 0.22119023 -0.11995724 +v -0.04315552 0.20593747 -0.03390582 +v 0.23540320 -0.18545285 -0.39797917 +v -0.13680288 -0.44535044 -0.01073185 +v -0.67419040 0.04903740 -0.30122399 +v -0.40614304 0.01062919 -0.03015932 +v 0.46748000 -0.23510990 -0.01157018 +v 0.45120096 0.22498009 -0.11689287 +v 0.51086944 -0.46060994 -0.10484184 +v 0.50694752 -0.66562080 -0.13877532 +v 0.49715808 -0.04834323 -0.11501665 +v 0.23702368 -0.06115839 -0.01431240 +v 0.14201440 -0.31618199 -0.02312504 +v -0.39337888 0.24299069 -0.11901423 +v 0.52806240 -0.10836722 -0.11548432 +v -0.27605152 0.06967241 -0.03024996 +v 0.33545056 -0.13622576 -0.02635238 +v -0.38871968 -0.60812217 -0.01168252 +v -0.65409184 -0.01517631 -0.20212170 +v 0.38207585 -0.63493794 -0.37748295 +v -0.10745503 -0.65247226 -0.10802236 +v -0.63958687 -0.36272845 -0.40392807 +v 0.25895777 0.22976194 -0.39952070 +v -0.68343711 -0.42588186 -0.37436134 +v 0.20339297 -0.05712347 -0.38718370 +v 0.01662561 0.12050577 -0.39000335 +v -0.67082655 -0.57867754 -0.19502316 +v -0.38029471 -0.15964708 -0.02798988 +v 0.48089185 0.22603309 -0.31893310 +v 0.50711137 -0.66210973 -0.10410989 +v 0.37277025 0.03769659 -0.39925155 +v -0.22356895 -0.61682487 -0.31442359 +v 0.51046497 0.15878540 -0.15727253 +v -0.65016735 0.03100562 -0.11867507 +v -0.52940190 -0.60877258 -0.38370463 +v 0.33963874 -0.66805851 -0.10753427 +v 0.17260642 0.20407432 -0.17632297 +v 0.52593762 -0.18840683 -0.11380512 +v 0.45063010 0.19805780 -0.11934743 +v 0.28882530 -0.39076537 -0.38241780 +v -0.05515678 -0.62484556 -0.02857501 +v 0.48814434 0.22733580 -0.20556480 +v -0.66527390 -0.26731381 -0.03318410 +v -0.42143646 0.22143443 -0.33194098 +v 0.17367907 -0.33493432 -0.39676657 +v 0.39458403 -0.36271676 -0.38175884 +v -0.44424605 -0.61534268 -0.02842819 +v 0.46277219 -0.63902456 -0.37390262 +v -0.54119581 -0.60919279 -0.29466483 +v -0.65061021 0.00726689 -0.38894162 +v 0.52905059 0.02608944 -0.39681977 +v -0.66693789 -0.25944704 -0.11333148 +v 0.01138275 -0.66231865 -0.10785130 +v 0.52589411 0.18091924 -0.11299658 +v -0.34306973 -0.57062423 -0.38514534 +v 0.49911907 0.14072767 -0.11836229 +v -0.62283677 0.25184536 -0.06531028 +v -0.62085277 -0.63881332 -0.11224123 +v -0.10112157 0.10202169 -0.01700100 +v -0.08010653 -0.66068363 -0.10849855 +v -0.64883357 0.14945786 -0.34561974 +v 0.34866020 -0.40342441 -0.39421690 +v 0.50988644 -0.43348178 -0.10894836 +v -0.33418396 -0.64783454 -0.01494816 +v -0.67623836 -0.23026590 -0.40063351 +v -0.29872796 -0.50328082 -0.39943880 +v 0.12135780 -0.65748733 -0.39188853 +v 0.32658532 0.20122083 -0.11696309 +v 0.25392228 0.23449072 -0.21124542 +v 0.33454436 -0.20725746 -0.38439319 +v -0.66617244 0.23531023 -0.12187049 +v -0.65630364 -0.01099994 -0.03304878 +v -0.68707740 -0.63399559 -0.31460336 +v 0.43035236 -0.63724184 -0.25091952 +v -0.18103708 0.09854870 -0.39117467 +v 0.51309156 -0.34058559 
-0.11003903 +v 0.49019748 -0.06114078 -0.11409449 +v 0.28573284 -0.66429704 -0.11516516 +v 0.53481317 0.21306522 -0.39451012 +v -0.53273755 -0.15075588 -0.40501472 +v -0.16834971 0.20721219 -0.03231598 +v -0.10149787 -0.62024212 -0.30572349 +v 0.42351717 -0.41161841 -0.02212287 +v 0.48776549 -0.39622420 -0.02883416 +v -0.15904155 -0.50108713 -0.01044418 +v 0.49635941 0.19487748 -0.11960063 +v -0.34510747 0.24113105 -0.40623966 +v -0.09080987 0.21184273 -0.13771017 +v -0.01541019 0.23915663 -0.39634976 +v 0.23301221 0.19877139 -0.03276493 +v 0.18653797 0.20169680 -0.38746244 +v 0.16493925 -0.26840624 -0.38525000 +v -0.67389083 0.12482969 -0.14410529 +v -0.68762267 -0.46700659 -0.30827188 +v 0.41078117 -0.19713080 -0.01213445 +v -0.56371355 -0.56491059 -0.02652047 +v -0.43451291 -0.59051383 -0.39989859 +v 0.32769126 -0.24716976 -0.39645848 +v -0.68217754 -0.25291529 -0.11754737 +v -0.68741018 -0.54400104 -0.11578305 +v -0.38986906 -0.61998630 -0.10998456 +v -0.30791834 0.10653822 -0.03094743 +v -0.59229338 -0.18330921 -0.01640523 +v -0.29082522 0.21720605 -0.37681758 +v 0.51142502 -0.39552009 -0.10935305 +v 0.47147110 -0.41632548 -0.39275920 +v -0.64981914 -0.63479435 -0.16999637 +v 0.50165862 0.18618335 -0.11829931 +v -0.66747802 -0.45340794 -0.15640314 +v 0.06133094 0.23449498 -0.01913128 +v -0.09525146 0.24533758 -0.17179728 +v 0.32820326 -0.66466439 -0.06025692 +v 0.13164134 -0.02558528 -0.02803626 +v 0.26995814 0.19969732 -0.11877379 +v -0.67372698 -0.50749892 -0.07327907 +v 0.10878054 -0.17300791 -0.39909983 +v -0.25815961 0.24786969 -0.12106493 +v 0.11081831 -0.62682444 -0.02117603 +v -0.69065881 -0.63356757 -0.26251820 +v 0.12534375 0.20245050 -0.08319302 +v -0.66435993 -0.39312944 -0.29538187 +v -0.57702041 -0.60898364 -0.11315105 +v 0.15508839 -0.62634259 -0.37970322 +v 0.23375463 0.07199041 -0.40068501 +v 0.51688039 -0.24950112 -0.06318000 +v 0.52018791 -0.08780881 -0.01917724 +v 0.50578535 0.19766055 -0.18956043 +v -0.25302169 -0.65143180 -0.06536943 +v -0.66654617 0.25489217 -0.33585387 +v -0.35060376 0.15263137 -0.01866063 +v 0.26483560 0.12054008 -0.40087268 +v -0.66181016 -0.30009463 -0.27047083 +v -0.66368920 -0.35555762 -0.25551265 +v 0.29772392 -0.00644484 -0.38712814 +v -0.67859608 0.25059074 -0.11891252 +v 0.31560552 -0.41655377 -0.02198117 +v 0.06801768 0.13159582 -0.01718824 +v -0.66918296 0.11570023 -0.40506345 +v -0.33822104 -0.64446467 -0.12678555 +v -0.68750232 -0.46031967 -0.11599455 +v -0.65076119 -0.03151149 -0.38991612 +v -0.09952407 -0.35377279 -0.02379612 +v -0.51560599 -0.53243214 -0.40152076 +v 0.52381545 -0.28487328 -0.28554219 +v -0.40743575 -0.62937200 -0.11003605 +v 0.50031209 -0.15583020 -0.11558390 +v 0.10323561 -0.16539212 -0.38671708 +v 0.48337001 -0.63607085 -0.09833863 +v -0.41446039 -0.64299333 -0.39614058 +v -0.55202711 0.24989475 -0.02196031 +v -0.31473047 0.23862171 -0.01959045 +v -0.48963991 -0.57465267 -0.02552469 +v 0.16506985 -0.65980548 -0.39006996 +v 0.36828521 -0.46467566 -0.38055730 +v 0.10614377 -0.65899885 -0.10900580 +v 0.14624362 -0.65296310 -0.00692917 +v 0.44650346 0.20155610 -0.12046704 +v -0.51163286 -0.48547217 -0.38770008 +v -0.00302742 0.22105543 -0.11705331 +v -0.08878486 -0.62037158 -0.20142466 +v -0.59144598 0.14767444 -0.39394498 +v 0.51939178 -0.15406503 -0.11000413 +v -0.28600982 0.21261211 -0.03721359 +v 0.45324394 0.22889298 -0.16524769 +v 0.48211050 0.19244967 -0.38763392 +v -0.31019670 -0.38429502 -0.38681516 +v -0.20402582 0.08973318 -0.01712015 +v -0.33636758 -0.42905670 -0.38653898 +v -0.00605334 -0.66078615 
-0.10848749 +v -0.09711766 -0.65924579 -0.10695637 +v -0.61418134 -0.64581352 -0.11299828 +v 0.12638570 -0.26760843 -0.01102761 +v -0.40313494 -0.04404148 -0.02947972 +v 0.50520170 0.04355657 -0.38312674 +v 0.48041067 0.19510455 -0.10879048 +v 0.26153579 0.22524786 -0.01817891 +v 0.33691499 -0.01091766 -0.01507966 +v 0.09582955 -0.33833262 -0.38455233 +v -0.53720725 0.08973547 -0.03193299 +v -0.43753621 -0.64226913 -0.21303910 +v 0.05991019 -0.04224557 -0.40067586 +v -0.67537045 -0.56410921 -0.06963043 +v -0.52266133 -0.42990562 -0.01409792 +v 0.28475755 -0.21531035 -0.39723679 +v -0.48985237 -0.63752091 -0.11136971 +v -0.35942549 0.11041792 -0.03126616 +v -0.23486869 -0.21841933 -0.02593534 +v -0.35541397 0.10251101 -0.40626305 +v -0.41947541 -0.39119548 -0.01337917 +v -0.57282197 -0.20185685 -0.40482664 +v -0.69727892 -0.63686204 -0.02544979 +v 0.50597996 -0.54154462 -0.10739511 +v 0.29495916 0.03259051 -0.02901034 +v -0.24635540 0.24807650 -0.24617702 +v 0.48701292 -0.34139901 -0.10694063 +v -0.36361876 -0.29214829 -0.40163970 +v 0.51260012 -0.43431607 -0.10510184 +v -0.08108180 0.21226344 -0.11818101 +v 0.05786220 -0.21975449 -0.39885408 +v 0.06640748 -0.66196418 -0.10839216 +v 0.12474732 0.23458892 -0.40040052 +v 0.11099500 -0.49469376 -0.39548418 +v 0.15034476 -0.36410436 -0.38386613 +v -0.67077780 -0.57555026 -0.14496404 +v -0.04351124 -0.62590945 -0.10565590 +v -0.49953428 -0.63940567 -0.39851749 +v -0.09777556 -0.19623609 -0.01268468 +v -0.50144148 0.22656067 -0.12143303 +v 0.03925100 -0.66299242 -0.10881261 +v -0.66325140 -0.13928251 -0.11768184 +v -0.63699859 -0.60789335 -0.02728177 +v 0.18091373 -0.66020197 -0.39002314 +v -0.64024979 0.21794783 -0.39423415 +v 0.14364269 -0.62078089 -0.38082901 +v 0.51459181 -0.09519975 -0.01389579 +v 0.53295469 0.09134526 -0.35610944 +v 0.51723629 0.06944495 -0.01597415 +v -0.22984083 -0.44824159 -0.38590428 +v -0.26583955 0.21687694 -0.23073927 +v -0.13539475 0.21674427 -0.12208114 +v -0.02065555 -0.66012305 -0.10345072 +v 0.45650029 -0.66500217 -0.10186514 +v 0.43688557 -0.66793174 -0.10447969 +v -0.11491731 0.21247238 -0.12103642 +v -0.50026643 -0.60990697 -0.34420776 +v -0.66013587 -0.28104186 -0.36884734 +v 0.52608877 -0.08490940 -0.11121311 +v -0.28848019 0.21771452 -0.16644306 +v 0.46067309 -0.63760078 -0.21544513 +v 0.34143341 -0.07259095 -0.02745327 +v -0.30144146 -0.32918110 -0.01268809 +v -0.50656402 -0.63327420 -0.40037200 +v 0.19863406 -0.63055992 -0.10593038 +v -0.18868370 -0.06480351 -0.02799031 +v -0.67427218 0.12340692 -0.01970747 +v 0.03948654 0.05905353 -0.40180165 +v -0.65382546 -0.09375343 -0.38770649 +v -0.66674578 0.20559932 -0.12086322 +v 0.41199470 0.19073276 -0.03146872 +v -0.12602258 0.09682209 -0.40328509 +v -0.28002706 -0.44284952 -0.02455530 +v 0.52675438 -0.16655354 -0.15376604 +v -0.09641618 -0.30717403 -0.02405570 +v -0.66654098 -0.45544824 -0.25589713 +v -0.31584913 0.24827811 -0.36550722 +v 0.35722607 -0.66360998 -0.10385627 +v 0.51262063 -0.34118921 -0.01070079 +v 0.53103215 -0.01728564 -0.25920489 +v -0.67670673 0.25451469 -0.38003173 +v -0.61627537 0.22582139 -0.12364119 +v -0.60248977 0.25048307 -0.02261098 +v 0.48411503 -0.62339491 -0.37695399 +v -0.68896145 -0.50689000 -0.11446726 +v -0.01194897 -0.65669602 -0.10596570 +v 0.49530479 0.18026039 -0.11682266 +v 0.32034671 -0.66085774 -0.00674363 +v 0.49664623 -0.33209306 -0.24500145 +v 0.32957295 0.15255626 -0.38857991 +v -0.60885137 0.25834024 -0.16591282 +v 0.39454320 -0.63341630 -0.37797338 +v 0.18810736 -0.66019553 -0.15142255 +v -0.59439248 0.22754198 
-0.15733765 +v -0.66647440 -0.50461000 -0.38032368 +v -0.22512528 -0.11084273 -0.02738731 +v -0.46094224 -0.61080194 -0.02488126 +v -0.21777296 -0.61577946 -0.02219479 +v -0.32327056 -0.61443692 -0.11151780 +v -0.12665744 0.24398041 -0.11617456 +v 0.37602928 -0.09486176 -0.01379153 +v -0.42676112 -0.18771102 -0.38987166 +v -0.40972176 0.24591696 -0.02100199 +v 0.06874736 -0.66334569 -0.10777087 +v 0.04202865 -0.66207886 -0.10692914 +v 0.37107569 -0.63406742 -0.37767017 +v -0.45401999 0.08818632 -0.39328325 +v -0.25347471 0.20761311 -0.03216789 +v 0.48712817 -0.63765526 -0.22577910 +v -0.40022671 -0.56770033 -0.38581002 +v 0.47535729 -0.66716880 -0.27087742 +v -0.67607439 0.01169050 -0.12213603 +v -0.65639311 0.08344153 -0.03462968 +v -0.33783695 -0.35388857 -0.40073201 +v -0.33119375 0.21400307 -0.04211759 +v 0.48476017 -0.62997425 -0.10211834 +v 0.37270129 -0.63072246 -0.01909639 +v 0.08439153 0.20706193 -0.11944573 +v 0.49144945 -0.21967214 -0.03232747 +v -0.23533711 -0.47854099 -0.02389527 +v -0.65994895 -0.24195257 -0.28300124 +v 0.14813298 0.23480520 -0.02313440 +v -0.39769742 -0.01345487 -0.39180854 +v -0.23642510 -0.49901330 -0.39888728 +v -0.64209038 -0.52528721 -0.38816372 +v -0.20692110 -0.17540772 -0.40141863 +v 0.04740210 0.23540267 -0.01939427 +v 0.48978034 -0.63216788 -0.20873342 +v -0.27167630 0.13183971 -0.39227960 +v -0.31099534 0.21249536 -0.03464627 +v 0.47612786 0.13536558 -0.01710483 +v -0.03698574 -0.62172705 -0.16255569 +v -0.48720270 -0.43711787 -0.40184715 +v -0.68441230 -0.31199524 -0.11781929 +v 0.53390706 0.22557989 -0.16730735 +v -0.64427406 0.22571883 -0.15755680 +v -0.54137486 -0.61341608 -0.06383619 +v -0.00660110 0.07379462 -0.38968039 +v -0.07016590 -0.65464127 -0.01305193 +v -0.53216910 -0.64153719 -0.11182164 +v 0.51575410 -0.30666512 -0.07049727 +v -0.64289421 0.11065698 -0.39312580 +v -0.27382925 -0.64706433 -0.11481366 +v -0.16632973 -0.62133771 -0.02420635 +v 0.46545267 -0.53785390 -0.00694662 +v 0.51331443 -0.35190302 -0.10620741 +v 0.48646003 -0.34008875 -0.02508468 +v 0.40456563 -0.66554081 -0.13810295 +v 0.26902387 -0.34206936 -0.38331634 +v 0.23561843 0.17961621 -0.01787592 +v 0.51616371 -0.12445765 -0.11363787 +v -0.07102349 0.24940017 -0.12752728 +v -0.22821261 0.24385987 -0.40386298 +v -0.00470413 -0.59698409 -0.02144072 +v -0.68689805 -0.30745250 -0.11669968 +v -0.44827789 0.21587993 -0.05708575 +v -0.30853772 0.12433393 -0.01804316 +v 0.48974964 -0.11416534 -0.02708687 +v 0.16942196 0.23557651 -0.30460304 +v -0.34175628 0.21613744 -0.39183575 +v 0.49255028 -0.51588809 -0.22907709 +v 0.51943284 -0.29975903 -0.10771470 +v -0.10433932 -0.62003082 -0.37846339 +v 0.38926452 -0.00832741 -0.39861023 +v -0.66979468 -0.53150225 -0.16830695 +v -0.61668491 0.19841142 -0.39438137 +v -0.61536139 0.00365826 -0.39309707 +v -0.67106187 0.25584751 -0.39059529 +v -0.65396619 -0.11661319 -0.11683074 +v -0.66704267 0.19965956 -0.40445918 +v 0.15674485 -0.66116959 -0.06985640 +v -0.69521803 -0.49826622 -0.10987137 +v 0.05195381 -0.65617758 -0.11209357 +v 0.53037941 0.01932778 -0.39397010 +v -0.05550219 -0.41487139 -0.39779085 +v -0.50767243 -0.60683990 -0.38576832 +v -0.65971339 -0.58613032 -0.02812435 +v 0.36330613 0.23024249 -0.11622605 +v -0.19372171 -0.19932167 -0.01307789 +v -0.21991563 -0.29938158 -0.38722709 +v 0.40270197 -0.04854929 -0.02790647 +v 0.49054325 -0.28455243 -0.10750575 +v -0.04550539 -0.65382653 -0.11164206 +v 0.12622453 -0.65752572 -0.14018983 +v -0.49892491 -0.15186134 -0.01597543 +v 0.49997941 -0.23236385 -0.32906941 +v -0.66587275 
-0.62494379 -0.11131014 +v -0.68549258 -0.30997619 -0.16013306 +v -0.67756170 -0.21712567 -0.36852923 +v 0.07219830 -0.66268367 -0.10946837 +v 0.17772150 -0.16006933 -0.01265149 +v 0.20132214 0.03332527 -0.40053713 +v -0.08375434 0.16591150 -0.39084360 +v 0.49101686 -0.60026038 -0.37607396 +v -0.02669962 -0.13892554 -0.40043670 +v 0.49175414 -0.63003814 -0.32047039 +v 0.44981366 0.19636253 -0.38300654 +v 0.05466486 -0.61371946 -0.38156137 +v 0.52850550 0.22653800 -0.16065010 +v 0.40270454 0.22741431 -0.06821166 +v 0.20811638 -0.07642252 -0.39929751 +v 0.20357494 -0.14019802 -0.39862281 +v 0.39756662 -0.46875462 -0.02135476 +v 0.05422966 0.19386595 -0.01800954 +v 0.03916406 0.15580361 -0.03076146 +v -0.24066442 0.10037605 -0.40481749 +v -0.67710346 -0.13175359 -0.11775716 +v 0.46006903 0.07341008 -0.38657281 +v -0.46068105 -0.64184201 -0.34704575 +v -0.69525641 -0.58158779 -0.11080374 +v 0.02500215 -0.62807983 -0.10709637 +v -0.64738441 -0.00159346 -0.39230898 +v -0.35507593 -0.51686162 -0.38598877 +v -0.68930697 -0.63322979 -0.32558799 +v -0.50505865 0.06956793 -0.39329177 +v -0.42510217 -0.49981734 -0.01271958 +v 0.26382199 -0.08041404 -0.02714304 +v 0.05543799 0.23020972 -0.40319148 +v -0.65232009 -0.04800986 -0.11765205 +v -0.01194633 -0.63164818 -0.10705169 +v -0.66137993 -0.05656973 -0.11712012 +v 0.48425591 -0.58938277 -0.06160804 +v -0.05636233 0.24235003 -0.27999562 +v 0.24977784 -0.62914515 -0.37902042 +v -0.67942536 -0.07912650 -0.16657795 +v -0.58303624 -0.63842893 -0.01374855 +v -0.54961288 -0.63912237 -0.28056583 +v 0.05012600 -0.42313164 -0.38406232 +v -0.34884232 -0.12769760 -0.01506476 +v 0.33929592 -0.63726133 -0.10657679 +v -0.11484808 -0.20762940 -0.02532171 +v -0.65806216 -0.26607797 -0.38809863 +v -0.29756808 0.07406436 -0.01732016 +v -0.24840584 0.21128558 -0.06716014 +v 0.38402680 0.19407155 -0.03247046 +v -0.59960711 -0.60254312 -0.40148184 +v 0.48898169 -0.29294136 -0.10848962 +v -0.24958855 0.21169218 -0.03668890 +v 0.50543481 -0.62500906 -0.10543462 +v -0.53658247 -0.45702609 -0.02710091 +v -0.66827911 -0.60409492 -0.13651396 +v -0.68466055 -0.14358133 -0.02006876 +v 0.05467001 0.23670101 -0.39739978 +v -0.68583047 -0.34713766 -0.11666649 +v 0.23962489 -0.61602825 -0.37999538 +v -0.67524487 0.13675700 -0.02005089 +v 0.37729913 0.19639055 -0.38733158 +v -0.11617415 -0.09180129 -0.40152502 +v -0.39203975 -0.30697709 -0.02663707 +v 0.48091769 -0.63811207 -0.07985886 +v -0.68496519 -0.21228944 -0.11699756 +v 0.49927801 -0.66730577 -0.10310943 +v 0.49020281 -0.17658882 -0.02706304 +v 0.17497721 0.19283479 -0.03091764 +v -0.69202054 -0.63913959 -0.06268637 +v -0.29152390 -0.04041280 -0.02877331 +v -0.07915910 0.25441426 -0.10106468 +v -0.14204806 -0.62570536 -0.10843727 +v 0.50020730 0.19069372 -0.38726774 +v -0.68592006 -0.48527157 -0.36818773 +v 0.47678074 0.19708632 -0.24449846 +v -0.41052550 0.12534109 -0.01852658 +v -0.19611014 -0.61595249 -0.38276652 +v 0.36098170 -0.63388556 -0.02089390 +v 0.52482682 0.10465948 -0.05994074 +v -0.52767110 -0.29443642 -0.02813627 +v -0.65021574 0.12314442 -0.14196649 +v 0.25680250 -0.63063526 -0.10584825 +v -0.00327814 0.03973352 -0.01618991 +v 0.07412603 0.23249714 -0.01848786 +v 0.48504955 0.19619241 -0.11936615 +v 0.10961787 -0.65834212 -0.34145278 +v 0.21450107 -0.07654593 -0.02714006 +v 0.26891643 0.23337846 -0.26491424 +v -0.49457029 -0.23814696 -0.02835798 +v 0.13593979 0.23740019 -0.11785546 +v 0.49639291 0.20169754 -0.11982617 +v -0.04883077 -0.14939594 -0.38779774 +v -0.68713605 -0.61077648 -0.11143652 +v 0.49059963 
-0.62996769 -0.22206834 +v -0.52257669 0.22525223 -0.16050372 +v 0.25142395 0.23117752 -0.02022579 +v 0.52340603 0.16868261 -0.01867935 +v 0.28226683 -0.66051692 -0.10311922 +v 0.53526139 0.22215888 -0.16125479 +v 0.41539451 0.19761756 -0.03770510 +v 0.35777915 -0.60836351 -0.01985258 +v 0.02425212 -0.65703380 -0.10482907 +v 0.02435196 -0.65133852 -0.10825174 +v 0.51786876 -0.10643864 -0.01445623 +v 0.18953852 -0.63083303 -0.06437111 +v 0.09139580 0.23828565 -0.11741417 +v 0.33129084 0.01398760 -0.39934006 +v 0.34708092 0.07640683 -0.39983070 +v 0.49322620 -0.04101330 -0.02957206 +v -0.32855940 0.24986443 -0.12203731 +v -0.69358468 -0.58560824 -0.18304446 +v 0.29013628 0.20150615 -0.11862272 +v -0.20004740 -0.32601142 -0.02463828 +v -0.33761668 -0.54354030 -0.39932773 +v 0.23651196 -0.13601112 -0.02612046 +v 0.40561020 0.15627952 -0.38782245 +v 0.33109885 -0.56542218 -0.37963408 +v -0.68798083 -0.23473105 -0.07087898 +v 0.02702717 -0.40183535 -0.02290545 +v 0.48710525 -0.37301841 -0.10677253 +v -0.24684931 -0.60671628 -0.01049610 +v -0.66951299 -0.57881838 -0.02936695 +v -0.19765379 -0.65192235 -0.01796656 +v 0.03112573 0.03336203 -0.38910994 +v 0.05605757 -0.19303001 -0.38667962 +v -0.50689667 -0.64025694 -0.36680663 +v -0.35220611 0.24670273 -0.40538153 +v 0.51035261 0.19697435 -0.16549367 +v -0.43531907 0.17647962 -0.40709352 +v 0.10089085 -0.51257956 -0.38254291 +v -0.32244867 0.21868956 -0.12284840 +v 0.53410941 0.11419561 -0.19342099 +v -0.31326595 -0.64979261 -0.06020203 +v -0.66711938 0.25899315 -0.17423864 +v -0.47112578 -0.46354243 -0.01344173 +v -0.39507586 -0.64389664 -0.36081791 +v 0.25834110 -0.66207016 -0.11350170 +v -0.65322626 -0.57919621 -0.01483795 +v -0.66304642 -0.08194899 -0.11912061 +v 0.42093182 -0.12679496 -0.01332641 +v -0.67988354 0.01969540 -0.01985301 +v -0.66737026 0.25760671 -0.25201914 +v 0.19186814 -0.24455106 -0.01125485 +v -0.32976258 -0.44602913 -0.01223190 +v 0.10278014 -0.66272813 -0.10783172 +v -0.66655362 -0.43280646 -0.19307077 +v -0.66523522 -0.60509974 -0.22281389 +v -0.62585729 -0.60219538 -0.38741988 +v 0.51845247 -0.08838756 -0.11313403 +v -0.22210433 0.21698456 -0.11980191 +v 0.48704383 -0.48944601 -0.10436139 +v -0.57188737 0.22132555 -0.12178411 +v 0.49373823 -0.45272714 -0.21438766 +v 0.33063295 -0.45833576 -0.00834156 +v -0.66939777 -0.50189525 -0.12091599 +v -0.22242433 -0.64883393 -0.10752235 +v 0.00368511 -0.52220362 -0.00914158 +v -0.66344577 0.03274180 -0.12109089 +v 0.48551807 -0.29070997 -0.11022244 +v 0.49536127 -0.34320647 -0.11280636 +v -0.42596480 -0.63122952 -0.11033904 +v -0.29024896 -0.24578372 -0.40153310 +v -0.31311744 0.03060263 -0.01678865 +v 0.07946112 -0.65645450 -0.11266380 +v 0.10327424 -0.66356421 -0.10481502 +v -0.01662592 0.24266760 -0.11999086 +v -0.00147328 -0.58681172 -0.38250163 +v 0.11912576 -0.35961953 -0.39682275 +v 0.09882496 -0.54905009 -0.39494035 +v -0.49163392 0.23592152 -0.12351310 +v 0.20438144 -0.56007481 -0.00758877 +v -0.01581952 -0.29459652 -0.02382165 +v -0.24646528 0.20230740 -0.03184576 +v -0.67960703 0.07178853 -0.02126753 +v -0.36226943 0.05185792 -0.39246556 +v -0.66698879 -0.59818900 -0.38343611 +v -0.67548543 0.25794461 -0.39586505 +v 0.27680385 -0.55518901 -0.02038749 +v -0.68784511 -0.32096145 -0.11564049 +v -0.64979583 0.12575237 -0.29551974 +v -0.50167167 -0.64416403 -0.11210421 +v 0.10848129 0.23862587 -0.11879337 +v 0.49583745 -0.09774330 -0.10963433 +v -0.42951807 -0.64799613 -0.11199186 +v -0.15295103 -0.17174935 -0.01328726 +v 0.50570881 -0.51252961 -0.00746876 +v 0.53200769 
0.22509381 -0.24556169 +v -0.37233791 -0.61585283 -0.02457019 +v -0.44637823 0.03411689 -0.03072146 +v -0.13700223 -0.55878675 -0.39738062 +v -0.02885759 -0.65572035 -0.10712489 +v 0.49413761 -0.37676191 -0.11227740 +v 0.05902210 -0.66276157 -0.10625932 +v -0.65858942 -0.07511546 -0.03266834 +v -0.69129342 -0.35912216 -0.07276671 +v -0.69246078 -0.45750871 -0.01731718 +v -0.50497150 -0.28619248 -0.38932356 +v -0.66880894 -0.54269552 -0.28466663 +v -0.65564030 -0.07041641 -0.20372261 +v -0.03827838 -0.64936823 -0.00842922 +v -0.34689918 -0.61343968 -0.19182050 +v 0.11450754 0.23898812 -0.15369412 +v 0.52346498 0.21442840 -0.01880915 +v 0.15327618 -0.62786543 -0.32804427 +v -0.46732926 -0.61542588 -0.03312239 +v 0.43579778 0.22688344 -0.11268380 +v -0.44951165 -0.01463682 -0.01699291 +v -0.14865789 -0.61871052 -0.20570226 +v -0.67225981 -0.45796785 -0.07623789 +v 0.37502339 -0.66267473 -0.00713854 +v 0.49303427 -0.12199427 -0.03019932 +v -0.31239805 0.07973833 -0.39236194 +v -0.59998077 -0.63387483 -0.01326130 +v -0.58817661 -0.27050725 -0.40428767 +v 0.34442371 0.22934608 -0.10945944 +v -0.05992061 -0.10703350 -0.01413622 +v 0.27433091 0.23066336 -0.02093305 +v -0.22699901 -0.11061527 -0.40232888 +v 0.49717379 0.02287263 -0.11113226 +v -0.67459965 -0.53791922 -0.11457109 +v -0.07624573 0.02029249 -0.40215993 +v -0.19809149 -0.30991858 -0.01197317 +v -0.18760061 0.24749541 -0.12386715 +v 0.42459011 0.04278384 -0.39873812 +v -0.00401277 -0.66239578 -0.10892537 +v -0.68752253 -0.57578111 -0.39875600 +v 0.52297860 0.09185379 -0.01786018 +v -0.07336572 0.21102148 -0.12279691 +v -0.68415868 -0.23770578 -0.12173985 +v -0.65261436 -0.00515746 -0.35626030 +v -0.55363452 -0.03550258 -0.39249963 +v -0.63750780 -0.22630194 -0.01692568 +v 0.10327172 -0.16333732 -0.01270213 +v 0.27308676 -0.66280174 -0.04400234 +v -0.09584764 -0.47718146 -0.38460553 +v 0.48217988 -0.42616633 -0.02207819 +v 0.36816004 -0.63487005 -0.23788632 +v -0.59976828 -0.35222018 -0.38913888 +v 0.50323588 -0.01807476 -0.11711884 +v 0.48754308 0.22281389 -0.11731161 +v 0.35985029 -0.66380352 -0.39033785 +v -0.18994811 -0.65114444 -0.01251234 +v 0.52573061 0.17675745 -0.07069430 +v -0.42978171 -0.57511032 -0.02505531 +v -0.66893947 0.10301901 -0.11925594 +v -0.26324859 -0.37590432 -0.01205275 +v 0.38564485 -0.47510916 -0.00800367 +v -0.45377403 -0.63836944 -0.39913025 +v -0.47009915 -0.60098302 -0.38629919 +v -0.44644219 0.22475976 -0.12044746 +v 0.30529413 0.19786556 -0.38784882 +v -0.35659131 -0.49478087 -0.39993837 +v -0.00328571 -0.65955049 -0.10066935 +v -0.39124858 0.01531419 -0.40559983 +v -0.66582394 0.18901302 -0.11985340 +v 0.03280006 0.20489904 -0.38859141 +v -0.65204602 0.13872032 -0.11841463 +v -0.16900474 -0.50252372 -0.39827940 +v 0.49817222 -0.21428068 -0.38117072 +v -0.17683578 -0.40031004 -0.02401272 +v 0.19871622 0.23393199 -0.35524347 +v -0.68530810 -0.25321946 -0.11748609 +v -0.32798842 0.20900284 -0.03241045 +v 0.42375046 -0.53240812 -0.02049686 +v 0.35698566 -0.00768122 -0.02851926 +v 0.51892614 -0.34240532 -0.10746277 +v 0.29482886 0.21931505 -0.11479451 +v 0.11136902 -0.65801191 -0.23216060 +v -0.03532921 -0.61943066 -0.02086964 +v 0.04201607 0.23911621 -0.31166136 +v 0.28585863 -0.63398749 -0.10159960 +v 0.52839303 -0.11756492 -0.30039248 +v 0.52518535 -0.16967285 -0.39341241 +v 0.48851079 -0.40030184 -0.10608102 +v 0.50886023 -0.66280514 -0.14949483 +v 0.03504263 0.20382352 -0.03353262 +v 0.23628679 0.09877062 -0.38867608 +v -0.18902393 -0.62120122 -0.02580257 +v 0.04562055 -0.12541167 -0.01363450 +v 
-0.33708921 0.24668731 -0.40490004 +v -0.18304633 -0.07177593 -0.38943782 +v 0.50509703 -0.65911204 -0.09187799 +v -0.17707129 -0.64569616 -0.00962330 +v -0.65202296 -0.61107832 -0.07972354 +v 0.43398792 -0.45695600 -0.00821262 +v -0.21470328 -0.61468661 -0.38314354 +v -0.17996152 0.24015102 -0.01969045 +v 0.37318280 -0.66501170 -0.23858081 +v -0.58625400 0.22046909 -0.11822611 +v 0.07637128 -0.38638580 -0.00996246 +v 0.44345480 -0.63823020 -0.10389967 +v 0.12893064 0.11646925 -0.01683291 +v -0.04013688 0.11938174 -0.39026740 +v -0.54667640 0.22548456 -0.34261835 +v -0.45163384 -0.24425499 -0.38945505 +v 0.15132040 -0.10888521 -0.01364174 +v 0.49543560 -0.04690238 -0.11145057 +v 0.47057033 -0.63775772 -0.03103466 +v 0.49747849 -0.26362815 -0.37976110 +v 0.52571273 -0.21999933 -0.30851805 +v 0.15288201 -0.14155823 -0.38654387 +v 0.14522761 -0.21883179 -0.02455657 +v -0.09390455 -0.65197057 -0.19912799 +v -0.62653047 -0.60780311 -0.38103858 +v -0.57866102 -0.63769752 -0.39863002 +v 0.48830858 0.22744995 -0.11648435 +v -0.67296630 0.10749479 -0.12310415 +v 0.20729738 -0.63335866 -0.10688829 +v -0.02731894 0.20974936 -0.14811903 +v -0.68135542 -0.42609549 -0.40302083 +v -0.65841526 0.08332749 -0.11800823 +v 0.30628234 0.20046712 -0.24140112 +v 0.34237322 0.12624671 -0.40039840 +v -0.69829494 -0.62513381 -0.10623975 +v 0.49580938 0.06794516 -0.03186278 +v -0.51736182 -0.23264763 -0.39009422 +v -0.33317494 -0.18333714 -0.01432304 +v -0.35217014 0.10027631 -0.39289686 +v -0.32261494 0.21276866 -0.03461649 +v 0.10930826 -0.11222575 -0.39967114 +v 0.07065738 -0.62625098 -0.10692190 +v -0.62795126 -0.60636991 -0.15631804 +v 0.25560459 -0.01670418 -0.02813542 +v 0.04657547 0.20772988 -0.12029640 +v 0.53549963 0.18463916 -0.29104257 +v 0.49093771 -0.63570178 -0.29960436 +v 0.49106315 -0.63416004 -0.37467369 +v -0.41722229 -0.07028955 -0.01616821 +v 0.12672907 -0.65881610 -0.10505971 +v 0.49973387 -0.19597778 -0.16840823 +v -0.68775797 -0.37889495 -0.15553673 +v -0.02318453 -0.23009388 -0.38660708 +v -0.21386869 -0.35830784 -0.38654920 +v -0.69355893 -0.63533467 -0.11348382 +v 0.48478091 -0.55753022 -0.10445586 +v 0.39762059 0.20186718 -0.12043555 +v -0.27669621 -0.65288895 -0.10294305 +v 0.01626507 -0.65699559 -0.10621166 +v -0.01572468 -0.65472859 -0.11112162 +v 0.49091724 -0.23082337 -0.10848068 +v -0.68536180 -0.10367397 -0.07330545 +v 0.37377420 0.07669365 -0.01627714 +v -0.07775860 0.24811438 -0.14465083 +v 0.50030220 -0.18609490 -0.22086191 +v 0.51999372 0.09327187 -0.11603583 +v -0.28012916 -0.57796699 -0.38474107 +v 0.48998284 -0.60395116 -0.17758216 +v -0.66272372 -0.11526851 -0.11686820 +v -0.25828212 0.24892589 -0.17779195 +v 0.52030092 -0.11776014 -0.07283607 +v -0.21236852 -0.64851540 -0.11602604 +v 0.29885325 -0.60039032 -0.02040537 +v -0.63697267 -0.60659963 -0.38509446 +v -0.16436595 0.19328646 -0.40437108 +v 0.32588685 -0.49787214 -0.38053453 +v 0.48973453 -0.20579523 -0.02655026 +v 0.48327565 -0.07755368 -0.39687487 +v -0.22580339 -0.42043912 -0.01149656 +v -0.61637747 -0.63979131 -0.01567627 +v 0.46694541 -0.66869932 -0.10179619 +v -0.01800819 0.24064316 -0.31999761 +v -0.67598963 -0.27968320 -0.40410957 +v -0.09009011 -0.65313792 -0.38760200 +v -0.35550067 -0.61338437 -0.24388695 +v -0.30438003 0.24971890 -0.23914933 +v -0.36976755 -0.46302965 -0.38644198 +v -0.65676659 -0.09885578 -0.16118628 +v -0.68428403 -0.42304549 -0.11410129 +v -0.08396147 -0.63827115 -0.10724276 +v 0.24310413 -0.62980008 -0.14468403 +v 0.03472269 -0.65603811 -0.11064969 +v 0.06496398 -0.65873617 -0.10585293 
+v -0.14016114 0.20829165 -0.11701288 +v -0.69388914 -0.55833167 -0.13484029 +v -0.12109938 -0.38395497 -0.01099952 +v -0.64469874 -0.61053222 -0.03071976 +v -0.00441202 -0.09392705 -0.38808522 +v -0.28096882 -0.27448696 -0.38789755 +v -0.66107506 -0.22993557 -0.12121430 +v 0.40718222 0.19715452 -0.08392326 +v 0.06050958 -0.24955586 -0.02416209 +v 0.52982670 0.06139010 -0.39730680 +v -0.69174898 -0.58240527 -0.26364931 +v -0.55476594 -0.63329023 -0.01286767 +v 0.03122318 -0.66208005 -0.10751554 +v 0.28027278 0.23084038 -0.06417876 +v 0.13740942 -0.62800449 -0.37307256 +v -0.23866482 0.24425574 -0.02415103 +v 0.45405838 -0.63636959 -0.17272922 +v -0.27273330 -0.38241795 -0.02487360 +v -0.38412914 0.06119265 -0.01750655 +v -0.30610034 0.21426047 -0.12109217 +v -0.49286258 -0.60837322 -0.02502510 +v 0.18020238 0.07908081 -0.40115204 +v -0.68356210 -0.08034441 -0.11807462 +v -0.33610353 -0.64492404 -0.16424553 +v 0.53553551 0.20401166 -0.11790312 +v -0.42522225 -0.43695089 -0.40100798 +v 0.51065487 0.19651231 -0.17521144 +v 0.15482767 -0.48101419 -0.38244012 +v -0.20036465 -0.62144578 -0.06657075 +v 0.33849743 -0.46468800 -0.02141859 +v 0.09996175 0.20555395 -0.28461555 +v -0.67200625 -0.07213642 -0.40558794 +v 0.51879311 -0.36514163 -0.10748788 +v 0.02728079 -0.19337125 -0.02515191 +v 0.31247503 -0.66248918 -0.39062658 +v 0.01179792 -0.65537578 -0.28127289 +v -0.67093360 -0.39999491 -0.08090656 +v 0.49497744 -0.42678413 -0.26459169 +v 0.13651088 -0.66431540 -0.10513631 +v -0.67612016 -0.19750023 -0.11562858 +v -0.21143152 -0.62296236 -0.11023350 +v -0.66035056 -0.14052831 -0.03284621 +v -0.42109296 0.02941099 -0.01731505 +v -0.48399472 -0.07162167 -0.39165151 +v 0.25460368 0.13504009 -0.03046996 +v 0.11576720 -0.66391820 -0.10703765 +v 0.29216912 -0.63075113 -0.11335786 +v -0.67113328 0.24697009 -0.41100514 +v -0.10424176 -0.51203549 -0.39754978 +v 0.49543568 -0.35194829 -0.14188521 +v 0.28507280 -0.39425179 -0.39499417 +v 0.03605393 -0.66232365 -0.10590442 +v 0.50793105 -0.58690870 -0.09202523 +v -0.44822383 -0.64486957 -0.01735037 +v -0.15066223 0.20994979 -0.12088833 +v -0.56089199 -0.13109308 -0.02984143 +v -0.30195823 -0.64665258 -0.29209262 +v -0.66822255 -0.27399239 -0.11740609 +v 0.47155601 -0.21597946 -0.38274440 +v -0.21716335 -0.62397552 -0.11059351 +v 0.11067025 -0.65754342 -0.11240081 +v -0.64426607 0.22681569 -0.38764286 +v 0.52304274 -0.30986795 -0.34546781 +v -0.33018222 -0.47143096 -0.02491105 +v 0.01528978 -0.65517032 -0.21214800 +v -0.65448046 -0.09223168 -0.35759717 +v -0.26453358 0.24477363 -0.02587109 +v 0.53504658 0.14568587 -0.25011632 +v -0.01607534 -0.08169265 -0.02748646 +v 0.50890386 0.07603315 -0.27605015 +v -0.62681454 -0.58031982 -0.38721815 +v -0.68174702 -0.18726626 -0.12015214 +v -0.51393902 0.10232664 -0.01857424 +v 0.25283986 -0.66627371 -0.09824587 +v 0.11392658 -0.66219258 -0.10606188 +v 0.40363154 -0.63925147 -0.10630827 +v 0.38923922 -0.18841311 -0.39650530 +v -0.16055405 -0.01992062 -0.02864011 +v 0.43652755 0.19869925 -0.12005341 +v 0.12741011 -0.30333292 -0.39737171 +v -0.68519789 -0.40238670 -0.31607598 +v 0.09665939 -0.10976753 -0.01373323 +v 0.09210259 0.20542236 -0.33733904 +v 0.19958675 0.23608206 -0.11809419 +v 0.01288083 -0.62139100 -0.38119540 +v -0.07355501 0.24151064 -0.35379875 +v 0.50056595 -0.32676187 -0.10693126 +v -0.68357229 -0.57800174 -0.40297338 +v -0.17359468 -0.32565391 -0.38655770 +v -0.68407404 -0.25293306 -0.15307495 +v -0.01625452 -0.59131098 -0.39559376 +v -0.09679212 0.24412955 -0.23436110 +v 0.26826644 -0.63511634 
-0.10431203 +v 0.06974356 -0.65650040 -0.19281246 +v -0.67603564 -0.60331082 -0.08649951 +v -0.53182572 0.24762617 -0.02117433 +v 0.48666772 -0.40374398 -0.02485871 +v 0.48539284 -0.41755158 -0.10646912 +v -0.64963692 0.13516822 -0.25507200 +v 0.04375956 -0.64347100 -0.10629124 +v 0.08383892 0.19708239 -0.40231505 +v -0.51375467 0.22469518 -0.28517556 +v 0.07869589 -0.62570566 -0.37710419 +v -0.66315883 -0.08579789 -0.03774127 +v 0.01913493 -0.23085396 -0.01171997 +v -0.67094123 -0.49112102 -0.03211215 +v -0.04769387 -0.57155812 -0.02172329 +v 0.22298773 -0.30902791 -0.39684317 +v 0.26888853 -0.26134503 -0.02398804 +v -0.12950891 0.23457870 -0.12019894 +v -0.56269419 -0.31632328 -0.01517115 +v -0.03053931 0.20575853 -0.06592350 +v -0.57906795 0.22681451 -0.30899894 +v 0.51432341 -0.50553346 -0.10694233 +v 0.50582421 -0.44243538 -0.00876370 +v -0.48109931 0.15321149 -0.01921469 +v -0.67373419 0.09144521 -0.26282290 +v 0.01899157 0.24173874 -0.12181517 +v 0.09569942 -0.65343815 -0.00741642 +v -0.44233578 -0.42877686 -0.39139405 +v 0.05511574 -0.65867239 -0.10499588 +v -0.30529898 -0.64879328 -0.11105906 +v -0.44487274 0.22244357 -0.38349718 +v -0.42717290 -0.61364168 -0.11092544 +v 0.00672662 -0.62133271 -0.02072666 +v 0.49370006 -0.48329532 -0.25805232 +v 0.52515990 0.07561372 -0.10059913 +v 0.01467798 -0.08590157 -0.40059626 +v -0.21204074 0.16452837 -0.03148064 +v -0.07767658 0.20701449 -0.06567753 +v 0.48660374 -0.33086196 -0.10939262 +v -0.45250410 -0.02312083 -0.39201301 +v -0.00552298 -0.28471881 -0.38583067 +v -0.22831722 -0.61892962 -0.02325951 +v 0.50909078 -0.66605133 -0.28391129 +v -0.11412074 0.24065711 -0.11819590 +v 0.33358487 0.23000026 -0.35483900 +v -0.53607273 -0.63165212 -0.11108502 +v 0.11147671 -0.66359538 -0.10359328 +v -0.21475689 -0.65602720 -0.10585038 +v -0.62951273 -0.32026616 -0.02965845 +v -0.66028649 -0.64064431 -0.07594299 +v -0.63338345 0.08703011 -0.40807885 +v -0.63007337 -0.60660571 -0.21418977 +v -0.65952873 0.06114727 -0.08760636 +v 0.50248599 0.14452456 -0.11786313 +v -0.30223209 -0.61483335 -0.01147571 +v 0.50126231 -0.20191425 -0.37628821 +v -0.36510313 -0.64481819 -0.32510883 +v 0.26801047 -0.66350138 -0.11745758 +v -0.24837480 0.24423860 -0.06023054 +v 0.52629912 0.14083976 -0.11036542 +v 0.50996888 0.18585695 -0.38499531 +v -0.21155944 0.19952741 -0.40481749 +v -0.01163624 -0.66093934 -0.10803130 +v 0.49758360 0.05233426 -0.11349446 +v 0.20005272 0.03917930 -0.01574691 +v 0.49890968 -0.23649234 -0.21736775 +v 0.45503640 -0.46028638 -0.02173988 +v 0.07167640 0.20276172 -0.11376341 +v -0.69177192 -0.54084593 -0.23284572 +v 0.06456984 -0.51845670 -0.00880030 +v -0.29441640 0.21760435 -0.26648834 +v 0.51373976 -0.31073701 -0.11040840 +v -0.37161320 -0.64504296 -0.11459876 +v -0.68093288 -0.00556641 -0.11921763 +v 0.19398552 -0.02974888 -0.02786860 +v -0.66322279 0.25891006 -0.23824292 +v -0.13434727 -0.07022215 -0.02779200 +v -0.48427367 0.25470459 -0.12488208 +v -0.68988007 -0.45330375 -0.11801249 +v 0.53434265 0.22340046 -0.30811316 +v 0.50880665 -0.45249036 -0.00819219 +v 0.03301529 -0.66125619 -0.10820067 +v 0.20409241 0.22672430 -0.01815210 +v -0.61703527 -0.25493515 -0.02962738 +v -0.29588071 0.20717363 -0.03235258 +v -0.55467367 0.22748251 -0.12220157 +v -0.11091047 -0.05244193 -0.38907376 +v 0.15975833 -0.62783742 -0.20330814 +v -0.05575015 0.24550408 -0.12109940 +v 0.08586650 0.21493602 -0.01815295 +v -0.28176230 -0.19300193 -0.01372642 +v -0.07199078 0.23922110 -0.11679755 +v -0.65843302 -0.19585569 -0.29687768 +v 0.36394650 -0.66626102 
-0.11528091 +v 0.28205466 -0.37460437 -0.02240288 +v -0.44931430 -0.08512585 -0.02938695 +v 0.06582170 -0.66295189 -0.10547122 +v -0.02002790 0.23056315 -0.40375575 +v -0.66092390 -0.49675632 -0.38801011 +v -0.66484582 -0.19297278 -0.11708182 +v -0.39611238 0.23128806 -0.12273946 +v -0.63859046 -0.63927382 -0.01627629 +v 0.37076634 -0.66528505 -0.10764448 +v 0.48941210 -0.23947844 -0.10921261 +v 0.53267610 0.05618764 -0.18000692 +v 0.53537434 0.17538942 -0.21393189 +v 0.46329242 0.19760847 -0.12029001 +v -0.29559654 -0.61497265 -0.33604154 +v 0.00001435 0.23876119 -0.39669570 +v -0.44035429 -0.27392957 -0.02744986 +v 0.36120731 0.19965301 -0.15035060 +v -0.68202341 -0.14362819 -0.13235638 +v -0.52601701 -0.33817768 -0.38892183 +v -0.65639013 0.15610947 -0.11991383 +v 0.05862555 -0.15086280 -0.02600258 +v 0.45587867 -0.50462890 -0.02067431 +v 0.26192027 -0.66438353 -0.10359669 +v 0.49590427 -0.12391998 -0.10956667 +v 0.45396891 0.22799912 -0.25238043 +v -0.68700773 -0.63342106 -0.36940119 +v 0.06076059 -0.65593845 -0.39244896 +v 0.52739227 -0.03030794 -0.11151610 +v -0.20790629 -0.16192073 -0.02654770 +v -0.14496869 0.14489621 -0.40379360 +v -0.17574245 0.07971933 -0.40376893 +v -0.49741668 0.24949868 -0.11559794 +v -0.65337956 -0.01697478 -0.30230764 +v -0.69596004 -0.62439984 -0.12239264 +v 0.49947292 -0.09646671 -0.38255608 +v -0.68008292 0.08913981 -0.11899295 +v 0.07825564 -0.27358368 -0.39807960 +v 0.44461468 -0.63589776 -0.10617975 +v -0.68649572 -0.32609510 -0.11703075 +v -0.49307236 0.20533261 -0.39382562 +v -0.31546980 -0.37948966 -0.01250425 +v 0.09026460 0.20324738 -0.38896993 +v -0.63252068 -0.35160649 -0.01586054 +v 0.32238492 -0.18528678 -0.01226084 +v -0.44566372 0.25208598 -0.36594957 +v 0.51575452 -0.28186297 -0.10935475 +v 0.49347484 0.19770135 -0.17109215 +v 0.08994717 0.21680991 -0.12083557 +v -0.69187427 -0.33717200 -0.10547930 +v -0.33535331 0.01282985 -0.39169341 +v -0.68196195 -0.20663851 -0.11938403 +v -0.48530275 -0.61020726 -0.38131902 +v -0.06778723 -0.62099731 -0.34686041 +v 0.06987677 -0.66038573 -0.08803616 +v -0.67000163 -0.43629780 -0.03276110 +v 0.02730909 -0.62388086 -0.10915134 +v -0.69029987 -0.52459675 -0.28016710 +v 0.07659421 0.09414573 -0.40187865 +v -0.32380515 0.22487785 -0.11885167 +v -0.04723299 0.19871739 -0.40349403 +v 0.09302941 0.07388983 -0.01638736 +v 0.48625565 -0.34896246 -0.10824493 +v -0.65269858 -0.04589022 -0.38741669 +v 0.39063710 0.22810243 -0.36178112 +v 0.29500318 0.23205894 -0.30309874 +v 0.43334302 0.04685706 -0.01578904 +v 0.09976734 -0.56775349 -0.00814538 +v -0.00572514 -0.62314391 -0.27283645 +v -0.00048226 -0.45212859 -0.02257438 +v -0.45195618 -0.57576019 -0.38614514 +v 0.49832350 -0.27330843 -0.26734602 +v -0.33788514 -0.28049099 -0.01333619 +v -0.19232866 -0.64957911 -0.23397087 +v 0.46437022 0.20134786 -0.11384129 +v 0.36154014 0.09801383 -0.38772860 +v -0.61491042 -0.60542047 -0.01381493 +v 0.43563166 0.19717282 -0.33394212 +v 0.50513822 -0.65844244 -0.10395967 +v -0.38346082 -0.15961555 -0.38977060 +v 0.15137182 -0.20913312 -0.01184551 +v -0.02289249 -0.63111472 -0.10955986 +v -0.19849057 0.14531495 -0.40443790 +v -0.05099617 0.24311389 -0.10854238 +v 0.13193375 -0.42746046 -0.39600569 +v 0.51273119 -0.58534908 -0.17963286 +v 0.22766751 0.19825785 -0.38888142 +v -0.36663905 0.25192332 -0.17909284 +v -0.14839137 -0.35932207 -0.02408762 +v -0.47862369 -0.51881218 -0.01311661 +v 0.11919007 0.23338941 -0.01890149 +v -0.39710560 -0.37963966 -0.02617152 +v -0.65461856 -0.26171690 -0.39028081 +v -0.06404192 0.24708828 
-0.14645813 +v -0.00831584 0.20888795 -0.25883722 +v -0.69185120 -0.63256276 -0.22280964 +v 0.03761568 -0.65977019 -0.10695042 +v 0.37447840 -0.25095612 -0.39590293 +v -0.08027744 -0.56588358 -0.39670125 +v -0.33389920 -0.12459351 -0.02797456 +v 0.14517152 -0.20323749 -0.38603684 +v 0.46896544 0.22525339 -0.07470380 +v -0.66479456 -0.16451454 -0.07801582 +v -0.67786336 -0.44386899 -0.11288040 +v -0.07032160 -0.27254951 -0.38637111 +v -0.67049056 -0.57716179 -0.23293380 +v 0.10028449 -0.57901007 -0.38142264 +v 0.10562977 -0.62637240 -0.22583272 +v 0.50017697 -0.10186295 -0.10991732 +v 0.53436577 0.15533929 -0.34766597 +v -0.21797727 -0.56514138 -0.01044844 +v -0.28063071 -0.64647216 -0.39593080 +v 0.16632481 -0.65938920 -0.21511662 +v 0.36555681 -0.41447201 -0.02200883 +v -0.32770911 0.21880759 -0.22121298 +v 0.42017698 0.19542494 -0.03222704 +v -0.39955550 -0.61193258 -0.14869097 +v 0.27687842 -0.42315623 -0.00885732 +v 0.28802210 -0.63254118 -0.33321890 +v -0.65826654 -0.63251191 -0.11158376 +v 0.49782690 -0.52287781 -0.00745898 +v -0.17783134 -0.61771369 -0.34335989 +v -0.52254558 -0.25546154 -0.01530520 +v -0.58348894 -0.60753328 -0.38428932 +v 0.04436642 -0.09275062 -0.02723922 +v -0.20268638 -0.65869915 -0.11019053 +v -0.68502110 -0.52649909 -0.11630946 +v -0.54147166 0.02064075 -0.40699095 +v -0.28258398 0.05007589 -0.40487918 +v 0.02613922 0.18178219 -0.40277508 +v -0.46275678 -0.11184414 -0.01607629 +v -0.37032798 -0.57641095 -0.02433146 +v -0.64819038 -0.06402700 -0.01803933 +v 0.10416290 0.23732263 -0.31202987 +v -0.65855837 -0.26881453 -0.11660223 +v 0.49749923 -0.01790481 -0.39710787 +v 0.21639331 0.23082900 -0.11573879 +v 0.48513699 0.04185194 -0.39806363 +v 0.22698659 -0.63059568 -0.35600987 +v 0.43149987 0.19448103 -0.40043989 +v 0.48012963 -0.60539299 -0.10488098 +v 0.43104675 0.20164606 -0.11453365 +v 0.52293795 -0.05284412 -0.39661890 +v 0.40894628 -0.63511926 -0.15018123 +v 0.53622180 0.21534295 -0.26248395 +v -0.27773020 -0.10607240 -0.40284529 +v -0.04817756 -0.29516074 -0.01124506 +v 0.24282020 -0.63087672 -0.30960107 +v -0.51215196 -0.61216396 -0.02584938 +v 0.38506660 -0.31939954 -0.39499012 +v -0.09068380 -0.64793956 -0.39557716 +v -0.59791452 -0.41576546 -0.40315083 +v 0.48675492 -0.63398910 -0.11368596 +v -0.69288796 -0.56793237 -0.11633541 +v 0.49730724 -0.17222448 -0.39549887 +v -0.16259676 0.23698837 -0.01908405 +v 0.32761508 -0.66538066 -0.10190003 +v -0.22615388 -0.62370330 -0.11073309 +v -0.38407516 -0.54150581 -0.01197572 +v -0.36495963 -0.61120671 -0.02364888 +v -0.67690843 0.14073129 -0.12146622 +v -0.28299099 -0.64733356 -0.39233834 +v 0.24393125 0.20061943 -0.03916898 +v 0.14957733 -0.65926534 -0.27743870 +v 0.40715429 0.22682397 -0.11488345 +v -0.65188187 0.03562726 -0.32171595 +v 0.52254629 -0.23521213 -0.39270791 +v 0.06709157 -0.62521768 -0.35378683 +v 0.51145893 0.19474848 -0.22615358 +v 0.21330085 -0.62974578 -0.24548934 +v -0.68255579 -0.20791727 -0.19064941 +v -0.08614235 0.24039997 -0.02268502 +v -0.20338523 0.23808648 -0.01912150 +v 0.49202341 -0.19413079 -0.10863600 +v 0.29219237 -0.29990315 -0.38378337 +v 0.05500325 0.23399614 -0.11848442 +v -0.67440987 -0.54628855 -0.11102885 +v -0.24969563 0.21615824 -0.29012638 +v -0.16315994 0.21344842 -0.38200095 +v -0.00650842 0.21070631 -0.11762609 +v 0.33429158 -0.38750035 -0.00920457 +v 0.19667622 0.23580067 -0.23988914 +v 0.14295462 -0.31629327 -0.38457105 +v -0.66768986 -0.49856740 -0.27455372 +v -0.69256026 -0.41424680 -0.06642351 +v 0.48844966 -0.40577513 -0.10580016 +v 0.30827686 -0.63265872 
-0.22581485 +v 0.24147622 -0.51936388 -0.00788452 +v 0.31686822 -0.63436061 -0.06595755 +v 0.48071334 -0.62774694 -0.02117859 +v -0.66484314 -0.37756118 -0.20105146 +v 0.49648550 -0.24779303 -0.11401874 +v 0.44144806 -0.63587171 -0.13390408 +v 0.52402598 0.01367259 -0.39750937 +v 0.50307494 -0.13283889 -0.34955499 +v -0.12862298 0.20820591 -0.07579745 +v 0.31310502 -0.63715994 -0.10726532 +v -0.66066778 -0.10732000 -0.03325474 +v -0.66982746 0.22369823 -0.16487704 +v 0.00585126 -0.65553266 -0.11112290 +v -0.56360281 0.22657439 -0.19794497 +v -0.17886041 -0.60410571 -0.39700872 +v 0.49682599 -0.40671781 -0.37330791 +v -0.06524249 -0.61335528 -0.39576250 +v 0.20215975 -0.19937991 -0.01197615 +v -0.16942425 0.21348464 -0.35259783 +v -0.47804761 0.21752584 -0.03814426 +v -0.44197977 -0.14343525 -0.02870224 +v -0.67983705 0.05561006 -0.11981766 +v -0.11946841 -0.41899177 -0.39834961 +v 0.49132711 -0.25000817 -0.10594741 +v 0.49791911 -0.32127982 -0.31444272 +v 0.22727591 0.19962040 -0.10867260 +v -0.54727769 0.25643966 -0.12395652 +v 0.35187879 -0.31894997 -0.01017183 +v 0.04847271 -0.62468982 -0.30423301 +v 0.46137512 -0.60329688 -0.00605169 +v -0.67692888 -0.15028045 -0.11703628 +v -0.56746072 -0.61596686 -0.11322977 +v -0.10738008 -0.57247752 -0.02210330 +v 0.49161896 -0.21491107 -0.05844622 +v 0.46018216 -0.63875425 -0.35223976 +v -0.65719128 -0.14630163 -0.27063337 +v 0.02307240 0.21325548 -0.11654989 +v 0.48711848 -0.63028133 -0.10517801 +v 0.49565864 -0.47624674 -0.37226489 +v -0.66757464 0.25723544 -0.12488719 +v -0.49381208 0.25026584 -0.12063981 +v -0.20204888 -0.61315513 -0.38341293 +v -0.64305496 -0.60652900 -0.11376298 +v 0.31773096 0.19953246 -0.11911550 +v 0.48815528 -0.38987783 -0.05631850 +v 0.49210280 -0.63549370 -0.31843221 +v 0.09105064 0.12317974 -0.03021293 +v -0.69407064 -0.54644179 -0.01593628 +v 0.51051944 0.19553186 -0.27665338 +v -0.08180055 -0.29638633 -0.39905941 +v -0.02738519 0.04764006 -0.02932141 +v 0.30695593 -0.51144785 -0.02088709 +v 0.50487721 -0.02891236 -0.21691370 +v -0.64650071 0.22507605 -0.34010935 +v 0.10847657 -0.66341436 -0.10714276 +v -0.09066327 -0.34241989 -0.38576725 +v 0.53595817 0.21962932 -0.31772730 +v -0.66980183 -0.01006837 -0.40661582 +v -0.03136342 -0.62529504 -0.02660005 +v -0.31443030 -0.23153919 -0.38861418 +v 0.11097002 -0.66145712 -0.08977962 +v -0.02857814 -0.17571220 -0.02561108 +v -0.54282838 0.21919520 -0.08379729 +v -0.64562774 0.19360057 -0.11939935 +v 0.03135914 0.11179786 -0.40225738 +v 0.51963562 -0.39740440 -0.14394698 +v -0.62715477 0.09905647 -0.01908405 +v 0.22948779 -0.60707235 -0.39287964 +v 0.53165227 0.00924866 -0.30625182 +v 0.43729579 -0.47985718 -0.37954023 +v -0.65696597 0.17080528 -0.07502721 +v -0.46160981 0.25191790 -0.40227589 +v -0.66078037 -0.28377676 -0.32360771 +v -0.13517141 0.24335550 -0.35760355 +v -0.62003285 -0.62346339 -0.11147568 +v -0.47384661 0.03181224 -0.39280814 +v -0.12445524 -0.61569870 -0.38259289 +v -0.42263380 0.22224770 -0.17778514 +v -0.36360276 -0.21541987 -0.38906291 +v -0.59886676 -0.63762015 -0.31818795 +v -0.57327956 -0.30284142 -0.38964206 +v 0.33902252 -0.35719854 -0.39495865 +v 0.17322668 -0.66014701 -0.34237942 +v -0.67414868 0.25424501 -0.38044411 +v 0.16869804 0.20066570 -0.11525580 +v 0.50820524 0.22577161 -0.26429018 +v 0.48854700 -0.15937835 -0.02669835 +v -0.66815060 0.21258296 -0.35462898 +v 0.01802156 -0.62592852 -0.02330844 +v -0.18629716 0.21474239 -0.15243620 +v -0.66860372 -0.60262179 -0.30249998 +v -0.66662484 -0.23821469 -0.08268406 +v -0.30286932 -0.61445165 
-0.15655166 +v -0.04853076 -0.08770833 -0.40103033 +v 0.35523501 -0.63512582 -0.34303752 +v 0.50536877 -0.03334001 -0.27073252 +v 0.48898989 0.19422740 -0.09365422 +v -0.66966355 -0.20626232 -0.11496345 +v -0.04181843 -0.66209471 -0.10888920 +v 0.50414509 -0.66449279 -0.08227469 +v 0.47731629 -0.63101888 -0.37729549 +v -0.56853843 0.21914159 -0.39441371 +v 0.48988077 0.19305448 -0.11670011 +v 0.52007341 0.10431819 -0.01694951 +v -0.65751123 -0.23899120 -0.38767159 +v 0.11452333 -0.63103759 -0.10764533 +v 0.01558189 -0.66071618 -0.10790491 +v 0.42846125 -0.28327417 -0.39494246 +v 0.50984621 -0.52351099 -0.10422351 +v 0.07606190 -0.66296721 -0.10633167 +v -0.66054994 -0.08268826 -0.03301771 +v 0.05189550 0.00890679 -0.40122586 +v 0.22560686 -0.66154706 -0.35284403 +v 0.00478894 -0.64554948 -0.10659466 +v -0.12627026 -0.29055771 -0.38651133 +v -0.66167378 -0.29652977 -0.11701884 +v 0.22439086 -0.66081828 -0.20101826 +v -0.53247058 -0.63620424 -0.11129822 +v -0.66091090 -0.25795427 -0.23375086 +v -0.45219153 -0.60545343 -0.01230807 +v 0.21039791 -0.66134107 -0.05538314 +v 0.48630959 -0.61243051 -0.10404819 +v -0.12102225 0.24155480 -0.02490637 +v -0.04105041 -0.44174537 -0.01022545 +v -0.41780049 -0.61017084 -0.02411528 +v -0.14581329 0.24307835 -0.39634016 +v -0.12353105 0.24141990 -0.05954712 +v -0.18508881 -0.07616294 -0.40232462 +v 0.37986991 -0.63624734 -0.09655645 +v 0.18594223 0.23214288 -0.40019497 +v -0.32909393 0.24796236 -0.40234825 +v 0.50264239 -0.15496543 -0.37585607 +v 0.16186543 -0.46317062 -0.02175052 +v -0.01442385 0.09976182 -0.01687419 +v -0.04847952 -0.61797929 -0.38218671 +v -0.64613968 0.22128820 -0.15010506 +v 0.49092272 -0.63707811 -0.35608327 +v 0.40508848 -0.38225657 -0.39390263 +v -0.08381008 0.01360243 -0.38953441 +v -0.67492944 -0.56294107 -0.11300296 +v 0.43066800 0.19684003 -0.11821505 +v -0.00118608 -0.19840345 -0.39940113 +v -0.55737168 -0.59687638 -0.01326343 +v 0.52070832 -0.37453088 -0.18668588 +v 0.48533168 -0.40470189 -0.10816407 +v 0.51754928 -0.25840154 -0.10356306 +v 0.48545456 -0.44010445 -0.10756703 +v -0.01253456 0.01101968 -0.40165439 +v -0.65605968 -0.09626287 -0.24919499 +v 0.40043697 -0.64010602 -0.10343881 +v -0.67563087 0.24460043 -0.02785668 +v 0.48370865 -0.40666127 -0.10803215 +v -0.35183439 -0.64554173 -0.01229871 +v 0.53292209 0.22440422 -0.27617207 +v -0.37930319 0.21232232 -0.03340326 +v -0.60472143 -0.63728631 -0.26207712 +v -0.17475151 -0.64978611 -0.13837828 +v -0.57384783 0.25529101 -0.36049044 +v -0.21077839 -0.36573663 -0.01170635 +v 0.49375665 0.07692094 -0.11553496 +v -0.42633295 -0.60407990 -0.38587451 +v 0.50428081 -0.08588050 -0.34508824 +v 0.29532593 -0.66002738 -0.00686661 +v 0.45786545 -0.66642988 -0.05441758 +v -0.16729423 -0.21004735 -0.02557874 +v 0.53399473 0.22542797 -0.20271705 +v 0.10015153 0.20489505 -0.38334867 +v -0.52832335 0.18381806 -0.39383987 +v -0.69608271 -0.53712493 -0.02012578 +v 0.47244465 -0.63823372 -0.25544840 +v -0.20716111 -0.12689321 -0.38900438 +v -0.43391311 -0.42910570 -0.38816351 +v -0.66785359 -0.12212135 -0.11970574 +v 0.51639217 -0.40360919 -0.10658658 +v 0.00982961 -0.17082076 -0.38720030 +v -0.66112846 -0.63706803 -0.01542861 +v 0.52901810 -0.09139207 -0.20072336 +v -0.64139342 0.22465177 -0.14445040 +v -0.67343950 0.12017208 -0.20573799 +v -0.22448462 -0.64867181 -0.10965689 +v 0.45327282 -0.66685230 -0.34533951 +v -0.40915790 -0.61662573 -0.03135808 +v -0.65817934 -0.02504409 -0.03354241 +v -0.54611278 -0.64358419 -0.05725469 +v 0.17283762 -0.66046572 -0.38489595 +v -0.66728270 
0.25803885 -0.22994733 +v 0.48485810 0.16336311 -0.39963388 +v 0.50780594 -0.51688546 -0.01083696 +v -0.04596558 0.24032177 -0.07834136 +v 0.51182258 -0.58930314 -0.38913396 +v -0.43315533 -0.61568379 -0.05119875 +v -0.20696397 -0.65268183 -0.10973774 +v 0.09915059 0.22989036 -0.11611795 +v 0.04221363 -0.65630132 -0.10610358 +v 0.11172019 -0.65854383 -0.10543887 +v 0.31161779 -0.65495503 -0.00599850 +v 0.36281011 -0.51581609 -0.02070963 +v 0.50241971 -0.61253679 -0.00632404 +v 0.49196211 -0.60894734 -0.37491775 +v 0.20821683 -0.62902302 -0.17778471 +v -0.59590989 0.08639036 -0.03244109 +v 0.20298931 -0.59515113 -0.38045815 +v 0.50715059 -0.66549313 -0.38823372 +v 0.47380915 -0.66826779 -0.10278985 +v -0.45415500 -0.61091286 -0.22215132 +v -0.58058572 -0.25739193 -0.39017954 +v 0.01974452 -0.26054841 -0.39840236 +v 0.49431476 -0.07745804 -0.11400640 +v -0.15051596 0.17252508 -0.01812742 +v -0.60530764 0.18222673 -0.03392242 +v -0.61446220 -0.56524795 -0.02706985 +v 0.05991092 0.03126908 -0.01591330 +v 0.03764404 -0.65448624 -0.10610188 +v 0.39351732 0.19654982 -0.11620519 +v -0.43220812 -0.42005682 -0.02626642 +v 0.41206452 -0.30583888 -0.01038886 +v -0.40710220 0.21918531 -0.39165917 +v -0.39397964 -0.10391270 -0.02873416 +v 0.33217204 -0.55921096 -0.02018834 +v 0.41412532 0.19875048 -0.11950786 +v -0.09823307 0.24021751 -0.40350595 +v -0.40882763 0.25035584 -0.40295976 +v -0.06798155 -0.65564972 -0.10747682 +v 0.28203189 -0.66615611 -0.10404862 +v -0.49450827 -0.60981321 -0.16853079 +v 0.51622069 -0.30657944 -0.10568952 +v 0.05294517 -0.12369093 -0.38747093 +v 0.26465973 0.15088727 -0.38891566 +v -0.50142539 -0.05898124 -0.03005718 +v -0.67442763 -0.15328421 -0.40511346 +v -0.10163787 -0.65568972 -0.10898495 +v -0.12319306 -0.62133408 -0.02272502 +v 0.49219254 -0.00076992 -0.11579369 +v -0.08970826 0.24410264 -0.09096690 +v -0.33414474 -0.34119353 -0.02579704 +v 0.34350774 -0.65101475 -0.10614613 +v 0.48972982 -0.63611025 -0.23139545 +v -0.41201994 -0.60858583 -0.38529894 +v -0.49196106 -0.57154673 -0.01283618 +v 0.02777270 -0.65597844 -0.11054628 +v -0.66689354 -0.64293122 -0.11187015 +v -0.68917322 -0.63365704 -0.19126432 +v -0.43157834 0.25381798 -0.16182162 +v -0.67533386 0.05348627 -0.22881070 +v 0.33722806 0.22874500 -0.11578007 +v -0.45245770 -0.53631765 -0.40074202 +v 0.05875126 -0.35680842 -0.02306376 +v -0.48255306 -0.60310429 -0.38627580 +v 0.42252982 -0.23136508 -0.39561611 +v -0.49625674 -0.16905400 -0.02897672 +v 0.05033654 0.08337276 -0.38951868 +v 0.43308726 -0.33482832 -0.39423648 +v 0.53205174 0.09912882 -0.11488983 +v -0.20588106 -0.61726946 -0.37722677 +v -0.01503561 -0.33126441 -0.01080122 +v 0.51384503 -0.37225586 -0.06704226 +v 0.18488503 0.09458931 -0.01653843 +v -0.47040585 0.21304506 -0.40729734 +v -0.67553353 -0.02320153 -0.31220031 +v -0.38256201 0.24764957 -0.40582708 +v 0.39929527 -0.63046736 -0.01895213 +v 0.53169847 0.03731796 -0.35521305 +v -0.47811145 0.25489110 -0.19285204 +v -0.22905673 -0.17206974 -0.38862586 +v 0.49194679 -0.46357605 -0.37864190 +v 0.12569015 -0.66369033 -0.10350221 +v -0.08334409 0.24776192 -0.13271043 +v -0.44626760 -0.32947883 -0.02706559 +v -0.38005832 -0.00874934 -0.01664567 +v -0.26273096 -0.64699018 -0.13622077 +v -0.35447624 0.24118808 -0.40631625 +v -0.10604616 -0.57728612 -0.00944202 +v 0.12729272 -0.66309804 -0.10644529 +v -0.65847880 0.06737998 -0.11846016 +v -0.04959304 -0.40138337 -0.02328334 +v 0.50353336 -0.07002683 -0.18635990 +v -0.65904200 0.03049443 -0.11749843 +v 0.45657784 -0.63762081 -0.10419160 +v -0.35342664 
-0.33307230 -0.01312895 +v -0.64632648 0.22647780 -0.20070463 +v 0.11531704 -0.15694816 -0.02579789 +v 0.22345400 -0.23969771 -0.38524619 +v 0.41683641 -0.66584092 -0.20212638 +v -0.18019143 -0.60107362 -0.00986672 +v 0.35918009 -0.66897368 -0.10159662 +v 0.52841145 -0.11896598 -0.24623106 +v 0.52978361 0.21783526 -0.40066308 +v -0.32608071 -0.61252409 -0.38308483 +v -0.66086727 0.00701832 -0.07761794 +v 0.50109625 0.14526883 -0.11377745 +v 0.51422393 -0.38709182 -0.10555334 +v -0.42618951 -0.64672071 -0.06051949 +v -0.05062471 -0.38183111 -0.38510597 +v 0.20960441 -0.53088748 -0.39400563 +v 0.50389177 -0.66341656 -0.06756951 +v -0.44569159 0.03680534 -0.40642264 +v -0.67174214 0.25926092 -0.40966913 +v -0.67626566 -0.39919537 -0.40415108 +v 0.52043194 -0.39271182 -0.24678917 +v 0.13448378 0.19748996 -0.03136106 +v -0.45336902 -0.28000477 -0.01447155 +v -0.44810054 0.24561855 -0.02065346 +v -0.08498502 -0.65259814 -0.25487903 +v -0.66492486 0.25729746 -0.33146372 +v 0.36338362 -0.66742265 -0.10607123 +v 0.49380538 -0.52694374 -0.37492371 +v 0.48403898 -0.63768345 -0.20262130 +v -0.66218054 -0.33444887 -0.33004558 +v 0.53254074 0.22130539 -0.39756745 +v 0.23634106 -0.29238838 -0.38448212 +v 0.34122938 -0.65421957 -0.10340264 +v -0.24073030 -0.16716680 -0.01378344 +v 0.48278970 -0.49927184 -0.02230628 +v -0.69158214 -0.62865222 -0.31554830 +v -0.37821510 -0.65113336 -0.10686190 +v -0.30412358 0.10043083 -0.40567005 +v 0.14165947 0.23306249 -0.01896660 +v -0.63093829 -0.01890165 -0.01812785 +v -0.47950405 -0.34194222 -0.38846353 +v -0.42897221 0.24937165 -0.40571454 +v 0.33452219 -0.15396567 -0.38510510 +v -0.34498373 -0.06874412 -0.02875927 +v -0.59395653 0.12086891 -0.40809694 +v 0.25277883 -0.62733036 -0.37959236 +v -0.62661445 -0.10804029 -0.39168596 +v 0.26139835 0.23153180 -0.02295992 +v 0.48030651 -0.53470564 -0.10629890 +v -0.68352836 -0.19549212 -0.12649618 +v 0.52182716 0.21293701 -0.11849208 +v -0.00768836 0.23813988 -0.40220505 +v -0.32880452 0.24124850 -0.40609795 +v -0.04825668 -0.43756011 -0.38458678 +v -0.35299140 -0.65246481 -0.10747426 +v -0.54959172 0.17803106 -0.03332708 +v -0.01209156 0.17116763 -0.03104998 +v 0.49257916 -0.16166790 -0.06240679 +v 0.44221884 -0.63885885 -0.10508525 +v -0.67778116 -0.64398921 -0.11267146 +v -0.67971140 -0.32768548 -0.11799802 +v 0.17557180 -0.28354487 -0.02351654 +v 0.05707197 -0.32033139 -0.39771554 +v -0.13883971 -0.22949487 -0.01240552 +v 0.04860605 -0.62471068 -0.25134507 +v 0.50214589 0.17765573 -0.11561241 +v -0.41791299 -0.45383322 -0.38705793 +v -0.51621443 0.20999692 -0.02022834 +v -0.10923331 -0.12579690 -0.01388898 +v -0.69053251 -0.63273215 -0.30405110 +v 0.20536253 -0.66077936 -0.39014784 +v -0.06920515 -0.35212949 -0.01092377 +v 0.21328573 0.11902609 -0.40121692 +v 0.09471933 0.05796339 -0.38913760 +v -0.66871107 0.04064307 -0.40719479 +v -0.67837763 0.16830508 -0.05713044 +v -0.44766787 0.22265820 -0.22966647 +v -0.55578947 -0.45162773 -0.40258485 +v -0.67898947 -0.26497862 -0.36038086 +v -0.52876866 -0.64306313 -0.02218585 +v 0.00210366 -0.51864094 -0.38337335 +v 0.33961406 -0.09940021 -0.38573301 +v -0.51889986 -0.63876587 -0.01265617 +v -0.24688450 -0.05516835 -0.01540265 +v -0.62633538 -0.12282419 -0.03088487 +v 0.49177790 -0.18638805 -0.10793471 +v -0.30439746 0.22055201 -0.11907338 +v 0.32544190 -0.64438146 -0.10799173 +v -0.67134786 0.19701436 -0.14083198 +v -0.05137474 -0.54612005 -0.00931010 +v -0.22831938 -0.42020598 -0.02421486 +v -0.24993090 -0.23228203 -0.01304214 +v -0.26033986 0.24687742 -0.35755590 +v 
-0.46144322 0.25329497 -0.30700418 +v 0.26340798 -0.66271418 -0.38702667 +v 0.24506302 -0.20547235 -0.02487701 +v 0.27325886 0.23180468 -0.35264975 +v 0.49544638 -0.18675810 -0.38243353 +v -0.32111937 -0.15574323 -0.38940760 +v 0.51910335 0.06381300 -0.11547709 +v -0.68868929 -0.63035822 -0.37786934 +v 0.47535551 0.19850707 -0.11559454 +v -0.68069953 -0.11693761 -0.11956743 +v -0.19162433 -0.46642557 -0.01095739 +v -0.12671553 -0.65098679 -0.14234352 +v -0.11425601 -0.65324891 -0.11035861 +v 0.52022207 0.22402313 -0.09377125 +v 0.19651519 -0.66464126 -0.10387670 +v -0.65806657 -0.64300865 -0.11325914 +v -0.66981697 -0.51965475 -0.01520349 +v 0.12912320 0.23485212 -0.06017607 +v -0.64809024 0.19452278 -0.22864047 +v -0.25356352 0.24438457 -0.02309652 +v 0.49100736 -0.18710673 -0.11277317 +v -0.64793408 0.06257652 -0.39089444 +v -0.57580864 -0.41561231 -0.02781370 +v -0.28940096 -0.64107656 -0.39796364 +v -0.33199680 0.14811638 -0.40601838 +v -0.63277888 -0.50507951 -0.40292934 +v -0.18903616 -0.40910172 -0.39910218 +v 0.52404928 0.05693511 -0.06568307 +v -0.00023360 -0.47053176 -0.39684850 +v -0.66670144 0.17142157 -0.40661243 +v -0.68568128 -0.09133324 -0.11354085 +v -0.64035904 -0.16813157 -0.01727122 +v -0.63573056 -0.60671961 -0.26543662 +v 0.02163649 0.20849554 -0.12051257 +v 0.52700609 -0.16779955 -0.32494649 +v 0.48178625 -0.62209332 -0.10498396 +v -0.16241215 0.24285920 -0.40264741 +v -0.68205631 -0.02028877 -0.02050112 +v -0.31367487 0.00161202 -0.40457982 +v -0.67803967 -0.34592533 -0.40383404 +v -0.68063295 -0.31857032 -0.35962531 +v -0.66293055 -0.60561919 -0.24215306 +v 0.09276865 -0.66443884 -0.10926964 +v -0.39018047 -0.17788573 -0.01483412 +v 0.50508225 0.17112310 -0.11376426 +v 0.50706625 0.19811556 -0.11975510 +v -0.50086975 -0.61373252 -0.02770604 +v -0.13122879 0.21282084 -0.17286114 +v -0.57610559 -0.02743390 -0.40680116 +v 0.49475265 0.19521275 -0.03484330 +v 0.49478593 0.12123787 -0.11610349 +v 0.49287874 -0.58889842 -0.33721220 +v 0.10297026 0.19445261 -0.39016464 +v -0.45014846 -0.44333214 -0.38775009 +v 0.43723714 -0.66381139 -0.00661469 +v -0.42387262 0.03832315 -0.39267111 +v 0.29124546 -0.66327804 -0.10305539 +v 0.44527298 -0.09375747 -0.38481829 +v 0.53245378 0.18801367 -0.39916983 +v -0.06186302 0.20593679 -0.03335985 +v -0.66531390 -0.40697283 -0.24677321 +v 0.25616834 -0.66107285 -0.13793443 +v 0.38400194 -0.10034774 -0.39768320 +v 0.40106434 -0.61330378 -0.00617851 +v -0.43446589 0.24840929 -0.06797165 +v -0.64154941 0.22960429 -0.19354695 +v 0.45094851 -0.12178895 -0.39675188 +v 0.50535619 -0.58101350 -0.10691679 +v 0.48706499 0.22258677 -0.39939368 +v 0.49558723 -0.43125549 -0.33174247 +v -0.23185213 -0.01559313 -0.02888055 +v 0.48841923 -0.63481796 -0.15993178 +v 0.32393667 -0.63358760 -0.28015307 +v -0.67476541 -0.10900527 -0.11996064 +v 0.37518787 0.16585310 -0.03120701 +v 0.09211843 0.04658061 -0.40135905 +v 0.26579651 0.01999855 -0.39990029 +v 0.50637251 0.02643110 -0.18048225 +v -0.10766653 0.20888172 -0.12074152 +v 0.15647171 -0.13288432 -0.39912578 +v 0.34718403 -0.63491964 -0.37561586 +v -0.65609533 -0.07988301 -0.12229051 +v -0.64297277 0.16465487 -0.39208385 +v -0.54001468 -0.63948393 -0.16826397 +v -0.63040572 -0.63775390 -0.11055181 +v 0.52046788 -0.01726090 -0.01662950 +v -0.28493884 -0.09314123 -0.02800775 +v -0.21676348 0.21533875 -0.24546529 +v -0.40218428 0.09746525 -0.39324135 +v -0.64597052 0.21447611 -0.14195117 +v 0.53237700 0.03450542 -0.24774027 +v 0.49643460 -0.37205771 -0.28615305 +v 0.03506628 0.20695512 -0.34483182 +v 
0.37849796 -0.63361353 -0.10804832 +v -0.42497084 -0.36820650 -0.38776287 +v -0.09658940 0.21255919 -0.11950020 +v -0.36790588 0.24674125 -0.40588582 +v -0.60837692 0.02819292 -0.03207172 +v 0.42972356 -0.63772774 -0.10021827 +v 0.32165316 -0.66482246 -0.10288730 +v -0.02044220 0.20863092 -0.12115132 +v 0.49599940 0.21727301 -0.01821976 +v -0.65873212 -0.22320673 -0.34542292 +v -0.51976764 -0.60983086 -0.37982962 +v 0.51965636 -0.13398229 -0.03117297 +v 0.26876613 0.12155548 -0.01692951 +v -0.05425467 0.20142350 -0.03131808 +v 0.19743429 -0.62935311 -0.30964234 +v 0.50287557 -0.56923008 -0.10767512 +v -0.57316411 -0.63277543 -0.40119246 +v -0.43419451 -0.46970758 -0.02601364 +v -0.22305851 0.24680316 -0.29896221 +v -0.25647163 0.13358717 -0.01793933 +v 0.19773637 -0.12831980 -0.38637111 +v -0.28062779 -0.55946392 -0.01104548 +v -0.16591163 -0.64988506 -0.01014587 +v 0.51635909 -0.04493386 -0.11489493 +v -0.66777658 -0.60374874 -0.24636725 +v -0.27451450 -0.36027157 -0.40026242 +v -0.67952186 -0.10219334 -0.21787331 +v -0.65719354 -0.11893193 -0.12169474 +v -0.04964154 -0.62167764 -0.23627733 +v -0.32882234 -0.64853519 -0.01844445 +v 0.45105606 -0.01570154 -0.02866820 +v -0.48541498 -0.00271445 -0.40625390 +v 0.34301126 0.22139071 -0.01824657 +v 0.01274566 0.23777945 -0.40184802 +v -0.40008506 0.22321402 -0.12028448 +v 0.29532102 0.09896174 -0.38820499 +v -0.62539834 -0.29271156 -0.38955355 +v -0.38355514 -0.44255924 -0.02560937 +v -0.55586874 0.22438815 -0.39254111 +v -0.49947450 -0.64065802 -0.27238321 +v -0.04580921 -0.65377861 -0.28681585 +v 0.11808199 -0.26085442 -0.02388634 +v 0.50737351 -0.66648901 -0.17307264 +v 0.02114247 -0.65451020 -0.39269578 +v -0.63022137 -0.30634442 -0.40425447 +v -0.65244985 0.03094716 -0.24849285 +v -0.18905145 -0.65315598 -0.06621841 +v 0.40088263 -0.63597029 -0.02680644 +v 0.10795207 -0.59534210 -0.39422393 +v 0.10343879 -0.62639076 -0.28111500 +v 0.19261639 -0.50500369 -0.00828070 +v 0.05049544 -0.54844749 -0.38253865 +v -0.12885560 0.02116444 -0.02912822 +v 0.49448392 -0.47960666 -0.30686012 +v -0.14051896 -0.65084124 -0.11423662 +v 0.42769608 -0.60040843 -0.01948746 +v -0.67518008 -0.60726810 -0.09386998 +v -0.14692408 -0.45868537 -0.39833066 +v 0.49013960 -0.27163050 -0.06412259 +v 0.50849736 0.19549026 -0.12683576 +v -0.15463479 -0.11310424 -0.01420048 +v -0.12041015 -0.40569645 -0.02363824 +v -0.67548215 -0.58937335 -0.03656166 +v 0.48124617 -0.63529187 -0.02580215 +v -0.07774775 -0.46547288 -0.39763382 +v 0.08293577 -0.65608758 -0.00819389 +v -0.46946871 0.25413203 -0.25055823 +v 0.23646665 0.07755272 -0.01630778 +v 0.49403337 -0.65405017 -0.10231324 +v 0.14933193 -0.53620070 -0.39460012 +v -0.69018167 -0.46114168 -0.15018550 +v 0.13364425 0.20284688 -0.03921196 +v 0.24118730 0.19968626 -0.11734651 +v -0.59962934 0.08272135 -0.39358369 +v 0.38737354 0.01967880 -0.01543031 +v 0.50535882 0.19683440 -0.26677921 +v -0.38196790 -0.31568411 -0.38788116 +v 0.51787722 0.06139308 -0.01590394 +v -0.28686646 -0.20496601 -0.40191844 +v -0.62175542 -0.16425958 -0.39147383 +v 0.19353290 0.19718398 -0.38956803 +v 0.48847306 -0.59031105 -0.37696889 +v 0.30768842 0.22648045 -0.01852743 +v 0.17404874 0.20462009 -0.12077259 +v 0.01285578 -0.66091764 -0.11007733 +v -0.21388854 -0.34903336 -0.39979836 +v -0.30035254 -0.23760419 -0.02632344 +v -0.06572086 0.21792527 -0.11708394 +v -0.01522998 0.23863001 -0.10765725 +v -0.48576054 0.21951319 -0.39361560 +v 0.29350346 0.17649372 -0.40118587 +v -0.68228918 -0.37309796 -0.36114770 +v -0.65730357 0.25411969 -0.40774927 +v 
-0.24637237 -0.59613401 -0.39779404 +v -0.68675381 -0.58790272 -0.40209547 +v 0.12132299 -0.51187515 -0.02139264 +v -0.07918645 -0.09855183 -0.38847947 +v 0.04081611 -0.65797406 -0.04726627 +v 0.44325835 -0.65930963 -0.10229324 +v -0.65445429 -0.28904203 -0.38996911 +v 0.49179083 -0.23364006 -0.11235230 +v 0.28274891 -0.63891327 -0.10788789 +v 0.53380811 0.09094945 -0.24088876 +v -0.35944757 -0.61982316 -0.11023393 +v -0.15430965 -0.31041712 -0.02439657 +v -0.51670581 -0.37069681 -0.01441963 +v -0.64708149 0.22175546 -0.32051590 +v 0.05875659 -0.65661377 -0.24979651 +v 0.34778315 -0.66294122 -0.10206471 +v -0.67698228 -0.63371718 -0.01490774 +v 0.52363980 -0.28740087 -0.22379138 +v -0.43644980 0.24617431 -0.11705373 +v 0.04473292 -0.66236550 -0.10619039 +v 0.49373132 -0.11722837 -0.11318892 +v 0.12675788 -0.65895462 -0.10529333 +v 0.45052364 -0.63677692 -0.02250757 +v -0.08677684 -0.62402177 -0.07001726 +v 0.51748556 -0.48111421 -0.29616913 +v -0.56318516 0.20674574 -0.02019983 +v -0.57234484 0.01112713 -0.01780996 +v 0.13285068 0.23511745 -0.02280928 +v -0.66929460 -0.11874022 -0.40658265 +v 0.01098444 -0.66112256 -0.10579676 +v -0.53882676 0.11685719 -0.40767354 +v -0.03615796 -0.65329093 -0.17276923 +v -0.64758068 0.19703542 -0.37216851 +v -0.66588980 -0.45304808 -0.31365249 +v -0.35714611 -0.11132700 -0.39015442 +v 0.15472077 -0.42027512 -0.38319972 +v -0.08838195 0.20921658 -0.38935822 +v 0.06630605 -0.65777391 -0.10558782 +v -0.07130419 -0.64862913 -0.00869263 +v -0.24496691 -0.10839876 -0.01462517 +v 0.48125133 -0.66472065 -0.10143108 +v 0.28729549 -0.11347377 -0.02663579 +v -0.49517107 0.25137183 -0.40623730 +v 0.03567821 0.20790169 -0.17551954 +v -0.15757619 0.15830278 -0.39116192 +v 0.53123277 0.22254825 -0.38562322 +v -0.59924275 -0.07343269 -0.39224514 +v -0.68241459 0.01700541 -0.05334861 +v -0.66207027 -0.04210461 -0.07032663 +v 0.38455501 -0.36583793 -0.02267651 +v 0.48883149 -0.28379408 -0.11033946 +v -0.06100275 0.21387689 -0.11737332 +v -0.68942386 -0.44064230 -0.11715417 +v -0.03986482 -0.02387979 -0.02838564 +v 0.28069070 -0.63143647 -0.10549079 +v -0.69445682 -0.54413986 -0.11132843 +v 0.48716494 -0.38458514 -0.02536937 +v 0.31419086 0.15597525 -0.03086870 +v 0.50168270 -0.17801371 -0.32775041 +v -0.49772850 -0.11413980 -0.02946567 +v 0.00042446 -0.62309158 -0.21905249 +v 0.43168718 0.22163135 -0.01836956 +v 0.48217550 -0.39438930 -0.02260331 +v 0.07716814 -0.66143245 -0.10675935 +v -0.63454258 -0.10970248 -0.40629795 +v 0.49530062 -0.02609295 -0.11302935 +v 0.51038158 0.19609746 -0.14875990 +v 0.31642830 0.20119110 -0.11879507 +v 0.52946126 -0.05754327 -0.15264216 +v -0.41227058 -0.64108092 -0.39837533 +v -0.67767346 -0.01665934 -0.16421404 +v 0.06290638 -0.09195969 -0.40021816 +v 0.41877967 0.17069730 -0.01774145 +v 0.01331151 -0.66210973 -0.10756618 +v 0.41308367 -0.55782175 -0.00675427 +v -0.51031601 -0.63980877 -0.11185441 +v -0.68198961 -0.44923872 -0.40312400 +v -0.52730417 -0.16743283 -0.39087233 +v -0.53447217 0.25589272 -0.24085365 +v 0.48922831 -0.26994032 -0.10984072 +v -0.03500081 -0.65345508 -0.11410044 +v -0.62018865 -0.64408100 -0.11394937 +v 0.30654415 0.09251530 -0.02993675 +v -0.03225905 0.24012634 -0.37393388 +v 0.38523343 -0.15592754 -0.02609918 +v -0.43526193 -0.29831204 -0.38859969 +v 0.03860943 -0.65635920 -0.34782171 +v -0.22214448 -0.23251049 -0.38795456 +v -0.64220208 0.22860511 -0.12151899 +v 0.52046800 -0.33883774 -0.39207685 +v -0.19255088 -0.64693397 -0.00996842 +v 0.08470992 0.23387347 -0.01863765 +v -0.39826480 -0.54089028 -0.40001944 
+v 0.02706640 0.20137163 -0.03169511 +v 0.06771920 0.23641196 -0.06153995 +v -0.68396592 -0.29833549 -0.11798270 +v -0.66494000 0.20862326 -0.40820950 +v 0.20346320 0.20222336 -0.35874721 +v 0.18251728 -0.63203245 -0.10617890 +v -0.00734768 0.20908301 -0.20435923 +v -0.14561328 0.05602035 -0.01653460 +v -0.05051184 0.21030709 -0.22735277 +v -0.02818096 -0.65448838 -0.11185143 +v 0.12858833 -0.57477295 -0.02074878 +v 0.00998097 -0.64878172 -0.10873728 +v -0.67347759 0.21797745 -0.02131519 +v -0.00690991 0.24251983 -0.16237654 +v 0.03897041 -0.35141733 -0.01038546 +v 0.51875025 -0.08175084 -0.01490731 +v -0.09129263 0.23881055 -0.01951555 +v 0.51035345 -0.62973708 -0.15527885 +v 0.08841169 0.20369458 -0.11982489 +v -0.50337583 -0.64023703 -0.20793892 +v 0.34013394 -0.62422723 -0.00633085 +v 0.40763602 -0.14314738 -0.39693934 +v -0.28375086 0.24849609 -0.29532996 +v -0.66446126 0.25600782 -0.36703217 +v 0.33431250 -0.63290012 -0.13547266 +v -0.56746030 -0.50790602 -0.01396941 +v -0.40486446 -0.34718883 -0.40139267 +v -0.65378606 -0.24578275 -0.39036208 +v 0.52455378 -0.25462109 -0.34942496 +v -0.52193326 0.22182721 -0.12306075 +v -0.13198894 -0.66069102 -0.10996371 +v 0.49922258 -0.27912521 -0.33936420 +v -0.15603502 -0.65020984 -0.18925405 +v -0.68552238 -0.28556085 -0.11745077 +v 0.00729299 0.20564198 -0.11990319 +v 0.52700883 -0.17410593 -0.26416656 +v 0.12141779 0.23539777 -0.11320212 +v 0.52843475 -0.04749532 -0.39338177 +v -0.68856621 -0.33034417 -0.11484642 +v -0.06071085 0.14941157 -0.03066188 +v -0.28612909 -0.51188475 -0.02411954 +v -0.44664621 -0.08774253 -0.40480429 +v 0.48855507 -0.35383773 -0.11068160 +v 0.27691475 -0.33492774 -0.39594164 +v -0.63861549 0.22832307 -0.14055197 +v -0.10580269 -0.57617408 -0.38357994 +v 0.33859539 -0.34009072 -0.38266951 +v -0.01525293 0.20513937 -0.03350751 +v -0.08727853 -0.25613719 -0.01184636 +v -0.51653677 -0.64254034 -0.01514519 +v -0.14994220 -0.34190205 -0.01142975 +v -0.67086124 -0.61362380 -0.11466216 +v 0.50682068 -0.66468823 -0.11635073 +v 0.09067988 -0.59549665 -0.02108454 +v -0.49061164 0.21725695 -0.03600419 +v -0.22249772 0.24854641 -0.12800984 +v -0.66744876 0.10997579 -0.11873081 +v 0.31955412 0.23127168 -0.11652095 +v 0.49718484 0.19192988 -0.07601874 +v 0.35765204 -0.05049016 -0.39842534 +v -0.12075564 -0.25933844 -0.02465615 +v -0.69675308 -0.63572329 -0.11080374 +v -0.13376300 -0.65597516 -0.07555446 +v 0.50392276 -0.03937521 -0.38164648 +v -0.67965484 0.12685114 -0.02794222 +v -0.63692075 -0.52388358 -0.02805201 +v -0.18274859 0.24274802 -0.07148326 +v -0.12242987 -0.61936545 -0.35068694 +v -0.59032875 -0.31901559 -0.02870905 +v -0.69582123 -0.51613766 -0.07284671 +v 0.16538581 0.09312548 -0.38911715 +v 0.16277461 -0.40740603 -0.02231054 +v -0.12098091 -0.65510511 -0.10813258 +v -0.36335659 -0.15685555 -0.40308338 +v -0.24338987 -0.61624414 -0.25262299 +v 0.38510293 -0.63592678 -0.29237837 +v -0.66805547 -0.49019071 -0.20971856 +v 0.17933269 -0.29970595 -0.01051823 +v 0.52997077 -0.05199202 -0.31262842 +v -0.60700971 -0.60346425 -0.38719794 +v -0.68229419 -0.04022407 -0.02007898 +v -0.67049259 -0.35971186 -0.11659414 +v 0.16255702 -0.62823212 -0.39326006 +v 0.40769750 0.19396302 -0.03207513 +v 0.39791062 -0.35982081 -0.00957011 +v 0.53496790 0.22382835 -0.22573911 +v 0.35159510 -0.03899445 -0.38635856 +v 0.49710038 -0.66769874 -0.08596077 +v -0.10776106 0.20830633 -0.03900046 +v -0.66931754 0.21990864 -0.26639324 +v -0.43926314 -0.62003613 -0.11244209 +v 0.42779350 -0.20191553 -0.38359550 +v -0.50475818 -0.31188408 
-0.01469837 +v -0.16264234 -0.65322697 -0.10888623 +v 0.46853590 -0.66687560 -0.22030529 +v 0.48888022 -0.30114350 -0.11055096 +v 0.52362454 0.06020208 -0.02420720 +v 0.50134486 0.13096079 -0.11519112 +v -0.30931497 -0.02502537 -0.01607161 +v 0.34867671 0.00719998 -0.38690963 +v -0.08697897 0.13773640 -0.40325850 +v -0.67739689 -0.03045581 -0.01860573 +v 0.19034839 0.15567020 -0.38934311 +v 0.45317847 -0.39017168 -0.38076711 +v -0.27360553 0.21035081 -0.03324069 +v 0.39290839 0.20147142 -0.11486770 +v -0.26702121 -0.64775592 -0.35439813 +v -0.22701609 -0.51579529 -0.01088505 +v -0.62342441 0.16307603 -0.40839505 +v 0.30523863 -0.03081561 -0.02806690 +v 0.29936343 -0.53899544 -0.00738706 +v -0.66004777 -0.28668228 -0.38328058 +v -0.36575529 -0.61202163 -0.38365379 +v -0.51975465 -0.63491243 -0.11165525 +v 0.20902103 -0.45379630 -0.38228267 +v -0.40108073 0.25290400 -0.12379907 +v -0.64615464 0.22523052 -0.22852813 +v -0.16052264 -0.56998909 -0.02253948 +v 0.52237272 -0.17961207 -0.39524588 +v 0.05193432 -0.51119667 -0.39592355 +v 0.50024664 0.18840119 -0.09257673 +v 0.36590552 -0.17008217 -0.01254851 +v -0.67148328 -0.13567175 -0.40616924 +v -0.08230184 -0.62899441 -0.00907222 +v 0.14779608 0.03270302 -0.40088287 +v 0.51886296 -0.39177528 -0.39145768 +v -0.42996776 -0.15492661 -0.40385363 +v 0.51443416 -0.56235415 -0.34398693 +v 0.50263768 -0.66685563 -0.07517998 +v 0.50793689 -0.63314247 -0.10375073 +v 0.48631513 -0.24264961 -0.11118077 +v 0.48895961 -0.33243167 -0.06440132 +v -0.66837287 -0.55227220 -0.33890972 +v -0.27506727 0.20914806 -0.39254579 +v -0.37074727 -0.64946091 -0.08027504 +v 0.36331481 0.20225470 -0.11518644 +v -0.23763239 0.24822086 -0.12240285 +v 0.12871385 -0.35997331 -0.02277949 +v -0.07114535 -0.65198141 -0.12160325 +v -0.68780327 -0.40610865 -0.20691463 +v -0.45186087 0.21667342 -0.39352879 +v -0.51084071 0.00090221 -0.39263985 +v 0.12752345 -0.05420076 -0.01455581 +v 0.19700441 -0.63168252 -0.10171151 +v 0.48603097 -0.48873255 -0.10597123 +v -0.09261863 0.16249144 -0.01782741 +v 0.21935833 -0.52100641 -0.38126561 +v 0.29344985 -0.63148069 -0.16241527 +v -0.00403495 -0.34981960 -0.38511470 +v -0.01902887 0.11608727 -0.03020570 +v -0.69141543 -0.63112003 -0.11271869 +v -0.16702758 0.24695264 -0.12401312 +v 0.00075226 -0.65665847 -0.01514051 +v -0.51825702 -0.63881063 -0.39895281 +v -0.68138278 -0.14688000 -0.16293868 +v 0.07557082 0.20005840 -0.03164149 +v -0.35365158 0.21985714 -0.16659285 +v -0.41786662 -0.64498842 -0.11162078 +v -0.24025382 0.20469865 -0.01891128 +v -0.44597542 -0.60073161 -0.38594005 +v 0.39030746 -0.63329691 -0.01965556 +v -0.59928614 -0.63710570 -0.20356049 +v 0.17802714 0.01357322 -0.02854479 +v -0.35992870 -0.61317217 -0.34150788 +v 0.46912730 0.18892823 -0.01794529 +v -0.19801894 -0.13930348 -0.01395366 +v -0.06296358 -0.20180222 -0.38720560 +v 0.49867994 0.18462479 -0.08497988 +v -0.04452901 -0.52099794 -0.39680338 +v 0.08988123 -0.66104531 -0.10591761 +v -0.14928165 0.06924249 -0.02977164 +v -0.66566437 -0.56072181 -0.38433594 +v -0.02593829 0.20933408 -0.12463186 +v 0.45583323 0.10204094 -0.03025889 +v 0.36544219 -0.24483798 -0.01129485 +v 0.24028635 -0.14751236 -0.38582066 +v 0.48740059 -0.32057160 -0.10885856 +v 0.23184603 -0.65888250 -0.00803814 +v 0.24757467 -0.63149923 -0.37432963 +v 0.51105499 -0.64972645 -0.30807292 +v -0.57486117 0.07507003 -0.40772161 +v 0.10149595 -0.44999698 -0.38334996 +v -0.67837989 0.14352958 -0.02282630 +v -0.46669605 0.21361922 -0.02014366 +v 0.46588635 0.15698987 -0.38727391 +v -0.48828965 -0.58180797 
-0.38644749 +v -0.58920485 -0.64068854 -0.01610054 +v -0.66586405 -0.60825258 -0.11199059 +v 0.49809116 -0.66744584 -0.06601670 +v 0.34086108 -0.19056191 -0.02538511 +v 0.14294748 -0.59730804 -0.38053963 +v -0.45134884 -0.63586640 -0.11305998 +v 0.44323036 -0.01457427 -0.39791939 +v 0.32134876 -0.65940237 -0.39172342 +v -0.68854308 -0.55954921 -0.11286934 +v 0.04549596 -0.31310040 -0.38517788 +v -0.64118820 0.22937630 -0.31720006 +v -0.13456164 -0.62018478 -0.10895772 +v -0.65817636 -0.27557343 -0.38795117 +v 0.45464540 0.15671699 -0.03148957 +v -0.15548196 0.23527320 -0.12022533 +v -0.33044004 -0.52641070 -0.02439997 +v -0.06625572 -0.03701189 -0.40156969 +v -0.68608803 -0.56633049 -0.40199780 +v -0.04250915 -0.62159652 -0.37895513 +v 0.03615453 0.20844708 -0.12130324 +v 0.46940637 -0.30052966 -0.01066589 +v -0.17996067 -0.61777014 -0.16084628 +v 0.17918685 -0.47770700 -0.39499289 +v -0.60063267 -0.53661805 -0.01405154 +v 0.42566365 -0.50702947 -0.00747344 +v 0.13078237 -0.62348783 -0.00757174 +v -0.69824547 -0.63346618 -0.05781982 +v -0.50678307 0.22461689 -0.22848217 +v -0.43663651 0.25236675 -0.12207135 +v 0.25675485 -0.63205785 -0.37051418 +v 0.45664221 -0.05302349 -0.38468531 +v 0.04836317 -0.36781746 -0.38461679 +v 0.33875677 -0.61755824 -0.39179471 +v 0.25760478 0.17972346 -0.03108573 +v -0.68074018 -0.40732497 -0.40303358 +v -0.10270242 -0.16417606 -0.38786393 +v 0.20796382 -0.66091436 -0.30537710 +v 0.35217118 0.22659174 -0.01895936 +v 0.53239518 0.22618577 -0.15421456 +v 0.49111262 -0.59487426 -0.23867145 +v -0.39097634 -0.28858760 -0.01377876 +v -0.07307810 0.08171058 -0.40274081 +v -0.67331874 0.01936863 -0.35619053 +v -0.68893218 -0.63414270 -0.11646010 +v 0.51901662 -0.43518314 -0.34519291 +v -0.65656865 -0.14645728 -0.33122692 +v 0.50416863 -0.07549702 -0.29693428 +v -0.24225569 -0.54152310 -0.38499659 +v 0.53565919 0.22082420 -0.21266164 +v 0.48847583 -0.13388568 -0.02668856 +v -0.68195361 -0.63434035 -0.37982771 +v 0.52456927 -0.20108210 -0.11049309 +v -0.56332833 0.25034955 -0.11586390 +v -0.11373857 0.21570317 -0.11755417 +v -0.05531681 -0.65384752 -0.01003566 +v 0.49127135 -0.03940686 -0.11495409 +v -0.66344225 -0.09801288 -0.07175221 +v 0.52019423 -0.14384076 -0.39552078 +v 0.13685215 -0.66424829 -0.10723808 +v 0.10015455 -0.65817159 -0.10523589 +v 0.46388447 -0.54824024 -0.39088231 +v 0.13227999 -0.03867753 -0.38784519 +v -0.27138081 -0.45500475 -0.39960817 +v -0.67895329 0.11527782 -0.02216500 +v 0.32112864 -0.56678694 -0.39238471 +v 0.00461280 -0.62355822 -0.37528840 +v -0.68601120 -0.39742887 -0.11568049 +v 0.22625504 -0.45446834 -0.00870753 +v 0.45501152 0.04349162 -0.02956823 +v 0.43149024 -0.66660434 -0.30471283 +v 0.49705952 -0.32709438 -0.37787443 +v 0.48919776 -0.63742930 -0.31105793 +v -0.55864352 -0.34810227 -0.02809031 +v -0.09198368 -0.01672546 -0.01555882 +v 0.48932064 -0.23284355 -0.02648557 +v -0.37498656 -0.48772606 -0.01234084 +v -0.66653216 -0.24812363 -0.11667159 +v -0.40885791 0.25226131 -0.26946399 +v -0.66179359 0.25580773 -0.39844662 +v -0.40091679 -0.05597630 -0.40469661 +v 0.51140833 -0.62057287 -0.21202715 +v 0.13524193 -0.66047001 -0.10520653 +v -0.67079711 -0.60036069 -0.22253133 +v -0.40865055 0.22943141 -0.01992365 +v 0.27627745 -0.62472636 -0.00685300 +v -0.45573407 -0.03161271 -0.03003633 +v -0.13390879 -0.50340718 -0.02289822 +v 0.38648033 0.22707282 -0.39823407 +v 0.49920481 0.09041395 -0.07508551 +v 0.51171297 0.19220982 -0.15754701 +v -0.69348383 -0.63156474 -0.17847708 +v -0.43981087 -0.61133093 -0.38128391 +v 0.44759265 
-0.63142592 -0.01895213 +v -0.66978079 -0.56145430 -0.11485579 +v -0.66580766 -0.39232221 -0.15020719 +v 0.52698082 0.22287863 -0.11110885 +v -0.10478622 -0.02670404 -0.02841287 +v -0.30006558 0.24521439 -0.06479963 +v 0.17834978 0.23345006 -0.05759811 +v 0.32056290 -0.66435993 -0.38574344 +v 0.11740898 -0.62544399 -0.11492898 +v -0.66702878 -0.50087118 -0.33649218 +v 0.49559522 -0.15741397 -0.10893048 +v 0.50281698 -0.10675418 -0.37924087 +v -0.66697758 0.10914331 -0.40801907 +v -0.19570974 0.02394831 -0.01626906 +v -0.51416862 0.12927791 -0.03241599 +v 0.49267682 -0.40431646 -0.11140333 +v 0.11847650 0.16221036 -0.38972080 +v 0.05296354 0.20780711 -0.13600926 +v -0.28577822 -0.03010128 -0.39074636 +v 0.10745570 -0.65921247 -0.03784723 +v -0.65272349 0.03199395 -0.20025952 +v -0.53802013 -0.25827321 -0.40383297 +v -0.27688733 0.21732956 -0.12276031 +v 0.34834403 0.19816253 -0.07201477 +v 0.31624931 -0.66753554 -0.10405882 +v 0.36999395 -0.56976110 -0.39178532 +v -0.05202205 -0.33949426 -0.02363909 +v 0.40808931 0.22480743 -0.01909596 +v -0.51461405 0.21930675 -0.12060790 +v 0.00380643 -0.24157724 -0.02439572 +v 0.20958947 -0.24718508 -0.02408336 +v -0.68939036 -0.61983043 -0.11503537 +v 0.51387620 -0.57067585 -0.24277201 +v 0.24913380 0.19246005 -0.03113509 +v 0.52440804 -0.14106406 -0.11020031 +v -0.66965532 -0.36957592 -0.11416385 +v -0.69193500 -0.52132231 -0.17895114 +v 0.01192676 -0.40481168 -0.39734766 +v -0.55560988 -0.24575238 -0.02886736 +v 0.47201252 0.19684328 -0.29575062 +v -0.67809820 -0.54894620 -0.11545411 +v 0.50232548 -0.07949097 -0.11653584 +v -0.59605020 0.25726351 -0.25898853 +v -0.18304540 -0.65114772 -0.01144294 +v -0.45036060 -0.61637270 -0.10996115 +v 0.00319460 -0.61984468 -0.00858582 +v -0.31113243 0.14831187 -0.03160532 +v -0.54959387 0.25124756 -0.12071386 +v -0.67671579 -0.08199994 -0.31882820 +v 0.38635749 -0.03816086 -0.01459751 +v -0.50615579 0.25004381 -0.06448643 +v -0.68174875 -0.28976646 -0.31127518 +v -0.63449371 -0.61071098 -0.03028953 +v 0.32691941 -0.66372710 -0.22365136 +v 0.48629734 -0.48471203 -0.05137833 +v 0.43924198 -0.07848153 -0.02749752 +v 0.35671526 -0.61401570 -0.37832275 +v 0.52202982 0.06749201 -0.01709802 +v -0.14899994 -0.62247366 -0.05370522 +v -0.40392986 0.24773586 -0.02383953 +v 0.30655974 0.23304695 -0.20025271 +v -0.68235290 -0.10024732 -0.11926019 +v -0.47455770 0.22316763 -0.34883219 +v 0.28697318 -0.52842480 -0.38049346 +v -0.44500762 -0.16866606 -0.01538435 +v -0.49019417 -0.11859586 -0.40494704 +v -0.03334937 -0.66224259 -0.10903559 +v -0.02417433 -0.62603050 -0.10395244 +v -0.21872921 -0.58679688 -0.38410124 +v -0.66820633 0.15862976 -0.12052108 +v -0.69297945 -0.53072613 -0.11920913 +v 0.44014823 -0.18173495 -0.39608973 +v 0.09152231 -0.31177637 -0.02330844 +v -0.20205593 -0.62064761 -0.02372250 +v 0.11387367 0.23276694 -0.11611370 +v -0.36055321 0.24902472 -0.40214929 +v 0.48755687 -0.47409400 -0.37877086 +v -0.23852056 -0.65433395 -0.11067352 +v -0.35319576 -0.02883230 -0.40455067 +v 0.48763368 -0.12567173 -0.11259954 +v -0.67628056 -0.43842503 -0.40406936 +v 0.50211048 -0.15533029 -0.26133180 +v 0.00405224 -0.62325561 -0.34142321 +v 0.18823144 -0.34226650 -0.02277779 +v 0.27282408 -0.66268528 -0.30265296 +v -0.46050584 -0.61461651 -0.11094672 +v 0.51075560 -0.43709767 -0.01344641 +v -0.25268504 -0.05264784 -0.40328041 +v 0.48199400 -0.47894716 -0.10679594 +v 0.37059048 -0.64319527 -0.10760150 +v 0.51543272 -0.53762752 -0.29596806 +v 0.51019752 -0.58488441 -0.10424394 +v -0.54097688 -0.64581257 -0.11317530 +v 0.05017064 
-0.66220790 -0.10423245 +v -0.64835864 0.18849811 -0.17635190 +v 0.31967464 -0.63365293 -0.10520397 +v 0.41011688 -0.25593516 -0.02443146 +v 0.50582504 -0.03009006 -0.33018282 +v -0.68818456 -0.63629735 -0.01595586 +v -0.69728279 -0.62918073 -0.01685631 +v 0.26553321 0.23412922 -0.11762992 +v 0.50934249 -0.38625777 -0.11006286 +v -0.31631383 0.15468894 -0.39270175 +v -0.24225815 0.24089456 -0.01963130 +v -0.55479831 -0.04378009 -0.01720569 +v 0.08881129 -0.40611395 -0.02259395 +v -0.67271191 0.16946326 -0.12794644 +v -0.05206551 0.07106963 -0.01656396 +v -0.06708247 -0.62040299 -0.38016430 +v -0.41433367 -0.64325768 -0.28513727 +v -0.67052823 -0.60054696 -0.18956171 +v 0.46330345 0.19786890 -0.14625515 +v 0.47839209 -0.63908976 -0.10456055 +v -0.49612567 -0.00073508 -0.03064188 +v 0.11210218 0.20189270 -0.38924843 +v -0.54324758 -0.19315831 -0.02916524 +v -0.10939414 -0.65200269 -0.39352030 +v -0.03009302 -0.64244217 -0.10644656 +v -0.68018454 0.11160009 -0.06418940 +v -0.66705942 -0.32114002 -0.03322580 +v 0.51062250 0.13237941 -0.26327804 +v -0.68399638 -0.27217588 -0.20230044 +v -0.21337366 -0.61661118 -0.36300054 +v 0.52588010 -0.14894763 -0.39155108 +v 0.09860842 -0.62848628 -0.02632174 +v -0.66288662 -0.31845036 -0.21633963 +v 0.51021802 0.19385611 -0.37981662 +v -0.02779926 0.20622212 -0.03770085 +v 0.05020394 -0.62407595 -0.14631514 +v -0.65291286 -0.61027688 -0.03141808 +v 0.45014250 0.19386254 -0.03320027 +v 0.51708651 -0.45097455 -0.13813572 +v -0.63958293 -0.63630056 -0.36253095 +v 0.13642731 0.16558090 -0.03078359 +v -0.11253525 -0.61746228 -0.38216779 +v 0.06899947 0.24029879 -0.12069301 +v 0.50549483 -0.55981278 -0.00759089 +v 0.00681195 -0.66247910 -0.10806534 +v 0.42626283 0.19798349 -0.27643144 +v -0.65372437 -0.04864350 -0.35633478 +v -0.04881941 -0.45786241 -0.02278290 +v 0.51220971 0.22466315 -0.02655068 +v 0.15506923 -0.62803334 -0.26122391 +v -0.12935445 0.24077711 -0.02048920 +v 0.39632875 -0.51026154 -0.39212599 +v 0.50122219 0.22442709 -0.10311454 +v -0.66069525 -0.51332259 -0.38813946 +v 0.50981611 -0.48917723 -0.05230517 +v 0.18546155 0.20163915 -0.11971893 +v -0.65600276 0.12585142 -0.03551950 +v -0.43510804 -0.64264321 -0.11047649 +v 0.18488812 0.19929373 -0.03210959 +v 0.52961004 -0.07070446 -0.25839552 +v -0.56087828 -0.31168270 -0.40351999 +v -0.65655828 -0.14303914 -0.11739545 +v -0.13616404 0.24241610 -0.40141758 +v -0.09420820 -0.47328085 -0.01028375 +v -0.07061268 0.24081306 -0.08686591 +v -0.25539604 -0.16800082 -0.40194014 +v -0.13541140 0.24146692 -0.11124077 +v -0.58494228 -0.13421553 -0.40572709 +v -0.14119700 0.00170330 -0.01588266 +v 0.00102380 -0.45939904 -0.38405913 +v 0.38708204 -0.56878799 -0.01992237 +v -0.05331220 -0.26897955 -0.02423401 +v -0.51204884 -0.59481144 -0.38670474 +v -0.67111444 0.25977796 -0.39949942 +v 0.47780845 -0.63743007 -0.05043234 +v -0.02935059 -0.62551326 -0.05986159 +v -0.46117139 -0.38980421 -0.40179226 +v 0.53440237 0.12046268 -0.29484078 +v -0.69170451 -0.42310193 -0.01819764 +v -0.65040147 -0.60537308 -0.02752561 +v -0.47279635 0.03723375 -0.01767634 +v 0.13019885 -0.65665805 -0.00765685 +v 0.23050989 -0.38761461 -0.02232203 +v -0.58539283 0.25350392 -0.40689671 +v -0.07810067 0.24093552 -0.39929155 +v -0.13241875 -0.52448577 -0.38456297 +v 0.53018093 0.19305688 -0.11381235 +v -0.06546194 -0.18701960 -0.40011010 +v -0.25087762 -0.32585129 -0.01224850 +v -0.67358226 -0.12175634 -0.40570858 +v -0.20165138 -0.50891274 -0.38520959 +v 0.28923118 -0.16860117 -0.02564384 +v 0.32470766 0.19987684 -0.29411927 +v 0.33388782 
-0.36519185 -0.02258502 +v -0.67536402 0.06655740 -0.15994114 +v -0.61111826 0.22786689 -0.21129224 +v -0.38763794 -0.61589444 -0.02514212 +v 0.49514222 -0.39443484 -0.37843999 +v -0.03632914 -0.06242859 -0.01486646 +v -0.68315154 -0.20326957 -0.14073113 +v 0.47489262 0.11818425 -0.38685900 +v -0.67460626 -0.23906131 -0.40476471 +v -0.41703442 0.15538859 -0.03231002 +v 0.37005550 0.19933751 -0.20437157 +v -0.59855634 -0.63677067 -0.15003824 +v 0.52479470 -0.15738744 -0.39502949 +v 0.03335150 0.20384145 -0.11579496 +v -0.68951058 -0.36544567 -0.01783209 +v -0.41945618 0.17463110 -0.01930661 +v -0.48297489 0.21262084 -0.03342751 +v -0.43315729 -0.60945886 -0.38481489 +v -0.45344529 -0.60998321 -0.38390103 +v 0.31929839 -0.09695901 -0.39822704 +v 0.49242607 -0.13091208 -0.11055266 +v 0.48723695 -0.33995509 -0.11027052 +v -0.46754321 -0.40895745 -0.01372387 +v 0.35170031 -0.51372778 -0.00755302 +v 0.25406191 0.19708766 -0.03212747 +v 0.18138863 0.14973436 -0.01727463 +v -0.34093073 0.24846439 -0.40167993 +v -0.04725009 0.00360560 -0.01578521 +v 0.38732272 -0.23511064 -0.38357505 +v -0.42586640 -0.22757150 -0.01464347 +v -0.31897616 -0.40756857 -0.40017626 +v 0.05327344 -0.64920950 -0.10604443 +v -0.31437328 0.21300569 -0.07305565 +v -0.08499728 -0.65230447 -0.00969948 +v 0.09784816 0.20615892 -0.16286081 +v 0.35410416 -0.57173258 -0.00673597 +v 0.37704176 -0.63516116 -0.02297184 +v 0.18635504 0.20121147 -0.06825677 +v 0.24743664 -0.50001550 -0.02111816 +v -0.46223632 -0.35253701 -0.01404515 +v 0.31043312 0.20056549 -0.19211158 +v 0.48869616 -0.35527372 -0.10721681 +v -0.26950160 0.24007644 -0.01966066 +v 0.22137584 0.17471270 -0.40154397 +v 0.50319344 -0.03454744 -0.11645329 +v -0.31073040 0.25037754 -0.17594679 +v -0.46277136 -0.51756567 -0.02586343 +v 0.51113200 0.12933362 -0.37714249 +v 0.04255728 -0.14914498 -0.39989668 +v -0.31001616 -0.30242211 -0.40102798 +v -0.34599951 -0.64394349 -0.39625782 +v 0.21570033 -0.38902688 -0.38306910 +v 0.41039345 -0.42719892 -0.38067091 +v 0.51729393 -0.22569607 -0.10792492 +v 0.42171121 -0.57129568 -0.39118743 +v -0.68505871 -0.63358533 -0.38831842 +v -0.62803215 -0.23192644 -0.39008209 +v -0.54277647 -0.63856703 -0.11025478 +v -0.65799439 -0.63535786 -0.30561861 +v 0.51643378 -0.46315530 -0.10964029 +v 0.23533298 -0.24839906 -0.39742488 +v -0.03924494 -0.24606112 -0.39900303 +v 0.52376562 0.22309364 -0.39240345 +v 0.51328754 0.17693314 -0.11995298 +v 0.15677682 0.23740582 -0.20293961 +v 0.51003122 0.18690039 -0.12139729 +v 0.49563378 -0.05728214 -0.10916198 +v 0.43603954 -0.01349535 -0.01489582 +v -0.21471246 0.07503125 -0.03006101 +v -0.69374222 -0.63148963 -0.12754898 +v 0.48924914 -0.66811639 -0.10277368 +v 0.01618930 -0.65572792 -0.11093268 +v -0.54281998 -0.60180396 -0.40095755 +v -0.47921678 -0.53127229 -0.38699263 +v 0.44954866 0.02641302 -0.38611472 +v -0.38012174 -0.61218870 -0.11462897 +v -0.29819918 -0.08708352 -0.01521328 +v 0.18745075 0.06949505 -0.02940056 +v -0.39774221 0.22084612 -0.27604738 +v -0.47241485 -0.61073023 -0.37948364 +v -0.49072909 0.22310460 -0.39064869 +v -0.07777549 0.24201715 -0.11848995 +v -0.68265229 -0.63347512 -0.11161526 +v -0.58630669 -0.55803472 -0.38732731 +v -0.68722445 -0.46218839 -0.11418300 +v 0.51812851 0.22078665 -0.01869382 +v 0.13359091 -0.50726157 -0.00850752 +v 0.26078963 -0.63137549 -0.02203478 +v 0.49282035 -0.16190982 -0.03279728 +v 0.49413875 -0.09786288 -0.06499624 +v -0.65206796 -0.08120598 -0.38979101 +v -0.65140748 -0.57694227 -0.40240377 +v 0.13964532 0.10156598 -0.02988441 +v 0.46538228 
-0.17185064 -0.01283022 +v 0.28589556 0.19551346 -0.03197597 +v -0.65903372 -0.30909261 -0.38754371 +v 0.52072436 -0.25033945 -0.10846706 +v -0.13777164 -0.16084804 -0.02618599 +v 0.32895988 -0.30221483 -0.39582357 +v 0.08060404 -0.20815302 -0.02479360 +v 0.43057397 -0.66499555 -0.38919929 +v 0.01574133 -0.65573186 -0.01011183 +v -0.63501835 0.22935778 -0.18016182 +v 0.51188213 -0.41856396 -0.05405374 +v -0.64875019 0.13217717 -0.12188241 +v 0.09910005 0.23561630 -0.02124370 +v 0.30553845 -0.58819413 -0.00681512 +v 0.28916469 -0.35228416 -0.00972969 +v 0.39992565 -0.58931255 -0.37850660 +v -0.66299915 -0.60524732 -0.18326235 +v 0.47279349 -0.46564016 -0.39202875 +v -0.56985611 -0.64801997 -0.10755640 +v -0.39159307 -0.60773987 -0.02375867 +v 0.06978293 -0.38562071 -0.39699912 +v -0.64910603 0.14165621 -0.01978960 +v 0.33196789 -0.63442314 -0.37598693 +v 0.39161333 0.22987492 -0.25701824 +v -0.38563851 0.24644357 -0.11633967 +v 0.51290101 -0.37730187 -0.10902793 +v 0.48540917 -0.61641908 -0.10129958 +v -0.67515147 0.25466618 -0.39487928 +v -0.63451403 0.22859234 -0.14839223 +v -0.02559243 -0.50728273 -0.02214841 +v 0.51443189 -0.51607460 -0.38935503 +v 0.48494837 -0.56625426 -0.10278475 +v -0.54396427 0.10862339 -0.39363924 +v -0.48494858 -0.45447478 -0.02662643 +v -0.23656458 0.24237131 -0.11273316 +v -0.65988874 -0.20928870 -0.18697269 +v 0.41641974 0.22928725 -0.11653244 +v -0.20416266 0.24753702 -0.18370108 +v -0.48332554 -0.05923115 -0.01671120 +v -0.11957002 -0.16151305 -0.40082586 +v -0.65428746 0.16893990 -0.03562333 +v 0.49592054 0.13653727 -0.03264195 +v 0.04046582 -0.56659073 -0.00857816 +v 0.51560950 -0.50513273 -0.14271502 +v -0.05452554 0.21057715 -0.12168495 +v -0.67183626 -0.60996240 -0.08464243 +v 0.18144502 -0.13031030 -0.02622088 +v 0.46326774 -0.43782258 -0.37983322 +v 0.15598327 0.20359106 -0.33449638 +v -0.00064265 -0.04027274 -0.40109223 +v -0.56027657 -0.60830051 -0.18183507 +v -0.03725065 0.21357268 -0.12169006 +v 0.41232887 0.19866037 -0.15926707 +v -0.40752137 0.08360803 -0.03115637 +v 0.51399159 -0.44744566 -0.10546781 +v 0.13454327 -0.66044837 -0.10784321 +v -0.50702345 -0.64035195 -0.39468840 +v -0.65840137 -0.19947125 -0.11735587 +v -0.06070537 -0.15696748 -0.01322428 +v 0.26700279 -0.65634102 -0.39235961 +v -0.46174473 -0.47632542 -0.38732114 +v -0.35747081 0.20263065 -0.01951342 +v -0.37054729 -0.23423390 -0.01409835 +v 0.51357943 -0.40161359 -0.10648019 +v 0.32168695 0.20083614 -0.13173252 +v -0.47324425 0.22358254 -0.17011680 +v -0.45882377 -0.64249408 -0.01277703 +v 0.30482936 0.18074131 -0.01787465 +v -0.25999880 -0.33037248 -0.02512000 +v 0.16601080 -0.37821588 -0.39626592 +v -0.14786568 0.24598590 -0.20498946 +v -0.39294216 -0.50730789 -0.02518127 +v -0.65442824 -0.49096513 -0.38893354 +v -0.28019464 0.24525374 -0.40480280 +v 0.07697656 -0.65661323 -0.13762720 +v -0.20017672 -0.54894441 -0.39805067 +v 0.02723576 -0.66135031 -0.10825557 +v 0.47096312 -0.63797116 -0.04210269 +v -0.19159816 0.24489363 -0.35958999 +v 0.01419512 0.14142834 -0.01742612 +v 0.13089016 -0.63344407 -0.10556356 +v -0.41028360 -0.25451213 -0.40260783 +v -0.61540616 -0.60695422 -0.38464466 +v 0.22870776 -0.01393510 -0.01498604 +v -0.24115975 -0.29611865 -0.40053776 +v -0.26537991 -0.49201185 -0.38574767 +v 0.45945081 -0.66758722 -0.10452863 +v -0.22605063 -0.23299067 -0.40104648 +v 0.00793593 -0.08741470 -0.01444261 +v -0.43639815 0.21217625 -0.03330921 +v 0.20463865 -0.61215049 -0.00717343 +v -0.15318535 -0.65160257 -0.11137141 +v 0.52387321 -0.21404392 -0.11001733 +v 0.15622649 
-0.45554817 -0.00897520 +v -0.36340743 -0.21256147 -0.02723283 +v -0.19362055 -0.03000793 -0.01556946 +v 0.49975801 0.08474392 -0.11531367 +v -0.34554887 -0.27153718 -0.38825309 +v -0.45753095 0.10389669 -0.03175937 +v -0.11279367 0.21615234 -0.01871510 +v 0.10418937 -0.00635357 -0.40072712 +v -0.26473990 -0.01070520 -0.40388808 +v 0.33234426 0.23207778 -0.15341453 +v -0.34660614 0.03693566 -0.40544239 +v 0.50668538 0.02330775 -0.23432620 +v 0.52581114 0.22246745 -0.08375346 +v 0.48914170 0.19723426 -0.13315512 +v 0.27091706 -0.63167101 -0.26513106 +v 0.53115898 0.00124052 -0.15766574 +v -0.47479558 0.17978036 -0.03300367 +v -0.02257414 -0.65364605 -0.12674342 +v 0.51680762 0.19702908 -0.11342765 +v 0.49780986 0.15808032 -0.11413278 +v -0.22832134 -0.02136678 -0.39037251 +v -0.55201542 -0.07727180 -0.03024740 +v -0.24857862 -0.27921990 -0.02539915 +v 0.50902522 -0.49853846 -0.10731085 +v -0.67190278 0.25263005 -0.40382829 +v -0.47718918 -0.60697871 -0.38561320 +v 0.49282554 -0.43283561 -0.11401576 +v 0.24257019 -0.64586776 -0.10464948 +v -0.65912837 -0.25635120 -0.38304654 +v 0.05983739 0.20440482 -0.12006234 +v -0.57708549 0.20069847 -0.39414734 +v -0.14061317 -0.35443541 -0.38596365 +v -0.06851077 0.25720149 -0.13615438 +v -0.34822661 -0.26836100 -0.02649536 +v 0.52286971 0.08339361 -0.11425576 +v -0.53618693 0.25017145 -0.02417528 +v -0.26282245 0.21205975 -0.03700721 +v -0.60225540 -0.18679614 -0.02996016 +v -0.68607748 -0.34205201 -0.11856059 +v 0.25043452 0.19986269 -0.07060409 +v -0.65785092 -0.15040566 -0.15329453 +v -0.60469508 -0.12293803 -0.01700313 +v 0.41575164 0.14432566 -0.40007475 +v -0.67016196 -0.25972497 -0.11799759 +v 0.09988604 -0.33376715 -0.01033780 +v -0.37589508 -0.61302793 -0.38049027 +v -0.67926788 -0.09518837 -0.12104238 +v 0.50436860 0.22723708 -0.15596227 +v -0.17650180 -0.56929195 -0.38413486 +v -0.23017220 0.24296686 -0.02038920 +v -0.50143236 -0.33964857 -0.02757497 +v 0.21823741 0.20456141 -0.11698862 +v -0.04194051 -0.35431167 -0.39821979 +v -0.51378179 0.23202692 -0.12324586 +v 0.08263165 -0.65741819 -0.29173708 +v -0.03074051 0.20195431 -0.03147808 +v -0.66837507 -0.29536119 -0.07537957 +v -0.67356163 -0.52781481 -0.03369773 +v -0.00112899 -0.35026526 -0.02333994 +v -0.11575555 0.12210289 -0.03042400 +v 0.45971453 -0.65868098 -0.10240856 +v 0.34045693 -0.25676563 -0.02422848 +v -0.63078403 0.22275071 -0.12154580 +v 0.52873725 -0.07506993 -0.35954255 +v 0.07335165 0.23899950 -0.26402125 +v 0.28437757 -0.24501315 -0.38456872 +v -0.31657475 -0.64599842 -0.22496673 +v -0.65992194 0.02679846 -0.03660251 +v -0.03923202 0.23971058 -0.40015647 +v -0.66246402 -0.60924017 -0.02975845 +v -0.67482114 -0.60802108 -0.07469018 +v -0.67009282 0.14380373 -0.36452952 +v -0.57308674 -0.63857943 -0.39332774 +v -0.68382722 -0.10187008 -0.02001131 +v -0.66545922 -0.48638314 -0.38297546 +v 0.53454846 0.18822539 -0.11610775 +v -0.69160962 -0.64098048 -0.10787300 +v 0.00378878 -0.13541664 -0.02641195 +v -0.58270210 -0.61245495 -0.03162277 +v 0.48556286 -0.39513662 -0.10779513 +v 0.52554494 -0.20181903 -0.36162326 +v 0.11864319 0.18470968 -0.01770868 +v 0.48892671 -0.62620348 -0.10372180 +v 0.29955071 -0.31594953 -0.02320674 +v 0.49816063 -0.24466474 -0.16233526 +v -0.10080257 -0.07692163 -0.01471496 +v 0.00151295 -0.65679580 -0.10632572 +v -0.66957057 0.19390632 -0.31401676 +v -0.68986881 -0.59584981 -0.35111353 +v 0.49307135 -0.41657776 -0.37926662 +v -0.43357953 -0.64383209 -0.01304086 +v -0.64841217 0.12456005 -0.11956360 +v -0.60766977 0.23113349 -0.12158453 +v 0.50907135 
0.19674605 -0.20089060 +v -0.59190017 -0.60979736 -0.02650260 +v -0.16064513 -0.54153013 -0.01012332 +v 0.45805311 0.19606277 -0.09777649 +f 2566 250 3940 +f 3051 3399 3114 +f 1852 3399 3051 +f 2456 1852 3051 +f 3228 3721 2655 +f 130 1727 3010 +f 2456 3051 2756 +f 2456 2756 3721 +f 3721 754 3094 +f 2687 2133 1161 +f 177 3721 3094 +f 2631 2122 2756 +f 2973 1814 2749 +f 3010 1981 2124 +f 754 3721 2756 +f 996 604 2856 +f 754 2756 2122 +f 3581 793 2124 +f 1689 616 2875 +f 757 2596 1816 +f 3581 2124 2018 +f 1689 2875 3755 +f 3185 754 2122 +f 547 754 3185 +f 837 3755 437 +f 837 437 971 +f 2427 971 286 +f 2427 286 1593 +f 3497 968 3871 +f 1233 3871 2087 +f 547 655 3154 +f 1406 3871 1233 +f 2875 437 3755 +f 286 971 437 +f 1832 1787 1502 +f 437 2875 3154 +f 286 437 3154 +f 1233 2087 2357 +f 751 3050 2083 +f 3050 751 1028 +f 1787 286 3154 +f 2778 2083 3050 +f 1593 286 1787 +f 1832 1593 1787 +f 1787 3154 2037 +f 610 1787 2037 +f 1017 2689 2357 +f 1233 2357 2689 +f 849 478 2018 +f 1527 478 1406 +f 1787 3088 2943 +f 332 1260 233 +f 907 3053 1429 +f 1787 610 3088 +f 3745 3104 657 +f 2399 2979 127 +f 1762 3088 1613 +f 2979 3417 761 +f 1762 1613 3004 +f 1749 478 521 +f 936 187 468 +f 936 468 1018 +f 936 1018 2569 +f 1406 1233 1051 +f 1233 2689 1051 +f 2326 2065 1806 +f 3739 900 234 +f 3166 960 2326 +f 234 3417 3264 +f 3739 234 549 +f 1051 2689 997 +f 677 1051 997 +f 900 3417 234 +f 3264 3576 1404 +f 1406 1051 1527 +f 2383 2869 2003 +f 1828 1016 3739 +f 2406 2717 2062 +f 3193 887 2101 +f 2122 1256 3185 +f 2122 2003 2101 +f 2439 1016 1361 +f 1361 1016 1828 +f 1140 936 2569 +f 1361 1922 2439 +f 1256 2101 1722 +f 1361 3672 3385 +f 1361 1828 3672 +f 3672 1828 120 +f 2184 3855 150 +f 793 1780 2767 +f 2629 1722 887 +f 2083 2778 3303 +f 2610 2629 3855 +f 3581 1780 793 +f 2057 2450 2778 +f 2057 2778 1710 +f 2778 3050 1710 +f 1397 3185 1256 +f 1397 1256 1722 +f 1161 2585 2687 +f 2133 2687 1238 +f 2749 1238 2973 +f 2973 12 1814 +f 613 1397 1722 +f 468 924 521 +f 655 3185 1397 +f 1867 2057 1710 +f 655 1397 2222 +f 2222 1397 613 +f 3334 1780 3581 +f 3334 3581 1749 +f 824 3334 1749 +f 1749 521 924 +f 3385 1922 1361 +f 468 521 1018 +f 824 1749 924 +f 613 1722 2443 +f 2443 1722 2629 +f 837 971 3618 +f 3618 971 2427 +f 1832 2036 1593 +f 279 3385 1034 +f 1034 3385 3672 +f 2036 1832 1502 +f 2596 3458 788 +f 2596 788 1816 +f 788 1443 1816 +f 1813 3152 677 +f 3004 1513 1762 +f 1319 1867 1922 +f 448 1922 3385 +f 332 233 827 +f 2629 2927 2443 +f 2604 2240 790 +f 3053 907 2100 +f 507 2100 907 +f 666 297 2927 +f 1817 448 1617 +f 448 3385 1617 +f 3385 1879 1617 +f 2331 1879 3385 +f 1556 824 187 +f 613 2443 3142 +f 3152 1101 521 +f 1018 521 1101 +f 1353 2569 2542 +f 3152 1353 1101 +f 3152 1813 3144 +f 1354 693 120 +f 1354 3712 693 +f 2725 1898 2138 +f 666 2610 2184 +f 2184 3195 666 +f 422 2138 2311 +f 2725 2138 422 +f 2311 85 422 +f 2722 3016 2298 +f 2311 3396 85 +f 1227 2546 1544 +f 297 666 2814 +f 1931 422 85 +f 655 2222 2037 +f 1230 85 1227 +f 1230 1227 1544 +f 3342 2037 2222 +f 2973 236 12 +f 996 2856 236 +f 996 2589 604 +f 3669 2566 825 +f 1689 3755 2244 +f 3618 2427 1593 +f 1931 2725 422 +f 3342 2222 2310 +f 202 2310 2222 +f 3342 2310 2626 +f 2100 827 3053 +f 613 3142 202 +f 1743 2310 202 +f 1556 592 708 +f 2569 1018 2542 +f 1931 2777 2725 +f 1931 85 1230 +f 3790 610 1835 +f 1835 3342 2626 +f 2626 1727 1835 +f 1931 1599 2777 +f 3490 2712 2399 +f 1835 1727 656 +f 1835 656 3790 +f 656 1727 130 +f 656 130 123 +f 798 2752 325 +f 2752 798 1865 +f 1865 941 561 +f 2874 561 1844 +f 2874 1865 561 +f 2752 3391 1575 +f 3433 3576 2712 +f 
2261 3593 3806 +f 271 2777 292 +f 880 3205 490 +f 292 2777 1599 +f 1121 1726 3535 +f 1307 2874 1043 +f 1307 1865 2874 +f 1307 1043 705 +f 1923 1307 705 +f 1544 2546 530 +f 3004 1786 1665 +f 790 2240 1665 +f 3542 1307 1923 +f 3881 1786 1613 +f 705 3535 1923 +f 1613 3790 2718 +f 2832 1544 2860 +f 1786 978 790 +f 1786 790 1665 +f 1260 790 978 +f 1260 978 233 +f 1786 3881 233 +f 233 978 1786 +f 233 3881 827 +f 2652 3881 1429 +f 1429 3881 2718 +f 827 3881 2652 +f 827 2652 3053 +f 3053 2652 1429 +f 1544 2464 1230 +f 1429 2718 507 +f 1429 507 907 +f 2718 3790 656 +f 946 1404 3576 +f 788 3458 2132 +f 424 2882 2041 +f 3745 2718 2404 +f 507 2718 3745 +f 3104 2404 123 +f 3745 2404 3104 +f 1388 2041 2882 +f 657 3104 123 +f 657 123 3341 +f 130 3341 123 +f 3341 2301 657 +f 2767 2301 3341 +f 3641 2882 424 +f 2767 3341 130 +f 424 2909 3641 +f 3641 2909 583 +f 1848 3641 583 +f 824 1556 3334 +f 3745 657 3541 +f 2470 1936 3066 +f 924 187 824 +f 187 924 468 +f 1780 708 2767 +f 583 3066 1936 +f 463 3916 2538 +f 1101 2542 1018 +f 2542 1101 1353 +f 1936 2470 1131 +f 2206 1936 1131 +f 3382 583 1936 +f 1848 583 3382 +f 2107 2464 691 +f 1936 2206 3382 +f 880 3806 3205 +f 2206 3143 3382 +f 1020 3605 2331 +f 2882 3641 363 +f 146 708 592 +f 2843 2882 363 +f 363 3641 1848 +f 1226 3433 47 +f 1147 3916 1522 +f 3916 463 1522 +f 1556 187 936 +f 1081 1522 3693 +f 1385 2569 1353 +f 454 2843 3349 +f 2871 1522 1081 +f 1806 2065 2105 +f 1848 3418 363 +f 3599 1020 1354 +f 1443 788 2132 +f 3382 1209 1848 +f 692 996 2973 +f 1081 165 2871 +f 3232 1806 2105 +f 2910 2871 165 +f 2682 1145 960 +f 2682 689 1145 +f 2910 165 761 +f 761 3287 2910 +f 165 2979 761 +f 2991 2566 3940 +f 3418 1848 1209 +f 946 549 1404 +f 1017 2357 3111 +f 946 457 1626 +f 1630 3418 1209 +f 1017 2183 1189 +f 271 1189 2183 +f 2546 1655 530 +f 2261 2717 3593 +f 3806 3593 1543 +f 1140 2541 936 +f 2301 2767 1496 +f 3347 2206 1131 +f 490 3205 2450 +f 1556 936 592 +f 1017 1189 356 +f 3347 2752 1307 +f 3347 1307 3542 +f 457 946 289 +f 1034 693 3712 +f 1034 3712 279 +f 3347 3315 2206 +f 2331 279 1020 +f 3315 3347 1120 +f 457 1354 120 +f 2107 3035 852 +f 852 292 2107 +f 2717 3014 2062 +f 2599 3518 1020 +f 3448 292 852 +f 1120 3347 3542 +f 1354 279 3712 +f 2161 3542 1923 +f 1020 279 1354 +f 3773 1923 3535 +f 3773 1282 1923 +f 3448 1100 716 +f 2161 1923 1282 +f 1017 356 2689 +f 3560 3166 249 +f 997 2689 356 +f 356 1758 997 +f 2248 1354 457 +f 997 1758 2099 +f 1282 1606 2161 +f 1930 1758 356 +f 880 654 3806 +f 1930 2801 1758 +f 1234 1111 1606 +f 997 2099 677 +f 3518 2599 1881 +f 1813 677 2099 +f 1120 1111 174 +f 2099 1758 1478 +f 1478 1758 2801 +f 543 2099 1478 +f 2801 810 1478 +f 1655 1209 3143 +f 2801 1930 2902 +f 1769 530 3716 +f 2860 530 1769 +f 1544 530 2860 +f 2860 1769 2538 +f 592 2541 2341 +f 592 936 2541 +f 2902 716 3271 +f 3271 716 1193 +f 1492 174 1111 +f 249 3166 1806 +f 810 910 1478 +f 2902 810 2801 +f 2646 3576 3433 +f 3576 2646 289 +f 3035 3376 499 +f 1477 3035 499 +f 880 490 665 +f 3716 1492 1769 +f 2057 1867 183 +f 2991 3013 825 +f 1477 499 492 +f 2538 1769 3247 +f 3448 852 1100 +f 1726 1472 3535 +f 3773 1472 1712 +f 3773 1712 836 +f 769 884 327 +f 1477 492 1100 +f 3743 708 146 +f 3773 836 884 +f 751 492 1586 +f 2651 327 934 +f 769 327 2651 +f 769 2651 1704 +f 1704 2651 102 +f 636 1385 889 +f 3287 116 2910 +f 3096 769 1814 +f 1586 492 116 +f 3287 1586 116 +f 1586 1028 751 +f 289 2646 3862 +f 3862 2248 289 +f 2458 889 386 +f 3426 692 1443 +f 769 782 3773 +f 3773 782 1282 +f 2083 1193 751 +f 716 1100 1193 +f 2441 23 3518 +f 3090 2599 866 +f 866 2599 1020 +f 
604 782 2856 +f 1606 1282 2620 +f 2620 1282 782 +f 3555 2620 604 +f 3805 3717 2620 +f 16 185 1271 +f 3555 2815 3805 +f 1140 2569 636 +f 3805 2815 3940 +f 2435 1806 3232 +f 249 1806 2435 +f 3560 47 3166 +f 3247 1234 1498 +f 1498 1234 3717 +f 185 16 3693 +f 1498 3717 3304 +f 3940 250 3805 +f 3304 3717 3805 +f 3669 3304 250 +f 665 490 3664 +f 1284 2441 3617 +f 3013 2575 825 +f 3518 3617 2441 +f 1271 185 1498 +f 1081 3693 3292 +f 165 1081 3292 +f 3457 1271 3669 +f 1271 1498 3304 +f 2237 263 3292 +f 3669 1271 3304 +f 3669 250 2566 +f 263 2237 2926 +f 2926 2237 2065 +f 2926 2065 2326 +f 263 2926 960 +f 3292 263 165 +f 263 960 1145 +f 689 263 1145 +f 636 2569 1385 +f 2065 2237 2105 +f 960 2926 2326 +f 3013 1709 2575 +f 689 3490 127 +f 3009 1140 143 +f 127 3490 2399 +f 2558 2682 3166 +f 3166 47 2558 +f 2435 2575 1709 +f 946 3576 289 +f 1404 234 3264 +f 549 234 1404 +f 1226 47 3560 +f 692 2973 1443 +f 339 3014 386 +f 692 3068 996 +f 2216 2991 3068 +f 2248 457 289 +f 1626 1828 3739 +f 2248 3599 1354 +f 3672 120 693 +f 3518 1881 3617 +f 1020 3599 866 +f 2062 354 543 +f 3271 3303 810 +f 2468 2665 2606 +f 910 2406 2062 +f 2406 3495 2974 +f 2717 2406 2974 +f 3593 2974 3495 +f 2717 2974 3593 +f 3495 1543 3593 +f 1543 3495 3303 +f 868 803 2665 +f 3303 2450 3095 +f 868 2665 2468 +f 2468 2138 3367 +f 2778 2450 3303 +f 1543 3303 3095 +f 1208 2814 3195 +f 1543 3095 3205 +f 2450 3205 3095 +f 2450 2057 490 +f 2721 297 2814 +f 968 1442 297 +f 1817 1319 448 +f 1617 23 1817 +f 23 1617 1879 +f 3605 23 1879 +f 297 2721 968 +f 1034 3672 693 +f 2331 3385 279 +f 3605 1879 2331 +f 3422 868 3367 +f 3367 1898 3422 +f 1242 2721 1208 +f 2087 2721 1242 +f 1745 2369 3625 +f 2087 1242 2357 +f 1113 820 1197 +f 2249 3748 1745 +f 1898 3111 3422 +f 3830 2369 1745 +f 3830 1745 820 +f 1242 3111 2357 +f 425 1197 779 +f 1197 425 2044 +f 3386 2798 3876 +f 1338 1487 3644 +f 1796 913 3644 +f 3386 3644 913 +f 3386 913 2798 +f 3386 1338 3644 +f 3386 3876 2055 +f 3738 3386 2434 +f 1833 1507 917 +f 3547 3434 2152 +f 2592 3547 2092 +f 1883 3547 2592 +f 3547 1883 3899 +f 3899 618 2627 +f 3246 3899 2627 +f 3246 2627 3613 +f 1055 3657 3246 +f 1338 3386 3738 +f 779 1143 1610 +f 1610 1338 3738 +f 3738 3818 1610 +f 2434 81 3738 +f 3818 3738 81 +f 1610 3818 1383 +f 779 1610 1383 +f 2298 1383 2975 +f 2298 2975 2722 +f 1833 1730 1507 +f 81 1730 1833 +f 1833 2172 81 +f 774 2172 1833 +f 2172 2895 81 +f 1021 2895 2172 +f 2172 774 1620 +f 2383 425 2869 +f 2869 1383 2298 +f 3016 3193 2869 +f 1021 3016 2722 +f 1021 2722 2895 +f 1618 1021 1620 +f 3091 3016 1021 +f 3193 3016 3091 +f 774 917 3843 +f 3662 3843 3434 +f 3843 695 774 +f 695 3843 1670 +f 1670 97 695 +f 743 1670 512 +f 3899 3657 3662 +f 512 3662 3657 +f 3657 3899 3246 +f 2041 3657 1055 +f 512 3657 2041 +f 1388 512 2041 +f 2139 97 743 +f 1618 695 97 +f 1800 254 2139 +f 743 512 1388 +f 2267 2139 3882 +f 803 2267 2665 +f 1800 2139 2267 +f 1800 2267 803 +f 3882 2606 2665 +f 3882 743 1388 +f 1388 2606 3882 +f 3587 1087 1411 +f 1973 1055 1087 +f 3246 1411 1087 +f 1973 72 745 +f 1973 1276 2263 +f 1818 2263 277 +f 1818 277 2281 +f 1818 2281 17 +f 3391 966 1666 +f 1973 2263 1818 +f 2470 1818 17 +f 2133 102 1161 +f 102 2133 1704 +f 1704 2133 1238 +f 1238 2749 1704 +f 769 1704 2749 +f 1814 769 2749 +f 12 3096 1814 +f 769 3096 236 +f 12 236 3096 +f 2856 782 236 +f 604 2620 782 +f 2589 3555 604 +f 2589 2815 3555 +f 2815 2589 2991 +f 2991 3940 2815 +f 3009 2541 1140 +f 654 880 1229 +f 1579 1095 3944 +f 1900 563 1748 +f 54 3944 1153 +f 2341 146 592 +f 1676 375 2969 +f 1226 2646 3433 +f 2118 665 3664 +f 3175 3822 
1520 +f 2381 3336 3822 +f 1781 692 3426 +f 1781 2304 692 +f 1680 2915 1937 +f 1680 990 2915 +f 990 1680 3501 +f 3743 1122 2905 +f 389 2248 3862 +f 532 2118 3664 +f 2532 3090 866 +f 3501 1680 993 +f 993 1108 3501 +f 1956 1108 993 +f 2640 1331 2969 +f 1676 2969 1540 +f 180 1709 28 +f 2115 1540 929 +f 3207 3151 2115 +f 2115 3802 2034 +f 450 2955 3496 +f 3496 323 2130 +f 1738 323 3496 +f 1738 3496 3699 +f 889 3209 1329 +f 109 3207 2458 +f 2381 3675 3810 +f 109 2209 190 +f 2341 1671 2478 +f 1103 1881 1095 +f 2136 564 2150 +f 2991 2216 3013 +f 952 389 1382 +f 929 1329 3209 +f 3209 2458 3207 +f 3151 3699 3496 +f 1011 3699 1489 +f 2532 1153 3944 +f 146 1122 3743 +f 1122 146 2478 +f 3822 1918 155 +f 146 2341 2478 +f 2787 1900 3426 +f 2603 2646 1226 +f 3862 2646 2603 +f 109 190 3699 +f 2248 389 3599 +f 1489 1229 336 +f 109 2458 1181 +f 1181 2209 109 +f 654 190 2209 +f 3675 1103 3810 +f 1095 3810 1103 +f 2532 1881 3090 +f 1748 3426 1900 +f 2153 1290 3426 +f 1290 1781 3426 +f 1331 3009 143 +f 3877 1173 758 +f 2381 3822 3705 +f 3675 2381 3705 +f 3822 155 3705 +f 3590 3810 1095 +f 2336 2136 2150 +f 3426 71 2787 +f 866 3599 952 +f 1290 600 1781 +f 1680 1937 3357 +f 1959 600 758 +f 2826 1226 564 +f 2826 2603 1226 +f 28 1173 180 +f 993 1331 2640 +f 3207 109 3151 +f 2118 532 3675 +f 3667 581 56 +f 993 3009 1331 +f 929 2969 1329 +f 143 636 1329 +f 2228 180 1173 +f 2150 3560 249 +f 3209 3207 929 +f 2115 929 3207 +f 3699 3151 109 +f 1173 28 1959 +f 389 952 3599 +f 2458 3209 889 +f 3049 2628 3357 +f 1937 748 3357 +f 993 2640 1956 +f 375 1305 2969 +f 2640 2969 1305 +f 1173 3877 3494 +f 3496 2955 3151 +f 3667 56 2564 +f 993 1671 3009 +f 3590 2505 3810 +f 952 1153 2532 +f 373 2603 2826 +f 28 3013 2216 +f 28 1709 3013 +f 701 3121 3397 +f 2604 790 1260 +f 2604 1260 332 +f 3397 3121 2604 +f 3397 2604 2291 +f 701 3397 581 +f 2291 332 2100 +f 2291 2604 332 +f 581 3397 2291 +f 1299 739 2830 +f 739 1299 3667 +f 649 830 3767 +f 3667 1299 3317 +f 2987 2830 739 +f 3767 1714 649 +f 2830 3317 1299 +f 3317 701 581 +f 2987 830 858 +f 3767 830 739 +f 830 2987 739 +f 3667 3317 581 +f 3767 739 3667 +f 2599 3090 1881 +f 2360 347 140 +f 3577 3578 1643 +f 1425 3577 1643 +f 1643 527 1425 +f 1425 3124 3577 +f 1425 2726 3124 +f 1425 2320 2726 +f 1245 795 2726 +f 795 1245 2372 +f 2372 2165 557 +f 1079 2019 2165 +f 3387 2019 1079 +f 3387 1846 2019 +f 1846 1030 2190 +f 2190 1030 34 +f 3250 615 2911 +f 3413 527 3578 +f 3413 1875 527 +f 3413 395 1875 +f 988 385 2325 +f 2325 385 26 +f 2440 2165 2019 +f 2089 3198 3522 +f 3441 3324 3146 +f 3441 2006 3324 +f 806 3524 2006 +f 2834 1927 121 +f 307 3080 2297 +f 3412 2297 488 +f 3412 1608 170 +f 3471 3378 2713 +f 3471 2713 2746 +f 879 2748 619 +f 121 3198 2834 +f 3198 2089 2834 +f 316 3441 1759 +f 316 759 3441 +f 3353 3766 1313 +f 762 3080 3498 +f 2671 170 1008 +f 2040 2713 3378 +f 2713 2040 21 +f 1564 1708 1136 +f 3324 535 3698 +f 556 1313 3524 +f 3412 3653 1608 +f 2746 3221 3771 +f 1288 349 2701 +f 199 2701 3213 +f 368 1846 3387 +f 368 1030 1846 +f 2908 2911 34 +f 2908 3250 2911 +f 3824 2232 3941 +f 2842 3941 2232 +f 2693 615 2789 +f 3124 3578 3577 +f 3578 527 1643 +f 2726 1944 3124 +f 527 1875 2320 +f 1367 1346 699 +f 699 379 1367 +f 379 699 1166 +f 379 1166 2703 +f 1425 527 2320 +f 199 690 1703 +f 1516 167 2703 +f 3044 988 1516 +f 1516 988 167 +f 3044 385 988 +f 1944 2726 795 +f 795 2372 557 +f 557 2165 2440 +f 2048 3522 3198 +f 2089 3522 235 +f 2089 235 3037 +f 1929 3198 121 +f 1929 2048 3198 +f 615 2190 34 +f 1759 3146 3698 +f 3146 1759 3441 +f 759 2006 3441 +f 759 806 2006 +f 3698 535 316 +f 
1759 3698 316 +f 806 759 316 +f 1470 1367 379 +f 2703 1470 379 +f 167 1470 2703 +f 395 983 2120 +f 3353 556 806 +f 2822 226 3324 +f 3353 1313 556 +f 3766 3524 1313 +f 1180 3353 806 +f 795 2440 1944 +f 3498 3080 3112 +f 2297 3080 762 +f 3700 3221 2985 +f 2297 762 488 +f 3412 488 3653 +f 3498 3653 488 +f 3653 3498 1008 +f 1008 1608 3653 +f 1008 170 1608 +f 3498 1791 1008 +f 2670 1470 1013 +f 3824 615 3250 +f 2089 3037 226 +f 2089 226 2834 +f 226 535 3324 +f 2040 3378 2671 +f 21 2746 2713 +f 2746 21 1136 +f 21 1564 1136 +f 2746 1136 1708 +f 307 2431 3800 +f 2040 1564 21 +f 1977 1448 3848 +f 2016 1708 1564 +f 2016 1564 749 +f 806 556 3524 +f 3146 3324 3698 +f 2985 3221 2016 +f 749 2976 2016 +f 2418 3498 3112 +f 2474 1927 2834 +f 2834 226 2822 +f 3848 3471 82 +f 3221 3700 3771 +f 3615 2580 2748 +f 3848 170 3378 +f 615 2670 2789 +f 983 395 3413 +f 2693 1846 2190 +f 3262 3927 814 +f 1708 3221 2746 +f 1708 2016 3221 +f 517 2040 2252 +f 2522 1470 167 +f 1564 2040 517 +f 3824 2670 615 +f 3824 3941 2670 +f 121 1927 26 +f 3700 2985 3182 +f 2985 2016 2976 +f 2166 3389 2714 +f 2748 1222 619 +f 619 2845 879 +f 3389 619 2714 +f 2845 619 3389 +f 52 566 2351 +f 3626 2351 566 +f 2738 566 1992 +f 2598 2938 2719 +f 206 3270 2719 +f 1437 3470 3819 +f 2580 3615 1765 +f 1992 2484 2738 +f 2738 2484 3615 +f 2938 1364 2719 +f 3182 2985 2580 +f 3615 551 2738 +f 167 558 2522 +f 2453 122 2789 +f 737 675 2293 +f 3768 737 3565 +f 3650 940 166 +f 3036 167 2325 +f 206 2719 1364 +f 940 3650 2548 +f 1096 1066 2918 +f 428 1395 2091 +f 212 2932 2164 +f 428 2091 675 +f 2431 2340 3800 +f 3565 2293 2351 +f 166 2002 515 +f 348 1267 2674 +f 52 2484 1992 +f 49 3454 300 +f 2052 1258 2164 +f 2932 1456 617 +f 2598 1944 2938 +f 1318 3851 49 +f 290 1267 2340 +f 3851 3065 3454 +f 300 1259 1093 +f 300 3772 1259 +f 206 515 2002 +f 2 3650 166 +f 2091 3411 52 +f 1470 2522 1013 +f 1258 1950 1351 +f 1699 1086 2806 +f 1963 2918 1066 +f 1941 2453 1836 +f 1641 124 411 +f 3462 1836 2522 +f 1352 3688 1960 +f 3704 3687 1641 +f 1267 348 1963 +f 2580 1765 3182 +f 2002 940 1357 +f 737 2293 3565 +f 2053 3688 3411 +f 1351 212 1258 +f 1456 1537 3635 +f 3270 3163 983 +f 2900 2181 285 +f 2932 3308 1537 +f 2091 1395 2053 +f 1537 3946 1070 +f 2829 2405 1683 +f 1352 2551 3311 +f 2719 983 2598 +f 3411 3688 3182 +f 3411 3182 1765 +f 2096 290 1318 +f 1944 2440 3410 +f 1456 2932 1537 +f 854 515 3226 +f 558 167 3036 +f 3454 2481 300 +f 2474 3308 3036 +f 3308 2474 3946 +f 3687 3120 1635 +f 3120 3687 3704 +f 1941 3687 1635 +f 3687 1941 1402 +f 3454 1352 2481 +f 1330 2453 1941 +f 3411 2091 2053 +f 52 675 2091 +f 411 3704 1641 +f 737 1086 675 +f 2938 206 1364 +f 1950 558 1351 +f 2431 1448 3751 +f 290 2340 3751 +f 2834 3946 2474 +f 3688 3311 3700 +f 3700 3182 3688 +f 1963 2340 1267 +f 3065 2551 3454 +f 1352 3454 2551 +f 157 2900 285 +f 410 3215 157 +f 3462 1402 1836 +f 1402 3462 1950 +f 2028 3660 2709 +f 161 2 854 +f 1402 1941 1836 +f 1641 3687 1402 +f 1402 124 1641 +f 49 1093 1318 +f 2351 675 52 +f 2293 675 2351 +f 2829 124 2405 +f 3635 348 2674 +f 2002 166 940 +f 3615 2484 1765 +f 3635 1070 348 +f 206 2938 515 +f 2918 1963 348 +f 285 2674 157 +f 2674 1267 410 +f 3660 2052 2709 +f 3635 1537 1070 +f 2340 2431 3751 +f 2522 558 3462 +f 124 1402 1950 +f 2544 2481 1960 +f 2453 2789 1013 +f 2822 1070 3946 +f 1258 212 2164 +f 1013 2522 1836 +f 2932 617 2164 +f 1699 799 1086 +f 1395 428 3285 +f 1960 2053 1395 +f 675 1086 428 +f 2709 2052 2164 +f 2164 617 2709 +f 2709 617 3251 +f 247 2028 3251 +f 2851 3704 411 +f 2829 411 124 +f 2052 3660 1683 +f 3895 1318 1093 +f 369 3407 1699 
+f 1310 2407 2289 +f 2289 3120 3293 +f 3293 3704 2851 +f 776 688 3026 +f 864 1459 369 +f 576 2361 2123 +f 194 893 2486 +f 2085 1046 2039 +f 1294 981 2038 +f 3543 1165 2494 +f 1946 1547 3758 +f 1555 3892 2953 +f 2038 981 1310 +f 888 1805 661 +f 2494 1805 3543 +f 193 2436 1683 +f 1683 3660 193 +f 2123 2965 3820 +f 3694 2851 3427 +f 3278 1949 1624 +f 3278 1132 1949 +f 688 776 3758 +f 1555 2038 893 +f 624 1132 555 +f 888 1555 194 +f 3278 3895 1093 +f 2862 3366 3147 +f 2039 569 2444 +f 576 2920 2361 +f 2578 3772 2544 +f 2892 1799 80 +f 2766 3285 799 +f 3075 1991 2897 +f 513 2766 3407 +f 888 3892 1555 +f 221 2022 3816 +f 80 861 2028 +f 2486 1297 194 +f 2829 3427 411 +f 2829 2436 3427 +f 3375 2578 2766 +f 2102 2260 2039 +f 3924 2147 1862 +f 2510 2805 502 +f 576 193 2920 +f 1946 3322 1547 +f 2892 1991 1799 +f 1721 3322 1946 +f 1046 2085 221 +f 2289 893 1310 +f 1555 2953 2038 +f 888 194 1805 +f 1046 569 2039 +f 1297 3543 1805 +f 159 3249 2510 +f 159 2510 502 +f 1172 524 1811 +f 957 569 2283 +f 3924 1862 1240 +f 570 3892 888 +f 799 1699 3407 +f 3543 3694 2965 +f 861 2920 193 +f 2965 3694 3820 +f 2123 3820 576 +f 2544 3285 2578 +f 1132 1259 148 +f 1165 2965 1849 +f 2897 1659 3816 +f 2486 3293 2851 +f 1459 864 3758 +f 3543 1297 3694 +f 1547 3375 513 +f 3407 1459 513 +f 1547 1459 3758 +f 1459 3407 369 +f 1862 2147 1099 +f 3820 3427 2436 +f 576 2436 193 +f 2283 502 957 +f 661 1805 2494 +f 194 1297 1805 +f 1259 1132 3278 +f 893 2289 2486 +f 2260 801 2085 +f 3375 555 148 +f 524 2102 1811 +f 864 688 3758 +f 2900 2510 445 +f 1419 3924 1240 +f 570 2496 3892 +f 1667 919 1910 +f 214 591 1044 +f 1721 169 1292 +f 1391 2093 2519 +f 2134 2494 1165 +f 3265 3597 3450 +f 3846 2022 221 +f 3846 221 2085 +f 3281 1044 1419 +f 3281 214 1044 +f 2807 1076 2335 +f 1910 919 776 +f 3281 3302 1237 +f 1237 214 3281 +f 3924 591 2147 +f 3796 661 2494 +f 3265 3450 1687 +f 3216 919 1667 +f 3302 3281 2496 +f 3640 3853 639 +f 909 1629 408 +f 3299 1969 2819 +f 2617 3779 3450 +f 2959 1237 3302 +f 408 1629 1125 +f 1419 591 3924 +f 3892 2496 1240 +f 722 2430 687 +f 2654 687 2033 +f 75 2425 1589 +f 639 3460 3640 +f 3007 775 3216 +f 2912 3741 3147 +f 873 3366 3075 +f 3208 3492 3640 +f 408 3841 909 +f 3670 431 3692 +f 1578 1667 1910 +f 415 3853 3492 +f 2335 4 3841 +f 2221 2197 813 +f 2102 3343 2260 +f 1312 3640 3460 +f 3155 214 2959 +f 2684 75 3837 +f 3841 1464 909 +f 1422 2617 87 +f 2335 3082 2807 +f 1511 1336 3208 +f 135 3155 1980 +f 2762 431 573 +f 573 588 2762 +f 2912 3299 3741 +f 87 3134 696 +f 1044 591 1419 +f 2496 1419 1240 +f 1009 2868 3134 +f 214 1237 2959 +f 1669 3092 3536 +f 909 1464 572 +f 408 1403 3082 +f 909 572 169 +f 2013 2221 3652 +f 3936 2022 3670 +f 477 1853 1969 +f 1619 477 3492 +f 801 431 3846 +f 1866 3265 1687 +f 3216 775 919 +f 1969 415 477 +f 137 1849 3741 +f 2912 3147 873 +f 1947 2868 1009 +f 1221 2807 3082 +f 2762 588 2425 +f 588 573 3343 +f 415 2912 3853 +f 696 3134 1384 +f 696 1384 3652 +f 2013 2819 2221 +f 1578 3216 1667 +f 1619 1336 446 +f 3034 1777 1853 +f 3670 2022 3846 +f 2013 3796 2134 +f 3846 2085 801 +f 3670 3460 3936 +f 169 3528 1629 +f 2335 3841 408 +f 2033 687 1669 +f 573 801 3343 +f 3732 536 775 +f 3779 1687 3450 +f 3492 3853 3640 +f 1464 524 572 +f 3640 1312 3208 +f 1312 3460 3692 +f 3837 75 1589 +f 2868 1947 2959 +f 2425 2098 1589 +f 1889 3692 431 +f 3936 3460 639 +f 796 1403 1125 +f 696 2251 87 +f 135 2344 3155 +f 75 2762 2425 +f 1721 3528 169 +f 2251 696 3652 +f 3528 1721 536 +f 2897 2022 3936 +f 1009 3134 87 +f 2221 2819 2197 +f 2335 408 3082 +f 2868 570 3583 +f 431 3670 3846 +f 1969 3299 415 +f 
3450 1980 2617 +f 3528 3732 3089 +f 3302 2868 2959 +f 3796 2494 2134 +f 3302 2496 2868 +f 137 2819 2134 +f 477 415 3492 +f 1401 84 3007 +f 775 536 919 +f 3652 1384 2013 +f 2013 1384 3796 +f 2013 2134 2819 +f 2425 588 1076 +f 916 1853 1777 +f 477 3034 1853 +f 3383 3265 1866 +f 2904 3265 3383 +f 347 2360 3932 +f 1126 3837 1824 +f 3 87 2251 +f 1602 3133 1210 +f 2896 2274 1210 +f 1210 3886 1602 +f 3023 3365 1903 +f 349 1288 1587 +f 2759 3346 3857 +f 3779 3213 3383 +f 3384 3680 3470 +f 3034 1619 446 +f 2360 1824 3932 +f 1344 242 3707 +f 2896 3 2294 +f 3900 3582 3359 +f 2536 3359 641 +f 343 3886 2274 +f 641 2519 2093 +f 3857 51 3622 +f 1436 3932 466 +f 3857 977 3217 +f 487 826 977 +f 2904 3597 3265 +f 1603 3819 242 +f 487 218 1126 +f 2807 3690 2098 +f 1126 241 660 +f 1890 2360 3199 +f 3199 2360 140 +f 2454 347 3932 +f 461 1436 2867 +f 1160 466 2353 +f 349 1447 2904 +f 3023 1903 504 +f 702 1603 242 +f 3837 1589 1824 +f 487 3857 3346 +f 922 3023 504 +f 3779 1866 1687 +f 1582 3680 1882 +f 3779 2617 1422 +f 2360 1890 1824 +f 1422 87 3 +f 3690 3932 2098 +f 2701 199 1288 +f 1751 722 687 +f 343 2274 2896 +f 3857 487 977 +f 2852 3384 3470 +f 1723 2852 3470 +f 218 487 3346 +f 3819 3470 3680 +f 2274 3886 1210 +f 3837 1126 218 +f 1344 702 242 +f 2536 3900 3359 +f 3622 2519 3857 +f 3857 1037 51 +f 690 3213 3779 +f 3857 3217 1037 +f 826 487 1126 +f 241 1126 1824 +f 1882 2294 813 +f 3384 2294 3680 +f 3932 1589 2098 +f 3133 3 2896 +f 1747 241 1562 +f 1562 241 1824 +f 1890 1562 1824 +f 1582 1777 242 +f 242 1777 3034 +f 1221 3690 2807 +f 3359 242 3034 +f 3359 3034 446 +f 1334 3365 3023 +f 813 2294 2251 +f 343 3384 2852 +f 3346 1511 218 +f 199 3213 690 +f 218 2684 3837 +f 687 2654 599 +f 2701 349 3213 +f 3383 1866 3779 +f 3097 3825 3047 +f 1751 3180 722 +f 641 446 2759 +f 1378 474 1753 +f 3319 2079 1200 +f 2058 710 1518 +f 3326 416 3197 +f 362 2058 1052 +f 2204 362 1052 +f 178 2731 68 +f 99 362 2204 +f 2058 362 710 +f 3648 1753 1179 +f 3923 2111 3031 +f 3031 320 3923 +f 3648 746 181 +f 847 181 899 +f 3227 99 3648 +f 1684 3814 3354 +f 3814 1684 1658 +f 3695 710 362 +f 99 3695 362 +f 2945 2525 1370 +f 99 3227 1734 +f 2169 3595 2514 +f 1734 3227 1648 +f 1744 3833 1337 +f 3449 1518 710 +f 710 3695 3449 +f 178 68 3380 +f 1744 2064 3833 +f 306 2064 1744 +f 2169 2514 668 +f 1198 668 3058 +f 3072 2064 306 +f 2227 2064 3072 +f 1394 2735 1605 +f 2735 1394 1664 +f 3079 2227 3072 +f 2573 3079 3072 +f 969 3449 3695 +f 2424 405 956 +f 664 2424 956 +f 3040 969 3695 +f 1734 3040 3695 +f 969 306 3449 +f 3040 3100 969 +f 3100 2948 969 +f 3714 3040 1734 +f 3714 3842 3040 +f 3072 306 2948 +f 2948 2573 3072 +f 995 2948 3100 +f 1088 3326 3197 +f 845 1568 1434 +f 3100 441 995 +f 3179 2126 3713 +f 1434 1568 3001 +f 1568 3713 2126 +f 668 2514 2567 +f 2126 372 3001 +f 2269 3058 668 +f 2269 668 2567 +f 2286 278 3725 +f 3725 262 2286 +f 1174 278 845 +f 2286 694 1497 +f 278 1174 3725 +f 694 3244 2229 +f 3725 1174 1455 +f 915 519 2269 +f 1194 2391 3058 +f 3763 2632 3244 +f 1605 1519 3283 +f 262 3763 3244 +f 3244 2286 262 +f 2513 2632 3763 +f 2688 2632 2513 +f 1550 2461 3763 +f 2461 2513 3763 +f 1636 1550 262 +f 1550 3763 262 +f 3283 1394 1605 +f 2899 1664 1394 +f 2461 1213 2255 +f 1177 2899 2618 +f 2923 1177 3817 +f 2618 3817 1177 +f 1314 3283 3063 +f 1314 2618 3283 +f 1434 1822 1455 +f 1821 240 1822 +f 3001 1822 1434 +f 2923 3817 711 +f 3817 568 711 +f 568 2213 711 +f 568 3394 2213 +f 240 1455 1822 +f 2213 3394 3486 +f 3486 3394 2047 +f 3419 3394 568 +f 470 568 2618 +f 1636 1371 1213 +f 3451 568 470 +f 3504 1107 3419 +f 3369 2462 2227 +f 
3369 915 2269 +f 3079 3369 2227 +f 2950 3369 3079 +f 1795 2950 3079 +f 2662 3807 2698 +f 1155 2126 3179 +f 1633 3179 2306 +f 1155 372 2126 +f 2662 1633 3807 +f 2662 3858 1633 +f 1654 3084 1975 +f 1002 372 1155 +f 1002 1155 3858 +f 3858 3325 1002 +f 3320 1098 1654 +f 1731 178 2313 +f 2698 1688 2662 +f 2448 1654 1098 +f 3516 2950 1795 +f 88 1795 2573 +f 88 3903 1795 +f 1795 3903 3516 +f 2997 3858 2662 +f 1063 3516 3903 +f 3325 3858 2997 +f 276 3325 2997 +f 3516 3778 1098 +f 630 2422 2615 +f 630 807 276 +f 3778 1974 2448 +f 2448 1974 3827 +f 3778 1063 2891 +f 881 1293 1821 +f 1002 1821 372 +f 1497 3827 1974 +f 1002 881 1821 +f 1812 881 1002 +f 3409 3084 1654 +f 3409 2844 3084 +f 1371 240 3684 +f 2448 3409 1654 +f 396 3260 3684 +f 3684 3260 2984 +f 68 2731 2753 +f 1178 360 3409 +f 2844 470 2618 +f 360 470 2844 +f 2998 396 881 +f 2998 1913 396 +f 3260 396 1913 +f 1913 1784 3260 +f 3504 3451 1069 +f 1483 1784 1913 +f 1812 1002 3325 +f 881 1812 2998 +f 1069 470 360 +f 1812 3325 2200 +f 1576 1812 2200 +f 1576 2998 1812 +f 1576 1048 2998 +f 984 3623 1069 +f 3325 276 2200 +f 807 2200 276 +f 3409 2448 638 +f 3409 638 1178 +f 1576 2200 2901 +f 2200 807 2901 +f 694 1178 638 +f 875 360 1178 +f 2901 1583 1576 +f 2229 875 1178 +f 1576 1583 1048 +f 2229 1178 694 +f 1497 694 3827 +f 2901 3708 1583 +f 1913 2998 1048 +f 1514 2229 2632 +f 2632 2229 3244 +f 1483 1913 1914 +f 875 2229 1514 +f 1514 984 875 +f 2119 1914 1048 +f 1075 2047 3765 +f 2047 1107 3765 +f 3765 1107 1201 +f 1201 1107 3719 +f 1914 2119 318 +f 2513 2802 2688 +f 1107 3504 3719 +f 2658 2802 2513 +f 2461 2658 2513 +f 1201 2740 2753 +f 2799 2461 2255 +f 3623 3719 3504 +f 1069 3623 3504 +f 3719 3623 434 +f 3295 73 1731 +f 783 3197 416 +f 1060 809 783 +f 2530 809 1060 +f 434 3623 483 +f 2799 2255 3550 +f 984 483 3623 +f 3550 1060 2799 +f 2688 483 984 +f 3719 434 3447 +f 3550 2518 1060 +f 3328 2740 3788 +f 2988 3550 3222 +f 3775 294 3788 +f 2688 984 1514 +f 2988 2518 3550 +f 2688 1558 483 +f 2518 3319 112 +f 1060 112 2530 +f 112 3319 1200 +f 112 1060 2518 +f 3319 2518 2079 +f 3677 2079 2518 +f 3602 3677 3444 +f 3444 3677 2518 +f 3602 3444 392 +f 392 3444 2163 +f 1517 1007 3673 +f 1007 1517 597 +f 3673 1007 1283 +f 394 2988 2984 +f 1129 1007 597 +f 2780 1129 597 +f 2988 394 2163 +f 2984 3260 394 +f 3456 1731 73 +f 3393 394 1784 +f 1784 394 3260 +f 1642 2020 1283 +f 1642 3500 2020 +f 1129 1642 1283 +f 3500 1642 1023 +f 2163 2506 392 +f 2506 3238 392 +f 2506 2163 2499 +f 3238 2506 266 +f 2499 264 266 +f 2499 2557 264 +f 3136 3323 3661 +f 3172 1129 3323 +f 2030 3393 1784 +f 318 2030 1483 +f 3393 2030 2501 +f 2501 2557 2499 +f 163 2030 318 +f 3136 3172 3323 +f 2030 163 2501 +f 163 1204 2501 +f 1794 264 2557 +f 1503 2557 2501 +f 1503 1794 2557 +f 2501 2188 1503 +f 789 2501 1204 +f 2188 2501 789 +f 789 575 1188 +f 789 1204 575 +f 1188 2188 789 +f 1204 60 575 +f 2368 982 2424 +f 2210 605 1325 +f 2970 305 1325 +f 2493 2615 182 +f 2493 630 2615 +f 2493 182 2424 +f 664 1054 2424 +f 1054 2368 2424 +f 720 2368 1054 +f 2970 605 679 +f 2970 679 2491 +f 605 2970 1325 +f 982 2493 2424 +f 2880 982 2368 +f 2970 2491 850 +f 3048 1838 2493 +f 1019 2321 3491 +f 3491 1590 1019 +f 1590 2951 1019 +f 630 2279 1342 +f 3381 2491 679 +f 1653 2279 96 +f 2680 1342 1653 +f 3708 807 1342 +f 3708 2901 807 +f 2873 3381 2321 +f 261 3138 2873 +f 3500 899 3632 +f 1342 2680 3708 +f 1583 3105 2119 +f 965 2119 3105 +f 899 865 847 +f 3839 847 865 +f 899 3500 865 +f 3500 1023 865 +f 965 3893 2119 +f 1475 3105 3708 +f 623 3839 865 +f 965 3105 1248 +f 1248 1035 965 +f 1195 3839 623 +f 3227 847 
1648 +f 3893 318 2119 +f 3839 1648 847 +f 163 318 3893 +f 2737 163 3893 +f 2503 965 3474 +f 965 2503 3893 +f 2737 1204 163 +f 1648 3839 231 +f 60 1204 2737 +f 1532 872 1023 +f 2980 60 2737 +f 2266 3756 3109 +f 3327 1195 623 +f 3756 2266 3404 +f 3031 2111 764 +f 3031 764 1856 +f 3737 3031 1856 +f 3031 3737 320 +f 3544 281 3242 +f 872 1532 3544 +f 118 3737 1856 +f 1856 3756 118 +f 3544 3242 3327 +f 1595 3737 118 +f 1595 1264 3737 +f 3327 3242 321 +f 3466 321 3242 +f 2947 3756 715 +f 3327 3274 1195 +f 3184 3559 2947 +f 6 3559 2116 +f 1195 3274 231 +f 3756 2947 118 +f 231 3274 964 +f 118 2947 1439 +f 321 3274 3327 +f 2947 3559 6 +f 1119 321 3466 +f 321 1119 3274 +f 1119 964 3274 +f 474 1439 2906 +f 964 1119 2509 +f 505 6 2116 +f 505 2116 3421 +f 3100 3040 3842 +f 2848 505 3421 +f 2848 1211 505 +f 1474 505 1211 +f 1211 2961 1474 +f 2515 6 505 +f 90 3100 3842 +f 760 2515 1474 +f 90 3842 3845 +f 505 1474 2515 +f 2906 2515 1179 +f 964 3093 231 +f 1968 3845 3842 +f 755 90 3845 +f 3059 1474 2961 +f 3059 760 1474 +f 2168 3059 2961 +f 90 441 3100 +f 311 441 90 +f 3632 746 760 +f 3632 899 746 +f 1658 1684 1264 +f 3737 1684 3354 +f 88 441 2600 +f 3737 1264 1684 +f 1264 2054 1658 +f 2054 1264 1549 +f 3797 171 755 +f 238 1601 2054 +f 2054 1549 238 +f 3177 1601 238 +f 740 1042 238 +f 3177 238 1042 +f 3637 3845 3093 +f 3200 740 1549 +f 3637 1746 3845 +f 1746 755 3845 +f 2024 1746 3637 +f 2024 1049 1746 +f 2509 3637 964 +f 3637 2509 2024 +f 1399 2509 1119 +f 442 1370 3715 +f 2945 2475 2525 +f 1370 442 2945 +f 3108 2945 442 +f 1 2945 3108 +f 1 2475 2945 +f 1746 1049 755 +f 1920 1337 442 +f 3108 442 1337 +f 3833 2565 3108 +f 361 3659 3797 +f 3108 1337 3833 +f 357 1262 2565 +f 755 1049 3797 +f 2565 1262 1 +f 357 2514 3595 +f 357 2567 2514 +f 2462 2567 357 +f 1996 357 2565 +f 3833 1996 2565 +f 2064 1996 3833 +f 506 3242 281 +f 506 3466 3242 +f 2462 357 1996 +f 3046 850 2491 +f 1595 2796 1264 +f 474 1595 1439 +f 850 3046 281 +f 2846 506 3046 +f 2796 1052 3200 +f 2796 1378 1052 +f 2796 1595 1378 +f 3557 1119 3466 +f 3557 1399 1119 +f 3046 367 2846 +f 367 3138 1061 +f 1281 1061 3138 +f 895 3861 2060 +f 1640 3861 895 +f 3861 2492 2960 +f 3861 2866 2492 +f 2946 2866 1640 +f 1185 2678 3566 +f 3566 2678 2024 +f 3557 2960 1399 +f 3859 1185 3566 +f 1868 1049 2678 +f 57 1868 2678 +f 2644 1868 3403 +f 57 3403 1868 +f 3403 57 2027 +f 1185 1167 626 +f 626 2395 1171 +f 1692 3428 1171 +f 1692 1171 2395 +f 2394 2395 2446 +f 1692 2395 2394 +f 2951 1414 3309 +f 1414 2951 1790 +f 3204 1414 1790 +f 1414 3204 3107 +f 1075 3765 2731 +f 3107 3204 608 +f 3309 261 2873 +f 383 1281 261 +f 1414 419 3309 +f 1281 1935 1061 +f 2808 1322 1935 +f 2808 1281 383 +f 2808 1935 1281 +f 3150 383 401 +f 3150 2808 383 +f 172 2808 3150 +f 2808 172 1322 +f 2870 1322 172 +f 2870 31 1322 +f 752 2946 1640 +f 1322 413 1935 +f 2946 752 413 +f 500 2946 413 +f 31 413 1322 +f 413 31 2810 +f 2821 1539 413 +f 413 2810 3018 +f 413 3018 2821 +f 1539 500 413 +f 1933 1539 2821 +f 2086 500 1539 +f 2881 500 2086 +f 1291 500 2881 +f 500 1291 1244 +f 500 1244 1000 +f 2866 205 1826 +f 2946 500 205 +f 1430 2332 205 +f 2332 2446 205 +f 1000 3831 1430 +f 1773 1430 3831 +f 1773 2332 1430 +f 453 3301 2394 +f 453 2424 3301 +f 1144 453 2160 +f 405 2424 453 +f 405 453 2771 +f 2600 311 171 +f 2600 1191 3903 +f 3903 1191 1063 +f 171 3726 2600 +f 1191 2600 3726 +f 1191 2891 1063 +f 1191 2757 2891 +f 3726 2757 1191 +f 3659 3726 171 +f 3726 3659 2692 +f 2692 2757 3726 +f 2757 2692 1651 +f 2757 2401 2891 +f 2258 2891 2401 +f 2258 1974 2891 +f 2258 1497 1974 +f 278 1497 2258 +f 845 2258 
2401 +f 845 278 2258 +f 2757 1651 2401 +f 3659 1815 2692 +f 3713 1651 2692 +f 2692 1815 3713 +f 361 1815 3659 +f 2306 3713 1815 +f 2644 361 1868 +f 1815 361 2644 +f 2306 1815 2644 +f 1651 3713 1568 +f 3807 2306 2644 +f 3403 2698 3807 +f 2027 2698 3403 +f 844 2027 3428 +f 95 3057 2081 +f 3253 587 1462 +f 1110 1074 3467 +f 3218 2081 3057 +f 1462 587 95 +f 1462 95 2081 +f 3284 1691 901 +f 3689 3420 3512 +f 3630 1340 1874 +f 1092 3612 2850 +f 244 1323 1335 +f 3665 2477 1588 +f 3254 1169 464 +f 3787 46 1272 +f 1027 2919 2196 +f 2386 1823 2337 +f 2066 2919 1027 +f 1032 273 268 +f 1984 2769 2158 +f 46 595 1272 +f 414 2365 1588 +f 927 541 2337 +f 2825 3619 1251 +f 1247 1251 3619 +f 1823 2386 2070 +f 275 3584 219 +f 1925 3584 275 +f 3272 219 3584 +f 1588 2208 414 +f 3110 464 595 +f 3272 3654 1309 +f 3076 1594 2825 +f 1588 2365 3665 +f 1984 273 3703 +f 2602 1984 3703 +f 2747 2769 2602 +f 1251 1247 3473 +f 1535 259 2042 +f 1535 2042 2337 +f 3787 2433 46 +f 3110 3211 464 +f 464 3211 3254 +f 3619 3787 1247 +f 3076 1251 962 +f 2477 1535 1823 +f 268 3211 1032 +f 414 2208 2602 +f 2602 2208 2747 +f 2208 1823 2747 +f 1027 2196 3826 +f 1085 2386 2337 +f 2452 3076 2066 +f 414 2602 3703 +f 2070 3070 3642 +f 1251 3076 2825 +f 2365 414 3654 +f 2066 3076 962 +f 3076 2452 1594 +f 2785 3826 2196 +f 1535 3157 11 +f 2825 1925 2433 +f 3164 3170 259 +f 1925 2797 3584 +f 273 1032 3703 +f 1363 2365 3654 +f 962 1251 3473 +f 2477 3665 3157 +f 1588 2477 2208 +f 3619 2825 2433 +f 2433 1925 275 +f 219 3110 595 +f 219 2683 3110 +f 2683 1309 1032 +f 1027 3826 1919 +f 1289 2797 3909 +f 2886 1873 3753 +f 3338 2797 3551 +f 1739 3461 805 +f 3764 2385 2831 +f 3934 2365 1363 +f 3157 3665 1246 +f 2162 1739 805 +f 142 3826 2785 +f 3436 251 1139 +f 2479 3400 901 +f 1873 2886 2584 +f 251 3753 2637 +f 1465 3338 3551 +f 1216 2886 3753 +f 391 1869 2380 +f 1355 3852 391 +f 2108 3056 3764 +f 3912 593 3390 +f 2162 1152 1873 +f 805 1117 2162 +f 3461 1117 805 +f 2785 1869 3884 +f 2234 3888 2913 +f 3873 3826 142 +f 2831 2108 3764 +f 3056 3551 2187 +f 2108 2890 3056 +f 3119 2452 1919 +f 3436 1216 251 +f 3912 1246 3934 +f 1381 1090 2463 +f 3912 1360 3575 +f 780 2831 2385 +f 3508 3643 192 +f 111 3461 3073 +f 3056 1465 3551 +f 456 3192 1469 +f 2715 3261 62 +f 2913 3888 2220 +f 2637 1139 251 +f 1216 3753 251 +f 1895 3517 3594 +f 2385 3764 2212 +f 3884 391 3852 +f 1480 1090 3852 +f 902 2924 3509 +f 62 1919 3873 +f 2924 2287 3509 +f 3762 862 1851 +f 2162 2584 1739 +f 1739 2584 3461 +f 2385 2212 3928 +f 1409 2393 1716 +f 2800 2234 3167 +f 3149 1469 3663 +f 3056 2890 1465 +f 1851 3261 2715 +f 3509 2890 2108 +f 501 986 862 +f 3119 1919 62 +f 3512 1340 1542 +f 2017 1216 3436 +f 3643 1978 3505 +f 1767 1416 111 +f 62 3873 1639 +f 2715 62 1639 +f 1978 1289 3909 +f 2234 3594 3888 +f 2800 3594 2234 +f 1092 1874 3612 +f 1117 1416 3575 +f 2612 3508 192 +f 111 1416 1117 +f 1978 3508 1289 +f 3338 1465 3562 +f 3272 3338 3562 +f 1416 11 1246 +f 3884 142 2785 +f 1416 1767 11 +f 2924 902 1921 +f 3164 1767 3332 +f 3665 3934 1246 +f 1090 3101 3852 +f 2924 593 2287 +f 62 3261 3119 +f 3192 1360 3390 +f 2768 2571 2366 +f 3753 1873 2637 +f 2612 986 3928 +f 1873 2584 2162 +f 2886 846 2914 +f 1355 2380 1895 +f 3928 2212 2612 +f 3762 1409 501 +f 2287 3912 3934 +f 2017 3436 1139 +f 1695 1594 3119 +f 2800 1895 3594 +f 1355 1895 2800 +f 3643 3505 1695 +f 1152 2162 1117 +f 2715 3762 1851 +f 1409 2347 501 +f 2850 3612 22 +f 2082 3356 1825 +f 640 986 501 +f 1778 2017 1139 +f 3045 1915 3534 +f 2017 3754 1216 +f 1915 3754 1778 +f 565 3592 1446 +f 1524 838 2733 +f 3167 2366 2571 +f 1893 
2129 1837 +f 501 2347 640 +f 1554 646 1468 +f 2247 3928 986 +f 3483 640 2347 +f 2597 3757 471 +f 245 299 763 +f 24 2601 494 +f 3192 1139 1152 +f 456 3534 1139 +f 3754 2017 1778 +f 1631 7 565 +f 25 846 3754 +f 1376 2992 1876 +f 3165 1036 3190 +f 3799 2121 3374 +f 3592 565 7 +f 132 299 1915 +f 1324 2992 712 +f 671 3493 1631 +f 1554 1468 246 +f 7 1995 3592 +f 3067 329 3483 +f 3742 669 3757 +f 10 3045 1801 +f 994 144 24 +f 144 2488 374 +f 3190 1036 972 +f 3472 3592 1995 +f 1565 3363 2345 +f 1686 1716 1389 +f 299 3731 3754 +f 565 3019 2751 +f 1446 3019 565 +f 3190 972 1645 +f 1686 1389 2992 +f 246 1376 1317 +f 3067 3483 2256 +f 2768 1625 2872 +f 1876 3008 2872 +f 1728 1979 420 +f 607 312 3742 +f 144 994 1565 +f 315 2601 329 +f 3674 3811 3856 +f 2733 838 980 +f 2256 3483 2347 +f 3733 712 1468 +f 3161 640 3483 +f 1036 589 972 +f 1645 3310 2129 +f 646 3733 1468 +f 712 2992 1376 +f 2355 3305 1056 +f 3910 3240 315 +f 972 2071 1645 +f 1389 1876 2992 +f 3350 2601 315 +f 319 3731 299 +f 1686 2256 2347 +f 1631 3493 7 +f 2071 1801 1682 +f 312 607 2223 +f 669 1565 994 +f 2223 3190 312 +f 3811 1458 1995 +f 299 3754 1915 +f 2345 753 1565 +f 753 2345 1893 +f 1317 1876 2872 +f 25 3754 3731 +f 1524 3019 838 +f 1324 712 3733 +f 780 1565 753 +f 1458 246 1317 +f 2779 3472 2366 +f 1468 1376 246 +f 315 3067 3371 +f 1468 712 1376 +f 838 1446 3872 +f 3592 2779 1446 +f 2121 763 132 +f 763 2121 3475 +f 24 374 3161 +f 3910 667 1184 +f 1056 3525 2069 +f 158 3612 2597 +f 3525 1554 1638 +f 3305 420 1056 +f 1874 607 3612 +f 2885 1825 3350 +f 158 1825 3356 +f 1036 901 589 +f 184 2069 3525 +f 2885 3350 3240 +f 3867 3493 2884 +f 3612 607 3757 +f 3201 1825 2885 +f 2307 1056 1979 +f 598 1533 2307 +f 3165 3190 2223 +f 2705 2069 184 +f 3165 2223 3420 +f 1979 598 2307 +f 607 1874 1340 +f 671 2884 3493 +f 3240 3350 315 +f 1335 1323 1184 +f 3612 158 22 +f 1638 184 3525 +f 3847 3674 3867 +f 3512 3420 1340 +f 3240 3910 1184 +f 2307 667 911 +f 2996 1855 3284 +f 158 2597 1825 +f 3799 3374 3400 +f 3757 2597 3612 +f 3674 66 184 +f 2223 607 1340 +f 2996 1036 3165 +f 2996 901 1036 +f 3400 3374 901 +f 1476 3421 2116 +f 1601 3177 1042 +f 320 717 3923 +f 1476 1392 3184 +f 2116 1392 1476 +f 2525 2475 3307 +f 3307 2475 1 +f 2633 1605 2735 +f 1218 2663 2923 +f 1218 2923 711 +f 3486 1218 2213 +f 1075 3486 2047 +f 1809 597 1517 +f 998 2780 597 +f 998 371 2780 +f 1042 3715 1942 +f 676 3661 371 +f 2210 3136 676 +f 2907 3673 2168 +f 1517 3673 1809 +f 1809 998 597 +f 3922 1790 2951 +f 2638 3204 1790 +f 676 3136 3661 +f 3224 1257 2638 +f 2903 1050 3491 +f 1050 1590 3491 +f 3720 3109 129 +f 3720 129 397 +f 2929 2032 3109 +f 717 764 2111 +f 717 2111 3923 +f 3814 1932 3354 +f 1932 3814 1658 +f 828 1 1262 +f 942 1809 2907 +f 1257 608 3204 +f 2638 1257 3204 +f 2864 3939 1834 +f 1279 3595 208 +f 1605 1892 1519 +f 721 2608 2210 +f 3194 3058 2391 +f 1892 3194 2391 +f 3527 2809 721 +f 2899 2735 1664 +f 2663 2899 1177 +f 711 2213 1218 +f 2390 2929 3437 +f 3939 2929 2390 +f 1525 2864 1834 +f 1525 1834 3723 +f 1829 1601 1942 +f 1198 2169 668 +f 3486 1075 1328 +f 2210 676 721 +f 3723 1834 3611 +f 1253 1880 832 +f 1880 1253 1546 +f 1880 1546 1476 +f 3527 1379 804 +f 495 3938 225 +f 3939 2032 2929 +f 3527 721 1029 +f 1476 951 3421 +f 2054 1601 1829 +f 2527 1892 2633 +f 1328 3563 1218 +f 1029 998 1451 +f 3611 2630 3723 +f 2630 3611 717 +f 1029 676 371 +f 3106 149 2962 +f 2951 1590 3922 +f 1942 3588 1829 +f 2327 1942 3715 +f 3905 3194 1892 +f 3720 3437 2929 +f 2929 3109 3720 +f 3437 3720 397 +f 397 3611 3437 +f 832 3404 2032 +f 3184 715 1880 +f 1476 3184 1880 +f 2898 
2864 1525 +f 721 2809 3429 +f 951 2961 1211 +f 1476 1546 2259 +f 2390 3437 3611 +f 3354 717 320 +f 3611 764 717 +f 1932 1658 1829 +f 2235 3307 652 +f 652 3307 828 +f 1042 1942 1601 +f 3055 208 1198 +f 717 3354 1932 +f 2214 3905 3122 +f 1025 2327 3715 +f 577 3423 3563 +f 1279 828 1262 +f 208 3595 2169 +f 1571 2898 3723 +f 804 2809 3527 +f 2962 149 3530 +f 3905 2214 3194 +f 2898 1525 3723 +f 2898 293 2864 +f 3938 2638 3922 +f 3054 1829 3588 +f 1379 1029 1451 +f 1379 3527 1029 +f 1834 2390 3611 +f 2809 1457 3429 +f 225 1050 1457 +f 3939 1253 832 +f 1476 2259 3106 +f 3106 2259 149 +f 2327 3588 1942 +f 366 1279 208 +f 3323 2780 371 +f 3661 3323 371 +f 1198 3194 2214 +f 1325 3172 2210 +f 3423 2663 3563 +f 3530 2907 2962 +f 1809 942 1451 +f 1571 2189 2898 +f 2706 3723 2630 +f 2421 1253 293 +f 1546 1253 2421 +f 1546 139 2259 +f 1296 3530 149 +f 1847 703 3054 +f 2907 2168 2962 +f 1809 3673 2907 +f 3588 2327 645 +f 2608 605 2210 +f 828 63 652 +f 1029 371 998 +f 3055 366 208 +f 3429 2608 721 +f 2527 2633 3423 +f 3563 168 577 +f 168 3563 602 +f 681 602 1328 +f 1328 1075 681 +f 1050 2903 3429 +f 1062 942 3530 +f 942 2907 3530 +f 764 3611 397 +f 2032 2266 3109 +f 2032 3404 2266 +f 3756 3404 715 +f 2962 2168 2961 +f 3184 2947 715 +f 3559 3184 1392 +f 1392 2116 3559 +f 3421 951 2848 +f 1211 2848 951 +f 1370 3307 1025 +f 3307 1370 2525 +f 1279 1262 3595 +f 1198 208 2169 +f 3058 3194 1198 +f 1605 2633 1892 +f 2663 2735 2899 +f 2923 2663 1177 +f 3486 1328 1218 +f 1273 2051 2113 +f 3257 2113 2051 +f 3257 955 2113 +f 1545 955 1387 +f 1387 3585 1545 +f 1387 3548 3585 +f 3585 3548 1405 +f 1405 2061 3585 +f 2917 2061 1114 +f 1690 2917 1114 +f 2421 293 2189 +f 1268 2372 1245 +f 1245 2726 2320 +f 2372 1268 2165 +f 2165 1268 1079 +f 368 1079 1268 +f 368 3387 1079 +f 2734 1030 368 +f 3430 3734 937 +f 3734 3430 2398 +f 34 1030 2908 +f 3824 3250 863 +f 2734 2908 1030 +f 2398 3477 3734 +f 2343 2865 1286 +f 2320 1875 395 +f 855 2259 139 +f 3351 3430 3747 +f 1243 3747 3430 +f 2888 1243 937 +f 2888 937 876 +f 216 2483 876 +f 2398 1010 1431 +f 1431 1010 1899 +f 1431 1899 2517 +f 438 63 366 +f 1705 3549 3395 +f 316 535 3263 +f 216 1040 2483 +f 741 3006 1040 +f 216 741 1040 +f 741 1510 3006 +f 3189 1510 58 +f 58 1510 344 +f 344 3776 1623 +f 1623 58 344 +f 2934 3231 2933 +f 1398 2933 480 +f 2094 602 681 +f 2349 3749 2467 +f 2349 2467 3160 +f 333 3160 2174 +f 2174 3160 2467 +f 3453 333 2174 +f 282 2459 1571 +f 2792 455 2459 +f 3477 455 2792 +f 252 3355 2382 +f 2649 1803 3188 +f 3545 3188 3210 +f 3868 3210 3188 +f 1793 1661 2177 +f 673 1999 1793 +f 3832 2964 1970 +f 1206 229 930 +f 1804 3813 930 +f 3335 2865 1082 +f 3813 1713 578 +f 738 578 1713 +f 942 912 1451 +f 2883 3335 1082 +f 2343 1082 2865 +f 2343 2883 1082 +f 2883 37 3335 +f 1196 37 2883 +f 2707 1033 3233 +f 741 344 1510 +f 1332 2421 2339 +f 1332 1546 2421 +f 139 1546 1332 +f 134 1939 37 +f 3515 3749 3085 +f 1580 2318 3061 +f 1196 1591 2202 +f 2534 912 1062 +f 1236 134 2820 +f 2820 1763 1236 +f 1236 2143 1939 +f 2933 3231 3069 +f 217 1580 2143 +f 217 53 1580 +f 2318 1580 53 +f 365 3671 2035 +f 2415 2878 3671 +f 2489 3061 2318 +f 3355 2250 497 +f 2382 918 252 +f 1803 1039 3188 +f 3188 1039 3868 +f 3061 2489 3849 +f 3868 1316 2145 +f 1316 1187 114 +f 1444 36 1732 +f 870 3666 36 +f 3849 3263 3061 +f 1545 3666 1273 +f 3477 2792 1559 +f 1970 673 3832 +f 3263 3849 1228 +f 2113 955 1545 +f 2061 2917 3585 +f 3676 3362 3005 +f 1690 1523 2917 +f 3231 3263 1228 +f 3263 3231 2934 +f 2833 587 1768 +f 2934 1622 3263 +f 1286 700 1348 +f 580 1196 2202 +f 3290 3938 495 +f 316 1622 1512 
+f 1622 316 3263 +f 1512 110 316 +f 110 2649 3545 +f 390 890 2299 +f 211 139 1332 +f 1999 2418 3572 +f 2418 1999 2367 +f 3671 2534 2035 +f 5 3139 2109 +f 3041 804 1379 +f 3139 5 2243 +f 1523 2562 2917 +f 580 2820 134 +f 2459 455 1917 +f 3588 210 3054 +f 2109 438 2853 +f 2833 73 3295 +f 3676 1559 3362 +f 2498 1551 3203 +f 3549 1705 3515 +f 920 2035 1551 +f 2415 365 2648 +f 2648 365 267 +f 2218 2415 2648 +f 3549 3515 2180 +f 2459 2339 2189 +f 344 741 3676 +f 2218 3815 2581 +f 3005 3776 3676 +f 3776 3005 191 +f 2250 2218 2581 +f 2581 1827 918 +f 1827 2581 3633 +f 1379 1451 912 +f 3160 3085 2349 +f 3085 3160 333 +f 580 134 1196 +f 863 3250 2908 +f 1039 3339 480 +f 863 2232 3824 +f 3868 1039 3139 +f 2272 3832 1187 +f 2842 1367 3941 +f 3832 2272 822 +f 1346 1367 2842 +f 1346 2703 1166 +f 2703 1346 1516 +f 1939 3044 1516 +f 1516 1346 1939 +f 2343 1286 1348 +f 1268 2734 368 +f 2343 1348 1591 +f 385 3044 1939 +f 1939 26 385 +f 3351 2398 3430 +f 2398 3351 1010 +f 1243 3430 937 +f 876 937 216 +f 577 168 2667 +f 168 3532 1068 +f 2517 1917 1431 +f 3395 2180 2517 +f 2180 3395 3549 +f 121 26 1580 +f 1022 2094 2833 +f 3676 3776 344 +f 1580 1929 121 +f 235 2048 1929 +f 3522 2048 235 +f 580 3917 723 +f 1705 3749 3515 +f 3749 2349 3085 +f 3233 438 2109 +f 333 3453 2137 +f 3889 2137 3453 +f 2272 3122 2074 +f 217 1763 2820 +f 53 2820 2157 +f 863 2908 2734 +f 2707 3069 3849 +f 2934 2933 1398 +f 282 2792 2459 +f 3832 1793 1187 +f 938 1268 2320 +f 1847 1137 2706 +f 3776 2734 1268 +f 2339 2421 2189 +f 1058 3747 1243 +f 912 3355 3041 +f 3335 37 1939 +f 1632 210 3588 +f 1033 2437 3233 +f 284 3588 645 +f 2320 1510 938 +f 3776 938 1623 +f 2320 3006 1510 +f 3189 938 1510 +f 243 938 3189 +f 243 1623 938 +f 2729 882 495 +f 1268 938 3776 +f 1187 3139 2243 +f 2883 2343 1591 +f 1039 1512 3339 +f 2883 1591 1196 +f 1039 1803 1512 +f 2243 2272 1187 +f 3868 469 3210 +f 1316 3868 1187 +f 469 3868 2145 +f 2563 114 1187 +f 2563 1187 3736 +f 1187 2790 3736 +f 3671 2250 3355 +f 1187 2177 2790 +f 1793 2177 1187 +f 1793 3832 673 +f 2964 3832 1804 +f 1607 2964 1804 +f 1607 1804 2239 +f 1804 229 2239 +f 229 1804 930 +f 1804 1713 3813 +f 2174 552 3453 +f 3889 3453 552 +f 2074 3905 2527 +f 211 3515 3085 +f 333 2137 3340 +f 3362 282 2706 +f 1196 134 37 +f 134 1236 1939 +f 3362 2706 1137 +f 1763 2143 1236 +f 217 2143 1763 +f 2820 53 217 +f 2157 2318 53 +f 2318 2157 2489 +f 3178 719 1845 +f 1228 3849 3069 +f 3069 3231 1228 +f 1398 1622 2934 +f 1033 2157 2820 +f 1622 1398 248 +f 2820 580 1033 +f 2157 2707 2489 +f 2489 2707 3849 +f 480 3139 1039 +f 2295 495 225 +f 806 316 1180 +f 3832 822 1804 +f 2094 1022 3532 +f 1269 3340 2137 +f 3339 1512 248 +f 248 1512 1622 +f 1512 1803 110 +f 1803 2649 110 +f 2649 3188 3545 +f 937 1559 741 +f 741 1559 3676 +f 1999 3572 390 +f 1999 673 2178 +f 390 1793 1999 +f 580 723 1033 +f 1970 1724 673 +f 673 1724 2178 +f 3233 2437 438 +f 3340 211 333 +f 3355 1845 3041 +f 3889 3203 1808 +f 2109 3139 480 +f 291 3178 1845 +f 2250 3671 2878 +f 2243 3122 2272 +f 3355 497 2382 +f 3355 252 1845 +f 2794 3666 2729 +f 1845 3897 291 +f 3041 1379 912 +f 2917 867 3585 +f 1273 2113 1545 +f 482 2794 29 +f 1545 3585 867 +f 3290 495 882 +f 1559 2792 3362 +f 333 211 3085 +f 1449 1530 1068 +f 1022 1768 2364 +f 700 3005 3220 +f 2534 1062 3600 +f 3355 912 2534 +f 1137 1847 3054 +f 3868 3139 1187 +f 438 2437 63 +f 2074 822 2272 +f 1845 252 3897 +f 2214 2243 5 +f 3122 2243 2214 +f 2074 2527 2476 +f 2917 2562 867 +f 168 602 3532 +f 455 3477 1917 +f 3477 1559 937 +f 2517 2180 1917 +f 741 216 937 +f 1808 3600 1269 +f 3515 2339 1917 +f 1286 
191 700 +f 3515 1332 2339 +f 29 2809 804 +f 29 1457 2809 +f 29 2794 1457 +f 3220 1137 3054 +f 3203 1551 2268 +f 2035 2268 1551 +f 365 2035 920 +f 2415 3671 365 +f 2218 2878 2415 +f 2250 2878 2218 +f 2581 497 2250 +f 2581 2382 497 +f 2581 918 2382 +f 918 1827 252 +f 252 1827 3897 +f 36 1444 870 +f 3666 870 706 +f 3666 706 1273 +f 745 1427 3452 +f 3561 2263 3452 +f 798 2502 941 +f 941 2502 3379 +f 1043 2482 3780 +f 3780 2645 3002 +f 2645 1726 3002 +f 2645 1472 1726 +f 2645 1275 1472 +f 1275 324 836 +f 2585 934 1500 +f 429 17 2531 +f 265 1154 1982 +f 816 3840 2110 +f 1487 2487 800 +f 1677 2416 1796 +f 921 3478 3683 +f 843 3682 3478 +f 3313 3896 3364 +f 3364 2838 3313 +f 966 17 429 +f 966 429 2193 +f 2502 798 989 +f 3416 2502 2511 +f 2482 1043 3416 +f 2585 1500 884 +f 816 2159 1154 +f 3488 1715 3925 +f 3488 3925 1113 +f 3025 1487 800 +f 921 3683 3744 +f 921 843 3478 +f 2838 3364 2378 +f 944 618 3268 +f 3268 3613 944 +f 3613 3268 1783 +f 2417 3405 3587 +f 3561 3452 1427 +f 277 2263 3561 +f 2531 17 2281 +f 2010 1575 2193 +f 2010 989 1575 +f 2502 3416 3379 +f 3416 2511 2482 +f 705 3780 3535 +f 3002 1121 3535 +f 1121 3002 1726 +f 3601 1883 2804 +f 2193 429 2010 +f 421 3364 637 +f 2579 429 2531 +f 989 2010 3029 +f 265 1982 3372 +f 816 2110 2159 +f 3840 816 819 +f 819 2487 3840 +f 3488 714 1715 +f 3601 2485 1883 +f 2485 3268 618 +f 2417 1783 3268 +f 2994 2482 3865 +f 2994 3276 2645 +f 2579 2531 3561 +f 2645 324 1275 +f 1982 1154 2249 +f 2110 1954 2159 +f 3840 1954 2110 +f 2487 3280 3840 +f 714 3625 1715 +f 1715 3625 2369 +f 2369 3925 1715 +f 3925 2369 3830 +f 3925 3830 1113 +f 1124 921 1677 +f 1487 1338 3748 +f 265 816 1154 +f 3025 1677 1796 +f 2416 913 1796 +f 3744 3876 2416 +f 3744 3683 2055 +f 2378 283 1484 +f 714 3333 3372 +f 3333 714 3488 +f 921 2788 843 +f 843 2711 3682 +f 2711 843 2788 +f 921 3896 2788 +f 3744 2416 1677 +f 2579 3064 429 +f 3601 2092 620 +f 2804 2092 3601 +f 2994 2645 2482 +f 3333 265 3372 +f 3275 1484 283 +f 1677 921 3744 +f 3276 324 2645 +f 104 757 2585 +f 1275 1712 1472 +f 1501 1677 800 +f 1500 934 327 +f 421 2378 3364 +f 3029 2533 2511 +f 3865 2482 2511 +f 2043 3276 2994 +f 1526 2720 104 +f 819 816 2641 +f 2004 3488 2547 +f 3364 921 637 +f 1677 1501 1124 +f 747 3268 2485 +f 3621 3029 2010 +f 2004 2641 3333 +f 921 1124 637 +f 2159 2249 1154 +f 1745 3372 2249 +f 2249 3840 3280 +f 3840 2249 1954 +f 3748 3280 2487 +f 3748 2487 1487 +f 2249 2159 1954 +f 3280 3748 2249 +f 714 1745 3625 +f 3830 820 1113 +f 1487 3025 3644 +f 3644 3025 1796 +f 2798 913 2416 +f 2798 2416 3876 +f 3876 3744 2055 +f 2055 3683 2434 +f 3683 3360 2434 +f 3360 3683 3478 +f 3360 3478 3682 +f 3360 3682 2711 +f 931 3896 3313 +f 1730 2711 2788 +f 2711 2434 3360 +f 1730 2434 2711 +f 2788 3896 931 +f 1730 2788 931 +f 1507 1730 931 +f 81 2434 1730 +f 1507 931 3313 +f 620 3547 2152 +f 2804 2592 2092 +f 1883 2592 2804 +f 944 2627 618 +f 944 3613 2627 +f 2417 3587 1783 +f 1783 1411 3613 +f 1411 1783 3587 +f 3405 2685 3587 +f 1087 3587 2685 +f 3405 1427 72 +f 72 2685 3405 +f 72 1427 745 +f 1276 745 3452 +f 3452 2263 1276 +f 2193 1666 966 +f 2193 1575 1666 +f 989 2752 1575 +f 989 325 2752 +f 989 798 325 +f 798 941 1865 +f 3379 561 941 +f 3379 1844 561 +f 3416 2874 1844 +f 1844 3379 3416 +f 3416 1043 2874 +f 3780 705 1043 +f 3535 3780 3002 +f 1712 1275 836 +f 2585 1161 934 +f 1500 327 884 +f 1161 2651 934 +f 1161 102 2651 +f 2170 421 637 +f 1413 3774 2322 +f 2010 3064 3621 +f 2954 3039 2963 +f 733 1656 3480 +f 3480 1656 812 +f 1785 1961 812 +f 1953 2963 15 +f 3469 1953 15 +f 3469 15 2469 +f 686 815 3740 +f 1183 2775 
452 +f 3388 1861 733 +f 726 3487 1175 +f 1175 326 726 +f 3006 2625 1040 +f 1623 1345 58 +f 58 1345 2625 +f 1040 2625 2593 +f 2593 2625 1574 +f 2625 1345 1574 +f 1199 856 1445 +f 1479 125 3589 +f 2195 2857 1681 +f 1752 856 538 +f 771 209 435 +f 1321 1175 3487 +f 20 3564 3629 +f 20 3629 1967 +f 1203 326 1175 +f 3629 1990 1967 +f 1967 1990 631 +f 631 1597 1967 +f 631 1341 1597 +f 1341 498 1597 +f 1597 498 1553 +f 2302 1967 1597 +f 2302 1597 353 +f 353 1597 1553 +f 353 1553 3740 +f 1553 686 3740 +f 2643 1038 772 +f 1553 498 686 +f 1719 686 352 +f 1656 733 1861 +f 3039 2954 2641 +f 686 498 352 +f 686 1719 815 +f 819 1501 800 +f 2097 1097 2700 +f 3740 2480 353 +f 3740 815 2480 +f 352 2300 350 +f 766 1484 3275 +f 1424 747 766 +f 352 350 1719 +f 350 1038 1719 +f 1962 1038 3556 +f 1038 1962 544 +f 1038 350 3556 +f 2043 1526 3276 +f 435 209 1752 +f 2127 3607 2420 +f 2469 15 3039 +f 1127 1424 3043 +f 2175 538 856 +f 1861 2128 125 +f 3043 1424 1612 +f 3774 1413 3589 +f 3589 1413 1479 +f 3277 1561 1961 +f 683 637 1124 +f 388 2170 683 +f 1127 3043 2432 +f 2471 3621 3064 +f 3621 2533 3029 +f 2550 2994 3865 +f 2195 2993 2857 +f 1656 1785 812 +f 417 2004 2547 +f 1124 2755 683 +f 209 3607 736 +f 232 766 3275 +f 3348 2225 3225 +f 1413 2322 1953 +f 1719 2643 815 +f 2358 3621 2471 +f 3865 2533 1157 +f 3388 2593 1861 +f 388 2755 239 +f 2128 2593 1574 +f 1574 1350 3808 +f 1785 3277 1961 +f 1183 239 2775 +f 1084 2471 2893 +f 1038 2643 1719 +f 1084 726 2471 +f 2533 2358 2750 +f 1445 3408 3398 +f 3348 2550 1157 +f 727 1005 2043 +f 2469 3039 2641 +f 304 2954 2322 +f 388 239 2480 +f 3395 2517 3480 +f 925 3395 3480 +f 1705 3395 925 +f 232 772 3408 +f 1183 452 3564 +f 1183 3564 20 +f 2302 1183 20 +f 3006 842 2625 +f 842 58 2625 +f 2302 2480 1183 +f 2302 353 2480 +f 1752 538 435 +f 2420 3607 209 +f 726 1084 3607 +f 406 3487 2127 +f 2467 552 2174 +f 2893 3064 2579 +f 2358 2533 3621 +f 727 3348 3225 +f 2195 1005 2507 +f 727 2043 2550 +f 2550 2043 2994 +f 125 3774 3589 +f 1656 125 1479 +f 1479 1785 1656 +f 2593 2128 1861 +f 1350 2775 3808 +f 2963 3039 15 +f 2954 2963 2322 +f 1038 544 772 +f 544 3408 772 +f 772 459 2643 +f 1501 304 560 +f 856 1199 2175 +f 2755 1124 560 +f 2225 1202 3162 +f 3574 3225 2225 +f 3015 3225 338 +f 1445 856 1752 +f 1752 1612 1445 +f 736 3607 1084 +f 459 232 3275 +f 726 2127 3487 +f 1888 1202 2176 +f 1413 1953 3469 +f 1413 3469 1479 +f 1479 3469 2469 +f 1202 3348 2176 +f 1097 2507 3015 +f 1097 2097 2556 +f 2402 2556 2097 +f 2402 2828 2556 +f 2195 1681 1005 +f 2775 560 3808 +f 2755 560 2775 +f 683 2755 388 +f 435 538 301 +f 1840 435 301 +f 1840 771 435 +f 209 771 2420 +f 2007 2170 388 +f 3774 125 2128 +f 731 2170 2007 +f 2128 3808 2322 +f 1445 2014 3408 +f 3043 1612 1752 +f 3168 3043 1752 +f 726 2358 2471 +f 2127 426 406 +f 1321 1369 3838 +f 1369 3487 406 +f 3487 1369 1321 +f 1321 3838 1175 +f 1175 3838 255 +f 1175 255 1203 +f 727 2550 3348 +f 1479 417 1785 +f 2322 3774 2128 +f 3607 2127 726 +f 2469 2641 2004 +f 2750 326 1685 +f 2556 2195 2507 +f 239 2755 2775 +f 815 2643 2007 +f 232 459 772 +f 683 2170 637 +f 2170 731 421 +f 326 2358 726 +f 2358 326 2750 +f 1888 1685 3878 +f 1685 1888 2176 +f 1202 2271 3162 +f 2271 1202 1888 +f 1073 2225 3162 +f 2225 1073 3574 +f 1526 2043 1005 +f 1656 1861 125 +f 1574 3808 2128 +f 3225 3574 338 +f 2963 1953 2322 +f 3015 338 160 +f 452 2775 1350 +f 304 2322 3808 +f 304 3808 560 +f 3277 1696 1952 +f 3015 160 1097 +f 733 3480 2517 +f 812 925 3480 +f 812 489 925 +f 1705 925 489 +f 489 959 3749 +f 489 1961 959 +f 959 2467 3749 +f 552 2467 959 +f 489 812 1961 +f 956 3656 
664 +f 1054 664 3919 +f 3919 720 1054 +f 1015 2368 720 +f 1541 2368 1015 +f 2368 1541 2880 +f 2379 982 2880 +f 3048 982 2379 +f 982 3048 2493 +f 1838 3048 1863 +f 1863 96 1838 +f 2990 317 3727 +f 3919 1015 720 +f 2880 1541 2379 +f 885 3048 2379 +f 1004 1248 2315 +f 2859 3727 3153 +f 3224 1302 3465 +f 2854 1102 1581 +f 3919 664 3191 +f 3191 1015 3919 +f 3191 1541 1015 +f 1863 3048 885 +f 1302 2708 3655 +f 2870 2409 2772 +f 3554 647 3691 +f 1697 1291 2681 +f 2921 3831 106 +f 1773 3831 2921 +f 2624 2264 2771 +f 1004 2428 3474 +f 3465 3107 608 +f 358 3150 401 +f 3150 2622 172 +f 2409 2870 2622 +f 647 2810 2772 +f 2708 1302 973 +f 647 3018 2810 +f 3018 647 3554 +f 3554 3691 1933 +f 3691 1697 2681 +f 3655 2708 401 +f 3691 2681 2086 +f 1697 1244 1291 +f 1697 106 1244 +f 3836 2676 2958 +f 2676 3634 1144 +f 310 2264 2624 +f 1311 96 1863 +f 479 96 1311 +f 2428 1004 2315 +f 1481 387 709 +f 3107 1302 3655 +f 1481 2389 2636 +f 2772 1420 647 +f 3107 3465 1302 +f 2638 2854 1581 +f 973 1102 2854 +f 2921 3836 1773 +f 1773 3836 2958 +f 3656 2771 2264 +f 664 3656 2264 +f 2264 3191 664 +f 1481 2636 387 +f 2350 387 2636 +f 1581 3224 2638 +f 1102 1302 3224 +f 2708 358 401 +f 479 1779 2067 +f 2925 1475 2067 +f 732 973 2854 +f 280 1697 647 +f 3836 2921 3033 +f 582 3836 3033 +f 2315 1248 2925 +f 317 227 3153 +f 2429 2925 222 +f 1071 2428 2315 +f 39 883 2428 +f 3224 1581 1102 +f 709 227 317 +f 476 358 2708 +f 3691 647 1697 +f 48 2167 1843 +f 2429 2315 2925 +f 2854 3938 732 +f 2922 358 198 +f 1374 310 582 +f 310 2624 582 +f 48 2076 2167 +f 222 2925 2067 +f 3521 476 973 +f 3223 1697 280 +f 3033 2921 3223 +f 3915 2497 2076 +f 2389 1481 1266 +f 2704 2350 2636 +f 198 358 476 +f 3223 2921 1697 +f 885 3191 2167 +f 2497 1311 2076 +f 479 1311 2497 +f 2497 1779 479 +f 2429 1071 2315 +f 1541 885 2379 +f 479 2680 1653 +f 479 2067 2680 +f 3727 317 3153 +f 1035 1004 3474 +f 3727 2837 2990 +f 2837 3727 2859 +f 419 3107 3655 +f 2870 172 2622 +f 2772 31 2870 +f 31 2772 2810 +f 3018 3554 2821 +f 2821 3554 1933 +f 1933 3691 1539 +f 1539 3691 2086 +f 2881 2086 2681 +f 2881 2681 1291 +f 106 1000 1244 +f 106 3831 1000 +f 2160 2332 2958 +f 2160 2958 1144 +f 2676 1144 2958 +f 3634 453 1144 +f 453 3634 2771 +f 3656 405 2771 +f 3656 956 405 +f 2577 1254 3526 +f 601 2577 3526 +f 2591 2348 601 +f 3415 2348 2591 +f 1223 3316 554 +f 1948 1159 2346 +f 2112 632 3898 +f 1285 2029 586 +f 1225 3352 1285 +f 1150 1305 375 +f 375 3440 2314 +f 2314 3440 1362 +f 2314 1362 891 +f 891 1650 3102 +f 891 351 1650 +f 961 582 3033 +f 582 961 1374 +f 3159 1235 3102 +f 3746 891 3102 +f 1819 1755 3898 +f 662 1071 2429 +f 811 732 1948 +f 1315 1375 1105 +f 1159 1948 3290 +f 2562 1523 2590 +f 1315 882 2562 +f 1912 1031 3614 +f 215 1031 1912 +f 2429 222 1894 +f 662 3781 1071 +f 2346 811 1948 +f 138 485 1326 +f 2736 2346 1159 +f 1375 2346 2736 +f 2155 2171 3710 +f 3415 2793 2348 +f 3316 3415 554 +f 3316 1916 2451 +f 1569 3337 2155 +f 3256 3337 65 +f 3256 65 2029 +f 1225 1150 3352 +f 1308 1150 2314 +f 1315 1053 882 +f 1315 1105 1053 +f 213 3286 1494 +f 961 3033 1646 +f 3539 138 3860 +f 662 2429 1894 +f 3214 93 1109 +f 518 2577 601 +f 2348 518 601 +f 2590 1065 1940 +f 2348 2793 2813 +f 1301 2704 2636 +f 2545 485 1521 +f 145 2813 2793 +f 3186 476 3521 +f 1150 1308 584 +f 1972 1235 1611 +f 331 2296 1972 +f 1972 1611 331 +f 2296 331 3614 +f 3614 230 1912 +f 215 1912 2743 +f 2743 1988 215 +f 1988 2743 3553 +f 3553 162 1988 +f 1896 162 3553 +f 2008 3729 516 +f 2008 94 3729 +f 2296 3614 1031 +f 2457 3214 1109 +f 1896 2008 162 +f 3290 1053 1159 +f 1053 2736 1159 +f 94 1326 485 
+f 1326 94 2262 +f 3579 138 1326 +f 198 3186 3081 +f 1326 2500 3579 +f 1494 3898 1755 +f 1894 3539 662 +f 3290 1948 732 +f 3186 2813 3081 +f 973 811 3521 +f 3548 1387 1339 +f 1114 2061 1690 +f 1646 3223 2171 +f 2590 1523 179 +f 179 1523 3196 +f 3196 1065 179 +f 3943 1065 3196 +f 1065 3943 1254 +f 1109 48 1843 +f 1940 1315 2562 +f 1315 1940 1375 +f 93 516 3915 +f 516 79 3915 +f 2562 2590 1940 +f 1254 518 1711 +f 1563 3828 584 +f 811 1375 3521 +f 145 1239 3464 +f 2155 1646 2171 +f 525 3214 2457 +f 1396 3186 3521 +f 1896 2262 94 +f 1105 1375 2736 +f 138 3539 1521 +f 1616 2112 3898 +f 3828 1374 961 +f 1819 3898 3425 +f 662 3539 523 +f 523 3781 662 +f 2348 2813 518 +f 3316 2451 2793 +f 1894 108 1521 +f 1711 1396 1375 +f 3781 523 674 +f 811 973 732 +f 2813 145 3081 +f 198 476 3186 +f 485 138 1521 +f 145 3464 3081 +f 1374 2270 310 +f 516 3729 79 +f 485 3729 94 +f 2545 3729 485 +f 93 3915 48 +f 1396 1711 518 +f 1065 1711 1940 +f 674 2389 3781 +f 2793 3415 3316 +f 2577 518 1254 +f 1993 1301 2636 +f 1254 1711 1065 +f 2029 3795 3256 +f 3795 2029 584 +f 3795 1646 2155 +f 3337 3256 2155 +f 3795 961 1646 +f 1285 584 2029 +f 584 1285 3352 +f 584 3352 1150 +f 1563 1109 2270 +f 1109 1563 2457 +f 1031 516 93 +f 1972 2296 3214 +f 93 3214 1031 +f 882 1053 3290 +f 2736 1053 1105 +f 1396 3521 1375 +f 1972 3214 525 +f 2451 1239 145 +f 3794 1616 750 +f 2545 79 3729 +f 3794 2112 1616 +f 1616 3898 1494 +f 1755 2716 1494 +f 2636 3286 1993 +f 1772 1320 2817 +f 3883 1320 1772 +f 525 1320 3883 +f 1772 525 3883 +f 3102 2359 3746 +f 1772 2359 525 +f 2457 1563 2149 +f 704 2314 891 +f 2314 704 1308 +f 704 891 3746 +f 1772 3746 2359 +f 2817 1563 1308 +f 2817 704 1772 +f 1320 2457 2149 +f 2359 3102 1235 +f 2359 1235 1972 +f 525 2359 1972 +f 1563 2817 2149 +f 1772 704 3746 +f 1320 525 2457 +f 2817 1320 2149 +f 3785 1701 1983 +f 439 1701 2634 +f 3785 1983 3875 +f 1257 3465 3645 +f 1788 3785 1257 +f 1788 1257 3645 +f 1257 3875 608 +f 2847 3645 3465 +f 1983 3645 2847 +f 2634 1701 3785 +f 2634 3785 1788 +f 1788 1983 1701 +f 2226 608 3875 +f 1983 2847 3875 +f 2847 3465 608 +f 439 1788 1701 +f 2634 1788 439 +f 3875 1257 3785 +f 1788 3645 1983 +f 3224 3465 1257 +f 3875 2847 2226 +f 608 2226 2847 +f 1481 709 317 +f 266 76 3238 +f 264 76 266 +f 76 264 1794 +f 3153 1531 2859 +f 949 2079 3677 +f 3229 809 2530 +f 1200 3931 112 +f 1200 2079 949 +f 392 3238 3914 +f 2188 1188 1668 +f 1668 1188 575 +f 1794 1503 2812 +f 1112 709 2607 +f 2607 709 387 +f 227 1531 3153 +f 1531 227 1112 +f 3326 3431 294 +f 3326 1088 3431 +f 60 2473 1668 +f 2313 178 3380 +f 2241 1088 3197 +f 3380 68 574 +f 3569 3328 834 +f 2350 2607 387 +f 3236 2106 2956 +f 3236 2078 2836 +f 809 2241 3197 +f 3931 2106 3229 +f 3279 2078 3914 +f 2508 2131 2704 +f 2530 3931 3229 +f 409 3602 392 +f 2473 2607 2131 +f 76 3914 3238 +f 2812 1503 2188 +f 409 392 3914 +f 3431 1088 725 +f 44 725 1088 +f 2607 2350 2131 +f 709 1112 227 +f 237 2106 3236 +f 3229 2106 1596 +f 1074 3380 345 +f 237 1596 2106 +f 3279 3631 2078 +f 2753 574 68 +f 3132 834 3431 +f 3431 725 3132 +f 2241 3229 1596 +f 2106 3931 2956 +f 2078 409 3914 +f 2619 76 2812 +f 1668 2473 3649 +f 3117 1440 3649 +f 2078 3236 2956 +f 574 345 3380 +f 345 574 3328 +f 3569 834 3132 +f 112 3931 2530 +f 2473 2131 3649 +f 2131 2350 2704 +f 949 3677 3602 +f 409 2078 2956 +f 345 3328 3569 +f 3569 3132 635 +f 1112 2980 1531 +f 1531 2980 2837 +f 1531 2837 2859 +f 834 3328 3788 +f 213 2716 3435 +f 1074 345 3511 +f 3511 345 3569 +f 1301 1993 2508 +f 3658 3926 2203 +f 2203 19 3658 +f 537 2849 44 +f 19 3425 632 +f 1782 2265 2072 +f 67 906 1782 +f 
594 67 1782 +f 3187 1802 67 +f 3187 594 30 +f 30 3761 3187 +f 3718 2146 2374 +f 2375 1938 2803 +f 3125 1596 237 +f 2246 3649 2131 +f 802 3435 2716 +f 3511 3569 635 +f 1679 1467 528 +f 2836 2078 3631 +f 1819 1905 1755 +f 313 821 546 +f 55 1241 3570 +f 1241 55 1162 +f 1241 1162 2659 +f 2659 1162 2292 +f 3724 1461 1128 +f 3724 3610 3812 +f 1766 2211 196 +f 196 2072 1766 +f 1074 3511 3467 +f 2449 3218 3467 +f 430 313 2072 +f 3286 213 3435 +f 594 1782 1012 +f 3761 2374 3187 +f 594 1012 30 +f 2292 1162 3801 +f 3761 3718 2374 +f 1128 3610 3724 +f 663 3718 3761 +f 3610 2724 3812 +f 2616 2211 2724 +f 237 3236 260 +f 2537 3718 3850 +f 3850 3438 2537 +f 3718 663 3850 +f 2537 3438 878 +f 878 1116 2537 +f 2616 2724 3610 +f 1490 878 3438 +f 1490 1116 878 +f 1819 3425 1905 +f 303 2472 2388 +f 3685 303 2388 +f 2849 975 725 +f 2388 2472 1232 +f 1679 818 2472 +f 1467 303 3513 +f 1467 1679 303 +f 1938 2375 3117 +f 1679 2392 484 +f 2131 2508 2246 +f 403 2803 3854 +f 220 663 3761 +f 663 220 3850 +f 3850 220 3438 +f 3438 220 1490 +f 313 1012 1782 +f 3513 1116 1694 +f 30 313 220 +f 3513 528 1467 +f 1679 3176 2392 +f 643 3236 2836 +f 3425 19 2203 +f 539 55 2449 +f 2803 1333 3298 +f 1694 643 528 +f 3926 2966 1333 +f 3869 2966 3926 +f 3658 3869 3926 +f 19 3869 3658 +f 632 3869 19 +f 734 3610 3801 +f 3610 1128 3801 +f 3425 802 1905 +f 1596 3125 537 +f 2211 2616 430 +f 1938 2376 1138 +f 1138 2376 802 +f 2376 2246 2508 +f 1301 2508 2704 +f 2449 3467 3511 +f 2694 2849 537 +f 30 1012 313 +f 1116 220 1694 +f 3176 1679 528 +f 3236 643 260 +f 2727 2836 3631 +f 1333 1138 3926 +f 802 2203 1138 +f 2203 3926 1138 +f 802 2716 1905 +f 821 537 3125 +f 55 3570 2449 +f 2246 2376 1938 +f 2376 1993 3286 +f 3631 787 2727 +f 1700 787 3631 +f 2375 3012 3117 +f 220 546 260 +f 2616 3610 735 +f 546 3125 237 +f 546 237 260 +f 3801 1162 55 +f 3801 1128 2292 +f 539 2449 3511 +f 2211 430 196 +f 430 2072 196 +f 1993 2376 2508 +f 735 734 975 +f 1782 2072 313 +f 220 3761 30 +f 220 1116 1490 +f 643 2727 528 +f 3513 1694 528 +f 2727 3176 528 +f 3866 787 3012 +f 2392 3866 403 +f 403 3866 2375 +f 2803 1938 1138 +f 643 2836 2727 +f 2716 1755 1905 +f 2203 802 3425 +f 3345 2659 2292 +f 1128 3345 2292 +f 1128 1461 3345 +f 1461 1441 3345 +f 3812 1461 3724 +f 3812 2276 1441 +f 1461 3812 1441 +f 2724 2276 3812 +f 2211 2549 2724 +f 2265 1766 2072 +f 2309 2265 3377 +f 906 3377 2265 +f 1782 906 2265 +f 1857 906 67 +f 1802 1857 67 +f 2146 3718 2537 +f 3513 3489 1116 +f 2472 818 1989 +f 303 1679 2472 +f 818 1679 670 +f 1679 896 670 +f 1679 484 896 +f 3854 2334 403 +f 3298 3854 2803 +f 2659 2141 1241 +f 3345 2141 2659 +f 2276 2549 2695 +f 2695 2549 3930 +f 2211 3930 2549 +f 2426 1766 1647 +f 2265 2309 1647 +f 2549 2276 2724 +f 1766 1926 2211 +f 2426 1926 1766 +f 1766 2265 1647 +f 2879 1142 829 +f 3482 829 1142 +f 1142 2879 1255 +f 545 3482 2935 +f 3863 545 2935 +f 2935 2465 3863 +f 1720 3638 2677 +f 1026 2192 857 +f 542 3237 2939 +f 542 2939 3485 +f 1186 2539 3202 +f 1041 3103 467 +f 467 1047 1182 +f 467 3103 1047 +f 3103 1041 1094 +f 3103 1094 2739 +f 2738 551 3626 +f 61 781 2330 +f 3626 566 2738 +f 1820 3424 3506 +f 559 3424 1820 +f 197 3097 559 +f 2288 3638 1720 +f 2288 958 3638 +f 2351 3626 3565 +f 3097 197 2023 +f 1628 2023 2876 +f 1902 2876 1026 +f 2876 1902 1628 +f 958 3267 3638 +f 343 3529 3886 +f 2353 1417 1160 +f 1742 1436 257 +f 1163 1224 579 +f 1163 1614 1224 +f 3821 2806 3768 +f 61 2330 1885 +f 2972 61 2151 +f 1742 2867 1436 +f 3135 2867 1742 +f 3135 461 2867 +f 3135 2454 461 +f 1428 2329 3792 +f 1106 1890 3199 +f 1106 1562 1890 +f 3792 2329 3267 +f 
1747 1562 3202 +f 684 1747 3202 +f 1761 1699 2806 +f 979 1224 1614 +f 684 241 1747 +f 684 660 241 +f 1288 1509 784 +f 2776 1761 1528 +f 467 3235 1041 +f 3026 1224 2156 +f 864 1577 1224 +f 829 1418 2879 +f 2879 2967 1255 +f 1094 1041 3809 +f 785 979 1614 +f 369 1699 1761 +f 2806 1528 1761 +f 979 2156 1224 +f 2033 1669 3536 +f 2033 3536 1885 +f 2535 977 826 +f 3297 1885 3536 +f 2535 3217 977 +f 41 3782 1466 +f 2305 778 2928 +f 1412 41 1466 +f 2673 51 1037 +f 51 2673 3622 +f 1410 460 3484 +f 2519 3622 1391 +f 2305 2928 3885 +f 460 3782 41 +f 2194 3582 3900 +f 2941 3582 2194 +f 3707 2941 2245 +f 3707 2245 1344 +f 398 1955 3038 +f 2986 41 1412 +f 776 3026 1910 +f 3297 3092 1401 +f 2949 1401 634 +f 1771 979 785 +f 384 2156 1955 +f 2033 1885 550 +f 3885 2928 2710 +f 1578 384 3216 +f 2288 1672 958 +f 2654 2033 550 +f 2305 1885 2949 +f 1885 2305 833 +f 3885 833 2305 +f 1270 1603 702 +f 2156 384 1578 +f 1437 1603 1270 +f 1955 2156 1771 +f 2784 2677 3636 +f 3026 2156 1578 +f 2156 979 1771 +f 3297 3536 3092 +f 3538 3470 1437 +f 1723 3470 3538 +f 833 61 1885 +f 330 2528 224 +f 3092 84 1401 +f 2986 2852 1723 +f 330 224 756 +f 1578 1910 3026 +f 2949 3297 1401 +f 1885 3297 2949 +f 634 778 2949 +f 2305 2949 778 +f 398 384 1955 +f 3885 2710 2151 +f 2151 833 3885 +f 634 1401 3007 +f 3620 848 922 +f 3620 3863 848 +f 3863 3620 628 +f 2330 2049 1885 +f 3750 504 2009 +f 2465 1334 848 +f 2257 2691 553 +f 1609 83 2257 +f 83 1609 650 +f 2046 1891 412 +f 1897 1538 2653 +f 1270 3484 460 +f 1460 650 1609 +f 1897 2967 1538 +f 2465 848 3863 +f 3782 1410 2371 +f 550 1885 599 +f 1509 199 1703 +f 1509 1288 199 +f 860 197 559 +f 3267 958 653 +f 3529 343 2852 +f 1820 860 559 +f 958 3401 653 +f 2986 1723 3538 +f 1672 3401 958 +f 702 1344 1270 +f 3809 1190 1094 +f 3103 2739 1047 +f 3538 1437 460 +f 3202 1182 1047 +f 1077 467 1182 +f 1077 1182 2539 +f 599 2654 550 +f 1024 3098 91 +f 1024 3627 3098 +f 1182 3202 2539 +f 3482 545 1418 +f 3482 1418 829 +f 2967 1897 1255 +f 61 833 2151 +f 781 3728 1506 +f 628 2009 3863 +f 2677 1391 3622 +f 553 2691 2770 +f 2677 2673 1037 +f 553 1609 2257 +f 3202 2939 3237 +f 2535 1037 3217 +f 2046 1897 1891 +f 1186 3202 3237 +f 545 3863 2049 +f 1418 2049 1506 +f 2329 2245 2941 +f 1506 3728 2967 +f 2329 2941 2194 +f 1024 542 3485 +f 2194 3900 340 +f 3237 449 1186 +f 2329 2194 340 +f 3237 542 449 +f 340 3900 2536 +f 808 1703 1602 +f 1703 808 1509 +f 808 3529 3439 +f 2677 3622 2673 +f 1024 1026 857 +f 2535 3636 2677 +f 3529 808 3886 +f 1707 1538 2967 +f 3707 3582 2941 +f 2967 3728 1707 +f 2093 2536 641 +f 2986 3529 2852 +f 1047 826 660 +f 2192 475 857 +f 2594 475 2795 +f 1047 660 3202 +f 2192 860 475 +f 2396 3904 3439 +f 475 860 2795 +f 3439 1509 808 +f 3202 660 684 +f 1820 330 2021 +f 3770 3199 140 +f 347 3770 140 +f 3770 347 2454 +f 2454 3932 461 +f 756 2795 2021 +f 756 2021 330 +f 3202 1562 1106 +f 3202 1106 3199 +f 330 1820 2528 +f 1903 2009 504 +f 3202 3199 3770 +f 3770 2454 2939 +f 3932 3690 466 +f 1820 2012 2528 +f 1417 2353 1902 +f 1902 1770 1628 +f 1742 257 3485 +f 2023 1628 3825 +f 3825 3097 2023 +f 553 2528 2012 +f 2939 1742 3485 +f 2012 2465 1460 +f 559 3097 3047 +f 559 3047 3424 +f 567 3424 3047 +f 542 91 449 +f 3506 3424 567 +f 91 542 1024 +f 2465 2935 3624 +f 2046 2465 3624 +f 553 2012 1460 +f 2465 2046 1460 +f 3624 1255 2046 +f 3485 257 1160 +f 1142 3624 2935 +f 1142 1255 3624 +f 1142 2935 3482 +f 1160 1417 3485 +f 1417 1026 3485 +f 2528 2142 224 +f 1417 1902 1026 +f 1820 3506 1067 +f 1067 3506 3169 +f 3169 3365 1334 +f 1506 2330 781 +f 1067 3169 1334 +f 1506 2049 2330 +f 3023 848 1334 
+f 848 3023 922 +f 3620 922 504 +f 3750 3620 504 +f 3750 628 3620 +f 628 3750 2009 +f 553 2142 2528 +f 784 1509 3439 +f 70 1024 857 +f 2879 1506 2967 +f 545 2049 1418 +f 1255 1897 2046 +f 1506 2879 1418 +f 1707 3728 926 +f 1508 1130 781 +f 926 3728 1130 +f 1897 2653 1891 +f 781 1130 3728 +f 1877 992 1368 +f 1368 2784 1877 +f 1190 2447 1094 +f 2447 992 2739 +f 1368 992 2362 +f 992 1877 3636 +f 2447 2739 1094 +f 2739 992 3636 +f 3000 1720 1368 +f 1877 2784 3636 +f 1720 2784 1368 +f 1720 2677 2784 +f 3000 2288 1720 +f 3087 70 35 +f 3098 2635 91 +f 2082 1825 3201 +f 2981 3028 1323 +f 2981 1323 244 +f 2971 3609 151 +f 3022 625 3809 +f 3809 625 985 +f 1335 585 175 +f 1728 1858 1979 +f 270 784 3439 +f 270 1702 784 +f 1672 3000 2179 +f 1672 2179 2233 +f 1056 1750 2355 +f 1056 2125 1750 +f 2125 1056 2069 +f 3906 3240 1323 +f 792 3867 3681 +f 3867 3255 3681 +f 3502 831 1252 +f 1278 133 835 +f 835 3507 1278 +f 3507 835 2570 +f 2613 3230 3507 +f 1958 2095 9 +f 1400 2095 945 +f 945 904 1400 +f 1702 904 407 +f 904 945 407 +f 3165 50 831 +f 407 3913 1702 +f 3913 898 1453 +f 933 2760 3798 +f 3913 407 898 +f 2198 1410 1428 +f 3356 1372 158 +f 3248 3201 2885 +f 3248 2885 3906 +f 1627 412 1891 +f 650 412 3947 +f 83 650 3947 +f 3947 412 1627 +f 2257 83 2691 +f 2142 2770 2764 +f 3126 853 2396 +f 1976 3904 2396 +f 66 3674 3847 +f 2576 70 3087 +f 3627 70 2576 +f 540 3867 2884 +f 540 2884 186 +f 3627 2576 3702 +f 2559 2308 3289 +f 3098 3627 2635 +f 3702 2635 3627 +f 1453 1300 1587 +f 1587 1300 1447 +f 1453 767 1300 +f 1453 898 767 +f 1572 871 91 +f 2412 440 2539 +f 871 1572 3219 +f 440 3219 2338 +f 440 2338 3880 +f 1190 985 2362 +f 3880 2338 823 +f 3880 3678 3406 +f 3880 823 3678 +f 1854 562 598 +f 3678 3235 3406 +f 3235 3678 823 +f 3235 823 228 +f 3235 228 3022 +f 1854 598 1858 +f 3022 228 625 +f 2996 831 1855 +f 625 2290 2308 +f 1854 1858 3887 +f 985 625 2308 +f 1854 3887 1615 +f 2362 3000 1368 +f 1349 2561 2282 +f 2282 2561 3443 +f 3401 1672 2233 +f 3443 2561 2220 +f 987 653 3401 +f 671 2760 2884 +f 1219 1428 3792 +f 2117 2323 1141 +f 2371 1412 1466 +f 2000 2661 2282 +f 1412 2371 3126 +f 3697 1750 2125 +f 2125 1735 3697 +f 2559 2179 3000 +f 2117 1141 2400 +f 2179 2559 562 +f 2233 2179 562 +f 2705 905 1735 +f 1964 2572 2824 +f 1454 1323 3028 +f 1733 3918 2323 +f 2827 2839 1408 +f 2827 1408 1964 +f 1141 2824 2572 +f 2751 3628 671 +f 3918 3019 1524 +f 2661 1349 2282 +f 1841 3443 1945 +f 3799 3475 2121 +f 1976 1278 3904 +f 903 188 3475 +f 3904 3230 3439 +f 1524 1215 3918 +f 2839 3918 1215 +f 2000 3156 1064 +f 3156 2000 3443 +f 2751 3019 1733 +f 2323 2824 1141 +f 2524 2117 2400 +f 609 2323 2117 +f 1215 3156 2839 +f 3156 3443 1841 +f 2824 2323 2827 +f 3443 2000 2282 +f 1215 1064 3156 +f 66 3847 1776 +f 1733 609 3628 +f 2839 2827 2323 +f 2733 980 1064 +f 1064 980 2000 +f 1408 3156 1841 +f 1141 2572 2400 +f 3156 1408 2839 +f 2824 2827 1964 +f 1524 2733 1215 +f 1215 2733 1064 +f 3439 1958 9 +f 2198 1219 3697 +f 9 1400 270 +f 756 224 2594 +f 3400 2479 3609 +f 186 2884 2760 +f 2142 2764 224 +f 871 1186 449 +f 158 1372 22 +f 2621 3628 609 +f 131 642 3870 +f 2613 3507 3255 +f 680 1874 1092 +f 3630 1874 680 +f 1776 792 2570 +f 2526 3689 3512 +f 773 3420 3689 +f 773 3759 3420 +f 1587 1447 349 +f 1252 50 3759 +f 831 50 1252 +f 625 228 2290 +f 784 3913 1453 +f 9 2095 1400 +f 3799 2971 151 +f 3230 1958 3439 +f 3507 3230 3904 +f 3507 3904 1278 +f 3219 1572 2338 +f 1976 133 1278 +f 2371 1014 853 +f 1410 2198 1014 +f 898 2621 767 +f 2570 3255 3507 +f 3255 540 2613 +f 3000 1672 2288 +f 2362 985 3000 +f 3867 540 3255 +f 3681 3255 
2570 +f 2570 792 3681 +f 853 1776 835 +f 66 1776 905 +f 1776 3847 792 +f 1041 3022 3809 +f 1041 3235 3022 +f 91 2635 1572 +f 66 905 2705 +f 2396 853 1976 +f 3880 2539 440 +f 3126 2371 853 +f 2412 3219 440 +f 2069 2705 1735 +f 2069 1735 2125 +f 2355 1750 1615 +f 1566 3890 3087 +f 3165 831 2996 +f 2355 1615 3305 +f 420 3305 3887 +f 3887 1728 420 +f 1979 1858 598 +f 1615 3887 3305 +f 131 3475 3799 +f 1858 1728 3887 +f 1024 70 3627 +f 857 35 70 +f 1410 1014 2371 +f 1566 1151 3890 +f 1219 1854 3697 +f 1566 2594 1151 +f 585 1533 3289 +f 585 3289 175 +f 1533 562 2559 +f 1151 2594 3502 +f 175 3289 2308 +f 1335 175 648 +f 1335 648 244 +f 2308 648 175 +f 792 3847 3867 +f 767 2384 1300 +f 2186 1855 3502 +f 2186 3502 224 +f 3502 2594 224 +f 3028 2981 874 +f 874 1454 3028 +f 2308 2981 244 +f 244 648 2308 +f 874 2981 2308 +f 3906 1454 3606 +f 3606 3248 3906 +f 1454 874 2290 +f 2290 3606 1454 +f 1453 1587 784 +f 3606 2290 3248 +f 987 562 1854 +f 3913 784 1702 +f 3201 3248 823 +f 904 1702 270 +f 270 1400 904 +f 3201 823 2082 +f 2764 2186 224 +f 2186 2764 2770 +f 3356 2082 1372 +f 823 1372 2082 +f 2186 2770 2691 +f 1372 823 2338 +f 2691 3947 2186 +f 9 270 3439 +f 3947 3609 2186 +f 22 1372 1572 +f 3947 335 3609 +f 1572 3086 22 +f 22 3086 2850 +f 3086 1092 2850 +f 133 1976 853 +f 3947 2691 83 +f 3086 3702 1092 +f 3400 2971 3799 +f 503 2647 1627 +f 1627 2647 3947 +f 2653 1538 40 +f 3702 3630 680 +f 3702 680 1092 +f 3630 3702 1542 +f 2198 1428 1219 +f 987 1219 3792 +f 2526 3512 1542 +f 2384 767 2621 +f 1542 3702 3890 +f 1542 3890 2526 +f 3689 2526 773 +f 3890 3759 773 +f 773 2526 3890 +f 987 3401 2233 +f 874 2308 2290 +f 3759 3890 1151 +f 1151 1252 3759 +f 823 3248 2290 +f 945 898 407 +f 2095 2613 945 +f 467 1077 3235 +f 3406 1077 2539 +f 2613 2095 1958 +f 1190 3809 985 +f 1077 3406 3235 +f 823 2290 228 +f 2539 3880 3406 +f 3284 1855 1906 +f 3284 1906 1691 +f 2198 3697 905 +f 2412 871 3219 +f 1691 1906 2479 +f 871 449 91 +f 2479 1906 3609 +f 3230 2613 1958 +f 1186 2412 2539 +f 1186 871 2412 +f 3782 2371 1466 +f 1572 3702 3086 +f 35 857 1566 +f 540 186 2613 +f 905 1776 853 +f 133 853 835 +f 3798 3628 2621 +f 1014 905 853 +f 933 945 186 +f 1300 2384 1447 +f 186 2760 933 +f 642 1134 1707 +f 3321 3368 2647 +f 3368 335 2647 +f 3368 3259 335 +f 3368 131 3259 +f 3609 3259 151 +f 131 1134 642 +f 1707 926 642 +f 1627 1891 2653 +f 1627 2653 503 +f 131 3799 151 +f 642 926 393 +f 3259 3609 335 +f 3870 1130 1508 +f 40 1134 3321 +f 1134 40 1707 +f 3259 131 151 +f 3321 131 3368 +f 335 3947 2647 +f 3321 2647 503 +f 40 503 2653 +f 3321 503 40 +f 1538 1707 40 +f 131 3321 1134 +f 642 393 3870 +f 3870 393 1130 +f 3721 177 2655 +f 3114 3312 3760 +f 3312 3114 3399 +f 1852 2456 3879 +f 3879 2456 3228 +f 3228 2655 3879 +f 3024 2655 177 +f 3094 547 2875 +f 1852 1678 3399 +f 1678 1852 3879 +f 2244 616 1689 +f 3312 1678 3760 +f 1678 3312 3399 +f 3618 1593 2036 +f 3141 1113 1205 +f 3432 2363 177 +f 2244 3755 837 +f 3024 2623 3879 +f 3488 1113 3141 +f 2363 3024 177 +f 2858 3760 1678 +f 377 3432 2244 +f 3618 377 2244 +f 3141 1205 1573 +f 3618 2244 837 +f 2858 433 3760 +f 433 2858 1205 +f 2397 3618 2036 +f 3879 2623 1678 +f 1741 3024 2363 +f 1741 2623 3024 +f 2397 2036 2529 +f 2586 2363 3432 +f 2586 3432 377 +f 377 3618 2397 +f 2586 377 2699 +f 511 1373 1986 +f 2547 3488 3141 +f 2982 1696 1573 +f 2858 1585 1573 +f 1585 3604 2554 +f 3793 629 444 +f 1471 611 2005 +f 2218 2648 3815 +f 2586 444 629 +f 444 3891 3793 +f 1170 3481 427 +f 1952 1696 2982 +f 2397 3060 153 +f 2554 3481 1952 +f 552 959 1561 +f 552 3481 1170 +f 1170 427 2666 +f 3604 272 
427 +f 1573 1696 3141 +f 2554 2982 1585 +f 1585 2982 1573 +f 2699 153 2005 +f 3481 2554 427 +f 1952 2982 2554 +f 153 1471 2005 +f 1839 611 1277 +f 511 308 3060 +f 1561 3277 1952 +f 2554 3604 427 +f 3604 1584 2669 +f 2858 1584 1585 +f 2586 2699 444 +f 153 2699 377 +f 3604 1585 1584 +f 3889 552 1170 +f 3889 1170 2666 +f 3203 3889 2666 +f 3203 2666 2498 +f 267 365 920 +f 2648 267 3815 +f 3815 3633 2581 +f 1789 1827 3633 +f 1839 3003 611 +f 817 2498 2666 +f 2498 817 2185 +f 2498 2185 267 +f 2185 3815 267 +f 3042 1789 3633 +f 3891 444 2173 +f 1789 3042 3003 +f 2199 1604 458 +f 2312 1407 18 +f 3318 886 1764 +f 2583 1148 2782 +f 1148 2583 2285 +f 1272 1774 2285 +f 869 1217 2312 +f 309 2312 1718 +f 869 2312 309 +f 309 1718 3552 +f 322 2084 3552 +f 322 1764 2084 +f 1764 886 914 +f 914 886 2782 +f 2782 1432 1493 +f 1493 1432 1148 +f 644 1493 1148 +f 2075 2609 644 +f 2609 2075 797 +f 1123 797 2303 +f 1123 2303 2639 +f 2303 1774 2639 +f 1169 2639 1774 +f 2675 1261 2050 +f 2050 346 2675 +f 18 3552 1718 +f 18 322 3552 +f 322 3318 1764 +f 2583 2782 886 +f 1148 1432 2782 +f 2285 644 1148 +f 2075 644 2285 +f 797 2075 1774 +f 797 1774 2303 +f 3254 1164 105 +f 3254 1570 1164 +f 3318 2583 886 +f 2199 2512 970 +f 3668 3777 2495 +f 2495 1994 3668 +f 2523 1994 2495 +f 2455 2818 1869 +f 1740 432 1407 +f 1168 2455 2785 +f 894 1860 1280 +f 963 2495 3402 +f 2242 3402 3829 +f 962 3473 432 +f 1774 1272 1169 +f 1693 2242 1168 +f 1740 1693 2919 +f 136 2765 3791 +f 432 3318 18 +f 533 1860 2944 +f 1604 1280 3711 +f 3829 3402 2495 +f 2196 1693 1168 +f 1693 2196 2919 +f 1247 2285 2583 +f 3241 1693 1740 +f 3241 1740 1407 +f 3829 3711 3032 +f 1740 962 432 +f 2242 3241 3402 +f 3473 3318 432 +f 3473 1247 2583 +f 2818 2455 136 +f 2455 2765 136 +f 894 2944 1860 +f 1860 3791 1280 +f 1407 432 18 +f 1693 3241 2242 +f 342 2380 2818 +f 2273 3921 3803 +f 3803 3071 2273 +f 1548 491 2761 +f 2761 491 3945 +f 3920 3921 1390 +f 1423 2182 1663 +f 1423 1663 3531 +f 3921 3920 3608 +f 3651 2182 3608 +f 3531 1663 3945 +f 3945 491 3929 +f 491 707 3929 +f 970 288 935 +f 970 2512 288 +f 2512 2968 288 +f 2968 2199 458 +f 2011 458 1717 +f 1499 2011 1717 +f 2050 1499 1717 +f 2050 1261 1499 +f 2675 1343 1261 +f 1343 2675 346 +f 346 2732 1343 +f 2732 346 3777 +f 2732 3777 77 +f 3777 3668 77 +f 2761 3651 1797 +f 2523 1435 1994 +f 859 128 3071 +f 128 859 3252 +f 1217 1435 963 +f 1634 3921 2273 +f 1390 3077 1797 +f 1797 3920 1390 +f 3651 3608 3920 +f 2182 3651 1663 +f 1548 707 491 +f 1548 935 707 +f 1548 970 935 +f 2199 2968 2512 +f 1604 1717 458 +f 963 1435 2495 +f 1634 2273 3071 +f 2690 3071 128 +f 1634 3077 3921 +f 2690 1634 3071 +f 3651 3920 1797 +f 2761 1663 3651 +f 1663 2761 3945 +f 2761 1797 1928 +f 3266 2761 1928 +f 436 1928 1634 +f 3598 2319 718 +f 1567 2540 2026 +f 3459 2944 1078 +f 1634 101 436 +f 436 3266 1928 +f 1078 2944 45 +f 894 1280 1548 +f 2944 894 45 +f 3519 2690 1552 +f 2419 2015 2215 +f 1298 2026 2490 +f 1298 1567 2026 +f 258 2015 1298 +f 1592 1298 203 +f 3908 101 1634 +f 3519 2540 2690 +f 101 3266 436 +f 3514 2215 1359 +f 1212 3514 1359 +f 45 89 2419 +f 3908 3266 101 +f 2026 2540 3519 +f 1634 2690 2540 +f 45 3266 2284 +f 1567 2284 3908 +f 2319 1078 718 +f 1359 2215 258 +f 2419 2215 718 +f 1359 258 1045 +f 2015 1450 1567 +f 2419 89 1450 +f 1298 2015 1567 +f 1450 2284 1567 +f 89 45 2284 +f 89 2284 1450 +f 258 2215 2015 +f 3282 3514 1212 +f 3894 2236 3234 +f 2561 2913 2220 +f 1504 2642 2236 +f 1504 2236 3894 +f 2236 571 3234 +f 571 1754 1775 +f 2466 2220 13 +f 2889 2403 1985 +f 1985 1754 2889 +f 571 3282 2889 +f 3888 13 2220 +f 13 2354 
1495 +f 13 3888 2354 +f 1662 2723 2154 +f 3517 2354 3888 +f 2370 2642 3463 +f 2466 13 2642 +f 2889 1754 571 +f 571 3907 204 +f 2370 2466 2642 +f 2931 2723 672 +f 1886 3459 3269 +f 3901 1886 342 +f 3234 571 1775 +f 3443 2220 2466 +f 2354 2319 1495 +f 3894 2068 3463 +f 1895 1886 3517 +f 1662 2370 672 +f 2889 3282 1864 +f 3894 3234 2068 +f 2642 3907 2236 +f 1349 2913 2561 +f 3594 3517 3888 +f 1495 2319 3598 +f 2236 3907 571 +f 2370 1662 2466 +f 3463 1871 2370 +f 1662 3443 2466 +f 2723 1662 672 +f 1886 3901 3459 +f 3517 3269 2354 +f 3517 1886 3269 +f 1504 3463 2642 +f 3463 1504 3894 +f 1349 1135 2913 +f 3540 509 1945 +f 2344 1964 1408 +f 2995 2723 2931 +f 2000 1349 2661 +f 2995 2154 2723 +f 838 1135 980 +f 2995 1945 2154 +f 1945 2995 3540 +f 509 2201 1945 +f 2154 1945 3443 +f 838 3872 1135 +f 1135 1349 2000 +f 1083 104 2720 +f 3458 757 104 +f 3458 2596 757 +f 2132 104 1083 +f 104 2132 3458 +f 2687 2585 757 +f 1295 3943 3196 +f 1254 2356 3526 +f 2356 601 3526 +f 2591 601 1660 +f 563 1729 1681 +f 601 2356 1660 +f 1729 563 1900 +f 2591 1660 2056 +f 3802 1235 3159 +f 1235 3802 2115 +f 3373 1235 2115 +f 2591 2056 3415 +f 2543 3373 2115 +f 2543 2115 164 +f 164 2115 2955 +f 450 3496 2130 +f 554 3049 1223 +f 554 3415 2056 +f 1223 3049 748 +f 1916 1223 748 +f 1223 1916 3316 +f 1937 2423 748 +f 1327 155 1918 +f 1937 2915 2423 +f 38 3822 3175 +f 1520 685 3175 +f 748 2423 1916 +f 3336 2381 526 +f 156 526 302 +f 156 302 3306 +f 1083 2720 71 +f 1916 2915 990 +f 1916 990 100 +f 990 3501 100 +f 100 3501 1108 +f 2377 2438 1491 +f 2438 2377 2930 +f 1569 100 1108 +f 100 1569 1916 +f 3337 1569 1108 +f 3337 1108 1956 +f 2029 65 1956 +f 65 3337 1956 +f 2029 1956 586 +f 1305 1225 586 +f 1225 1305 1150 +f 375 2314 1150 +f 1285 586 1225 +f 3440 375 1676 +f 3017 891 1362 +f 3017 351 891 +f 3520 481 486 +f 443 1382 64 +f 1611 1235 2543 +f 1611 2543 164 +f 1611 164 2587 +f 450 331 2587 +f 331 450 296 +f 2587 331 1611 +f 1491 2505 3590 +f 3864 2377 3590 +f 296 323 3614 +f 3614 323 230 +f 331 296 3614 +f 2743 1912 230 +f 33 496 2153 +f 2787 71 2720 +f 1881 3944 1095 +f 2411 1095 1579 +f 1485 2205 1153 +f 364 1274 2555 +f 3520 486 851 +f 3476 1934 1485 +f 1491 3590 2377 +f 2377 3864 2930 +f 1485 443 399 +f 678 1115 2516 +f 1115 2588 2516 +f 3709 373 481 +f 33 2993 1231 +f 2205 1579 54 +f 2857 2993 1748 +f 1748 2153 3426 +f 1901 3823 1660 +f 486 2826 564 +f 2640 586 1956 +f 2640 1305 586 +f 3440 1676 1362 +f 1540 3017 1362 +f 3017 1540 351 +f 351 1540 1650 +f 678 2516 2556 +f 423 600 1290 +f 1729 2787 2720 +f 2857 563 1681 +f 1748 563 2857 +f 2228 364 180 +f 2034 3102 1650 +f 2034 3159 3102 +f 3802 3159 2034 +f 3373 2543 1235 +f 2955 2587 164 +f 2587 2955 450 +f 2130 296 450 +f 2130 323 296 +f 1738 230 323 +f 230 1738 3699 +f 3553 2743 230 +f 3553 1011 1896 +f 1011 2262 1896 +f 230 3699 1011 +f 1011 1489 3902 +f 2262 1011 3902 +f 2262 3902 2500 +f 3822 38 1918 +f 3590 1095 2411 +f 3579 3902 1327 +f 526 2381 302 +f 2500 3902 3579 +f 2411 1579 2205 +f 1918 2073 1327 +f 2073 1918 38 +f 2073 38 685 +f 3730 3174 1485 +f 685 38 3175 +f 3864 355 2930 +f 3579 1327 2073 +f 156 3336 526 +f 3258 1520 1798 +f 156 3306 3794 +f 3794 1798 156 +f 2505 3794 3306 +f 1491 770 2505 +f 770 1491 2438 +f 2930 355 2112 +f 2112 3794 770 +f 2112 770 2438 +f 2438 2930 2112 +f 2112 355 3869 +f 2516 33 1231 +f 2516 496 33 +f 1966 777 2228 +f 1681 1729 1526 +f 1254 3943 2356 +f 1900 2787 1729 +f 2205 54 1153 +f 180 364 1426 +f 3730 1485 399 +f 496 1290 2153 +f 1274 364 777 +f 337 851 564 +f 486 481 373 +f 373 2826 486 +f 2556 2516 1231 +f 496 2516 2588 
+f 496 2588 3933 +f 622 3877 758 +f 423 1214 600 +f 1998 1966 2228 +f 54 1579 3944 +f 1660 2356 1901 +f 2056 1660 3823 +f 564 851 486 +f 373 3709 2045 +f 1540 1362 1676 +f 1650 1540 2034 +f 1380 64 2999 +f 1153 952 1485 +f 443 1485 1382 +f 364 2228 777 +f 2993 2153 1748 +f 33 2153 2993 +f 3158 1274 3296 +f 1274 3158 2555 +f 3115 2555 2374 +f 92 43 3115 +f 43 2555 3115 +f 380 370 2942 +f 3685 337 1358 +f 2942 43 92 +f 370 1358 1598 +f 337 1598 1358 +f 337 3685 3128 +f 851 337 3128 +f 3128 1006 851 +f 670 2758 376 +f 2103 1463 484 +f 1463 2103 1249 +f 376 3520 851 +f 376 851 1006 +f 2758 3520 376 +f 3520 2758 484 +f 481 3520 484 +f 1463 481 484 +f 3709 481 1463 +f 1249 3709 1463 +f 3173 3709 1249 +f 2045 3709 3173 +f 2045 2334 1380 +f 3173 2334 2045 +f 64 2334 3854 +f 443 3854 3298 +f 399 3298 2324 +f 2840 2324 3298 +f 2840 1366 2324 +f 948 1366 2966 +f 2966 2894 948 +f 1560 3869 355 +f 355 3864 3074 +f 1560 3074 3730 +f 3074 1560 355 +f 2672 496 3933 +f 423 496 2672 +f 1441 622 3345 +f 3345 1214 2141 +f 1214 423 2141 +f 622 1214 3345 +f 2276 3877 1441 +f 3494 3877 2276 +f 2695 3494 2276 +f 3494 2695 3930 +f 1998 3494 3930 +f 1998 3930 1421 +f 1421 3930 1926 +f 2426 3243 1926 +f 1926 3243 1421 +f 2426 3686 3243 +f 1647 3686 2426 +f 3686 1647 2309 +f 777 2309 3377 +f 3296 1274 3377 +f 906 3296 3377 +f 906 1857 3296 +f 3296 1857 3158 +f 3158 1857 1802 +f 3187 2555 3158 +f 3187 3158 1802 +f 3115 2374 2146 +f 3115 2146 92 +f 2537 2942 92 +f 92 2146 2537 +f 380 2942 2537 +f 3489 370 380 +f 370 3489 1358 +f 3685 2388 3128 +f 3128 2388 1232 +f 1116 380 2537 +f 1306 2411 1934 +f 3489 380 1116 +f 3489 3513 1358 +f 1306 1934 3476 +f 3685 1358 3513 +f 3513 303 3685 +f 1232 1006 3128 +f 1989 376 1006 +f 1989 818 376 +f 376 818 670 +f 2758 670 896 +f 896 484 2758 +f 493 1249 2103 +f 1249 493 3173 +f 1006 1232 2472 +f 1989 1006 2472 +f 2103 484 2392 +f 493 2103 2392 +f 2392 3173 493 +f 2334 3173 403 +f 403 3173 2392 +f 64 1380 2334 +f 443 64 3854 +f 399 443 3298 +f 948 399 1366 +f 3730 399 948 +f 2894 3730 948 +f 1560 3730 2894 +f 1366 2840 1333 +f 1333 2966 1366 +f 2894 3869 1560 +f 3869 2894 2966 +f 3590 1306 3864 +f 3174 1306 3476 +f 3074 3864 3174 +f 3174 3476 1485 +f 3864 1306 3174 +f 758 600 1214 +f 622 758 1214 +f 1441 3877 622 +f 1421 3243 1998 +f 1998 3243 1966 +f 2555 3187 2374 +f 2309 1966 3686 +f 777 1966 2309 +f 1274 777 3377 +f 1934 2411 2205 +f 1934 2205 1485 +f 3174 3730 3074 +f 423 2672 2141 +f 2141 2672 3570 +f 2324 1366 399 +f 1966 3243 3686 +f 1304 2445 1657 +f 1907 1884 659 +f 2560 2077 2552 +f 2552 1850 765 +f 1003 3206 3344 +f 1003 3344 2114 +f 2077 3116 508 +f 2077 2560 3116 +f 765 2916 2560 +f 2916 765 1850 +f 1089 928 2679 +f 2679 794 2254 +f 2254 794 3020 +f 3646 3020 794 +f 223 3020 2887 +f 3020 451 2887 +f 3591 2887 1870 +f 3591 1870 1600 +f 3568 1089 2679 +f 3568 2679 2254 +f 3020 223 2254 +f 1850 2786 2916 +f 892 1057 1570 +f 841 59 3752 +f 1884 1737 3701 +f 2811 3062 3361 +f 3062 2811 1303 +f 3701 3361 3062 +f 1303 794 3062 +f 794 1303 658 +f 3479 3130 658 +f 794 658 3130 +f 103 2104 3130 +f 2656 1057 3206 +f 3835 3130 2104 +f 3835 794 3130 +f 2445 1304 3546 +f 2445 3546 287 +f 3206 1911 3344 +f 2114 3344 381 +f 381 508 2114 +f 2560 2552 765 +f 3603 1164 1570 +f 2656 3446 141 +f 268 841 892 +f 892 841 1911 +f 268 334 841 +f 334 2158 3752 +f 59 334 3752 +f 841 3752 381 +f 1001 381 3752 +f 1884 1907 1737 +f 1737 1907 3361 +f 928 1884 3701 +f 3701 1737 3361 +f 2679 928 794 +f 3070 658 1303 +f 3835 1220 534 +f 3646 3835 534 +f 794 3835 3646 +f 451 3020 3646 +f 2158 1907 659 +f 
2769 1907 2158 +f 2769 3642 1907 +f 1859 2957 195 +f 334 59 841 +f 3206 1057 1911 +f 841 381 1911 +f 1001 2158 659 +f 268 892 3211 +f 268 273 334 +f 2077 381 1001 +f 3206 1003 2936 +f 2786 1850 1089 +f 1907 2811 3361 +f 2811 3070 1303 +f 451 1870 2887 +f 2887 2166 223 +f 2887 3591 2166 +f 3580 1557 2702 +f 1644 3030 2877 +f 1644 418 3030 +f 1452 3580 2702 +f 1452 3783 3580 +f 1557 42 2702 +f 2989 1859 2504 +f 1080 1515 2697 +f 1515 269 2697 +f 1528 3783 1452 +f 908 2342 1072 +f 1072 2342 1528 +f 1528 1452 1072 +f 1859 195 2504 +f 3567 1644 2877 +f 3291 1415 3358 +f 839 295 2582 +f 3523 154 947 +f 3291 1990 3392 +f 947 3291 3523 +f 3392 3523 3291 +f 1415 947 2741 +f 947 1415 3291 +f 472 1123 2639 +f 3603 105 1164 +f 1878 3537 3131 +f 404 2238 2521 +f 2521 869 404 +f 2207 2219 404 +f 2238 404 2219 +f 1878 3131 117 +f 1657 117 3131 +f 3546 3647 472 +f 1831 287 472 +f 472 287 3546 +f 1831 472 298 +f 1304 1657 3647 +f 3546 1304 3647 +f 154 288 2968 +f 458 2011 154 +f 2011 1499 2741 +f 2741 1499 1261 +f 2741 1261 1343 +f 2741 1343 2732 +f 2732 77 447 +f 447 77 3668 +f 1994 447 3668 +f 1994 1435 126 +f 3291 3358 1990 +f 869 126 1217 +f 2582 1757 119 +f 2207 309 3552 +f 3552 2084 2207 +f 3537 1764 914 +f 2207 2084 3537 +f 3537 914 2782 +f 3131 2782 1493 +f 3647 1493 644 +f 3647 644 2609 +f 2609 797 3647 +f 3647 797 1123 +f 105 298 2639 +f 105 3603 298 +f 3118 447 126 +f 447 1994 126 +f 3523 1987 119 +f 3392 1987 3523 +f 298 141 1831 +f 3603 1057 298 +f 295 1757 2582 +f 288 154 1757 +f 119 1757 154 +f 1415 2741 447 +f 447 2741 2732 +f 3479 744 3130 +f 744 103 3130 +f 744 2104 103 +f 744 3078 2104 +f 2104 3078 1220 +f 3835 2104 1220 +f 1600 1736 3591 +f 2957 3591 1736 +f 534 2553 1438 +f 1736 1600 1870 +f 2553 3078 744 +f 534 1220 3078 +f 3784 1736 1870 +f 3784 2957 1736 +f 195 2957 3784 +f 1438 3479 3070 +f 3078 2553 534 +f 950 3646 534 +f 950 451 3646 +f 1842 451 950 +f 2553 744 3479 +f 1842 950 1438 +f 1146 2863 1085 +f 1146 3533 2863 +f 2275 2595 78 +f 2042 927 2337 +f 541 1146 1085 +f 3164 1536 3170 +f 1536 3164 2595 +f 2863 1438 2386 +f 78 259 2275 +f 3789 1842 529 +f 2863 3533 529 +f 529 1438 2863 +f 2595 514 78 +f 2080 2697 514 +f 3533 1146 541 +f 2386 1085 2863 +f 1842 1438 529 +f 259 3170 2275 +f 2080 514 2595 +f 514 3533 541 +f 3706 2080 2595 +f 514 2042 78 +f 2337 541 1085 +f 2042 514 927 +f 1957 3789 529 +f 927 514 541 +f 514 1957 3533 +f 2275 3170 1536 +f 2595 2275 1536 +f 42 1080 2702 +f 259 78 2042 +f 3331 579 713 +f 1072 3706 3332 +f 1163 579 107 +f 2914 2574 2584 +f 579 3331 107 +f 2574 3073 2584 +f 14 3332 3073 +f 713 2410 3331 +f 1965 728 908 +f 785 1091 1771 +f 1163 107 1091 +f 2410 728 3331 +f 14 1072 3332 +f 3332 3706 2595 +f 3475 188 763 +f 2025 245 763 +f 763 510 612 +f 188 2710 74 +f 2783 398 3038 +f 967 778 2783 +f 74 510 763 +f 3731 319 1158 +f 612 2025 763 +f 131 903 3475 +f 188 74 763 +f 1529 3038 1955 +f 1508 61 2972 +f 2972 2151 2710 +f 2504 195 3784 +f 3784 3789 2504 +f 1957 2877 3789 +f 2697 2702 1080 +f 2697 1957 514 +f 269 3874 3567 +f 3874 1644 3567 +f 3567 2877 1957 +f 2697 3567 1957 +f 2697 269 3567 +f 2504 3789 2877 +f 2702 2697 2080 +f 3073 2574 14 +f 1452 2702 2080 +f 926 1130 393 +f 2080 3706 1452 +f 2914 846 107 +f 1091 107 3558 +f 1965 1072 14 +f 2574 2914 107 +f 3331 1965 2574 +f 14 2574 1965 +f 3706 1072 1452 +f 728 1965 3331 +f 846 3558 107 +f 107 3331 2574 +f 908 1072 1965 +f 2783 245 2025 +f 612 510 967 +f 3442 1158 319 +f 1529 1955 3558 +f 1091 3558 1771 +f 781 61 1508 +f 1158 1529 3731 +f 3442 1529 1158 +f 778 967 2928 +f 903 2710 188 +f 1508 2972 
3870 +f 3731 1529 25 +f 3558 1955 1771 +f 1529 3442 3038 +f 2783 3038 3442 +f 245 3442 319 +f 2025 967 2783 +f 967 2025 612 +f 2783 3442 245 +f 25 3558 846 +f 2710 967 74 +f 74 967 510 +f 967 2710 2928 +f 398 2783 778 +f 2972 2710 903 +f 1775 1985 939 +f 2001 2413 729 +f 1099 1871 1924 +f 672 1871 1099 +f 786 672 2147 +f 2931 672 786 +f 1871 2068 1924 +f 1871 3463 2068 +f 1775 1754 1985 +f 1985 2413 939 +f 1985 3239 2413 +f 1924 1294 1099 +f 1775 939 3234 +f 1099 1294 1862 +f 2147 3540 786 +f 2952 509 3540 +f 2952 3540 2147 +f 786 2995 2931 +f 3540 2995 786 +f 2201 509 2952 +f 3155 2344 214 +f 2344 2201 214 +f 2400 135 3597 +f 2201 2952 214 +f 214 2952 2147 +f 2344 2572 1964 +f 2384 2117 2524 +f 701 522 3121 +f 1513 3140 3121 +f 1373 2943 1762 +f 1986 1513 522 +f 2277 1986 522 +f 511 2277 308 +f 2940 522 701 +f 974 308 2277 +f 830 3573 858 +f 1295 3786 1714 +f 3245 3786 1295 +f 974 2277 2940 +f 3121 522 1513 +f 3573 830 649 +f 3317 2830 2940 +f 1756 1714 3786 +f 3573 2830 858 +f 2830 2987 858 +f 3573 649 1756 +f 1756 649 1714 +f 3786 3245 2728 +f 3786 2686 1756 +f 3786 2728 2686 +f 2728 3011 2686 +f 3140 3004 1665 +f 3121 3140 2604 +f 341 3499 308 +f 3499 341 1277 +f 1295 3196 742 +f 1277 1471 3499 +f 1887 341 974 +f 974 730 1887 +f 730 974 3573 +f 1486 1277 341 +f 341 308 974 +f 974 2940 3573 +f 341 1887 27 +f 3769 1887 730 +f 341 27 1486 +f 1887 3942 27 +f 3942 1887 3769 +f 730 1339 3769 +f 730 2614 1339 +f 742 3011 1295 +f 2728 3245 1295 +f 3769 932 3942 +f 2063 932 3769 +f 3769 1339 2063 +f 2614 730 742 +f 730 3573 3011 +f 742 730 3011 +f 1295 3011 2728 +f 3212 590 3937 +f 3212 2144 590 +f 3212 3834 2144 +f 200 3556 2300 +f 117 2781 1473 +f 189 3212 3937 +f 3212 1909 3834 +f 2445 189 1997 +f 2238 2730 2521 +f 2742 2219 3294 +f 117 1997 2773 +f 287 3212 189 +f 897 352 498 +f 2742 2730 2238 +f 3294 1878 3273 +f 1878 1473 3273 +f 117 1657 1997 +f 2445 1997 1657 +f 1831 1909 3212 +f 3118 2300 897 +f 2219 2742 2238 +f 1473 1878 117 +f 287 189 2445 +f 1831 3212 287 +f 2300 352 897 +f 2300 3556 350 +f 2730 1962 3556 +f 2742 1962 2730 +f 1473 1250 3273 +f 1473 2781 1250 +f 2781 117 2773 +f 1415 897 498 +f 2442 1377 3591 +f 879 2650 2088 +f 2088 2442 1859 +f 698 2650 3288 +f 3288 2650 3329 +f 1859 3591 2957 +f 879 2088 1859 +f 3329 879 2845 +f 879 3329 2650 +f 2088 2650 698 +f 2088 698 1377 +f 2088 1377 2442 +f 2166 3591 1377 +f 3329 2845 3389 +f 1557 3580 2841 +f 551 2231 418 +f 418 2231 2989 +f 1080 1649 1515 +f 3768 1080 3821 +f 3821 1080 42 +f 2748 879 2231 +f 2231 879 1859 +f 269 1649 3768 +f 269 1515 1649 +f 2841 2806 3821 +f 1557 3821 42 +f 3030 418 2989 +f 3874 551 1644 +f 551 418 1644 +f 3580 3783 2841 +f 269 3626 3874 +f 2231 1859 2989 +f 3783 2806 2841 +f 3821 1557 2841 +f 2776 1528 2342 +f 2696 1761 2410 +f 2410 1761 2776 +f 1614 1163 1091 +f 2806 3783 1528 +f 2342 908 2776 +f 1614 1091 785 +f 2410 713 2696 +f 579 2696 713 +f 728 2776 908 +f 2776 728 2410 +f 2828 678 2556 +f 3129 115 2823 +f 3129 2823 3468 +f 255 3838 3468 +f 3129 3838 1369 +f 1369 147 3129 +f 3468 3445 578 +f 3468 578 255 +f 255 578 738 +f 1203 255 738 +f 3148 3911 2754 +f 738 3878 1685 +f 3878 201 520 +f 768 520 2387 +f 2271 520 768 +f 2271 3878 520 +f 1888 3878 2271 +f 3162 2271 768 +f 3162 768 1073 +f 768 3574 1073 +f 3574 768 2520 +f 3574 2861 338 +f 160 1207 1097 +f 1097 1207 2700 +f 2700 1207 462 +f 462 1675 3113 +f 3113 1675 1674 +f 2097 2700 462 +f 1674 3911 3113 +f 3844 3911 1674 +f 3911 3844 2754 +f 2097 462 3113 +f 2828 3113 3911 +f 2097 2828 2402 +f 2097 3113 2828 +f 3911 378 2828 +f 378 678 2828 +f 678 
378 1115 +f 378 3911 3148 +f 378 3148 3083 +f 621 378 3083 +f 1115 378 621 +f 2588 1115 621 +f 3933 2588 621 +f 3933 621 3330 +f 1241 2141 3570 +f 3933 3330 2672 +f 2672 3330 3570 +f 544 2742 3294 +f 3398 3273 1250 +f 3273 3398 3294 +f 2175 1199 1250 +f 1199 3398 1250 +f 2175 1250 2781 +f 538 2175 2773 +f 3596 2773 1997 +f 189 2333 1706 +f 1997 1706 3596 +f 1621 2664 1706 +f 1621 1706 2333 +f 2664 3099 633 +f 538 2773 3596 +f 301 538 3596 +f 1706 1840 3596 +f 1840 1706 2664 +f 301 3596 1840 +f 2664 771 1840 +f 771 2664 2420 +f 2420 2664 633 +f 2278 633 2763 +f 3696 2278 176 +f 2278 2763 176 +f 2127 3696 426 +f 1810 2178 1652 +f 2299 426 3696 +f 426 2299 2178 +f 426 2178 1810 +f 426 1810 1149 +f 2420 633 2278 +f 2127 2420 2278 +f 2127 2278 3696 +f 147 1149 2964 +f 1149 147 406 +f 147 2964 1951 +f 1951 2605 147 +f 406 426 1149 +f 1369 406 147 +f 1706 1997 189 +f 2120 814 395 +f 2120 3262 814 +f 3262 2120 3510 +f 1830 382 3510 +f 1830 3510 2120 +f 1552 3163 2490 +f 1552 2490 2026 +f 1792 203 3270 +f 2490 203 1298 +f 1592 203 1792 +f 1792 1104 1592 +f 1552 382 3163 +f 729 1045 2001 +f 2548 1104 1357 +f 1212 1359 1045 +f 1104 1045 1592 +f 203 3163 3270 +f 729 1212 1045 +f 382 1830 3163 +f 1045 258 1592 +f 3163 203 2490 +f 2026 3519 1552 +f 1298 1592 258 +f 1864 3282 729 +f 2403 3239 1985 +f 2403 2889 3239 +f 2413 1864 729 +f 3239 1864 2413 +f 1864 3239 2889 +f 1444 706 870 +f 706 1365 1273 +f 2051 1273 1365 +f 3257 2051 1365 +f 1387 955 932 +f 932 2063 1387 +f 3897 1827 1789 +f 3897 1839 291 +f 1839 3897 1789 +f 1732 291 1839 +f 1732 1486 1444 +f 1486 27 1444 +f 706 1444 27 +f 1365 706 27 +f 27 3942 1365 +f 3257 1365 3942 +f 955 3257 3942 +f 932 955 3942 +f 1339 1387 2063 +f 3071 3803 2090 +f 859 3071 2090 +f 3608 1423 2090 +f 1423 3608 2182 +f 295 1423 3531 +f 3531 3945 295 +f 3945 3929 295 +f 1757 295 707 +f 1757 707 935 +f 859 603 3252 +f 603 2090 839 +f 2090 1423 295 +f 707 295 3929 +f 3803 3921 2090 +f 839 2090 295 +f 2786 152 3021 +f 3021 2916 2786 +f 2786 3123 152 +f 3123 2786 3568 +f 3568 2254 2835 +f 2560 1904 3116 +f 2560 2916 1176 +f 697 3123 2835 +f 3568 2835 3123 +f 3389 2166 3288 +f 3288 2166 1377 +f 1377 698 3288 +f 2254 8 2835 +f 2166 2714 8 +f 8 223 2166 +f 2916 3021 1176 +f 223 8 2254 +f 1564 697 2835 +f 3288 3329 3389 +f 3183 840 3314 +f 1987 840 3183 +f 1987 1637 840 +f 1350 3314 840 +f 1350 840 452 +f 452 840 1637 +f 1987 3629 1637 +f 452 1637 3564 +f 1987 3392 3629 +f 3358 631 1990 +f 3358 1341 631 +f 3392 1990 3629 +f 1415 1341 3358 +f 1341 1415 498 +f 3253 1462 3330 +f 3218 1462 2081 +f 1207 2373 1768 +f 1675 1768 587 +f 462 1768 1675 +f 1675 587 3844 +f 2754 3844 587 +f 2754 587 614 +f 3735 614 1872 +f 3218 3570 3330 +f 2373 2364 1768 +f 614 587 3253 +f 1872 614 3253 +f 1872 3253 1482 +f 1482 3253 3330 +f 548 2373 1207 +f 1768 462 1207 +f 1675 3844 1674 +f 3148 2754 614 +f 614 3735 3148 +f 3083 3735 1872 +f 3735 3083 3148 +f 1482 3083 1872 +f 3330 3083 1482 +f 621 3083 3330 +f 2936 606 2671 +f 606 2252 2671 +f 2252 606 69 +f 2252 2040 2671 +f 2252 923 207 +f 207 517 2252 +f 1263 201 738 +f 1263 923 2252 +f 1263 2252 69 +f 1943 923 1263 +f 1449 1488 465 +f 1068 1530 2983 +f 1530 2328 2983 +f 2983 1263 738 +f 2983 1943 1263 +f 207 923 1943 +f 207 2328 976 +f 1530 517 976 +f 1530 465 517 +f 697 1488 1393 +f 1488 1347 1393 +f 976 2328 1530 +f 207 1943 2328 +f 465 1530 1449 +f 517 207 976 +f 1488 1564 465 +f 697 1564 1488 +f 2835 8 749 +f 1564 517 465 +f 1791 2671 1008 +f 2936 2671 1791 +f 2936 1791 2217 +f 3548 1339 1405 +f 2061 1405 2614 +f 1690 2061 2614 +f 1690 742 1523 
+f 1523 742 3196 +f 2614 1405 1339 +f 2614 742 1690 +f 1263 400 201 +f 1263 69 400 +f 400 69 2387 +f 201 400 520 +f 400 2387 520 +f 2774 2387 1904 +f 768 2387 2774 +f 2774 1904 2978 +f 1176 3503 2978 +f 3503 768 2978 +f 3503 1176 3021 +f 3503 2520 768 +f 2520 3503 3021 +f 2520 3021 152 +f 3123 697 152 +f 738 201 3878 +f 2774 2978 768 +f 2861 2520 152 +f 2861 3574 2520 +f 1791 3498 2611 +f 3446 2656 2745 +f 2135 791 2656 +f 2656 3206 2135 +f 791 359 2253 +f 791 115 359 +f 115 791 2135 +f 2135 2217 2823 +f 2253 2745 791 +f 2656 791 2745 +f 2135 3206 2217 +f 1192 651 2059 +f 1192 2059 3137 +f 531 651 1192 +f 2657 531 1192 +f 1180 651 3112 +f 473 3112 651 +f 2483 2593 1807 +f 2593 2483 1040 +f 3747 1010 3351 +f 3747 1156 1010 +f 1156 3747 1058 +f 1807 1058 724 +f 1807 724 876 +f 1156 1058 1807 +f 1807 876 2483 +f 3388 1807 2593 +f 1156 1899 1010 +f 1807 3388 1156 +f 1156 733 1899 +f 2517 1899 733 +f 3388 733 1156 +f 395 814 2320 +f 2320 814 3927 +f 3927 2140 1243 +f 2888 2320 3927 +f 1243 2888 3927 +f 2791 2320 2888 +f 2888 724 2791 +f 1243 2140 1058 +f 724 2888 876 +f 627 2791 724 +f 627 2937 2791 +f 2320 2791 2937 +f 2140 3927 3262 +f 119 1760 2582 +f 1760 3183 98 +f 1760 2668 3571 +f 2668 1760 98 +f 1760 3571 2582 +f 2582 3571 3722 +f 119 3183 1760 +f 119 1987 3183 +f 2611 3498 2418 +f 1345 243 2582 +f 58 842 3189 +f 243 1345 1623 +f 2582 243 839 +f 1673 110 3545 +f 1673 2059 110 +f 2333 2059 1673 +f 2059 2333 999 +f 999 1621 2059 +f 3616 3137 2059 +f 2059 1621 2031 +f 2031 3616 2059 +f 2059 1180 110 +f 1180 2059 651 +f 3545 469 1673 +f 3545 3210 469 +f 469 2145 1673 +f 1673 2145 2333 +f 1345 2582 3722 +f 2983 738 1713 +f 508 606 2114 +f 2114 606 2936 +f 508 69 606 +f 2936 1003 2114 +f 1831 2744 274 +f 2744 1831 682 +f 682 1831 141 +f 596 2964 3370 +f 3370 173 596 +f 3446 173 3370 +f 3446 1951 173 +f 1951 3446 2745 +f 1951 1971 173 +f 141 3145 682 +f 141 3171 3145 +f 141 3370 3171 +f 3446 3370 141 +f 1574 3722 2316 +f 3571 2316 3722 +f 3571 2668 2316 +f 2668 98 3314 +f 98 3183 3314 +f 2316 2668 3314 +f 1574 2316 1350 +f 1350 2316 3314 +f 531 3736 2790 +f 86 2177 1661 +f 1316 2333 2145 +f 114 2031 1316 +f 2031 1621 1316 +f 1316 1621 2333 +f 2563 3616 2031 +f 2031 114 2563 +f 3181 3137 1505 +f 3181 1505 3736 +f 3181 3736 2657 +f 2563 3736 3616 +f 3616 3736 1505 +f 3736 531 2657 +f 531 2790 3834 +f 2177 2191 2790 +f 2790 2191 3834 +f 2177 86 2191 +f 1793 390 1661 +f 152 697 1698 +f 2855 1698 697 +f 2855 548 1698 +f 152 1698 2861 +f 338 1698 548 +f 160 338 1207 +f 839 3189 842 +f 839 243 3189 +f 3127 473 1725 +f 1725 473 2230 +f 473 3127 3572 +f 3572 2418 473 +f 2418 3112 473 +f 382 128 3510 +f 2690 128 382 +f 1552 2690 382 +f 842 3006 603 +f 3252 2937 1433 +f 1433 2937 627 +f 2937 603 3006 +f 173 2611 596 +f 2611 173 1971 +f 1971 2317 2611 +f 596 2611 2418 +f 2660 596 2418 +f 2660 2418 2367 +f 3116 1904 508 +f 508 2387 69 +f 2387 508 1904 +f 2560 2978 1904 +f 2560 1176 2978 +f 3510 2140 3262 +f 2140 3510 1433 +f 724 1058 1433 +f 1433 1058 2140 +f 2120 983 1830 +f 2224 229 1118 +f 229 1206 1118 +f 930 2823 1206 +f 2823 930 3468 +f 3468 3813 3445 +f 3468 930 3813 +f 578 3445 3813 +f 1393 2364 2855 +f 2855 2364 2373 +f 2855 2373 548 +f 1393 1347 2364 +f 2855 697 1393 +f 2816 2611 991 +f 359 991 2224 +f 991 359 115 +f 115 2816 991 +f 2816 2217 1791 +f 1791 2611 2816 +f 2816 2823 2217 +f 1206 2816 115 +f 1206 2823 2816 +f 3252 1433 3510 +f 1433 627 724 +f 3252 603 2937 +f 2660 3171 1810 +f 1810 3171 1149 +f 1149 3171 3370 +f 2964 1149 3370 +f 274 314 1909 +f 1831 274 1909 +f 2144 1192 1386 +f 2657 
1192 2144 +f 1386 590 2144 +f 3834 1909 314 +f 1725 2230 1356 +f 1356 2460 1725 +f 1725 2460 2744 +f 2744 176 1725 +f 2744 890 176 +f 890 390 3572 +f 682 2299 890 +f 682 3145 1999 +f 1999 2178 2299 +f 682 1999 2299 +f 314 253 3834 +f 2744 2460 274 +f 274 2460 314 +f 682 890 2744 +f 2460 1356 86 +f 1356 2352 86 +f 1661 314 86 +f 314 2460 86 +f 633 2191 2763 +f 2763 2191 2352 +f 2763 2352 1356 +f 2763 1356 176 +f 390 3696 176 +f 253 473 651 +f 531 253 651 +f 2191 473 253 +f 473 2191 2230 +f 3834 253 531 +f 1724 2660 2367 +f 2178 1724 2367 +f 1970 1810 1724 +f 1970 2660 1810 +f 2964 596 2660 +f 2660 1970 2964 +f 128 3252 3510 +f 2605 1607 2239 +f 256 2224 2253 +f 1951 2964 1607 +f 1951 1607 1971 +f 1971 1607 2605 +f 256 2605 2239 +f 256 2239 2224 +f 2224 2239 229 +f 2605 2317 1971 +f 2611 2317 991 +f 2253 2317 2605 +f 2745 2253 2605 +f 2224 991 2317 +f 2253 2224 2317 +f 1908 3099 2657 +f 3099 3181 2657 +f 633 3099 1908 +f 2745 2605 1951 +f 147 2605 256 +f 1118 256 2253 +f 147 256 1118 +f 3937 2664 1621 +f 2664 3937 590 +f 3099 2664 590 +f 1908 2657 2144 +f 1908 2144 3834 +f 633 1908 3834 +f 633 3834 2191 +f 999 2333 189 +f 189 1621 999 +f 189 3937 1621 +f 590 3137 3616 +f 590 1386 3137 +f 314 2352 253 +f 1661 1725 176 +f 1661 176 1356 +f 3696 390 2299 +f 1652 1999 3145 +f 1652 2178 2367 +f 2660 3145 3171 +f 1652 2367 1999 +f 147 1118 1206 +f 147 1206 3129 +f 2135 2823 115 +f 3129 1206 115 +f 890 3572 3127 +f 176 890 3127 +f 390 176 3127 +f 1661 390 1725 +f 390 3127 1725 +f 3145 1724 1652 +f 3145 2660 1724 +f 2230 2191 86 +f 1505 3099 3616 +f 3616 3099 590 +f 1505 1386 3099 +f 86 2352 2230 +f 253 2352 2191 +f 359 1118 2253 +f 1118 359 2224 +f 1724 1810 1652 +f 1386 1505 3137 +f 3137 3181 1192 +f 314 2230 2352 +f 314 1661 2230 +f 1356 2230 1661 +f 3181 1386 1192 +f 3181 3099 1386 +f 547 3094 754 +f 3300 1028 1586 +f 2875 547 3154 +f 2439 3050 1028 +f 610 3790 3088 +f 3088 1762 2943 +f 339 3144 354 +f 1256 2122 2101 +f 636 143 1140 +f 3541 657 2301 +f 1319 1817 2441 +f 3396 1227 85 +f 2606 1388 2843 +f 610 3342 1835 +f 47 2712 2558 +f 3916 2860 2538 +f 1284 532 3664 +f 236 2973 996 +f 2843 1388 2882 +f 1385 3144 339 +f 954 2871 2910 +f 1630 1209 1655 +f 2546 1630 1655 +f 1385 1353 3144 +f 2261 3014 2717 +f 271 292 3448 +f 3518 23 1020 +f 2435 3232 2575 +f 1120 2161 1111 +f 356 1189 1930 +f 3143 3716 1655 +f 3426 1083 71 +f 1083 3426 1443 +f 3555 3805 2620 +f 1083 1443 2132 +f 354 3144 1813 +f 1113 1197 433 +f 433 1197 2044 +f 1411 3246 3613 +f 2975 3818 2895 +f 2895 2722 2975 +f 1833 917 774 +f 917 3434 3843 +f 2665 2267 3882 +f 1973 745 1276 +f 3066 1973 1818 +f 1426 2435 1709 +f 3823 2905 2628 +f 2478 1680 3357 +f 3810 3306 302 +f 180 1426 1709 +f 1229 1489 190 +f 1714 2356 3943 +f 2381 3810 302 +f 3306 3810 2505 +f 3667 2564 1714 +f 1901 2564 3414 +f 2905 3823 3414 +f 3560 564 1226 +f 1126 660 826 +f 2835 749 1564 +f 226 3037 535 +f 3615 2748 551 +f 615 34 2911 +f 3498 488 762 +f 3941 1367 1470 +f 988 2325 167 +f 1066 1096 3524 +f 557 2440 795 +f 3293 3120 3704 +f 983 2719 3270 +f 124 1950 2405 +f 1351 3308 212 +f 3946 2834 2822 +f 49 3851 3454 +f 1950 3462 558 +f 3895 2096 1318 +f 1950 1258 2405 +f 1258 2052 2405 +f 1210 3133 2896 +f 2405 2052 1683 +f 1960 3688 2053 +f 2453 1330 122 +f 157 3215 2805 +f 402 2283 1046 +f 1547 513 1459 +f 3278 1093 1259 +f 569 1624 2444 +f 1799 1991 3366 +f 2039 2260 2085 +f 2283 402 159 +f 569 1046 2283 +f 1511 3208 1312 +f 1165 137 2134 +f 1889 431 2762 +f 75 1889 2762 +f 3147 3366 873 +f 408 1125 1403 +f 1969 1853 916 +f 1464 953 524 +f 1619 3492 3208 +f 
3082 1403 3027 +f 690 3779 1422 +f 801 573 431 +f 3583 1384 3134 +f 3692 3460 3670 +f 3582 3707 3359 +f 3819 1603 1437 +f 1052 2058 3200 +f 1648 3714 1734 +f 1744 1518 3449 +f 1744 3449 306 +f 2492 2866 1826 +f 1651 845 2401 +f 3179 3713 2306 +f 88 995 441 +f 1568 2126 3001 +f 2286 1497 278 +f 1519 3063 3283 +f 240 1371 1455 +f 568 3451 3419 +f 3451 3504 3419 +f 2462 3369 2269 +f 1371 3684 1287 +f 3858 1155 1633 +f 3778 2448 1098 +f 182 3428 3301 +f 396 1293 881 +f 3451 470 1069 +f 1483 1914 318 +f 2658 1060 783 +f 2658 2461 2799 +f 2988 3444 2518 +f 3673 1283 2168 +f 2020 3059 1283 +f 1483 2030 1784 +f 2903 679 605 +f 807 630 1342 +f 1653 1342 2279 +f 1023 623 865 +f 1248 3105 1475 +f 1035 3474 965 +f 397 3756 1856 +f 6 2906 1439 +f 6 1439 2947 +f 3714 1968 3842 +f 760 1179 2515 +f 311 90 755 +f 3200 1549 2796 +f 740 442 3715 +f 2678 1049 2024 +f 1167 3859 2492 +f 3301 3428 1692 +f 1281 3138 261 +f 500 1000 1430 +f 95 587 3057 +f 2833 3295 587 +f 2785 2196 1168 +f 464 1169 1272 +f 1984 334 273 +f 1638 3811 3674 +f 1309 414 3703 +f 1309 3703 1032 +f 3272 3584 3338 +f 1323 3240 1184 +f 3852 3101 3884 +f 2886 2914 2584 +f 3192 456 1139 +f 3101 3873 142 +f 2818 2380 1869 +f 2571 2800 3167 +f 1355 391 2380 +f 1639 3873 3101 +f 1289 3551 2797 +f 3551 1289 2187 +f 1465 2890 3562 +f 111 1117 3461 +f 142 3884 3101 +f 1695 3119 3261 +f 3754 2886 1216 +f 2488 3928 2247 +f 753 2831 780 +f 24 494 994 +f 3759 3165 3420 +f 1446 838 3019 +f 3161 2601 24 +f 2601 3161 329 +f 374 2488 2247 +f 319 299 245 +f 3149 1837 3310 +f 1801 2071 10 +f 1625 2366 3472 +f 1682 1469 3149 +f 144 780 2488 +f 3811 1638 246 +f 2479 901 1691 +f 2996 3284 901 +f 2235 1025 3307 +f 2853 366 3055 +f 1025 3715 1370 +f 1892 2391 1519 +f 3106 951 1476 +f 951 3106 2961 +f 2961 3106 2962 +f 1834 3939 2390 +f 1590 1050 3922 +f 942 1062 912 +f 703 717 1932 +f 293 2898 2189 +f 832 715 3404 +f 1075 3456 681 +f 282 3362 2792 +f 191 2734 3776 +f 2437 652 63 +f 2853 3055 5 +f 5 3055 2214 +f 2833 2094 73 +f 2734 191 863 +f 191 1286 863 +f 211 1332 3515 +f 2476 2527 577 +f 645 3917 284 +f 723 3917 2235 +f 1632 1348 700 +f 1346 1166 699 +f 1929 3061 235 +f 2437 1033 723 +f 2794 2295 1457 +f 2157 1033 2707 +f 1449 1068 1022 +f 3233 2933 2707 +f 1571 3723 282 +f 1545 2729 3666 +f 3734 3477 937 +f 1427 3405 2417 +f 277 3561 2531 +f 324 3276 104 +f 2417 1534 1127 +f 2485 3601 1484 +f 2533 3865 2511 +f 2838 1507 3313 +f 1507 2838 2152 +f 283 731 3275 +f 1199 1445 3398 +f 731 283 421 +f 326 1203 1685 +f 766 232 2014 +f 1785 417 3277 +f 3277 2547 1696 +f 2480 239 1183 +f 2547 3277 417 +f 417 2469 2004 +f 2925 1248 1475 +f 2680 2067 1475 +f 358 2922 2409 +f 2264 2167 3191 +f 3223 280 2171 +f 2389 1266 3781 +f 3634 2624 2771 +f 3150 358 2622 +f 108 222 1779 +f 2264 1843 2167 +f 1843 2264 310 +f 2636 1494 3286 +f 310 2270 1843 +f 162 2008 1988 +f 3579 3258 138 +f 215 1988 2008 +f 3579 2073 3258 +f 1646 3033 3223 +f 1308 1563 584 +f 1396 518 3186 +f 1109 1843 2270 +f 2636 2389 1494 +f 2188 1668 2812 +f 2956 949 409 +f 975 635 3132 +f 2849 725 44 +f 3187 67 594 +f 2392 3176 3866 +f 3117 3649 2246 +f 539 734 3801 +f 3125 546 821 +f 643 1694 260 +f 635 734 539 +f 430 2616 2694 +f 539 3801 55 +f 430 821 313 +f 1926 3930 2211 +f 2696 579 1577 +f 2396 3439 3529 +f 553 2770 2142 +f 2021 860 1820 +f 2795 756 2594 +f 1742 2939 3135 +f 2939 2454 3135 +f 553 1460 1609 +f 1460 412 650 +f 412 1460 2046 +f 2362 2447 1190 +f 2362 992 2447 +f 1566 3087 35 +f 1662 2154 3443 +f 3000 2308 2559 +f 3697 1615 1750 +f 2186 3609 1906 +f 3000 985 2308 +f 2186 1906 1855 +f 1854 
1615 3697 +f 131 3870 903 +f 1572 1372 2338 +f 475 1566 857 +f 1741 2669 1584 +f 1741 1584 2623 +f 1678 2623 2858 +f 2858 2623 1584 +f 920 1551 267 +f 2498 267 1551 +f 2586 629 2363 +f 2699 2005 444 +f 2363 629 1741 +f 3003 1839 1789 +f 2050 2495 346 +f 2765 3052 3032 +f 3791 3711 1280 +f 3077 1928 1797 +f 1634 1928 3077 +f 3921 3077 1390 +f 1450 2015 2419 +f 3269 2319 2354 +f 2344 1408 1841 +f 1916 2423 2915 +f 2555 3935 364 +f 1485 952 1382 +f 564 2136 1598 +f 1011 3553 230 +f 1520 3258 685 +f 3794 2505 770 +f 337 564 1598 +f 64 1382 2999 +f 1306 3590 2411 +f 1598 43 370 +f 2942 370 43 +f 3211 892 1570 +f 1850 928 1089 +f 3254 3211 1570 +f 1057 3603 1570 +f 2158 1001 3752 +f 447 3118 1415 +f 1415 3118 897 +f 200 126 869 +f 2968 458 154 +f 2741 947 154 +f 3523 119 154 +f 3533 1957 529 +f 1529 3558 25 +f 903 3870 2972 +f 672 1099 2147 +f 2572 135 2400 +f 2277 522 2940 +f 1295 1714 3943 +f 2604 3140 2240 +f 1665 2240 3140 +f 3573 2686 3011 +f 200 2730 3556 +f 3129 3468 3838 +f 544 1962 2742 +f 3398 544 3294 +f 2175 2781 2773 +f 3282 1212 729 +f 603 839 842 +f 2090 3921 3608 +f 1462 3218 3330 +f 2983 2328 1943 +f 338 548 1207 +f 1016 1028 3300 +f 3193 2101 2003 +f 1722 2101 887 +f 887 3193 3091 +f 887 3091 3855 +f 1867 1710 1922 +f 2558 689 2682 +f 454 2468 2606 +f 2311 2468 454 +f 3014 354 2062 +f 2927 2610 666 +f 2973 1238 1816 +f 3232 2105 3457 +f 1931 1230 1599 +f 3166 2682 960 +f 1307 2752 1865 +f 1284 1867 1319 +f 2575 3232 3457 +f 1131 2752 3347 +f 2206 3315 3143 +f 1443 2973 1816 +f 3542 2161 1120 +f 810 2902 3271 +f 1816 2687 757 +f 1238 2687 1816 +f 3247 1769 1492 +f 116 954 2910 +f 2541 3009 1671 +f 3748 820 1745 +f 2152 3434 917 +f 1670 3662 512 +f 3675 532 1103 +f 3133 1422 3 +f 26 1927 2325 +f 3819 3680 1582 +f 2019 2693 2440 +f 2693 2019 1846 +f 2580 2985 2976 +f 122 1330 3226 +f 566 52 1992 +f 410 2096 3215 +f 410 290 2096 +f 2028 2709 3251 +f 2283 159 502 +f 2486 2289 3293 +f 1555 893 194 +f 1624 1949 2444 +f 2862 1799 3366 +f 1172 2568 624 +f 2684 3586 1889 +f 3583 570 661 +f 1980 3155 1947 +f 3692 3586 1312 +f 1582 1882 916 +f 477 1619 3034 +f 75 2684 1889 +f 570 2868 2496 +f 1947 3155 2959 +f 916 1882 2197 +f 218 1511 3586 +f 1436 461 3932 +f 2058 1518 1920 +f 1518 1744 1337 +f 2227 1996 2064 +f 2227 2462 1996 +f 1568 845 1651 +f 3431 834 294 +f 3244 694 2286 +f 2632 2688 1514 +f 3516 1098 2950 +f 844 3428 182 +f 844 182 2615 +f 396 3684 1293 +f 483 1558 3775 +f 3222 2255 1287 +f 1129 2780 3323 +f 1129 3639 1642 +f 281 305 850 +f 679 2903 2321 +f 1439 1595 118 +f 1179 760 746 +f 3648 1179 746 +f 3500 3632 2020 +f 3354 320 3737 +f 1549 740 238 +f 305 281 3544 +f 895 2846 1061 +f 1399 3859 3566 +f 2492 3859 2960 +f 3301 1692 2394 +f 3309 2873 1019 +f 1019 2873 2321 +f 1061 1935 752 +f 3467 3057 1110 +f 46 275 595 +f 1984 2158 334 +f 275 219 595 +f 2797 3338 3584 +f 2386 1438 2070 +f 2785 2455 1869 +f 3157 1535 2477 +f 3912 3575 1246 +f 1117 3575 1360 +f 1919 3826 3873 +f 1363 3562 2890 +f 1381 1639 1090 +f 3192 3390 1469 +f 593 3912 2287 +f 2393 1409 3762 +f 2393 3762 2715 +f 1767 259 11 +f 2121 3455 3374 +f 471 3757 669 +f 780 144 1565 +f 2779 3592 3472 +f 1335 1184 585 +f 3420 2223 1340 +f 225 1457 2295 +f 1829 1658 2054 +f 2259 855 149 +f 2663 3423 2633 +f 703 1829 3054 +f 1829 703 1932 +f 855 1296 149 +f 1346 2842 3335 +f 3041 1845 719 +f 867 2562 882 +f 855 139 3340 +f 284 3917 2202 +f 1269 3600 1296 +f 265 2641 816 +f 3268 747 1534 +f 2417 3561 1427 +f 2531 2281 277 +f 1424 1534 747 +f 2641 265 3333 +f 3064 2893 2471 +f 2195 2556 1231 +f 1967 2302 20 +f 3168 1084 2893 
+f 2993 2195 1231 +f 1697 2921 106 +f 1035 1248 1004 +f 2545 1521 108 +f 3425 3898 632 +f 1494 2716 213 +f 1109 93 48 +f 674 1494 2389 +f 1375 811 2346 +f 750 1616 674 +f 704 2817 1308 +f 574 2753 2740 +f 2956 3931 949 +f 1440 2619 2812 +f 949 3931 1200 +f 3279 2619 1700 +f 2619 3279 76 +f 3914 76 3279 +f 294 834 3788 +f 539 3511 635 +f 821 430 2694 +f 2449 3570 3218 +f 2849 735 975 +f 2112 3869 632 +f 1700 3012 787 +f 635 975 734 +f 3636 2535 2739 +f 688 864 1224 +f 3126 2986 1412 +f 340 2536 2093 +f 2739 2535 826 +f 1047 2739 826 +f 1323 1454 3906 +f 1733 3628 2751 +f 3798 671 3628 +f 1776 2570 835 +f 2576 3087 3890 +f 2621 898 933 +f 3165 3759 50 +f 3114 3760 3051 +f 1986 1373 1513 +f 2631 3051 2044 +f 3633 3891 3042 +f 2173 2005 611 +f 153 377 2397 +f 1548 1604 970 +f 2199 970 1604 +f 2455 1168 3052 +f 2284 3266 3908 +f 894 3266 45 +f 1945 2201 1841 +f 1520 3822 3336 +f 2136 43 1598 +f 1173 3494 1998 +f 1798 1520 3336 +f 2840 3298 1333 +f 508 381 2077 +f 2786 1089 3568 +f 126 200 3118 +f 1757 935 288 +f 2782 3131 3537 +f 534 1438 950 +f 3140 1513 3004 +f 200 2300 3118 +f 1080 3768 1649 +f 2861 1698 338 +f 1022 2364 1347 +f 1488 1449 1347 +f 2937 3006 2320 +f 127 263 689 +f 2152 2838 620 +f 3705 155 336 +f 1540 2115 2034 +f 2568 2444 1949 +f 3026 688 1224 +f 2361 2920 2862 +f 864 369 1577 +f 1969 2197 2819 +f 2197 1969 916 +f 2496 3281 1419 +f 397 1856 764 +f 1549 1264 2796 +f 2313 1074 1110 +f 46 2433 275 +f 3663 1469 3390 +f 2187 1289 3508 +f 3045 3534 1801 +f 1893 1837 1921 +f 3906 2885 3240 +f 2706 2630 1847 +f 1347 1449 1022 +f 2378 620 2838 +f 2676 2624 3634 +f 2590 179 1065 +f 516 1031 2008 +f 2500 1326 2262 +f 2740 3328 574 +f 409 949 3602 +f 3502 1252 1151 +f 898 945 933 +f 2621 933 3798 +f 3514 3282 204 +f 2201 2344 1841 +f 2228 1173 1998 +f 685 3258 2073 +f 3206 2936 2217 +f 3030 2989 2504 +f 3030 2504 2877 +f 3789 3784 1870 +f 1447 2384 2524 +f 1756 2686 3573 +f 2521 2730 200 +f 2442 3591 1859 +f 2090 603 859 +f 1345 3722 1574 +f 2626 2977 1727 +f 3303 3271 2083 +f 900 1016 3300 +f 1016 2439 1028 +f 2311 2138 2468 +f 825 3457 3669 +f 1131 3391 2752 +f 1055 424 2041 +f 3247 463 2538 +f 3066 1818 2470 +f 2100 332 827 +f 1147 954 3376 +f 1227 1630 2546 +f 825 2575 3457 +f 3448 1265 271 +f 3448 716 1265 +f 2902 1265 716 +f 1780 1556 708 +f 852 3035 1477 +f 1586 3287 3300 +f 2261 1181 3014 +f 236 782 769 +f 3457 2105 16 +f 1813 2099 543 +f 354 1813 543 +f 532 1284 3617 +f 2468 3367 868 +f 3422 1242 1208 +f 968 3497 1442 +f 1143 1338 1610 +f 2895 3818 81 +f 2172 1620 1021 +f 1670 3843 3662 +f 2470 17 966 +f 2435 2336 249 +f 2261 2209 1181 +f 2336 2435 1426 +f 600 1959 1781 +f 2336 2150 249 +f 3414 3823 1901 +f 3471 2746 3771 +f 2474 3036 1927 +f 3353 1180 3112 +f 3353 3112 3766 +f 2789 2670 1013 +f 749 2714 2976 +f 2976 2714 1222 +f 2190 615 2693 +f 2932 212 3308 +f 2674 285 3635 +f 3065 1977 2551 +f 285 1456 3635 +f 1351 558 3036 +f 3471 3771 82 +f 2289 2407 161 +f 285 2181 1456 +f 2289 161 3120 +f 617 2181 3251 +f 3820 3694 3427 +f 2766 513 3375 +f 1172 2408 1292 +f 2102 2039 1811 +f 1297 2851 3694 +f 3147 1133 2862 +f 919 3758 776 +f 639 3075 3936 +f 873 639 3853 +f 3 2251 2294 +f 3732 2430 3089 +f 1946 536 1721 +f 1619 3208 1336 +f 873 3075 639 +f 3732 3528 536 +f 3299 2912 415 +f 218 3586 2684 +f 446 1336 2759 +f 2904 1447 2524 +f 3365 3169 3180 +f 3180 1751 3365 +f 1751 687 599 +f 3200 1920 740 +f 2269 2567 2462 +f 3283 2618 1394 +f 2618 2899 1394 +f 3394 3419 2047 +f 519 3320 1975 +f 844 1688 2698 +f 1688 2997 2662 +f 2422 630 276 +f 2997 2422 276 +f 984 1069 875 +f 2448 
3827 638 +f 1048 1914 1913 +f 1583 2119 1048 +f 3222 3550 2255 +f 2499 3393 2501 +f 2499 266 2506 +f 2503 2737 3893 +f 1262 357 3595 +f 367 1061 2846 +f 1061 752 895 +f 3309 419 261 +f 419 383 261 +f 401 419 3655 +f 419 401 383 +f 752 1640 895 +f 3807 2644 3403 +f 2698 2027 844 +f 182 3301 2424 +f 1340 3630 1542 +f 2070 3642 2747 +f 595 464 1272 +f 2208 2477 1823 +f 3654 414 1309 +f 1594 3505 2825 +f 2365 3934 3665 +f 3272 2683 219 +f 1363 3654 3562 +f 3562 3654 3272 +f 3884 1869 391 +f 3101 1090 1639 +f 862 192 1851 +f 986 2612 862 +f 192 862 2612 +f 3643 1695 3261 +f 1360 1152 1117 +f 501 862 3762 +f 1978 3909 3505 +f 2393 1639 1381 +f 1152 1360 3192 +f 3067 315 329 +f 2345 2129 1893 +f 2129 2345 32 +f 1682 1801 456 +f 1893 1921 3804 +f 1458 1317 1625 +f 2256 1686 2992 +f 374 2247 3161 +f 3363 32 2345 +f 3742 3363 669 +f 715 832 1880 +f 3917 645 2235 +f 3905 1892 2527 +f 723 652 2437 +f 2074 3122 3905 +f 2534 3671 3355 +f 1245 2320 1268 +f 1804 822 1713 +f 3364 3896 921 +f 884 324 2585 +f 819 800 2487 +f 1484 620 2378 +f 714 3372 1745 +f 800 1677 3025 +f 3064 2010 429 +f 1501 560 1124 +f 1534 2417 3268 +f 3372 1982 2249 +f 884 836 324 +f 736 1752 209 +f 727 2507 1005 +f 1479 2469 417 +f 1097 2556 2507 +f 1420 280 647 +f 582 2624 3836 +f 79 2497 3915 +f 2497 79 1779 +f 280 3464 1239 +f 3256 3795 2155 +f 1896 94 2008 +f 1569 3710 1916 +f 2313 3380 1074 +f 975 3132 725 +f 2376 3286 3435 +f 220 313 546 +f 269 3768 3626 +f 634 398 778 +f 384 3007 3216 +f 2465 2012 1820 +f 2192 197 860 +f 2939 3202 3770 +f 609 2117 2384 +f 1219 987 1854 +f 1733 3019 3918 +f 475 2594 1566 +f 2621 609 2384 +f 2858 1573 1205 +f 3060 2397 2529 +f 3042 2173 3003 +f 2005 2173 444 +f 3793 272 2669 +f 3891 2173 3042 +f 2919 2066 962 +f 3791 2765 3032 +f 2495 1435 2523 +f 2370 1871 672 +f 554 2056 3049 +f 3049 2056 2628 +f 364 3935 1426 +f 1798 3336 156 +f 659 2552 1001 +f 3537 1878 2207 +f 869 309 2207 +f 3647 3131 1493 +f 1924 939 981 +f 1373 1762 1513 +f 2940 2830 3573 +f 1839 1277 1486 +f 2219 1878 3294 +f 1839 1486 1732 +f 1385 339 889 +f 3806 2209 2261 +f 2237 16 2105 +f 1122 2628 2905 +f 2150 564 3560 +f 2670 3941 1470 +f 3384 2896 2294 +f 2058 1920 3200 +f 2163 3444 2988 +f 1475 3708 2680 +f 3756 129 3109 +f 3872 3167 2234 +f 2256 2992 1324 +f 565 2751 1631 +f 3456 73 681 +f 2094 681 73 +f 2137 3889 1808 +f 1632 700 3220 +f 2014 232 3408 +f 3464 280 1420 +f 3117 3012 1440 +f 2049 2009 1903 +f 659 1884 2552 +f 1242 3422 3111 +f 1959 2216 2304 +f 84 2430 3732 +f 3419 1107 2047 +f 883 2990 2503 +f 1868 3797 1049 +f 281 3046 506 +f 2951 3309 1019 +f 1740 2919 962 +f 1246 3575 1416 +f 192 3643 3261 +f 3601 620 1484 +f 79 2545 1779 +f 537 44 1596 +f 831 3502 1855 +f 3010 2977 1981 +f 3871 968 2087 +f 3185 655 547 +f 127 2979 263 +f 3855 3091 150 +f 2629 2610 2927 +f 1710 3050 2439 +f 2439 1922 1710 +f 2184 2610 3855 +f 3367 2138 1898 +f 1786 3004 1613 +f 2832 691 1544 +f 1556 1780 3334 +f 2767 708 1496 +f 2909 3066 583 +f 1131 2470 3391 +f 47 3433 2712 +f 2311 454 3349 +f 3396 3349 3418 +f 2183 2777 271 +f 1120 174 3315 +f 1111 2161 1606 +f 1189 1265 1930 +f 2441 1817 23 +f 1265 2902 1930 +f 2991 825 2566 +f 1193 2083 3271 +f 1626 3739 549 +f 1626 549 946 +f 2087 968 2721 +f 1197 820 1143 +f 1620 774 695 +f 3357 1122 2478 +f 190 654 1229 +f 3151 2955 2115 +f 1066 3524 3800 +f 2544 1395 3285 +f 2407 2 161 +f 82 3311 2551 +f 1977 82 2551 +f 300 2481 3772 +f 3772 2481 2544 +f 1635 1330 1941 +f 2805 3215 502 +f 2039 2444 1811 +f 1292 572 1172 +f 1889 3586 3692 +f 3528 3089 1629 +f 919 536 1946 +f 2573 2948 995 +f 2950 
3320 915 +f 2950 915 3369 +f 3320 2950 1098 +f 2615 1688 844 +f 2422 2997 1688 +f 1688 2615 2422 +f 3447 1201 3719 +f 394 3393 2163 +f 2499 2163 3393 +f 3105 1583 3708 +f 6 2515 2906 +f 3715 1042 740 +f 1640 2866 3861 +f 1935 413 752 +f 3642 2769 2747 +f 1535 2337 1823 +f 3642 3070 2811 +f 3909 2825 3505 +f 3909 1925 2825 +f 1925 3909 2797 +f 2187 3764 3056 +f 1978 3643 3508 +f 3157 1246 11 +f 2800 2571 1355 +f 1921 3663 2924 +f 3928 2488 2385 +f 2129 3310 1837 +f 2597 471 494 +f 901 3374 589 +f 732 3938 3290 +f 3429 2903 2608 +f 822 2074 2476 +f 482 1732 36 +f 602 2094 3532 +f 316 110 1180 +f 3561 1127 2432 +f 2893 2579 2432 +f 3561 2432 2579 +f 1534 1424 1127 +f 3043 3168 2432 +f 2893 2432 3168 +f 815 2007 2480 +f 2480 2007 388 +f 1681 1526 1005 +f 750 674 523 +f 2451 145 2793 +f 809 3229 2241 +f 1596 44 2241 +f 2241 44 1088 +f 3798 2760 671 +f 562 1533 598 +f 3024 3879 2655 +f 2529 2036 1502 +f 2529 2943 1373 +f 3760 433 2044 +f 136 3791 533 +f 136 533 3901 +f 718 1078 2419 +f 3598 204 3907 +f 1495 3907 2642 +f 3907 1495 3598 +f 155 1327 3902 +f 1290 496 423 +f 1729 2720 1526 +f 3935 2136 2336 +f 3070 3479 658 +f 200 869 2521 +f 1657 3131 3647 +f 2553 3479 1438 +f 939 2413 2001 +f 3564 1637 3629 +f 1319 2441 1284 +f 3541 2100 507 +f 1618 1620 695 +f 2453 1013 1836 +f 1027 2452 2066 +f 1027 1919 2452 +f 1324 3733 3371 +f 482 3666 2794 +f 3484 1270 1344 +f 342 2818 3901 +f 1265 1189 271 +f 1143 3748 1338 +f 2909 1973 3066 +f 2580 2976 1222 +f 1084 3168 736 +f 1420 2409 2922 +f 973 476 2708 +f 108 1779 2545 +f 3935 2336 1426 +f 3848 3412 170 +f 2977 3010 1727 +f 1613 3088 3790 +f 3391 1666 1575 +f 3143 3315 3716 +f 1143 820 3748 +f 1442 3497 943 +f 3741 3299 137 +f 3320 519 915 +f 1823 2070 2747 +f 3261 1851 192 +f 1645 2129 32 +f 3339 248 480 +f 1883 2485 618 +f 1127 3561 2417 +f 3899 1883 618 +f 324 104 2585 +f 1217 1407 2312 +f 2818 136 3901 +f 718 204 3598 +f 132 763 299 +f 240 1821 1293 +f 397 129 3756 +f 3863 2009 2049 +f 2419 1078 45 +f 1884 928 2552 +f 174 1492 3716 +f 2748 2580 1222 +f 2212 2187 3508 +f 1613 2718 3881 +f 2832 2860 3916 +f 1147 2832 3916 +f 803 1208 3195 +f 1208 868 3422 +f 1208 803 868 +f 28 2216 1959 +f 307 3524 3766 +f 49 300 1093 +f 3650 2 3679 +f 2565 1 3108 +f 1594 1695 3505 +f 2212 3508 2612 +f 1480 2768 2463 +f 3534 1778 1139 +f 3742 32 3363 +f 669 3363 1565 +f 3757 607 3742 +f 3307 1 828 +f 3267 2329 340 +f 1438 3070 2070 +f 2718 656 2404 +f 1283 3059 2168 +f 887 3855 2629 +f 821 2694 537 +f 2173 611 3003 +f 900 3300 3287 +f 1222 2714 619 +f 96 2279 1838 +f 2256 1324 3067 +f 1732 3178 291 +f 3679 2001 2548 +f 1790 3922 2638 +f 3142 2443 2927 +f 613 202 2222 +f 292 1599 2107 +f 3693 1522 463 +f 2606 2843 454 +f 1209 3382 3143 +f 2458 386 1181 +f 3717 1606 2620 +f 3717 1234 1606 +f 1143 779 1197 +f 2434 3386 2055 +f 3246 1087 1055 +f 929 1540 2969 +f 2431 307 3412 +f 1456 2181 617 +f 515 2938 3410 +f 3751 3065 3851 +f 290 3751 3851 +f 570 888 661 +f 2578 3285 2766 +f 3932 1824 1589 +f 3027 1403 796 +f 3825 3027 796 +f 3648 99 2204 +f 1174 845 1434 +f 1174 1434 1455 +f 1455 1636 3725 +f 2573 1795 3079 +f 1654 1975 3320 +f 3775 434 483 +f 3447 434 3788 +f 2740 1201 3447 +f 1052 1378 2204 +f 2846 895 2060 +f 3107 419 1414 +f 3211 3110 1032 +f 2602 2769 1984 +f 1594 2452 3119 +f 2584 3073 3461 +f 902 3509 2108 +f 3073 3332 111 +f 3390 1360 3912 +f 1682 456 1469 +f 1389 3008 1876 +f 640 2247 986 +f 3190 1645 312 +f 1645 2071 3310 +f 3939 832 2032 +f 482 29 719 +f 3005 700 191 +f 210 1632 3220 +f 284 1632 3588 +f 2729 1545 867 +f 1939 2143 1580 +f 2729 495 2295 +f 
2933 3233 2109 +f 3178 482 719 +f 1632 284 1348 +f 459 3275 731 +f 1266 1071 3781 +f 1541 3191 885 +f 3794 750 1798 +f 1711 1375 1940 +f 584 3828 3795 +f 2812 76 1794 +f 3649 1440 1668 +f 1112 2607 2473 +f 3610 734 735 +f 735 2849 2694 +f 2023 197 2192 +f 3792 3267 653 +f 2049 599 1885 +f 3792 653 987 +f 2233 562 987 +f 3400 3609 2971 +f 2635 3702 1572 +f 1533 2559 3289 +f 3890 3702 2576 +f 1471 153 3060 +f 1471 3060 3499 +f 1961 1561 959 +f 1407 1217 963 +f 3473 2583 3318 +f 3052 2242 3829 +f 2215 3514 204 +f 571 204 3282 +f 2913 1135 2234 +f 980 1135 2000 +f 1911 1057 892 +f 135 2572 2344 +f 2731 3765 2753 +f 3765 1201 2753 +f 3788 2740 3447 +f 1283 1007 1129 +f 3390 593 3663 +f 3052 1168 2242 +f 2301 1496 3541 +f 1055 1973 2909 +f 1103 532 3617 +f 88 2573 995 +f 750 3860 1798 +f 2616 735 2694 +f 2323 609 1733 +f 143 1329 1331 +f 3365 1751 1903 +f 599 1903 1751 +f 2568 1949 624 +f 3741 1849 1133 +f 3741 1133 3147 +f 1135 3872 2234 +f 3710 1239 2451 +f 1294 1924 981 +f 502 3215 957 +f 2488 780 2385 +f 3515 1917 2180 +f 969 2948 306 +f 122 2693 2789 +f 3742 312 32 +f 1645 32 312 +f 2456 3721 3228 +f 943 202 3142 +f 1599 2464 2107 +f 2464 1599 1230 +f 492 499 116 +f 56 581 2291 +f 1881 2532 3944 +f 636 889 1329 +f 1881 1103 3617 +f 3902 1489 336 +f 2822 3324 1096 +f 3800 3524 307 +f 1944 2598 1059 +f 3411 1765 52 +f 3700 3311 3771 +f 2510 2900 2805 +f 3322 1292 2408 +f 2953 3892 1240 +f 572 1292 169 +f 2486 2851 1297 +f 1294 2953 1862 +f 2568 1811 2444 +f 247 3251 445 +f 1721 1292 3322 +f 3047 3825 567 +f 3180 567 113 +f 1532 1023 1642 +f 3639 1532 1642 +f 3381 367 2491 +f 1532 305 3544 +f 305 1532 3639 +f 872 3544 3327 +f 3632 3059 2020 +f 441 311 2600 +f 1378 1595 474 +f 57 2678 1185 +f 3509 1363 2890 +f 2287 1363 3509 +f 1895 2380 342 +f 1886 1895 342 +f 1921 902 3804 +f 1915 1778 3534 +f 1625 3472 1458 +f 3872 1446 2779 +f 2071 1682 3310 +f 225 3938 3922 +f 293 3939 2864 +f 2202 1348 284 +f 1157 2550 3865 +f 2076 885 2167 +f 60 1668 575 +f 220 260 1694 +f 2375 3866 3012 +f 2803 1138 1333 +f 2192 2876 2023 +f 2192 1026 2876 +f 817 272 2185 +f 18 1718 2312 +f 1280 1604 1548 +f 3332 2595 3164 +f 2548 1357 940 +f 2100 2280 2291 +f 3036 3308 1351 +f 2568 1172 1811 +f 3180 113 722 +f 2658 783 2802 +f 2903 605 2608 +f 2741 154 2011 +f 3675 3705 2118 +f 3226 1330 854 +f 2600 3903 88 +f 1277 611 1471 +f 3632 760 3059 +f 1878 2219 2207 +f 3464 1420 2922 +f 1147 1522 2871 +f 2628 1122 3357 +f 1133 2123 2361 +f 1907 3642 2811 +f 3663 593 2924 +f 1398 480 248 +f 480 2933 2109 +f 957 3215 3895 +f 957 3895 1624 +f 3332 1767 111 +f 2171 280 1239 +f 2215 204 718 +f 1495 2642 13 +f 1792 1357 1104 +f 983 3163 1830 +f 3633 3815 3891 +f 624 1949 1132 +f 1849 2123 1133 +f 1133 2361 2862 +f 3789 1870 1842 +f 3773 3535 1472 +f 3036 2325 1927 +f 2965 2123 1849 +f 1991 1659 2897 +f 861 193 3660 +f 3299 2819 137 +f 3597 1980 3450 +f 3597 135 1980 +f 3180 3169 3506 +f 1287 2255 1213 +f 809 3197 783 +f 1325 305 3639 +f 1480 2463 1090 +f 3161 2247 640 +f 3754 846 2886 +f 1056 646 3525 +f 1075 2731 3456 +f 3061 3037 235 +f 2176 1157 2750 +f 2176 3348 1157 +f 1653 96 479 +f 1521 3539 1894 +f 2076 48 3915 +f 1112 2473 2980 +f 3886 808 1602 +f 2795 860 2021 +f 3180 3506 567 +f 511 3060 2529 +f 3829 3032 3052 +f 3459 3901 533 +f 3537 2084 1764 +f 2844 2618 1314 +f 10 2071 972 +f 185 3247 1498 +f 2900 157 2805 +f 679 2321 3381 +f 736 3168 1752 +f 1448 1977 3065 +f 2470 966 3391 +f 1448 3065 3751 +f 2943 2529 1502 +f 3745 3541 507 +f 1371 1287 1213 +f 3541 1496 877 +f 2366 1625 2768 +f 3455 2121 132 +f 3825 113 567 +f 1325 
3639 3172 +f 416 2802 783 +f 511 2529 1373 +f 2892 247 3249 +f 1152 1139 2637 +f 206 2002 1357 +f 3455 132 3045 +f 1870 451 1842 +f 2280 3541 877 +f 884 769 3773 +f 983 3413 1059 +f 3578 1059 3413 +f 3249 1659 2892 +f 1683 2436 2829 +f 861 3660 2028 +f 1799 2920 80 +f 2892 80 247 +f 981 939 328 +f 1659 159 402 +f 1882 813 2197 +f 3652 2221 813 +f 3089 1125 1629 +f 909 169 1629 +f 3133 690 1422 +f 416 3326 1558 +f 3084 2844 1314 +f 1975 1314 3063 +f 2984 1287 3684 +f 1633 1155 3179 +f 3172 3639 1129 +f 1185 3859 1167 +f 1167 2395 626 +f 2768 1480 2571 +f 1381 2463 3008 +f 2779 3167 3872 +f 3483 329 3161 +f 3167 2779 2366 +f 652 723 2235 +f 1348 2202 1591 +f 1071 1266 2428 +f 1436 466 257 +f 3484 1344 2245 +f 3484 2245 2329 +f 2495 3777 346 +f 298 1057 2656 +f 2001 1045 2548 +f 3222 1287 2984 +f 3222 2984 2988 +f 1355 1480 3852 +f 1929 1580 3061 +f 3816 1659 402 +f 1480 1355 2571 +f 1770 1902 2353 +f 1165 3543 2965 +f 869 2207 404 +f 2280 2100 3541 +f 983 1059 2598 +f 3471 3848 3378 +f 3124 1944 1059 +f 1059 3578 3124 +f 1765 2484 52 +f 257 466 1160 +f 2148 722 113 +f 466 1770 2353 +f 667 1533 585 +f 3477 2398 1431 +f 2148 113 796 +f 8 2714 749 +f 1152 2637 1873 +f 1169 105 2639 +f 424 1055 2909 +f 386 3014 1181 +f 2814 666 3195 +f 2183 3111 1898 +f 348 1070 2918 +f 80 2028 247 +f 1991 2892 1659 +f 2430 722 2148 +f 2430 2148 3089 +f 2509 1399 3566 +f 2024 2509 3566 +f 3272 1309 2683 +f 2393 2715 1639 +f 589 3374 3455 +f 10 589 3455 +f 10 972 589 +f 3263 535 3037 +f 421 283 2378 +f 1916 3710 2451 +f 3267 2093 1391 +f 3267 340 2093 +f 3484 2329 1428 +f 1410 3484 1428 +f 3782 460 1410 +f 2323 3918 2839 +f 298 472 2639 +f 2068 3234 939 +f 2068 939 1924 +f 2524 2400 3597 +f 3917 580 2202 +f 82 3771 3311 +f 2408 555 3375 +f 2510 3249 445 +f 3045 132 1915 +f 3027 1770 1221 +f 1628 1770 3825 +f 2897 3816 2022 +f 2473 60 2980 +f 1240 1862 2953 +f 3263 3037 3061 +f 298 2656 141 +f 2904 2524 3597 +f 259 1767 3164 +f 1952 3481 1561 +f 667 585 1184 +f 3477 1431 1917 +f 3015 2507 727 +f 3225 3015 727 +f 3254 105 1169 +f 457 120 1626 +f 120 1828 1626 +f 126 1435 1217 +f 2943 1502 1787 +f 1507 2152 917 +f 1312 3586 1511 +f 2050 1717 1604 +f 2999 373 1380 +f 1157 2533 2750 +f 373 2045 1380 +f 793 3010 2124 +f 849 2414 3497 +f 2037 3154 655 +f 793 2767 130 +f 2183 1898 2725 +f 463 3247 185 +f 3376 691 1147 +f 3418 3349 363 +f 1630 3396 3418 +f 1630 1227 3396 +f 3716 530 1655 +f 1234 3247 1492 +f 954 499 3376 +f 954 116 499 +f 16 1271 3457 +f 254 1618 97 +f 150 3091 1021 +f 254 2184 150 +f 1618 254 150 +f 254 97 2139 +f 2999 389 3862 +f 3699 190 1489 +f 3767 3667 1714 +f 1096 3324 2006 +f 3112 3080 3766 +f 1096 2006 3524 +f 82 1977 3848 +f 3120 161 1635 +f 1330 1635 854 +f 1267 290 410 +f 919 1946 3758 +f 3075 3366 1991 +f 3820 2436 576 +f 3853 2912 873 +f 588 3343 4 +f 3583 661 3796 +f 1384 3583 3796 +f 1076 2807 2098 +f 1009 2617 1980 +f 2425 1076 2098 +f 813 2251 3652 +f 1777 1582 916 +f 1753 474 2906 +f 181 847 3227 +f 181 3227 3648 +f 519 3063 1194 +f 1550 1636 1213 +f 1550 1213 2461 +f 240 1293 3684 +f 2844 3409 360 +f 1069 360 875 +f 3827 694 638 +f 2873 3138 3381 +f 3138 367 3381 +f 3327 623 872 +f 1920 442 740 +f 3797 3659 171 +f 1868 361 3797 +f 1171 57 626 +f 1171 2027 57 +f 2446 2395 1826 +f 1171 3428 2027 +f 205 2446 1826 +f 453 2394 2160 +f 3934 1363 2287 +f 3008 1716 1381 +f 753 3804 2831 +f 2831 902 2108 +f 1686 1409 1716 +f 1686 2347 1409 +f 3493 3856 7 +f 994 471 669 +f 1376 1876 1317 +f 1533 667 2307 +f 2214 3055 1198 +f 1457 1050 3429 +f 3172 3136 2210 +f 602 3563 1328 +f 2109 2853 5 +f 
3340 1269 855 +f 1296 1062 3530 +f 1068 2983 2667 +f 211 3340 139 +f 2268 2035 2534 +f 2268 2534 3600 +f 3029 2511 2502 +f 1484 747 2485 +f 1484 766 747 +f 3029 2502 989 +f 1424 766 2014 +f 3398 3408 544 +f 2954 304 819 +f 1481 317 39 +f 1779 222 2067 +f 2409 1420 2772 +f 39 2990 883 +f 885 2076 1311 +f 2332 1773 2958 +f 787 3866 3176 +f 3176 2727 787 +f 634 3007 384 +f 634 384 398 +f 2465 1067 1334 +f 3793 3891 3815 +f 3793 2185 272 +f 3793 2669 629 +f 1774 2075 2285 +f 18 3318 322 +f 1247 3787 2285 +f 3829 2495 2050 +f 3032 3711 3791 +f 2455 3052 2765 +f 3266 894 2761 +f 472 3647 1123 +f 1685 1203 738 +f 2558 3490 689 +f 2431 3412 1448 +f 3800 2340 1963 +f 3027 1221 3082 +f 242 3359 3707 +f 2799 1060 2658 +f 3326 294 1558 +f 1638 3674 184 +f 2933 3069 2707 +f 2750 1685 2176 +f 358 2409 2622 +f 3010 793 130 +f 1800 3195 2184 +f 2440 2693 3410 +f 2862 2920 1799 +f 3218 3057 3467 +f 1716 2393 1381 +f 1269 1296 855 +f 2756 3051 2631 +f 161 854 1635 +f 2056 3823 2628 +f 2183 1017 3111 +f 2737 2503 2990 +f 3474 883 2503 +f 3315 174 3716 +f 1111 1234 1492 +f 3080 307 3766 +f 3679 2548 3650 +f 3693 463 185 +f 2846 2060 506 +f 1826 1167 2492 +f 2729 867 882 +f 569 957 1624 +f 1455 1371 1636 +f 1753 2906 1179 +f 3663 1921 3149 +f 2414 943 3497 +f 2918 2822 1096 +f 1389 1716 3008 +f 2794 2729 2295 +f 2918 1070 2822 +f 1066 3800 1963 +f 3270 1357 1792 +f 1357 3270 206 +f 2920 861 80 +f 1806 3166 2326 +f 803 3195 1800 +f 1770 3027 3825 +f 2999 1382 389 +f 2184 254 1800 +f 3793 3815 2185 +f 3205 3806 1543 +f 1284 3664 183 +f 1496 3743 877 +f 1496 708 3743 +f 492 751 1193 +f 3304 3805 250 +f 3292 3693 16 +f 2237 3292 16 +f 2383 2044 425 +f 2383 2631 2044 +f 1973 1087 72 +f 1959 758 1173 +f 1624 3895 3278 +f 3427 2851 411 +f 981 328 1310 +f 2407 1310 328 +f 84 3732 775 +f 87 2617 1009 +f 3583 3134 2868 +f 1378 1753 2204 +f 2688 2802 416 +f 416 1558 2688 +f 1753 3648 2204 +f 746 899 181 +f 3778 2891 1974 +f 3466 2060 3557 +f 2946 205 2866 +f 1430 205 500 +f 2160 2394 2446 +f 2705 184 66 +f 2433 3787 3619 +f 259 1535 11 +f 2768 2872 2463 +f 2751 671 1631 +f 2872 1625 1317 +f 3867 3856 3493 +f 366 2853 438 +f 1768 1022 2833 +f 29 804 719 +f 1346 3335 1939 +f 1713 822 2476 +f 804 3041 719 +f 2954 819 2641 +f 2225 3348 1202 +f 3474 2428 883 +f 1481 39 1266 +f 1266 39 2428 +f 198 3081 2922 +f 3081 3464 2922 +f 2535 2677 1037 +f 928 3062 794 +f 1743 2414 1981 +f 743 3882 2139 +f 122 3410 2693 +f 3007 84 775 +f 1700 3631 3279 +f 2977 1743 1981 +f 2685 72 1087 +f 122 3226 3410 +f 1464 3841 953 +f 1440 3012 2619 +f 1440 2812 1668 +f 3264 2712 3576 +f 1194 3063 1519 +f 2872 3008 2463 +f 2014 1612 1424 +f 272 3604 2669 +f 656 123 2404 +f 796 113 3825 +f 2814 1208 2721 +f 3012 1700 2619 +f 3679 2407 328 +f 1770 3690 1221 +f 1770 466 3690 +f 3062 928 3701 +f 1009 1980 1947 +f 506 2060 3466 +f 1826 2395 1167 +f 2160 2446 2332 +f 39 317 2990 +f 2122 2631 2003 +f 2712 3264 2399 +f 3342 610 2037 +f 2464 1544 691 +f 1383 425 779 +f 2674 410 157 +f 2038 2953 1294 +f 1310 893 2038 +f 796 1125 2148 +f 4 2335 1076 +f 641 3359 446 +f 3778 3516 1063 +f 1056 2307 646 +f 420 1979 1056 +f 1279 63 828 +f 1279 366 63 +f 3276 1526 104 +f 2049 1903 599 +f 1391 2677 3267 +f 1808 1269 2137 +f 1324 3371 3067 +f 911 3733 646 +f 2552 928 1850 +f 817 2666 427 +f 2667 2983 1713 +f 2677 3638 3267 +f 3787 1272 2285 +f 294 3775 1558 +f 84 3092 687 +f 2696 1577 1761 +f 2003 2631 2383 +f 2459 2189 1571 +f 4 1076 588 +f 953 3841 4 +f 2307 911 646 +f 817 427 272 +f 3152 1527 677 +f 3396 2311 3349 +f 3818 2975 1383 +f 1671 2341 2541 +f 2759 3857 
2519 +f 568 3817 2618 +f 2493 1838 2279 +f 3910 315 3371 +f 168 1068 2667 +f 577 2667 2476 +f 2050 1604 3711 +f 2077 1001 2552 +f 2558 2712 3490 +f 2310 1743 2977 +f 1352 1960 2481 +f 1374 3828 2270 +f 524 1172 572 +f 911 3910 3371 +f 3733 911 3371 +f 863 2842 2232 +f 3220 3005 1137 +f 3045 10 3455 +f 1741 629 2669 +f 2148 1125 3089 +f 953 3343 2102 +f 524 953 2102 +f 911 667 3910 +f 3829 2050 3711 +f 810 3495 910 +f 641 2759 2519 +f 687 3092 1669 +f 178 1731 3456 +f 84 687 2430 +f 1917 2339 2459 +f 1731 1110 3295 +f 2737 2990 2837 +f 2244 3432 616 +f 1634 1567 3908 +f 3094 2875 616 +f 3349 2843 363 +f 1478 910 543 +f 1021 1618 150 +f 1680 2478 1671 +f 2532 866 952 +f 166 515 2 +f 1086 799 428 +f 799 3285 428 +f 3058 2269 1194 +f 2306 3807 1633 +f 1032 3110 2683 +f 2527 3423 577 +f 1062 1296 3600 +f 2482 2645 3780 +f 2092 3547 620 +f 1445 1612 2014 +f 3828 961 3795 +f 2376 3435 802 +f 1086 737 3768 +f 1437 1270 460 +f 3485 1026 1024 +f 43 2136 3935 +f 748 3049 3357 +f 993 1680 1671 +f 242 3819 1582 +f 703 1847 2630 +f 1808 3203 2268 +f 703 2630 717 +f 3335 2842 2865 +f 1395 2544 1960 +f 2408 624 555 +f 3346 1336 1511 +f 2155 3710 1569 +f 3725 1636 262 +f 2171 1239 3710 +f 4 3343 953 +f 1743 202 943 +f 1743 943 2414 +f 2407 3679 2 +f 2493 2279 630 +f 2555 43 3935 +f 2310 2977 2626 +f 3600 1808 2268 +f 178 3456 2731 +f 328 939 2001 +f 2313 1110 1731 +f 2737 2837 2980 +f 3432 177 616 +f 2540 1567 1634 +f 3679 328 2001 +f 3094 616 177 +f 1051 677 1527 +f 799 3407 2766 +f 1602 1703 3133 +f 1638 1554 246 +f 3525 646 1554 +f 3867 3674 3856 +f 2663 1218 3563 +f 3922 1050 225 +f 459 731 2007 +f 459 2007 2643 +f 3768 3565 3626 +f 247 445 3249 +f 877 3743 2905 +f 1286 2865 2842 +f 2667 1713 2476 +f 3828 1563 2270 +f 1194 2269 519 +f 521 478 1527 +f 900 3287 761 +f 2979 2399 3417 +f 3739 1016 900 +f 521 1527 3152 +f 3142 1442 943 +f 2725 2777 2183 +f 1477 1100 852 +f 3495 810 3303 +f 2209 3806 654 +f 2062 543 910 +f 490 2057 183 +f 1383 2869 425 +f 97 1670 743 +f 665 336 1229 +f 2969 1331 1329 +f 665 1229 880 +f 3414 56 2280 +f 3414 2564 56 +f 2905 3414 877 +f 373 2999 2603 +f 1448 3412 3848 +f 1318 290 3851 +f 1537 3308 3946 +f 3772 148 1259 +f 2578 148 3772 +f 1132 148 555 +f 3375 148 2578 +f 137 1165 1849 +f 801 2260 3343 +f 3213 349 2904 +f 3680 2294 1882 +f 2896 3384 343 +f 3695 99 1734 +f 1975 3063 519 +f 372 1821 1822 +f 850 305 2970 +f 2491 367 3046 +f 872 623 1023 +f 1518 1337 1920 +f 2187 2212 3764 +f 374 24 144 +f 1801 3534 456 +f 3804 753 1893 +f 1995 1458 3472 +f 293 1253 3939 +f 998 1809 1451 +f 1022 1068 3532 +f 304 1501 819 +f 1302 1102 973 +f 2638 3938 2854 +f 3860 138 3258 +f 1031 3214 2296 +f 2375 2803 403 +f 1938 3117 2246 +f 1224 1577 579 +f 3126 2396 3529 +f 945 2613 186 +f 1205 1113 433 +f 2547 3141 1696 +f 1986 2277 511 +f 3060 308 3499 +f 533 2944 3459 +f 1911 381 3344 +f 2748 2231 551 +f 2018 2124 1981 +f 2414 849 2018 +f 3899 3434 3547 +f 336 155 3902 +f 3057 587 3295 +f 494 471 994 +f 3149 3310 1682 +f 3811 246 1458 +f 1616 1494 674 +f 3241 1407 963 +f 3068 2991 2589 +f 3899 3662 3434 +f 3414 2280 877 +f 221 3816 402 +f 2321 2903 3491 +f 2663 2633 2735 +f 3697 1735 905 +f 552 1561 3481 +f 2008 1031 215 +f 1336 3346 2759 +f 3057 3295 1110 +f 2624 2676 3836 +f 2465 1820 1067 +f 889 339 386 +f 3688 1352 3311 +f 372 1822 3001 +f 533 3791 1860 +f 523 3860 750 +f 1939 1580 26 +f 3860 523 3539 +f 3241 963 3402 +f 3860 3258 1798 +f 3376 3035 691 +f 3035 2107 691 +f 57 1185 626 +f 2900 445 2181 +f 171 311 755 +f 996 3068 2589 +f 445 3251 2181 +f 3249 159 1659 +f 2548 1045 1104 +f 
2999 3862 2603 +f 3417 900 761 +f 1147 691 2832 +f 3488 2004 3333 +f 2564 1901 1714 +f 492 1193 1100 +f 2391 1194 1519 +f 2671 3378 170 +f 1577 369 1761 +f 1137 3005 3362 +f 515 854 2 +f 1086 3768 2806 +f 624 2408 1172 +f 1703 690 3133 +f 1286 2842 863 +f 1406 478 849 +f 849 3871 1406 +f 849 3497 3871 +f 3417 2399 3264 +f 2869 3193 2003 +f 1353 3152 3144 +f 2927 297 1442 +f 1020 23 3605 +f 1284 183 1867 +f 490 183 3664 +f 2298 3016 2869 +f 2304 3068 692 +f 336 665 2118 +f 3705 336 2118 +f 3322 2408 3375 +f 3936 3075 2897 +f 3084 1314 1975 +f 1195 231 3839 +f 231 1968 3714 +f 3093 1968 231 +f 3093 3845 1968 +f 2060 3861 3557 +f 3861 2960 3557 +f 3859 1399 2960 +f 3856 1995 7 +f 494 3350 1825 +f 3856 3811 1995 +f 2597 494 1825 +f 1029 721 676 +f 2235 645 1025 +f 645 2327 1025 +f 482 36 3666 +f 3220 3054 210 +f 489 3749 1705 +f 108 1894 222 +f 3626 551 3874 +f 1587 1288 784 +f 3269 3459 1078 +f 214 2147 591 +f 3317 2940 701 +f 2927 1442 3142 +f 1547 3322 3375 +f 3637 3093 964 +f 494 2601 3350 +f 482 3178 1732 +f 282 3723 2706 +f 518 2813 3186 +f 339 354 3014 +f 3068 2304 2216 +f 3410 3226 515 +f 3051 3760 2044 +f 1319 1922 448 +f 1837 3149 1921 +f 1959 2304 1781 +f 263 2979 165 +f 3269 1078 2319 +f 307 2297 3412 +f 1944 3410 2938 +f 434 3775 3788 +f 2291 2280 56 +f 954 1147 2871 +f 3895 3215 2096 +f 902 2831 3804 +f 885 1311 1863 +f 2761 894 1548 +f 1714 1901 2356 +f 3495 2406 910 +f 1749 3581 2018 +f 3213 2904 3383 +f 231 3714 1648 +f 3126 3529 2986 +f 41 2986 3538 +f 41 3538 460 +f 2018 1981 2414 +f 1749 2018 478 +f 221 402 1046 +f 2198 905 1014 \ No newline at end of file diff --git a/utils/init_box_reversed.obj b/utils/init_box_reversed.obj new file mode 100644 index 0000000000000000000000000000000000000000..54823964f4256a0cca049315703a24205ce181da --- /dev/null +++ b/utils/init_box_reversed.obj @@ -0,0 +1,11897 @@ +v -0.27000064 -0.52210689 -0.10887048 +v 0.21038848 0.06428358 -0.29665637 +v 0.24776192 0.08334409 -0.13271043 +v -0.08708352 0.29819918 -0.01521328 +v 0.05185792 0.36226943 -0.39246556 +v -0.66112256 -0.01098444 -0.10579676 +v 0.21249536 0.31099534 -0.03464627 +v -0.09223168 0.65448046 -0.35759717 +v -0.35830784 0.21386869 -0.38654920 +v -0.65313792 0.09009011 -0.38760200 +v -0.35228416 -0.28916469 -0.00972969 +v 0.11041792 0.35942549 -0.03126616 +v -0.06717184 -0.33267465 -0.01421750 +v -0.10187008 0.68382722 -0.02001131 +v 0.22924544 -0.33157176 -0.07421441 +v -0.57617408 0.10580269 -0.38357994 +v -0.14688000 0.68138278 -0.16293868 +v -0.64098048 0.69160962 -0.10787300 +v 0.07796992 0.40226501 -0.40655795 +v -0.35431167 0.04194051 -0.39821979 +v -0.63273215 0.69053251 -0.30405110 +v 0.14977281 0.67199969 -0.26178542 +v 0.25594625 0.53890777 -0.12259605 +v -0.63527167 -0.32211763 -0.10282560 +v -0.63329023 0.55476594 -0.01286767 +v -0.06115839 -0.23702368 -0.01431240 +v -0.23153919 0.31443030 -0.38861418 +v -0.60836351 -0.35777915 -0.01985258 +v -0.53358847 -0.51496226 -0.19450869 +v -0.60561919 0.66293055 -0.24215306 +v -0.42473215 0.68937707 -0.11923976 +v -0.20180222 0.06296358 -0.38720560 +v 0.21908994 0.34413561 -0.28053543 +v 0.25059074 0.67859608 -0.11891252 +v -0.63414270 0.68893218 -0.11646010 +v -0.19297278 0.66484582 -0.11708182 +v 0.00365826 0.61536139 -0.39309707 +v -0.63398910 -0.48675492 -0.11368596 +v -0.48772606 0.37498656 -0.01234084 +v -0.56898046 0.00753850 -0.00882923 +v 0.21361922 0.46669605 -0.02014366 +v -0.60098302 0.47009915 -0.38629919 +v -0.62783742 -0.15975833 -0.20330814 +v -0.34139901 -0.48701292 -0.10694063 +v -0.37219325 0.67331761 
-0.11734438 +v -0.14543357 -0.52764970 -0.20374049 +v -0.56998909 0.16052264 -0.02253948 +v -0.65382653 0.04550539 -0.11164206 +v -0.66424829 -0.13685215 -0.10723808 +v -0.50686717 -0.05986087 -0.02169860 +v 0.10862339 0.54396427 -0.39363924 +v -0.65748733 -0.12135780 -0.39188853 +v 0.03403779 0.61002707 -0.40766907 +v -0.58392316 -0.26430294 -0.38002303 +v -0.63168252 -0.19700441 -0.10171151 +v -0.09939452 0.65238744 -0.39014593 +v -0.63677692 -0.45052364 -0.02250757 +v 0.19487748 -0.49635941 -0.11960063 +v 0.21262084 0.48297489 -0.03342751 +v 0.24146692 0.13541140 -0.11124077 +v 0.19965956 0.66704267 -0.40445918 +v -0.65752572 -0.12622453 -0.14018983 +v -0.58534908 -0.51273119 -0.17963286 +v -0.66327804 -0.29124546 -0.10305539 +v -0.61017084 0.41780049 -0.02411528 +v -0.23136508 -0.42252982 -0.39561611 +v 0.22084612 0.39774221 -0.27604738 +v -0.23264763 0.51736182 -0.39009422 +v 0.12190725 0.14258856 -0.01737122 +v 0.24644357 0.38563851 -0.11633967 +v 0.19606277 -0.45805311 -0.09777649 +v -0.15125243 -0.49169424 -0.02828095 +v -0.40740603 -0.16277461 -0.02231054 +v -0.60659963 0.63697267 -0.38509446 +v 0.22244357 0.44487274 -0.38349718 +v -0.60985595 0.52215463 -0.11329318 +v -0.23299067 0.22605063 -0.40104648 +v -0.56524795 0.61446220 -0.02706985 +v -0.29315835 -0.49726814 -0.20103316 +v 0.25483525 0.51137012 -0.29126087 +v -0.65741819 -0.08263165 -0.29173708 +v 0.12575237 0.64979583 -0.29551974 +v -0.51188475 0.28612909 -0.02411954 +v -0.25293306 0.68407404 -0.15307495 +v 0.23205894 -0.29500318 -0.30309874 +v -0.61071098 0.63449371 -0.03028953 +v -0.07527930 0.68302524 -0.01998238 +v -0.28104186 0.66013587 -0.36884734 +v 0.21247238 0.11491731 -0.12103642 +v 0.23084038 -0.28027278 -0.06417876 +v -0.63156474 0.69348383 -0.17847708 +v -0.07511546 0.65858942 -0.03266834 +v -0.63675386 -0.42442805 -0.05634147 +v -0.65247226 0.10745503 -0.10802236 +v -0.21597946 -0.47155601 -0.38274440 +v 0.15830278 0.15757619 -0.39116192 +v -0.66219258 -0.11392658 -0.10606188 +v 0.07379462 0.00660110 -0.38968039 +v -0.65888250 -0.23184603 -0.00803814 +v 0.19375366 0.67084998 -0.21352039 +v -0.09774330 -0.49583745 -0.10963433 +v -0.16655354 -0.52675438 -0.15376604 +v 0.08973318 0.20402582 -0.01712015 +v -0.45868537 0.14692408 -0.39833066 +v 0.01062919 0.40614304 -0.03015932 +v 0.08272135 0.59962934 -0.39358369 +v 0.19490567 -0.48571157 -0.08236703 +v -0.28019705 -0.52358210 -0.11537027 +v -0.15825401 -0.49241623 -0.11279231 +v -0.66742265 -0.36338362 -0.10607123 +v 0.03452935 -0.49830696 -0.10583378 +v -0.64244217 0.03009302 -0.10644656 +v -0.38225657 -0.40508848 -0.39390263 +v -0.28327417 -0.42846125 -0.39494246 +v -0.66269177 -0.30096897 -0.01144719 +v -0.66528505 -0.37076634 -0.10764448 +v -0.11739641 0.10807776 -0.02682090 +v -0.65673465 -0.11872821 -0.10574441 +v -0.25827321 0.53802013 -0.40383297 +v 0.01967880 -0.38737354 -0.01543031 +v 0.24266760 0.01662592 -0.11999086 +v -0.32026616 0.62951273 -0.02965845 +v 0.07755272 -0.23646665 -0.01630778 +v 0.23488520 0.17381296 -0.11792611 +v -0.11113720 0.40170220 -0.40401298 +v -0.65113336 0.37821510 -0.10686190 +v -0.47409400 -0.48755687 -0.37877086 +v 0.24550408 0.05575015 -0.12109940 +v -0.22891256 -0.45697609 -0.02501659 +v -0.42027512 -0.15472077 -0.38319972 +v -0.60962552 0.51203036 -0.23598796 +v 0.21752584 0.47804761 -0.03814426 +v -0.63902456 -0.46277219 -0.37390262 +v 0.13504009 -0.25460368 -0.03046996 +v -0.08580087 -0.24380192 -0.38652921 +v 0.23747593 -0.00087325 -0.11641371 +v -0.54628855 0.67440987 -0.11102885 +v -0.28740087 -0.52363980 
-0.22379138 +v 0.22577161 -0.50820524 -0.26429018 +v -0.66811639 -0.48924914 -0.10277368 +v -0.63728631 0.60472143 -0.26207712 +v 0.23306249 -0.14165947 -0.01896660 +v -0.58937335 0.67548215 -0.03656166 +v 0.22402313 -0.52022207 -0.09377125 +v -0.66135031 -0.02723576 -0.10825557 +v -0.63251191 0.65826654 -0.11158376 +v 0.20701449 0.07767658 -0.06567753 +v -0.17961207 -0.52237272 -0.39524588 +v 0.00393226 0.65379500 -0.15787426 +v 0.14287114 -0.09136681 -0.40208846 +v -0.65920502 -0.14651202 -0.01131017 +v 0.20066570 -0.16869804 -0.11525580 +v 0.19485450 -0.47601178 -0.11621285 +v -0.64068854 0.58920485 -0.01610054 +v -0.29072374 -0.51198769 -0.11113098 +v -0.44988406 -0.07084872 -0.39637655 +v -0.65172470 0.10561444 -0.11393788 +v -0.66220790 -0.05017064 -0.10423245 +v 0.11179786 -0.03135914 -0.40225738 +v -0.63072246 -0.37270129 -0.01909639 +v 0.16558090 -0.13642731 -0.03078359 +v -0.10703350 0.05992061 -0.01413622 +v 0.23900938 -0.12460553 -0.12339608 +v 0.01357322 -0.17802714 -0.02854479 +v -0.57689846 0.69699776 -0.06594435 +v -0.42877686 0.44233578 -0.39139405 +v -0.60899574 0.55054539 -0.34082639 +v 0.23447818 -0.22978565 -0.26503617 +v -0.42020598 0.22831938 -0.02421486 +v -0.55844086 -0.49090645 -0.17270242 +v -0.64491510 0.48285127 -0.05267199 +v -0.09107445 -0.50319952 -0.24662811 +v -0.61468661 0.21470328 -0.38314354 +v -0.65593845 -0.06076059 -0.39244896 +v -0.64215285 0.61297649 -0.11356893 +v -0.04116213 0.67724526 -0.12151303 +v 0.24460043 0.67563087 -0.02785668 +v -0.33995509 -0.48723695 -0.11027052 +v -0.56072181 0.66566437 -0.38433594 +v -0.46302965 0.36976755 -0.38644198 +v -0.38761461 -0.23050989 -0.02232203 +v 0.20159499 -0.25171500 -0.26965994 +v -0.01006837 0.66980183 -0.40661582 +v 0.01486603 -0.41263872 -0.02886097 +v -0.09518837 0.67926788 -0.12104238 +v 0.19385611 -0.51021802 -0.37981662 +v 0.22466315 -0.51220971 -0.02655068 +v 0.03336203 -0.03112573 -0.38910994 +v -0.62003189 0.08611272 -0.11113992 +v -0.30806261 -0.49012992 -0.10738873 +v -0.33086196 -0.48660374 -0.10939262 +v -0.30197236 -0.49144635 -0.10724617 +v -0.25870836 -0.42122528 -0.01113655 +v -0.38638580 -0.07637128 -0.00996246 +v 0.20999692 0.51621443 -0.02022834 +v -0.57378548 -0.15894102 -0.39402115 +v -0.66091764 -0.01285578 -0.11007733 +v 0.13997068 -0.51022398 -0.21137606 +v 0.03842060 0.54050779 -0.03131595 +v 0.08440588 0.57171196 -0.01852999 +v 0.22733580 -0.48814434 -0.20556480 +v 0.20183052 -0.25306645 -0.20314813 +v -0.00655091 0.15246767 -0.38984677 +v 0.20908301 0.00734768 -0.20435923 +v -0.61202163 0.36575529 -0.38365379 +v 0.21570317 0.11373857 -0.11755417 +v -0.61655027 -0.05446478 -0.39402243 +v 0.14960909 -0.50994706 -0.38351357 +v 0.20533261 0.49307236 -0.39382562 +v -0.66558707 -0.39732274 -0.05375288 +v -0.57443827 0.31822017 -0.02387059 +v -0.14648563 -0.51573300 -0.11326339 +v 0.25529101 0.57384783 -0.36049044 +v 0.08758285 -0.03263235 -0.01669163 +v 0.19445261 -0.10297026 -0.39016464 +v -0.24197875 0.03538423 -0.01175827 +v 0.25282061 0.55792814 -0.12098280 +v 0.13032205 -0.16332854 -0.40163013 +v -0.66125298 0.00715710 -0.10674871 +v -0.30991858 0.19809149 -0.01197317 +v -0.04182258 -0.52718097 -0.39602187 +v -0.61918962 0.12578498 -0.37917215 +v -0.65846002 -0.09855795 -0.01192849 +v 0.20401166 -0.53553551 -0.11790312 +v 0.20142350 0.05425467 -0.03131808 +v 0.21521422 0.23012547 -0.35310742 +v -0.52401906 0.09788346 -0.00979650 +v -0.66359538 -0.11147671 -0.10359328 +v 0.06910478 -0.29236293 -0.40019393 +v 0.20674574 0.56318516 -0.02019983 +v 0.24738574 0.40456191 
-0.40630475 +v 0.21859598 0.54927266 -0.39421883 +v -0.14314738 -0.40763602 -0.39693934 +v 0.21667342 0.45186087 -0.39352879 +v 0.25600782 0.66446126 -0.36703217 +v -0.10836722 -0.52806240 -0.11548432 +v -0.61491698 0.48209912 -0.06435281 +v -0.20725746 -0.33454436 -0.38439319 +v 0.19783182 0.65848994 -0.40896869 +v -0.33949426 0.05202205 -0.02363909 +v 0.23531023 0.66617244 -0.12187049 +v -0.62706417 -0.12945215 -0.37787017 +v -0.13421553 0.58494228 -0.40572709 +v -0.20236017 0.27362260 -0.38865671 +v -0.62899441 0.08230184 -0.00907222 +v 0.20593679 0.06186302 -0.03335985 +v -0.43695089 0.42522225 -0.40100798 +v -0.14129137 -0.28952894 -0.38555917 +v -0.10976753 -0.09665939 -0.01373323 +v 0.21792527 0.06572086 -0.11708394 +v -0.29652977 0.66167378 -0.11701884 +v -0.26308849 0.19447549 -0.02507999 +v -0.38625777 -0.50934249 -0.11006286 +v 0.20488207 0.01406962 -0.11492302 +v 0.19639055 -0.37729913 -0.38733158 +v 0.18462479 -0.49867994 -0.08497988 +v -0.54084593 0.69177192 -0.23284572 +v -0.41894385 0.48772261 -0.38818330 +v -0.56770033 0.40022671 -0.38581002 +v -0.02762737 0.19412915 -0.40292209 +v -0.11084273 0.22512528 -0.02738731 +v -0.03937521 -0.50392276 -0.38164648 +v -0.00595185 -0.17117184 -0.01511540 +v -0.34009072 -0.33859539 -0.38266951 +v 0.25892112 0.63899302 -0.20230937 +v 0.22946832 -0.04624399 -0.01813891 +v -0.53137904 0.68900526 -0.33181396 +v -0.13288432 -0.15647171 -0.39912578 +v -0.28619248 0.50497150 -0.38932356 +v -0.23899120 0.65751123 -0.38767159 +v -0.61179888 0.60099822 -0.05962882 +v 0.20284688 -0.13364425 -0.03921196 +v -0.65239024 0.32957694 -0.10441884 +v -0.52842480 -0.28697318 -0.38049346 +v -0.46222320 0.66193336 -0.38662878 +v -0.66077936 -0.20536253 -0.39014784 +v 0.21184273 0.09080987 -0.13771017 +v 0.22695185 -0.02740294 -0.12119175 +v -0.65777391 -0.06630605 -0.10558782 +v 0.22172689 0.46100181 -0.39123702 +v -0.60919279 0.54119581 -0.29466483 +v -0.62348783 -0.13078237 -0.00757174 +v -0.12541167 -0.04562055 -0.01363450 +v -0.40183535 -0.02702717 -0.02290545 +v -0.45097455 -0.51708651 -0.13813572 +v -0.47058415 -0.01792595 -0.00957309 +v -0.62786543 -0.15327618 -0.32804427 +v 0.06749201 -0.52202982 -0.01709802 +v -0.57800174 0.68357229 -0.40297338 +v -0.03009006 -0.50582504 -0.33018282 +v -0.07070446 -0.52961004 -0.25839552 +v -0.29938158 0.21991563 -0.38722709 +v -0.56976110 -0.36999395 -0.39178532 +v -0.66280174 -0.27308676 -0.04400234 +v -0.46027502 -0.51635730 -0.39048785 +v -0.01264878 -0.50626141 -0.38087964 +v -0.32127982 -0.49791911 -0.31444272 +v -0.57881838 0.66951299 -0.02936695 +v 0.05233426 -0.49758360 -0.11349446 +v -0.46467566 -0.36828521 -0.38055730 +v -0.48035565 -0.51697201 -0.19538021 +v -0.47701997 0.67256737 -0.03526417 +v -0.01757421 -0.53090101 -0.20461115 +v 0.19877139 -0.23301221 -0.03276493 +v 0.23557651 -0.16942196 -0.30460304 +v -0.66284269 -0.31104586 -0.01143698 +v -0.30697709 0.39203975 -0.02663707 +v 0.05348627 0.67533386 -0.22881070 +v 0.18849811 0.64835864 -0.17635190 +v -0.66014701 -0.17322668 -0.34237942 +v 0.02340627 0.27171272 -0.39131957 +v 0.18074131 -0.30482936 -0.01787465 +v -0.61212397 0.57531345 -0.02896268 +v -0.65006572 0.37087417 -0.10299368 +v 0.24975124 0.36916694 -0.36821327 +v 0.22225940 0.59837604 -0.12209561 +v -0.12831980 -0.19773637 -0.38637111 +v -0.08175084 -0.51875025 -0.01490731 +v 0.19422740 -0.48898989 -0.09365422 +v -0.16490476 -0.49166360 -0.10995349 +v -0.53240812 -0.42375046 -0.02049686 +v -0.07494636 0.07737559 -0.02762051 +v 0.06794516 -0.49580938 -0.03186278 +v -0.62733036 -0.25277883 
-0.37959236 +v -0.61613548 0.43695554 -0.10662189 +v -0.53088748 -0.20960441 -0.39400563 +v 0.14489621 0.14496869 -0.40379360 +v -0.16434155 -0.52644670 -0.37242681 +v -0.23969771 -0.22345400 -0.38524619 +v -0.12631787 -0.52766490 -0.35446557 +v -0.63876587 0.51889986 -0.01265617 +v -0.43756011 0.04825668 -0.38458678 +v -0.62040299 0.06708247 -0.38016430 +v 0.07503125 0.21471246 -0.03006101 +v 0.20755221 -0.25562626 -0.12100195 +v 0.23321621 -0.20410974 -0.02631833 +v -0.57867754 0.67082655 -0.19502316 +v -0.63956970 0.52557051 -0.10915049 +v -0.51372778 -0.35170031 -0.00755302 +v 0.03680534 0.44569159 -0.40642264 +v 0.13516822 0.64963692 -0.25507200 +v -0.37663978 -0.49691191 -0.34004679 +v -0.51026154 -0.39632875 -0.39212599 +v 0.17593110 0.37418997 -0.03243684 +v 0.25208598 0.44566372 -0.36594957 +v -0.63972330 0.53239506 -0.32237598 +v 0.21818390 0.48435962 -0.12001383 +v 0.12317974 -0.09105064 -0.03021293 +v -0.62979305 -0.22408247 -0.02110029 +v -0.59698409 0.00470413 -0.02144072 +v -0.62450409 -0.05383751 -0.19739048 +v -0.45470697 -0.51826239 -0.24474229 +v -0.55954921 0.68854308 -0.11286934 +v 0.19283479 -0.17497721 -0.03091764 +v -0.17939945 0.32079801 -0.02720985 +v -0.35371241 -0.52067852 -0.11127184 +v 0.18892823 -0.46912730 -0.01794529 +v 0.23876119 -0.00001435 -0.39669570 +v -0.60990697 0.50026643 -0.34420776 +v -0.64383209 0.43357953 -0.01304086 +v -0.65955049 0.00328571 -0.10066935 +v -0.05006313 0.67464912 -0.36553466 +v 0.19770135 -0.49347484 -0.17109215 +v -0.23808745 -0.47759429 -0.39487460 +v 0.02930455 0.18580137 -0.02935971 +v -0.26624233 -0.44628224 -0.38263312 +v -0.29638633 0.08180055 -0.39905941 +v -0.40577513 -0.48844966 -0.10580016 +v -0.59515113 -0.20298931 -0.38045815 +v -0.65018857 -0.02715945 -0.10586271 +v 0.24118808 0.35447624 -0.40631625 +v -0.63630056 0.63958293 -0.36253095 +v -0.61335528 0.06524249 -0.39576250 +v -0.01500136 0.63571417 -0.40719947 +v -0.40030184 -0.48851079 -0.10608102 +v 0.21442840 -0.52346498 -0.01880915 +v 0.25184536 0.62283677 -0.06531028 +v -0.65517032 -0.01528978 -0.21214800 +v 0.08474392 -0.49975801 -0.11531367 +v 0.22895384 -0.34714404 -0.02274928 +v -0.21404392 -0.52387321 -0.11001733 +v 0.01731096 -0.50736958 -0.34957135 +v 0.25589272 0.53447217 -0.24085365 +v 0.24151064 0.07355501 -0.35379875 +v -0.50689000 0.68896145 -0.11446726 +v -0.64581352 0.61418134 -0.11299828 +v -0.45304808 0.66588980 -0.31365249 +v -0.05916136 -0.10916171 -0.40016839 +v -0.64504296 0.37161320 -0.11459876 +v -0.63253736 -0.29826114 -0.02168243 +v -0.65938920 -0.16632481 -0.21511662 +v 0.25026584 0.49381208 -0.12063981 +v -0.05970664 -0.06472465 -0.38793734 +v -0.37618408 -0.48729950 -0.11008073 +v 0.19998233 -0.36888865 -0.11847165 +v -0.45665511 -0.11008269 -0.02197521 +v -0.26134503 -0.26888853 -0.02398804 +v 0.24786969 0.25815961 -0.12106493 +v 0.21587993 0.44827789 -0.05708575 +v 0.20605977 0.03719383 -0.11840910 +v -0.63277543 0.57316411 -0.40119246 +v -0.25527015 0.39723718 -0.38887802 +v -0.65699559 -0.01626507 -0.10621166 +v 0.07836185 -0.03438111 -0.02966909 +v 0.23834905 0.04111305 -0.02073134 +v -0.63913959 0.69202054 -0.06268637 +v 0.22530073 -0.48979512 -0.02411060 +v -0.65867239 -0.05511574 -0.10499588 +v -0.65801191 -0.11136902 -0.23216060 +v -0.66247910 -0.00681195 -0.10806534 +v -0.63220966 -0.11763546 -0.10580102 +v -0.16425958 0.62175542 -0.39147383 +v -0.32609510 0.68649572 -0.11703075 +v -0.10391270 0.39397964 -0.02873416 +v 0.22318874 0.61354226 -0.39429840 +v -0.63922918 -0.48236620 -0.37391132 +v -0.44064230 0.68942386 
-0.11715417 +v -0.63592678 -0.38510293 -0.29237837 +v -0.65754342 -0.11067025 -0.11240081 +v -0.63063526 -0.25680250 -0.10584825 +v 0.20169754 -0.49639291 -0.11982617 +v -0.11874022 0.66929460 -0.40658265 +v -0.61668837 -0.00671060 -0.38216820 +v 0.13764635 0.58999252 -0.03303558 +v -0.62513381 0.69829494 -0.10623975 +v -0.53072613 0.69297945 -0.11920913 +v -0.27050725 0.58817661 -0.40428767 +v -0.00832741 -0.38926452 -0.39861023 +v -0.62106341 0.01628366 -0.38108581 +v 0.01531419 0.39124858 -0.40559983 +v 0.24412955 0.09679212 -0.23436110 +v 0.10914331 0.66697758 -0.40801907 +v 0.19697435 -0.51035261 -0.16549367 +v -0.63597029 -0.40088263 -0.02680644 +v -0.60036069 0.67079711 -0.22253133 +v -0.15533029 -0.50211048 -0.26133180 +v 0.18814236 -0.45883164 -0.38841394 +v -0.62639076 -0.10343879 -0.28111500 +v -0.49856740 0.66768986 -0.27455372 +v 0.23153180 -0.26139835 -0.02295992 +v 0.23485212 -0.12912320 -0.06017607 +v -0.61708772 -0.48387340 -0.10561590 +v -0.65834212 -0.10961787 -0.34145278 +v 0.20542236 -0.09210259 -0.33733904 +v -0.57155812 0.04769387 -0.02172329 +v -0.02891236 -0.50487721 -0.21691370 +v -0.06822884 0.41542131 -0.39116105 +v -0.65345508 0.03500081 -0.11410044 +v -0.16054499 -0.51659340 -0.11339446 +v -0.65924579 0.09711766 -0.10695637 +v 0.19929373 -0.18488812 -0.03210959 +v 0.22682397 -0.40715429 -0.11488345 +v -0.41279715 -0.18199298 -0.00928031 +v -0.24037603 -0.51607078 -0.11064288 +v -0.66499555 -0.43057397 -0.38919929 +v 0.21720605 0.29082522 -0.37681758 +v -0.63529187 -0.48124617 -0.02580215 +v 0.19636253 -0.44981366 -0.38300654 +v -0.02575587 0.43756256 -0.40547472 +v 0.24764957 0.38256201 -0.40582708 +v -0.65603811 -0.03472269 -0.11064969 +v -0.24578275 0.65378606 -0.39036208 +v 0.22937630 0.64118820 -0.31720006 +v -0.42990562 0.52266133 -0.01409792 +v -0.61401570 -0.35671526 -0.37832275 +v -0.61726946 0.20588106 -0.37722677 +v -0.18726626 0.68174702 -0.12015214 +v -0.47624674 -0.49565864 -0.37226489 +v -0.39165410 -0.10123855 -0.38394380 +v -0.10097378 -0.10787858 -0.38731390 +v -0.65669602 0.01194897 -0.10596570 +v -0.24839906 -0.23533298 -0.39742488 +v -0.50443745 -0.45243931 -0.39161682 +v 0.06444319 -0.09105949 -0.02942440 +v 0.22369823 0.66982746 -0.16487704 +v -0.59549665 -0.09067988 -0.02108454 +v 0.04479263 -0.26241842 -0.38794288 +v -0.17734113 0.67908257 -0.31113943 +v -0.66554081 -0.40456563 -0.13810295 +v 0.20348191 -0.11060281 -0.12005001 +v -0.32866785 -0.49520180 -0.11278976 +v 0.00689183 -0.49407494 -0.03081891 +v 0.25137183 0.49517107 -0.40623730 +v -0.66019553 -0.18810736 -0.15142255 +v -0.13393633 -0.50014997 -0.11526261 +v 0.12624671 -0.34237322 -0.40039840 +v 0.21612063 0.18562266 -0.11874869 +v -0.09180129 0.11617415 -0.40152502 +v -0.62590945 0.04351124 -0.10565590 +v -0.63704801 -0.48442185 -0.10380520 +v -0.51252961 -0.50570881 -0.00746876 +v -0.64879328 0.30529898 -0.11105906 +v -0.12769760 0.34884232 -0.01506476 +v 0.15808032 -0.49780986 -0.11413278 +v -0.10732000 0.66066778 -0.03325474 +v -0.34389728 -0.52201730 -0.29483676 +v -0.00763616 -0.49595407 -0.11084203 +v 0.20933408 0.02593829 -0.12463186 +v 0.21919520 0.54282838 -0.08379729 +v -0.61832672 0.02904317 -0.38204587 +v -0.15694816 -0.11531704 -0.02579789 +v -0.44923872 0.68198961 -0.40312400 +v -0.14645728 0.65656865 -0.33122692 +v 0.20617248 -0.14925122 -0.11762013 +v -0.62529504 0.03136342 -0.02660005 +v 0.20702496 -0.45345843 -0.11350936 +v -0.61343968 0.34689918 -0.19182050 +v -0.06480351 0.18868370 -0.02799031 +v -0.65921247 -0.10745570 -0.03784723 +v -0.35301599 
-0.52110875 -0.13932045 +v -0.63143647 -0.28069070 -0.10549079 +v -0.18841311 -0.38923922 -0.39650530 +v -0.27557343 0.65817636 -0.38795117 +v 0.22055201 0.30439746 -0.11907338 +v -0.14052831 0.66035056 -0.03284621 +v 0.20489505 -0.10015153 -0.38334867 +v 0.22324513 -0.53374249 -0.11474174 +v 0.09682209 0.12602258 -0.40328509 +v 0.09094945 -0.53380811 -0.24088876 +v -0.66724575 -0.48394024 -0.10579463 +v -0.36573663 0.21077839 -0.01170635 +v -0.63636959 -0.45405838 -0.17272922 +v -0.60945886 0.43315729 -0.38481489 +v 0.23300642 -0.18716227 -0.11466046 +v -0.54154462 -0.50597996 -0.10739511 +v -0.55981278 -0.50549483 -0.00759089 +v -0.63778526 0.62392521 -0.10914028 +v 0.04117282 -0.20844622 -0.38832033 +v -0.16722910 0.68080580 -0.23657309 +v -0.04589022 0.65269858 -0.38741669 +v -0.65274334 0.05266364 -0.12954991 +v -0.63370717 0.66919363 -0.13023120 +v -0.35383773 -0.48855507 -0.11068160 +v 0.22814243 -0.42018387 -0.31118602 +v -0.00635357 -0.10418937 -0.40072712 +v 0.24989475 0.55202711 -0.02196031 +v -0.39232221 0.66580766 -0.15020719 +v 0.22251043 -0.53455919 -0.33587429 +v -0.66016221 -0.01246300 -0.10743937 +v -0.51203549 0.10424176 -0.39754978 +v -0.21317597 0.16325326 -0.40059820 +v -0.28000477 0.45336902 -0.01447155 +v 0.08703011 0.63338345 -0.40807885 +v -0.66590941 -0.35197273 -0.11262422 +v -0.00785372 0.50932389 -0.01736186 +v 0.06948388 0.06353086 -0.38994250 +v 0.03411748 0.56280792 -0.39316604 +v 0.01446948 0.29162917 -0.02953972 +v 0.09312548 -0.16538581 -0.38911715 +v -0.64347100 -0.04375956 -0.10629124 +v -0.61854684 0.32331002 -0.06329235 +v 0.16728868 0.50159276 -0.40757012 +v -0.60537308 0.65040147 -0.02752561 +v 0.01331492 -0.07843649 -0.02873416 +v -0.17082076 -0.00982961 -0.38720030 +v -0.19176668 -0.52418309 -0.39438009 +v 0.24215332 0.15248804 -0.02385059 +v -0.04749532 -0.52843475 -0.39338177 +v -0.18773468 -0.48666629 -0.11114545 +v -0.54554844 -0.00713043 -0.02161009 +v 0.24028964 0.14148033 -0.11702267 +v 0.21282084 0.13122879 -0.17286114 +v -0.65679580 -0.00151295 -0.10632572 +v -0.62139100 -0.01288083 -0.38119540 +v -0.11413980 0.49772850 -0.02946567 +v -0.65084124 0.14051896 -0.11423662 +v -0.56294107 0.67492944 -0.11300296 +v 0.20252709 -0.15456004 -0.11967978 +v -0.57921755 0.69082862 -0.30920383 +v 0.22557989 -0.53390706 -0.16730735 +v -0.30717403 0.09641618 -0.02405570 +v 0.22371621 -0.09769750 -0.01813423 +v -0.27233243 -0.23678282 -0.01083015 +v 0.25134629 0.36000431 -0.23124354 +v -0.63532507 -0.49061379 -0.25725973 +v -0.48917723 -0.50981611 -0.05230517 +v -0.07028955 0.41722229 -0.01616821 +v -0.65287131 0.08494579 -0.33451360 +v -0.05712347 -0.20339297 -0.38718370 +v -0.55250651 -0.22341649 -0.02053218 +v -0.14019802 -0.20357494 -0.39862281 +v -0.10675418 -0.50281698 -0.37924087 +v -0.66247898 -0.12452102 -0.10596315 +v -0.01099994 0.65630364 -0.03304878 +v -0.33209306 -0.49664623 -0.24500145 +v 0.22306342 -0.43863076 -0.11864868 +v -0.61577946 0.21777296 -0.02219479 +v 0.17471270 -0.22137584 -0.40154397 +v -0.25321946 0.68530810 -0.11748609 +v 0.24753702 0.20416266 -0.18370108 +v -0.59131098 0.01625452 -0.39559376 +v -0.04800986 0.65232009 -0.11765205 +v -0.34226650 -0.18823144 -0.02277779 +v 0.16893990 0.65428746 -0.03562333 +v 0.02679846 0.65992194 -0.03660251 +v -0.25033945 -0.52072436 -0.10846706 +v 0.10749479 0.67296630 -0.12310415 +v 0.21762855 0.26630840 -0.11959382 +v 0.23732263 -0.10416290 -0.31202987 +v -0.48944601 -0.48704383 -0.10436139 +v -0.03000793 0.19362055 -0.01556946 +v -0.60671961 0.63573056 -0.26543662 +v -0.20891353 
0.39113650 -0.40288040 +v -0.07848153 -0.43924198 -0.02749752 +v -0.20496601 0.28686646 -0.40191844 +v -0.59613401 0.24637237 -0.39779404 +v 0.21070631 0.00650842 -0.11762609 +v 0.19766055 -0.50578535 -0.18956043 +v -0.36787161 -0.52123207 -0.34738874 +v 0.03060263 0.31311744 -0.01678865 +v 0.22525223 0.52257669 -0.16050372 +v 0.19448103 -0.43149987 -0.40043989 +v -0.02504409 0.65817934 -0.03354241 +v -0.56633049 0.68608803 -0.40199780 +v -0.42799065 -0.25288713 -0.38223138 +v -0.34206936 -0.26902387 -0.38331634 +v -0.52816856 -0.48566818 -0.06020118 +v 0.25834024 0.60885137 -0.16591282 +v -0.66897368 -0.35918009 -0.10159662 +v -0.31310040 -0.04549596 -0.38517788 +v -0.27696088 0.66204655 -0.17653999 +v -0.10915544 -0.50167352 -0.11631329 +v -0.60329688 -0.46137512 -0.00605169 +v 0.10856744 -0.42248240 -0.01674525 +v -0.64434904 0.68169647 -0.11412555 +v 0.22799912 -0.45396891 -0.25238043 +v 0.10094633 0.67373997 -0.12327649 +v -0.15396567 -0.33452219 -0.38510510 +v -0.64810199 0.31949288 -0.01375621 +v -0.65268183 0.20696397 -0.10973774 +v -0.31123927 -0.37500498 -0.02346292 +v -0.40360919 -0.51639217 -0.10658658 +v -0.05754327 -0.52946126 -0.15264216 +v -0.07259095 -0.34143341 -0.02745327 +v -0.61712599 0.38594276 -0.06067183 +v -0.63940567 0.49953428 -0.39851749 +v -0.60858583 0.41201994 -0.38529894 +v 0.23820585 0.03438032 -0.40286678 +v -0.40203479 -0.00131095 -0.38461915 +v -0.63534039 -0.40084791 -0.37750146 +v -0.44982231 -0.30202937 -0.39406988 +v 0.14073129 0.67690843 -0.12146622 +v -0.66468823 -0.50682068 -0.11635073 +v -0.64957911 0.19232866 -0.23397087 +v -0.37889495 0.68775797 -0.15553673 +v -0.60683990 0.50767243 -0.38576832 +v -0.35123158 -0.49576202 -0.18629947 +v -0.62907350 -0.30643755 -0.01940448 +v -0.66248918 -0.31247503 -0.39062658 +v -0.24824278 0.47754183 -0.40338954 +v -0.59687638 0.55737168 -0.01326343 +v -0.15186134 0.49892491 -0.01597543 +v -0.10069206 0.53445333 -0.39172554 +v -0.63609046 -0.48889893 -0.21590897 +v -0.24483798 -0.36544219 -0.01129485 +v 0.11888938 -0.11093814 -0.38955823 +v -0.17533910 -0.52445769 -0.11052330 +v -0.63406038 0.64900839 -0.40059862 +v -0.64321494 0.61543107 -0.10548484 +v -0.05728214 -0.49563378 -0.10916198 +v -0.20665558 0.66396928 -0.03344155 +v -0.11416534 -0.48974964 -0.02708687 +v -0.66793174 -0.43688557 -0.10447969 +v -0.61091286 0.45415500 -0.22215132 +v -0.37756118 0.66484314 -0.20105146 +v -0.50189525 0.66939777 -0.12091599 +v 0.23540267 -0.04740210 -0.01939427 +v -0.31894997 -0.35187879 -0.01017183 +v -0.37460437 -0.28205466 -0.02240288 +v 0.15698987 -0.46588635 -0.38727391 +v -0.13398229 -0.51965636 -0.03117297 +v -0.18638805 -0.49177790 -0.10793471 +v -0.46489301 0.63206142 -0.02855713 +v -0.16860117 -0.28923118 -0.02564384 +v 0.15780139 0.43204346 -0.39345071 +v 0.20171307 0.08696232 -0.03129254 +v -0.15741397 -0.49559522 -0.10893048 +v -0.59584981 0.68986881 -0.35111353 +v -0.65800661 0.14361776 -0.10667253 +v 0.23924780 0.03095481 -0.11589752 +v -0.35709140 -0.17876039 -0.00984501 +v 0.21963820 0.58966517 -0.03623484 +v -0.61984468 -0.00319460 -0.00858582 +v -0.43629780 0.67000163 -0.03276110 +v -0.62167764 0.04964154 -0.23627733 +v -0.65703380 -0.02425212 -0.10482907 +v -0.66878164 -0.40861440 -0.10170428 +v -0.03176916 0.53251016 -0.40636986 +v 0.22786604 0.63816363 -0.12585701 +v 0.24167724 0.18130645 -0.11615753 +v -0.63256276 0.69185120 -0.22280964 +v 0.04377132 -0.52341044 -0.11209229 +v 0.16830508 0.67837763 -0.05713044 +v -0.63516116 -0.37704176 -0.02297184 +v 0.23122732 0.04048571 -0.40404531 +v 
-0.08390868 0.68475097 -0.02564469 +v -0.62914515 -0.24977784 -0.37902042 +v -0.42064083 -0.51247430 -0.10789555 +v -0.54644179 0.69407064 -0.01593628 +v -0.57716179 0.67049056 -0.23293380 +v -0.66104531 -0.08988123 -0.10591761 +v 0.22960429 0.64154941 -0.19354695 +v -0.15961555 0.38346082 -0.38977060 +v -0.21256147 0.36340743 -0.02723283 +v -0.46878931 0.69424295 -0.05890156 +v -0.61612755 0.41763496 -0.02779413 +v 0.22603309 -0.48089185 -0.31893310 +v -0.65930963 -0.44325835 -0.10229324 +v -0.52099794 0.04452901 -0.39680338 +v -0.23649234 -0.49890968 -0.21736775 +v 0.16591150 0.08375434 -0.39084360 +v -0.66769874 -0.49710038 -0.08596077 +v -0.23770578 0.68415868 -0.12173985 +v 0.14374958 0.64959937 -0.18969235 +v -0.63742930 -0.48919776 -0.31105793 +v -0.13672914 -0.40009481 -0.38474807 +v -0.04101330 -0.49322620 -0.02957206 +v -0.09212114 -0.50257438 -0.14595769 +v 0.23299886 0.08310208 -0.01850062 +v 0.13536558 -0.47612786 -0.01710483 +v -0.66154706 -0.22560686 -0.35284403 +v -0.14053586 -0.50249773 -0.37864831 +v -0.43348178 -0.50988644 -0.10894836 +v 0.23567662 -0.08976644 -0.40076244 +v 0.19767086 0.00266935 -0.01861637 +v 0.23899950 -0.07335165 -0.26402125 +v -0.63524818 0.58687949 -0.40087095 +v -0.03491282 -0.49637970 -0.11447663 +v -0.61555666 0.27381486 -0.29020807 +v 0.22672430 -0.20409241 -0.01815210 +v -0.19597778 -0.49973387 -0.16840823 +v -0.35190225 -0.00862534 -0.39784384 +v -0.65660113 0.28930479 -0.11034925 +v -0.60415953 0.69464481 -0.11796781 +v -0.66438353 -0.26192027 -0.10359669 +v 0.12927791 0.51416862 -0.03241599 +v -0.63156945 0.68503034 -0.40028986 +v 0.21914159 0.56853843 -0.39441371 +v 0.21732143 0.29342937 -0.32833511 +v -0.64708817 0.36549586 -0.01578947 +v -0.45702609 0.53658247 -0.02710091 +v -0.33903569 -0.52208930 -0.23477642 +v -0.23473105 0.68798083 -0.07087898 +v -0.65873617 -0.06496398 -0.10585293 +v -0.14029521 0.47917476 -0.39078850 +v -0.65146321 0.13922988 -0.34156790 +v 0.24725296 0.12167338 -0.12361012 +v -0.33717200 0.69187427 -0.10547930 +v -0.22371536 0.10698999 -0.39999157 +v -0.39017168 -0.45317847 -0.38076711 +v -0.25507280 -0.05529604 -0.38586685 +v 0.02608944 -0.52905059 -0.39681977 +v 0.04316720 0.62876862 -0.01866744 +v -0.66716880 -0.47535729 -0.27087742 +v -0.22412239 -0.01891354 -0.38647261 +v -0.56878799 -0.38708204 -0.01992237 +v -0.12178895 -0.45094851 -0.39675188 +v -0.66243279 -0.28744262 -0.18637905 +v 0.23214129 -0.09534215 -0.01843722 +v -0.01345487 0.39769742 -0.39180854 +v -0.57901007 -0.10028449 -0.38142264 +v -0.30178767 -0.50923580 -0.11153525 +v -0.24079567 0.67703748 -0.11544559 +v -0.65495503 -0.31161779 -0.00599850 +v -0.09855183 0.07918645 -0.38847947 +v -0.63323087 -0.48327714 -0.03580929 +v -0.60632783 0.67521274 -0.11273486 +v -0.66268367 -0.07219830 -0.10946837 +v -0.63760078 -0.46067309 -0.21544513 +v 0.24716850 0.45556471 -0.40725756 +v 0.10567474 -0.50773787 -0.11977553 +v -0.23510990 -0.46748000 -0.01157018 +v 0.09912882 -0.53205174 -0.11488983 +v 0.17255986 -0.21929048 -0.03132616 +v -0.15503566 -0.51754797 -0.01422474 +v -0.61176270 0.42729416 -0.37834978 +v 0.08171058 0.07307810 -0.40274081 +v 0.22198834 -0.53315598 -0.38049856 +v -0.50087118 0.66702878 -0.33649218 +v 0.23249714 -0.07412603 -0.01848786 +v 0.20369458 -0.08841169 -0.11982489 +v 0.24173874 -0.01899157 -0.12181517 +v -0.66395086 -0.27795008 -0.11055479 +v 0.04064307 0.66871107 -0.40719479 +v -0.08579789 0.66315883 -0.03774127 +v -0.36301261 -0.47423002 -0.39343008 +v 0.20717363 0.29588071 -0.03235258 +v 0.19407155 -0.38402680 -0.03247046 
+v 0.25899315 0.66711938 -0.17423864 +v -0.64801997 0.56985611 -0.10755640 +v -0.45139661 -0.42631966 -0.39268303 +v 0.23739699 -0.04095533 -0.02248757 +v -0.01348301 -0.49867022 -0.38550490 +v -0.44010445 -0.48545456 -0.10756703 +v -0.36272845 0.63958687 -0.40392807 +v -0.03045581 0.67739689 -0.01860573 +v -0.62746829 -0.14351438 -0.10700786 +v -0.25451213 0.41028360 -0.40260783 +v -0.65483469 0.00744874 -0.11149099 +v -0.35194829 -0.49543568 -0.14188521 +v -0.42313164 -0.05012600 -0.38406232 +v -0.66617548 -0.29576221 -0.10831174 +v -0.65568972 0.10163787 -0.10898495 +v -0.14384076 -0.52019423 -0.39552078 +v -0.39940044 0.18738382 -0.38595876 +v -0.11756492 -0.52839303 -0.30039248 +v -0.61362380 0.67086124 -0.11466216 +v -0.35527372 -0.48869616 -0.10721681 +v -0.61871052 0.14865789 -0.20570226 +v -0.59572428 0.37876174 -0.39899153 +v -0.28049099 0.33788514 -0.01333619 +v 0.10486837 -0.52314371 -0.11441535 +v 0.13047349 -0.19399464 -0.03030783 +v -0.60410571 0.17886041 -0.39700872 +v 0.00998965 0.16364759 -0.40296486 +v 0.22960949 -0.15086943 -0.01814316 +v -0.02448843 -0.24068154 -0.39963901 +v -0.58416331 -0.04289868 -0.02145094 +v 0.23767349 0.05635796 -0.40326169 +v -0.33376715 -0.09988604 -0.01033780 +v 0.21030709 0.05051184 -0.22735277 +v -0.62002891 -0.49260369 -0.36939543 +v 0.25886261 0.66549432 -0.21795118 +v -0.13892554 0.02669962 -0.40043670 +v 0.25691190 0.66675663 -0.27823365 +v 0.23070262 0.05512627 -0.12018916 +v -0.14939594 0.04883077 -0.38779774 +v -0.60877258 0.52940190 -0.38370463 +v -0.61330378 -0.40106434 -0.00617851 +v -0.52220362 -0.00368511 -0.00914158 +v 0.14432566 -0.41575164 -0.40007475 +v -0.07912650 0.67942536 -0.16657795 +v -0.20421322 0.43155879 -0.02800775 +v 0.18901302 0.66582394 -0.11985340 +v 0.21265206 0.13653463 -0.29312477 +v -0.30634442 0.63022137 -0.40425447 +v -0.03826890 -0.29984537 -0.39907408 +v -0.63792074 0.69631463 -0.07801370 +v -0.60837322 0.49286258 -0.02502510 +v -0.42799050 0.59361714 -0.38865715 +v 0.10156598 -0.13964532 -0.02988441 +v -0.50790602 0.56746030 -0.01396941 +v 0.18026039 -0.49530479 -0.11682266 +v 0.19933751 -0.37005550 -0.20437157 +v -0.65322697 0.16264234 -0.10888623 +v -0.60073161 0.44597542 -0.38594005 +v 0.18840119 -0.50024664 -0.09257673 +v -0.11893193 0.65719354 -0.12169474 +v 0.00890679 -0.05189550 -0.40122586 +v -0.22040009 0.67534041 -0.40393150 +v 0.07388983 -0.09302941 -0.01638736 +v 0.20432439 -0.03156023 -0.07820221 +v -0.06632905 0.65291977 -0.38739645 +v -0.51588809 -0.49255028 -0.22907709 +v 0.07135799 -0.50901020 -0.33509320 +v 0.24961847 0.40736988 -0.12064279 +v -0.31594953 -0.29955071 -0.02320674 +v -0.44174537 0.04105041 -0.01022545 +v 0.03689784 0.21670324 -0.40388298 +v -0.38318792 -0.45405456 -0.02249906 +v -0.31632328 0.56269419 -0.01517115 +v 0.12017208 0.67343950 -0.20573799 +v 0.21258296 0.66815060 -0.35462898 +v 0.22380856 -0.51510602 -0.11517112 +v -0.61364168 0.42717290 -0.11092544 +v -0.48057544 0.46567845 -0.40130204 +v -0.55921096 -0.33217204 -0.02018834 +v -0.15086280 -0.05862555 -0.02600258 +v 0.18470968 -0.11864319 -0.01770868 +v 0.24965432 0.06305247 -0.13475007 +v 0.16249144 0.09261863 -0.01782741 +v -0.18023112 -0.17046367 -0.39854792 +v -0.43842503 0.67628056 -0.40406936 +v -0.49478087 0.35659131 -0.39993837 +v -0.08177095 -0.49393964 -0.11055437 +v -0.45330375 0.68988007 -0.11801249 +v -0.38183111 0.05062471 -0.38510597 +v 0.20867129 -0.03558172 -0.11825250 +v 0.19360057 0.64562774 -0.11939935 +v 0.10202169 0.10112157 -0.01700100 +v 0.25017145 0.53618693 -0.02417528 +v -0.19937991 
-0.20215975 -0.01197615 +v -0.22816455 -0.18482253 -0.39805129 +v -0.64293575 0.32693183 -0.01131315 +v -0.13567175 0.67148328 -0.40616924 +v -0.16449991 0.08446407 -0.02590683 +v -0.63371718 0.67698228 -0.01490774 +v -0.01697478 0.65337956 -0.30230764 +v -0.09561030 0.34309047 -0.40355766 +v -0.64805830 0.22921722 -0.01091058 +v -0.62247366 0.14899994 -0.05370522 +v 0.19935034 -0.38550803 -0.12052279 +v 0.20921658 0.08838195 -0.38935822 +v 0.22175546 0.64708149 -0.32051590 +v -0.20815302 -0.08060404 -0.02479360 +v 0.12685114 0.67965484 -0.02794222 +v 0.18522938 -0.14727512 -0.40203908 +v -0.23276742 0.68028355 -0.31336802 +v 0.20245050 -0.12534375 -0.08319302 +v -0.64160454 0.58772969 -0.02678601 +v 0.20121147 -0.18635504 -0.06825677 +v -0.29816261 0.47919083 -0.02764434 +v -0.35253701 0.46223632 -0.01404515 +v 0.23862587 -0.10848129 -0.11879337 +v -0.65329093 0.03615796 -0.17276923 +v -0.63818181 -0.49212712 -0.10289709 +v -0.40880325 0.32937396 -0.02529915 +v -0.12369093 -0.05294517 -0.38747093 +v 0.15222843 0.53797120 -0.01929682 +v 0.24668731 0.33708921 -0.40490004 +v -0.66356421 -0.10327424 -0.10481502 +v -0.34241989 0.09066327 -0.38576725 +v 0.03769659 -0.37277025 -0.39925155 +v -0.03701189 0.06625572 -0.40156969 +v 0.07507003 0.57486117 -0.40772161 +v -0.28595141 -0.06619199 -0.01094122 +v -0.47599813 0.63439840 -0.01494306 +v 0.24064316 0.01800819 -0.31999761 +v -0.61589444 0.38763794 -0.02514212 +v -0.63481796 -0.48841923 -0.15993178 +v -0.63416004 -0.49106315 -0.37467369 +v 0.22310460 0.49072909 -0.39064869 +v -0.29459652 0.01581952 -0.02382165 +v -0.50749892 0.67372698 -0.07327907 +v 0.20559932 0.66674578 -0.12086322 +v -0.28195524 0.13501681 -0.01181061 +v 0.08639036 0.59590989 -0.03244109 +v 0.04903740 0.67419040 -0.30122399 +v 0.19786556 -0.30529413 -0.38784882 +v -0.59183556 0.48947629 -0.40056011 +v -0.64554948 -0.00478894 -0.10659466 +v -0.40697283 0.66531390 -0.24677321 +v 0.08913981 0.68008292 -0.11899295 +v -0.11526851 0.66272372 -0.11686820 +v -0.58908099 0.05288633 -0.38266057 +v 0.02640445 -0.22658049 -0.02873714 +v 0.19816253 -0.34834403 -0.07201477 +v -0.41328323 0.11391652 -0.38527980 +v -0.57514691 -0.49295631 -0.37459731 +v -0.65572035 0.02885759 -0.10712489 +v -0.62422723 -0.34013394 -0.00633085 +v -0.41173187 0.66305244 -0.11503112 +v 0.24299069 0.39337888 -0.11901423 +v 0.15321149 0.48109931 -0.01921469 +v 0.24039997 0.08614235 -0.02268502 +v -0.14362819 0.68202341 -0.13235638 +v 0.21428797 0.08958698 -0.11762056 +v -0.20579523 -0.48973453 -0.02655026 +v 0.14352958 0.67837989 -0.02282630 +v -0.46060994 -0.51086944 -0.10484184 +v -0.24955586 -0.06050958 -0.02416209 +v 0.24525374 0.28019464 -0.40480280 +v -0.59487426 -0.49111262 -0.23867145 +v 0.24598590 0.14786568 -0.20498946 +v 0.07692094 -0.49375665 -0.11553496 +v -0.14914498 -0.04255728 -0.39989668 +v 0.23902014 0.15132853 -0.40479812 +v -0.24455106 -0.19186814 -0.01125485 +v 0.23387198 -0.22688270 -0.30980384 +v -0.66196418 -0.06640748 -0.10839216 +v -0.65661377 -0.05875659 -0.24979651 +v -0.66185153 -0.02845712 -0.10662530 +v -0.07654593 -0.21450107 -0.02714006 +v -0.64862913 0.07130419 -0.00869263 +v -0.11693761 0.68069953 -0.11956743 +v 0.10331455 -0.50202703 -0.39874005 +v -0.09195969 -0.06290638 -0.40021816 +v -0.52965057 0.30102980 -0.38551086 +v -0.08793025 0.65532136 -0.29923710 +v -0.62594497 -0.13180749 -0.11367915 +v -0.20191425 -0.50126231 -0.37628821 +v -0.64883393 0.22242433 -0.10752235 +v -0.65208513 0.17618929 -0.01450432 +v -0.45939904 -0.00102380 -0.38405913 +v 0.25035584 0.40882763 
-0.40295976 +v -0.28965056 -0.09819216 -0.38516852 +v -0.63101888 -0.47731629 -0.37729549 +v -0.49469376 -0.11099500 -0.39548418 +v -0.58559424 -0.49333760 -0.37035972 +v -0.60721344 0.48671940 -0.38570067 +v -0.63142592 -0.44759265 -0.01895213 +v -0.53762752 -0.51543272 -0.29596806 +v -0.52252352 0.33246428 -0.01173231 +v -0.00076992 -0.49219254 -0.11579369 +v -0.33037248 0.25999880 -0.02512000 +v -0.46468800 -0.33849743 -0.02141859 +v -0.49019071 0.66805547 -0.20971856 +v -0.19657151 0.47727534 -0.39021805 +v -0.61595327 -0.07963667 -0.00799431 +v 0.23539777 -0.12141779 -0.11320212 +v 0.24670273 0.35220611 -0.40538153 +v -0.23038399 -0.10358353 -0.38601726 +v 0.22182721 0.52193326 -0.12306075 +v -0.21004735 0.16729423 -0.02557874 +v -0.66209471 0.04181843 -0.10888920 +v -0.26362815 -0.49747849 -0.37976110 +v -0.65288895 0.27669621 -0.10294305 +v -0.38873023 0.53545702 -0.38855991 +v 0.07199041 -0.23375463 -0.40068501 +v 0.25489217 0.66654617 -0.33585387 +v 0.21169218 0.24958855 -0.03668890 +v -0.61661118 0.21337366 -0.36300054 +v -0.38709182 -0.51422393 -0.10555334 +v -0.17206974 0.22905673 -0.38862586 +v -0.23229374 0.31436250 -0.01355195 +v -0.05597630 0.40091679 -0.40469661 +v 0.24552514 0.30338734 -0.40529132 +v -0.37963966 0.39710560 -0.02617152 +v -0.42746046 -0.13193375 -0.39600569 +v -0.64565694 0.32493746 -0.11546858 +v -0.18815422 -0.52030265 -0.10855132 +v -0.42931134 -0.23363149 -0.39500311 +v -0.27392957 0.44035429 -0.02744986 +v -0.03993789 0.04320445 -0.38876778 +v -0.23521213 -0.52254629 -0.39270791 +v 0.20721219 0.16834971 -0.03231598 +v 0.15610947 0.65639013 -0.11991383 +v -0.58180797 0.48828965 -0.38644749 +v -0.66105789 -0.22041382 -0.25603119 +v -0.61970621 0.10646689 -0.15245450 +v -0.10328253 -0.49377090 -0.11326339 +v -0.07901117 0.58547419 -0.01714951 +v 0.21918531 0.40710220 -0.39165917 +v -0.54090685 0.66967547 -0.02979675 +v 0.22656067 0.50144148 -0.12143303 +v -0.65027773 0.10849454 -0.10745086 +v -0.59839165 0.67114496 -0.23880762 +v -0.62392253 -0.51199526 -0.34032848 +v -0.64554173 0.35183439 -0.01229871 +v -0.46134460 0.69504184 -0.10184514 +v -0.34796476 0.66960627 -0.08060315 +v -0.25095612 -0.37447840 -0.39590293 +v -0.39119548 0.41947541 -0.01337917 +v 0.20196676 0.62335443 -0.02041686 +v -0.47894716 -0.48199400 -0.10679594 +v 0.08534852 -0.52598071 -0.10626869 +v -0.50773948 0.69255155 -0.11329318 +v -0.08041404 -0.26382199 -0.02714304 +v -0.08490940 -0.52608877 -0.11121311 +v -0.18254524 -0.37885255 -0.38429528 +v -0.60524732 0.66299915 -0.18326235 +v -0.62620348 -0.48892671 -0.10372180 +v 0.23369028 0.00873916 -0.11940870 +v -0.63327420 0.50656402 -0.40037200 +v -0.62472636 -0.27627745 -0.00685300 +v -0.62208700 0.13990386 -0.00977224 +v -0.63378876 -0.41547605 -0.01984067 +v -0.61542588 0.46732926 -0.03312239 +v 0.21102148 0.07336572 -0.12279691 +v 0.12456005 0.64841217 -0.11956360 +v -0.39251131 0.57075191 -0.01478901 +v -0.45212859 0.00048226 -0.02257438 +v -0.64145851 0.60720301 -0.05023318 +v -0.65977019 -0.03761568 -0.10695042 +v -0.39230907 0.37724349 -0.38712922 +v 0.22509381 -0.53200769 -0.24556169 +v 0.21727301 -0.49599940 -0.01821976 +v -0.58158779 0.69525641 -0.11080374 +v -0.08797371 0.67992800 -0.11972446 +v 0.05568581 0.67626750 -0.12842475 +v -0.62120122 0.18902393 -0.02580257 +v -0.00439738 0.24537830 -0.01607203 +v -0.63549370 -0.49210280 -0.31843221 +v -0.63346618 0.69824547 -0.05781982 +v -0.08686266 0.30573985 -0.39015123 +v 0.17463110 0.41945618 -0.01930661 +v -0.63335866 -0.20729738 -0.10688829 +v 0.09877062 -0.23628679 
-0.38867608 +v 0.24822086 0.23763239 -0.12240285 +v -0.66271418 -0.26340798 -0.38702667 +v 0.19654982 -0.39351732 -0.11620519 +v 0.20119110 -0.31642830 -0.11879507 +v -0.35388857 0.33783695 -0.40073201 +v -0.60812217 0.38871968 -0.01168252 +v -0.54647225 -0.49275693 -0.27862513 +v -0.25739193 0.58058572 -0.39017954 +v -0.34119353 0.33414474 -0.02579704 +v 0.24797767 0.40107170 -0.11431577 +v -0.60791481 0.67284644 -0.03103764 +v -0.61315513 0.20204888 -0.38341293 +v -0.26054841 -0.01974452 -0.39840236 +v 0.22772039 -0.35960340 -0.11645669 +v -0.19623609 0.09777556 -0.01268468 +v -0.39076537 -0.28882530 -0.38241780 +v -0.24195257 0.65994895 -0.28300124 +v 0.05693511 -0.52404928 -0.06568307 +v -0.28186297 -0.51575452 -0.10935475 +v -0.62918073 0.69728279 -0.01685631 +v 0.02227783 -0.13967152 -0.38851821 +v -0.16905400 0.49625674 -0.02897672 +v -0.31188408 0.50475818 -0.01469837 +v 0.13773640 0.08697897 -0.40325850 +v -0.62637240 -0.10562977 -0.22583272 +v -0.65572792 -0.01618930 -0.11093268 +v -0.60780472 0.58384609 -0.02613833 +v -0.47143096 0.33018222 -0.02491105 +v -0.01070520 0.26473990 -0.40388808 +v 0.21611080 0.23619007 -0.16962273 +v -0.33493432 -0.17367907 -0.39676657 +v -0.05049016 -0.35765204 -0.39842534 +v 0.24902472 0.36055321 -0.40214929 +v -0.63429815 -0.46932274 -0.01992918 +v -0.43431607 -0.51260012 -0.10510184 +v -0.28354487 -0.17557180 -0.02351654 +v 0.04355657 -0.50520170 -0.38312674 +v -0.55148727 0.58451450 -0.40208673 +v -0.63073975 -0.25538123 -0.20641376 +v -0.62346935 -0.28860709 -0.39212322 +v -0.65098679 0.12671553 -0.14234352 +v 0.21016905 -0.52789086 -0.10651679 +v -0.21712567 0.67756170 -0.36852923 +v 0.20624201 0.07422449 -0.03327134 +v -0.03161271 0.45573407 -0.03003633 +v -0.63506615 -0.48310789 -0.37681100 +v 0.21035081 0.27360553 -0.03324069 +v -0.60026038 -0.49101686 -0.37607396 +v -0.60407990 0.42633295 -0.38587451 +v 0.18498634 -0.50106442 -0.10611507 +v -0.01091766 -0.33691499 -0.01507966 +v -0.16484790 0.27220133 -0.02696006 +v -0.09275062 -0.04436642 -0.02723922 +v -0.06060470 -0.52129048 -0.05914029 +v -0.00874934 0.38005832 -0.01664567 +v -0.65296310 -0.14624362 -0.00692917 +v 0.21344842 0.16315994 -0.38200095 +v 0.15255626 -0.32957295 -0.38857991 +v 0.12314442 0.65021574 -0.14196649 +v 0.10431819 -0.52007341 -0.01694951 +v 0.19148363 -0.32945666 -0.03140829 +v -0.66506165 -0.33902344 -0.10219876 +v 0.24986443 0.32855940 -0.12203731 +v -0.43945909 0.69341379 -0.02250203 +v -0.52649909 0.68502110 -0.11630946 +v 0.20163915 -0.18546155 -0.11971893 +v -0.56775349 -0.09976734 -0.00814538 +v 0.22132555 0.57188737 -0.12178411 +v -0.54150581 0.38407516 -0.01197572 +v 0.10997579 0.66744876 -0.11873081 +v 0.24237131 0.23656458 -0.11273316 +v -0.55518901 -0.27680385 -0.02038749 +v -0.26607797 0.65806216 -0.38809863 +v -0.53788084 0.05744571 -0.38368380 +v 0.24796236 0.32909393 -0.40234825 +v -0.61641908 -0.48540917 -0.10129958 +v -0.65630132 -0.04221363 -0.10610358 +v -0.27217588 0.68399638 -0.20230044 +v -0.44255924 0.38355514 -0.02560937 +v 0.05618764 -0.53267610 -0.18000692 +v -0.63977396 0.36227560 -0.01126719 +v -0.04404148 0.40313494 -0.02947972 +v -0.58595508 -0.18453777 -0.02035601 +v 0.23458892 -0.12474732 -0.40040052 +v 0.11731789 -0.52340007 -0.01824870 +v -0.55440563 0.67692995 -0.11193994 +v 0.23596109 -0.21604638 -0.17873156 +v 0.19798349 -0.42626283 -0.27643144 +v -0.20765875 -0.39453530 -0.02519021 +v -0.57465267 0.48963991 -0.02552469 +v -0.66438067 -0.25416729 -0.10932879 +v -0.45977011 0.68975091 -0.20063400 +v 0.24674125 0.36790588 
-0.40588582 +v -0.64978611 0.17475151 -0.13837828 +v 0.15700045 -0.50058246 -0.08159509 +v -0.64840883 0.22019516 -0.18127632 +v 0.05219405 -0.36286527 -0.02942099 +v -0.15685555 0.36335659 -0.40308338 +v 0.25087309 0.66510040 -0.40737095 +v -0.63706803 0.66112846 -0.01542861 +v -0.63706291 -0.37753353 -0.10405968 +v 0.06550093 0.13356219 -0.39045444 +v -0.16779955 -0.52700609 -0.32494649 +v -0.42005682 0.43220812 -0.02626642 +v -0.30987698 0.68836844 -0.01853765 +v -0.20108210 -0.52456927 -0.11049309 +v -0.53791922 0.67459965 -0.11457109 +v -0.13542834 0.65748197 -0.21036538 +v -0.33459634 -0.48671046 -0.10899304 +v -0.35555762 0.66368920 -0.25551265 +v -0.45446834 -0.22625504 -0.00870753 +v -0.36781746 -0.04836317 -0.38461679 +v 0.06737998 0.65847880 -0.11846016 +v 0.23457870 0.12950891 -0.12019894 +v -0.00522417 -0.07677011 -0.38850331 +v 0.24251983 0.00690991 -0.16237654 +v -0.45796785 0.67225981 -0.07623789 +v -0.54905009 -0.09882496 -0.39494035 +v -0.60998321 0.45344529 -0.38390103 +v -0.40420017 0.51891166 -0.02730262 +v 0.16465487 0.64297277 -0.39208385 +v 0.11157839 0.65854424 -0.07867159 +v -0.48851377 -0.48573515 -0.02618556 +v -0.61497265 0.29559654 -0.33604154 +v 0.19708239 -0.08383892 -0.40231505 +v -0.62997425 -0.48476017 -0.10211834 +v -0.62732977 -0.16223764 -0.14413762 +v -0.11347377 -0.28729549 -0.02663579 +v -0.39919537 0.67626566 -0.40415108 +v -0.33034417 0.68856621 -0.11484642 +v -0.61338800 0.40759274 -0.02477573 +v -0.11521968 -0.49852166 -0.38266161 +v 0.24591696 0.40972176 -0.02100199 +v -0.65448624 -0.03764404 -0.10610188 +v -0.31041712 0.15430965 -0.02439657 +v -0.41657776 -0.49307135 -0.37926662 +v -0.55025584 0.52918696 -0.38720667 +v 0.21067344 0.25750467 -0.39222279 +v -0.46564016 -0.47279349 -0.39202875 +v 0.20090704 -0.26347551 -0.32711911 +v -0.63729072 -0.40909588 -0.34381905 +v -0.09786288 -0.49413875 -0.06499624 +v -0.61755824 -0.33875677 -0.39179471 +v -0.66145712 -0.11097002 -0.08977962 +v -0.63383728 -0.35729969 -0.18042523 +v -0.24716976 -0.32769126 -0.39645848 +v 0.22934608 -0.34442371 -0.10945944 +v -0.60753328 0.58348894 -0.38428932 +v -0.30407855 -0.03341396 -0.02355696 +v -0.05567151 -0.50292766 -0.11657201 +v 0.18222673 0.60530764 -0.03392242 +v 0.02231889 -0.27071258 -0.01552265 +v -0.09626287 0.65605968 -0.24919499 +v -0.65854383 -0.11172019 -0.10543887 +v 0.22294353 0.46077895 -0.27772638 +v 0.19677265 -0.33848375 -0.03338155 +v -0.38844335 0.65906405 -0.38687050 +v -0.50507951 0.63277888 -0.40292934 +v -0.64064431 0.66028649 -0.07594299 +v 0.23622993 -0.22644311 -0.12416971 +v -0.44861615 -0.21061462 -0.02175052 +v -0.61373359 0.11696362 -0.39604717 +v -0.61694127 0.21311986 -0.20974962 +v -0.62807983 -0.02500215 -0.10709637 +v -0.20791727 0.68255579 -0.19064941 +v -0.66658223 -0.50837064 -0.21827587 +v -0.57477295 -0.12858833 -0.02074878 +v -0.61209518 0.39621866 -0.11401065 +v -0.63035822 0.68868929 -0.37786934 +v -0.61525422 0.25827032 -0.38154137 +v 0.22889298 -0.45324394 -0.16524769 +v -0.63737774 -0.47102013 -0.37695441 +v 0.22707282 -0.38648033 -0.39823407 +v -0.62018478 0.13456164 -0.10895772 +v -0.66398638 -0.35094339 -0.17979032 +v -0.63707310 -0.48126236 -0.05896411 +v -0.35719854 -0.33902252 -0.39495865 +v -0.63365293 -0.31967464 -0.10520397 +v 0.20593747 0.04315552 -0.03390582 +v 0.14831187 0.31113243 -0.03160532 +v 0.09327187 -0.51999372 -0.11603583 +v -0.62236333 0.13984708 -0.02614812 +v 0.24080211 -0.00961026 -0.26258630 +v 0.06124627 0.25148898 -0.01693589 +v -0.40470189 -0.48533168 -0.10816407 +v -0.62160045 0.05635254 
-0.29116407 +v -0.63203245 -0.18251728 -0.10617890 +v -0.06054061 -0.18779966 -0.01434219 +v -0.36001709 0.69122511 -0.11114800 +v 0.25413203 0.46946871 -0.25055823 +v -0.26881453 0.65855837 -0.11660223 +v -0.31845036 0.66288662 -0.21633963 +v 0.04858196 0.52552402 -0.01796997 +v -0.63823020 -0.44345480 -0.10389967 +v -0.66309804 -0.12729272 -0.10644529 +v 0.23933780 0.07408066 -0.40321678 +v 0.19805780 -0.45063010 -0.11934743 +v -0.64733356 0.28299099 -0.39233834 +v 0.22169428 0.49183148 -0.12312373 +v 0.14767444 0.59144598 -0.39394498 +v -0.62296236 0.21143152 -0.11023350 +v -0.25593516 -0.41011688 -0.02443146 +v 0.21397588 -0.04656403 -0.11634094 +v -0.66091436 -0.20796382 -0.30537710 +v -0.66869932 -0.46694541 -0.10179619 +v 0.24290644 0.30498770 -0.02011217 +v 0.20230740 0.24646528 -0.03184576 +v -0.60180396 0.54281998 -0.40095755 +v 0.19250260 -0.36460325 -0.01875595 +v -0.24718508 -0.20958947 -0.02408336 +v 0.03731796 -0.53169847 -0.35521305 +v -0.64946091 0.37074727 -0.08027504 +v -0.45413291 -0.08419602 -0.00933436 +v -0.63827115 0.08396147 -0.10724276 +v -0.29483435 -0.44258398 -0.02396081 +v 0.23670101 -0.05467001 -0.39739978 +v -0.63491243 0.51975465 -0.11165525 +v -0.37472939 0.66763216 -0.03191299 +v -0.32947883 0.44626760 -0.02706559 +v -0.56929195 0.17650180 -0.38413486 +v -0.29990315 -0.29219237 -0.38378337 +v -0.63608491 -0.41500482 -0.20177914 +v -0.12293803 0.60469508 -0.01700313 +v -0.62494379 0.66587275 -0.11131014 +v 0.14380373 0.67009282 -0.36452952 +v 0.22507605 0.64650071 -0.34010935 +v 0.19296597 -0.51006275 -0.32811233 +v 0.25720149 0.06851077 -0.13615438 +v -0.61243051 -0.48630959 -0.10404819 +v -0.61701035 0.39446780 -0.03161000 +v 0.23828565 -0.09139580 -0.11741417 +v -0.14894763 -0.52588010 -0.39155108 +v -0.62407595 -0.05020394 -0.14631514 +v -0.09190315 -0.49340439 -0.11306722 +v -0.30221483 -0.32895988 -0.39582357 +v 0.22928725 -0.41641974 -0.11653244 +v 0.20189270 -0.11210218 -0.38924843 +v -0.36820650 0.42497084 -0.38776287 +v -0.25546154 0.52254558 -0.01530520 +v -0.35111338 -0.45732149 -0.00979905 +v -0.64665258 0.30195823 -0.29209262 +v -0.04752554 -0.49211979 -0.11174037 +v 0.21135446 0.08219843 -0.19077495 +v -0.17727146 -0.02755141 -0.01259787 +v -0.63535786 0.65799439 -0.30561861 +v -0.27163050 -0.49013960 -0.06412259 +v -0.14303914 0.65655828 -0.11739545 +v -0.60825258 0.66586405 -0.11199059 +v -0.50462890 -0.45587867 -0.02067431 +v -0.44422057 -0.30983752 -0.38147265 +v -0.65193129 0.11396073 -0.29403713 +v 0.15088727 -0.26465973 -0.38891566 +v -0.54379177 0.68776393 -0.37111145 +v 0.11685719 0.53882676 -0.40767354 +v -0.12689321 0.20716111 -0.38900438 +v -0.40342441 -0.34866020 -0.39421690 +v 0.20150615 -0.29013628 -0.11862272 +v -0.44729769 -0.37139520 -0.39336157 +v 0.21534295 -0.53622180 -0.26248395 +v -0.52781481 0.67356163 -0.03369773 +v -0.41899177 0.11946841 -0.39834961 +v -0.32585129 0.25087762 -0.01224850 +v 0.16705367 -0.49558809 -0.11590178 +v -0.51581609 -0.36281011 -0.02070963 +v 0.21951319 0.48576054 -0.39361560 +v -0.13566889 -0.51741236 -0.01411026 +v -0.62980008 -0.24310413 -0.14468403 +v -0.61341608 0.54137486 -0.06383619 +v 0.20974936 0.02731894 -0.14811903 +v -0.45868200 0.32199121 -0.39991349 +v 0.24714072 0.08788946 -0.12266073 +v 0.20979032 0.65566379 -0.12129515 +v 0.10232664 0.51393902 -0.01857424 +v -0.61922216 -0.39295304 -0.39112785 +v -0.62903464 -0.19605313 -0.37764740 +v 0.22688344 -0.43579778 -0.11268380 +v -0.27970216 -0.18026514 -0.39747339 +v -0.66693032 -0.50458372 -0.15133786 +v 0.24155480 0.12102225 
-0.02490637 +v -0.64775592 0.26702121 -0.35439813 +v -0.02974888 -0.19398552 -0.02786860 +v 0.21209688 0.11088041 -0.23536666 +v -0.60054696 0.67052823 -0.18956171 +v 0.21019736 0.13867943 -0.38905269 +v -0.23934376 -0.52512527 -0.25212508 +v -0.61875880 0.50690264 -0.01258042 +v -0.40636328 0.17260760 -0.01120038 +v -0.33817768 0.52601701 -0.38892183 +v -0.62521768 -0.06709157 -0.35378683 +v 0.21300569 0.31437328 -0.07305565 +v 0.20317785 -0.23698179 -0.12069768 +v -0.17101991 0.48771527 -0.40432662 +v -0.63629735 0.68818456 -0.01595586 +v -0.27254951 0.07032160 -0.38637111 +v 0.08344153 0.65639311 -0.03462968 +v 0.22246745 -0.52581114 -0.08375346 +v -0.28607655 -0.30205959 -0.01064504 +v 0.25329497 0.46144322 -0.30700418 +v 0.20484185 0.25966054 -0.03181426 +v -0.39742887 0.68601120 -0.11568049 +v 0.19898969 -0.32022610 -0.03930558 +v -0.12212135 0.66785359 -0.11970574 +v 0.21217625 0.43639815 -0.03330921 +v -0.15406503 -0.51939178 -0.11000413 +v -0.62774694 -0.48071334 -0.02117859 +v -0.19389606 -0.49907458 -0.11467195 +v -0.18528678 -0.32238492 -0.01226084 +v 0.17647962 0.43531907 -0.40709352 +v -0.34713766 0.68583047 -0.11666649 +v 0.24928346 0.52039659 -0.02196968 +v -0.62037158 0.08878486 -0.20142466 +v -0.63019174 -0.27059790 -0.10901942 +v 0.24012634 0.03225905 -0.37393388 +v -0.66360998 -0.35722607 -0.10385627 +v -0.23364006 -0.49179083 -0.11235230 +v 0.00170330 0.14119700 -0.01588266 +v -0.63003814 -0.49175414 -0.32047039 +v -0.28525478 0.18690300 -0.40013945 +v -0.46809766 0.24606963 -0.01140209 +v 0.22999898 -0.36219206 -0.30811146 +v -0.15328421 0.67442763 -0.40511346 +v 0.22748251 0.55467367 -0.12220157 +v 0.17116763 0.01209156 -0.03104998 +v -0.66044837 -0.13454327 -0.10784321 +v -0.31177637 -0.09152231 -0.02330844 +v -0.63028133 -0.48711848 -0.10517801 +v 0.23562843 -0.13978654 -0.35817719 +v 0.20811611 0.07145664 -0.12071556 +v -0.65953189 -0.03530771 -0.10665637 +v 0.24684635 0.29190651 -0.40194973 +v -0.58938277 -0.48425591 -0.06160804 +v -0.09453989 -0.27640101 -0.01385749 +v -0.10367397 0.68536180 -0.07330545 +v -0.31073701 -0.51373976 -0.11040840 +v -0.20323749 -0.14517152 -0.38603684 +v -0.19337125 -0.02728079 -0.02515191 +v -0.67046821 -0.25754428 -0.10490439 +v -0.07343269 0.59924275 -0.39224514 +v -0.60478884 0.51800001 -0.02521234 +v -0.22072484 -0.52497166 -0.13734506 +v -0.65911204 -0.50509703 -0.09187799 +v 0.02116444 0.12885560 -0.02912822 +v -0.17540772 0.20692110 -0.40141863 +v 0.22634332 0.64565462 -0.17201516 +v -0.04489380 -0.47798324 -0.01447793 +v -0.63667876 -0.39195988 -0.37482199 +v -0.51607460 -0.51443189 -0.38935503 +v 0.19761756 -0.41539451 -0.03770510 +v 0.21732956 0.27688733 -0.12276031 +v -0.16333732 -0.10327172 -0.01270213 +v 0.24368732 0.22185963 -0.03379305 +v -0.21491107 -0.49161896 -0.05844622 +v 0.03353437 -0.49771315 -0.08427731 +v -0.34718883 0.40486446 -0.40139267 +v -0.60539299 -0.48012963 -0.10488098 +v 0.12534109 0.41052550 -0.01852658 +v -0.65122467 0.13331941 -0.24177177 +v -0.48471203 -0.48629734 -0.05137833 +v 0.10251101 0.35541397 -0.40626305 +v -0.63148963 0.69374222 -0.12754898 +v -0.66357923 -0.11946817 -0.10362860 +v -0.20547235 -0.24506302 -0.02487701 +v 0.25004381 0.50615579 -0.06448643 +v -0.30242211 0.31001616 -0.40102798 +v -0.65101475 -0.34350774 -0.10614613 +v 0.07971933 0.17574245 -0.40376893 +v 0.22654301 0.63167691 -0.39370945 +v -0.60707235 -0.22948779 -0.39287964 +v -0.29970595 -0.17933269 -0.01051823 +v 0.09746525 0.40218428 -0.39324135 +v -0.51332259 0.66069525 -0.38813946 +v -0.62339491 -0.48411503 
-0.37695399 +v -0.63707811 -0.49092272 -0.35608327 +v -0.11998883 0.13907631 -0.38862652 +v 0.24311389 0.05099617 -0.10854238 +v 0.19986269 -0.25043452 -0.07060409 +v 0.20186718 -0.39762059 -0.12043555 +v 0.15468894 0.31631383 -0.39270175 +v -0.41480610 -0.18337065 -0.39569482 +v 0.16585310 -0.37518787 -0.03120701 +v -0.30992034 -0.47567156 -0.39383709 +v -0.00515746 0.65261436 -0.35626030 +v -0.63493794 -0.38207585 -0.37748295 +v 0.10836062 -0.32096821 -0.01674354 +v -0.45392290 0.09859758 -0.02311780 +v -0.62232482 0.69225150 -0.11268252 +v 0.21851486 -0.52718115 -0.09620494 +v 0.19953246 -0.31773096 -0.11911550 +v -0.66545570 -0.37927750 -0.34656104 +v -0.54413986 0.69445682 -0.11132843 +v 0.23901790 -0.09764384 -0.20738657 +v 0.19708766 -0.25406191 -0.03212747 +v -0.30745250 0.68689805 -0.11669968 +v -0.13431969 -0.45990986 -0.02651323 +v -0.63762081 -0.45657784 -0.10419160 +v 0.21436255 0.15523754 -0.11942956 +v -0.57394081 -0.25510404 -0.00717641 +v 0.23881055 0.09129263 -0.01951555 +v -0.49183649 -0.02265346 -0.02203138 +v -0.04421537 0.66245306 -0.03892642 +v 0.18585695 -0.50996888 -0.38499531 +v -0.63655841 -0.47975469 -0.10753554 +v 0.21680991 -0.08994717 -0.12083557 +v -0.61936545 0.12242987 -0.35068694 +v -0.04833184 0.68380147 -0.05989819 +v -0.61258912 -0.49183327 -0.29756492 +v -0.28487328 -0.52381545 -0.28554219 +v -0.38902688 -0.21570033 -0.38306910 +v -0.37590432 0.26324859 -0.01205275 +v 0.15232864 -0.05445436 -0.38988039 +v -0.61508256 0.28326884 -0.20234895 +v -0.14842272 -0.49300024 -0.10794321 +v -0.24606112 0.03924494 -0.39900303 +v -0.27358368 -0.07825564 -0.39807960 +v 0.19893856 -0.29913419 -0.07515232 +v -0.40588704 -0.50823426 -0.10980966 +v -0.20913312 -0.15137182 -0.01184551 +v -0.02102688 -0.17052226 -0.40017030 +v -0.62133408 0.12319306 -0.02272502 +v 0.22932576 0.64060587 -0.20926322 +v -0.65602720 0.21475689 -0.10585038 +v -0.03928223 -0.49609235 -0.10928836 +v 0.24645729 0.15682010 -0.14432700 +v -0.37676191 -0.49413761 -0.11227740 +v 0.21372257 0.16669177 -0.25914532 +v -0.38125727 0.67048359 -0.11607710 +v 0.08412257 0.67162043 -0.36611319 +v -0.61120671 0.36495963 -0.02364888 +v 0.06119265 0.38412914 -0.01750655 +v -0.01349535 -0.43603954 -0.01489582 +v -0.40661919 0.65189344 -0.40360296 +v 0.11321185 0.17556676 -0.03048401 +v -0.02883230 0.35319576 -0.40455067 +v -0.01045918 -0.39886907 -0.38624451 +v 0.23207778 -0.33234426 -0.15341453 +v -0.47632542 0.46174473 -0.38732114 +v 0.19549026 -0.50849736 -0.12683576 +v 0.21712482 0.64548814 -0.11965638 +v -0.44333214 0.45014846 -0.38775009 +v -0.18771102 0.42676112 -0.38987166 +v -0.23423390 0.37054729 -0.01409835 +v -0.23026590 0.67623836 -0.40063351 +v 0.17803106 0.54959172 -0.03332708 +v -0.66465950 -0.12200271 -0.10823642 +v 0.21221986 0.53191400 -0.40781546 +v 0.11065698 0.64289421 -0.39312580 +v -0.63775390 0.63040572 -0.11055181 +v -0.07279006 0.60325289 -0.40651029 +v 0.21493602 -0.08586650 -0.01815295 +v -0.09695901 -0.31929839 -0.39822704 +v 0.09185379 -0.52297860 -0.01786018 +v -0.61591709 0.24243148 -0.15934835 +v -0.01887133 -0.18301256 -0.38777626 +v -0.34713501 0.68348920 -0.31242499 +v -0.23821469 0.66662484 -0.08268406 +v -0.48396957 -0.49476135 -0.37634525 +v -0.17788573 0.39018047 -0.01483412 +v 0.08360803 0.40752137 -0.03115637 +v -0.66210973 -0.01331151 -0.10756618 +v -0.66210973 -0.50711137 -0.10410989 +v -0.60310429 0.48255306 -0.38627580 +v 0.21092451 0.37518054 -0.40633476 +v -0.62003613 0.43926314 -0.11244209 +v -0.24250525 0.63395756 -0.40490749 +v 0.07406436 0.29756808 
-0.01732016 +v 0.25175908 0.08097702 -0.11213697 +v -0.63290012 -0.33431250 -0.13547266 +v 0.23813988 0.00768836 -0.40220505 +v 0.23749220 -0.01702932 -0.11745205 +v -0.61835164 0.40158626 -0.11104289 +v 0.09146468 -0.50837851 -0.16793714 +v -0.64119196 0.47689426 -0.15965475 +v -0.62471068 -0.04860605 -0.25134507 +v -0.61727387 0.26181820 -0.11037394 +v 0.10037605 0.24066442 -0.40481749 +v -0.41538203 0.08239086 -0.01062844 +v -0.63752091 0.48985237 -0.11136971 +v -0.52871835 -0.51395148 -0.10883728 +v -0.17801371 -0.50168270 -0.32775041 +v -0.54697371 0.27431336 -0.39873472 +v -0.39425179 -0.28507280 -0.39499417 +v 0.19952741 0.21155944 -0.40481749 +v -0.21531035 -0.28475755 -0.39723679 +v -0.13140635 -0.31727445 -0.01320215 +v -0.52459675 0.69029987 -0.28016710 +v 0.07178853 0.67960703 -0.02126753 +v -0.63533467 0.69355893 -0.11348382 +v -0.63925147 -0.40363154 -0.10630827 +v -0.63329691 -0.39030746 -0.01965556 +v -0.05653403 0.65101761 -0.39045209 +v 0.19914341 -0.32037386 -0.34833154 +v 0.23209830 -0.23243587 -0.06157867 +v -0.64997274 0.19183049 -0.34136087 +v -0.21344666 -0.50016272 -0.27138767 +v 0.25579622 0.55628002 -0.30574456 +v -0.45383322 0.41791299 -0.38705793 +v -0.28014746 -0.51795751 -0.10736702 +v -0.16019098 0.30969793 -0.40252379 +v 0.04764006 0.02738519 -0.02932141 +v 0.24425574 0.23866482 -0.02415103 +v 0.22659174 -0.35217118 -0.01895936 +v -0.63715994 -0.31310502 -0.10726532 +v -0.64498842 0.41786662 -0.11162078 +v 0.11527782 0.67895329 -0.02216500 +v -0.62916762 -0.49223989 -0.30898446 +v -0.62652826 -0.10756123 -0.34002680 +v 0.22480743 -0.40808931 -0.01909596 +v 0.24846439 0.34093073 -0.40167993 +v 0.20195431 0.03074051 -0.03147808 +v -0.04378009 0.55479831 -0.01720569 +v -0.62768537 0.03597482 -0.10892835 +v -0.66435993 -0.32056290 -0.38574344 +v 0.22958183 0.64259273 -0.18408407 +v 0.11570023 0.66918296 -0.40506345 +v -0.63205785 -0.25675485 -0.37051418 +v 0.07312231 0.65173995 -0.16466342 +v -0.21975449 -0.05786220 -0.39885408 +v 0.05754215 0.51173353 -0.40705395 +v -0.36957592 0.66965532 -0.11416385 +v -0.30657944 -0.51622069 -0.10568952 +v -0.65652120 -0.17468764 -0.39274493 +v -0.55946392 0.28062779 -0.01104548 +v -0.14184600 0.28999862 -0.01440644 +v -0.10643864 -0.51786876 -0.01445623 +v -0.63724184 -0.43035236 -0.25091952 +v 0.22653800 -0.52850550 -0.16065010 +v -0.06377880 0.64649701 -0.39263090 +v 0.21226344 0.08108180 -0.11818101 +v -0.63769752 0.57866102 -0.39863002 +v 0.21232232 0.37930319 -0.03340326 +v 0.15533929 -0.53436577 -0.34766597 +v 0.21432425 0.36711109 -0.07794220 +v -0.64023703 0.50337583 -0.20793892 +v -0.62314391 0.00572514 -0.27283645 +v -0.25613719 0.08727853 -0.01184636 +v -0.09092759 -0.49873719 -0.10990456 +v 0.23679081 0.59784943 -0.12304117 +v 0.20469865 0.24025382 -0.01891128 +v -0.31618199 -0.14201440 -0.02312504 +v 0.06723945 0.48931840 -0.03140957 +v -0.66078615 0.00605334 -0.10848749 +v -0.51723927 -0.38184276 -0.37962770 +v -0.56235415 -0.51443416 -0.34398693 +v -0.17174935 0.15295103 -0.01328726 +v -0.49008790 0.04199398 -0.38402784 +v -0.49853846 -0.50902522 -0.10731085 +v -0.66225302 -0.11638806 -0.10623720 +v -0.63406742 -0.37107569 -0.37767017 +v -0.61637270 0.45036060 -0.10996115 +v 0.21794410 0.43347144 -0.12167857 +v 0.14196074 -0.52020532 -0.11703118 +v -0.03816086 -0.38635749 -0.01459751 +v 0.03917930 -0.20005272 -0.01574691 +v 0.19847786 -0.42032200 -0.22301348 +v 0.04185194 -0.48513699 -0.39806363 +v 0.22681451 0.57906795 -0.30899894 +v -0.22993557 0.66107506 -0.12121430 +v -0.66107285 -0.25616834 -0.13793443 
+v -0.48736149 -0.25338390 -0.39404732 +v -0.56793237 0.69288796 -0.11633541 +v -0.29070997 -0.48551807 -0.11022244 +v -0.66295189 -0.06582170 -0.10547122 +v 0.22571883 0.64427406 -0.15755680 +v 0.12086891 0.59395653 -0.40809694 +v 0.07640683 -0.34708092 -0.39983070 +v 0.22130539 -0.53254074 -0.39756745 +v -0.35212949 0.06920515 -0.01092377 +v -0.63848597 0.69682634 -0.09912419 +v -0.64693397 0.19255088 -0.00996842 +v -0.65597844 -0.02777270 -0.11054628 +v -0.42719892 -0.41039345 -0.38067091 +v -0.60409492 0.66827911 -0.13651396 +v -0.59818900 0.66698879 -0.38343611 +v -0.63216788 -0.48978034 -0.20873342 +v -0.65114772 0.18304540 -0.01144294 +v 0.24670316 0.36056292 -0.06917212 +v 0.16221036 -0.11847650 -0.38972080 +v -0.65501076 -0.02085453 -0.13703611 +v -0.64778644 0.28075263 -0.01261404 +v -0.64646804 0.31133434 -0.11395150 +v 0.21735020 0.47997385 -0.08923110 +v 0.24949868 0.49741668 -0.11559794 +v 0.20888172 0.10766653 -0.12074152 +v 0.23641196 -0.06771920 -0.06153995 +v -0.17571220 0.02857814 -0.02561108 +v -0.37821588 -0.16601080 -0.39626592 +v -0.48956820 0.69002920 -0.24419099 +v -0.65728915 0.00686003 -0.05497079 +v -0.64416403 0.50167167 -0.11210421 +v -0.61461651 0.46050584 -0.11094672 +v -0.64208275 0.55718589 -0.01855722 +v -0.60993427 -0.48932639 -0.13466454 +v 0.00090221 0.51084071 -0.39263985 +v -0.54894227 -0.27187485 -0.39311665 +v -0.66420883 -0.32491863 -0.32584843 +v 0.18323053 0.27829435 -0.03190150 +v -0.46229139 0.66501313 -0.11450344 +v -0.43863443 -0.48358700 -0.00848752 +v -0.02312083 0.45250410 -0.39201301 +v -0.35997331 -0.12871385 -0.02277949 +v 0.25803885 0.66728270 -0.22994733 +v -0.21114515 0.48146603 -0.01534690 +v 0.22275437 -0.52453417 -0.02335696 +v -0.62000531 0.11096994 -0.25416771 +v 0.20575853 0.03053931 -0.06592350 +v 0.07998062 -0.50842392 -0.22079723 +v -0.60219538 0.62585729 -0.38741988 +v -0.38458514 -0.48716494 -0.02536937 +v -0.63164818 0.01194633 -0.10705169 +v 0.23345006 -0.17834978 -0.05759811 +v -0.25930130 0.68788928 -0.01967769 +v 0.21128558 0.24840584 -0.06716014 +v 0.25191790 0.46160981 -0.40227589 +v -0.32270226 -0.24107090 -0.01015609 +v 0.21282414 0.12203497 -0.11902656 +v 0.16895086 0.67212999 -0.16831759 +v 0.10027631 0.35217014 -0.39289686 +v -0.64872849 0.22982869 -0.28838611 +v -0.48547217 0.51163286 -0.38770008 +v -0.64603025 0.32268989 -0.35017648 +v -0.28164241 -0.48813573 -0.02598386 +v -0.32096145 0.68784511 -0.11564049 +v -0.66825873 -0.49833563 -0.10324646 +v -0.13279633 0.01236978 -0.01361833 +v -0.63611025 -0.48972982 -0.23139545 +v -0.66296721 -0.07606190 -0.10633167 +v 0.23356527 -0.12981027 -0.11699373 +v 0.25584751 0.67106187 -0.39059529 +v 0.23623791 -0.07244360 -0.02130327 +v -0.18710673 -0.49100736 -0.11277317 +v 0.22119023 0.64870816 -0.11995724 +v -0.55009681 -0.51032913 -0.10442905 +v 0.20820591 0.12862298 -0.07579745 +v -0.66012305 0.02065555 -0.10345072 +v -0.38813841 -0.22883347 -0.00936627 +v 0.25086319 0.56411338 -0.05835856 +v -0.03945617 -0.28445756 -0.01462432 +v 0.03723375 0.47279635 -0.01767634 +v -0.32516241 0.14573541 -0.39934537 +v 0.23911791 0.02558654 -0.40121588 +v -0.04854929 -0.40270197 -0.02790647 +v -0.61077648 0.68713605 -0.11143652 +v -0.64943504 0.00155612 -0.00791771 +v -0.21228944 0.68496519 -0.11699756 +v 0.23732848 -0.13246724 -0.26061708 +v -0.60996240 0.67183626 -0.08464243 +v 0.04278384 -0.42459011 -0.39873812 +v 0.25381744 0.07010473 -0.13973024 +v -0.40481168 -0.01192676 -0.39734766 +v 0.19683440 -0.50535882 -0.26677921 +v -0.62629008 0.69430244 -0.19295374 +v -0.29728144 
0.68883139 -0.08173339 +v 0.00360560 0.04725009 -0.01578521 +v 0.24089456 0.24225815 -0.01963130 +v 0.24093552 0.07810067 -0.39929155 +v -0.05264784 0.25268504 -0.40328041 +v -0.63589776 -0.44461468 -0.10617975 +v 0.21348464 0.16942425 -0.35259783 +v -0.61158288 0.66063833 -0.11253784 +v 0.06020208 -0.52362454 -0.02420720 +v -0.66591632 -0.14049830 -0.10857388 +v -0.20181903 -0.52554494 -0.36162326 +v -0.32565391 0.17359468 -0.38655770 +v -0.02609295 -0.49530062 -0.11302935 +v 0.24854641 0.22249772 -0.12800984 +v 0.12210289 0.11575555 -0.03042400 +v 0.21797745 0.67347759 -0.02131519 +v -0.59331983 -0.46190387 -0.37749508 +v -0.41561231 0.57580864 -0.02781370 +v -0.50001550 -0.24743664 -0.02111816 +v 0.24971890 0.30438003 -0.23914933 +v -0.65989262 -0.12887357 -0.10876537 +v 0.19551346 -0.28589556 -0.03197597 +v 0.24927090 0.67058164 -0.02162584 +v 0.24274802 0.18274859 -0.07148326 +v -0.63278222 -0.48887041 -0.14893651 +v -0.01665934 0.67767346 -0.16421404 +v -0.66085774 -0.32034671 -0.00674363 +v 0.19968626 -0.24118730 -0.11734651 +v 0.22524786 -0.26153579 -0.01817891 +v -0.22037902 0.68086761 -0.11735246 +v -0.66207886 -0.04202865 -0.10692914 +v 0.14142834 -0.01419512 -0.01742612 +v -0.07797901 0.36086777 -0.01570393 +v 0.20251763 -0.20845626 -0.28919634 +v 0.07603315 -0.50890386 -0.27605015 +v -0.65200269 0.10939414 -0.39352030 +v 0.23740019 -0.13593979 -0.11785546 +v 0.22637939 -0.44902146 -0.35969830 +v -0.05656973 0.66137993 -0.11712012 +v -0.33309069 0.32301816 -0.38736731 +v 0.01360243 0.08381008 -0.38953441 +v -0.42678413 -0.49497744 -0.26459169 +v -0.42609549 0.68135542 -0.40302083 +v 0.14998899 0.20671199 -0.39167362 +v -0.05898124 0.50142539 -0.03005718 +v 0.23731060 -0.04363853 -0.11401703 +v -0.60671628 0.24684931 -0.01049610 +v -0.06402700 0.64819038 -0.01803933 +v -0.41856396 -0.51188213 -0.05405374 +v 0.20863092 0.02044220 -0.12115132 +v -0.07642252 -0.20811638 -0.39929751 +v -0.66358668 -0.09756169 -0.10585463 +v -0.63388556 -0.36098170 -0.02089390 +v -0.61216396 0.51215196 -0.02584938 +v 0.22082420 -0.53565919 -0.21266164 +v -0.65143180 0.25302169 -0.06536943 +v 0.07669365 -0.37377420 -0.01627714 +v -0.65661323 -0.07697656 -0.13762720 +v -0.66444939 -0.50035536 -0.00699768 +v 0.14165621 0.64910603 -0.01978960 +v -0.37730187 -0.51290101 -0.10902793 +v -0.63677067 0.59855634 -0.15003824 +v -0.63479435 0.64981914 -0.16999637 +v -0.49699467 0.69315571 -0.01624863 +v 0.19965301 -0.36120731 -0.15035060 +v -0.26760843 -0.12638570 -0.01102761 +v -0.60040843 -0.42769608 -0.01948746 +v -0.66068363 0.08010653 -0.10849855 +v -0.30806923 0.66868699 -0.11595242 +v 0.25424501 0.67414868 -0.38044411 +v -0.63895434 -0.19674690 -0.10847132 +v -0.27652490 -0.51461911 -0.01277362 +v -0.46524298 -0.48487714 -0.02386038 +v 0.23337846 -0.26891643 -0.26491424 +v 0.20914806 0.27506727 -0.39254579 +v -0.09885578 0.65676659 -0.16118628 +v -0.45272714 -0.49373823 -0.21438766 +v 0.09976182 0.01442385 -0.01687419 +v -0.22985354 -0.52480811 -0.11382001 +v -0.40679818 -0.49413964 -0.16370551 +v -0.65645450 -0.07946112 -0.11266380 +v 0.20862326 0.66494000 -0.40820950 +v -0.44145802 0.40288675 -0.01289958 +v -0.04027274 0.00064265 -0.40109223 +v -0.13787274 0.27324909 -0.38928971 +v -0.01726090 -0.52046788 -0.01662950 +v -0.11552650 0.67508692 -0.39665252 +v 0.19841142 0.61668491 -0.39438137 +v -0.64699018 0.26273096 -0.13622077 +v -0.62405258 0.04235478 -0.02281992 +v -0.58930314 -0.51182258 -0.38913396 +v 0.19452278 0.64809024 -0.22864047 +v -0.62003082 0.10433932 -0.37846339 +v -0.63442314 -0.33196789 
-0.37598693 +v -0.57933193 0.21821183 -0.02292545 +v 0.22417271 -0.53192472 -0.31851032 +v 0.07937911 0.07936749 -0.02973590 +v -0.65853065 0.22756350 -0.11091353 +v -0.64342409 0.38535649 -0.10907006 +v 0.24524407 0.34849697 -0.02194797 +v -0.52337801 0.68477392 -0.40244401 +v -0.08034441 0.68356210 -0.11807462 +v -0.64581257 0.54097688 -0.11317530 +v 0.10656375 -0.42469722 -0.38726583 +v -0.64184201 0.46068105 -0.34704575 +v -0.12207241 -0.50309378 -0.30376706 +v -0.64306313 0.52876866 -0.02218585 +v -0.62078089 -0.14364269 -0.38082901 +v -0.02502537 0.30931497 -0.01607161 +v -0.51579529 0.22701609 -0.01088505 +v 0.04333944 0.12347060 -0.40282720 +v -0.10607240 0.27773020 -0.40284529 +v 0.14233464 0.64709800 -0.12002958 +v -0.13091208 -0.49242607 -0.11055266 +v -0.23960456 -0.29917476 -0.01135230 +v -0.63476616 -0.43030059 -0.02016876 +v -0.64325768 0.41433367 -0.28513727 +v -0.62815624 -0.14488123 -0.02164073 +v -0.10970248 0.63454258 -0.40629795 +v -0.58679688 0.21872921 -0.38410124 +v -0.18701960 0.06546194 -0.40011010 +v -0.28858760 0.39097634 -0.01377876 +v -0.64107656 0.28940096 -0.39796364 +v -0.65480584 0.00856821 -0.10658317 +v 0.20046712 -0.30628234 -0.24140112 +v -0.31857032 0.68063295 -0.35962531 +v -0.63969672 0.64414638 -0.01865552 +v 0.24438648 0.22980349 -0.11912104 +v -0.16716680 0.24073030 -0.01378344 +v -0.61333895 0.61614537 -0.11248379 +v -0.63811207 -0.48091769 -0.07985886 +v 0.23024249 -0.36330613 -0.11622605 +v 0.16932985 0.64859855 -0.14089368 +v -0.57641095 0.37032798 -0.02433146 +v 0.19825785 -0.22766751 -0.38888142 +v -0.52132231 0.69193500 -0.17895114 +v 0.21140857 0.10757285 -0.33806160 +v -0.63399559 0.68707740 -0.31460336 +v -0.22569607 -0.51729393 -0.10792492 +v -0.04022407 0.68229419 -0.02007898 +v -0.19750023 0.67612016 -0.11562858 +v -0.66466439 -0.32820326 -0.06025692 +v -0.07022215 0.13434727 -0.02779200 +v -0.63379335 0.67495841 -0.11497536 +v -0.65926534 -0.14957733 -0.27743870 +v 0.10424442 -0.52362531 -0.02009302 +v 0.24065402 -0.05429315 -0.15746574 +v 0.17972346 -0.25760478 -0.03108573 +v -0.46970758 0.43419451 -0.02601364 +v -0.66236550 -0.04473292 -0.10619039 +v -0.16166790 -0.49257916 -0.06240679 +v -0.65448838 0.02818096 -0.11185143 +v 0.16603002 -0.50350654 -0.11820398 +v -0.27162758 -0.28179216 -0.39668825 +v -0.62803334 -0.15506923 -0.26122391 +v -0.10563718 -0.47897148 -0.01369238 +v -0.18545285 -0.23540320 -0.39797917 +v -0.58779269 0.69563067 -0.01710994 +v -0.39573893 -0.51070279 -0.00957266 +v -0.17682309 -0.50152767 -0.37821743 +v -0.57919621 0.65322626 -0.01483795 +v -0.12567173 -0.48763368 -0.11259954 +v -0.66627717 -0.50123304 -0.04368190 +v -0.62289029 0.05133022 -0.10816024 +v -0.14512005 -0.49267545 -0.11073097 +v 0.22582139 0.61627537 -0.12364119 +v -0.65421957 -0.34122938 -0.10340264 +v 0.24307835 0.14581329 -0.39634016 +v -0.47510916 -0.38564485 -0.00800367 +v -0.63757700 -0.36135751 -0.10620783 +v -0.00644484 -0.29772392 -0.38712814 +v -0.65980548 -0.16506985 -0.39006996 +v 0.08337276 -0.05033654 -0.38951868 +v -0.26836100 0.34822661 -0.02649536 +v -0.51936388 -0.24147622 -0.00788452 +v 0.22452092 -0.53452885 -0.25279489 +v 0.24680316 0.22305851 -0.29896221 +v 0.19702908 -0.51680762 -0.11342765 +v 0.03126908 -0.05991092 -0.01591330 +v 0.22723708 -0.50436860 -0.15596227 +v 0.24562812 0.17771436 -0.12046363 +v -0.03877764 -0.07187463 -0.02801711 +v 0.19715452 -0.40718222 -0.08392326 +v -0.66133636 -0.01313870 -0.10553505 +v -0.08066948 0.67665058 -0.01822359 +v 0.13824892 0.39219150 -0.40674710 +v 0.20772988 -0.04657547 
-0.12029640 +v -0.28826499 -0.49966595 -0.10740447 +v -0.61331075 0.35334858 -0.14061154 +v 0.08596093 -0.53168422 -0.39596739 +v -0.41075331 0.63894272 -0.01564734 +v -0.61585283 0.37233791 -0.02457019 +v 0.08087677 0.67363793 -0.12249051 +v -0.66224259 0.03334937 -0.10903559 +v -0.11891587 0.67865509 -0.27494437 +v -0.07692163 0.10080257 -0.01471496 +v -0.23284355 -0.48932064 -0.02648557 +v 0.13358717 0.25647163 -0.01793933 +v -0.00590211 0.67657727 -0.25334430 +v -0.11859586 0.49019417 -0.40494704 +v -0.46197122 -0.49212712 -0.10948411 +v -0.12049794 -0.25659919 -0.39841342 +v 0.11938174 0.04013688 -0.39026740 +v 0.08996734 0.65017766 -0.33743137 +v -0.06992002 -0.07009333 -0.01453240 +v 0.25648254 0.67594725 -0.37492818 +v -0.65378946 0.03853312 -0.23400235 +v 0.07372414 -0.53140557 -0.39538544 +v 0.10653822 0.30791834 -0.03094743 +v -0.59534210 -0.10795207 -0.39422393 +v 0.17538942 -0.53537434 -0.21393189 +v -0.48715138 0.41077691 -0.40058181 +v -0.01463682 0.44951165 -0.01699291 +v 0.24687742 0.26033986 -0.35755590 +v -0.64438146 -0.32544190 -0.10799173 +v 0.00719998 -0.34867671 -0.38690963 +v -0.61080194 0.46094224 -0.02488126 +v 0.21564031 0.46841291 -0.03434626 +v -0.29015681 0.62691754 -0.01615842 +v -0.45786241 0.04881941 -0.02278290 +v -0.63414401 -0.28553286 -0.10440905 +v 0.07430527 0.65932214 -0.12052406 +v -0.64264321 0.43510804 -0.11047649 +v -0.56659073 -0.04046582 -0.00857816 +v 0.25732735 0.65577912 -0.34463587 +v -0.64706433 0.27382925 -0.11481366 +v -0.66472065 -0.48125133 -0.10143108 +v -0.62402177 0.08677684 -0.07001726 +v -0.45554817 -0.15622649 -0.00897520 +v -0.66395009 -0.11175440 -0.10831387 +v 0.21426047 0.30610034 -0.12109217 +v -0.25944704 0.66693789 -0.11333148 +v 0.20462976 -0.28427607 -0.11535453 +v 0.25517696 0.66806442 -0.20525202 +v -0.56923008 -0.50287557 -0.10767512 +v -0.50823808 -0.32678470 -0.39295644 +v -0.63271296 -0.13665552 -0.10562059 +v 0.20222336 -0.20346320 -0.35874721 +v -0.66145152 -0.03907673 -0.10929007 +v -0.49104768 0.56423861 -0.38799053 +v 0.05361792 -0.39058754 -0.38699603 +v 0.23127168 -0.31955412 -0.11652095 +v -0.58790272 0.68675381 -0.40209547 +v -0.05706112 0.49534446 -0.40572688 +v 0.06949505 -0.18745075 -0.02940056 +v -0.66449279 -0.50414509 -0.08227469 +v -0.63891327 -0.28274891 -0.10788789 +v -0.61073023 0.47241485 -0.37948364 +v -0.52812159 -0.43896884 -0.37849531 +v -0.35377279 0.09952407 -0.02379612 +v 0.20868225 0.01078504 -0.31093028 +v -0.57578111 0.68752253 -0.39875600 +v 0.22786689 0.61111826 -0.21129224 +v -0.62472063 -0.50530618 -0.06175272 +v 0.25411969 0.65730357 -0.40774927 +v 0.20122498 -0.14650439 -0.03307431 +v 0.12392066 -0.22310480 -0.01694653 +v 0.22224770 0.42263380 -0.17778514 +v 0.20324738 -0.09026460 -0.38896993 +v 0.20134786 -0.46437022 -0.11384129 +v -0.17016190 0.67693758 -0.36621702 +v -0.63467646 -0.26307872 -0.10605081 +v -0.01992062 0.16055405 -0.02864011 +v 0.00236418 -0.02394625 -0.02868692 +v 0.25140098 0.40465826 -0.32778636 +v 0.06139010 -0.52982670 -0.39730680 +v 0.21615234 0.11279367 -0.01871510 +v -0.66464126 -0.19651519 -0.10387670 +v -0.60695422 0.61540616 -0.38464466 +v -0.65609854 -0.06545196 -0.11287700 +v -0.40374398 -0.48666772 -0.02485871 +v -0.16451454 0.66479456 -0.07801582 +v -0.33883774 -0.52046800 -0.39207685 +v 0.17693314 -0.51328754 -0.11995298 +v 0.22810243 -0.39063710 -0.36178112 +v -0.63167101 -0.27091706 -0.26513106 +v -0.64799613 0.42951807 -0.11199186 +v -0.46642557 0.19162433 -0.01095739 +v -0.34190205 0.14994220 -0.01142975 +v -0.64723837 0.35631540 -0.01586479 
+v 0.05405315 -0.50615299 -0.11925679 +v -0.65364605 0.02257414 -0.12674342 +v -0.65165693 -0.01214038 -0.00803772 +v 0.20555395 -0.09996175 -0.28461555 +v -0.38359421 0.46783993 -0.02692048 +v -0.62474877 -0.24428068 -0.38007155 +v 0.22659971 -0.43225926 -0.02263608 +v 0.21160835 0.21097663 -0.12071854 +v -0.44112253 0.28631762 -0.38622898 +v 0.21754499 0.62176704 -0.39462927 +v -0.15926909 0.68237281 -0.12449270 +v 0.21495427 0.26726857 -0.39060488 +v 0.22874500 -0.33722806 -0.11578007 +v 0.23202692 0.51378179 -0.12324586 +v 0.01969540 0.67988354 -0.01985301 +v -0.63491964 -0.34718403 -0.37561586 +v -0.34125692 0.46489334 -0.40222761 +v 0.19748996 -0.13448378 -0.03136106 +v -0.63797116 -0.47096312 -0.04210269 +v -0.63460988 -0.36004117 -0.10467332 +v 0.20622212 0.02779926 -0.03770085 +v -0.60175228 -0.24322581 -0.02000621 +v -0.29110652 -0.01320736 -0.01104037 +v 0.23583108 -0.02731563 -0.01937385 +v -0.62584955 0.00019639 -0.02600471 +v -0.52351099 -0.50984621 -0.10422351 +v -0.07185019 -0.26159701 -0.39890748 +v -0.66685563 -0.50263768 -0.07517998 +v -0.61568379 0.43315533 -0.05119875 +v 0.23113349 0.60766977 -0.12158453 +v -0.63979131 0.61637747 -0.01567627 +v -0.64035195 0.50702345 -0.39468840 +v 0.21293701 -0.52182716 -0.11849208 +v -0.45340794 0.66747802 -0.15640314 +v -0.24824698 0.39849448 -0.02715283 +v -0.00768122 -0.35698566 -0.02851926 +v -0.42448762 -0.37857628 -0.00868625 +v 0.08673926 -0.12644881 -0.40155268 +v -0.61251706 0.39341527 -0.29169685 +v -0.61419386 0.32647142 -0.29076788 +v 0.22073734 0.38883767 -0.22343178 +v -0.55127162 0.67002058 -0.11694352 +v -0.45294714 0.16751297 -0.02359058 +v -0.63052410 0.50209993 -0.11097948 +v 0.19328646 0.16436595 -0.40437108 +v 0.17367175 -0.09110025 -0.03111764 +v -0.61078393 0.46605751 -0.28257677 +v -0.40756857 0.31897616 -0.40017626 +v -0.50513273 -0.51560950 -0.14271502 +v -0.66500217 -0.45650029 -0.10186514 +v -0.07177593 0.18304633 -0.38943782 +v -0.30112633 0.01642675 -0.39839557 +v -0.58488441 -0.51019752 -0.10424394 +v -0.62064761 0.20205593 -0.02372250 +v -0.47053176 0.00023360 -0.39684850 +v -0.45544824 0.66654098 -0.25589713 +v -0.27448696 0.28096882 -0.38789755 +v -0.65650040 -0.06974356 -0.19281246 +v 0.23808648 0.20338523 -0.01912150 +v 0.24270728 0.03820984 -0.21755585 +v -0.46114168 0.69018167 -0.15018550 +v 0.20407432 -0.17260642 -0.17632297 +v -0.57511032 0.42978171 -0.02505531 +v -0.61107832 0.65202296 -0.07972354 +v 0.00701832 0.66086727 -0.07761794 +v -0.63055992 -0.19863406 -0.10593038 +v -0.15738744 -0.52479470 -0.39502949 +v -0.60039032 -0.29885325 -0.02040537 +v 0.14083976 -0.52629912 -0.11036542 +v -0.65923959 -0.00858151 -0.10626018 +v 0.24646793 0.18721217 -0.25149551 +v -0.64153719 0.53216910 -0.11182164 +v -0.30009463 0.66181016 -0.27047083 +v -0.64936823 0.03827838 -0.00842922 +v 0.21308553 0.17755069 -0.38929802 +v -0.65786487 -0.12196919 -0.17839666 +v -0.44748151 -0.51100433 -0.10830875 +v -0.31901559 0.59032875 -0.02870905 +v -0.19315831 0.54324758 -0.02916524 +v -0.29536119 0.66837507 -0.07537957 +v 0.10706057 -0.49922875 -0.11151780 +v 0.18983305 0.06131369 -0.03144744 +v -0.65415543 0.01429706 -0.00900073 +v -0.45093495 -0.05435153 -0.02230075 +v -0.09376375 -0.09878836 -0.02707155 +v -0.59051383 0.43451291 -0.39989859 +v 0.01112713 0.57234484 -0.01780996 +v -0.62388086 -0.02730909 -0.10915134 +v 0.23204234 -0.19525707 -0.01923767 +v -0.13031030 -0.18144502 -0.02622088 +v -0.44744566 -0.51399159 -0.10546781 +v -0.65383542 -0.29015091 -0.10344349 +v -0.61777014 0.17996067 -0.16084628 +v 
-0.64920950 -0.05327344 -0.10604443 +v 0.04685706 -0.43334302 -0.01578904 +v -0.34896246 -0.48625565 -0.10824493 +v -0.54152310 0.24225569 -0.38499659 +v -0.46153334 0.16600288 -0.38533574 +v -0.62468982 -0.04847271 -0.30423301 +v -0.63511926 -0.40894628 -0.15018123 +v 0.19786890 -0.46330345 -0.14625515 +v -0.61802614 0.17982157 -0.25699377 +v -0.62910581 -0.12549701 -0.05918497 +v -0.13298805 0.68302292 -0.11872826 +v -0.16967285 -0.52518535 -0.39341241 +v -0.26731381 0.66527390 -0.03318410 +v -0.01890165 0.63093829 -0.01812785 +v 0.18822539 -0.53454846 -0.11610775 +v 0.14568587 -0.53504658 -0.25011632 +v -0.63872373 0.67588514 -0.01997557 +v -0.09273461 0.65654439 -0.03187895 +v -0.19947125 0.65840137 -0.11735587 +v -0.64304501 0.40294823 -0.15771638 +v -0.51521653 0.19247077 -0.02323014 +v -0.65545589 0.03408334 -0.01267021 +v 0.23455371 0.08198375 -0.40424171 +v -0.14358133 0.68466055 -0.02006876 +v 0.19922571 -0.36841762 -0.25538605 +v 0.12123787 -0.49478593 -0.11610349 +v -0.62848628 -0.09860842 -0.02632174 +v -0.63693684 0.64182460 -0.11155057 +v -0.63881332 0.62085277 -0.11224123 +v 0.15878540 -0.51046497 -0.15727253 +v -0.29271156 0.62539834 -0.38955355 +v 0.25192332 0.36663905 -0.17909284 +v -0.01807476 -0.50323588 -0.11711884 +v 0.02063756 -0.50755858 -0.29764983 +v -0.59730804 -0.14294748 -0.38053963 +v -0.62019700 0.25257415 -0.07007087 +v -0.64049780 0.59836602 -0.01671205 +v -0.64492404 0.33610353 -0.16424553 +v -0.30997619 0.68549258 -0.16013306 +v -0.60029811 0.66901487 -0.36531168 +v 0.21301389 0.24674787 -0.39157575 +v 0.04658061 -0.09211843 -0.40135905 +v 0.21963149 0.36945105 -0.33490300 +v -0.32033139 -0.05707197 -0.39771554 +v -0.54969203 -0.07747101 -0.02123348 +v 0.22171789 0.42379177 -0.40671733 +v -0.60401523 0.66808826 -0.16762352 +v -0.46700659 0.68762267 -0.30827188 +v -0.16743283 0.52730417 -0.39087233 +v 0.17142157 0.66670144 -0.40661243 +v -0.36514163 -0.51879311 -0.10748788 +v -0.34810227 0.55864352 -0.02809031 +v 0.25030541 0.34920174 -0.29399437 +v 0.24507533 0.13831846 -0.25447157 +v 0.14946190 -0.41530427 -0.03094402 +v -0.62974578 -0.21330085 -0.24548934 +v -0.35971186 0.67049259 -0.11659414 +v 0.25832590 0.64392650 -0.25412899 +v 0.23608206 -0.19958675 -0.11809419 +v -0.39761010 -0.12576598 -0.00964415 +v -0.64067954 0.53475070 -0.01413835 +v -0.58889842 -0.49287874 -0.33721220 +v -0.00159346 0.64738441 -0.39230898 +v -0.31939954 -0.38506660 -0.39499012 +v -0.65553266 -0.00585126 -0.11112290 +v -0.66077298 -0.04847674 -0.10738192 +v 0.19386254 -0.45014250 -0.03320027 +v 0.22469518 0.51375467 -0.28517556 +v 0.21687694 0.26583955 -0.23073927 +v -0.43782258 -0.46326774 -0.37983322 +v -0.37225586 -0.51384503 -0.06704226 +v -0.21291633 -0.26027817 -0.01177444 +v -0.03334001 -0.50536877 -0.27073252 +v -0.61595249 0.19611014 -0.38276652 +v -0.63625073 0.65546995 -0.11595710 +v -0.43758449 -0.48743200 -0.06888487 +v 0.08605327 0.45238218 -0.01815934 +v -0.37069681 0.51670581 -0.01441963 +v -0.60924017 0.66246402 -0.02975845 +v 0.24077711 0.12935445 -0.02048920 +v -0.25000817 -0.49132711 -0.10594741 +v -0.18248817 -0.17416251 -0.02523021 +v 0.25726351 0.59605020 -0.25898853 +v 0.03841167 -0.52244061 -0.01838275 +v -0.11614833 -0.52053267 -0.10928624 +v -0.36583793 -0.38455501 -0.02267651 +v 0.18336655 0.64837599 -0.29186621 +v 0.13096079 -0.50134486 -0.11519112 +v -0.57154673 0.49196106 -0.01283618 +v -0.41161841 -0.42351717 -0.02212287 +v 0.23915663 0.01541019 -0.39634976 +v -0.49675632 0.66092390 -0.38801011 +v 0.20382352 -0.03504263 -0.03353262 +v 
0.25434512 0.65773046 -0.12015086 +v -0.26994032 -0.48922831 -0.10984072 +v -0.66823024 -0.28127521 -0.10110341 +v -0.66090864 -0.31020877 -0.10277496 +v -0.62937200 0.40743575 -0.11003605 +v -0.45695600 -0.43398792 -0.00821262 +v -0.63059568 -0.22698659 -0.35600987 +v 0.01101968 0.01253456 -0.40165439 +v 0.25612432 0.62463403 -0.36723197 +v -0.66268528 -0.27282408 -0.30265296 +v -0.63908976 -0.47839209 -0.10456055 +v -0.62397552 0.21716335 -0.11059351 +v -0.62332016 0.06220449 -0.02274970 +v 0.20505744 -0.12849164 -0.22139470 +v -0.61217648 0.39996904 -0.21012837 +v -0.54269552 0.66880894 -0.28466663 +v 0.20513937 0.01525293 -0.03350751 +v 0.20384145 -0.03335150 -0.11579496 +v -0.30010223 0.50028741 -0.40300849 +v -0.43311471 -0.50980657 -0.00877944 +v -0.65510511 0.12098091 -0.10813258 +v 0.09801873 0.66106790 -0.11727715 +v 0.11902609 -0.21328573 -0.40121692 +v -0.40984431 -0.35151953 -0.38152817 +v -0.65230447 0.08499728 -0.00969948 +v -0.22949487 0.13883971 -0.01240552 +v 0.12050577 -0.01662561 -0.39000335 +v -0.65738863 -0.04659466 -0.10569931 +v -0.09375343 0.65382546 -0.38770649 +v 0.20706193 -0.08439153 -0.11944573 +v -0.23202415 -0.12792376 -0.39837301 +v 0.20849554 -0.02163649 -0.12051257 +v 0.25441426 0.07915910 -0.10106468 +v 0.19609746 -0.51038158 -0.14875990 +v 0.03100562 0.65016735 -0.11867507 +v -0.66093934 0.01163624 -0.10803130 +v -0.58031982 0.62681454 -0.38721815 +v -0.62421870 0.01557997 -0.10774789 +v -0.62355822 -0.00461280 -0.37528840 +v -0.62813038 -0.07284499 -0.06084673 +v -0.54354030 0.33761668 -0.39932773 +v 0.20883858 -0.05381140 -0.11742481 +v -0.21967214 -0.49144945 -0.03232747 +v -0.50340718 0.13390879 -0.02289822 +v -0.66069102 0.13198894 -0.10996371 +v -0.30284142 0.57327956 -0.38964206 +v -0.49155438 0.58380997 -0.02732645 +v 0.05966483 0.68150777 -0.08141551 +v -0.66973549 -0.27286029 -0.10533419 +v -0.61662573 0.40915790 -0.03135808 +v -0.08774253 0.44664621 -0.40480429 +v 0.15671699 -0.45464540 -0.03148957 +v -0.53661805 0.60063267 -0.01405154 +v -0.57069165 -0.04813089 -0.39519778 +v -0.64867181 0.22448462 -0.10965689 +v -0.29214829 0.36361876 -0.40163970 +v 0.20140179 -0.28283912 -0.15139319 +v -0.53712493 0.69608271 -0.02012578 +v -0.64394349 0.34599951 -0.39625782 +v 0.07106963 0.05206551 -0.01656396 +v -0.65899885 -0.10614377 -0.10900580 +v -0.03246189 0.61095369 -0.03148957 +v 0.22641811 0.58898121 -0.39149448 +v 0.19830163 -0.45541418 -0.11993213 +v -0.19032429 -0.25909835 -0.38531959 +v -0.61169517 0.54340547 -0.02693410 +v 0.24489363 0.19159816 -0.35958999 +v -0.07883117 0.25062081 -0.38990143 +v -0.35338861 -0.42774016 -0.02293481 +v 0.16307603 0.62342441 -0.40839505 +v 0.19915411 0.05968098 -0.01831083 +v 0.25226131 0.40885791 -0.26946399 +v 0.20312211 -0.19393104 -0.23503602 +v -0.61082733 0.48403940 -0.11292636 +v 0.18091924 -0.52589411 -0.11299658 +v -0.66051692 -0.28226683 -0.10311922 +v -0.66443884 -0.09276865 -0.10926964 +v -0.30333292 -0.12741011 -0.39737171 +v -0.66046572 -0.17283762 -0.38489595 +v 0.20133780 0.66904247 -0.12278371 +v 0.23082900 -0.21639331 -0.11573879 +v -0.61982316 0.35944757 -0.11023393 +v -0.61849964 0.27582896 -0.02456764 +v -0.27049068 0.13650413 -0.39977497 +v 0.20664468 0.04228842 -0.39017189 +v 0.25124756 0.54959387 -0.12071386 +v -0.13930348 0.19801894 -0.01395366 +v -0.61443692 0.32327056 -0.11151780 +v -0.60395116 -0.48998284 -0.17758216 +v -0.15696748 0.06070537 -0.01322428 +v 0.07773332 0.65097696 -0.27613932 +v -0.63060588 -0.26918980 -0.37870809 +v 0.00124052 -0.53115898 -0.15766574 +v -0.23009388 
0.02318453 -0.38660708 +v 0.21359508 0.10671014 -0.11820398 +v 0.17961621 -0.23561843 -0.01787592 +v -0.44416875 0.68780023 -0.01596011 +v -0.18840683 -0.52593762 -0.11380512 +v -0.62159723 0.28381172 -0.10999690 +v 0.23698837 0.16259676 -0.01908405 +v -0.25181291 -0.52458787 -0.19651045 +v -0.62988907 -0.35626569 -0.37893683 +v -0.01570154 -0.45105606 -0.02866820 +v -0.12579690 0.10923331 -0.01388898 +v 0.23276694 -0.11387367 -0.11611370 +v 0.22754198 0.59439248 -0.15733765 +v -0.42910570 0.43391311 -0.38816351 +v 0.09322134 0.34495172 -0.01775719 +v -0.64010602 -0.40043697 -0.10343881 +v 0.02641302 -0.44954866 -0.38611472 +v -0.66299242 -0.03925100 -0.10881261 +v 0.09854870 0.18103708 -0.39117467 +v -0.66294122 -0.34778315 -0.10206471 +v 0.22312598 0.52991712 -0.39291665 +v 0.15826070 0.37449163 -0.39306581 +v 0.16946326 0.67271191 -0.12794644 +v -0.12804714 0.68138224 -0.12407823 +v -0.40657514 -0.49100560 -0.37955940 +v -0.33182314 0.26183152 -0.38716412 +v -0.48638314 0.66545922 -0.38297546 +v 0.23058070 -0.39210290 -0.16051818 +v -0.63710570 0.59928614 -0.20356049 +v -0.43518314 -0.51901662 -0.34519291 +v -0.61736810 0.13912536 -0.38216799 +v 0.17039511 0.00894941 -0.39015612 +v 0.12455063 -0.50737220 -0.38462830 +v -0.38395497 0.12109938 -0.01099952 +v 0.11608727 0.01902887 -0.03020570 +v -0.63556969 -0.37249085 -0.06302127 +v -0.23251049 0.22214448 -0.38795456 +v -0.63948393 0.54001468 -0.16826397 +v 0.06767255 -0.28788573 -0.01617544 +v -0.10335593 -0.52622104 -0.39488479 +v -0.54894441 0.20017672 -0.39805067 +v -0.63475561 -0.39775559 -0.10531078 +v -0.61771369 0.17783134 -0.34335989 +v 0.03240599 0.24184273 -0.02955461 +v -0.37650281 0.15477695 -0.39897728 +v -0.31749225 -0.23872602 -0.02308716 +v -0.43283561 -0.49282554 -0.11401576 +v -0.07041641 0.65564030 -0.20372261 +v 0.02330775 -0.50668538 -0.23432620 +v -0.14377065 0.68044484 -0.11978276 +v -0.61100137 0.44203505 -0.17426333 +v 0.23049623 -0.25471032 -0.11591199 +v -0.66334569 -0.06874736 -0.10777087 +v -0.63572329 0.69675308 -0.11080374 +v -0.03867753 -0.13227999 -0.38784519 +v -0.30080360 0.43558344 -0.40232909 +v 0.21698456 0.22210433 -0.11980191 +v 0.24410264 0.08970826 -0.09096690 +v 0.25723544 0.66757464 -0.12488719 +v -0.60254312 0.59960711 -0.40148184 +v -0.45833576 -0.33063295 -0.00834156 +v -0.07755368 -0.48327565 -0.39687487 +v -0.43465832 0.67263472 -0.11521792 +v -0.61027688 0.65291286 -0.03141808 +v -0.62533736 0.00591053 -0.10759427 +v 0.24499352 0.16281562 -0.30823591 +v 0.08771992 -0.39710739 -0.39945582 +v -0.54400104 0.68741018 -0.11578305 +v -0.45813864 0.62572032 -0.40319827 +v 0.23527320 0.15548196 -0.12022533 +v 0.08328089 -0.49127725 -0.01635757 +v 0.19803545 -0.53471816 -0.36117813 +v 0.20263065 0.35747081 -0.01951342 +v 0.24394137 0.28326568 -0.02182031 +v 0.20790169 -0.03567821 -0.17551954 +v -0.63881063 0.51825702 -0.39895281 +v -0.42315623 -0.27687842 -0.00885732 +v 0.12482969 0.67389083 -0.14410529 +v -0.64638311 0.37382853 -0.11159525 +v -0.09519975 -0.51459181 -0.01389579 +v -0.21773671 0.06284276 -0.02495233 +v 0.22465177 0.64139342 -0.14445040 +v 0.23777945 -0.01274566 -0.40184802 +v 0.21812889 0.53554654 -0.03540247 +v -0.63906407 0.54482102 -0.22538206 +v -0.02756455 -0.49621779 -0.07682174 +v 0.21306522 -0.53481317 -0.39451012 +v -0.34003046 0.40678886 -0.01357407 +v -0.66372710 -0.32691941 -0.22365136 +v 0.22757018 -0.11550236 -0.12079386 +v -0.02136678 0.22832134 -0.39037251 +v -0.30052966 -0.46940637 -0.01066589 +v -0.58101350 -0.50535619 -0.10691679 +v 0.24081306 0.07061268 
-0.08686591 +v 0.23886746 -0.01884986 -0.36442739 +v 0.25730202 0.57956004 -0.20626354 +v -0.20928870 0.65988874 -0.18697269 +v -0.01393510 -0.22870776 -0.01498604 +v 0.19448730 -0.53542197 -0.17046149 +v 0.23449498 -0.06133094 -0.01913128 +v -0.63254118 -0.28802210 -0.33321890 +v -0.53620070 -0.14933193 -0.39460012 +v 0.01169050 0.67607439 -0.12213603 +v -0.63772774 -0.42972356 -0.10021827 +v -0.02953318 0.68316591 -0.02429231 +v 0.22082202 0.34916046 -0.12009426 +v -0.52694374 -0.49380538 -0.37492371 +v -0.38402405 0.02640376 -0.01050461 +v 0.23862171 0.31473047 -0.01959045 +v -0.14343525 0.44197977 -0.02870224 +v -0.13545829 0.17294011 -0.40157056 +v 0.21261211 0.28600982 -0.03721359 +v -0.16813157 0.64035904 -0.01727122 +v -0.64972645 -0.51105499 -0.30807292 +v -0.66020197 -0.18091373 -0.39002314 +v -0.46357605 -0.49194679 -0.37864190 +v 0.25146267 0.62852812 -0.11534261 +v 0.22525339 -0.46896544 -0.07470380 +v -0.35141733 -0.03897041 -0.01038546 +v -0.63639653 -0.44741178 -0.37719059 +v -0.06229093 -0.49451038 -0.11296041 +v -0.21428068 -0.49817222 -0.38117072 +v 0.15337628 0.19994597 -0.01798401 +v -0.53470564 -0.48030651 -0.10629890 +v -0.08838756 -0.51845247 -0.11313403 +v -0.57728612 0.10604616 -0.00944202 +v 0.17252508 0.15051596 -0.01812742 +v -0.64793956 0.09068380 -0.39557716 +v 0.07561372 -0.52515990 -0.10059913 +v 0.02126492 0.67668450 -0.18552114 +v 0.12155548 -0.26876613 -0.01692951 +v -0.60652900 0.64305496 -0.11376298 +v -0.21981028 -0.29902375 -0.02476254 +v 0.21868956 0.32244867 -0.12284840 +v 0.10465948 -0.52482682 -0.05994074 +v -0.37309796 0.68228918 -0.36114770 +v 0.19192988 -0.49718484 -0.07601874 +v -0.51257956 -0.10089085 -0.38254291 +v -0.50652003 -0.49196839 -0.16811205 +v -0.21541987 0.36360276 -0.38906291 +v -0.50702947 -0.42566365 -0.00747344 +v 0.22435741 -0.43676218 -0.39877409 +v 0.13850525 0.04595699 -0.01742484 +v -0.41394019 -0.51977265 -0.29806644 +v -0.25795427 0.66091090 -0.23375086 +v -0.63149923 -0.24757467 -0.37432963 +v 0.25263005 0.67190278 -0.40382829 +v -0.63729763 -0.48115537 -0.14323887 +v 0.15439005 0.26655987 -0.40539664 +v 0.25794461 0.67548543 -0.39586505 +v -0.60773987 0.39159307 -0.02375867 +v -0.62346339 0.62003285 -0.11147568 +v -0.63587171 -0.44144806 -0.13390408 +v -0.63322979 0.68930697 -0.32558799 +v -0.60830051 0.56027657 -0.18183507 +v -0.18675810 -0.49544638 -0.38243353 +v -0.51688546 -0.50780594 -0.01083696 +v 0.20083614 -0.32168695 -0.13173252 +v -0.01672546 0.09198368 -0.01555882 +v -0.11369314 0.16332963 -0.02709240 +v 0.23561630 -0.09910005 -0.02124370 +v -0.61892962 0.22831722 -0.02325951 +v -0.41576546 0.59791452 -0.40315083 +v -0.05199202 -0.52997077 -0.31262842 +v 0.13159582 -0.06801768 -0.01718824 +v -0.60107362 0.18019143 -0.00986672 +v 0.03270302 -0.14779608 -0.40088287 +v -0.66124129 -0.23946829 -0.39084169 +v -0.65533537 -0.25921825 -0.10473417 +v 0.25760671 0.66737026 -0.25201914 +v 0.24651167 0.31976363 -0.40504366 +v 0.19212191 -0.50212616 -0.04577091 +v -0.63163233 -0.48320830 -0.04715734 +v -0.35961953 -0.11912576 -0.39682275 +v -0.60665697 0.63776714 -0.35892549 +v -0.63875425 -0.46018216 -0.35223976 +v -0.61317217 0.35992870 -0.34150788 +v 0.22438815 0.55586874 -0.39254111 +v -0.19300193 0.28176230 -0.01372642 +v -0.00556641 0.68093288 -0.11921763 +v -0.40138337 0.04959304 -0.02328334 +v 0.22657439 0.56360281 -0.19794497 +v -0.23082337 -0.49091724 -0.10848068 +v 0.19651231 -0.51065487 -0.17521144 +v 0.02287263 -0.49717379 -0.11113226 +v -0.64226913 0.43753621 -0.21303910 +v 0.19474848 -0.51145893 
-0.22615358 +v 0.17513888 -0.50074154 -0.11224804 +v -0.09486176 -0.37602928 -0.01379153 +v -0.57129568 -0.42171121 -0.39118743 +v -0.24950112 -0.51688039 -0.06318000 +v -0.41653600 -0.01649690 -0.01003779 +v 0.01735840 -0.50497371 -0.11799078 +v -0.62740064 0.00829166 -0.10841770 +v 0.13872032 0.65204602 -0.11841463 +v 0.25290400 0.40108073 -0.12379907 +v -0.44824159 0.22984083 -0.38590428 +v -0.63762015 0.59886676 -0.31818795 +v 0.00726689 0.65061021 -0.38894162 +v -0.66116959 -0.15674485 -0.06985640 +v -0.60542047 0.61491042 -0.01381493 +v 0.22296225 -0.49804604 -0.01916022 +v 0.08339361 -0.52286971 -0.11425576 +v -0.15496543 -0.50264239 -0.37585607 +v -0.29975903 -0.51943284 -0.10771470 +v -0.46031967 0.68750232 -0.11599455 +v 0.15263137 0.35060376 -0.01866063 +v -0.36633438 0.21970621 -0.02451870 +v -0.64025694 0.50689667 -0.36680663 +v -0.02743390 0.57610559 -0.40680116 +v 0.19553186 -0.51051944 -0.27665338 +v 0.19034530 -0.51272249 -0.26854479 +v 0.17069730 -0.41877967 -0.01774145 +v -0.64783454 0.33418396 -0.01494816 +v -0.35026526 0.00112899 -0.02333994 +v -0.08741470 -0.00793593 -0.01444261 +v 0.22388898 -0.47073880 -0.01980365 +v -0.46028638 -0.45503640 -0.02173988 +v -0.61624414 0.24338987 -0.25262299 +v -0.32918110 0.30144146 -0.01268809 +v -0.65617758 -0.05195381 -0.11209357 +v -0.33307230 0.35342664 -0.01312895 +v -0.04864350 0.65372437 -0.35633478 +v 0.19717282 -0.43563166 -0.33394212 +v -0.62464350 0.00604619 -0.02232203 +v 0.20440482 -0.05983739 -0.12006234 +v 0.19723426 -0.48914170 -0.13315512 +v 0.22744995 -0.48830858 -0.11648435 +v 0.15746467 -0.50094938 -0.11567497 +v 0.19684003 -0.43066800 -0.11821505 +v 0.20994979 0.15066223 -0.12088833 +v -0.25462109 -0.52455378 -0.34942496 +v -0.50163037 -0.51682103 -0.34398946 +v -0.63398749 -0.28585863 -0.10159960 +v -0.63592541 0.63434958 -0.12807113 +v 0.14526883 -0.50109625 -0.11377745 +v 0.23580067 -0.19667622 -0.23988914 +v 0.03199395 0.65272349 -0.20025952 +v 0.24827811 0.31584913 -0.36550722 +v -0.20250204 0.52198845 -0.40431747 +v -0.64878172 -0.00998097 -0.10873728 +v 0.20844708 -0.03615453 -0.12130324 +v -0.62253916 -0.00680717 -0.12296883 +v -0.17278300 0.68665272 -0.07167603 +v -0.04954204 0.65518552 -0.15725297 +v -0.22121820 -0.49011460 -0.11195186 +v 0.06199204 0.67007905 -0.11863847 +v 0.23441316 -0.10210125 -0.11682522 +v -0.40910172 0.18903616 -0.39910218 +v 0.22797476 -0.13494556 -0.01803593 +v -0.61077595 0.44715443 -0.13110271 +v 0.22943141 0.40865055 -0.01992365 +v -0.57796699 0.28012916 -0.38474107 +v 0.22813605 -0.31336734 -0.11707416 +v -0.02876251 -0.50444055 -0.15242088 +v -0.66637659 -0.42382595 -0.25560883 +v -0.65324891 0.11425601 -0.11035861 +v 0.16868261 -0.52340603 -0.01867935 +v -0.60660571 0.63007337 -0.21418977 +v -0.32676187 -0.50056595 -0.10693126 +v -0.62552667 -0.20351508 -0.37992260 +v 0.10389669 0.45753095 -0.03175937 +v -0.66805851 -0.33963874 -0.10753427 +v -0.63387483 0.59998077 -0.01326130 +v -0.48638555 -0.28526169 -0.00814793 +v -0.66134107 -0.21039791 -0.05538314 +v -0.65869915 0.20268638 -0.11019053 +v -0.65472859 0.01572468 -0.11112162 +v -0.61137754 0.64576930 -0.03915451 +v -0.47960666 -0.49448392 -0.30686012 +v 0.23740582 -0.15677682 -0.20293961 +v -0.66239578 0.00401277 -0.10892537 +v 0.25381798 0.43157834 -0.16182162 +v 0.02643110 -0.50637251 -0.18048225 +v -0.64988506 0.16591163 -0.01014587 +v 0.24141990 0.12353105 -0.05954712 +v -0.43932506 0.68256420 -0.40049818 +v -0.56514138 0.21797727 -0.01044844 +v -0.55201370 0.67273957 -0.03226577 +v -0.08145498 0.19737543 
-0.01483454 +v 0.24559014 0.06789567 -0.09811139 +v -0.60374874 0.66777658 -0.24636725 +v -0.06241370 0.24166052 -0.02820095 +v -0.14625882 -0.46008593 -0.38388231 +v 0.08641446 0.65104365 -0.22150491 +v 0.20780711 -0.05296354 -0.13600926 +v -0.29611865 0.24115975 -0.40053776 +v -0.19840345 0.00118608 -0.39940113 +v 0.06114727 0.65952873 -0.08760636 +v -0.64794457 -0.07340571 -0.10590314 +v 0.22440103 -0.50064403 -0.36546975 +v -0.19303001 -0.05605757 -0.38667962 +v -0.17008217 -0.36590552 -0.01254851 +v 0.09801383 -0.36154014 -0.38772860 +v 0.19244967 -0.48211050 -0.38763392 +v 0.14531495 0.19849057 -0.40443790 +v -0.65552473 0.01998051 -0.10940666 +v 0.01561255 -0.48586500 -0.01532264 +v -0.09902169 -0.49321473 -0.10919006 +v -0.66783577 -0.50606346 -0.31265989 +v -0.03081561 -0.30523863 -0.02806690 +v -0.65405017 -0.49403337 -0.10231324 +v -0.63826776 -0.45304069 -0.30098143 +v 0.22820264 0.61699802 -0.27575314 +v -0.66176856 -0.27439919 -0.39104873 +v 0.19881128 0.10916068 -0.40382254 +v -0.11310424 0.15463479 -0.01420048 +v -0.54824024 -0.46388447 -0.39088231 +v -0.13601112 -0.23651196 -0.02612046 +v 0.03181224 0.47384661 -0.39280814 +v -0.46547288 0.07774775 -0.39763382 +v 0.11059112 0.46333882 -0.40713671 +v 0.19390632 0.66957057 -0.31401676 +v 0.11419561 -0.53410941 -0.19342099 +v 0.21387689 0.06100275 -0.11737332 +v 0.19619241 -0.48504955 -0.11936615 +v -0.60789335 0.63699859 -0.02728177 +v 0.01282985 0.33535331 -0.39169341 +v -0.46218839 0.68722445 -0.11418300 +v -0.19413079 -0.49202341 -0.10863600 +v 0.20830633 0.10776106 -0.03900046 +v -0.12459351 0.33389920 -0.02797456 +v -0.45750871 0.69246078 -0.01731718 +v -0.51383895 0.52585900 -0.02648728 +v 0.06383274 -0.53303903 -0.29812494 +v -0.12293718 0.05371583 -0.02669282 +v 0.20411050 -0.15163691 -0.27568525 +v -0.63927382 0.63859046 -0.01627629 +v -0.27872854 0.23595208 -0.01248553 +v -0.57624662 0.68793064 -0.11524133 +v -0.65086550 0.45030060 -0.10895516 +v -0.10034774 -0.38400194 -0.39768320 +v -0.62787670 0.07464954 -0.10900623 +v 0.15538859 0.41703442 -0.03231002 +v -0.55436373 -0.49015066 -0.10753214 +v -0.11722837 -0.49373132 -0.11318892 +v -0.35443541 0.14061317 -0.38596365 +v -0.63356757 0.69065881 -0.26251820 +v 0.18178219 -0.02613922 -0.40277508 +v -0.58819413 -0.30553845 -0.00681512 +v -0.02775381 0.12978174 -0.40219951 +v 0.03259051 -0.29495916 -0.02901034 +v -0.47328085 0.09420820 -0.01028375 +v -0.63580757 0.67731643 -0.11009478 +v -0.00271445 0.48541498 -0.40625390 +v 0.02941099 0.42109296 -0.01731505 +v -0.20185685 0.57282197 -0.40482664 +v -0.42662996 0.27932078 -0.01192338 +v -0.50252372 0.16900474 -0.39827940 +v 0.23020972 -0.05543799 -0.40319148 +v -0.15906388 -0.49815115 -0.38202330 +v -0.55227220 0.66837287 -0.33890972 +v -0.64851540 0.21236852 -0.11602604 +v -0.29394516 0.29845753 -0.02580002 +v 0.15567020 -0.19034839 -0.38934311 +v 0.23762348 -0.06780502 -0.35831484 +v -0.58681172 0.00147328 -0.38250163 +v 0.22523052 0.64615464 -0.22852813 +v 0.06139308 -0.51787722 -0.01590394 +v -0.06768212 -0.42966050 -0.01413239 +v 0.18463916 -0.53549963 -0.29104257 +v -0.23085396 -0.01913493 -0.01171997 +v -0.62592852 -0.01802156 -0.02330844 +v -0.66381139 -0.43723714 -0.00661469 +v -0.23906131 0.67460626 -0.40476471 +v -0.66696787 -0.50276142 -0.08750593 +v -0.57576019 0.45195618 -0.38614514 +v 0.09414573 -0.07659421 -0.40187865 +v -0.61101139 0.66796488 -0.10582867 +v -0.44386899 0.67786336 -0.11288040 +v -0.38750035 -0.33429158 -0.00920457 +v -0.48514131 -0.04956710 -0.38339695 +v 0.24892589 0.25828212 -0.17779195 
+v 0.08180909 -0.24232467 -0.02964908 +v -0.08194899 0.66304642 -0.11912061 +v 0.22281389 -0.48754308 -0.11731161 +v -0.18609490 -0.50030220 -0.22086191 +v 0.17044654 0.09915315 -0.03105381 +v -0.39438930 -0.48217550 -0.02260331 +v -0.64254034 0.51653677 -0.01514519 +v 0.24811438 0.07775860 -0.14465083 +v 0.03450542 -0.53237700 -0.24774027 +v -0.16800082 0.25539604 -0.40194014 +v -0.66538066 -0.32761508 -0.10190003 +v -0.57555026 0.67077780 -0.14496404 +v -0.66660434 -0.43149024 -0.30471283 +v 0.05561006 0.67983705 -0.11981766 +v -0.63511634 -0.26826644 -0.10431203 +v -0.44243538 -0.50582421 -0.00876370 +v -0.09244242 -0.15178546 -0.02695623 +v -0.32114002 0.66705942 -0.03322580 +v -0.62194514 0.00693477 -0.11259656 +v -0.08780881 -0.52018791 -0.01917724 +v 0.24065711 0.11412074 -0.11819590 +v -0.18465617 0.17073841 -0.38806733 +v -0.41655377 -0.31560552 -0.02198117 +v 0.24012719 0.27797922 -0.11922529 +v -0.40732497 0.68074018 -0.40303358 +v -0.52528721 0.64209038 -0.38816372 +v 0.22056367 0.65165251 -0.03797702 +v -0.51144785 -0.30695593 -0.02088709 +v 0.24551855 0.31092653 -0.02353016 +v -0.66730577 -0.49927801 -0.10310943 +v -0.37301841 -0.48710525 -0.10677253 +v -0.66267473 -0.37502339 -0.00713854 +v 0.03332527 -0.20132214 -0.40053713 +v -0.64100945 0.41718999 -0.11025861 +v -0.50728273 0.02559243 -0.02214841 +v 0.23393199 -0.19871622 -0.35524347 +v -0.03010128 0.28577822 -0.39074636 +v -0.33482832 -0.43308726 -0.39423648 +v -0.30583888 -0.41206452 -0.01038886 +v 0.05726640 0.35065073 -0.03043805 +v 0.15627952 -0.40561020 -0.38782245 +v -0.65635920 -0.03860943 -0.34782171 +v -0.55803472 0.58630669 -0.38732731 +v -0.63265872 -0.30827686 -0.22581485 +v 0.20489904 -0.03280006 -0.38859141 +v -0.05525840 0.14030039 -0.01512519 +v 0.05746864 -0.14990406 -0.01602735 +v -0.44188496 0.54414284 -0.38820159 +v -0.63586640 0.45134884 -0.11305998 +v 0.21348272 0.26784176 -0.12128877 +v 0.21613744 0.34175628 -0.39183575 +v -0.63046736 -0.39929527 -0.01895213 +v -0.65401679 0.00066811 -0.11576220 +v -0.55833167 0.69388914 -0.13484029 +v -0.35450447 -0.48491359 -0.10975647 +v 0.22225329 0.60893893 -0.12142665 +v -0.40161359 -0.51357943 -0.10648019 +v 0.24032177 0.04596558 -0.07834136 +v -0.09646671 -0.49947292 -0.38255608 +v 0.24697009 0.67113328 -0.41100514 +v -0.40238670 0.68519789 -0.31607598 +v 0.24124850 0.32880452 -0.40609795 +v -0.04906830 0.65381312 -0.03208406 +v 0.23412146 -0.27945223 -0.15428606 +v -0.49787214 -0.32588685 -0.38053453 +v 0.21978034 0.29282495 -0.11924828 +v -0.26647118 -0.33884445 -0.38370144 +v 0.16018610 0.14844628 -0.03109807 +v 0.00161202 0.31367487 -0.40457982 +v 0.21985714 0.35365158 -0.16659285 +v -0.61569870 0.12445524 -0.38259289 +v -0.53243214 0.51560599 -0.40152076 +v -0.11356750 -0.52824354 -0.14939524 +v -0.03940686 -0.49127135 -0.11495409 +v 0.21888690 -0.51266336 -0.01824062 +v 0.21760435 0.29441640 -0.26648834 +v -0.02028877 0.68205631 -0.02050112 +v -0.05302349 -0.45664221 -0.38468531 +v -0.07988301 0.65609533 -0.12229051 +v -0.54844749 -0.05049544 -0.38253865 +v 0.21057715 0.05452554 -0.12168495 +v -0.30909261 0.65903372 -0.38754371 +v 0.22832307 0.63861549 -0.14055197 +v 0.24477363 0.26453358 -0.02587109 +v -0.62683213 -0.01572125 -0.07005002 +v 0.21533875 0.21676348 -0.24546529 +v 0.21930675 0.51461405 -0.12060790 +v -0.66605133 -0.50909078 -0.28391129 +v -0.15028045 0.67692888 -0.11703628 +v -0.63980877 0.51031601 -0.11185441 +v 0.21021363 -0.09282830 -0.11617115 +v -0.29833549 0.68396592 -0.11798270 +v -0.63842893 0.58303624 -0.01374855 +v -0.08590157 
-0.01467798 -0.40059626 +v -0.66650701 -0.47382054 -0.15697806 +v -0.65597516 0.13376300 -0.07555446 +v -0.47770700 -0.17918685 -0.39499289 +v -0.07727180 0.55201542 -0.03024740 +v 0.21962932 -0.53595817 -0.31772730 +v 0.17978036 0.47479558 -0.03300367 +v -0.65114444 0.18994811 -0.01251234 +v 0.02060724 0.08083180 -0.02901119 +v -0.10912076 -0.49436241 -0.10992328 +v -0.62682444 -0.11081831 -0.02117603 +v -0.45512012 0.57960904 -0.01445496 +v -0.62484556 0.05515678 -0.02857501 +v -0.16539212 -0.10323561 -0.38671708 +v 0.22128820 0.64613968 -0.15010506 +v 0.13675700 0.67524487 -0.02005089 +v 0.24423860 0.24837480 -0.06023054 +v 0.23180468 -0.27325886 -0.35264975 +v -0.51069772 -0.51619625 -0.24712068 +v 0.07525812 -0.52764213 -0.39809257 +v -0.28377676 0.66078037 -0.32360771 +v 0.19866037 -0.41232887 -0.15926707 +v -0.24812363 0.66653216 -0.11667159 +v -0.59532875 -0.51318038 -0.29725832 +v -0.63790411 -0.48633927 -0.26649261 +v 0.19246005 -0.24913380 -0.03113509 +v 0.13217717 0.64875019 -0.12188241 +v -0.53754699 0.66934758 -0.22598761 +v -0.21247563 0.68537259 -0.01839977 +v -0.28455243 -0.49054325 -0.10750575 +v -0.62925899 -0.23917373 -0.37880915 +v -0.65678155 -0.08678219 -0.10532270 +v -0.09314123 0.28493884 -0.02800775 +v -0.07739979 -0.16093752 -0.38722286 +v -0.65799755 -0.12031502 -0.11168461 +v 0.13237941 -0.51062250 -0.26327804 +v 0.12585142 0.65600276 -0.03551950 +v -0.01108298 0.34755498 -0.02952100 +v -0.05629514 0.30479780 -0.40373254 +v -0.62182730 0.21898215 -0.10814194 +v 0.18362550 -0.35881791 -0.40090054 +v -0.62625098 -0.07065738 -0.10692190 +v -0.60331082 0.67603564 -0.08649951 +v -0.04493386 -0.51635909 -0.11489493 +v -0.35680842 -0.05875126 -0.02306376 +v 0.20750518 0.08538838 -0.03642846 +v 0.04705462 -0.32032835 -0.38753563 +v -0.07213642 0.67200625 -0.40558794 +v 0.12058550 -0.51081049 -0.33499172 +v 0.22741431 -0.40270454 -0.06821166 +v -0.27912521 -0.49922258 -0.33936420 +v -0.08512585 0.44931430 -0.02938695 +v -0.63361353 -0.37849796 -0.10804832 +v -0.25084233 -0.51320112 -0.11179781 +v 0.16336311 -0.48485810 -0.39963388 +v -0.64398921 0.67778116 -0.11267146 +v -0.35160649 0.63252068 -0.01586054 +v -0.60981321 0.49450827 -0.16853079 +v -0.28471881 0.00552298 -0.38583067 +v 0.24029879 -0.06899947 -0.12069301 +v 0.19510455 -0.48041067 -0.10879048 +v -0.38940489 -0.49519873 -0.22516291 +v -0.10888521 -0.15132040 -0.01364174 +v 0.21880759 0.32770911 -0.22121298 +v -0.61545801 0.50846714 -0.11173951 +v -0.40311113 -0.44570395 -0.00898626 +v -0.16192073 0.20790629 -0.02654770 +v -0.61215049 -0.20463865 -0.00717343 +v -0.39980873 0.66319025 -0.35103673 +v 0.19530936 -0.49904406 -0.11744268 +v -0.50461000 0.66647440 -0.38032368 +v -0.63365704 0.68917322 -0.19126432 +v 0.08531128 0.64971775 -0.11866357 +v -0.34981960 0.00403495 -0.38511470 +v -0.63620424 0.53247058 -0.11129822 +v -0.66687560 -0.46853590 -0.22030529 +v -0.38178632 0.68942779 -0.11412385 +v -0.09801288 0.66344225 -0.07175221 +v -0.12679496 -0.42093182 -0.01332641 +v 0.19962040 -0.22727591 -0.10867260 +v 0.19903672 -0.25969204 -0.03453138 +v 0.23117752 -0.25142395 -0.02022579 +v 0.20172472 -0.17706285 -0.03614803 +v -0.32057160 -0.48740059 -0.10885856 +v -0.06424136 -0.29164550 -0.38647324 +v -0.48496199 0.52308393 -0.01375748 +v -0.19790407 -0.19192621 -0.38577577 +v -0.47707975 -0.26841161 -0.38137987 +v -0.43709767 -0.51075560 -0.01344641 +v -0.62057287 -0.51140833 -0.21202715 +v -0.64672071 0.42618951 -0.06051949 +v 0.20462009 -0.17404874 -0.12077259 +v 0.11818425 -0.47489262 -0.38685900 +v 0.13592505 
-0.53230339 -0.39748296 +v 0.24474809 0.15044789 -0.11979340 +v -0.65817159 -0.10015455 -0.10523589 +v -0.61483335 0.30223209 -0.01147571 +v -0.33498439 -0.44407123 -0.38159734 +v -0.63314247 -0.50793689 -0.10375073 +v 0.24438457 0.25356352 -0.02309652 +v -0.27399239 0.66822255 -0.11740609 +v -0.65343815 -0.09569942 -0.00741642 +v -0.17762375 0.65396470 -0.38902843 +v 0.25037754 0.31073040 -0.17594679 +v -0.10219334 0.67952186 -0.21787331 +v 0.21304506 0.47040585 -0.40729734 +v -0.62570566 -0.07869589 -0.37710419 +v 0.23412922 -0.26553321 -0.11762992 +v -0.42905670 0.33636758 -0.38653898 +v -0.51613766 0.69582123 -0.07284671 +v -0.16417606 0.10270242 -0.38786393 +v 0.23822266 0.38618800 -0.11960403 +v -0.56588358 0.08027744 -0.39670125 +v -0.66482246 -0.32165316 -0.10288730 +v -0.26497862 0.67898947 -0.36038086 +v -0.22170694 -0.08617765 -0.01172125 +v -0.67062342 -0.35462958 -0.10449629 +v -0.46875462 -0.39756662 -0.02135476 +v -0.62865222 0.69158214 -0.31554830 +v -0.27601734 0.66259658 -0.12117600 +v 0.15328698 -0.53458506 -0.14997144 +v -0.27921990 0.24857862 -0.02539915 +v -0.66648901 -0.50737351 -0.17307264 +v -0.61338437 0.35550067 -0.24388695 +v -0.66252613 -0.27824649 -0.24148516 +v 0.13701819 0.67040932 -0.12152877 +v -0.12445765 -0.51616371 -0.11363787 +v -0.38980421 0.46117139 -0.40179226 +v -0.61196357 0.41586643 -0.25287512 +v -0.63358533 0.68505871 -0.38831842 +v 0.21447611 0.64597052 -0.14195117 +v -0.61781317 0.21076189 -0.11029436 +v 0.22958779 -0.42066988 -0.20836404 +v -0.65377861 0.04580921 -0.28681585 +v -0.61114949 0.44942591 -0.11310934 +v 0.21674427 0.13539475 -0.12208114 +v 0.19578043 -0.40955734 -0.38738900 +v 0.23056315 0.02002790 -0.40375575 +v -0.64299333 0.41446039 -0.39614058 +v -0.11139652 0.65228975 -0.01819849 +v -0.23192644 0.62803215 -0.39008209 +v 0.21771452 0.28848019 -0.16644306 +v 0.03094716 0.65244985 -0.24849285 +v -0.02670404 0.10478622 -0.02841287 +v -0.65056324 0.16430333 -0.28733373 +v -0.36410436 -0.15034476 -0.38386613 +v -0.61373252 0.50086975 -0.02770604 +v -0.28668228 0.66004777 -0.38328058 +v -0.16084804 0.13777164 -0.02618599 +v -0.23947844 -0.48941210 -0.10921261 +v -0.10839876 0.24496691 -0.01462517 +v 0.19701436 0.67134786 -0.14083198 +v 0.23898812 -0.11450754 -0.15369412 +v -0.54089028 0.39826480 -0.40001944 +v 0.23047356 -0.30251020 -0.02444295 +v 0.12046268 -0.53440237 -0.29484078 +v 0.23712188 -0.17621508 -0.15079275 +v 0.20900284 0.32798842 -0.03241045 +v -0.24578372 0.29024896 -0.40153310 +v 0.19069372 -0.50020730 -0.38726774 +v -0.11526979 0.67646909 -0.35154036 +v 0.23338941 -0.11919007 -0.01890149 +v -0.24501315 -0.28437757 -0.38456872 +v -0.61983043 0.68939036 -0.11503537 +v 0.01328061 -0.11862311 -0.01546393 +v -0.63091779 -0.23316789 -0.37594673 +v -0.40611395 -0.08881129 -0.02259395 +v 0.22046909 0.58625400 -0.11822611 +v -0.54892099 0.25842595 -0.02349951 +v 0.21461181 0.19866550 -0.30157486 +v 0.21540029 0.36901054 -0.12014320 +v -0.20468035 -0.51597369 -0.01388770 +v -0.46354243 0.47112578 -0.01344173 +v -0.66398275 -0.25072643 -0.10338902 +v -0.66261315 -0.08812839 -0.10398521 +v 0.01700541 0.68241459 -0.05334861 +v -0.11405635 0.68549579 -0.11503622 +v -0.63112003 0.69141543 -0.11271869 +v 0.23922110 0.07199078 -0.11679755 +v 0.23399614 -0.05500325 -0.11848442 +v 0.25891006 0.66322279 -0.23824292 +v -0.30986306 0.68399531 -0.25249958 +v 0.22102718 -0.34924594 -0.12057598 +v 0.25643966 0.54727769 -0.12395652 +v 0.24335550 0.13517141 -0.35760355 +v -0.66758722 -0.45945081 -0.10452863 +v 0.14964670 0.59072930 
-0.01940151 +v 0.09134526 -0.53295469 -0.35610944 +v -0.65573186 -0.01574133 -0.01011183 +v 0.11630014 -0.50981677 -0.38204736 +v -0.49840450 0.28424871 -0.01150039 +v 0.20225470 -0.36331481 -0.11518644 +v -0.66280514 -0.50886023 -0.14949483 +v 0.10204094 -0.45583323 -0.03025889 +v -0.62144578 0.20036465 -0.06657075 +v 0.17223102 -0.51088715 -0.18963064 +v -0.66071618 -0.01558189 -0.10790491 +v -0.65868098 -0.45971453 -0.10240856 +v -0.65669698 -0.11334942 -0.00805091 +v -0.26085442 -0.11808199 -0.02388634 +v -0.12453186 0.40875444 -0.01553669 +v 0.25299391 0.54864615 -0.40625709 +v -0.33477953 0.66384357 -0.16156970 +v 0.22163135 -0.43168718 -0.01836956 +v -0.57067585 -0.51387620 -0.24277201 +v -0.59031105 -0.48847306 -0.37696889 +v -0.56788033 -0.16037983 -0.00775771 +v -0.66549313 -0.50715059 -0.38823372 +v 0.21474239 0.18629716 -0.15243620 +v -0.09392705 0.00441202 -0.38808522 +v -0.52448577 0.13241875 -0.38456297 +v -0.66274625 -0.20312881 -0.10486651 +v -0.65160257 0.15318535 -0.11137141 +v -0.61752385 0.33938593 -0.02585236 +v 0.14072767 -0.49911907 -0.11836229 +v -0.62800449 -0.13740942 -0.37307256 +v 0.24561855 0.44810054 -0.02065346 +v 0.22139071 -0.34301126 -0.01824657 +v -0.49096513 0.65442824 -0.38893354 +v -0.64300865 0.65806657 -0.11325914 +v -0.20191553 -0.42779350 -0.38359550 +v 0.15862976 0.66820633 -0.12052108 +v -0.66380352 -0.35985029 -0.39033785 +v -0.01887808 -0.53006339 -0.36265987 +v -0.06802240 -0.49630779 -0.11005903 +v -0.27968320 0.67598963 -0.40410957 +v -0.05945408 -0.41516569 -0.39782491 +v -0.04041280 0.29152390 -0.02877331 +v 0.06188480 0.02093512 -0.40217632 +v -0.64249408 0.45882377 -0.01277703 +v -0.65821248 0.25307891 -0.11067820 +v 0.20688064 0.05056455 -0.04031668 +v -0.02558528 -0.13164134 -0.02803626 +v -0.47968063 0.66663420 -0.02958355 +v -0.65464127 0.07016590 -0.01305193 +v -0.60545343 0.45219153 -0.01230807 +v -0.19056191 -0.34086108 -0.02538511 +v -0.34058559 -0.51309156 -0.11003903 +v -0.19703871 0.65903831 -0.24332373 +v -0.63856703 0.54277647 -0.11025478 +v -0.60636991 0.62795126 -0.15631804 +v 0.23511745 -0.13285068 -0.02280928 +v -0.01517631 0.65409184 -0.20212170 +v -0.64492607 0.26663402 -0.10889474 +v -0.13175359 0.67710346 -0.11775716 +v 0.02029249 0.07624573 -0.40215993 +v -0.64354622 0.67235279 -0.11427023 +v -0.39513662 -0.48556286 -0.10779513 +v 0.00924866 -0.53165227 -0.30625182 +v -0.55753022 -0.48478091 -0.10445586 +v -0.04690238 -0.49543560 -0.11145057 +v -0.63341630 -0.39454320 -0.37797338 +v -0.19951934 0.57758158 -0.39083147 +v 0.22976194 -0.25895777 -0.39952070 +v -0.38429502 0.31019670 -0.38681516 +v 0.20359106 -0.15598327 -0.33449638 +v 0.21276866 0.32261494 -0.03461649 +v -0.49826622 0.69521803 -0.10987137 +v -0.32709438 -0.49705952 -0.37787443 +v -0.12391998 -0.49590427 -0.10956667 +v -0.18679614 0.60225540 -0.02996016 +v 0.25236675 0.43663651 -0.12207135 +v 0.16455619 -0.49691978 -0.11826399 +v 0.24385987 0.22821261 -0.40386298 +v -0.61463869 0.30386639 -0.23978873 +v -0.10804029 0.62661445 -0.39168596 +v -0.65198141 0.07114535 -0.12160325 +v -0.63885885 -0.44221884 -0.10508525 +v -0.21999933 -0.52571273 -0.30851805 +v -0.53127229 0.47921678 -0.38699263 +v -0.64979261 0.31326595 -0.06020203 +v -0.66276157 -0.05902210 -0.10625932 +v -0.48111421 -0.51748556 -0.29616913 +v -0.61534268 0.44424605 -0.02842819 +v 0.20719044 -0.04164941 -0.29290944 +v -0.20762940 0.11484808 -0.02532171 +v -0.56086332 -0.48252255 -0.02219607 +v -0.63686204 0.69727892 -0.02544979 +v -0.12108860 0.42900184 -0.39060315 +v -0.60802108 0.67482114 
-0.07469018 +v -0.65781564 0.03751931 -0.08674803 +v 0.20726980 0.09830390 -0.03433307 +v -0.05284412 -0.52293795 -0.39661890 +v -0.61447740 0.47333860 -0.02742773 +v 0.25977796 0.67111444 -0.39949942 +v -0.45455676 -0.48587871 -0.10555675 +v -0.36271676 -0.39458403 -0.38175884 +v -0.48329532 -0.49370006 -0.25805232 +v -0.66341436 -0.10847657 -0.10714276 +v 0.19969732 -0.26995814 -0.11877379 +v -0.01788220 0.54814881 -0.03078529 +v -0.60898364 0.57702041 -0.11315105 +v -0.64108092 0.41227058 -0.39837533 +v -0.49031484 -0.47890690 -0.00772835 +v 0.06744004 0.24097762 -0.39153087 +v 0.18802884 -0.49231106 -0.11481238 +v 0.03274180 0.66344577 -0.12109089 +v -0.13109308 0.56089199 -0.02984143 +v 0.25301445 0.41504249 -0.21533449 +v 0.17765573 -0.50214589 -0.11561241 +v -0.07002683 -0.50353336 -0.18635990 +v -0.09419323 -0.16481303 -0.39945814 +v 0.11755205 0.11142868 -0.39066616 +v -0.13928251 0.66325140 -0.11768184 +v -0.66615611 -0.28203189 -0.10404862 +v -0.55536443 0.43651518 -0.01239957 +v -0.63527995 0.65196782 -0.24466953 +v -0.45500475 0.27138081 -0.39960817 +v 0.23911621 -0.04201607 -0.31166136 +v -0.51187515 -0.12132299 -0.02139264 +v 0.18895046 0.67592609 -0.12189219 +v 0.20147142 -0.39290839 -0.11486770 +v -0.66350138 -0.26801047 -0.11745758 +v -0.29443642 0.52767110 -0.02813627 +v -0.08199994 0.67671579 -0.31882820 +v -0.64802873 0.22805248 -0.39543864 +v -0.60346425 0.60700971 -0.38719794 +v 0.22105543 0.00302742 -0.11705331 +v 0.20861895 0.22246617 -0.03263089 +v -0.61252409 0.32608071 -0.38308483 +v -0.66231865 -0.01138275 -0.10785130 +v -0.56007481 -0.20438144 -0.00758877 +v -0.42616633 -0.48217988 -0.02207819 +v -0.33964857 0.50143236 -0.02757497 +v 0.25214919 0.66852021 -0.23618838 +v -0.62325561 -0.00405224 -0.34142321 +v -0.29294136 -0.48898169 -0.10848962 +v -0.39177528 -0.51886296 -0.39145768 +v -0.19713080 -0.41078117 -0.01213445 +v -0.20626232 0.66966355 -0.11496345 +v 0.23480520 -0.14813298 -0.02313440 +v -0.63087672 -0.24282020 -0.30960107 +v 0.16409032 0.56693488 -0.40816993 +v -0.59481144 0.51204884 -0.38670474 +v -0.63347512 0.68265229 -0.11161526 +v -0.65443128 0.02990514 -0.34719574 +v -0.58560824 0.69358468 -0.18304446 +v 0.08818632 0.45401999 -0.39328325 +v 0.12689608 0.02311926 -0.40274444 +v -0.10895160 -0.39560220 -0.02692559 +v -0.48244792 -0.51322186 -0.10505503 +v -0.65020984 0.15603502 -0.18925405 +v 0.22475976 0.44644219 -0.12044746 +v -0.17300791 -0.10878054 -0.39909983 +v -0.65665847 -0.00075226 -0.01514051 +v 0.05905353 -0.03948654 -0.40180165 +v 0.24849609 0.28375086 -0.29532996 +v 0.11160009 0.68018454 -0.06418940 +v 0.06967241 0.27605152 -0.03024996 +v -0.36544567 0.68951058 -0.01783209 +v -0.39621431 0.24508411 -0.38637963 +v -0.61245495 0.58270210 -0.03162277 +v 0.09144521 0.67373419 -0.26282290 +v -0.10186295 -0.50017697 -0.10991732 +v 0.07973833 0.31239805 -0.39236194 +v 0.03977161 -0.49252927 -0.11532389 +v -0.18173495 -0.44014823 -0.39608973 +v -0.07162167 0.48399472 -0.39165151 +v 0.15580361 -0.03916406 -0.03076146 +v -0.58931255 -0.39992565 -0.37850660 +v -0.61682487 0.22356895 -0.31442359 +v 0.22078665 -0.51812851 -0.01869382 +v 0.22092234 -0.50835282 -0.11895677 +v 0.09251530 -0.30654415 -0.02993675 +v 0.23147210 -0.35828257 -0.19580150 +v -0.66626102 -0.36394650 -0.11528091 +v -0.64657462 0.28473818 -0.17755364 +v -0.58690870 -0.50793105 -0.09202523 +v -0.15040566 0.65785092 -0.15329453 +v -0.65634102 -0.26700279 -0.39235961 +v -0.45447478 0.48494858 -0.02662643 +v -0.32601142 0.20004740 -0.02463828 +v -0.22178870 -0.48377398 
-0.02484467 +v -0.27153718 0.34554887 -0.38825309 +v -0.53955638 0.53394938 -0.01345364 +v 0.24034762 0.10532028 -0.02071049 +v -0.47985718 -0.43729579 -0.37954023 +v -0.29238838 -0.23634106 -0.38448212 +v -0.61218870 0.38012174 -0.11462897 +v -0.65608758 -0.08293577 -0.00819389 +v -0.61020726 0.48530275 -0.38131902 +v 0.24241610 0.13616404 -0.40141758 +v 0.25034955 0.56332833 -0.11586390 +v 0.10043083 0.30412358 -0.40567005 +v 0.03883211 0.19555491 -0.39077318 +v 0.20137163 -0.02706640 -0.03169511 +v -0.28556085 0.68552238 -0.11745077 +v -0.09940021 -0.33961406 -0.38573301 +v 0.02064075 0.54147166 -0.40699095 +v -0.03899445 -0.35159510 -0.38635856 +v -0.48527157 0.68592006 -0.36818773 +v -0.15492661 0.42996776 -0.40385363 +v -0.63726133 -0.33929592 -0.10657679 +v -0.54091829 0.66651618 -0.11358085 +v -0.54153013 0.16064513 -0.01012332 +v -0.41692981 -0.48635396 -0.02485956 +v -0.01517365 -0.24258079 -0.38741690 +v -0.23009589 0.67139262 -0.40546387 +v 0.22278091 0.27975419 -0.11846995 +v 0.20719820 -0.05418757 -0.22864558 +v -0.63231540 -0.24267800 -0.06726822 +v -0.20696884 0.46192855 -0.40367723 +v 0.00671948 -0.52891153 -0.39607570 +v -0.66431540 -0.13651088 -0.10513631 +v 0.20276172 -0.07167640 -0.11376341 +v -0.03774260 -0.49771807 -0.11063011 +v 0.25926092 0.67174214 -0.40966913 +v -0.01728564 -0.53103215 -0.25920489 +v -0.17518388 0.63561654 -0.40559241 +v -0.17123380 -0.43944523 -0.02600215 +v 0.15056588 -0.53292888 -0.11491238 +v -0.62209332 -0.48178625 -0.10498396 +v -0.61746228 0.11253525 -0.38216779 +v -0.66125619 -0.03301529 -0.10820067 +v -0.56491059 0.56371355 -0.02652047 +v 0.21564621 0.25607669 -0.38822863 +v -0.63434035 0.68195361 -0.37982771 +v 0.20456141 -0.21823741 -0.11698862 +v 0.08332749 0.65841526 -0.11800823 +v 0.10301901 0.66893947 -0.11925594 +v -0.15574323 0.32111937 -0.38940760 +v 0.25451469 0.67670673 -0.38003173 +v 0.22437581 0.58482176 -0.12320501 +v 0.24937165 0.42897221 -0.40571454 +v -0.65433395 0.23852056 -0.11067352 +v -0.12282419 0.62633538 -0.03088487 +v -0.07951667 0.61694646 -0.03106828 +v 0.11646925 -0.12893064 -0.01683291 +v 0.24953549 0.67571628 -0.04509471 +v 0.22542797 -0.53399473 -0.20271705 +v -0.51119667 -0.05193432 -0.39592355 +v -0.15592754 -0.38523343 -0.02609918 +v -0.23323186 0.65827823 -0.11682394 +v -0.66770482 -0.25104979 -0.10660615 +v -0.66501170 -0.37318280 -0.23858081 +v -0.49053490 0.61665225 -0.38828287 +v 0.22915278 0.63373262 -0.19367291 +v -0.22630194 0.63750780 -0.01692568 +v -0.64440370 0.32025322 -0.10917049 +v 0.24361166 0.10904557 -0.29656914 +v -0.03550258 0.55363452 -0.39249963 +v 0.04315342 -0.49783328 -0.11088204 +v -0.18293554 -0.52521294 -0.39215684 +v -0.66002738 -0.29532593 -0.00686661 +v 0.22340046 -0.53434265 -0.30811316 +v -0.46143794 -0.49282098 -0.15397072 +v 0.25621966 0.53514719 -0.17494547 +v 0.19396302 -0.40769750 -0.03207513 +v -0.13283889 -0.50307494 -0.34955499 +v -0.40610865 0.68780327 -0.20691463 +v -0.08169265 0.01607534 -0.02748646 +v -0.42310193 0.69170451 -0.01819764 +v 0.19821519 -0.38387284 -0.32879597 +v -0.07506993 -0.52873725 -0.35954255 +v 0.19760847 -0.46329242 -0.12029001 +v 0.02394831 0.19570974 -0.01626906 +v -0.06052145 0.67754698 -0.26361483 +v -0.40776753 0.24602561 -0.39963880 +v -0.66398001 -0.32628775 -0.27218342 +v 0.07341008 -0.46006903 -0.38657281 +v -0.64545584 0.29711014 -0.39667189 +v -0.39312944 0.66435993 -0.29538187 +v -0.13622576 -0.33545056 -0.02635238 +v -0.17222448 -0.49730724 -0.39549887 +v 0.20986064 0.60047334 -0.40826696 +v 0.23214288 -0.18594223 -0.40019497 
+v -0.62439984 0.69596004 -0.12239264 +v -0.35307312 0.58850253 -0.40347573 +v 0.20005840 -0.07557082 -0.03164149 +v -0.65384752 0.05531681 -0.01003566 +v -0.26840624 -0.16493925 -0.38525000 +v -0.65898800 -0.01920299 -0.08825021 +v 0.10650832 -0.50607622 -0.38520661 +v 0.11307472 -0.36718148 -0.03028272 +v 0.21990864 0.66931754 -0.26639324 +v -0.64647216 0.28063071 -0.39593080 +v -0.66743088 -0.42730781 -0.10355669 +v -0.63594544 0.63197660 -0.01415537 +v 0.17080528 0.65696597 -0.07502721 +v 0.20169680 -0.18653797 -0.38746244 +v 0.21615824 0.24969563 -0.29012638 +v -0.25635120 0.65912837 -0.38304654 +v 0.22215888 -0.53526139 -0.16125479 +v -0.63111472 0.02289249 -0.10955986 +v -0.62586671 -0.10887217 -0.15695593 +v 0.22618577 -0.53239518 -0.15421456 +v 0.04073169 -0.13740332 -0.02902055 +v -0.61253679 -0.50241971 -0.00632404 +v 0.12222417 0.66309053 -0.40850079 +v -0.10900527 0.67476541 -0.11996064 +v -0.35932207 0.14839137 -0.02408762 +v -0.11222575 -0.10930826 -0.39967114 +v 0.24113105 0.34510747 -0.40623966 +v -0.14155823 -0.15288201 -0.38654387 +v 0.24773586 0.40392986 -0.02383953 +v 0.24582866 0.54294705 -0.02060750 +v -0.61726254 0.05739957 -0.02101092 +v -0.34194222 0.47950405 -0.38846353 +v -0.53785390 -0.46545267 -0.00694662 +v -0.66685230 -0.45327282 -0.34533951 +v -0.60983086 0.51976764 -0.37982962 +v -0.30114350 -0.48888022 -0.11055096 +v -0.45379630 -0.20902103 -0.38228267 +v 0.25729746 0.66492486 -0.33146372 +v -0.16866606 0.44500762 -0.01538435 +v 0.20425938 0.10895322 -0.03174490 +v -0.33833262 -0.09582955 -0.38455233 +v -0.52641070 0.33044004 -0.02439997 +v -0.18557742 0.03024615 -0.01263915 +v -0.49548846 0.04712170 -0.00976033 +v -0.64486957 0.44822383 -0.01735037 +v -0.64421421 -0.48112664 -0.10313752 +v -0.66038573 -0.06987677 -0.08803616 +v 0.19619539 -0.44850692 -0.11747885 +v -0.66232365 -0.03605393 -0.10590442 +v -0.61445165 0.30286932 -0.15655166 +v -0.63607085 -0.48337001 -0.09833863 +v -0.04224557 -0.05991019 -0.40067586 +v -0.42624813 -0.51895857 -0.19076984 +v 0.21207507 -0.53416705 -0.11479323 +v 0.13149907 0.65230787 -0.11807206 +v -0.65814829 -0.15969801 -0.00843603 +v -0.66272813 -0.10278014 -0.10783172 +v -0.63912237 0.54961288 -0.28056583 +v -0.20326957 0.68315154 -0.14073113 +v -0.33433133 -0.19731490 -0.38406700 +v 0.23387347 -0.08470992 -0.01863765 +v 0.22143443 0.42143646 -0.33194098 +v 0.19850707 -0.47535551 -0.11559454 +v -0.03151149 0.65076119 -0.38991612 +v -0.43125549 -0.49558723 -0.33174247 +v -0.40569645 0.12041015 -0.02363824 +v -0.06874412 0.34498373 -0.02875927 +v -0.66642988 -0.45786545 -0.05441758 +v -0.63031596 0.64052171 -0.01379876 +v -0.15583020 -0.50031209 -0.11558390 +v -0.05420076 -0.12752345 -0.01455581 +v 0.12340692 0.67427218 -0.01970747 +v -0.62823212 -0.16255702 -0.39326006 +v 0.22710484 0.64502984 -0.19023152 +v -0.65564972 0.06798155 -0.10747682 +v 0.20615892 -0.09784816 -0.16286081 +v 0.23042516 -0.00092224 -0.40361893 +v -0.61887276 0.15125710 -0.30119357 +v -0.33108780 0.04421348 -0.38555557 +v 0.21357268 0.03725065 -0.12169006 +v -0.43711787 0.48720270 -0.40184715 +v -0.48101419 -0.15482767 -0.38244012 +v -0.34008875 -0.48646003 -0.02508468 +v -0.21883179 -0.14522761 -0.02455657 +v -0.30986795 -0.52304274 -0.34546781 +v -0.65192235 0.19765379 -0.01796656 +v -0.60141355 0.66996175 -0.11338723 +v -0.66627371 -0.25283986 -0.09824587 +v -0.66031659 -0.04004382 -0.09555896 +v 0.22442709 -0.50122219 -0.10311454 +v -0.23228203 0.24993090 -0.01304214 +v -0.05923115 0.48332554 -0.01671120 +v -0.20663851 0.68196195 -0.11938403 +v 
-0.06242859 0.03632914 -0.01486646 +v -0.15937835 -0.48854700 -0.02669835 +v 0.15597525 -0.31419086 -0.03086870 +v -0.62312490 -0.09978956 -0.02020323 +v -0.61193258 0.39955550 -0.14869097 +v 0.14436822 -0.50908697 -0.11986021 +v -0.62500906 -0.50543481 -0.10543462 +v 0.20088278 -0.12608513 -0.03165383 +v -0.61371946 -0.05466486 -0.38156137 +v -0.29516074 0.04817756 -0.01124506 +v -0.26171690 0.65461856 -0.39028081 +v -0.24466474 -0.49816063 -0.16233526 +v -0.62603050 0.02417433 -0.10395244 +v -0.66331178 -0.02103842 -0.10935943 +v 0.25489110 0.47811145 -0.19285204 +v -0.65537578 -0.01179792 -0.28127289 +v -0.61797929 0.04847952 -0.38218671 +v -0.60419369 0.66220725 -0.38391250 +v 0.04658391 -0.33090633 -0.01589628 +v -0.63751721 0.69663966 -0.04253675 +v -0.56410921 0.67537045 -0.06963043 +v 0.20069847 0.57708549 -0.39414734 +v -0.18330921 0.59229338 -0.01640523 +v -0.50108713 0.15904155 -0.01044418 +v -0.13534249 0.57699549 -0.39154106 +v -0.33126441 0.01503561 -0.01080122 +v -0.29652777 -0.49676567 -0.14650664 +v -0.63075113 -0.29216912 -0.11335786 +v 0.21205975 0.26282245 -0.03700721 +v 0.24617431 0.43644980 -0.11705373 +v 0.03777239 0.09471402 -0.01623417 +v -0.63853097 0.56803024 -0.36750028 +v -0.07949097 -0.50232548 -0.11653584 +v -0.65160233 0.04675284 -0.39482141 +v 0.18801367 -0.53245378 -0.39916983 +v -0.62525225 -0.08913921 -0.38002089 +v -0.19925289 -0.52615952 -0.20502861 +v -0.63358760 -0.32393667 -0.28015307 +v -0.62570536 0.14204806 -0.10843727 +v -0.34903336 0.21388854 -0.39979836 +v -0.64452904 0.40985256 -0.10810194 +v 0.19305688 -0.53018093 -0.11381235 +v -0.23814696 0.49457029 -0.02835798 +v -0.65428776 -0.11069191 -0.10549121 +v -0.58613032 0.65971339 -0.02812435 +v 0.19708632 -0.47678074 -0.24449846 +v -0.66207016 -0.25834110 -0.11350170 +v -0.17185064 -0.46538228 -0.01283022 +v 0.20695512 -0.03506628 -0.34483182 +v -0.64167720 0.66367447 -0.11455791 +v 0.07923672 -0.49920267 -0.10567931 +v -0.41424680 0.69256026 -0.06642351 +v -0.64228904 0.40127420 -0.39726532 +v 0.23592152 0.49163392 -0.12351310 +v 0.22813657 -0.32197714 -0.39911687 +v 0.23863001 0.01522998 -0.10765725 +v -0.66379815 -0.11628105 -0.10826237 +v -0.24236327 0.68219322 -0.25705633 +v -0.48873255 -0.48603097 -0.10597123 +v 0.24398041 0.12665744 -0.11617456 +v 0.22498009 -0.45120096 -0.11689287 +v 0.20929497 0.17832412 -0.08162020 +v -0.63083303 -0.18953852 -0.06437111 +v -0.63338023 0.42041585 -0.39936835 +v -0.07107623 -0.39066419 -0.38565576 +v -0.64319527 -0.37059048 -0.10760150 +v 0.06924249 0.14928165 -0.02977164 +v 0.25868249 0.63681978 -0.12324160 +v 0.06740185 0.67452359 -0.12247221 +v -0.32250407 0.10100197 -0.01129272 +v -0.24779303 -0.49648550 -0.11401874 +v -0.56678694 -0.32112864 -0.39238471 +v -0.65259814 0.08498502 -0.25487903 +v -0.62309158 -0.00042446 -0.21905249 +v -0.49981734 0.42510217 -0.01271958 +v -0.61053222 0.64469874 -0.03071976 +v -0.66749990 -0.38021967 -0.10394393 +v -0.65895462 -0.12675788 -0.10529333 +v 0.22513370 0.08978630 -0.11772992 +v 0.20155610 -0.44650346 -0.12046704 +v -0.49112102 0.67094123 -0.03211215 +v -0.29906726 -0.49613908 -0.11331446 +v -0.14106406 -0.52440804 -0.11020031 +v -0.07616294 0.18508881 -0.40232462 +v -0.51845670 -0.06456984 -0.00880030 +v -0.33492774 -0.27691475 -0.39594164 +v -0.37948966 0.31546980 -0.01250425 +v 0.23000026 -0.33358487 -0.35483900 +v -0.61998630 0.38986906 -0.10998456 +v -0.52287781 -0.49782690 -0.00745898 +v -0.66311717 -0.36811811 -0.10256218 +v -0.61133093 0.43981087 -0.38128391 +v 0.00340187 -0.52270377 -0.06598010 +v 
-0.58900261 0.30820069 -0.39853045 +v -0.40671781 -0.49682599 -0.37330791 +v -0.63148069 -0.29344985 -0.16241527 +v -0.42304549 0.68428403 -0.11410129 +v 0.22316763 0.47455770 -0.34883219 +v -0.54612005 0.05137474 -0.00931010 +v 0.20888795 0.00831584 -0.25883722 +v -0.55522853 -0.50791204 -0.06043523 +v 0.01367259 -0.52402598 -0.39750937 +v -0.50730789 0.39294216 -0.02518127 +v 0.22805211 0.62130862 -0.38933098 +v 0.02819292 0.60837692 -0.03207172 +v 0.24708828 0.06404192 -0.14645813 +v -0.18015012 -0.51886928 -0.07681706 +v -0.64408100 0.62018865 -0.11394937 +v -0.48219940 0.51894242 -0.40198568 +v 0.17649372 -0.29350346 -0.40118587 +v 0.24007644 0.26950160 -0.01966066 +v -0.15964708 0.38029471 -0.02798988 +v -0.62159652 0.04250915 -0.37895513 +v -0.62679332 -0.12597847 -0.10757086 +v -0.66081828 -0.22439086 -0.20101826 +v -0.41632548 -0.47147110 -0.39275920 +v -0.51462436 0.41802236 -0.38651261 +v 0.22265820 0.44766787 -0.22966647 +v 0.17223644 0.65829539 -0.11869762 +v -0.29831204 0.43526193 -0.38859969 +v -0.00073508 0.49612567 -0.03064188 +v -0.31199524 0.68441230 -0.11781929 +v -0.32768548 0.67971140 -0.11799802 +v -0.62189603 0.02831778 -0.37874404 +v -0.51965475 0.66981697 -0.01520349 +v -0.23760419 0.30035254 -0.02632344 +v -0.60262179 0.66860372 -0.30249998 +v -0.38567203 0.36860666 -0.01291278 +v 0.23954397 -0.09817869 -0.12062960 +v -0.26897955 0.05331220 -0.02423401 +v -0.64514083 0.37198758 -0.01228850 +v -0.05516835 0.24688450 -0.01540265 +v -0.41487139 0.05550219 -0.39779085 +v 0.19542494 -0.42017698 -0.03222704 +v -0.64599842 0.31657475 -0.22496673 +v -0.63745826 -0.48819506 -0.32061082 +v -0.02397474 -0.08466470 -0.01512647 +v -0.49642530 -0.49157456 -0.10857473 +v -0.63908130 0.60620522 -0.11535963 +v -0.52100641 -0.21935833 -0.38126561 +v 0.18618335 -0.50165862 -0.11829931 +v 0.19799007 -0.46784583 -0.19170603 +v -0.63693345 -0.47998032 -0.03067380 +v -0.28826657 -0.52119350 -0.39218217 +v -0.19585569 0.65843302 -0.29687768 +v 0.24521439 0.30006558 -0.06479963 +v -0.59690017 0.04725945 -0.00889094 +v -0.49201185 0.26537991 -0.38574767 +v -0.13743649 -0.49355856 -0.03662762 +v 0.21515231 0.42636541 -0.03652549 +v -0.62172705 0.03698574 -0.16255569 +v -0.17410593 -0.52700883 -0.26416656 +v 0.13653727 -0.49592054 -0.03264195 +v -0.48667169 -0.47855943 -0.02078581 +v -0.44602913 0.32976258 -0.01223190 +v -0.22320673 0.65873212 -0.34542292 +v -0.23236385 -0.49997941 -0.32906941 +v 0.01936863 0.67331874 -0.35619053 +v 0.21794783 0.64024979 -0.39423415 +v -0.41447201 -0.36555681 -0.02200883 +v -0.65164065 0.50195700 -0.10929007 +v -0.62996769 -0.49059963 -0.22206834 +v 0.20761311 0.25347471 -0.03216789 +v -0.66532129 -0.37512749 -0.28905952 +v 0.22860511 0.64220208 -0.12151899 +v -0.05244193 0.11091047 -0.38907376 +v -0.62981153 -0.32439342 -0.37909278 +v -0.62915105 -0.06914567 -0.10678871 +v 0.24528864 0.66897595 -0.22443607 +v -0.66562080 -0.50694752 -0.13877532 +v -0.13541664 -0.00378878 -0.02641195 +v -0.64389664 0.39507586 -0.36081791 +v -0.37453088 -0.52070832 -0.18668588 +v 0.24285920 0.16241215 -0.40264741 +v -0.65488160 -0.05206061 -0.10653040 +v -0.64435488 0.37145326 -0.22105001 +v 0.24695264 0.16702758 -0.12401312 +v 0.23066336 -0.27433091 -0.02093305 +v -0.19663136 0.53985268 -0.01594607 +v -0.01833759 0.66119856 -0.03614335 +v 0.24840929 0.43446589 -0.06797165 +v 0.22681569 0.64426607 -0.38764286 +v -0.64541471 0.34307286 -0.29111195 +v -0.63743007 -0.47780845 -0.05043234 +v -0.52322847 0.66513360 -0.38538620 +v 0.17675745 -0.52573061 -0.07069430 +v -0.33243167 
-0.48895961 -0.06440132 +v 0.20305121 -0.08499546 -0.03476032 +v -0.55782175 -0.41308367 -0.00675427 +v -0.54465055 -0.15288138 -0.38156945 +v -0.65797406 -0.04081611 -0.04726627 +v -0.35190302 -0.51331443 -0.10620741 +v 0.21039842 0.31192288 -0.40575495 +v -0.40431646 -0.49267682 -0.11140333 +v -0.63624734 -0.37986991 -0.09655645 +v -0.62902302 -0.20821683 -0.17778471 +v -0.11184414 0.46275678 -0.01607629 +v 0.24807650 0.24635540 -0.24617702 +v -0.39483166 0.38334459 -0.40079692 +v 0.20482786 -0.31561261 -0.11473451 +v -0.62551326 0.02935059 -0.05986159 +v -0.51864094 -0.00210366 -0.38337335 +v -0.06114078 -0.49019748 -0.11409449 +v -0.22757150 0.42586640 -0.01464347 +v -0.13317406 -0.52122432 -0.10554994 +v 0.22859234 0.63451403 -0.14839223 +v -0.61153054 0.42865348 -0.35205889 +v -0.39063582 0.61153466 -0.02866522 +v 0.22935778 0.63501835 -0.18016182 +v 0.20122083 -0.32658532 -0.11696309 +v -0.63436061 -0.31686822 -0.06595755 +v 0.20326883 0.65626061 -0.11105779 +v 0.13183971 0.27167630 -0.39227960 +v -0.63487005 -0.36816004 -0.23788632 +v 0.23287779 -0.16223504 -0.01918278 +v 0.23158243 -0.33535275 -0.25398132 +v 0.03049443 0.65904200 -0.11749843 +v -0.04998429 0.34730434 -0.39089659 +v -0.04210461 0.66207027 -0.07032663 +v 0.19386595 -0.05422966 -0.01800954 +v -0.14553629 -0.35843924 -0.39736041 +v -0.50138909 0.67280811 -0.11323743 +v -0.66584092 -0.41683641 -0.20212638 +v -0.49871388 -0.50746405 -0.00794495 +v -0.10024732 0.68235290 -0.11926019 +v -0.11132700 0.35714611 -0.39015442 +v -0.65133852 -0.02435196 -0.10825174 +v -0.39443484 -0.49514222 -0.37843999 +v -0.63165212 0.53607273 -0.11108502 +v -0.40031004 0.17683578 -0.02401272 +v 0.19811556 -0.50706625 -0.11975510 +v -0.54894620 0.67809820 -0.11545411 +v -0.24157724 -0.00380643 -0.02439572 +v 0.22570980 0.54464197 -0.38615578 +v 0.22647780 0.64632648 -0.20070463 +v -0.19549212 0.68352836 -0.12649618 +v -0.66339612 -0.32160863 -0.14087497 +v 0.22987492 -0.39161333 -0.25701824 +v -0.63775772 -0.47057033 -0.03103466 +v 0.19987684 -0.32470766 -0.29411927 +v 0.20056549 -0.31043312 -0.19211158 +v 0.16452837 0.21204074 -0.03148064 +v -0.64481819 0.36510313 -0.32510883 +v -0.43509275 0.68781817 -0.26120180 +v 0.19869925 -0.43652755 -0.12005341 +v -0.24425499 0.45163384 -0.38945505 +v -0.66826779 -0.47380915 -0.10278985 +v -0.31568411 0.38196790 -0.38788116 +v -0.62304795 -0.01040689 -0.17170835 +v 0.25580773 0.66179359 -0.39844662 +v 0.05007589 0.28258398 -0.40487918 +v 0.10120165 -0.45246473 -0.39913368 +v 0.24806629 0.39433214 -0.10903899 +v -0.65565979 -0.03468561 -0.10631635 +v -0.29055771 0.12627026 -0.38651133 +v 0.24749541 0.18760061 -0.12386715 +v 0.21030629 -0.00879881 -0.11754949 +v -0.27330843 -0.49832350 -0.26734602 +v 0.14941157 0.06071085 -0.03066188 +v -0.63829786 0.57618600 -0.11058117 +v -0.01568026 -0.52216345 -0.11027138 +v -0.51962906 -0.49417794 -0.34013361 +v -0.42588186 0.68343711 -0.37436134 +v -0.62370330 0.22615388 -0.11073309 +v 0.03562726 0.65188187 -0.32171595 +v -0.60726810 0.67518008 -0.09386998 +v 0.21783526 -0.52978361 -0.40066308 +v 0.22440422 -0.53292209 -0.27617207 +v -0.57838362 -0.50838810 -0.10312475 +v -0.63342106 0.68700773 -0.36940119 +v -0.61958170 0.67726517 -0.11149227 +v 0.06858470 -0.49855235 -0.11095183 +v -0.04213786 -0.02014999 -0.02807158 +v -0.25840154 -0.51754928 -0.10356306 +v 0.20564198 -0.00729299 -0.11990319 +v -0.61943066 0.03532921 -0.02086964 +v -0.08268826 0.66054994 -0.03301771 +v 0.23128806 0.39611238 -0.12273946 +v -0.61540890 0.36725459 -0.11040670 +v 0.22097895 
0.62254566 -0.11496218 +v -0.66047001 -0.13524193 -0.10520653 +v -0.47887641 0.31894988 -0.38612875 +v -0.15770137 -0.30060378 -0.39769000 +v -0.61438489 0.51025134 -0.03275812 +v -0.02320153 0.67553353 -0.31220031 +v 0.24157415 0.29090750 -0.40571326 +v -0.66275865 -0.03630340 -0.10886793 +v -0.63768345 -0.48403898 -0.20262130 +v -0.35912216 0.69129342 -0.07276671 +v -0.19723032 -0.33951312 -0.39689636 +v -0.01700632 0.66012579 -0.11787929 +v 0.22548456 0.54667640 -0.34261835 +v -0.23511064 -0.38732272 -0.38357505 +v -0.44284952 0.28002706 -0.02455530 +v 0.19875048 -0.41412532 -0.11950786 +v -0.03454744 -0.50319344 -0.11645329 +v 0.01398760 -0.33129084 -0.39934006 +v 0.19684328 -0.47201252 -0.29575062 +v -0.64586776 -0.24257019 -0.10464948 +v -0.60979736 0.59190017 -0.02650260 +v 0.14452456 -0.50248599 -0.11786313 +v -0.53899544 -0.29936343 -0.00738706 +v 0.19305448 -0.48988077 -0.11670011 +v -0.39740440 -0.51963562 -0.14394698 +v 0.03973352 0.00327814 -0.01618991 +v 0.22589672 -0.53121293 -0.21465404 +v -0.66341656 -0.50389177 -0.06756951 +v -0.13388568 -0.48847583 -0.02668856 +v 0.03411689 0.44637823 -0.03072146 +v 0.22487785 0.32380515 -0.11885167 +v -0.11061527 0.22699901 -0.40232888 +v 0.19742697 -0.50670934 -0.15623166 +v -0.51756567 0.46277136 -0.02586343 +v -0.57062423 0.34306973 -0.38514534 +v -0.60796183 0.60159177 -0.32380220 +v -0.63344407 -0.13089016 -0.10556356 +v -0.60780311 0.62653047 -0.38103858 +v 0.22254825 -0.53123277 -0.38562322 +v -0.43739927 0.36325610 -0.40030882 +v -0.38562071 -0.06978293 -0.39699912 +v -0.63857943 0.57308674 -0.39332774 +v -0.33444887 0.66218054 -0.33004558 +v -0.04139799 0.65449208 -0.24942671 +v -0.65216535 0.08232674 -0.14478020 +v -0.62133271 -0.00672662 -0.02072666 +v -0.63765526 -0.48712817 -0.22577910 +v -0.08120598 0.65206796 -0.38979101 +v -0.56145430 0.66978079 -0.11485579 +v 0.04349162 -0.45501152 -0.02956823 +v -0.11896598 -0.52841145 -0.24623106 +v 0.01932778 -0.53037941 -0.39397010 +v -0.60316694 0.08912034 -0.38303781 +v -0.60509974 0.66523522 -0.22281389 +v -0.25643030 0.16906442 -0.38722923 +v 0.21543658 0.67690474 -0.06465793 +v -0.41755158 -0.48539284 -0.10646912 +v 0.22669546 0.57020158 -0.26116008 +v -0.34592533 0.67803967 -0.40383404 +v -0.36027157 0.27451450 -0.40026242 +v -0.66302997 -0.27739984 -0.36169389 +v -0.44674581 -0.27624986 -0.02166371 +v -0.53631765 0.45245770 -0.40074202 +v -0.16006933 -0.17772150 -0.01265149 +v -0.66338325 -0.04841752 -0.10899304 +v 0.02215915 0.02523370 -0.38922441 +v -0.45162773 0.55578947 -0.40258485 +v 0.08973547 0.53720725 -0.03193299 +v -0.28108564 0.28561908 -0.01288299 +v -0.25933844 0.12075564 -0.02465615 +v -0.65844244 -0.50513822 -0.10395967 +v 0.21781228 -0.52649015 -0.11513367 +v 0.21325548 -0.02307240 -0.11654989 +v -0.62756884 -0.25632259 -0.01959641 +v -0.39622420 -0.48776549 -0.02883416 +v -0.34240532 -0.51892614 -0.10746277 +v -0.62024212 0.10149787 -0.30572349 +v -0.63783956 0.55443436 -0.39944303 +v 0.22989036 -0.09915059 -0.11611795 +v -0.40316948 0.53705907 -0.40258145 +v -0.13639444 0.55253702 -0.01644907 +v 0.22603756 -0.33961028 -0.40102097 +v 0.21164012 0.04423599 -0.11734438 +v -0.36495891 0.09649916 -0.39855877 +v -0.29674771 -0.39261451 -0.38271290 +v -0.25676563 -0.34045693 -0.02422848 +v 0.19674605 -0.50907135 -0.20089060 +v -0.04834323 -0.49715808 -0.11501665 +v -0.62634259 -0.15508839 -0.37970322 +v 0.20926189 0.03771371 -0.34849027 +v -0.55878675 0.13700223 -0.39738062 +v -0.62099731 0.06778723 -0.34686041 +v 0.22648045 -0.30768842 -0.01852743 +v -0.37579539 
0.68577480 -0.26355824 +v -0.39608851 0.68134850 -0.39935112 +v -0.57694227 0.65140748 -0.40240377 +v -0.14630163 0.65719128 -0.27063337 +v -0.01457427 -0.44323036 -0.39791939 +v -0.47854099 0.23533711 -0.02389527 +v 0.20829165 0.14016114 -0.11701288 +v -0.04074515 0.67805392 -0.21159266 +v 0.20503277 0.18940631 -0.01869850 +v 0.20833005 -0.08167195 -0.12131473 +v -0.64358419 0.54611278 -0.05725469 +v 0.09896174 -0.29532102 -0.38820499 +v -0.50328082 0.29872796 -0.39943880 +v 0.22358254 0.47324425 -0.17011680 +v -0.12175634 0.67358226 -0.40570858 +v -0.18333714 0.33317494 -0.01432304 +v -0.51686162 0.35507593 -0.38598877 +v -0.44999698 -0.10149595 -0.38334996 +v -0.66753554 -0.31624931 -0.10405882 +v 0.18381806 0.52832335 -0.39383987 +v -0.01670418 -0.25560459 -0.02813542 +v -0.49901330 0.23642510 -0.39888728 +v -0.62678802 -0.10816282 -0.10658189 +v -0.09001234 0.54234093 -0.40584880 +v 0.24296686 0.23017220 -0.02038920 +v 0.25487342 0.66535342 -0.39754000 +v -0.08588050 -0.50428081 -0.34508824 +v -0.56625426 -0.48494837 -0.10278475 +v -0.34205201 0.68607748 -0.11856059 +v 0.09905647 0.62715477 -0.01908405 +v -0.08770833 0.04853076 -0.40103033 +v -0.01559313 0.23185213 -0.02888055 +v 0.22760943 0.60422379 -0.36488253 +v -0.24097297 0.33837298 -0.40199420 +v 0.21255919 0.09658940 -0.11950020 +v -0.65246481 0.35299140 -0.10747426 +v -0.50500369 -0.19261639 -0.00828070 +v 0.01999855 -0.26579651 -0.39990029 +v -0.01790481 -0.49749923 -0.39710787 +v -0.25972497 0.67016196 -0.11799759 +v -0.36519185 -0.33388782 -0.02258502 +v 0.13878255 -0.36496133 -0.01719675 +v 0.06944495 -0.51723629 -0.01597415 +v -0.53150225 0.66979468 -0.16830695 +v -0.30666512 -0.51575410 -0.07049727 +v -0.49927184 -0.48278970 -0.02230628 +v -0.35246864 0.52090830 -0.40275165 +v -0.34389776 0.68584365 -0.21049221 +v -0.62433040 -0.05244434 -0.37917492 +v 0.04480752 -0.52943385 -0.11347872 +v -0.15830544 -0.25409612 -0.01271532 +v -0.63658512 0.61669630 -0.11146376 +v -0.64569616 0.17707129 -0.00962330 +v 0.23449072 -0.25392228 -0.21124542 +v 0.22075632 0.60153794 -0.08542629 +v -0.63836944 0.45377403 -0.39913025 +v -0.28379408 -0.48883149 -0.11033946 +v -0.63103759 -0.11452333 -0.10764533 +v -0.02143503 -0.01630797 -0.38858268 +v -0.58240527 0.69174898 -0.26364931 +v -0.31629327 -0.14295462 -0.38457105 +v -0.64853519 0.32882234 -0.01844445 +v -0.40666127 -0.48370865 -0.10803215 +v 0.24940017 0.07102349 -0.12752728 +v 0.18589169 0.47281402 -0.39335391 +v -0.62544399 -0.11740898 -0.11492898 +v 0.21931505 -0.29482886 -0.11479451 +v 0.07908081 -0.18020238 -0.40115204 +v -0.62935311 -0.19743429 -0.30964234 +v 0.12433393 0.30853772 -0.01804316 +v -0.60697871 0.47718918 -0.38561320 +v 0.19334386 -0.51169026 -0.19361801 +v -0.61596686 0.56746072 -0.11322977 +v -0.11776014 -0.52030092 -0.07283607 +v 0.12933362 -0.51113200 -0.37714249 +v -0.65315598 0.18905145 -0.06621841 +v -0.31168270 0.56087828 -0.40351999 +v 0.19306226 0.30037233 -0.01904915 +v 0.23971058 0.03923202 -0.40015647 +v -0.39271182 -0.52043194 -0.24678917 +v -0.60894734 -0.49196211 -0.37491775 +v -0.13443086 0.67471081 -0.40213057 +v -0.65665805 -0.13019885 -0.00765685 +v -0.22210829 0.11440583 -0.38723242 +v 0.25048307 0.60248977 -0.02261098 +v 0.24201715 0.07777549 -0.11848995 +v 0.09458931 -0.18488503 -0.01653843 +v 0.05796339 -0.09471933 -0.38913760 +v 0.05602035 0.14561328 -0.01653460 +v -0.60390669 0.38230211 -0.38537407 +v 0.22382835 -0.53496790 -0.22573911 +v 0.21400307 0.33119375 -0.04211759 +v -0.63137549 -0.26078963 -0.02203478 +v -0.21841933 0.23486869 
-0.02593534 +v -0.66143245 -0.07716814 -0.10675935 +v -0.65940237 -0.32134876 -0.39172342 +v -0.35595021 0.66154164 -0.11538091 +v 0.09041395 -0.49920481 -0.07508551 +v -0.50726157 -0.13359091 -0.00850752 +v -0.65273100 0.09233136 -0.11193398 +v -0.62973708 -0.51035345 -0.15527885 +v -0.63823372 -0.47244465 -0.25544840 +v -0.65451020 -0.02114247 -0.39269578 +v -0.64139020 0.47158259 -0.24198411 +v 0.06381300 -0.51910335 -0.11547709 +v 0.22309364 -0.52376562 -0.39240345 +v 0.06257652 0.64793408 -0.39089444 +v -0.63405836 -0.46113300 -0.37717271 +v -0.45249036 -0.50880665 -0.00819219 +v 0.21340916 0.26353058 -0.40525302 +v -0.09133324 0.68568128 -0.11354085 +v -0.07745804 -0.49431476 -0.11400640 +v -0.66391820 -0.11576720 -0.10703765 +v -0.32181260 -0.52236855 -0.18344490 +v -0.37205771 -0.49643460 -0.28615305 +v 0.20925429 0.17019832 -0.39165106 +v -0.02387979 0.03986482 -0.02838564 +v 0.22258677 -0.48706499 -0.39939368 +v -0.24660747 0.58187717 -0.01585458 +v -0.62133771 0.16632973 -0.02420635 +v -0.28904203 0.65445429 -0.38996911 +v -0.60948747 -0.45791310 -0.39048719 +v -0.25493515 0.61703527 -0.02962738 +v 0.14811638 0.33199680 -0.40601838 +v -0.59961098 0.33976823 -0.01139102 +v -0.64940554 0.16576941 -0.39491543 +v 0.17112310 -0.50508225 -0.11376426 +v 0.07610358 -0.41358119 -0.02978611 +v 0.24066806 -0.03595268 -0.21183735 +v -0.56542218 -0.33109885 -0.37963408 +v -0.46315530 -0.51643378 -0.10964029 +v 0.19220982 -0.51171297 -0.15754701 +v -0.64065802 0.49947450 -0.27238321 +v -0.50891274 0.20165138 -0.38520959 +v 0.23362038 0.12141566 -0.01859807 +v -0.43281162 -0.48584509 -0.02427061 +v -0.57173258 -0.35410416 -0.00673597 +v -0.60840714 0.57448429 -0.25468156 +v 0.19703542 0.64758068 -0.37216851 +v -0.65881610 -0.12672907 -0.10505971 +v -0.13648906 -0.50146341 -0.18222700 +v -0.03030794 -0.52739227 -0.11151610 +v 0.21415158 0.17439657 -0.20742317 +v -0.13171721 0.07952362 -0.40085989 +v -0.62127113 0.09215154 -0.10846578 +v -0.61602825 -0.23962489 -0.37999538 +v -0.25291529 0.68217754 -0.11754737 +v 0.22287863 -0.52698082 -0.11110885 +v 0.20061943 -0.24393125 -0.03916898 +v 0.18690039 -0.51003122 -0.12139729 +v 0.11748087 0.67209202 -0.31261352 +v -0.66369033 -0.12569015 -0.10350221 +v -0.34118921 -0.51262063 -0.01070079 +v -0.39552009 -0.51142502 -0.10935305 +v 0.24021751 0.09823307 -0.40350595 +v -0.16151305 0.11957002 -0.40082586 +v -0.61302793 0.37589508 -0.38049027 +v 0.23304695 -0.30655974 -0.20025271 +v 0.12175607 0.67122114 -0.12184156 +v -0.66068745 -0.25293836 -0.10486226 +v 0.19869176 -0.37180942 -0.11981212 +v 0.19476216 -0.48968992 -0.10666232 +v -0.55365640 -0.51367426 -0.14144775 +v -0.49925384 0.57124513 -0.40239015 +v -0.66429704 -0.28573284 -0.11516516 +v -0.25793800 0.18537468 -0.01233318 +v -0.42043912 0.22580339 -0.01149656 +v -0.57247752 0.10738008 -0.02210330 +v -0.66744584 -0.49809116 -0.06601670 +v 0.22854392 -0.34289986 -0.39682508 +v -0.65356296 0.11504606 -0.01284086 +v 0.25350392 0.58539283 -0.40689671 +v -0.63122952 0.42596480 -0.11033904 +v 0.12054008 -0.26483560 -0.40087268 +v -0.11661319 0.65396619 -0.11683074 +v -0.11004935 -0.21672481 -0.01357322 +v -0.38987783 -0.48815528 -0.05631850 +v -0.61463559 0.42927790 -0.02585874 +v 0.06956793 0.50505865 -0.39329177 +v -0.34320647 -0.49536127 -0.11280636 +v -0.30902791 -0.22298773 -0.39684317 +v -0.09139207 -0.52901810 -0.20072336 +v 0.25730297 0.62014657 -0.31067514 +v -0.65559047 0.06493165 -0.04563047 +v 0.22461689 0.50678307 -0.22848217 +v 0.24762617 0.53182572 -0.02117433 +v -0.19932167 0.19372171 
-0.01307789 +v -0.24575238 0.55560988 -0.02886736 +v -0.15137286 -0.50826055 -0.11472940 +v 0.14945786 0.64883357 -0.34561974 +v -0.46317062 -0.16186543 -0.02175052 +v 0.21972474 0.65496773 -0.08511351 +v 0.20210682 -0.18238749 -0.01805635 +v -0.66951942 -0.35487580 -0.10827259 +v -0.61905414 0.19260652 -0.02259268 +v -0.43280646 0.66655362 -0.19307077 +v 0.25466618 0.67515147 -0.39487928 +v 0.22321402 0.40008506 -0.12028448 +v -0.52388358 0.63692075 -0.02805201 +v -0.07549702 -0.50416863 -0.29693428 +v -0.28976646 0.68174875 -0.31127518 +v -0.63512582 -0.35523501 -0.34303752 +v -0.02365702 -0.02108955 -0.01531881 +v -0.16190982 -0.49282035 -0.03279728 +v -0.63695109 -0.46937093 -0.02402507 +v 0.19608571 -0.48591617 -0.34440333 +v 0.19871739 0.04723299 -0.40349403 +v -0.09614085 0.51858133 -0.01657928 +v -0.50836229 0.07588014 -0.02245310 +v -0.36867845 -0.22153518 -0.39596888 +v 0.03832315 0.42387262 -0.39267111 +v -0.62087429 0.22112493 -0.02752901 +v -0.64249861 -0.43269134 -0.10734532 +v 0.25470459 0.48427367 -0.12488208 +v -0.63654405 0.66592711 -0.11235400 +v -0.66208005 -0.03122318 -0.10751554 +v 0.24235003 0.05636233 -0.27999562 +v -0.66528517 -0.51017731 -0.32207659 +v 0.19521275 -0.49475265 -0.03484330 +v -0.39195141 0.67987889 -0.40339679 +v 0.19073276 -0.41199470 -0.03146872 +v -0.62707460 -0.07956229 -0.02242884 +v 0.22889468 0.64328146 -0.30109718 +v -0.44535044 0.13680288 -0.01073185 +v -0.51939076 0.69056422 -0.01569330 +v 0.14973436 -0.18138863 -0.01727463 +v 0.25396988 0.50884330 -0.35667649 +v 0.06655740 0.67536402 -0.15994114 +v -0.51867908 -0.18016067 -0.02110795 +v -0.00288260 -0.50317603 -0.38375187 +v -0.60439044 0.66769314 -0.20962495 +v -0.61878532 0.14559942 -0.02199393 +v 0.24497916 0.33630115 -0.11659712 +v -0.14751236 -0.24028635 -0.38582066 +v -0.15075588 0.53273755 -0.40501472 +v -0.04904963 0.64662766 -0.39278579 +v -0.38241795 0.27273330 -0.02487360 +v -0.62747651 -0.51166022 -0.26196629 +v -0.64446467 0.33822104 -0.12678555 +v -0.39999491 0.67093360 -0.08090656 +v -0.12199427 -0.49303427 -0.03019932 +v -0.09375747 -0.44527298 -0.38481829 +v 0.20652542 -0.37462792 -0.11407406 +v -0.29415938 -0.52287507 -0.14278652 +v 0.23800574 -0.00463429 -0.07464762 +v -0.54114306 -0.00564540 -0.39605314 +v -0.45197570 0.21332717 -0.39905006 +v 0.24533758 0.09525146 -0.17179728 +v -0.51881218 0.47862369 -0.01311661 +v 0.03693566 0.34660614 -0.40544239 +v -0.64786178 0.25568464 -0.23745055 +v 0.24015102 0.17996152 -0.01969045 +v 0.12288510 0.24997334 -0.03091764 +v -0.17658882 -0.49020281 -0.02706304 +v -0.35222018 0.59976828 -0.38913888 +v 0.20224766 -0.30023938 -0.11663882 +v -0.50553346 -0.51432341 -0.10694233 +v 0.20164606 -0.43104675 -0.11453365 +v -0.47718146 0.09584764 -0.38460553 +v 0.19718398 -0.19353290 -0.38956803 +v -0.64293122 0.66689354 -0.11187015 +v -0.63570178 -0.49093771 -0.29960436 +v -0.66576385 -0.24248360 -0.10239068 +v -0.65197057 0.09390455 -0.19912799 +v 0.13327359 0.48854741 -0.39359751 +v 0.21725695 0.49061164 -0.03600419 +v -0.64769793 0.36309928 -0.03546077 +v -0.24264961 -0.48631513 -0.11118077 +v 0.17090815 -0.49553725 -0.11809760 +v 0.22275071 0.63078403 -0.12154580 +v -0.35982081 -0.39791062 -0.00957011 +v -0.40895745 0.46754321 -0.01372387 +f 3398 2303 1204 +f 2123 513 3796 +f 1141 513 2123 +f 3314 1141 2123 +f 462 2885 270 +f 675 3082 1438 +f 3314 2123 1076 +f 3314 1076 2885 +f 2885 2997 412 +f 1441 921 416 +f 2847 2885 412 +f 2231 68 1076 +f 2154 1116 3756 +f 1438 5 1685 +f 2997 2885 1076 +f 3565 1524 2385 +f 2997 1076 68 +f 1446 2300 
1685 +f 2214 56 3010 +f 3439 1063 1385 +f 1446 1685 3099 +f 2214 3010 3630 +f 2141 2997 68 +f 3328 2997 2141 +f 853 3630 1595 +f 853 1595 1558 +f 530 1558 3911 +f 530 3911 3274 +f 2688 2253 2369 +f 203 2369 2050 +f 3328 653 1406 +f 3129 2369 203 +f 3010 1595 3630 +f 3911 1558 1595 +f 2155 37 2477 +f 1595 3010 1406 +f 3911 1595 1406 +f 203 2050 3469 +f 477 3687 1068 +f 3687 477 2791 +f 37 3911 1406 +f 1320 1068 3687 +f 3274 3911 37 +f 2155 3274 37 +f 37 1406 3185 +f 547 37 3185 +f 1014 1760 3469 +f 203 3469 1760 +f 311 3039 3099 +f 2297 3039 3129 +f 37 106 3782 +f 3708 3407 415 +f 3537 2456 232 +f 37 547 106 +f 922 1333 280 +f 2710 3917 1200 +f 1514 106 1268 +f 3917 2729 3368 +f 1514 1268 1189 +f 3516 3039 1763 +f 226 3160 2133 +f 226 2133 2074 +f 226 2074 1726 +f 3129 203 1170 +f 203 1760 1170 +f 971 2400 3460 +f 2295 1502 793 +f 1547 1879 971 +f 793 2729 262 +f 2295 793 3905 +f 1170 1760 546 +f 3047 1170 546 +f 1502 2729 793 +f 262 290 296 +f 3129 1170 2297 +f 1356 3242 269 +f 3204 1960 2295 +f 1978 516 2696 +f 3423 402 3549 +f 68 988 2141 +f 68 269 3549 +f 2572 1960 1823 +f 1823 1960 3204 +f 2914 226 1726 +f 1823 2692 2572 +f 988 3549 1508 +f 1823 2837 670 +f 1823 3204 2837 +f 2837 3204 3217 +f 2056 3118 1759 +f 2300 2702 1023 +f 2418 1508 402 +f 1068 1320 2633 +f 27 2418 3118 +f 1446 2702 2300 +f 929 1854 1320 +f 929 1320 1126 +f 1320 3687 1126 +f 745 2141 988 +f 745 988 1508 +f 416 2454 1441 +f 921 1441 822 +f 3756 822 2154 +f 2154 979 1116 +f 3023 745 1508 +f 2133 1206 1763 +f 653 2141 745 +f 255 929 1126 +f 653 745 3102 +f 3102 745 3023 +f 1794 2702 1446 +f 1794 1446 3516 +f 2011 1794 3516 +f 3516 1763 1206 +f 670 2692 1823 +f 2133 1763 2074 +f 2011 3516 1206 +f 3023 1508 703 +f 703 1508 2418 +f 853 1558 2501 +f 2501 1558 530 +f 2155 1450 3274 +f 208 670 2311 +f 2311 670 2837 +f 1450 2155 2477 +f 1063 715 1573 +f 1063 1573 1385 +f 1573 1474 1385 +f 711 96 3047 +f 1189 3865 1514 +f 2881 255 2692 +f 437 2692 670 +f 3708 415 2248 +f 2418 3165 703 +f 1294 3485 1435 +f 2456 3537 3595 +f 2299 3595 3537 +f 244 1804 3165 +f 458 437 2573 +f 437 670 2573 +f 670 2904 2573 +f 1469 2904 670 +f 456 2011 3160 +f 3023 703 3529 +f 96 3791 1763 +f 2074 1763 3791 +f 1352 1726 1125 +f 96 1352 3791 +f 96 711 893 +f 2793 2934 3217 +f 2793 3750 2934 +f 555 827 2279 +f 244 27 2056 +f 2056 2315 244 +f 755 2279 2803 +f 555 2279 755 +f 2803 751 755 +f 2292 1306 166 +f 2803 620 751 +f 976 3736 3215 +f 1804 244 1004 +f 2570 755 751 +f 653 3102 3185 +f 992 751 976 +f 992 976 3215 +f 1661 3185 3102 +f 2154 939 979 +f 3565 2385 939 +f 3565 468 1524 +f 3531 3398 342 +f 2214 3630 8 +f 2501 530 3274 +f 2570 555 755 +f 1661 3102 1667 +f 447 1667 3102 +f 1661 1667 2588 +f 3595 2248 2456 +f 3023 3529 447 +f 760 1667 447 +f 456 739 2135 +f 1726 2074 1125 +f 2570 908 555 +f 2570 751 992 +f 51 547 3854 +f 3854 1661 2588 +f 2588 3082 3854 +f 2570 2758 908 +f 582 3702 2710 +f 3854 3082 3940 +f 3854 3940 51 +f 3940 3082 675 +f 3940 675 3740 +f 2910 369 2539 +f 369 2910 3821 +f 3821 2775 2005 +f 324 2005 697 +f 324 3821 2005 +f 369 2127 3671 +f 3875 290 3702 +f 444 1305 3000 +f 393 908 470 +f 1753 1957 1208 +f 470 908 2758 +f 2287 1616 465 +f 54 324 2277 +f 54 3821 324 +f 54 2277 3470 +f 3805 54 3470 +f 3215 3736 3270 +f 1189 1807 3462 +f 1435 3485 3462 +f 274 54 3805 +f 3325 1807 1268 +f 3470 465 3805 +f 1268 51 3695 +f 329 3215 3122 +f 1807 2009 1435 +f 1807 1435 3462 +f 3407 1435 2009 +f 3407 2009 415 +f 1807 3325 415 +f 415 2009 1807 +f 415 3325 2248 +f 738 3325 232 +f 232 3325 3695 +f 2248 3325 738 +f 2248 738 2456 +f 2456 
738 232 +f 3215 1403 992 +f 232 3695 2299 +f 232 2299 3537 +f 3695 51 3940 +f 382 296 290 +f 1573 715 3937 +f 1613 332 3934 +f 922 3695 209 +f 2299 3695 922 +f 1333 209 3740 +f 922 209 1333 +f 938 3934 332 +f 280 1333 3740 +f 280 3740 234 +f 675 234 3740 +f 234 75 280 +f 1023 75 234 +f 974 332 1613 +f 1023 234 675 +f 1613 3505 974 +f 974 3505 2650 +f 801 974 2650 +f 2011 456 1794 +f 922 280 3401 +f 761 2416 2731 +f 1206 3160 2011 +f 3160 1206 2133 +f 2702 2135 1023 +f 2650 2731 2416 +f 2718 2907 1289 +f 3791 1125 2074 +f 1125 3791 1352 +f 2416 761 3493 +f 3292 2416 3493 +f 3693 2650 2416 +f 801 2650 3693 +f 3238 1403 2831 +f 2416 3292 3693 +f 1753 3000 1957 +f 3292 130 3693 +f 1295 186 1469 +f 332 974 639 +f 33 2135 739 +f 877 332 639 +f 639 974 801 +f 172 3875 1825 +f 2250 2907 238 +f 2907 2718 238 +f 456 3160 226 +f 1040 238 3596 +f 1856 1726 1352 +f 3289 877 2818 +f 628 238 1040 +f 3460 2400 996 +f 801 1180 639 +f 2780 1295 2793 +f 1474 1573 3937 +f 3693 469 801 +f 1075 3565 2154 +f 1040 716 628 +f 2795 3460 996 +f 3132 628 716 +f 2482 2629 1879 +f 2482 722 2629 +f 3132 716 3368 +f 3368 3134 3132 +f 716 3917 3368 +f 3275 3398 1204 +f 1180 801 469 +f 382 3905 296 +f 1014 3469 765 +f 382 855 1105 +f 3251 1180 469 +f 1014 2954 3734 +f 393 3734 2954 +f 3736 2890 3270 +f 444 516 1305 +f 3000 1305 3224 +f 2914 3225 226 +f 75 1023 3272 +f 3443 3292 3493 +f 1208 1957 1854 +f 456 226 739 +f 1014 3734 3648 +f 3443 369 54 +f 3443 54 274 +f 855 382 2126 +f 2311 2934 3750 +f 2311 3750 208 +f 3443 3247 3292 +f 1469 208 1295 +f 3247 3443 2832 +f 855 2793 3217 +f 3238 317 2778 +f 2778 470 3238 +f 516 3352 2696 +f 2489 3601 1295 +f 2333 470 2778 +f 2832 3443 274 +f 2793 208 3750 +f 2716 274 3805 +f 1295 208 2793 +f 3104 3805 465 +f 3104 1610 3805 +f 2333 1521 1089 +f 2716 3805 1610 +f 1014 3648 1760 +f 436 1547 1562 +f 546 1760 3648 +f 3648 98 546 +f 1754 2793 855 +f 546 98 1922 +f 1610 300 2716 +f 188 98 3648 +f 1753 2605 3000 +f 188 1892 98 +f 2209 1318 300 +f 546 1922 3047 +f 3601 2489 3349 +f 711 3047 1922 +f 2832 1318 615 +f 1922 98 2212 +f 2212 98 1892 +f 2310 1922 2212 +f 1892 656 2212 +f 2890 469 130 +f 1892 188 3763 +f 616 3270 1476 +f 3122 3270 616 +f 3215 3270 3122 +f 3122 616 1289 +f 739 3225 2727 +f 739 226 3225 +f 3763 1089 549 +f 549 1089 528 +f 1084 615 1318 +f 1562 1547 3460 +f 656 1478 2212 +f 3763 656 1892 +f 2165 290 3875 +f 290 2165 2126 +f 317 3909 135 +f 579 317 135 +f 1753 1208 182 +f 1476 1084 616 +f 929 255 1559 +f 3275 3790 342 +f 579 135 3141 +f 1289 616 264 +f 2333 2778 1521 +f 1616 1451 465 +f 3104 1451 2996 +f 3104 2996 640 +f 1736 2398 3783 +f 579 3141 1521 +f 67 2135 33 +f 3104 640 2398 +f 477 3141 1890 +f 977 3783 1240 +f 1736 3783 977 +f 1736 977 1440 +f 1440 977 1114 +f 835 1856 2 +f 3134 2829 3132 +f 2950 1736 1116 +f 1890 3141 2829 +f 3134 1890 2829 +f 1890 2791 477 +f 2126 2165 2327 +f 2327 1754 2126 +f 3403 2 1970 +f 953 1075 1474 +f 1736 1968 3104 +f 3104 1968 1610 +f 1068 528 477 +f 1089 1521 528 +f 2502 3881 3601 +f 1718 2489 2490 +f 2490 2489 1295 +f 1524 1968 2385 +f 300 1610 3121 +f 3121 1610 1968 +f 127 3121 1524 +f 2162 1639 3121 +f 366 3667 2842 +f 127 2394 2162 +f 2914 1726 835 +f 2162 2394 1204 +f 3561 3460 2795 +f 1562 3460 3561 +f 436 1825 1547 +f 264 2209 3031 +f 3031 2209 1639 +f 3667 366 3596 +f 3031 1639 750 +f 1204 2303 2162 +f 750 1639 2162 +f 3531 750 2303 +f 182 1208 3543 +f 3197 2502 1156 +f 3790 297 342 +f 3601 1156 2502 +f 2842 3667 3031 +f 1040 3596 2965 +f 716 1040 2965 +f 3003 2842 3531 +f 2842 3031 750 +f 95 2562 2965 +f 3531 2842 750 
+f 3531 2303 3398 +f 2562 95 2434 +f 2434 95 2400 +f 2434 2400 971 +f 2562 2434 1879 +f 2965 2562 716 +f 2562 1879 2629 +f 722 2562 2629 +f 835 1726 1856 +f 2400 95 996 +f 1879 2434 971 +f 3790 585 297 +f 722 582 1200 +f 2737 2914 1493 +f 1200 582 2710 +f 3193 2482 1547 +f 1547 1825 3193 +f 3561 297 585 +f 382 290 2126 +f 296 793 262 +f 3905 793 296 +f 172 1825 436 +f 1075 2154 1474 +f 3672 3352 1970 +f 1075 2548 3565 +f 1761 3275 2548 +f 1754 855 2126 +f 1105 3204 2295 +f 1754 2780 2793 +f 2837 3217 2934 +f 3601 3349 1156 +f 1295 2780 2490 +f 2696 2270 2310 +f 549 2633 656 +f 1362 3558 912 +f 1478 1978 2696 +f 1978 1645 215 +f 516 1978 215 +f 1305 215 1645 +f 516 215 1305 +f 1645 3224 1305 +f 3224 1645 2633 +f 3759 3637 3558 +f 2633 1854 3935 +f 3759 3558 1362 +f 1362 2279 32 +f 1320 1854 2633 +f 3224 2633 3935 +f 2673 1004 2315 +f 3224 3935 1957 +f 1854 1957 3935 +f 1854 929 1208 +f 1323 1804 1004 +f 2253 1065 1804 +f 458 2881 437 +f 2573 3881 458 +f 3881 2573 2904 +f 186 3881 2904 +f 1804 1323 2253 +f 2311 2837 2934 +f 1469 670 208 +f 186 2904 1469 +f 2855 3759 32 +f 32 827 2855 +f 1442 1323 2673 +f 2050 1323 1442 +f 2677 2184 1439 +f 2050 1442 3469 +f 275 3180 840 +f 2222 2694 2677 +f 827 765 2855 +f 2963 2184 2677 +f 2963 2677 3180 +f 1442 765 3469 +f 998 840 2699 +f 840 998 3930 +f 1018 1529 3746 +f 1205 828 3078 +f 646 980 3078 +f 1018 3078 980 +f 1018 980 1529 +f 1018 1205 3078 +f 1018 3746 42 +f 3014 1018 2645 +f 3692 3617 968 +f 1839 2096 2531 +f 1080 1839 173 +f 2166 1839 1080 +f 1839 2166 1288 +f 1288 2309 2722 +f 16 1288 2722 +f 16 2722 3157 +f 1142 2955 16 +f 1205 1018 3014 +f 2699 1959 1687 +f 1687 1205 3014 +f 3014 1504 1687 +f 2645 3420 3014 +f 1504 3014 3420 +f 1687 1504 258 +f 2699 1687 258 +f 166 258 1507 +f 166 1507 2292 +f 3692 259 3617 +f 3420 259 3692 +f 3692 180 3420 +f 3585 180 3692 +f 180 1564 3420 +f 1055 1564 180 +f 180 3585 2853 +f 1356 998 3242 +f 3242 258 166 +f 1306 3423 3242 +f 1055 1306 2292 +f 1055 2292 1564 +f 2999 1055 2853 +f 3551 1306 1055 +f 3423 1306 3551 +f 3585 968 3451 +f 3809 3451 2096 +f 3451 2008 3585 +f 2008 3451 2475 +f 2475 3095 2008 +f 805 2475 2097 +f 1288 2955 3809 +f 2097 3809 2955 +f 2955 1288 16 +f 3934 2955 1142 +f 2097 2955 3934 +f 938 2097 3934 +f 9 3095 805 +f 2999 2008 3095 +f 292 2304 9 +f 805 2097 938 +f 1731 9 2615 +f 3637 1731 3558 +f 292 9 1731 +f 292 1731 3637 +f 2615 912 3558 +f 2615 805 938 +f 938 912 2615 +f 333 935 3635 +f 2635 1142 935 +f 16 3635 935 +f 2635 500 429 +f 2635 423 564 +f 3312 564 3724 +f 3312 3724 2851 +f 3312 2851 3339 +f 2127 242 1827 +f 2635 564 3312 +f 761 3312 3339 +f 921 1114 416 +f 1114 921 1440 +f 1440 921 822 +f 822 3756 1440 +f 1736 1440 3756 +f 1116 1736 3756 +f 979 2950 1116 +f 1736 2950 939 +f 979 939 2950 +f 2385 1968 939 +f 1524 3121 1968 +f 468 127 1524 +f 468 2394 127 +f 2394 468 3275 +f 3275 1204 2394 +f 2737 3225 2914 +f 2605 1753 2258 +f 608 3445 3669 +f 3465 654 1642 +f 3747 3669 2940 +f 2727 33 739 +f 89 1683 562 +f 172 2165 3875 +f 1844 182 3543 +f 1072 2436 3513 +f 1784 2936 2436 +f 854 1075 953 +f 854 3032 1075 +f 2886 514 2412 +f 2886 2717 514 +f 2717 2886 1417 +f 67 2040 3421 +f 1674 1754 2327 +f 2117 1844 3543 +f 2465 1718 2490 +f 1417 2886 1098 +f 1098 598 1417 +f 2335 598 1098 +f 2953 3818 562 +f 89 562 277 +f 647 585 2806 +f 1337 277 1311 +f 204 2353 1337 +f 1337 2732 1049 +f 878 2217 2564 +f 2564 2227 1906 +f 2215 2227 2564 +f 2215 2564 3286 +f 2 821 1350 +f 3144 204 3403 +f 1784 844 2766 +f 3144 2003 2199 +f 2727 2161 2808 +f 1622 3349 3445 +f 731 3816 2655 +f 3275 1761 
3790 +f 198 1674 1539 +f 1311 1350 821 +f 821 3403 204 +f 2353 3286 2564 +f 504 3286 2061 +f 2465 2940 3669 +f 33 2040 67 +f 2040 33 2808 +f 2436 1325 2242 +f 33 2727 2808 +f 1693 3465 953 +f 446 2165 172 +f 2327 2165 446 +f 3144 2199 3286 +f 1754 1674 2780 +f 2061 2258 202 +f 3144 3403 3019 +f 3019 2003 3144 +f 2605 2199 2003 +f 844 1622 2766 +f 3445 2766 1622 +f 2465 3349 1718 +f 1642 953 3465 +f 2274 170 953 +f 170 854 953 +f 3818 2737 1493 +f 2325 1799 3190 +f 1784 2436 3544 +f 844 1784 3544 +f 2436 2242 3544 +f 2100 2766 3445 +f 710 731 2655 +f 953 575 1693 +f 2490 2780 198 +f 170 2417 854 +f 2886 2412 2721 +f 1796 2417 3190 +f 3045 172 3816 +f 3045 446 172 +f 2806 1799 647 +f 1098 3818 2953 +f 204 3144 2353 +f 1844 2117 844 +f 1972 2582 3640 +f 1098 2737 3818 +f 1311 562 1350 +f 1493 835 1350 +f 800 647 1799 +f 2655 436 1562 +f 821 204 1311 +f 1337 1311 204 +f 3286 2353 3144 +f 1799 2806 1796 +f 1674 198 2780 +f 3403 821 2 +f 3873 1977 2721 +f 2412 2384 2721 +f 1098 2953 2335 +f 1683 1464 562 +f 2953 562 1464 +f 1799 2325 3497 +f 2564 2217 2353 +f 1972 3640 2460 +f 1098 2161 2737 +f 2100 3933 2766 +f 198 2940 2465 +f 2533 446 3045 +f 2806 3790 1761 +f 2806 585 3790 +f 3898 894 463 +f 1294 1435 3407 +f 1294 3407 3708 +f 463 894 1294 +f 463 1294 1624 +f 3898 463 2582 +f 1624 3708 3595 +f 1624 1294 3708 +f 2582 463 1624 +f 1488 3181 3538 +f 3181 1488 1972 +f 1412 1581 3512 +f 1972 1488 2636 +f 702 3538 3181 +f 3512 2291 1412 +f 3538 2636 1488 +f 2636 3898 2582 +f 702 1581 3284 +f 3512 1581 3181 +f 1581 702 3181 +f 1972 2636 2582 +f 3512 3181 1972 +f 2489 1718 3349 +f 1517 572 759 +f 1361 1088 3022 +f 2109 1361 3022 +f 3022 3323 2109 +f 2109 887 1361 +f 2109 1168 887 +f 2109 1227 1168 +f 1846 3279 1168 +f 3279 1846 2606 +f 2606 2030 3222 +f 2129 2542 2030 +f 154 2542 2129 +f 154 2744 2542 +f 2744 1659 25 +f 25 1659 2153 +f 2658 1354 331 +f 1064 3323 1088 +f 1064 406 3323 +f 1064 838 406 +f 884 629 2841 +f 2841 629 1647 +f 2479 2030 2542 +f 3728 1637 1643 +f 3214 245 964 +f 3214 2073 245 +f 2082 683 2073 +f 2444 1766 890 +f 989 3124 2846 +f 282 2846 2943 +f 282 1124 3266 +f 1045 99 3188 +f 1045 3188 764 +f 1973 3232 1882 +f 890 1637 2444 +f 1637 3728 2444 +f 3859 3214 2979 +f 3859 2114 3214 +f 3089 2933 2002 +f 223 3124 509 +f 2544 3266 151 +f 294 3188 99 +f 3188 294 115 +f 1861 1746 306 +f 245 3846 2210 +f 1711 2002 683 +f 282 3758 1124 +f 764 941 2618 +f 3173 3638 177 +f 1742 177 1735 +f 900 2744 154 +f 900 1659 2744 +f 1826 331 2153 +f 1826 2658 331 +f 2974 3255 360 +f 2835 360 3255 +f 651 1354 2181 +f 887 1088 1361 +f 1088 3323 3022 +f 1168 1880 887 +f 3323 406 1227 +f 3434 3942 740 +f 740 1149 3434 +f 1149 740 1999 +f 1149 1999 1029 +f 2109 3323 1227 +f 1742 1034 3760 +f 2492 3800 1029 +f 3737 884 2492 +f 2492 884 3800 +f 3737 629 884 +f 1880 1168 3279 +f 3279 2606 3222 +f 3222 2030 2479 +f 2752 1643 1637 +f 3728 1643 973 +f 3728 973 2553 +f 3296 1637 890 +f 3296 2752 1637 +f 1354 25 2153 +f 2979 964 2210 +f 964 2979 3214 +f 2114 2073 3214 +f 2114 2082 2073 +f 2210 3846 3859 +f 2979 2210 3859 +f 2082 2114 3859 +f 1074 3434 1149 +f 1029 1074 1149 +f 3800 1074 1029 +f 838 165 425 +f 3089 1711 2082 +f 2404 1046 245 +f 3089 2002 1711 +f 2933 683 2002 +f 1656 3089 2082 +f 3279 2479 1880 +f 509 3124 3494 +f 2846 3124 223 +f 3812 941 829 +f 2846 223 2943 +f 282 2943 3758 +f 509 3758 2943 +f 3758 509 151 +f 151 1124 3758 +f 151 3266 1124 +f 509 2478 151 +f 2980 1074 3050 +f 2974 1354 2658 +f 3728 2553 1046 +f 3728 1046 2444 +f 1046 3846 245 +f 294 99 2544 +f 115 764 3188 +f 764 115 306 +f 
115 1861 306 +f 764 306 1746 +f 989 1135 1943 +f 294 1861 115 +f 3066 2951 2812 +f 2683 1746 1861 +f 2683 1861 210 +f 2082 1711 683 +f 964 245 2210 +f 829 941 2683 +f 210 2642 2683 +f 3257 509 3494 +f 2556 1766 2444 +f 2444 1046 2404 +f 2812 1045 1465 +f 941 3812 2618 +f 526 623 3232 +f 2812 3266 99 +f 1354 2980 2181 +f 165 838 1064 +f 651 2744 25 +f 2332 18 1636 +f 1746 941 764 +f 1746 2683 941 +f 2188 294 3298 +f 1137 1074 3800 +f 1861 294 2188 +f 2974 2980 1354 +f 2974 360 2980 +f 890 1766 1647 +f 3812 829 3492 +f 829 2683 2642 +f 1273 3844 3278 +f 3232 1778 1882 +f 1882 3610 1973 +f 3844 1882 3278 +f 3610 1882 3844 +f 3038 3527 3784 +f 576 3784 3527 +f 2435 3527 491 +f 699 3428 2243 +f 1783 2854 2243 +f 1819 2530 2351 +f 623 526 3243 +f 491 3393 2435 +f 2435 3393 526 +f 3428 3900 2243 +f 3492 829 623 +f 526 3404 2435 +f 3800 983 1137 +f 3924 3119 2181 +f 2205 1666 2667 +f 1768 2205 2833 +f 3196 1132 2282 +f 1590 3800 2841 +f 1783 2243 3900 +f 1132 3196 3355 +f 3450 40 3402 +f 2810 2036 2191 +f 1387 1997 3842 +f 2810 2191 1666 +f 1135 3388 1943 +f 2833 2667 3784 +f 2282 2601 917 +f 3254 286 858 +f 3038 3393 491 +f 453 1179 1703 +f 1477 2626 3842 +f 1997 3899 1355 +f 699 1880 3428 +f 2152 1948 453 +f 1281 286 3388 +f 1948 3712 1179 +f 1703 1682 681 +f 1703 11 1682 +f 1783 917 2601 +f 1910 3196 2282 +f 2191 2419 3038 +f 1074 1137 3050 +f 2626 3458 2935 +f 3828 1878 2163 +f 830 3402 40 +f 2917 3924 3378 +f 1033 3431 2366 +f 39 3378 1137 +f 2543 339 2339 +f 903 3947 1033 +f 286 3254 830 +f 623 3243 3492 +f 2601 1132 705 +f 2205 2667 2833 +f 1888 339 2419 +f 2935 1387 2626 +f 3899 3327 2621 +f 2854 1458 165 +f 1635 2312 1544 +f 1997 1829 3327 +f 2191 2036 1888 +f 3327 3139 227 +f 2500 3390 2498 +f 2543 1894 3605 +f 2243 165 699 +f 2419 339 3492 +f 2419 3492 3243 +f 194 1281 2152 +f 1880 2479 2238 +f 3899 1997 3327 +f 2756 917 842 +f 983 3800 1590 +f 1179 2355 1703 +f 2556 1829 1590 +f 1829 2556 3139 +f 3947 2170 464 +f 2170 3947 903 +f 2917 3947 464 +f 3947 2917 1802 +f 1179 2543 2355 +f 2830 3924 2917 +f 2419 2191 1888 +f 3038 1666 2191 +f 2366 903 1033 +f 2205 1878 1666 +f 3428 1783 3900 +f 3458 983 2935 +f 1135 2951 3774 +f 1281 3388 3774 +f 2444 3139 2556 +f 339 3605 3812 +f 3812 3492 339 +f 830 3388 286 +f 3712 1894 1179 +f 2543 1179 1894 +f 2386 1635 1544 +f 2470 2397 2386 +f 39 1802 3378 +f 1802 39 3458 +f 220 1330 521 +f 1051 1910 2756 +f 1802 2917 3378 +f 1033 3947 1802 +f 1802 3431 1033 +f 453 681 2152 +f 3784 1666 3038 +f 2667 1666 3784 +f 2500 3431 3390 +f 2621 3254 858 +f 2601 2282 1132 +f 526 3393 3243 +f 2621 227 3254 +f 1783 3428 917 +f 3402 830 3254 +f 1544 858 2386 +f 858 286 2470 +f 1330 1477 521 +f 2621 3327 227 +f 3388 1135 3774 +f 1137 983 39 +f 3431 1802 3458 +f 2649 2355 2339 +f 3924 2181 3050 +f 2404 227 3139 +f 2626 1387 3842 +f 3050 1137 3378 +f 1997 1355 3842 +f 3828 1308 1878 +f 2036 2810 3946 +f 2339 1888 2036 +f 1666 1878 2810 +f 521 1477 3842 +f 3842 1355 521 +f 521 1355 1998 +f 926 220 1998 +f 542 903 2366 +f 2500 2366 3431 +f 1477 1330 2498 +f 820 2152 681 +f 1791 2370 3828 +f 1938 2801 861 +f 861 2170 1091 +f 1091 903 542 +f 1334 775 671 +f 2916 772 1791 +f 1005 2457 3691 +f 3482 3794 1307 +f 283 1312 1418 +f 2773 1679 3182 +f 3249 2945 3500 +f 2823 3073 3351 +f 3326 1291 2391 +f 3182 1679 1938 +f 3663 652 3883 +f 3500 652 3249 +f 3651 811 2498 +f 2498 1330 3651 +f 3691 1525 1509 +f 3507 542 1841 +f 1437 958 1785 +f 1437 573 958 +f 775 1334 3351 +f 3326 3182 3794 +f 2164 573 1834 +f 3663 3326 3482 +f 1437 820 681 +f 1848 2272 2895 +f 1418 2860 1485 +f 1005 
3301 2457 +f 668 11 2649 +f 2211 3862 3841 +f 2690 3946 1308 +f 2585 1612 2289 +f 193 2690 2370 +f 3663 1291 3326 +f 1692 162 2275 +f 3841 2607 220 +f 1307 1670 3482 +f 2500 1841 2366 +f 2500 811 1841 +f 655 668 2690 +f 2807 249 1418 +f 14 1401 2116 +f 3313 3329 2022 +f 1005 3651 3301 +f 2823 2571 3073 +f 2211 1612 3862 +f 2467 2571 2823 +f 1312 283 1692 +f 861 3794 1938 +f 3326 2391 3182 +f 3663 3482 652 +f 1312 2860 1418 +f 1670 3249 652 +f 216 1603 3313 +f 216 3313 2022 +f 3726 3851 3646 +f 916 2860 2640 +f 14 2116 2884 +f 1022 1291 3663 +f 1308 3828 2370 +f 3249 3507 1525 +f 2607 3301 3651 +f 1525 3507 1509 +f 3691 1509 1005 +f 2649 3946 668 +f 573 1682 1365 +f 2945 1525 493 +f 2289 1083 2275 +f 1307 1091 542 +f 772 2916 3351 +f 3249 1670 3507 +f 3073 655 193 +f 2370 772 193 +f 3073 772 3351 +f 772 2370 1791 +f 2116 1401 2522 +f 1509 1841 811 +f 1005 811 3651 +f 2640 2022 916 +f 3883 652 3500 +f 3482 1670 652 +f 1682 573 1437 +f 3794 861 1307 +f 249 1924 283 +f 655 1834 1365 +f 3851 2807 3646 +f 2916 775 3351 +f 1635 3313 3373 +f 1904 14 2884 +f 1022 1767 1291 +f 2358 1875 1592 +f 1473 696 87 +f 2467 13 1552 +f 557 3927 2285 +f 577 3500 2945 +f 896 3092 1572 +f 2494 162 1692 +f 2494 1692 283 +f 1859 87 1904 +f 1859 1473 87 +f 3762 2698 1621 +f 1592 1875 1334 +f 1859 796 2383 +f 2383 1473 1859 +f 14 696 1401 +f 3302 3883 3500 +f 896 1572 3282 +f 318 1875 2358 +f 796 1859 1767 +f 1384 601 2697 +f 1705 1119 1220 +f 1828 610 1123 +f 3705 1779 1572 +f 2728 2383 796 +f 1220 1119 3322 +f 1904 696 14 +f 1291 1767 2884 +f 624 2094 2349 +f 1128 2349 1519 +f 214 851 2443 +f 2697 2437 1384 +f 1805 1413 318 +f 3435 4 2895 +f 2558 2272 2585 +f 3764 3200 1384 +f 1220 2376 1705 +f 3304 3878 1721 +f 2671 2358 1592 +f 1062 601 3200 +f 1621 263 2376 +f 2624 1497 2169 +f 2807 3281 249 +f 3334 1384 2437 +f 2920 1473 2728 +f 1797 214 1751 +f 2376 26 1705 +f 2931 3705 200 +f 1621 123 3762 +f 880 69 3764 +f 2233 2920 1554 +f 1898 3878 3440 +f 3440 2909 1898 +f 3435 1828 4 +f 200 2086 1261 +f 87 696 1904 +f 1767 1904 2884 +f 753 2106 2086 +f 1473 2383 2728 +f 3718 2637 288 +f 1705 26 1400 +f 1220 2317 123 +f 1705 1400 13 +f 1932 2624 1706 +f 1915 162 3304 +f 1252 1533 610 +f 103 1252 3200 +f 1924 3878 2494 +f 2760 896 3282 +f 318 1413 1875 +f 610 1062 1252 +f 1752 493 4 +f 3435 2895 2558 +f 2014 2106 753 +f 1976 3762 123 +f 1898 2909 851 +f 2909 3440 3281 +f 1062 3435 601 +f 1261 2086 545 +f 1261 545 1706 +f 1932 1123 2624 +f 2671 318 2358 +f 103 69 2401 +f 1917 3745 1533 +f 3304 162 2494 +f 1932 3302 577 +f 2494 283 1924 +f 3304 2437 1915 +f 13 1620 1119 +f 1621 2376 1220 +f 1519 2349 3718 +f 3440 1924 3281 +f 1498 2638 1413 +f 1779 3282 1572 +f 3200 601 1384 +f 26 3851 1400 +f 1384 3334 3764 +f 3334 2437 1721 +f 1751 214 2443 +f 2106 2014 2728 +f 851 3172 2443 +f 3608 1721 3878 +f 1915 2437 2697 +f 1452 2317 3322 +f 1261 1341 200 +f 2233 2458 2920 +f 214 1898 851 +f 2467 1620 13 +f 1341 1261 1706 +f 1620 2467 2638 +f 2289 162 1915 +f 753 2086 200 +f 2624 1123 1497 +f 1621 1220 123 +f 2106 1022 1578 +f 3878 3304 2494 +f 610 1828 1062 +f 1572 1554 3705 +f 1620 1498 120 +f 796 2106 2728 +f 3302 3500 577 +f 796 1767 2106 +f 1752 1123 577 +f 1252 1062 3200 +f 2176 2576 1805 +f 1413 2638 1875 +f 1706 545 1932 +f 1932 545 3302 +f 1932 577 1123 +f 851 2909 2698 +f 2293 1533 3745 +f 1252 1917 1533 +f 949 896 2760 +f 693 896 949 +f 572 1517 869 +f 725 1751 3523 +f 909 200 1341 +f 536 230 3240 +f 195 3861 3240 +f 3240 881 536 +f 1057 357 2536 +f 3638 3173 34 +f 2405 871 1986 +f 1779 1735 949 +f 41 1066 2530 +f 1917 103 
2401 +f 1517 3523 869 +f 2352 3753 3414 +f 195 909 944 +f 3700 1722 1606 +f 2058 1606 3684 +f 1378 881 3861 +f 3684 2285 3927 +f 1986 3810 2172 +f 816 869 3868 +f 1986 719 1466 +f 2256 405 719 +f 693 3092 896 +f 2387 2351 3753 +f 2256 2421 725 +f 3762 3901 3172 +f 725 266 2023 +f 780 1517 3271 +f 3271 1517 759 +f 2906 572 869 +f 2528 816 142 +f 2088 3868 730 +f 3638 3055 693 +f 1057 2536 2726 +f 1274 2387 3753 +f 1751 2443 3523 +f 2256 1986 871 +f 2480 1057 2726 +f 1779 2760 3282 +f 2485 1066 1424 +f 1779 3705 2931 +f 1517 780 3523 +f 2931 200 909 +f 3901 869 3172 +f 177 1742 3173 +f 720 624 2349 +f 1378 3861 195 +f 1986 2256 719 +f 2961 41 2530 +f 1201 2961 2530 +f 2421 2256 871 +f 2351 2530 1066 +f 3861 881 3240 +f 1751 725 2421 +f 2352 1274 3753 +f 2058 3700 1606 +f 2172 2285 1986 +f 1986 3120 3810 +f 1034 1735 1779 +f 1986 1466 3120 +f 405 2256 725 +f 266 725 3523 +f 1424 944 2169 +f 41 944 1066 +f 869 2443 3172 +f 230 909 195 +f 667 266 2378 +f 2378 266 3523 +f 780 2378 3523 +f 2485 3745 3753 +f 3753 3745 1917 +f 1976 3901 3762 +f 1606 3753 1917 +f 1606 1917 2401 +f 2495 357 1057 +f 2169 944 1341 +f 1378 41 2961 +f 871 880 2421 +f 1742 1735 1034 +f 2421 1797 1751 +f 2349 1128 1153 +f 177 3638 1735 +f 949 2760 1779 +f 3675 312 2962 +f 720 2491 624 +f 3684 2401 2405 +f 866 3412 3291 +f 148 2104 3187 +f 874 1338 3688 +f 3072 786 1992 +f 3622 874 1931 +f 247 3622 1931 +f 2578 3244 3893 +f 3502 3622 247 +f 874 3622 1338 +f 2872 3291 3662 +f 3624 1597 401 +f 401 3660 3624 +f 2872 3722 2016 +f 2334 2016 2204 +f 1422 3502 2872 +f 1044 2031 2354 +f 2031 1044 636 +f 2047 1338 3622 +f 3502 2047 3622 +f 1842 1266 3205 +f 3502 1422 1073 +f 10 1755 2406 +f 1073 1422 2241 +f 3697 2319 1548 +f 3052 3688 1338 +f 1338 2047 3052 +f 2578 3893 2392 +f 3697 2627 2319 +f 3922 2627 3697 +f 10 2406 3337 +f 3080 3337 3778 +f 104 2627 3922 +f 540 2627 104 +f 1589 2264 1891 +f 2264 1589 2393 +f 2589 540 104 +f 1329 2589 104 +f 3202 3052 2047 +f 3882 2115 1264 +f 819 3882 1264 +f 3642 3202 2047 +f 1073 3642 2047 +f 3202 3922 3052 +f 3642 3343 3202 +f 3343 2527 3202 +f 1432 3642 1073 +f 1432 2565 3642 +f 104 3922 2527 +f 2527 1329 104 +f 2323 2527 3343 +f 609 3072 1992 +f 2216 3088 872 +f 3343 1707 2323 +f 967 3236 956 +f 872 3088 268 +f 3088 956 3236 +f 3337 2406 373 +f 3236 3046 268 +f 591 3778 3337 +f 591 3337 373 +f 3856 1345 472 +f 472 1873 3856 +f 883 1345 2216 +f 3856 3885 1099 +f 1345 883 472 +f 3885 3389 1549 +f 472 883 1876 +f 3921 2239 591 +f 207 174 3778 +f 1303 1245 3389 +f 1891 52 3283 +f 1873 1303 3389 +f 3389 3856 1873 +f 2156 1245 1303 +f 113 1245 2156 +f 1214 1032 1303 +f 1032 2156 1303 +f 1551 1214 1873 +f 1214 1303 1873 +f 3283 1589 1891 +f 276 2393 1589 +f 1032 3593 512 +f 2446 276 1436 +f 1070 2446 3114 +f 1436 3114 2446 +f 196 3283 1929 +f 196 1436 3283 +f 872 581 1876 +f 1770 1921 581 +f 268 581 872 +f 1070 3114 2583 +f 3114 1108 2583 +f 1108 649 2583 +f 1108 3771 649 +f 1921 1876 581 +f 649 3771 1580 +f 1580 3771 2967 +f 1211 3771 1108 +f 1662 1108 1436 +f 1551 3586 3593 +f 3375 1108 1662 +f 293 1342 1211 +f 709 2857 540 +f 709 3921 591 +f 2589 709 540 +f 2054 709 2589 +f 3436 2054 2589 +f 849 1803 2193 +f 388 3236 967 +f 3262 967 2745 +f 388 3046 3236 +f 849 3262 1803 +f 849 535 3262 +f 978 2380 1186 +f 1486 3046 388 +f 1486 388 535 +f 535 2445 1486 +f 3175 156 978 +f 754 2578 2739 +f 2193 3090 849 +f 1009 978 156 +f 1717 2054 3436 +f 3666 3436 1329 +f 3666 20 3436 +f 3436 20 1717 +f 2134 535 849 +f 732 1717 20 +f 2445 535 2134 +f 2037 2445 2134 +f 1717 3623 156 +f 150 2646 159 +f 150 218 
2037 +f 3623 2452 1009 +f 1009 2452 1654 +f 3623 732 2136 +f 817 2026 1770 +f 1486 1770 3046 +f 1099 1654 2452 +f 1486 817 1770 +f 2684 817 1486 +f 2748 2380 978 +f 2748 326 2380 +f 3586 1921 1518 +f 1009 2748 978 +f 837 1100 1518 +f 1518 1100 2610 +f 3893 3244 2952 +f 1012 1627 2748 +f 326 1662 1436 +f 1627 1662 326 +f 3713 837 817 +f 3713 3600 837 +f 1100 837 3600 +f 3600 428 1100 +f 293 3375 1955 +f 913 428 3600 +f 2684 1486 2445 +f 817 2684 3713 +f 1955 1662 1627 +f 2684 2445 3743 +f 999 2684 3743 +f 999 3713 2684 +f 999 229 3713 +f 1326 343 1955 +f 2445 2037 3743 +f 218 3743 2037 +f 2748 1009 1447 +f 2748 1447 1012 +f 999 3743 2208 +f 3743 218 2208 +f 3885 1012 1447 +f 641 1627 1012 +f 2208 3849 999 +f 1549 641 1012 +f 999 3849 229 +f 1549 1012 3885 +f 1099 3885 1654 +f 2208 588 3849 +f 3600 3713 229 +f 1324 1549 1245 +f 1245 1549 3389 +f 913 3600 1633 +f 641 1549 1324 +f 1324 1326 641 +f 486 1633 229 +f 1457 2967 455 +f 2967 1342 455 +f 455 1342 3797 +f 3797 1342 2468 +f 1633 486 2345 +f 2156 2689 113 +f 1342 293 2468 +f 114 2689 2156 +f 1032 114 2156 +f 3797 1810 2952 +f 66 1032 512 +f 343 2468 293 +f 1955 343 293 +f 2468 343 473 +f 2820 2534 754 +f 1449 1992 786 +f 364 3447 1449 +f 1015 3447 364 +f 473 343 791 +f 66 512 3101 +f 1326 791 343 +f 3101 364 66 +f 113 791 1326 +f 2468 473 2586 +f 3101 3208 364 +f 1611 1810 1415 +f 757 3101 233 +f 1207 295 1415 +f 113 1326 1324 +f 757 3208 3101 +f 113 3419 791 +f 3208 148 556 +f 364 556 1015 +f 556 148 3187 +f 556 364 3208 +f 148 3208 2104 +f 2066 2104 3208 +f 1300 2066 804 +f 804 2066 3208 +f 1300 804 2318 +f 2318 804 2340 +f 1510 3142 2643 +f 3142 1510 850 +f 2643 3142 1532 +f 2971 757 2610 +f 3153 3142 850 +f 1389 3153 850 +f 757 2971 2340 +f 2610 1100 2971 +f 2664 754 2534 +f 3680 2971 428 +f 428 2971 1100 +f 2625 427 1532 +f 2625 119 427 +f 3153 2625 1532 +f 119 2625 2518 +f 2340 3027 2318 +f 3027 558 2318 +f 3027 2340 3714 +f 558 3027 221 +f 3714 3147 221 +f 3714 3405 3147 +f 422 885 3690 +f 1838 3153 885 +f 1715 3680 428 +f 2345 1715 913 +f 3680 1715 1623 +f 1623 3405 3714 +f 3555 1715 2345 +f 422 1838 885 +f 1715 3555 1623 +f 3555 966 1623 +f 3634 3147 3405 +f 752 3405 1623 +f 752 3634 3405 +f 1623 1985 752 +f 2764 1623 966 +f 1985 1623 2764 +f 2764 1909 1927 +f 2764 966 1909 +f 1927 1985 2764 +f 966 2838 1909 +f 2882 3287 3882 +f 2792 374 1516 +f 2488 3699 1516 +f 2617 159 3083 +f 2617 150 159 +f 2617 3083 3882 +f 819 695 3882 +f 695 2882 3882 +f 638 2882 695 +f 2488 374 53 +f 2488 53 3133 +f 374 2488 1516 +f 3287 2617 3882 +f 1536 3287 2882 +f 2488 3133 688 +f 2361 2628 2617 +f 3233 543 432 +f 432 1582 3233 +f 1582 2142 3233 +f 150 1193 897 +f 915 3133 53 +f 1152 1193 724 +f 3210 897 1152 +f 588 218 897 +f 588 2208 218 +f 1632 915 543 +f 3077 1322 1632 +f 119 2204 3752 +f 897 3210 588 +f 3849 3413 486 +f 2785 486 3413 +f 2204 650 2334 +f 798 2334 650 +f 2204 119 650 +f 119 2518 650 +f 2785 831 486 +f 2998 3413 588 +f 3146 798 650 +f 2785 3413 3358 +f 3358 3664 2785 +f 603 798 3146 +f 1422 2334 2241 +f 831 2345 486 +f 798 2241 2334 +f 3555 2345 831 +f 2799 3555 831 +f 612 2785 1241 +f 2785 612 831 +f 2799 966 3555 +f 2241 798 3709 +f 2838 966 2799 +f 3910 1360 2518 +f 3338 2838 2799 +f 289 3678 1962 +f 3136 603 3146 +f 3678 289 614 +f 401 1597 727 +f 401 727 2338 +f 1276 401 2338 +f 401 1276 3660 +f 1919 1963 2236 +f 1360 3910 1919 +f 1106 1276 2338 +f 2338 3678 1106 +f 1919 2236 3136 +f 2723 1276 1106 +f 2723 930 1276 +f 3136 2236 125 +f 1006 125 2236 +f 1226 3678 1820 +f 3136 1166 603 +f 502 873 1226 +f 2347 873 1762 +f 603 
1166 3709 +f 3678 1226 1106 +f 3709 1166 1566 +f 1106 1226 3839 +f 125 1166 3136 +f 1226 873 2347 +f 889 125 1006 +f 125 889 1166 +f 889 1566 1166 +f 3412 3839 3649 +f 1566 889 1831 +f 1499 2347 1762 +f 1499 1762 2676 +f 3343 3642 2565 +f 1197 1499 2676 +f 1197 3895 1499 +f 795 1499 3895 +f 3895 3641 795 +f 2441 2347 1499 +f 1386 3343 2565 +f 3212 2441 795 +f 1386 2565 77 +f 1499 795 2441 +f 3649 2441 3662 +f 1566 605 3709 +f 2903 77 2565 +f 541 1386 77 +f 836 795 3641 +f 836 3212 795 +f 2970 836 3641 +f 1386 1707 3343 +f 2269 1707 1386 +f 3752 3722 3212 +f 3752 2204 3722 +f 636 1044 930 +f 1276 1044 2354 +f 3666 1707 367 +f 1276 930 1044 +f 930 3731 636 +f 3731 930 2085 +f 3831 749 541 +f 3367 2883 3731 +f 3731 2085 3367 +f 3037 2883 3367 +f 810 3356 3367 +f 3037 3367 3356 +f 2661 77 605 +f 2898 810 2085 +f 2661 1411 77 +f 1411 541 77 +f 3614 1411 2661 +f 3614 2389 1411 +f 1831 2661 1566 +f 2661 1831 3614 +f 2783 1831 889 +f 3397 3205 2244 +f 1842 3220 1266 +f 3205 3397 1842 +f 606 1842 3397 +f 3060 1842 606 +f 3060 3220 1842 +f 1411 2389 541 +f 2226 1548 3397 +f 606 3397 1548 +f 2319 814 606 +f 3819 1840 3831 +f 606 1548 2319 +f 1230 3801 814 +f 541 2389 3831 +f 814 3801 3060 +f 1230 2406 1755 +f 1230 373 2406 +f 2857 373 1230 +f 3673 1230 814 +f 2319 3673 814 +f 2627 3673 2319 +f 459 2236 1963 +f 459 1006 2236 +f 2857 1230 3673 +f 2623 688 3133 +f 2723 3645 930 +f 3412 2723 3839 +f 688 2623 1963 +f 340 459 2623 +f 3645 1931 2898 +f 3645 866 1931 +f 3645 2723 866 +f 1501 889 1006 +f 1501 2783 889 +f 2623 1586 340 +f 1586 1322 2590 +f 552 2590 1322 +f 19 3925 430 +f 1427 3925 19 +f 3925 3554 2720 +f 3925 3128 3554 +f 3799 3128 1427 +f 260 3387 1724 +f 1724 3387 3614 +f 1501 2720 2783 +f 1097 260 1724 +f 488 2389 3387 +f 2619 488 3387 +f 3706 488 914 +f 2619 914 488 +f 914 2619 2990 +f 260 859 815 +f 815 1434 1830 +f 518 777 1830 +f 518 1830 1434 +f 327 1434 2574 +f 518 1434 327 +f 2142 239 2255 +f 239 2142 61 +f 1801 239 61 +f 239 1801 1974 +f 1457 455 3244 +f 1974 1801 1167 +f 2255 3077 1632 +f 1513 552 3077 +f 239 3209 2255 +f 552 1376 2590 +f 2850 2138 1376 +f 2850 552 1513 +f 2850 1376 552 +f 483 1513 2946 +f 483 2850 1513 +f 770 2850 483 +f 2850 770 2138 +f 3168 2138 770 +f 3168 231 2138 +f 1905 3799 1427 +f 2138 1530 1376 +f 3799 1905 1530 +f 3496 3799 1530 +f 231 1530 2138 +f 1530 231 460 +f 371 3237 1530 +f 1530 460 1161 +f 1530 1161 371 +f 3237 3496 1530 +f 991 3237 371 +f 910 3496 3237 +f 2713 3496 910 +f 2449 3496 2713 +f 3496 2449 1007 +f 3496 1007 3589 +f 3128 2427 1543 +f 3799 3496 2427 +f 3785 1550 2427 +f 1550 2574 2427 +f 3589 952 3785 +f 3008 3785 952 +f 3008 1550 3785 +f 2584 1092 327 +f 2584 3882 1092 +f 957 2584 2044 +f 2115 3882 2584 +f 2115 2584 3830 +f 367 2269 749 +f 367 2051 20 +f 20 2051 732 +f 749 1481 367 +f 2051 367 1481 +f 2051 2136 732 +f 2051 1081 2136 +f 1481 1081 2051 +f 1840 1481 749 +f 1481 1840 2566 +f 2566 1081 1481 +f 1081 2566 1585 +f 1081 1482 2136 +f 2263 2136 1482 +f 2263 2452 2136 +f 2263 1099 2452 +f 1345 1099 2263 +f 2216 2263 1482 +f 2216 1345 2263 +f 1081 1585 1482 +f 1840 824 2566 +f 956 1585 2566 +f 2566 824 956 +f 3819 824 1840 +f 2745 956 824 +f 3706 3819 488 +f 824 3819 3706 +f 2745 824 3706 +f 1585 956 3088 +f 1803 2745 3706 +f 914 2193 1803 +f 2990 2193 914 +f 2973 2990 777 +f 1346 2867 2937 +f 643 2746 3473 +f 1234 3913 2834 +f 3776 2937 2867 +f 3473 2746 1346 +f 3473 1346 2937 +f 3192 3896 2147 +f 1460 1867 2770 +f 451 774 354 +f 1975 161 2017 +f 1112 2081 676 +f 3716 3463 2674 +f 476 818 2137 +f 2686 1467 560 +f 2486 169 3912 +f 
1317 2862 2496 +f 3533 169 2486 +f 3904 580 1151 +f 1140 867 2067 +f 1467 50 560 +f 3644 669 2674 +f 3811 1792 2496 +f 2459 1939 1453 +f 3884 1453 1939 +f 2862 1317 321 +f 2083 2911 396 +f 284 2911 2083 +f 73 396 2911 +f 2674 986 3644 +f 3054 2137 50 +f 73 181 1229 +f 3276 1969 2459 +f 2674 669 3716 +f 1140 580 1738 +f 2679 1140 1738 +f 1332 867 2679 +f 1453 3884 2229 +f 2182 860 3067 +f 2182 3067 2496 +f 2686 1052 1467 +f 3054 1235 2137 +f 2137 1235 476 +f 1939 2686 3884 +f 3276 1453 2042 +f 3463 2182 2862 +f 1151 1235 3904 +f 3644 986 2679 +f 2679 986 1332 +f 986 2862 1332 +f 2486 3912 985 +f 3457 1317 2496 +f 3235 3276 3533 +f 3644 2679 1738 +f 321 1786 756 +f 1453 3276 2459 +f 669 3644 181 +f 3533 3276 2042 +f 3276 3235 1969 +f 1077 985 3912 +f 2182 2178 2254 +f 2459 284 1052 +f 1676 2657 860 +f 284 2789 2911 +f 580 3904 1738 +f 2324 669 181 +f 2042 1453 2229 +f 3463 3716 2178 +f 2674 3463 986 +f 1939 2459 1052 +f 1052 284 2083 +f 396 3054 50 +f 396 3866 3054 +f 3866 1229 3904 +f 2486 985 3116 +f 1217 2789 2493 +f 1870 3929 3879 +f 1668 2789 1393 +f 2228 1916 3117 +f 895 868 501 +f 857 669 2324 +f 2178 3716 631 +f 948 2228 3117 +f 2632 985 1077 +f 3611 72 1462 +f 3437 2237 2147 +f 3929 1870 1420 +f 72 3879 3305 +f 1607 1668 1393 +f 3452 1870 3879 +f 2034 307 1013 +f 833 704 2034 +f 3294 2944 895 +f 3668 2411 2981 +f 948 3154 3929 +f 3117 129 948 +f 1916 129 3117 +f 1077 307 1893 +f 3042 3005 3170 +f 2866 985 2632 +f 501 3294 895 +f 2944 1393 951 +f 3294 1104 2944 +f 1203 3235 3116 +f 3611 3452 72 +f 3668 631 857 +f 1120 355 2600 +f 3668 1157 1270 +f 2084 501 868 +f 3536 3433 2359 +f 1689 1916 1314 +f 2944 1607 1393 +f 3084 3176 3207 +f 994 3652 252 +f 3170 3005 515 +f 3305 1462 72 +f 3452 3879 72 +f 3346 3863 2078 +f 868 895 1404 +f 1893 2034 704 +f 3415 355 704 +f 2174 1430 1278 +f 252 3116 2866 +f 1430 397 1278 +f 2893 886 3020 +f 948 1420 2228 +f 2228 1420 1916 +f 868 1404 3474 +f 2438 2811 261 +f 1090 3042 565 +f 1196 3207 674 +f 2944 1104 1607 +f 3020 3652 994 +f 1278 1104 3294 +f 117 2604 886 +f 1203 3116 252 +f 2770 774 304 +f 3916 3452 3611 +f 3433 923 240 +f 3293 1283 1689 +f 252 2866 3769 +f 994 252 3769 +f 923 1217 2493 +f 3042 2078 3005 +f 1090 2078 3042 +f 1975 354 161 +f 129 1283 1270 +f 1653 3536 2359 +f 1689 1283 129 +f 923 3536 1217 +f 1668 1607 593 +f 73 1668 593 +f 1283 2254 631 +f 1893 2632 1077 +f 1283 3293 2254 +f 1430 2174 2587 +f 1676 3293 3490 +f 3716 857 631 +f 355 3429 704 +f 1430 2411 397 +f 252 3652 1203 +f 3176 1157 2981 +f 414 2388 2796 +f 3879 3929 3305 +f 1653 2604 3474 +f 3929 1420 948 +f 1870 1480 672 +f 833 1013 3346 +f 3474 1404 1653 +f 2893 2438 117 +f 397 3668 857 +f 3916 3611 1462 +f 571 1969 1203 +f 1090 3346 2078 +f 833 3346 1090 +f 3433 240 571 +f 3154 948 129 +f 994 2893 3020 +f 2438 1862 117 +f 2017 161 3311 +f 2185 3213 3103 +f 322 2604 117 +f 662 3916 1462 +f 2288 717 604 +f 3916 1210 3452 +f 717 1210 662 +f 199 3424 3035 +f 584 2247 613 +f 565 2796 2388 +f 963 3696 1038 +f 117 1862 322 +f 1928 3928 3093 +f 3195 3474 2604 +f 2888 322 1862 +f 348 841 474 +f 497 2364 1421 +f 1983 1567 404 +f 3176 1462 3154 +f 3084 604 1462 +f 1210 3916 662 +f 3650 1608 199 +f 2577 1480 1210 +f 548 3707 2972 +f 3306 3218 3108 +f 3456 323 2938 +f 3424 199 1608 +f 482 2364 717 +f 365 3707 2322 +f 424 736 3650 +f 1928 3093 2691 +f 1608 3612 3424 +f 1410 2753 2888 +f 1964 3231 841 +f 184 2288 1709 +f 554 1901 1983 +f 1901 1121 3576 +f 3108 3218 1165 +f 1113 3424 3612 +f 2977 164 1349 +f 508 261 2561 +f 2364 660 1210 +f 199 3408 927 +f 3035 3408 199 +f 3108 1165 
2620 +f 508 2561 3707 +f 2691 548 2782 +f 1410 2888 1982 +f 414 1150 3277 +f 2972 2777 3277 +f 3466 1275 1367 +f 813 2652 1964 +f 1901 554 2977 +f 1816 1567 2753 +f 2612 1526 2540 +f 613 2247 2714 +f 1982 2888 1862 +f 334 2322 3093 +f 3792 322 2888 +f 3218 3803 1165 +f 2620 2579 3696 +f 3928 334 3093 +f 2322 3707 548 +f 787 1115 1664 +f 1734 3562 1816 +f 1165 2035 2620 +f 2561 2972 3707 +f 2313 1567 1816 +f 1490 660 2364 +f 508 1982 1862 +f 3650 736 1608 +f 2035 1709 634 +f 2652 813 134 +f 3231 2977 554 +f 134 3108 2652 +f 1526 105 3612 +f 2364 1210 717 +f 1349 2668 2977 +f 2668 1349 963 +f 2782 2972 3277 +f 2577 1210 660 +f 584 3408 2247 +f 365 2322 334 +f 2084 2977 2668 +f 105 2691 2782 +f 1764 1113 2796 +f 3093 548 2691 +f 1816 1410 3370 +f 3093 2322 548 +f 2247 3035 2749 +f 3424 1764 3035 +f 323 1421 482 +f 1421 323 3773 +f 1983 3576 3792 +f 1734 2719 2656 +f 1664 1248 2902 +f 2045 161 348 +f 1248 1928 12 +f 1115 1367 1664 +f 354 813 161 +f 1391 3103 2313 +f 2045 3103 3213 +f 3218 2147 3803 +f 344 2902 1248 +f 1391 2313 3562 +f 2751 736 1511 +f 161 813 841 +f 3130 3103 1391 +f 3545 1664 1275 +f 3063 1016 3545 +f 3306 3108 134 +f 1598 2902 344 +f 3306 134 1867 +f 1275 3063 3545 +f 813 354 774 +f 424 1511 736 +f 3562 2313 1816 +f 676 2081 2656 +f 161 2045 3311 +f 12 344 1248 +f 1371 2612 2751 +f 2770 1867 774 +f 3562 1734 2656 +f 3545 2719 1500 +f 1448 788 3192 +f 2045 348 3103 +f 3456 2938 2237 +f 841 348 161 +f 2612 3453 344 +f 134 813 774 +f 1448 3218 3306 +f 1448 2147 3218 +f 2237 2938 2147 +f 3566 2676 1762 +f 2883 3037 3356 +f 3660 3335 3624 +f 3566 2555 502 +f 1762 2555 3566 +f 1266 3220 1348 +f 1348 3220 3060 +f 310 1891 2264 +f 3643 721 1070 +f 3643 1070 2583 +f 1580 3643 649 +f 1457 1580 2967 +f 1111 850 1510 +f 1980 1389 850 +f 1980 3757 1389 +f 3356 2244 3475 +f 1806 3690 3757 +f 2792 422 1806 +f 2859 2643 2970 +f 1510 2643 1111 +f 1111 1980 850 +f 1293 61 2142 +f 863 1801 61 +f 1806 422 3690 +f 235 3701 863 +f 784 1495 432 +f 1495 1582 432 +f 2874 1962 737 +f 2874 737 658 +f 1237 1971 1962 +f 3335 727 1597 +f 3335 1597 3624 +f 2031 2487 2354 +f 2487 2031 636 +f 1561 3060 3801 +f 898 1111 2859 +f 3701 1167 1801 +f 863 3701 1801 +f 2863 678 2432 +f 746 1755 578 +f 1891 443 52 +f 362 183 2792 +f 2693 3778 174 +f 443 2693 174 +f 3059 3588 362 +f 276 2264 2393 +f 721 276 2446 +f 2583 649 3643 +f 3573 1237 3161 +f 678 1237 3573 +f 1857 2863 2432 +f 1857 2432 1313 +f 1824 2883 3475 +f 3080 10 3337 +f 1580 1457 1663 +f 2792 1806 362 +f 1313 2432 372 +f 267 3135 1321 +f 3135 267 2140 +f 3135 2140 3566 +f 3059 1914 3201 +f 22 2591 3826 +f 678 1971 1237 +f 3059 362 2905 +f 3566 3677 2676 +f 3731 2883 1824 +f 1864 443 310 +f 1663 2195 3643 +f 2905 1980 478 +f 372 2476 1313 +f 2476 372 3335 +f 2905 1806 3757 +f 2414 1522 1845 +f 2142 1582 1293 +f 3475 1836 1824 +f 3546 3475 2244 +f 81 2693 443 +f 2874 3161 1237 +f 1237 1962 2874 +f 3161 2874 658 +f 658 372 3161 +f 1321 614 1971 +f 502 1820 3135 +f 3566 502 3135 +f 21 2863 1857 +f 362 3588 3461 +f 3677 3641 3895 +f 3566 2140 2712 +f 3573 3161 372 +f 2354 3335 3660 +f 372 727 3335 +f 2487 636 1824 +f 1309 1348 1686 +f 1686 1348 1561 +f 3356 3475 2883 +f 2878 578 3080 +f 3335 2354 2487 +f 3319 81 961 +f 1688 3546 2244 +f 1019 1272 2195 +f 746 1561 3801 +f 578 1755 10 +f 2616 21 1313 +f 3201 3588 3059 +f 1845 1522 3876 +f 81 3319 2693 +f 21 1857 1313 +f 21 569 2863 +f 2591 863 1293 +f 3808 1824 1836 +f 1914 2905 478 +f 1914 3059 2905 +f 2432 3573 372 +f 3588 928 3461 +f 3826 1495 928 +f 678 267 1321 +f 3566 2712 2414 +f 2414 2712 1522 +f 
3546 1836 3475 +f 1319 746 578 +f 885 1389 3757 +f 3690 885 3757 +f 3080 2693 3319 +f 1516 1838 2792 +f 1272 721 2195 +f 3876 2859 1845 +f 1111 898 478 +f 2616 3735 21 +f 141 1313 2476 +f 1553 267 569 +f 2140 267 1553 +f 2140 3547 2712 +f 2926 3876 1522 +f 3268 346 3808 +f 2859 2970 1845 +f 1111 2643 2859 +f 1836 3546 3486 +f 183 374 2792 +f 1561 2889 1686 +f 2905 3757 1980 +f 2878 1319 578 +f 3461 183 362 +f 1864 310 1272 +f 2195 2869 1019 +f 2869 2195 3203 +f 3467 3203 1663 +f 1663 1457 3467 +f 1495 784 3461 +f 3361 898 3876 +f 898 2859 3876 +f 727 372 658 +f 1971 289 1962 +f 1971 614 289 +f 3678 614 1820 +f 1845 2970 3641 +f 502 1226 1820 +f 873 502 2555 +f 2555 1762 873 +f 2676 3677 1197 +f 3895 1197 3677 +f 3205 1348 1688 +f 1348 3205 1266 +f 746 3801 1755 +f 3080 578 10 +f 3778 2693 3080 +f 1891 310 443 +f 721 2264 276 +f 1070 721 2446 +f 1580 1663 3643 +f 627 3164 597 +f 1138 597 3164 +f 1138 3834 597 +f 2356 3834 2870 +f 2870 2301 2356 +f 2870 2966 2301 +f 2301 2966 3422 +f 3422 834 2301 +f 2896 834 2265 +f 925 2896 2265 +f 1553 569 3735 +f 1053 2606 1846 +f 1846 1168 1227 +f 2606 1053 2030 +f 2030 1053 2129 +f 900 2129 1053 +f 900 154 2129 +f 3686 1659 900 +f 35 1428 532 +f 1428 35 3211 +f 2153 1659 1826 +f 2974 2658 171 +f 3686 1826 1659 +f 3211 91 1428 +f 389 126 2609 +f 1227 406 838 +f 3676 2712 3547 +f 2922 35 3079 +f 625 3079 35 +f 2800 625 532 +f 2800 532 3890 +f 2167 146 3890 +f 3211 1454 734 +f 734 1454 1710 +f 734 1710 1628 +f 1425 2889 1319 +f 385 426 353 +f 3859 3846 2747 +f 2167 2120 146 +f 2513 1515 2120 +f 2167 2513 2120 +f 2513 2121 1515 +f 3411 2121 3727 +f 3727 2121 3442 +f 3442 790 386 +f 386 3727 3442 +f 2535 2230 1109 +f 157 1109 3627 +f 3179 3203 3467 +f 876 2597 2648 +f 876 2648 417 +f 31 417 3400 +f 3400 417 2648 +f 1368 31 3400 +f 3051 691 2616 +f 2816 1719 691 +f 91 1719 2816 +f 2302 17 946 +f 995 48 448 +f 1913 448 1871 +f 2001 1871 448 +f 2499 2296 461 +f 1989 3016 2499 +f 1777 2262 1800 +f 1512 2779 1740 +f 62 1392 1740 +f 1280 126 314 +f 1392 484 1729 +f 2956 1729 484 +f 898 529 478 +f 3711 1280 314 +f 389 314 126 +f 389 3711 314 +f 3711 273 1280 +f 1010 273 3711 +f 1162 1164 3086 +f 2513 3442 2121 +f 1655 1553 257 +f 1655 2140 1553 +f 3547 2140 1655 +f 1648 3015 273 +f 1855 2597 2060 +f 1775 1489 3751 +f 1010 3914 2130 +f 1233 529 3361 +f 733 1648 1808 +f 1808 1945 733 +f 733 997 3015 +f 1109 2230 566 +f 2975 1775 997 +f 2975 1817 1775 +f 1489 1775 1817 +f 1749 3269 713 +f 3303 3539 3269 +f 2545 3751 1489 +f 17 467 2010 +f 946 2328 2302 +f 48 531 448 +f 448 531 2001 +f 3751 2545 2957 +f 2001 799 2546 +f 799 1646 2704 +f 2525 1060 2381 +f 3372 3903 1060 +f 2957 2747 3751 +f 2356 3903 627 +f 91 2816 1782 +f 1800 1989 1777 +f 2747 2957 679 +f 597 3834 2356 +f 834 2896 2301 +f 1781 2307 2316 +f 925 241 2896 +f 2230 2747 679 +f 2747 2230 2535 +f 3526 2746 479 +f 2535 450 2747 +f 2609 1540 2112 +f 3111 1010 2130 +f 3219 2591 22 +f 3859 450 1787 +f 450 3859 2747 +f 1787 3025 3859 +f 3025 995 1913 +f 3647 2158 408 +f 1160 3547 1655 +f 3016 3257 158 +f 3257 3016 1630 +f 3269 1233 713 +f 1926 905 3939 +f 2849 3201 1914 +f 905 1926 378 +f 241 475 2896 +f 3111 1808 1648 +f 691 1719 3081 +f 1836 3779 3808 +f 3939 1425 3376 +f 3526 2534 2820 +f 1781 1782 2307 +f 1259 3822 2743 +f 426 385 1855 +f 763 713 3822 +f 3303 1749 3074 +f 3074 1749 3177 +f 1860 3303 3074 +f 426 1855 2705 +f 691 257 3735 +f 3442 2513 1781 +f 1860 2111 2740 +f 2316 790 1781 +f 790 2316 3464 +f 467 1860 2740 +f 2740 2989 2328 +f 2989 2740 1315 +f 1914 478 529 +f 417 2060 876 +f 2060 417 31 +f 
3111 1648 1010 +f 171 2658 1826 +f 531 3011 3627 +f 171 3255 2974 +f 2001 531 905 +f 2057 1777 1646 +f 2835 3434 360 +f 1777 2057 2075 +f 3942 3434 2835 +f 3942 1029 1999 +f 1029 3942 2492 +f 3015 3737 2492 +f 2492 3942 3015 +f 389 2609 2112 +f 1053 3686 900 +f 389 2112 3914 +f 629 3737 3015 +f 3015 1647 629 +f 2922 3211 35 +f 3211 2922 1454 +f 625 35 532 +f 3890 532 2167 +f 1019 2869 3418 +f 2869 2367 758 +f 1628 3081 734 +f 353 2705 1628 +f 2705 353 426 +f 890 1647 1775 +f 1243 3179 3526 +f 1781 790 3442 +f 1775 3296 890 +f 973 2752 3296 +f 1643 2752 973 +f 3111 3438 3926 +f 385 2597 1855 +f 2597 876 2060 +f 3086 1425 3939 +f 31 1368 645 +f 3704 645 1368 +f 2057 961 409 +f 2975 1945 1808 +f 1817 1808 2631 +f 171 1826 3686 +f 1162 566 2957 +f 2535 1109 157 +f 3051 2816 691 +f 1777 2499 1646 +f 659 1053 1227 +f 3268 2363 141 +f 790 3686 1053 +f 257 1553 3735 +f 2622 3079 625 +f 529 17 2849 +f 1280 273 3015 +f 2464 3779 1836 +f 1164 644 3086 +f 3479 1836 3486 +f 1227 2121 659 +f 790 659 386 +f 1227 1515 2121 +f 3411 659 2121 +f 175 659 3411 +f 175 386 659 +f 862 100 22 +f 1053 659 790 +f 1646 905 378 +f 3711 389 3914 +f 531 1787 3011 +f 3711 3914 1010 +f 531 48 1787 +f 378 2057 1646 +f 2001 2575 1871 +f 799 2001 1646 +f 2575 2001 2546 +f 2157 2704 1646 +f 2157 1646 1094 +f 1646 1638 1094 +f 3269 467 17 +f 1646 461 1638 +f 2499 461 1646 +f 2499 1777 1989 +f 2262 1777 62 +f 2246 2262 62 +f 2246 62 419 +f 62 2779 419 +f 2779 62 1740 +f 62 484 1392 +f 3400 942 1368 +f 3704 1368 942 +f 409 81 1864 +f 1160 1855 2060 +f 31 645 3194 +f 2307 3051 141 +f 1010 1648 273 +f 1648 733 3015 +f 2307 141 2363 +f 1945 997 733 +f 2975 997 1945 +f 1808 1817 2975 +f 2631 1489 1817 +f 1489 2631 2545 +f 1745 3683 832 +f 679 2957 566 +f 566 2230 679 +f 157 450 2535 +f 1164 2631 1808 +f 450 157 3775 +f 1808 3111 1164 +f 2631 1162 2545 +f 2545 1162 2957 +f 3627 905 531 +f 3097 22 3826 +f 2082 3859 1656 +f 1777 2075 62 +f 3179 1243 2367 +f 3723 3194 645 +f 3011 1787 3775 +f 3775 1787 450 +f 1787 48 3025 +f 48 995 3025 +f 995 448 1913 +f 532 1782 2513 +f 2513 1782 1781 +f 3016 158 3647 +f 3016 1989 744 +f 3647 2499 3016 +f 3111 3926 1164 +f 1800 2331 1989 +f 1989 2331 744 +f 3086 644 1425 +f 3194 1160 31 +f 17 832 2849 +f 3704 2743 2131 +f 3939 905 3627 +f 176 1745 832 +f 467 3269 3539 +f 378 961 2057 +f 17 2010 946 +f 17 2302 832 +f 308 3903 862 +f 832 185 176 +f 2849 1914 529 +f 2896 1684 2301 +f 627 597 2356 +f 2408 308 1918 +f 2356 2301 1684 +f 3219 22 100 +f 1782 2816 2307 +f 31 1160 2060 +f 3540 3840 758 +f 1243 479 3110 +f 1540 2316 1601 +f 1233 3361 1146 +f 17 529 1233 +f 2363 3268 3808 +f 2001 905 1646 +f 1425 644 2889 +f 409 2075 2057 +f 832 2302 185 +f 3319 378 1926 +f 961 378 3319 +f 409 1864 391 +f 2896 475 1684 +f 2869 3203 2367 +f 1719 91 3081 +f 91 1782 532 +f 1628 2705 3081 +f 2513 2167 532 +f 2131 1146 3723 +f 1855 257 3081 +f 2609 3464 1540 +f 1855 1655 257 +f 1918 3588 3201 +f 1918 928 3588 +f 1918 308 928 +f 1601 2363 3808 +f 2743 3822 84 +f 713 84 3822 +f 1749 713 763 +f 3303 3269 1749 +f 1860 3539 3303 +f 467 3539 1860 +f 2740 2010 467 +f 2740 946 2010 +f 2740 2328 946 +f 2328 2989 2302 +f 2302 2989 185 +f 1060 2525 3372 +f 3903 3372 1912 +f 3903 1912 627 +f 429 3427 2224 +f 3070 564 2224 +f 2910 2192 2775 +f 2775 2192 2424 +f 2277 2379 1813 +f 1813 3877 2015 +f 3877 1616 2015 +f 3877 1451 1616 +f 3877 1414 1451 +f 1414 1209 640 +f 2454 1240 133 +f 1574 3339 1357 +f 2132 3321 1641 +f 2453 1988 3620 +f 828 1216 167 +f 359 3125 646 +f 3510 3395 1187 +f 776 523 3395 +f 205 3832 2455 +f 2455 3064 205 
+f 242 3339 1574 +f 242 1574 2960 +f 2192 2910 1343 +f 1583 2192 3076 +f 2379 2277 1583 +f 2454 133 2398 +f 2453 74 3321 +f 779 2815 2305 +f 779 2305 275 +f 3245 828 167 +f 3510 1187 1967 +f 3510 776 3395 +f 3064 2455 1192 +f 222 2309 1472 +f 1472 3157 222 +f 3157 1472 1812 +f 3674 3416 333 +f 3070 2224 3427 +f 3724 564 3070 +f 1357 3339 2851 +f 287 3671 2960 +f 287 1343 3671 +f 2192 1583 2424 +f 1583 3076 2379 +f 3470 1813 465 +f 2015 2287 465 +f 2287 2015 1616 +f 1002 2166 466 +f 2960 1574 287 +f 2039 2455 2038 +f 2098 1574 1357 +f 1343 287 3744 +f 2132 1641 179 +f 2453 3620 74 +f 1988 2453 3618 +f 3618 1216 1988 +f 779 3488 2815 +f 1002 2321 2166 +f 2321 1472 2309 +f 3674 1812 1472 +f 3341 2379 1995 +f 3341 418 3877 +f 2098 1357 3070 +f 3877 1209 1414 +f 1641 3321 2222 +f 3620 1675 74 +f 1988 1675 3620 +f 1216 3061 1988 +f 3488 1439 2815 +f 2815 1439 2184 +f 2184 2305 2815 +f 2305 2184 2963 +f 2305 2963 275 +f 2046 3510 359 +f 828 1205 2694 +f 2132 2453 3321 +f 3245 359 646 +f 3125 980 646 +f 1967 3746 3125 +f 1967 1187 42 +f 1192 729 3105 +f 3488 2630 179 +f 2630 3488 779 +f 3510 637 776 +f 776 1117 523 +f 1117 776 637 +f 3510 3832 637 +f 1967 3125 359 +f 2098 438 1574 +f 1002 173 1238 +f 466 173 1002 +f 3341 3877 2379 +f 2630 2132 179 +f 2497 3105 729 +f 359 3510 1967 +f 418 1209 3877 +f 2581 3439 2454 +f 1414 2996 1451 +f 281 359 167 +f 133 1240 3783 +f 2039 1192 2455 +f 3744 2148 3076 +f 1995 2379 3076 +f 3517 418 3341 +f 1593 3777 2581 +f 3618 2453 3430 +f 2201 779 439 +f 2455 3510 2038 +f 359 281 2046 +f 3288 1472 2321 +f 1874 3744 287 +f 2201 3430 2630 +f 3510 2046 2038 +f 74 2222 3321 +f 2677 179 2222 +f 2222 1988 3061 +f 1988 2222 1675 +f 2694 3061 1216 +f 2694 1216 828 +f 2222 74 1675 +f 3061 2694 2222 +f 3488 2677 1439 +f 2963 3180 275 +f 828 3245 3078 +f 3078 3245 646 +f 1529 980 3125 +f 1529 3125 3746 +f 3746 1967 42 +f 42 1187 2645 +f 1187 846 2645 +f 846 1187 3395 +f 846 3395 523 +f 846 523 1117 +f 3765 3832 205 +f 259 1117 637 +f 1117 2645 846 +f 259 2645 1117 +f 637 3832 3765 +f 259 637 3765 +f 3617 259 3765 +f 3420 2645 259 +f 3617 3765 205 +f 1238 1839 2531 +f 466 1080 173 +f 2166 1080 466 +f 222 2722 2309 +f 222 3157 2722 +f 3674 333 1812 +f 1812 3635 3157 +f 3635 1812 333 +f 3416 3320 333 +f 935 333 3320 +f 3416 3427 500 +f 500 3320 3416 +f 500 3427 429 +f 423 429 2224 +f 2224 564 423 +f 2960 1827 242 +f 2960 3671 1827 +f 1343 369 3671 +f 1343 2539 369 +f 1343 2910 2539 +f 2910 2775 3821 +f 2424 2005 2775 +f 2424 697 2005 +f 1583 324 697 +f 697 2424 1583 +f 1583 2277 324 +f 1813 3470 2277 +f 465 1813 2015 +f 2996 1414 640 +f 2454 416 1240 +f 133 3783 2398 +f 416 977 1240 +f 416 1114 977 +f 2868 2039 2038 +f 3906 1041 2537 +f 287 438 1874 +f 602 2560 30 +f 3297 2663 3631 +f 3631 2663 2041 +f 3719 666 2041 +f 3636 30 1027 +f 435 3636 1027 +f 435 1027 700 +f 3582 1908 3123 +f 2529 2802 2259 +f 1316 1640 3297 +f 3229 3741 970 +f 970 1195 3229 +f 1515 2430 2120 +f 386 3563 3727 +f 3727 3563 2430 +f 2120 2430 2410 +f 2410 2430 3036 +f 2430 3563 3036 +f 3820 189 1020 +f 337 2139 1351 +f 522 3591 769 +f 3454 189 1883 +f 2343 2521 2670 +f 3417 970 3741 +f 2879 1658 1942 +f 2879 1942 3848 +f 797 1195 970 +f 1942 1996 3848 +f 3848 1996 2685 +f 2685 2190 3848 +f 2685 3344 2190 +f 3344 1818 2190 +f 2190 1818 3392 +f 865 3848 2190 +f 865 2190 1236 +f 1236 2190 3392 +f 1236 3392 3123 +f 3392 3582 3123 +f 3260 1542 1520 +f 3392 1818 3582 +f 2273 3582 3183 +f 2663 3297 1640 +f 2560 602 3430 +f 3582 1818 3183 +f 3582 2273 1908 +f 3618 281 167 +f 1262 2320 2294 +f 3123 3308 1236 +f 
3123 1908 3308 +f 3183 492 2988 +f 2101 3105 2497 +f 1672 3288 2101 +f 3183 2988 2273 +f 2988 1542 2273 +f 972 1542 3169 +f 1542 972 2876 +f 1542 2988 3169 +f 3517 1593 418 +f 2670 2521 3454 +f 2786 1301 2087 +f 700 1027 2560 +f 1254 1672 685 +f 1282 1883 189 +f 1640 2985 2139 +f 685 1672 1379 +f 1041 3906 1351 +f 1351 3906 337 +f 987 2033 666 +f 2873 2038 2046 +f 2200 2868 2873 +f 1254 685 1602 +f 1093 1874 438 +f 1874 2148 3744 +f 2695 3341 1995 +f 522 920 3591 +f 2663 3719 2041 +f 2772 2201 439 +f 2046 664 2873 +f 2521 1301 3552 +f 1231 2101 2497 +f 3399 3331 1535 +f 3906 2537 3636 +f 2273 3260 1908 +f 43 1874 1093 +f 1995 2148 1107 +f 1316 2410 1640 +f 2200 664 2329 +f 2985 2410 3036 +f 3036 76 2433 +f 3719 987 666 +f 2529 2329 2802 +f 351 1093 1541 +f 1542 3260 2273 +f 351 3229 1093 +f 2148 43 3499 +f 1020 2092 1242 +f 3399 2695 1107 +f 1213 1290 3517 +f 700 2560 3430 +f 3813 602 2537 +f 2200 2329 3308 +f 353 1628 3631 +f 1154 353 3631 +f 385 353 1154 +f 1231 1520 2092 +f 2529 2259 1658 +f 2529 1658 2879 +f 865 2529 2879 +f 1515 527 2430 +f 527 3727 2430 +f 865 3308 2529 +f 865 1236 3308 +f 3454 1883 2670 +f 2087 1301 2521 +f 3229 351 1301 +f 3698 3741 2786 +f 2648 942 3400 +f 1541 438 2098 +f 43 2148 1874 +f 1213 3399 1535 +f 522 1290 2099 +f 1213 3517 2695 +f 2695 3517 3341 +f 2139 1041 1351 +f 2663 2139 337 +f 337 3719 2663 +f 2410 2985 1640 +f 76 2802 2433 +f 30 2560 1027 +f 602 30 2537 +f 1542 2876 1520 +f 2876 2092 1520 +f 1520 1479 3260 +f 281 3813 131 +f 189 3820 1282 +f 664 2046 131 +f 3331 1380 511 +f 1246 1535 3331 +f 2797 1535 2019 +f 1020 189 3454 +f 3454 1379 1020 +f 3552 1301 351 +f 1479 1231 2497 +f 3229 2786 3741 +f 420 1380 1335 +f 3906 3636 435 +f 3906 435 337 +f 337 435 700 +f 1380 3399 1335 +f 2320 2099 2797 +f 2320 1262 2431 +f 1725 2431 1262 +f 1725 1463 2431 +f 522 769 1290 +f 2802 131 2433 +f 664 131 2802 +f 2873 664 2200 +f 2670 1883 2223 +f 1677 2670 2223 +f 1677 2343 2670 +f 2521 2343 2087 +f 507 2868 2200 +f 1041 2139 2985 +f 3009 2868 507 +f 2985 2433 2537 +f 1020 809 2092 +f 685 1379 3454 +f 3377 685 3454 +f 3229 43 1093 +f 2786 2568 3698 +f 3417 411 3619 +f 411 3741 3698 +f 3741 411 3417 +f 3417 3619 970 +f 970 3619 1956 +f 970 1956 797 +f 1213 2695 3399 +f 337 2772 3719 +f 2537 1041 2985 +f 1301 2786 3229 +f 700 3430 2201 +f 3499 1195 2065 +f 2431 522 2099 +f 2329 664 2802 +f 1908 3260 507 +f 1231 1479 1520 +f 2873 2868 2038 +f 2868 3009 2039 +f 1195 43 3229 +f 43 1195 3499 +f 420 2065 3602 +f 2065 420 1335 +f 1380 2666 511 +f 2666 1380 420 +f 1940 3331 511 +f 3331 1940 1246 +f 1593 3517 1290 +f 2663 1640 2139 +f 3036 2433 2985 +f 1535 1246 2019 +f 30 3636 2537 +f 2797 2019 1169 +f 2259 2802 76 +f 3813 2537 2433 +f 3813 2433 131 +f 987 2055 3871 +f 2797 1169 2320 +f 3297 3631 1628 +f 2041 1154 3631 +f 2041 3138 1154 +f 385 1154 3138 +f 3138 1665 2597 +f 3138 666 1665 +f 1665 2648 2597 +f 942 2648 1665 +f 3138 2041 666 +f 1264 1723 819 +f 695 819 3754 +f 3754 638 695 +f 1708 2882 638 +f 2183 2882 1708 +f 2882 2183 1536 +f 358 3287 1536 +f 2361 3287 358 +f 3287 2361 2617 +f 2628 2361 1284 +f 1284 724 2628 +f 3793 2569 3781 +f 3754 1708 638 +f 1536 2183 358 +f 2373 2361 358 +f 3845 3358 3391 +f 1634 3781 3621 +f 235 2194 3553 +f 3248 1944 1000 +f 3754 819 1381 +f 1381 1708 3754 +f 1381 2183 1708 +f 1284 2361 2373 +f 2194 213 3847 +f 3168 345 975 +f 1101 315 1576 +f 2517 2449 1143 +f 1933 952 1398 +f 3008 952 1933 +f 2929 384 3830 +f 3845 1994 1241 +f 3553 1974 1167 +f 3902 483 2946 +f 483 1681 770 +f 345 3168 1681 +f 315 460 975 +f 213 2194 3858 +f 315 
1161 460 +f 1161 315 1101 +f 1101 1576 991 +f 1576 2517 1143 +f 3847 213 2946 +f 1576 1143 910 +f 2517 1007 2449 +f 2517 1398 1007 +f 2252 1887 3477 +f 1887 3126 957 +f 3184 384 2929 +f 2634 724 1284 +f 1395 724 2634 +f 1994 3845 3391 +f 707 1815 538 +f 1974 2194 3847 +f 707 626 139 +f 975 1984 315 +f 1974 3553 2194 +f 863 3248 1000 +f 3858 1944 3248 +f 1933 2252 3008 +f 3008 2252 3477 +f 1723 3830 384 +f 819 1723 384 +f 384 1381 819 +f 707 139 1815 +f 3189 1815 139 +f 1000 235 863 +f 1944 2194 235 +f 213 3902 2946 +f 1395 305 2687 +f 2762 2998 2687 +f 825 3858 3248 +f 2145 2517 315 +f 2252 1933 1896 +f 2344 2252 1896 +f 3391 3358 2762 +f 2569 778 3621 +f 85 2762 959 +f 1388 1994 3391 +f 1757 2420 1994 +f 235 1000 1944 +f 538 778 2569 +f 1563 3902 213 +f 1576 315 2517 +f 1251 918 3892 +f 85 3391 2762 +f 3248 2591 825 +f 1366 3902 80 +f 2146 3184 2344 +f 3184 2929 2344 +f 1251 3053 918 +f 959 2762 2687 +f 2175 1563 3858 +f 3091 2517 2145 +f 1896 1933 3091 +f 726 599 3053 +f 626 707 534 +f 3571 3189 139 +f 80 3902 1563 +f 3091 1933 2517 +f 2373 1381 918 +f 599 2634 3053 +f 1395 2634 599 +f 599 305 1395 +f 85 1388 3391 +f 2183 2373 358 +f 1395 3210 1152 +f 1395 2687 3210 +f 3781 2569 3621 +f 3664 3845 1241 +f 3781 3570 3793 +f 3570 3781 1634 +f 3209 1974 3847 +f 3168 770 1681 +f 975 231 3168 +f 231 975 460 +f 1161 1101 371 +f 371 1101 991 +f 991 1576 3237 +f 3237 1576 910 +f 2713 910 1143 +f 2713 1143 2449 +f 1398 3589 1007 +f 1398 952 3589 +f 2044 1550 3477 +f 2044 3477 957 +f 1887 957 3477 +f 3126 2584 957 +f 2584 3126 3830 +f 1723 2115 3830 +f 1723 1264 2115 +f 2928 217 2271 +f 23 2928 2271 +f 392 3889 23 +f 3006 3889 392 +f 2856 2474 856 +f 2448 3069 2925 +f 2506 517 1899 +f 1900 3559 487 +f 124 3480 1900 +f 2839 1464 1683 +f 1683 206 2672 +f 2672 206 2280 +f 2672 2280 3382 +f 3382 1596 250 +f 3382 947 1596 +f 2070 2344 1896 +f 2344 2070 2146 +f 2593 3665 250 +f 3761 3382 250 +f 1594 570 1899 +f 3519 1388 85 +f 2150 825 2448 +f 1953 265 823 +f 3069 2448 3219 +f 475 241 2337 +f 1953 100 475 +f 1299 1866 2804 +f 3432 1866 1299 +f 85 959 1793 +f 3519 3541 1388 +f 2925 2150 2448 +f 2715 1155 1228 +f 1669 2925 3069 +f 265 2925 1669 +f 2651 1739 2848 +f 3006 2551 3889 +f 2474 3006 856 +f 2474 1131 2124 +f 398 1069 2651 +f 1733 1069 1847 +f 1733 1847 3559 +f 124 2839 3480 +f 747 2839 2672 +f 1953 3472 100 +f 1953 823 3472 +f 3609 3766 201 +f 2070 1896 3501 +f 3833 2715 1503 +f 3519 85 1793 +f 1184 3804 2059 +f 3191 2928 23 +f 3889 3191 23 +f 2337 3371 789 +f 3889 2551 3318 +f 1895 3571 139 +f 2515 1155 3729 +f 3043 3318 2551 +f 383 1563 2175 +f 2839 747 1491 +f 121 3665 3062 +f 1537 782 121 +f 121 3062 1537 +f 782 1537 2804 +f 2804 2742 1299 +f 3432 1299 3661 +f 3661 904 3432 +f 904 3661 2290 +f 2290 163 904 +f 1756 163 2290 +f 2897 2549 1459 +f 2897 2901 2549 +f 782 2804 1866 +f 3409 1184 2059 +f 1756 2897 163 +f 3219 3472 3069 +f 3472 1669 3069 +f 2901 1228 1155 +f 1228 2901 2151 +f 2852 2715 1228 +f 80 383 1258 +f 1228 2266 2852 +f 201 1899 570 +f 1793 3833 3519 +f 3219 2448 825 +f 383 3318 1258 +f 3858 2150 2175 +f 2966 2870 1832 +f 2265 834 925 +f 3501 3091 1739 +f 2337 241 2186 +f 2186 241 3468 +f 3468 3371 2186 +f 2029 3371 3468 +f 3371 2029 217 +f 2059 1251 3892 +f 789 1953 475 +f 1953 789 265 +f 3804 1459 726 +f 1459 1713 726 +f 475 2337 789 +f 217 3191 618 +f 3923 954 1491 +f 2150 265 2175 +f 3043 574 2257 +f 2651 3501 1739 +f 870 1184 3409 +f 2374 383 2175 +f 1756 2151 2901 +f 823 265 1669 +f 2715 3833 3729 +f 1239 2506 1899 +f 954 2146 2070 +f 1594 1899 3230 +f 3519 3833 3109 +f 3109 
3541 3519 +f 3889 3318 3191 +f 2474 2124 2551 +f 1793 168 3729 +f 618 2374 265 +f 3541 3109 2877 +f 2150 3858 825 +f 3318 3043 1258 +f 80 1563 383 +f 1155 2715 3729 +f 3043 2257 1258 +f 2146 431 3184 +f 1459 2549 1713 +f 1155 2549 2901 +f 2515 2549 1155 +f 3804 726 1251 +f 2374 618 3191 +f 3371 618 789 +f 2877 626 3541 +f 2551 3006 2474 +f 2928 3191 217 +f 1056 1895 139 +f 217 618 3371 +f 3559 590 1733 +f 590 3559 1491 +f 590 3501 2651 +f 1069 1733 2651 +f 590 2070 3501 +f 1900 1491 3559 +f 1491 1900 3480 +f 1491 3480 2839 +f 3923 2059 431 +f 2059 3923 3409 +f 1866 1459 3804 +f 121 782 1184 +f 3804 1184 1866 +f 100 3472 3219 +f 1669 3472 823 +f 2374 2175 265 +f 121 1184 870 +f 2124 574 3043 +f 1304 1239 2306 +f 2515 1713 2549 +f 1304 2506 1239 +f 1239 1899 201 +f 570 3174 201 +f 139 3766 1056 +f 3739 1716 3 +f 1296 1716 3739 +f 870 1716 1296 +f 3739 870 1296 +f 250 128 3761 +f 3739 128 870 +f 3409 3923 2659 +f 1339 2672 3382 +f 2672 1339 747 +f 1339 3382 3761 +f 3739 3761 128 +f 3 3923 747 +f 3 1339 3739 +f 1716 3409 2659 +f 128 250 3665 +f 128 3665 121 +f 870 128 121 +f 3923 3 2659 +f 3739 1339 3761 +f 1716 870 3409 +f 3 1716 2659 +f 3872 3166 2428 +f 1925 3166 1789 +f 3872 2428 2425 +f 3701 3553 3029 +f 1696 3872 3701 +f 1696 3701 3029 +f 3701 2425 1167 +f 3151 3029 3553 +f 2428 3029 3151 +f 1789 3166 3872 +f 1789 3872 1696 +f 1696 2428 3166 +f 2711 1167 2425 +f 2428 3151 2425 +f 3151 3553 1167 +f 1925 1696 3166 +f 1789 1696 1925 +f 2425 3701 3872 +f 1696 3029 2428 +f 235 3553 3701 +f 2425 3151 2711 +f 1167 2711 3151 +f 707 538 2569 +f 221 2968 558 +f 3147 2968 221 +f 2968 3147 3634 +f 3621 2365 1634 +f 328 2104 2066 +f 2510 3447 1015 +f 3187 1730 556 +f 3187 2104 328 +f 2318 558 3198 +f 1985 1927 2932 +f 2932 1927 1909 +f 3634 752 1279 +f 2350 538 2750 +f 2750 538 1815 +f 778 2365 3621 +f 2365 778 2350 +f 3072 2308 295 +f 3072 609 2308 +f 2838 1599 2932 +f 2739 2578 2392 +f 3295 609 1992 +f 2392 3893 1028 +f 982 1611 2511 +f 3189 2750 1815 +f 3455 3013 1171 +f 3455 806 3633 +f 3447 3295 1992 +f 1730 3013 2510 +f 2442 806 3198 +f 1327 2639 3571 +f 1015 1730 2510 +f 330 1300 2318 +f 1599 2750 2639 +f 2968 3198 558 +f 1279 752 1985 +f 330 2318 3198 +f 2308 609 2422 +f 496 2422 609 +f 2750 3189 2639 +f 538 2350 778 +f 1353 3013 3455 +f 2510 3013 1475 +f 3913 2392 2768 +f 1353 1475 3013 +f 2442 291 806 +f 2952 1028 3893 +f 3017 2511 2308 +f 2308 2422 3017 +f 3295 2510 1475 +f 3013 1730 1171 +f 806 330 3198 +f 2993 2968 1279 +f 2932 1599 2900 +f 519 2603 2900 +f 806 3455 1171 +f 1028 2768 2392 +f 2768 1028 1611 +f 982 2511 3017 +f 556 1730 1015 +f 1599 2639 2900 +f 2639 3189 3571 +f 328 2066 1300 +f 330 806 1171 +f 2768 1611 982 +f 982 3017 2763 +f 2350 3338 2365 +f 2365 3338 3570 +f 2365 3570 1634 +f 2511 1611 1415 +f 3609 3174 1774 +f 3913 2768 2949 +f 2949 2768 982 +f 1895 1056 1327 +f 3345 2107 224 +f 224 3264 3345 +f 742 3755 496 +f 3264 3230 517 +f 2483 2538 3658 +f 1907 192 2483 +f 1565 1907 2483 +f 2187 2774 1907 +f 2187 1565 1 +f 1 596 2187 +f 1858 2461 2894 +f 1148 2592 773 +f 138 1475 1353 +f 2108 2900 2639 +f 2377 1774 3174 +f 2949 982 2763 +f 1039 236 2724 +f 3633 806 291 +f 1594 3227 570 +f 3919 3789 2286 +f 3838 1700 2052 +f 1700 3838 1546 +f 1700 1546 2025 +f 2025 1546 3932 +f 3806 950 3085 +f 3806 285 2090 +f 3495 2143 356 +f 356 3658 3495 +f 3913 2949 2834 +f 63 3776 2834 +f 510 3919 3658 +f 3766 3609 1774 +f 1565 2483 108 +f 596 2894 2187 +f 1565 108 1 +f 3932 1546 2048 +f 596 1858 2894 +f 3085 285 3806 +f 1798 1858 596 +f 285 632 2090 +f 3607 2143 632 +f 1353 3455 3340 +f 
2598 1858 381 +f 381 1174 2598 +f 1858 1798 381 +f 2598 1174 1011 +f 1011 657 2598 +f 3607 632 285 +f 2283 1011 1174 +f 2283 657 1011 +f 1594 3230 3227 +f 2177 3098 3004 +f 1833 2177 3004 +f 3755 352 2422 +f 3004 3098 2754 +f 1039 1604 3098 +f 236 2177 3386 +f 236 1039 2177 +f 2592 1148 519 +f 1039 3817 2399 +f 2639 1327 2108 +f 3725 773 379 +f 1408 1798 596 +f 1798 1408 381 +f 381 1408 1174 +f 1174 1408 2283 +f 3919 108 2483 +f 3386 657 102 +f 1 3919 1408 +f 3386 2724 236 +f 1039 633 3817 +f 46 3455 3633 +f 3230 3264 224 +f 29 3838 63 +f 773 3155 2180 +f 102 46 2724 +f 2107 3802 3155 +f 1897 3802 2107 +f 3345 1897 2107 +f 3264 1897 3345 +f 517 1897 3264 +f 301 285 2048 +f 285 3085 2048 +f 3230 2377 3227 +f 1475 138 742 +f 2143 3607 510 +f 2592 1930 2865 +f 2865 1930 2377 +f 1930 2108 1327 +f 1895 1327 3571 +f 63 2834 2949 +f 3476 3755 742 +f 1 108 3919 +f 657 1408 102 +f 633 1039 2724 +f 3455 46 3340 +f 3857 3633 291 +f 3155 2865 2107 +f 2377 224 2865 +f 224 2107 2865 +f 2377 3174 3227 +f 3789 742 138 +f 3838 2052 63 +f 2108 1930 2592 +f 1930 1056 3766 +f 291 303 3857 +f 3152 303 291 +f 1148 2660 519 +f 1408 2286 3340 +f 3607 285 3263 +f 2286 138 1353 +f 2286 1353 3340 +f 2048 1546 3838 +f 2048 3085 3932 +f 29 63 2949 +f 2143 510 356 +f 510 3658 356 +f 1056 1930 1327 +f 3263 301 352 +f 2483 3658 3919 +f 1408 596 1 +f 1408 657 2283 +f 46 3857 2724 +f 3386 102 2724 +f 3857 633 2724 +f 2278 303 2660 +f 3817 2278 3725 +f 3725 2278 1148 +f 773 2592 2865 +f 46 3633 3857 +f 3174 570 3227 +f 224 2377 3230 +f 3362 2025 3932 +f 3085 3362 3932 +f 3085 950 3362 +f 950 3030 3362 +f 2090 950 3806 +f 2090 3639 3030 +f 950 2090 3030 +f 632 3639 2090 +f 2143 368 632 +f 2538 3495 3658 +f 191 2538 2828 +f 192 2828 2538 +f 2483 192 2538 +f 3071 192 1907 +f 2774 3071 1907 +f 2461 1858 2598 +f 3386 677 657 +f 3098 1604 101 +f 2177 1039 3098 +f 1604 1039 875 +f 1039 2969 875 +f 1039 2399 2969 +f 379 299 3725 +f 2180 379 773 +f 2025 2994 1700 +f 3362 2994 2025 +f 3639 368 1253 +f 1253 368 2992 +f 2143 2992 368 +f 2095 3495 44 +f 2538 191 44 +f 368 3639 632 +f 3495 2682 2143 +f 2095 2682 3495 +f 3495 2538 44 +f 2032 864 3654 +f 3823 3654 864 +f 864 2032 3107 +f 1455 3823 144 +f 1364 1455 144 +f 144 2396 1364 +f 3363 60 2554 +f 1560 90 2330 +f 1695 990 441 +f 1695 441 1678 +f 2526 2924 1652 +f 1043 3359 136 +f 136 3920 1765 +f 136 3359 3920 +f 3359 1043 680 +f 3359 680 2709 +f 2435 3404 576 +f 812 1619 2068 +f 576 3527 2435 +f 2794 1569 1575 +f 380 1569 2794 +f 2899 3675 380 +f 1182 60 3363 +f 1182 561 60 +f 3784 576 2833 +f 3675 2899 3481 +f 1748 3481 440 +f 2826 440 1560 +f 440 2826 1748 +f 561 1743 60 +f 1378 1431 881 +f 730 336 2088 +f 3075 816 3518 +f 449 3410 454 +f 449 1372 3410 +f 2708 2163 1768 +f 812 2068 2413 +f 2118 812 1249 +f 3075 142 816 +f 2986 142 3075 +f 2986 2528 142 +f 2986 2906 2528 +f 2675 2761 1139 +f 1697 780 3271 +f 1697 2378 780 +f 1139 2761 1743 +f 667 2378 1652 +f 792 667 1652 +f 3720 3828 2163 +f 3508 3410 1372 +f 792 266 667 +f 792 2023 266 +f 3173 377 2395 +f 1588 3720 2982 +f 136 3578 1043 +f 671 3410 3749 +f 2916 2469 3410 +f 3654 1110 2032 +f 2032 3534 3107 +f 680 1043 3290 +f 212 3508 1372 +f 1791 3828 3720 +f 2163 2982 3720 +f 3508 3749 3410 +f 1519 3718 288 +f 1519 288 2413 +f 945 719 405 +f 1728 2413 288 +f 945 1466 719 +f 3556 70 943 +f 2407 692 3780 +f 1078 3556 943 +f 1347 3810 3120 +f 3810 1347 2172 +f 3908 1644 3449 +f 2285 2172 557 +f 2407 3780 2481 +f 1644 70 3556 +f 1568 1722 3700 +f 2844 1722 1568 +f 3414 2844 2735 +f 3414 2735 2352 +f 3564 2403 2788 +f 3484 3556 1078 
+f 1334 671 1592 +f 1728 2637 2176 +f 1086 2176 3396 +f 2871 3508 212 +f 1122 3749 2403 +f 1519 2413 1865 +f 2481 3780 955 +f 2671 1122 318 +f 1182 2880 561 +f 1128 1519 1865 +f 2407 2413 1086 +f 2413 2407 1036 +f 2481 1036 2407 +f 2680 2387 1274 +f 3749 1122 2671 +f 1819 2387 2680 +f 2403 3749 2871 +f 2336 2554 2372 +f 671 3749 2671 +f 3749 3508 2871 +f 1728 288 2637 +f 3239 2530 1819 +f 1201 2530 3239 +f 1036 812 2413 +f 1082 452 3836 +f 2637 2576 2176 +f 3484 2961 1201 +f 1082 3836 2927 +f 2671 1592 671 +f 1086 1728 2176 +f 2413 1728 1086 +f 3396 692 1086 +f 2407 1086 692 +f 3564 1122 2403 +f 2481 955 1249 +f 1249 1036 2481 +f 3396 2176 1805 +f 187 407 2480 +f 187 1364 407 +f 1364 187 1671 +f 2068 3489 2413 +f 3106 2726 376 +f 2396 2495 407 +f 3548 2508 586 +f 3383 3258 3548 +f 3258 3383 2249 +f 2654 2594 153 +f 421 58 3606 +f 2680 3449 1644 +f 3364 2249 3383 +f 421 3534 58 +f 2396 407 1364 +f 70 3908 3581 +f 1865 2413 1153 +f 377 1742 3760 +f 377 3173 1742 +f 15 2899 380 +f 1743 561 690 +f 1431 1378 2961 +f 2794 15 380 +f 561 2348 690 +f 3484 1201 3239 +f 2880 2348 561 +f 1274 2352 2680 +f 3290 826 680 +f 3359 2709 3920 +f 3239 1819 1644 +f 1652 1765 3920 +f 278 136 1765 +f 278 1765 2924 +f 1153 1128 1865 +f 524 2836 568 +f 524 1134 2836 +f 1765 1652 2924 +f 3823 1455 1110 +f 3823 1110 3654 +f 3534 421 3107 +f 812 1036 1249 +f 1619 1993 2260 +f 1671 376 1364 +f 2554 557 2172 +f 586 2508 3598 +f 2554 1347 3120 +f 586 3383 3548 +f 1652 441 990 +f 945 3120 1466 +f 2654 421 2594 +f 2526 1652 990 +f 1455 1364 3489 +f 1110 3489 2260 +f 2761 2735 2844 +f 2260 1993 3534 +f 2761 2844 1568 +f 524 1695 1678 +f 1568 3700 1419 +f 990 2368 2526 +f 2761 1568 1419 +f 990 1695 2368 +f 1419 3700 2058 +f 1704 3760 536 +f 3760 1704 377 +f 1704 1431 3127 +f 2554 2172 1347 +f 524 1560 2330 +f 945 2372 2554 +f 1431 1704 881 +f 843 58 3534 +f 3414 1722 2844 +f 3534 1993 843 +f 3927 2058 3684 +f 3484 1431 2961 +f 3920 405 2023 +f 90 2532 2330 +f 1456 2532 2012 +f 3920 2023 1652 +f 90 15 2532 +f 1650 2013 3127 +f 2532 15 2012 +f 3127 377 1704 +f 1652 2023 792 +f 2794 1082 1215 +f 2439 3271 759 +f 572 2439 759 +f 2439 572 2906 +f 2906 869 2528 +f 2927 2012 1215 +f 2927 1215 1082 +f 1652 2378 1697 +f 1652 1697 3271 +f 1082 2794 452 +f 2536 376 2726 +f 1652 3271 2439 +f 2439 2906 441 +f 869 3901 3868 +f 2794 1344 452 +f 336 730 2826 +f 2826 2281 1748 +f 3075 3518 1678 +f 3481 1748 312 +f 312 3675 3481 +f 586 452 1344 +f 441 3075 1678 +f 1344 2396 3364 +f 380 3675 2962 +f 380 2962 1569 +f 1277 1569 2962 +f 1695 568 2368 +f 1575 1569 1277 +f 568 1695 524 +f 2396 144 3300 +f 2654 2396 3300 +f 586 1344 3364 +f 2396 2654 3364 +f 3300 3107 2654 +f 1678 3518 2088 +f 864 3300 144 +f 864 3107 3300 +f 864 144 3823 +f 2088 336 1678 +f 336 1560 1678 +f 452 1445 3836 +f 336 2826 1560 +f 2794 1575 2006 +f 2006 1575 2948 +f 2948 357 2495 +f 2260 2068 1619 +f 2006 2948 2495 +f 2260 3489 2068 +f 1057 407 2495 +f 407 1057 2480 +f 187 2480 2726 +f 3106 187 2726 +f 3106 1671 187 +f 1671 3106 376 +f 586 1445 452 +f 2395 377 3127 +f 1359 524 2330 +f 2032 2260 3534 +f 1455 3489 1110 +f 3107 421 2654 +f 2260 2032 1110 +f 843 1993 3944 +f 2959 3007 1619 +f 3944 1993 3007 +f 421 3606 2594 +f 1619 3007 1993 +f 2218 2559 1534 +f 1534 2336 2218 +f 826 1443 680 +f 1443 2559 2709 +f 1534 2559 2923 +f 2559 2218 2372 +f 1443 2709 680 +f 2709 2559 2372 +f 1651 3363 1534 +f 2218 2336 2372 +f 3363 2336 1534 +f 3363 2554 2336 +f 1651 1182 3363 +f 1747 1359 335 +f 2836 152 568 +f 2185 3103 3130 +f 246 225 2081 +f 246 2081 1112 +f 3894 2159 2450 +f 
1769 400 3290 +f 3290 400 1396 +f 676 3250 3026 +f 3466 1001 1275 +f 1605 2395 3127 +f 1605 1885 2395 +f 2880 1651 3682 +f 2880 3682 2509 +f 1664 7 787 +f 1664 3001 7 +f 3001 1664 2902 +f 1577 3562 2081 +f 1937 2751 59 +f 2751 3941 59 +f 937 1224 1369 +f 1267 347 1649 +f 1649 2738 1267 +f 2738 1649 132 +f 499 993 2738 +f 1136 2912 3167 +f 316 2912 3730 +f 3730 2707 316 +f 1885 2707 3583 +f 2707 3730 3583 +f 3306 1129 1224 +f 3583 3945 1885 +f 3945 3867 1699 +f 2678 1219 594 +f 3945 3583 3867 +f 1950 3908 2675 +f 3213 3491 2045 +f 2202 3130 1391 +f 2202 1391 1577 +f 2805 153 2594 +f 2249 153 71 +f 3258 2249 71 +f 71 153 2805 +f 3548 3258 2508 +f 1445 3598 1071 +f 3333 1618 1650 +f 3357 2013 1650 +f 3453 2612 1371 +f 2824 1359 1747 +f 1134 1359 2824 +f 2362 2751 1511 +f 2362 1511 682 +f 1134 2824 901 +f 1701 147 2599 +f 2836 1134 152 +f 901 152 1134 +f 1699 1340 34 +f 34 1340 3055 +f 1699 3515 1340 +f 1699 3867 3515 +f 899 480 568 +f 1471 3685 2924 +f 480 899 781 +f 3685 781 3149 +f 3685 3149 2504 +f 826 1396 2923 +f 2504 3149 852 +f 2504 2203 2520 +f 2504 852 2203 +f 1680 3365 3063 +f 2203 3578 2520 +f 3578 2203 852 +f 3578 852 254 +f 3578 254 1769 +f 1680 3063 1001 +f 1769 254 400 +f 1448 1224 788 +f 400 1673 147 +f 1680 1001 3332 +f 1396 400 147 +f 1680 3332 2390 +f 2923 1651 1534 +f 2110 495 785 +f 785 495 2822 +f 2348 2880 2509 +f 2822 495 515 +f 2007 690 2348 +f 424 1219 1511 +f 2701 2675 1139 +f 3163 2567 1941 +f 3581 1078 943 +f 92 3580 785 +f 1078 3581 3333 +f 1358 7 3001 +f 3001 3767 1358 +f 1701 3682 1651 +f 3163 1941 933 +f 3682 1701 3365 +f 2509 3682 3365 +f 1598 1600 3767 +f 3041 2665 1181 +f 1247 2081 225 +f 1363 595 2567 +f 3520 2064 3594 +f 3520 3594 3041 +f 1941 1181 2665 +f 927 2781 424 +f 595 3408 584 +f 3580 2110 785 +f 1758 2822 2653 +f 3456 3773 323 +f 3357 1267 2013 +f 2080 3354 3773 +f 2013 993 3127 +f 584 3483 595 +f 2064 595 3483 +f 92 3522 1468 +f 3522 92 2822 +f 927 3408 1363 +f 2567 1181 1941 +f 642 3163 933 +f 1190 2567 3163 +f 3483 3522 2064 +f 3522 2822 1758 +f 1181 2567 3520 +f 2822 92 785 +f 3483 1468 3522 +f 3453 1371 399 +f 1363 1190 2781 +f 2064 3520 2567 +f 613 2714 1468 +f 1468 2714 92 +f 3594 3522 1758 +f 1941 2665 933 +f 3522 3594 2064 +f 1181 3520 3041 +f 584 613 3483 +f 3483 613 1468 +f 3127 1136 3167 +f 1950 2701 1358 +f 3167 316 1605 +f 2927 3836 1456 +f 2237 3437 2159 +f 682 1511 1219 +f 1445 1071 3836 +f 480 2526 2368 +f 2045 3491 3311 +f 3223 2781 1190 +f 1163 2466 494 +f 499 2738 3941 +f 2827 354 1975 +f 451 354 2827 +f 399 1937 132 +f 3824 1460 2770 +f 2825 1867 1460 +f 2825 1741 1867 +f 34 3055 3638 +f 1369 1129 1741 +f 1224 1129 1369 +f 400 254 1673 +f 2395 3945 1699 +f 3167 2912 316 +f 3456 3894 2450 +f 993 1136 3127 +f 2738 993 2013 +f 2738 2013 1267 +f 781 899 3149 +f 3357 347 1267 +f 3581 2915 1618 +f 3908 1950 2915 +f 3867 3223 3515 +f 132 3941 2738 +f 3941 2362 499 +f 1651 2880 1182 +f 2923 1396 1651 +f 2751 2362 3941 +f 59 3941 132 +f 132 1937 59 +f 1618 399 1649 +f 3453 399 1600 +f 399 1371 1937 +f 1043 1769 3290 +f 1043 3578 1769 +f 568 152 899 +f 3453 1600 1598 +f 1650 1618 3357 +f 2504 2924 3685 +f 3333 3581 1618 +f 1471 781 3685 +f 2902 1598 3767 +f 2902 3767 3001 +f 787 7 2390 +f 3034 1444 1747 +f 3306 1224 1448 +f 787 2390 1115 +f 1367 1115 3332 +f 3332 3466 1367 +f 1275 1001 3063 +f 2390 3332 1115 +f 1163 3773 3456 +f 1001 3466 3332 +f 524 1359 1134 +f 2330 335 1359 +f 3908 2915 3581 +f 3034 1483 1444 +f 2701 1680 1358 +f 3034 1456 1483 +f 3250 1016 2599 +f 3250 2599 3026 +f 1016 3365 1701 +f 1483 1456 937 +f 3026 2599 147 
+f 676 3026 2790 +f 676 2790 1112 +f 147 2790 3026 +f 1937 1371 2751 +f 3515 1017 1340 +f 1902 788 937 +f 1902 937 3836 +f 937 1456 3836 +f 225 246 2976 +f 2976 1247 225 +f 147 246 1112 +f 1112 2790 147 +f 2976 246 147 +f 1577 1247 2020 +f 2020 2202 1577 +f 1247 2976 1673 +f 1673 2020 1247 +f 1699 34 2395 +f 2020 1673 2202 +f 2007 3365 1680 +f 3945 2395 1885 +f 3130 2202 852 +f 2707 1885 1605 +f 1605 316 2707 +f 3130 852 2185 +f 1071 1902 3836 +f 1902 1071 3598 +f 3213 2185 3491 +f 852 3491 2185 +f 1902 3598 2508 +f 3491 852 3149 +f 2508 71 1902 +f 3167 1605 3127 +f 71 2159 1902 +f 3311 3491 899 +f 71 107 2159 +f 899 272 3311 +f 3311 272 2017 +f 272 1975 2017 +f 347 3357 1618 +f 71 2508 3258 +f 272 901 1975 +f 2237 3894 3456 +f 3837 319 2805 +f 2805 319 71 +f 3606 58 3040 +f 901 451 2827 +f 901 2827 1975 +f 451 901 304 +f 1950 2675 2701 +f 2007 2701 1139 +f 3824 2770 304 +f 1017 3515 3223 +f 304 901 1444 +f 304 1444 3824 +f 1460 3824 2825 +f 1444 1741 2825 +f 2825 3824 1444 +f 2007 2348 2509 +f 2976 147 1673 +f 1741 1444 1483 +f 1483 1369 1741 +f 852 2202 1673 +f 3730 3867 3583 +f 2912 499 3730 +f 136 278 3578 +f 2520 278 2924 +f 499 2912 1136 +f 826 3290 1396 +f 278 2520 3578 +f 852 1673 254 +f 2924 2504 2520 +f 3192 788 1416 +f 3192 1416 3896 +f 1950 1358 1600 +f 1471 480 781 +f 3896 1416 3437 +f 480 2368 568 +f 3437 1416 2159 +f 993 499 1136 +f 2526 1471 2924 +f 2526 480 1471 +f 70 3581 943 +f 899 901 272 +f 335 2330 3034 +f 2362 682 499 +f 1600 399 1618 +f 347 1618 1649 +f 594 2781 3223 +f 2915 1600 1618 +f 2678 3730 682 +f 1340 1017 3055 +f 682 1219 2678 +f 2466 1118 843 +f 847 256 319 +f 256 107 319 +f 256 2415 107 +f 256 1163 2415 +f 2159 2415 2450 +f 1163 1118 2466 +f 843 3944 2466 +f 2805 2594 3606 +f 2805 3606 3837 +f 1163 3456 2450 +f 2466 3944 1331 +f 2415 2159 107 +f 494 3007 2959 +f 3040 1118 847 +f 1118 3040 843 +f 2415 1163 2450 +f 847 1163 256 +f 107 71 319 +f 847 319 3837 +f 3040 3837 3606 +f 847 3837 3040 +f 58 843 3040 +f 1163 847 1118 +f 2466 1331 494 +f 494 1331 3007 +f 2885 2847 270 +f 3796 2892 2733 +f 2892 3796 513 +f 1141 3314 3226 +f 3226 3314 462 +f 462 270 3226 +f 3459 270 2847 +f 412 3328 3010 +f 1141 88 513 +f 88 1141 3226 +f 8 56 2214 +f 2892 88 2733 +f 88 2892 513 +f 2501 3274 1450 +f 387 275 2813 +f 503 3448 2847 +f 8 3630 853 +f 3459 2765 3226 +f 779 275 387 +f 3448 3459 2847 +f 3625 2733 88 +f 969 503 8 +f 2501 969 8 +f 387 2813 3206 +f 2501 8 853 +f 3625 1225 2733 +f 1225 3625 2813 +f 888 2501 1450 +f 3226 2765 88 +f 1085 3459 3448 +f 1085 2765 3459 +f 888 1450 3568 +f 3679 3448 503 +f 3679 503 969 +f 969 2501 888 +f 3679 969 1221 +f 2276 1923 82 +f 439 779 387 +f 932 2055 3206 +f 3625 1178 3206 +f 1178 1260 665 +f 2375 2983 1176 +f 2987 2523 2326 +f 1860 3074 2111 +f 3679 1176 2983 +f 1176 3113 2375 +f 3772 537 2947 +f 3871 2055 932 +f 888 2887 3626 +f 665 537 3871 +f 942 1665 2033 +f 942 537 3772 +f 3772 2947 251 +f 1260 621 2947 +f 3206 2055 387 +f 665 932 1178 +f 1178 932 3206 +f 1221 3626 2326 +f 537 665 2947 +f 3871 932 665 +f 3626 2987 2326 +f 592 2523 149 +f 2276 2563 2887 +f 2033 987 3871 +f 665 1260 2947 +f 1260 2072 2423 +f 3625 2072 1178 +f 3679 1221 1176 +f 3626 1221 969 +f 1260 1178 2072 +f 3704 942 3772 +f 3704 3772 251 +f 2743 3704 251 +f 2743 251 1259 +f 3177 1749 763 +f 3074 3177 2111 +f 2111 1315 2740 +f 3850 2989 1315 +f 592 2730 2523 +f 2864 1259 251 +f 1259 2864 1625 +f 1259 1625 3177 +f 1625 2111 3177 +f 848 3850 1315 +f 3113 1176 1795 +f 3850 848 2730 +f 839 1158 3028 +f 2440 2913 1814 +f 47 3907 3795 +f 3843 3241 1487 +f 3241 
3843 445 +f 560 350 445 +f 2128 2268 2440 +f 3887 2440 587 +f 2128 2440 3887 +f 3887 587 2053 +f 3870 1061 2053 +f 3870 3795 1061 +f 3795 3907 1250 +f 1250 3907 1487 +f 1487 2198 2757 +f 2757 2198 3241 +f 1809 2757 3241 +f 3579 505 1809 +f 505 3579 2503 +f 2024 2503 3628 +f 2024 3628 2641 +f 3628 350 2641 +f 818 2641 350 +f 65 3853 2062 +f 2062 1199 65 +f 1814 2053 587 +f 1814 3870 2053 +f 3870 47 3795 +f 3843 1487 3907 +f 3241 2198 1487 +f 445 1809 3241 +f 3579 1809 445 +f 2503 3579 350 +f 2503 350 3628 +f 476 3307 3897 +f 476 2755 3307 +f 47 3843 3907 +f 839 1773 1407 +f 919 2429 1853 +f 1853 1911 919 +f 1496 1911 1853 +f 1008 1287 307 +f 3681 2113 2913 +f 907 1008 1077 +f 2232 741 2602 +f 211 1853 3252 +f 1096 3252 3406 +f 2042 2229 2113 +f 350 560 818 +f 3597 1096 907 +f 3681 3597 169 +f 2004 1172 3115 +f 2113 47 1814 +f 1188 741 1737 +f 1158 2602 3616 +f 3406 3252 1853 +f 3912 3597 907 +f 3597 3912 169 +f 3884 445 3843 +f 83 3597 3681 +f 83 3681 2913 +f 3406 3616 1868 +f 3681 2042 2113 +f 1096 83 3252 +f 2229 47 2113 +f 2229 3884 3843 +f 1287 1008 2004 +f 1008 1172 2004 +f 2232 1737 741 +f 741 3115 2602 +f 2913 2113 1814 +f 3597 83 1096 +f 902 1013 1287 +f 2787 3024 1714 +f 1714 3569 2787 +f 3159 1095 78 +f 78 1095 3603 +f 2171 3024 1079 +f 2547 3379 2595 +f 2547 2595 86 +f 3024 2171 2342 +f 553 3379 2342 +f 86 2595 3603 +f 3603 1095 3096 +f 1095 313 3096 +f 1407 3587 2251 +f 1407 1773 3587 +f 1773 2891 3587 +f 2891 839 3028 +f 1048 3028 1936 +f 3018 1048 1936 +f 2062 3018 1936 +f 2062 3853 3018 +f 65 706 3853 +f 706 65 1199 +f 1199 2235 706 +f 2235 1199 2429 +f 2235 2429 1298 +f 2429 919 1298 +f 78 553 3348 +f 1496 2958 1911 +f 2647 767 3569 +f 767 2647 807 +f 2268 2958 211 +f 3324 3024 2787 +f 1079 143 3348 +f 3348 2171 1079 +f 553 2342 2171 +f 3379 553 2595 +f 3159 313 1095 +f 3159 2251 313 +f 3159 1407 2251 +f 839 2891 1773 +f 1158 1936 3028 +f 211 2958 1853 +f 3324 2787 3569 +f 567 3569 767 +f 3324 143 3024 +f 567 3324 3569 +f 553 2171 3348 +f 78 2595 553 +f 2595 78 3603 +f 78 3348 298 +f 3874 78 298 +f 2557 298 3324 +f 2669 122 1286 +f 924 137 1175 +f 2144 1737 3511 +f 3324 1024 2557 +f 2557 3874 298 +f 3511 1737 673 +f 2232 2602 3159 +f 1737 2232 673 +f 3535 567 2346 +f 684 1185 3915 +f 3525 1175 1133 +f 3525 924 1175 +f 2341 1185 3525 +f 2484 3525 1037 +f 1328 1024 3324 +f 3535 137 567 +f 1024 3874 2557 +f 1587 3915 1494 +f 45 1587 1494 +f 673 2978 684 +f 1328 3874 1024 +f 1175 137 3535 +f 3324 567 137 +f 673 3874 3384 +f 924 3384 1328 +f 122 3511 1286 +f 1494 3915 2341 +f 684 3915 1286 +f 1494 2341 2821 +f 1185 302 924 +f 684 2978 302 +f 3525 1185 924 +f 302 3384 924 +f 2978 673 3384 +f 2978 3384 302 +f 2341 3915 1185 +f 2149 1587 45 +f 3715 2845 3131 +f 495 3170 515 +f 1951 1523 2845 +f 1951 2845 3715 +f 2845 1788 3131 +f 1788 3425 1461 +f 1991 515 661 +f 3426 413 1690 +f 1690 3425 3426 +f 1788 2149 3426 +f 3005 661 515 +f 661 3798 2105 +f 661 3005 3798 +f 3048 1370 934 +f 3863 3798 3005 +f 36 1523 2767 +f 1991 661 1523 +f 3426 3425 1788 +f 1788 2079 1031 +f 36 1991 1523 +f 965 1370 2103 +f 3058 2144 2077 +f 3068 3058 902 +f 3131 1788 1461 +f 2822 515 1991 +f 3798 122 2105 +f 3715 589 2767 +f 3346 3058 3863 +f 3048 36 2103 +f 3426 2149 1377 +f 3715 3131 589 +f 1523 2079 2845 +f 2110 3170 495 +f 2078 3863 3005 +f 2105 122 2669 +f 2845 2079 1788 +f 36 3048 1991 +f 2767 1712 36 +f 3048 2822 1991 +f 1370 3048 2103 +f 3058 3068 2144 +f 3863 2077 3798 +f 3863 3058 2077 +f 1951 2767 1523 +f 2767 1951 3715 +f 2110 3171 3170 +f 3528 1059 2653 +f 2458 3041 3594 +f 3234 1370 965 +f 92 
2110 3580 +f 3234 934 1370 +f 2247 3171 2714 +f 3234 2653 934 +f 2653 3234 3528 +f 1059 1821 2653 +f 934 2653 2822 +f 2247 2749 3171 +f 3171 2110 92 +f 2769 2581 3777 +f 715 3439 2581 +f 715 1063 3439 +f 3937 2581 2769 +f 2581 3937 715 +f 1441 2454 3439 +f 689 2029 3468 +f 217 1390 2271 +f 1390 23 2271 +f 392 23 1025 +f 654 3629 769 +f 23 1390 1025 +f 3629 654 3465 +f 392 1025 3087 +f 2732 3665 2593 +f 3665 2732 1337 +f 498 3665 1337 +f 392 3087 3006 +f 607 498 1337 +f 607 1337 3560 +f 3560 1337 2217 +f 878 2564 1906 +f 856 3873 2856 +f 856 3006 3087 +f 2856 3873 2384 +f 1131 2856 2384 +f 2856 1131 2474 +f 2412 3613 2384 +f 1952 2242 1325 +f 2412 514 3613 +f 3931 2436 1072 +f 3513 3503 1072 +f 2384 3613 1131 +f 2936 1784 395 +f 3918 395 892 +f 3918 892 3056 +f 2769 3777 575 +f 1131 514 2717 +f 1131 2717 3143 +f 2717 1417 3143 +f 3143 1417 598 +f 3199 1979 1265 +f 1979 3199 3273 +f 398 3143 598 +f 3143 398 1131 +f 1069 398 598 +f 1069 598 2335 +f 3559 1847 2335 +f 1847 1069 2335 +f 3559 2335 487 +f 1464 124 487 +f 124 1464 2839 +f 1683 2672 2839 +f 1900 487 124 +f 206 1683 89 +f 3710 3382 2280 +f 3710 947 3382 +f 3336 1218 718 +f 771 1539 2000 +f 3062 3665 607 +f 3062 607 3560 +f 3062 3560 3655 +f 878 1537 3655 +f 1537 878 1271 +f 3655 1537 3062 +f 1265 3933 2100 +f 2505 3199 2100 +f 1271 2227 2804 +f 2804 2227 2742 +f 1537 1271 2804 +f 3661 1299 2742 +f 1744 1660 2274 +f 1693 575 3777 +f 3349 3669 3445 +f 363 3445 608 +f 2122 2462 2940 +f 3330 3385 3374 +f 3336 718 622 +f 3807 433 2122 +f 1265 2100 3199 +f 3199 2505 3273 +f 2122 771 3309 +f 1470 490 38 +f 490 1394 38 +f 3599 2533 1218 +f 1744 920 2426 +f 2462 608 3747 +f 3591 920 1642 +f 1642 2274 953 +f 611 3689 1025 +f 718 3045 3816 +f 2953 487 2335 +f 2953 1464 487 +f 206 89 2280 +f 277 3710 2280 +f 3710 277 947 +f 947 277 1596 +f 1470 38 2431 +f 2613 2417 170 +f 3629 1693 3777 +f 3591 654 769 +f 1642 654 3591 +f 800 3330 647 +f 1049 250 1596 +f 1049 2593 250 +f 2732 2593 1049 +f 498 607 3665 +f 2217 3655 3560 +f 3655 2217 878 +f 1906 1271 878 +f 1906 2227 1271 +f 2215 2742 2227 +f 2742 2215 3286 +f 2290 3661 2742 +f 2290 504 1756 +f 504 2151 1756 +f 2742 3286 504 +f 504 2061 3162 +f 2151 504 3162 +f 2151 3162 2266 +f 2436 3931 1325 +f 2100 3445 363 +f 2852 3162 1952 +f 395 1784 892 +f 2266 3162 2852 +f 363 608 2462 +f 1325 3742 1952 +f 3742 1325 3931 +f 3742 3931 3503 +f 3825 2219 2122 +f 3503 3931 1072 +f 2505 2814 3273 +f 2852 1952 3742 +f 3918 2936 395 +f 1961 3513 1851 +f 3918 3056 1304 +f 1304 1851 3918 +f 3933 1304 3056 +f 1265 506 3933 +f 506 1265 1979 +f 3273 2814 2506 +f 2506 1304 506 +f 2506 506 1979 +f 1979 3273 2506 +f 2506 2814 1897 +f 38 1744 2426 +f 38 1660 1744 +f 3855 481 800 +f 769 3629 1593 +f 217 2029 1390 +f 3465 1693 3629 +f 2462 3747 2940 +f 647 3330 79 +f 3825 2122 3309 +f 1660 170 2274 +f 3385 3330 481 +f 485 622 3816 +f 718 1218 2533 +f 2533 3045 718 +f 2431 38 2426 +f 1660 38 1394 +f 1660 1394 1042 +f 1920 2325 3190 +f 2613 3441 2417 +f 1492 3855 800 +f 3747 608 3669 +f 1025 1390 611 +f 3087 1025 3689 +f 3816 622 718 +f 2533 3599 2125 +f 277 2280 89 +f 1596 277 1049 +f 2471 2000 2552 +f 2940 198 2122 +f 771 2122 1539 +f 3330 800 481 +f 920 2274 1642 +f 1744 2274 920 +f 3732 3385 3246 +f 3385 3732 3374 +f 1383 3374 2894 +f 2524 1374 1383 +f 1374 3374 1383 +f 320 109 1698 +f 1833 485 1884 +f 1698 1374 2524 +f 109 1884 3280 +f 485 3280 1884 +f 485 1833 2614 +f 622 485 2614 +f 2614 1021 622 +f 875 3787 1302 +f 2995 3670 2399 +f 3670 2995 728 +f 1302 3336 622 +f 1302 622 1021 +f 3787 3336 1302 +f 3336 3787 2399 +f 
1218 3336 2399 +f 3670 1218 2399 +f 3599 1218 3670 +f 728 3599 3670 +f 1732 3599 728 +f 2125 3599 1732 +f 2125 299 2471 +f 1732 299 2125 +f 2000 299 379 +f 771 379 2180 +f 3309 2180 3604 +f 2514 3604 2180 +f 2514 2507 3604 +f 1872 2507 3802 +f 3802 3044 1872 +f 3444 1897 2814 +f 2814 2505 1505 +f 3444 1505 3825 +f 1505 3444 2814 +f 1297 1660 1042 +f 2613 1660 1297 +f 3030 1920 3362 +f 3362 3441 2994 +f 3441 2613 2994 +f 1920 3441 3362 +f 3639 2325 3030 +f 3497 2325 3639 +f 1253 3497 3639 +f 3497 1253 2992 +f 1492 3497 2992 +f 1492 2992 394 +f 394 2992 2682 +f 2095 533 2682 +f 2682 533 394 +f 2095 178 533 +f 44 178 2095 +f 178 44 191 +f 481 191 2828 +f 3246 3385 2828 +f 192 3246 2828 +f 192 3071 3246 +f 3246 3071 3732 +f 3732 3071 2774 +f 2187 3374 3732 +f 2187 3732 2774 +f 1383 2894 2461 +f 1383 2461 2524 +f 2598 1698 2524 +f 2524 2461 2598 +f 320 1698 2598 +f 677 109 320 +f 109 677 1884 +f 1833 3004 2614 +f 2614 3004 2754 +f 657 320 2598 +f 3615 363 433 +f 677 320 657 +f 677 3386 1884 +f 3615 433 3807 +f 1833 1884 3386 +f 3386 2177 1833 +f 2754 1021 2614 +f 101 1302 1021 +f 101 1604 1302 +f 1302 1604 875 +f 3787 875 2969 +f 2969 2399 3787 +f 3150 728 2995 +f 728 3150 1732 +f 1021 2754 3098 +f 101 1021 3098 +f 2995 2399 3817 +f 3150 2995 3817 +f 3817 1732 3150 +f 299 1732 3725 +f 3725 1732 3817 +f 2000 2471 299 +f 771 2000 379 +f 3309 771 2180 +f 1872 3309 2507 +f 3825 3309 1872 +f 3044 3825 1872 +f 3444 3825 3044 +f 2507 2514 3155 +f 3155 3802 2507 +f 3044 1897 3444 +f 1897 3044 3802 +f 2100 3615 2505 +f 2219 3615 3807 +f 1505 2505 2219 +f 2219 3807 2122 +f 2505 3615 2219 +f 3190 2417 3441 +f 1920 3190 3441 +f 3030 2325 1920 +f 394 533 1492 +f 1492 533 3855 +f 3374 2187 2894 +f 191 3855 178 +f 481 3855 191 +f 3385 481 2828 +f 433 363 2462 +f 433 2462 2122 +f 2219 3825 1505 +f 2613 1297 2994 +f 2994 1297 2052 +f 3604 2507 3309 +f 3855 533 178 +f 1579 3228 489 +f 28 1772 648 +f 1570 2512 3514 +f 3514 3137 24 +f 1790 1255 55 +f 1790 55 1067 +f 2512 1966 1981 +f 2512 1570 1966 +f 24 1934 1570 +f 1934 24 3137 +f 1889 2314 3498 +f 3498 93 1556 +f 1556 93 2382 +f 3487 2382 93 +f 3888 2382 3012 +f 2382 1244 3012 +f 2196 3012 1850 +f 2196 1850 3261 +f 3369 1889 3498 +f 3369 3498 1556 +f 2382 3888 1556 +f 3137 1433 1934 +f 1837 2102 2755 +f 349 3656 3768 +f 1772 160 694 +f 2703 434 1557 +f 434 2703 1047 +f 694 1557 434 +f 1047 93 434 +f 93 1047 1835 +f 981 57 1835 +f 93 1835 57 +f 3880 3542 57 +f 3733 2102 1255 +f 2018 57 3542 +f 2018 93 57 +f 3228 1579 3316 +f 3228 3316 2472 +f 1255 3366 55 +f 1067 55 3145 +f 3145 1981 1067 +f 1570 3514 24 +f 2119 3307 2755 +f 3733 2840 3471 +f 1151 349 1837 +f 1837 349 3366 +f 1151 2021 349 +f 2021 2067 3768 +f 3656 2021 3768 +f 349 3768 3145 +f 390 3145 3768 +f 1772 28 160 +f 160 28 1557 +f 2314 1772 694 +f 694 160 1557 +f 3498 2314 93 +f 1786 1835 1047 +f 2018 3446 766 +f 3487 2018 766 +f 93 2018 3487 +f 1244 2382 3487 +f 2067 28 648 +f 867 28 2067 +f 867 756 28 +f 3310 3156 442 +f 2021 3656 349 +f 1255 2102 3366 +f 349 3145 3366 +f 390 2067 648 +f 1151 1837 1235 +f 1151 580 2021 +f 2512 3145 390 +f 1255 1790 1987 +f 1433 3137 1889 +f 28 2703 1557 +f 2703 1786 1047 +f 1244 1850 3012 +f 3012 1273 3888 +f 3012 2196 1273 +f 253 3829 3738 +f 525 1954 2402 +f 525 3572 1954 +f 2706 253 3738 +f 2706 1780 253 +f 3829 1484 3738 +f 2371 3310 1423 +f 698 723 794 +f 723 2076 794 +f 2982 1780 2706 +f 762 1402 1177 +f 1177 1402 2982 +f 2982 2706 1177 +f 3310 442 1423 +f 1035 525 2402 +f 1531 1538 1571 +f 1849 271 3748 +f 1617 237 325 +f 1531 1996 2700 +f 325 1531 1617 +f 
2700 1617 1531 +f 1538 325 2028 +f 325 1538 1531 +f 2736 2024 2641 +f 2119 3897 3307 +f 3342 891 1947 +f 2784 2197 3567 +f 3567 2128 2784 +f 2939 1269 2784 +f 2197 2784 1269 +f 3342 1947 2611 +f 489 2611 1947 +f 3316 3504 2736 +f 1232 2472 2736 +f 2736 2472 3316 +f 1232 2736 2225 +f 1579 489 3504 +f 3316 1579 3504 +f 237 3587 2891 +f 3028 1048 237 +f 1048 3018 2028 +f 2028 3018 3853 +f 2028 3853 706 +f 2028 706 2235 +f 2235 1298 635 +f 635 1298 919 +f 1911 635 919 +f 1911 2958 551 +f 1531 1571 1996 +f 2128 551 2268 +f 3748 1336 2809 +f 2939 3887 2053 +f 2053 1061 2939 +f 891 3795 1250 +f 2939 1061 891 +f 891 1250 1487 +f 1947 1487 2757 +f 3504 2757 1809 +f 3504 1809 505 +f 505 2503 3504 +f 3504 2503 2024 +f 3897 2225 2641 +f 3897 2119 2225 +f 2267 635 551 +f 635 1911 551 +f 1617 1657 2809 +f 2700 1657 1617 +f 2225 3471 1232 +f 2119 2102 2225 +f 271 1336 3748 +f 3587 237 1336 +f 2809 1336 237 +f 1538 2028 635 +f 635 2028 2235 +f 981 1102 57 +f 1102 3880 57 +f 1102 3542 3880 +f 1102 457 3542 +f 3542 457 3446 +f 2018 3542 3446 +f 3261 1194 2196 +f 3156 2196 1194 +f 766 1373 3021 +f 1194 3261 1850 +f 1373 457 1102 +f 766 3446 457 +f 1144 1194 1850 +f 1144 3156 1194 +f 442 3156 1144 +f 3021 981 1786 +f 457 1373 766 +f 2451 3487 766 +f 2451 1244 3487 +f 1399 1244 2451 +f 1373 1102 981 +f 1399 2451 3021 +f 1191 3721 3457 +f 1191 1426 3721 +f 1990 3852 3140 +f 3067 3811 2496 +f 1792 1191 3457 +f 1676 3657 2657 +f 3657 1676 3852 +f 3721 3021 1317 +f 3140 860 1990 +f 3703 1399 617 +f 3721 1426 617 +f 617 3021 3721 +f 3852 2168 3140 +f 375 794 2168 +f 1426 1191 1792 +f 1317 3457 3721 +f 1399 3021 617 +f 860 2657 1990 +f 375 2168 3852 +f 2168 1426 1792 +f 808 375 3852 +f 2168 3067 3140 +f 2496 1792 3457 +f 3067 2168 3811 +f 600 3703 617 +f 3811 2168 1792 +f 2168 600 1426 +f 1990 2657 3657 +f 3852 1990 3657 +f 1484 698 3738 +f 860 3140 3067 +f 3943 454 2798 +f 1177 808 3490 +f 449 454 559 +f 672 2173 1420 +f 454 3943 559 +f 2173 1314 1420 +f 190 3490 1314 +f 2798 1087 3943 +f 1629 155 762 +f 212 3864 2871 +f 449 559 3864 +f 1087 155 3943 +f 190 1177 3490 +f 3490 808 3852 +f 3773 3354 1421 +f 2463 497 1421 +f 1421 3186 111 +f 3354 955 3575 +f 984 3564 2788 +f 3100 692 984 +f 3575 3186 1421 +f 660 1490 1310 +f 111 2463 1421 +f 1163 2080 3773 +f 3354 3575 1421 +f 3506 2788 2403 +f 2959 812 2118 +f 2118 1249 955 +f 1423 442 1144 +f 1144 3703 1423 +f 600 2402 3703 +f 794 3738 698 +f 794 600 2168 +f 2076 1614 1035 +f 1614 525 1035 +f 1035 2402 600 +f 794 1035 600 +f 794 2076 1035 +f 1423 3703 2402 +f 3738 794 375 +f 1314 2173 190 +f 2706 3738 375 +f 3944 3007 1331 +f 375 808 2706 +f 672 1480 559 +f 3864 559 1881 +f 1629 1177 190 +f 2173 672 559 +f 3943 1629 2173 +f 190 2173 1629 +f 808 1177 2706 +f 155 1629 3943 +f 1480 1881 559 +f 559 3943 2173 +f 762 1177 1629 +f 984 497 2463 +f 111 3186 3100 +f 2725 1310 1490 +f 3506 2403 1881 +f 3864 1881 2871 +f 1619 812 2959 +f 1310 3506 660 +f 2725 3506 1310 +f 692 3100 3780 +f 2080 955 3354 +f 2959 2118 494 +f 660 3506 2577 +f 1881 2403 2871 +f 3506 2725 2788 +f 984 2788 2725 +f 497 2725 1490 +f 2463 3100 984 +f 3100 2463 111 +f 984 2725 497 +f 2577 1881 1480 +f 955 3100 3575 +f 3575 3100 3186 +f 3100 955 3780 +f 3564 984 692 +f 2118 955 2080 +f 1461 1690 1720 +f 3592 748 1257 +f 2522 1712 743 +f 2103 1712 2522 +f 2921 2103 1401 +f 965 2103 2921 +f 1712 589 743 +f 1712 2767 589 +f 1461 3425 1690 +f 1690 748 1720 +f 1690 1198 748 +f 743 2773 2522 +f 1461 1720 3131 +f 2522 2773 2116 +f 1401 3528 2921 +f 3786 1059 3528 +f 3786 3528 1401 +f 2921 3234 965 +f 3528 3234 
2921 +f 1821 1059 3786 +f 2920 2458 1473 +f 2458 1821 1473 +f 933 2233 3092 +f 1821 3786 1473 +f 1473 3786 1401 +f 2458 2665 3041 +f 1017 3163 642 +f 3898 2179 894 +f 3865 3814 894 +f 1923 3782 1514 +f 82 3865 2179 +f 341 82 2179 +f 2276 341 2563 +f 1811 2179 3898 +f 714 2563 341 +f 1581 309 3284 +f 689 3509 2291 +f 2734 3509 689 +f 714 341 1811 +f 894 2179 3865 +f 309 1581 1412 +f 2636 3538 1811 +f 1631 2291 3509 +f 309 3538 3284 +f 3538 702 3284 +f 309 1412 1631 +f 1631 1412 2291 +f 3509 2734 2360 +f 3509 2759 1631 +f 3509 2360 2759 +f 2360 2875 2759 +f 3814 1189 3462 +f 894 3814 1294 +f 1584 2516 2563 +f 2516 1584 149 +f 689 3468 1506 +f 149 2987 2516 +f 1127 1584 714 +f 714 1852 1127 +f 1852 714 309 +f 2220 149 1584 +f 1584 2563 714 +f 714 1811 309 +f 1584 1127 2817 +f 2771 1127 1852 +f 1584 2817 2220 +f 1127 1050 2817 +f 1050 1127 2771 +f 1852 1832 2771 +f 1852 879 1832 +f 1506 2875 689 +f 2360 2734 689 +f 2771 3265 1050 +f 2473 3265 2771 +f 2771 1832 2473 +f 879 1852 1506 +f 1852 309 2875 +f 1506 1852 2875 +f 689 2875 2360 +f 2519 2069 563 +f 2519 403 2069 +f 2519 145 403 +f 2284 3169 492 +f 2611 803 94 +f 1843 2519 563 +f 2519 3530 145 +f 3228 1843 940 +f 2197 2240 3567 +f 630 1269 845 +f 2611 940 3285 +f 2472 2519 1843 +f 2357 3183 1818 +f 630 2240 2197 +f 845 3342 2206 +f 3342 94 2206 +f 2611 489 940 +f 3228 940 489 +f 1232 3530 2519 +f 2267 492 2357 +f 1269 630 2197 +f 94 3342 2611 +f 2472 1843 3228 +f 1232 2519 2472 +f 492 3183 2357 +f 492 3169 2988 +f 2240 972 3169 +f 630 972 2240 +f 94 1026 2206 +f 94 803 1026 +f 803 2611 3285 +f 1538 2357 1818 +f 3653 1691 2196 +f 1973 1965 1409 +f 1409 3653 3310 +f 2644 1965 712 +f 712 1965 2027 +f 3310 2196 3156 +f 1973 1409 3310 +f 2027 1973 3610 +f 1973 2027 1965 +f 1409 1965 2644 +f 1409 2644 1691 +f 1409 1691 3653 +f 1273 2196 1691 +f 2027 3610 3844 +f 3829 253 1003 +f 3404 3112 3572 +f 3572 3112 2371 +f 698 1103 723 +f 1768 698 2708 +f 2708 698 1484 +f 3232 1973 3112 +f 3112 1973 3310 +f 2076 1103 1768 +f 2076 723 1103 +f 1003 2163 2708 +f 3829 2708 1484 +f 1954 3572 2371 +f 1614 3404 525 +f 3404 3572 525 +f 253 1780 1003 +f 2076 576 1614 +f 3112 3310 2371 +f 1780 2163 1003 +f 2708 3829 1003 +f 1588 2982 1402 +f 3577 3720 1087 +f 1087 3720 1588 +f 1372 449 3864 +f 2163 1780 2982 +f 1402 762 1588 +f 1372 3864 212 +f 1087 2798 3577 +f 454 3577 2798 +f 155 1588 762 +f 1588 155 1087 +f 1463 1470 2431 +f 3381 583 1263 +f 3381 1263 3584 +f 1956 3619 3584 +f 3381 3619 411 +f 411 3347 3381 +f 3584 49 1729 +f 3584 1729 1956 +f 1956 1729 2956 +f 797 1956 2956 +f 1946 2942 3550 +f 2956 3602 2065 +f 3602 3835 2447 +f 2091 2447 735 +f 2666 2447 2091 +f 2666 3602 2447 +f 420 3602 2666 +f 511 2666 2091 +f 511 2091 1940 +f 2091 1246 1940 +f 1246 2091 2662 +f 1246 1886 2019 +f 1169 3380 2320 +f 2320 3380 2294 +f 2294 3380 3221 +f 3221 663 708 +f 708 663 2049 +f 1262 2294 3221 +f 2049 2942 708 +f 2930 2942 2049 +f 2942 2930 3550 +f 1262 3221 708 +f 1463 708 2942 +f 1262 1463 1725 +f 1262 708 1463 +f 2942 3256 1463 +f 3256 1470 1463 +f 1470 3256 490 +f 3256 2942 1946 +f 3256 1946 2580 +f 906 3256 2580 +f 490 3256 906 +f 1394 490 906 +f 1042 1394 906 +f 1042 906 2843 +f 1700 2994 2052 +f 1042 2843 1297 +f 1297 2843 2052 +f 2876 630 845 +f 1242 2206 1026 +f 2206 1242 845 +f 1282 3820 1026 +f 3820 1242 1026 +f 1282 1026 803 +f 1883 1282 3285 +f 112 3285 940 +f 1843 2221 471 +f 940 471 112 +f 1609 931 471 +f 1609 471 2221 +f 931 197 370 +f 1883 3285 112 +f 2223 1883 112 +f 471 1677 112 +f 1677 471 931 +f 2223 112 1677 +f 931 2343 1677 +f 2343 931 2087 +f 
2087 931 370 +f 550 370 1202 +f 2093 550 2213 +f 550 1202 2213 +f 2786 2093 2568 +f 768 744 960 +f 408 2568 2093 +f 2568 408 744 +f 2568 744 768 +f 2568 768 2776 +f 2087 370 550 +f 2786 2087 550 +f 2786 550 2093 +f 3347 2776 2262 +f 2776 3347 3698 +f 3347 2262 3267 +f 3267 3033 3347 +f 3698 2568 2776 +f 411 3698 3347 +f 471 940 1843 +f 425 1636 838 +f 425 2332 1636 +f 2332 425 2908 +f 1058 2608 2908 +f 1058 2908 425 +f 2346 1458 1133 +f 2346 1133 1175 +f 3002 1037 2854 +f 1133 1037 3525 +f 2484 1037 3002 +f 3002 1030 2484 +f 2346 2608 1458 +f 1257 2821 3592 +f 3355 1030 705 +f 45 1494 2821 +f 1030 2821 2484 +f 1037 1458 2854 +f 1257 45 2821 +f 2608 1058 1458 +f 2821 2341 2484 +f 1458 1037 1133 +f 1175 3535 2346 +f 3525 2484 2341 +f 1377 2149 1257 +f 413 1198 1690 +f 413 3426 1198 +f 748 1377 1257 +f 1198 1377 748 +f 1377 1198 3426 +f 2525 1912 3372 +f 1912 2207 627 +f 3164 627 2207 +f 1138 3164 2207 +f 2870 3834 3265 +f 3265 2473 2870 +f 185 2989 3850 +f 185 592 176 +f 592 185 3850 +f 2381 176 592 +f 2381 2220 2525 +f 2220 2817 2525 +f 1912 2525 2817 +f 2207 1912 2817 +f 2817 1050 2207 +f 1138 2207 1050 +f 3834 1138 1050 +f 3265 3834 1050 +f 1832 2870 2473 +f 3569 1714 2063 +f 2647 3569 2063 +f 2342 2547 2063 +f 2547 2342 3379 +f 271 2547 86 +f 86 3603 271 +f 3603 3096 271 +f 1336 271 313 +f 1336 313 2251 +f 2647 1727 807 +f 1727 2063 1849 +f 2063 2547 271 +f 313 271 3096 +f 1714 3024 2063 +f 1849 2063 271 +f 1433 1130 2858 +f 2858 1934 1433 +f 1433 2298 1130 +f 2298 1433 3369 +f 3369 1556 619 +f 1570 2261 1966 +f 1570 1934 2189 +f 2861 2298 619 +f 3369 619 2298 +f 3844 1273 712 +f 712 1273 1691 +f 1691 2644 712 +f 1556 687 619 +f 1273 3278 687 +f 687 3888 1273 +f 1934 2858 2189 +f 3888 687 1556 +f 1861 2861 619 +f 712 2027 3844 +f 2741 2043 1285 +f 1657 2043 2741 +f 1657 1545 2043 +f 76 1285 2043 +f 76 2043 2259 +f 2259 2043 1545 +f 1657 1942 1545 +f 2259 1545 1658 +f 1657 2700 1942 +f 1571 2685 1996 +f 1571 3344 2685 +f 2700 1996 1942 +f 1538 3344 1571 +f 3344 1538 1818 +f 643 3473 2843 +f 3776 3473 2937 +f 3380 116 479 +f 663 479 2746 +f 3221 479 663 +f 663 2746 2930 +f 3550 2930 2746 +f 3550 2746 520 +f 140 520 2681 +f 3776 2052 2843 +f 116 3110 479 +f 520 2746 643 +f 2681 520 643 +f 2681 643 1528 +f 1528 643 2843 +f 3394 116 3380 +f 479 3221 3380 +f 663 2930 2049 +f 1946 3550 520 +f 520 140 1946 +f 2580 140 2681 +f 140 2580 1946 +f 1528 2580 2681 +f 2843 2580 1528 +f 906 2580 2843 +f 1987 3938 2544 +f 3938 3298 2544 +f 3298 3938 2918 +f 3298 294 2544 +f 3298 1405 2234 +f 2234 2188 3298 +f 3178 3835 2956 +f 3178 1405 3298 +f 3178 3298 2918 +f 3350 1405 3178 +f 3540 2541 802 +f 758 3840 1626 +f 3840 3057 1626 +f 1626 3178 2956 +f 1626 3350 3178 +f 2234 1405 3350 +f 2234 3057 783 +f 3840 2188 783 +f 3840 802 2188 +f 2861 2541 3869 +f 2541 544 3869 +f 783 3057 3840 +f 2234 3350 3057 +f 802 3840 3540 +f 2188 2234 783 +f 2541 1861 802 +f 2861 1861 2541 +f 619 687 210 +f 1861 2188 802 +f 2478 2544 151 +f 1987 2544 2478 +f 1987 2478 3148 +f 2966 1832 3422 +f 834 3422 879 +f 925 834 879 +f 925 1506 241 +f 241 1506 3468 +f 879 3422 1832 +f 879 1506 925 +f 3178 1159 3835 +f 3178 2918 1159 +f 1159 2918 735 +f 3835 1159 2447 +f 1159 735 2447 +f 3049 735 2261 +f 2091 735 3049 +f 3049 2261 64 +f 2189 3694 64 +f 3694 2091 64 +f 3694 2189 2858 +f 3694 2662 2091 +f 2662 3694 2858 +f 2662 2858 1130 +f 2298 2861 1130 +f 2956 3835 3602 +f 3049 64 2091 +f 1886 2662 1130 +f 1886 1246 2662 +f 2478 509 1212 +f 2840 3733 1222 +f 3815 118 3733 +f 3733 1255 3815 +f 118 1615 97 +f 118 583 1615 +f 583 118 3815 +f 
3815 3148 1263 +f 97 1222 118 +f 3733 118 1222 +f 3815 1255 3148 +f 1903 3216 361 +f 1903 361 6 +f 3317 3216 1903 +f 2941 3317 1903 +f 1656 3216 3494 +f 3299 3494 3216 +f 146 2410 1292 +f 2410 146 2120 +f 3079 1454 2922 +f 3079 3574 1454 +f 3574 3079 2622 +f 1292 2622 2991 +f 1292 2991 3890 +f 3574 2622 1292 +f 1292 3890 146 +f 1316 1292 2410 +f 3574 1710 1454 +f 1292 1316 3574 +f 3574 3297 1710 +f 1628 1710 3297 +f 1316 3297 3574 +f 838 1636 1227 +f 1227 1636 18 +f 18 1555 625 +f 2800 1227 18 +f 625 2800 18 +f 3936 1227 2800 +f 2800 2991 3936 +f 625 1555 2622 +f 2991 2800 3890 +f 3353 3936 2991 +f 3353 2964 3936 +f 1227 3936 2964 +f 1555 18 2332 +f 2809 2071 3748 +f 2071 2741 338 +f 2071 2819 1822 +f 2819 2071 338 +f 2071 1822 3748 +f 3748 1822 2984 +f 2809 2741 2071 +f 2809 1657 2741 +f 1212 509 3257 +f 3563 175 3748 +f 3727 527 3411 +f 175 3563 386 +f 3748 175 1849 +f 1702 3025 1913 +f 1702 361 3025 +f 2221 361 1702 +f 361 2221 219 +f 219 1609 361 +f 410 6 361 +f 361 1609 2550 +f 2550 410 361 +f 361 1656 3025 +f 1656 361 3216 +f 1913 2575 1702 +f 1913 1871 2575 +f 2575 2546 1702 +f 1702 2546 2221 +f 3563 3748 2984 +f 1626 2956 484 +f 1981 3938 1067 +f 1067 3938 1987 +f 1981 2918 3938 +f 1987 1790 1067 +f 1232 1145 3557 +f 1145 1232 3478 +f 3478 1232 3471 +f 2919 2262 701 +f 701 1771 2919 +f 2840 1771 701 +f 2840 3267 1771 +f 3267 2840 1222 +f 3267 911 1771 +f 3471 279 3478 +f 3471 3770 279 +f 3471 701 3770 +f 2840 701 3471 +f 3036 2984 3532 +f 1822 3532 2984 +f 1822 2819 3532 +f 2819 338 1285 +f 338 2741 1285 +f 3532 2819 1285 +f 3036 3532 76 +f 76 3532 1285 +f 3317 1094 1638 +f 3590 461 2296 +f 799 2221 2546 +f 2704 2550 799 +f 2550 1609 799 +f 799 1609 2221 +f 2157 410 2550 +f 2550 2704 2157 +f 1527 6 3065 +f 1527 3065 1094 +f 1527 1094 2941 +f 2157 1094 410 +f 410 1094 3065 +f 1094 3317 2941 +f 3317 1638 145 +f 461 3891 1638 +f 1638 3891 145 +f 461 3590 3891 +f 2499 3647 2296 +f 1130 2861 1382 +f 110 1382 2861 +f 110 3394 1382 +f 1130 1382 1886 +f 2019 1382 3394 +f 1169 2019 3380 +f 1849 3411 527 +f 1849 175 3411 +f 1869 3299 1750 +f 1750 3299 3259 +f 3299 1869 158 +f 158 3257 3299 +f 3257 3494 3299 +f 2608 767 2908 +f 567 767 2608 +f 2346 567 2608 +f 527 1515 1727 +f 807 2964 1863 +f 1863 2964 3353 +f 2964 1727 1515 +f 1771 1212 2919 +f 1212 1771 911 +f 911 228 1212 +f 2919 1212 3257 +f 1694 2919 3257 +f 1694 3257 1630 +f 1966 2261 1981 +f 1981 735 2918 +f 735 1981 2261 +f 1570 64 2261 +f 1570 2189 64 +f 2908 1555 2332 +f 1555 2908 1863 +f 2991 2622 1863 +f 1863 2622 1555 +f 425 165 1058 +f 3788 2779 3360 +f 2779 1512 3360 +f 1740 1263 1512 +f 1263 1740 3584 +f 3584 1392 49 +f 3584 1740 1392 +f 1729 49 1392 +f 3869 3110 110 +f 110 3110 116 +f 110 116 3394 +f 3869 544 3110 +f 110 2861 3869 +f 3827 1212 1429 +f 1615 1429 3788 +f 1429 1615 583 +f 583 3827 1429 +f 3827 3148 2478 +f 2478 1212 3827 +f 3827 1263 3148 +f 1512 3827 583 +f 1512 1263 3827 +f 807 1863 2908 +f 1863 3353 2991 +f 807 1727 2964 +f 1694 3770 768 +f 768 3770 2776 +f 2776 3770 701 +f 2262 2776 701 +f 3557 1397 3530 +f 1232 3557 3530 +f 403 1903 539 +f 2941 1903 403 +f 539 2069 403 +f 145 3530 1397 +f 1750 3259 1958 +f 1958 1054 1750 +f 1750 1054 1145 +f 1145 2213 1750 +f 1145 2158 2213 +f 2158 3647 158 +f 3478 408 2158 +f 3478 279 3016 +f 3016 744 408 +f 3478 3016 408 +f 1397 962 145 +f 1145 1054 3557 +f 3557 1054 1397 +f 3478 2158 1145 +f 1054 1958 3590 +f 1958 3158 3590 +f 2296 1397 3590 +f 1397 1054 3590 +f 370 3891 1202 +f 1202 3891 3158 +f 1202 3158 1958 +f 1202 1958 2213 +f 3647 2093 2213 +f 962 3299 3216 +f 
3317 962 3216 +f 3891 3299 962 +f 3299 3891 3259 +f 145 962 3317 +f 2331 1694 1630 +f 744 2331 1630 +f 1800 768 2331 +f 1800 1694 768 +f 2262 2919 1694 +f 1694 1800 2262 +f 767 807 2908 +f 3033 2246 419 +f 1949 3788 97 +f 3267 2262 2246 +f 3267 2246 911 +f 911 2246 3033 +f 1949 3033 419 +f 1949 419 3788 +f 3788 419 2779 +f 3033 228 911 +f 1212 228 1429 +f 97 228 3033 +f 1222 97 3033 +f 3788 1429 228 +f 97 3788 228 +f 686 197 2941 +f 197 1527 2941 +f 370 197 686 +f 1222 3033 3267 +f 3347 3033 1949 +f 3360 1949 97 +f 3347 1949 3360 +f 563 931 1609 +f 931 563 2069 +f 197 931 2069 +f 686 2941 403 +f 686 403 145 +f 370 686 145 +f 370 145 3891 +f 219 2221 1843 +f 1843 1609 219 +f 1843 563 1609 +f 2069 6 410 +f 2069 539 6 +f 1397 3158 962 +f 2296 1750 2213 +f 2296 2213 1958 +f 2093 3647 408 +f 960 3016 279 +f 960 744 1630 +f 1694 279 3770 +f 960 1630 3016 +f 3347 3360 1512 +f 3347 1512 3381 +f 3815 1263 583 +f 3381 1512 583 +f 2158 158 1869 +f 2213 2158 1869 +f 3647 2213 1869 +f 2296 3647 1750 +f 3647 1869 1750 +f 279 2331 960 +f 279 1694 2331 +f 3259 3891 3590 +f 3065 197 410 +f 410 197 2069 +f 3065 539 197 +f 3590 3158 3259 +f 962 3158 3891 +f 1615 3360 97 +f 3360 1615 3788 +f 2331 768 960 +f 539 3065 6 +f 6 1527 1903 +f 1397 3259 3158 +f 1397 2296 3259 +f 1958 3259 2296 +f 1527 539 1903 +f 1527 197 539 +f 3328 412 2997 +f 1935 2791 1890 +f 3010 3328 1406 +f 2572 3687 2791 +f 547 51 106 +f 106 1514 3782 +f 3672 893 2270 +f 988 68 3549 +f 835 1493 2914 +f 3401 280 75 +f 2881 458 2502 +f 620 976 751 +f 912 938 877 +f 547 1661 3854 +f 1825 3702 3193 +f 2907 3122 1289 +f 3197 2117 3543 +f 939 2154 3565 +f 877 938 332 +f 1856 893 3672 +f 248 628 3132 +f 3251 469 2890 +f 3736 3251 2890 +f 1856 1352 893 +f 444 3352 516 +f 393 470 2333 +f 3601 3881 1295 +f 3561 2795 297 +f 2832 2716 1318 +f 3648 3734 188 +f 130 1476 2890 +f 953 2769 575 +f 2769 953 1474 +f 127 2162 3121 +f 2769 1474 3937 +f 2270 893 711 +f 275 840 1225 +f 1225 840 3930 +f 3635 16 3157 +f 1507 1504 1564 +f 1564 2292 1507 +f 3692 968 3585 +f 968 2096 3451 +f 3558 1731 2615 +f 2635 429 423 +f 2731 2635 3312 +f 79 3561 585 +f 3689 3421 1977 +f 2808 2886 2721 +f 2766 3056 892 +f 647 79 585 +f 2258 2061 2199 +f 2291 1390 2029 +f 1784 2766 892 +f 3056 2766 3933 +f 1972 2460 2291 +f 611 2460 3860 +f 3421 3689 3860 +f 436 3816 172 +f 725 2023 405 +f 619 210 1861 +f 1046 2553 3846 +f 526 3232 3404 +f 1354 2153 331 +f 509 2943 223 +f 360 3434 1074 +f 884 2841 3800 +f 40 3450 683 +f 3222 2479 3279 +f 1091 2170 903 +f 165 2243 2854 +f 3431 3458 3390 +f 2935 1829 1387 +f 3139 2444 2404 +f 453 1948 1179 +f 3458 39 983 +f 820 194 2152 +f 3458 2626 3390 +f 2626 1477 3390 +f 3240 230 195 +f 3390 1477 2498 +f 2339 339 1888 +f 3924 2830 3119 +f 2386 2397 3329 +f 3253 2640 1312 +f 3073 193 772 +f 1437 681 1682 +f 2860 1785 1485 +f 3862 1612 2272 +f 1418 249 283 +f 2640 3253 216 +f 2860 1312 2640 +f 880 3764 3334 +f 2945 1752 577 +f 3608 3878 1898 +f 214 3608 1898 +f 2895 2272 2558 +f 1220 3322 2317 +f 610 1533 2293 +f 26 1256 3851 +f 103 3200 3764 +f 123 2317 2409 +f 1034 1779 2931 +f 1924 3440 3878 +f 1578 545 2086 +f 1721 2437 3304 +f 1722 3414 1606 +f 2351 2387 1819 +f 1931 874 2898 +f 2241 1432 1073 +f 3697 3688 3052 +f 3697 3052 3922 +f 3554 3128 1543 +f 1585 2216 1482 +f 967 956 2745 +f 3666 2323 1707 +f 3088 3236 268 +f 3856 1099 1345 +f 52 1929 3283 +f 1921 3586 1876 +f 1108 3375 1211 +f 3375 293 1211 +f 2857 709 591 +f 3586 1518 3524 +f 535 388 3262 +f 3623 1009 156 +f 3083 777 1092 +f 837 2026 817 +f 3375 1662 1955 +f 913 1633 2345 +f 114 364 
1449 +f 114 1032 66 +f 757 804 3208 +f 2643 1532 2970 +f 427 836 1532 +f 913 1715 428 +f 784 53 374 +f 218 150 897 +f 1152 897 1193 +f 2518 3146 650 +f 3358 3413 2998 +f 3664 1241 2785 +f 658 3678 2338 +f 2347 3649 3839 +f 2347 3839 1226 +f 1432 2903 2565 +f 3212 3662 2441 +f 2269 1386 541 +f 2898 2085 3645 +f 810 3397 2244 +f 3387 2389 3614 +f 859 1097 3554 +f 1092 777 518 +f 552 1322 3077 +f 3496 3589 3785 +f 1346 2746 2867 +f 3526 2820 2746 +f 1077 3912 907 +f 2137 818 560 +f 1140 2021 580 +f 12 1526 2612 +f 1229 3644 1738 +f 1229 1738 3904 +f 73 2911 1668 +f 2081 3562 2656 +f 704 3429 1893 +f 1870 672 1420 +f 3176 3084 1462 +f 3429 2866 2632 +f 1287 1013 307 +f 2388 1090 565 +f 833 2034 1013 +f 3769 2866 3429 +f 1217 1393 2789 +f 1393 1217 951 +f 1607 1104 593 +f 1689 129 1916 +f 2632 1893 3429 +f 571 1203 3652 +f 1210 1870 3452 +f 1121 3474 3195 +f 2668 501 2084 +f 1983 404 554 +f 1741 3306 1867 +f 3035 2247 3408 +f 3792 1567 1983 +f 1567 3792 2753 +f 3576 1121 3195 +f 1490 2364 497 +f 1196 1038 2579 +f 1709 2035 184 +f 1150 2796 1113 +f 634 3207 1196 +f 1901 2084 1121 +f 1526 12 2691 +f 3437 2147 3896 +f 1448 3192 2147 +f 1309 1688 1348 +f 3376 1319 2878 +f 1688 2244 3205 +f 443 174 52 +f 2414 3677 3566 +f 3677 2414 3641 +f 3641 2414 1845 +f 2432 678 3573 +f 1582 1495 1293 +f 898 3361 529 +f 346 3335 2487 +f 569 21 3735 +f 1321 1820 614 +f 1457 2664 3467 +f 3051 2307 2816 +f 3464 3686 790 +f 644 1686 2889 +f 3376 2878 1926 +f 1926 2878 3319 +f 3526 3179 2534 +f 3686 3464 171 +f 3464 2609 171 +f 1160 1655 1855 +f 391 1864 1019 +f 3486 3438 3479 +f 3926 3438 1309 +f 2464 2112 1540 +f 3942 1999 740 +f 3296 3751 973 +f 644 1164 3926 +f 308 3097 928 +f 2631 1164 1162 +f 3540 758 1243 +f 3086 1109 1162 +f 2616 1313 3051 +f 2356 862 3903 +f 1428 91 532 +f 3427 3416 3674 +f 3724 3070 1357 +f 1209 418 2581 +f 3674 3659 1254 +f 2321 1002 3105 +f 2148 1995 3076 +f 3064 3617 205 +f 3617 3064 2531 +f 729 3009 2497 +f 3820 1020 1242 +f 3009 729 2039 +f 1195 797 2065 +f 2101 1231 809 +f 3719 2772 987 +f 987 439 2055 +f 3308 2329 2529 +f 439 987 2772 +f 2772 700 2201 +f 2762 3358 2998 +f 3210 2687 2998 +f 3902 1366 345 +f 384 918 1381 +f 3091 2145 1739 +f 626 534 3541 +f 3126 2929 3830 +f 483 3902 1681 +f 168 959 305 +f 384 3892 918 +f 3892 384 3184 +f 139 201 3766 +f 3184 431 3892 +f 163 2897 904 +f 2852 1961 2715 +f 3432 904 2897 +f 2852 3742 1961 +f 3501 1896 3091 +f 747 3923 1491 +f 2374 3191 383 +f 2059 3892 431 +f 139 626 201 +f 1985 2932 1279 +f 1171 328 330 +f 352 2763 3017 +f 3755 2422 496 +f 2187 1907 1565 +f 3817 633 2278 +f 519 2900 2108 +f 29 301 2048 +f 138 2286 3789 +f 46 102 3340 +f 2763 301 29 +f 510 3607 3476 +f 29 2048 3838 +f 510 3789 3919 +f 2682 2992 2143 +f 3577 454 2469 +f 1650 3127 1431 +f 586 3598 1445 +f 1215 15 2794 +f 2012 2927 1456 +f 3075 441 2986 +f 441 2906 2986 +f 586 3364 3383 +f 3364 153 2249 +f 153 3364 2654 +f 2923 1443 826 +f 2923 2559 1443 +f 3034 1747 335 +f 3048 934 2822 +f 1651 147 1701 +f 1358 2390 7 +f 1902 2159 1416 +f 1651 1396 147 +f 1902 1416 788 +f 1680 2390 1358 +f 1163 494 2080 +f 899 3491 3149 +f 2532 3034 2330 +f 1085 2423 2072 +f 1085 2072 2765 +f 88 2765 3625 +f 3625 2765 2072 +f 763 3822 3177 +f 1259 3177 3822 +f 3679 2983 3448 +f 1221 2326 1176 +f 3448 2983 1085 +f 2730 592 3850 +f 2062 1853 1199 +f 1172 1147 1868 +f 3115 3616 2602 +f 143 298 3348 +f 3324 298 143 +f 3024 143 1079 +f 302 1185 684 +f 2077 122 3798 +f 2458 3594 1758 +f 1131 3613 514 +f 3374 3315 3330 +f 2122 198 1539 +f 3816 731 3280 +f 504 2290 2742 +f 3513 1961 3503 +f 1304 
3933 506 +f 485 3816 3280 +f 2000 1539 2552 +f 3615 2100 363 +f 3280 1374 109 +f 1698 109 1374 +f 1235 1837 2755 +f 3137 2314 1889 +f 476 1235 2755 +f 2102 2119 2755 +f 2067 390 3768 +f 635 2267 1538 +f 1538 2267 2357 +f 2284 551 2128 +f 2891 3028 237 +f 2028 325 237 +f 1617 2809 237 +f 1426 600 617 +f 3506 1881 2577 +f 2080 494 2118 +f 2103 2522 1401 +f 2665 2233 933 +f 341 2179 1811 +f 689 2291 2029 +f 1294 3814 3485 +f 3462 3485 3814 +f 309 2759 2875 +f 2284 2240 3169 +f 3381 3584 3619 +f 2876 972 630 +f 1242 2876 845 +f 1282 803 3285 +f 2149 45 1257 +f 1727 1849 527 +f 2063 3024 2342 +f 3473 3776 2843 +f 1626 3057 3350 +f 2019 3394 3380 +f 1960 2791 1935 +f 3423 3549 269 +f 1508 3549 402 +f 402 3423 3551 +f 402 3551 3118 +f 255 1126 2692 +f 3193 722 2482 +f 3289 1362 912 +f 2803 1362 3289 +f 3352 2270 2696 +f 3165 27 244 +f 2154 822 1385 +f 2795 996 3003 +f 2570 992 2758 +f 1547 2482 1879 +f 54 369 3821 +f 3197 255 2881 +f 297 2795 3003 +f 3493 369 3443 +f 3292 3247 130 +f 1474 2154 1385 +f 274 2716 2832 +f 656 3763 549 +f 1385 1441 3439 +f 822 1441 1385 +f 264 616 1084 +f 2829 248 3132 +f 3225 2737 2161 +f 2694 3180 2677 +f 2531 2096 968 +f 2475 3809 2097 +f 844 2117 1622 +f 230 2931 909 +f 1647 1766 2841 +f 2351 1066 2485 +f 2542 651 2479 +f 651 2542 2744 +f 623 829 2642 +f 3119 2830 842 +f 3527 3038 491 +f 2470 194 2397 +f 2470 1281 194 +f 220 521 1998 +f 2640 216 2022 +f 1307 861 1091 +f 3326 3794 3482 +f 1785 958 1485 +f 1848 3862 2272 +f 3726 882 2164 +f 1797 2245 3608 +f 1578 1022 3883 +f 1554 2920 2014 +f 1721 2245 3334 +f 2485 1424 2293 +f 1252 103 1917 +f 214 1797 3608 +f 1022 2106 1767 +f 2014 2920 2728 +f 2293 1424 1497 +f 2421 880 2245 +f 816 2528 869 +f 874 3688 2226 +f 3688 3697 1548 +f 540 3673 2627 +f 540 2857 3673 +f 3088 2216 1585 +f 2308 2511 295 +f 3389 3885 3856 +f 1245 113 1324 +f 1717 156 2054 +f 2973 777 3083 +f 2973 3083 159 +f 837 1518 2026 +f 791 3419 1207 +f 233 512 3524 +f 3153 1389 885 +f 3153 243 2625 +f 1963 3699 688 +f 53 784 543 +f 3839 2723 1106 +f 3662 3212 3722 +f 2872 3662 3722 +f 119 3752 427 +f 2354 3660 1276 +f 2085 810 3367 +f 3699 1963 1919 +f 19 340 2590 +f 2783 1097 1724 +f 3554 1097 2720 +f 1092 518 327 +f 2255 1632 3233 +f 3233 1632 543 +f 2590 1376 1905 +f 2834 2867 1234 +f 1467 2083 50 +f 1140 2067 2021 +f 2083 396 50 +f 2789 1668 2911 +f 1317 3021 321 +f 1077 1008 307 +f 2178 2182 3463 +f 3668 1270 631 +f 129 1270 1157 +f 3116 985 2866 +f 2324 593 1104 +f 1120 3769 355 +f 3176 2981 3207 +f 2411 3668 397 +f 2811 2438 2893 +f 2811 2893 994 +f 3293 860 2254 +f 323 3632 2938 +f 474 841 3231 +f 2084 1901 2977 +f 1764 3424 1113 +f 676 2656 3250 +f 1867 134 774 +f 3826 928 3097 +f 1824 636 3731 +f 2712 3676 1522 +f 721 1272 310 +f 346 1824 3808 +f 1824 346 2487 +f 3676 2926 1522 +f 3942 2835 1280 +f 2849 832 3683 +f 1684 475 100 +f 3676 3547 3194 +f 3479 3438 2130 +f 3723 1146 2926 +f 2132 3430 2453 +f 1472 3288 3659 +f 3674 3070 3427 +f 1357 2851 3724 +f 1672 3659 3288 +f 3430 2132 2630 +f 438 1541 1093 +f 522 2431 2426 +f 3848 865 2879 +f 3377 351 1541 +f 920 522 2426 +f 2517 1933 1398 +f 3664 3358 3845 +f 2515 3729 168 +f 3230 1899 517 +f 201 3174 3609 +f 2059 3804 1251 +f 2877 201 626 +f 265 2150 2925 +f 2306 1239 2877 +f 1339 3 747 +f 1028 2952 1810 +f 1171 1730 328 +f 2603 2993 1279 +f 328 1730 3187 +f 2442 2993 3152 +f 2993 2442 2968 +f 3198 2968 2442 +f 295 2511 1415 +f 29 2949 2763 +f 3789 510 3476 +f 63 2052 3776 +f 3755 3263 352 +f 2506 1897 517 +f 3152 2660 303 +f 2763 352 301 +f 2372 945 2709 +f 775 2916 3410 +f 3333 3484 1078 +f 
1419 2058 3927 +f 2709 945 405 +f 3920 2709 405 +f 2081 1247 1577 +f 1363 2781 927 +f 594 424 2781 +f 399 132 1649 +f 2824 1747 1444 +f 3223 3867 2678 +f 3306 1741 1129 +f 3796 2733 2123 +f 82 1923 3865 +f 2231 2123 3930 +f 1315 3113 848 +f 1795 2326 2523 +f 3626 969 888 +f 3159 1158 1407 +f 839 1407 1158 +f 1008 907 1147 +f 3384 3874 1328 +f 2232 3874 673 +f 2653 1821 1758 +f 3513 2436 2936 +f 731 1374 3280 +f 1799 3497 1492 +f 1851 3513 2936 +f 2514 2180 3155 +f 1981 3145 2512 +f 1433 1889 3369 +f 551 2284 2267 +f 1336 2251 3587 +f 1487 1947 891 +f 766 3021 2451 +f 3814 3865 1189 +f 2284 492 2267 +f 698 1768 1103 +f 1886 1382 2019 +f 1243 3110 544 +f 2541 3540 544 +f 2964 1515 1227 +f 1200 2562 722 +f 2531 3064 1238 +f 3544 2242 202 +f 277 1337 1049 +f 882 1485 958 +f 671 775 3410 +f 2457 3301 1848 +f 2916 1791 2469 +f 610 1497 1123 +f 1497 610 2293 +f 1767 1859 1904 +f 658 2338 727 +f 2085 930 3645 +f 2739 3913 1234 +f 1467 1052 2083 +f 674 3207 2981 +f 951 1217 3536 +f 2288 604 1709 +f 963 1038 2587 +f 1577 1391 3562 +f 141 2476 3268 +f 544 3540 1243 +f 1192 1238 3064 +f 1887 2929 3126 +f 2337 2186 3371 +f 1459 1866 2897 +f 2266 1228 2151 +f 1810 1611 1028 +f 330 328 1300 +f 937 1369 1483 +f 3867 3730 2678 +f 3223 2678 594 +f 1587 2149 1031 +f 1821 2458 1758 +f 800 1799 1492 +f 3503 1961 3742 +f 1255 1987 3148 +f 1954 2371 1423 +f 1954 1423 2402 +f 3703 1144 1850 +f 3055 1017 642 +f 1631 2759 309 +f 3567 2240 2284 +f 3653 2196 3310 +f 2063 1727 2647 +f 3563 2984 3036 +f 2588 3886 3082 +f 2633 549 1068 +f 1502 1960 1935 +f 1960 2572 2791 +f 2803 2279 1362 +f 342 3003 3531 +f 3493 2127 369 +f 1142 1613 3934 +f 264 2718 1289 +f 2731 3312 761 +f 3595 3708 2248 +f 2250 248 3909 +f 976 3251 3736 +f 342 297 3003 +f 2333 1183 393 +f 2333 1089 1183 +f 3763 1183 1089 +f 2702 456 2135 +f 2778 317 579 +f 1890 3134 1935 +f 444 3019 3352 +f 939 1968 1736 +f 3003 996 366 +f 711 1922 2310 +f 2270 711 2310 +f 2117 3197 1156 +f 1362 32 3759 +f 2855 1442 2673 +f 2253 2688 1065 +f 1959 1205 1687 +f 1564 1504 3420 +f 180 2853 1055 +f 2475 3451 3809 +f 761 3339 242 +f 3561 710 1562 +f 444 2003 3019 +f 710 3561 79 +f 2417 1796 854 +f 710 2655 1562 +f 3860 3689 611 +f 1045 764 2618 +f 2556 1590 1766 +f 3089 1656 3494 +f 3089 3494 2933 +f 2181 2980 3050 +f 210 3278 2642 +f 2642 3278 1778 +f 25 1354 651 +f 1997 1387 1829 +f 858 1544 2621 +f 3712 3066 1894 +f 1544 3899 2621 +f 2935 983 1590 +f 1045 2618 1465 +f 861 2801 1051 +f 1544 2312 3899 +f 861 1051 2170 +f 1355 2312 1998 +f 1509 3507 1841 +f 2690 193 655 +f 3726 1375 1552 +f 2807 1418 3646 +f 1670 542 3507 +f 2895 1591 1848 +f 1875 3351 1334 +f 2697 2585 1915 +f 2558 2697 601 +f 909 1341 944 +f 1498 2094 120 +f 2823 2638 2467 +f 103 3764 69 +f 2558 2585 2697 +f 1498 1620 2638 +f 1828 3435 1062 +f 2421 2245 1797 +f 2401 69 2405 +f 693 3055 642 +f 357 2948 2491 +f 2491 720 357 +f 720 2349 1153 +f 2898 2226 810 +f 591 373 2857 +f 3283 1436 1589 +f 1436 276 1589 +f 3771 1211 2967 +f 2239 3175 1186 +f 2973 3090 2193 +f 3090 2134 849 +f 2646 150 2037 +f 2134 2646 2037 +f 1326 1955 641 +f 1009 1654 1447 +f 229 1633 3600 +f 3849 486 229 +f 233 3101 512 +f 3714 3680 1623 +f 3714 221 3027 +f 612 2799 831 +f 3801 1230 1755 +f 1586 2590 340 +f 2590 1905 19 +f 2255 3209 3077 +f 3209 1513 3077 +f 2946 3209 3847 +f 3209 2946 1513 +f 1905 1427 19 +f 1803 3706 914 +f 2193 2990 2973 +f 3083 1092 3882 +f 774 451 304 +f 321 756 1332 +f 50 2137 560 +f 986 3463 2862 +f 181 3644 1229 +f 1969 240 2459 +f 669 857 3716 +f 73 3866 396 +f 2324 181 593 +f 593 181 73 +f 1893 307 2034 
+f 3429 355 3769 +f 886 2359 3020 +f 2604 1653 886 +f 2359 886 1653 +f 3433 571 3652 +f 1157 3154 129 +f 117 886 2893 +f 923 2493 240 +f 2811 3769 1120 +f 3154 1157 3176 +f 1410 1816 2753 +f 1349 3696 963 +f 3696 1349 936 +f 634 1709 3084 +f 963 2587 2089 +f 105 2782 1150 +f 1982 508 3707 +f 3576 3195 3792 +f 164 936 1349 +f 1964 164 3231 +f 1820 1321 3135 +f 3438 3486 1309 +f 81 443 1864 +f 3926 1686 644 +f 409 961 81 +f 1233 3269 17 +f 1846 1227 1053 +f 62 2075 484 +f 2455 3832 3510 +f 2398 1209 2454 +f 3618 167 1216 +f 3105 1238 1192 +f 3488 179 2677 +f 167 359 3245 +f 438 287 1574 +f 281 131 2046 +f 3659 3674 1472 +f 179 1641 2222 +f 2398 640 1209 +f 3552 3454 2521 +f 1213 2099 1290 +f 337 700 2772 +f 2320 2431 2099 +f 1984 2145 315 +f 2344 2929 2252 +f 1713 599 726 +f 599 1713 305 +f 2145 2257 574 +f 1733 590 2651 +f 1756 2901 2897 +f 398 2848 1131 +f 2739 2392 3913 +f 352 3017 2422 +f 1930 3766 1774 +f 1408 3919 2286 +f 2076 1768 576 +f 3396 3564 692 +f 1122 1805 318 +f 2396 1344 2794 +f 90 2899 15 +f 441 1652 2439 +f 1190 3163 1017 +f 2701 2007 1680 +f 1363 3408 595 +f 2532 1456 3034 +f 3223 1190 1017 +f 3625 3206 2813 +f 2887 888 3568 +f 848 1795 2730 +f 2326 1795 1176 +f 2375 621 2423 +f 3113 1795 848 +f 169 3533 2042 +f 3115 1172 1868 +f 1853 2958 1496 +f 36 1712 2103 +f 856 3087 3873 +f 3873 3087 1977 +f 3330 3315 79 +f 1851 2936 3918 +f 648 3514 390 +f 891 3342 2939 +f 2128 3887 2939 +f 3504 1947 2757 +f 743 1720 1679 +f 1923 1514 3865 +f 1811 3538 309 +f 592 149 2220 +f 1269 3342 845 +f 592 2220 2381 +f 1856 3672 2 +f 3000 2003 444 +f 95 366 996 +f 2040 1977 3421 +f 2655 3816 436 +f 2980 360 1074 +f 41 195 944 +f 874 2226 2898 +f 2340 804 757 +f 2998 588 3210 +f 3678 737 1962 +f 2749 565 3042 +f 1982 3707 365 +f 199 927 3650 +f 2664 2534 3467 +f 3179 3467 2534 +f 645 3704 2131 +f 2464 1540 1601 +f 809 1231 2092 +f 2257 2145 1984 +f 519 2660 2603 +f 3489 376 2536 +f 648 1772 3514 +f 1442 2855 765 +f 1796 1761 3032 +f 2576 2094 1498 +f 1211 1342 2967 +f 2420 3793 612 +f 488 3831 2389 +f 1963 2623 459 +f 2142 2255 3233 +f 3681 169 2042 +f 631 1270 1283 +f 2359 3433 3652 +f 1002 1238 3105 +f 1713 2515 305 +f 742 496 1475 +f 1224 937 788 +f 1438 3886 5 +f 2369 2253 2050 +f 2141 653 3328 +f 1200 3917 2562 +f 3118 3551 1759 +f 2418 27 3165 +f 1126 3687 2572 +f 2572 2692 1126 +f 2056 27 3118 +f 32 2279 827 +f 1807 1189 1268 +f 329 2831 3215 +f 456 2702 1794 +f 1023 2135 3272 +f 3505 2731 2650 +f 3493 761 2127 +f 1825 3875 3702 +f 2803 3289 2818 +f 620 2818 1180 +f 2954 908 393 +f 2832 615 3247 +f 1318 2716 300 +f 3734 1183 188 +f 2502 458 3881 +f 1183 3763 188 +f 3275 342 3398 +f 528 1068 549 +f 1105 2295 3905 +f 1105 3905 382 +f 2050 2253 1323 +f 840 3180 1959 +f 2853 3585 2008 +f 2721 2040 2808 +f 2199 2605 2258 +f 2353 2217 1337 +f 40 683 1943 +f 2649 2036 3946 +f 2801 1910 1051 +f 1465 3605 1894 +f 3066 1465 1894 +f 1703 2355 11 +f 11 2355 2649 +f 464 2830 2917 +f 3329 2397 2022 +f 1418 1485 3646 +f 1552 1400 3726 +f 3608 2245 1721 +f 1620 120 1119 +f 1875 2638 2823 +f 1329 2527 2323 +f 2054 3175 3921 +f 2054 3921 709 +f 3175 2054 156 +f 159 3090 2973 +f 2646 2134 3090 +f 3090 159 2646 +f 2586 3797 2468 +f 2971 3680 2340 +f 3714 2340 3680 +f 3413 3849 588 +f 2347 2441 3649 +f 2244 3356 810 +f 1427 3128 3925 +f 1376 1530 1905 +f 756 867 1332 +f 2182 2496 2862 +f 756 1786 2703 +f 2493 2459 240 +f 2493 284 2459 +f 284 2493 2789 +f 951 895 2944 +f 923 3433 3536 +f 2178 631 2254 +f 1090 2388 833 +f 2587 674 1430 +f 3474 1121 868 +f 3696 2579 1038 +f 348 474 404 +f 2147 2938 3803 +f 825 
2591 3219 +f 3461 784 183 +f 2075 409 391 +f 2408 2381 1060 +f 3203 3179 2367 +f 3859 3025 1656 +f 3070 1254 1602 +f 1541 2098 1602 +f 3070 1602 2098 +f 3659 1672 1254 +f 685 3377 1602 +f 1541 1602 3377 +f 1908 507 3308 +f 3308 507 2200 +f 769 1593 1290 +f 2306 2877 3109 +f 2124 3043 2551 +f 3447 2510 3295 +f 1475 496 3295 +f 3295 496 609 +f 594 1219 424 +f 3365 1016 3063 +f 3459 3226 270 +f 3568 1450 2477 +f 3568 3782 1923 +f 2733 1225 3930 +f 2004 3115 1188 +f 2004 1188 3068 +f 1286 3511 684 +f 2669 1031 2079 +f 2105 2079 1523 +f 2079 2105 2669 +f 2242 1952 3162 +f 170 1660 2613 +f 3629 3777 1593 +f 3315 731 710 +f 1786 981 1835 +f 2284 2128 3567 +f 489 1947 3504 +f 1373 981 3021 +f 1720 748 3592 +f 1658 1545 1942 +f 2881 2502 3197 +f 3401 3595 2299 +f 2999 2853 2008 +f 3924 3050 3378 +f 2486 3235 3533 +f 2486 3116 3235 +f 365 334 3370 +f 2408 3903 308 +f 3449 2680 2352 +f 902 1287 3068 +f 1183 3734 393 +f 1959 2694 1205 +f 3505 2635 2731 +f 623 2642 1778 +f 351 3377 3552 +f 1984 345 1366 +f 3858 1563 213 +f 168 305 2515 +f 3315 710 79 +f 2812 282 3266 +f 3886 1438 3082 +f 1268 106 51 +f 2127 1827 3671 +f 130 3247 1476 +f 1959 3180 2694 +f 1065 2688 3521 +f 4 1828 1752 +f 3175 2239 3921 +f 2862 321 1332 +f 3652 3020 2359 +f 2620 3696 936 +f 3011 3775 3627 +f 2166 2321 2309 +f 1254 3070 3674 +f 1288 2166 2309 +f 1209 2581 2454 +f 2268 2913 2440 +f 1287 2004 3068 +f 1286 1031 2669 +f 482 1421 2364 +f 1921 1770 2026 +f 658 737 3678 +f 1364 376 3489 +f 684 3511 673 +f 1772 2314 3514 +f 615 1084 1476 +f 3232 623 1778 +f 1404 951 3536 +f 1268 3695 3325 +f 329 3122 2907 +f 2250 329 2907 +f 3637 2673 2315 +f 2673 3759 2855 +f 2673 3637 3759 +f 2806 1761 1796 +f 989 683 2933 +f 453 1703 681 +f 3196 1910 3094 +f 814 3060 606 +f 1969 571 240 +f 1404 3536 1653 +f 3415 414 2600 +f 604 662 1462 +f 1964 936 164 +f 3231 164 2977 +f 841 813 1964 +f 1348 3060 1561 +f 1743 2761 1419 +f 3021 1786 321 +f 3695 3940 209 +f 1532 836 2970 +f 402 3118 2418 +f 3789 3476 742 +f 1795 2523 2730 +f 1502 1935 3134 +f 1778 3278 1882 +f 724 1193 2628 +f 1982 365 1410 +f 2381 1745 176 +f 3094 3592 3355 +f 61 1293 863 +f 3529 703 3165 +f 3023 447 3102 +f 470 2758 3238 +f 3596 238 2718 +f 912 877 3289 +f 469 3693 130 +f 3403 1970 3019 +f 1639 300 3121 +f 1639 2209 300 +f 1959 2699 840 +f 2645 1018 42 +f 16 935 1142 +f 1311 277 562 +f 1135 989 282 +f 3899 2312 1355 +f 917 3428 2238 +f 3774 3712 1948 +f 1281 3774 1948 +f 1022 3663 3883 +f 668 3946 2690 +f 869 3523 2443 +f 2409 2317 1452 +f 312 2409 1452 +f 2872 3502 247 +f 883 2216 872 +f 883 872 1876 +f 1876 1551 472 +f 1329 3436 2589 +f 978 1186 3175 +f 1207 473 791 +f 2586 473 1415 +f 1810 3797 2586 +f 1931 866 247 +f 340 19 430 +f 1974 3209 239 +f 1235 3054 3904 +f 2679 867 1140 +f 1969 3235 1203 +f 1420 1314 1916 +f 2174 1278 3294 +f 1314 3490 1689 +f 2981 1157 3668 +f 634 3084 3207 +f 2561 2777 2972 +f 322 3195 2604 +f 3108 2620 2652 +f 2620 2035 2579 +f 678 1321 1971 +f 2408 1918 3683 +f 2316 1540 3464 +f 3779 2464 1601 +f 3479 2464 1836 +f 862 2356 1684 +f 3015 997 1775 +f 862 22 3097 +f 1109 3086 3939 +f 1745 2408 3683 +f 2464 3479 2112 +f 1479 2497 3009 +f 534 1388 3541 +f 2183 1381 2373 +f 1304 2306 1851 +f 618 265 789 +f 1491 954 590 +f 1279 2968 3634 +f 2900 2603 2932 +f 2350 2750 1599 +f 285 301 3263 +f 3263 3755 3476 +f 3481 2899 90 +f 1139 1743 690 +f 3489 1153 2413 +f 1139 690 2007 +f 2509 3365 2007 +f 2237 2159 3894 +f 152 901 899 +f 1016 1701 2599 +f 1444 901 2824 +f 2987 3626 2887 +f 2987 2887 2516 +f 666 2033 1665 +f 2913 2268 211 +f 2229 3843 47 +f 1147 
1096 3406 +f 3915 1587 1031 +f 1788 1031 2149 +f 3170 3171 3042 +f 2714 3171 92 +f 3366 2102 1837 +f 2233 2665 2458 +f 3244 455 2952 +f 455 3797 2952 +f 1415 1810 2586 +f 1532 3142 3153 +f 2981 2411 674 +f 1147 907 1096 +f 75 3272 3401 +f 1142 2635 3505 +f 1622 2117 1156 +f 3666 1329 2323 +f 2306 1503 1851 +f 3607 3263 3476 +f 2567 1190 1363 +f 1493 1350 3818 +f 357 720 2536 +f 1153 2536 720 +f 882 958 2164 +f 4 493 1591 +f 4 1591 2895 +f 3171 2749 3042 +f 2848 574 2124 +f 2773 743 1679 +f 2022 2397 916 +f 1121 2084 868 +f 1855 3081 2705 +f 3202 2527 3922 +f 3119 651 2181 +f 1964 2652 936 +f 2620 936 2652 +f 3314 2885 462 +f 3521 447 3529 +f 2758 1403 3238 +f 1403 2758 992 +f 3141 135 2829 +f 3640 2582 1624 +f 3349 2465 3669 +f 835 2 1350 +f 3349 1622 1156 +f 3162 2061 202 +f 2404 245 3450 +f 1943 683 989 +f 1880 699 1877 +f 2419 3243 3038 +f 3812 3605 2618 +f 3313 1635 3329 +f 2571 1552 1375 +f 2391 1291 2884 +f 1400 1552 13 +f 1307 542 1670 +f 2773 2391 2116 +f 882 3646 1485 +f 926 1998 3373 +f 2467 1552 2571 +f 2962 312 1277 +f 2491 1277 3717 +f 3910 2518 2625 +f 243 3910 2625 +f 915 1586 3133 +f 3910 3699 1919 +f 3699 3910 243 +f 1360 1919 3136 +f 3752 836 427 +f 1707 2269 367 +f 866 2723 3412 +f 2619 3387 260 +f 1278 2324 1104 +f 397 2324 1278 +f 3346 1013 902 +f 3058 3346 902 +f 2587 2174 2089 +f 717 662 604 +f 1150 1113 105 +f 2749 3035 1764 +f 2035 634 2579 +f 3826 2591 1293 +f 569 678 2863 +f 2130 2112 3479 +f 1107 2695 1995 +f 3053 2373 918 +f 2838 2932 1909 +f 1408 3340 102 +f 1148 2278 2660 +f 773 2865 3155 +f 90 440 3481 +f 90 1560 440 +f 2864 621 1625 +f 1814 587 2440 +f 2602 1158 3159 +f 3490 3852 1676 +f 3355 705 1132 +f 3595 2160 1624 +f 1590 1829 2935 +f 882 3726 3646 +f 2491 3717 624 +f 114 1449 2689 +f 784 374 183 +f 2028 237 1048 +f 844 3544 1844 +f 842 2830 2756 +f 367 20 3666 +f 149 2523 2987 +f 3752 3212 836 +f 3342 1269 2939 +f 2257 1984 1366 +f 2250 238 628 +f 1977 2040 2721 +f 1591 3691 2457 +f 28 756 2703 +f 674 2411 1430 +f 157 3627 3775 +f 3627 1109 3939 +f 916 2397 820 +f 916 820 1785 +f 3490 3293 1689 +f 1739 2145 574 +f 3915 1031 1286 +f 2105 1523 661 +f 3002 705 1030 +f 165 1458 1058 +f 1315 2111 3113 +f 2164 958 573 +f 493 3691 1591 +f 1591 2457 1848 +f 3703 1850 1399 +f 3104 465 1451 +f 1590 2841 1766 +f 1525 3691 493 +f 1612 1083 2289 +f 2607 3651 1330 +f 1828 1123 1752 +f 3092 1554 1572 +f 3092 2233 1554 +f 2491 2948 1575 +f 3524 512 3593 +f 3447 1992 1449 +f 1516 3699 243 +f 3415 2600 355 +f 3792 3195 322 +f 1210 1480 1870 +f 1664 3928 1248 +f 1457 3244 2664 +f 3751 2553 973 +f 1335 1107 3499 +f 1335 3399 1107 +f 1152 724 1395 +f 3729 3833 1793 +f 3053 1251 726 +f 2350 1599 3338 +f 881 1704 536 +f 2012 15 1215 +f 2491 1575 1277 +f 2276 2887 3568 +f 3406 1868 1147 +f 2144 3068 1188 +f 891 1061 3795 +f 326 1436 196 +f 184 2035 1165 +f 3667 264 3031 +f 1635 2386 3329 +f 53 543 915 +f 3552 3377 3454 +f 2951 3066 3712 +f 761 242 2127 +f 2951 3712 3774 +f 3782 3568 2477 +f 922 3401 2299 +f 3586 3524 3593 +f 3401 3272 1223 +f 2796 1150 414 +f 3632 323 482 +f 312 3717 1277 +f 1516 243 1838 +f 786 2689 1449 +f 2276 3568 1923 +f 2211 926 1603 +f 3154 1462 3305 +f 1783 2601 705 +f 3632 482 2288 +f 1850 1244 1399 +f 2160 3401 1223 +f 2398 1736 3104 +f 165 1064 1877 +f 1088 1877 1064 +f 1603 1083 2211 +f 2498 811 2500 +f 2607 1330 220 +f 3862 3301 3841 +f 2211 3841 926 +f 1679 1720 1173 +f 1083 216 3253 +f 1424 2169 1497 +f 1706 2624 2169 +f 120 3322 1119 +f 1705 13 1119 +f 230 1034 2931 +f 786 3072 3419 +f 2380 326 196 +f 1186 196 1929 +f 2610 3524 1518 +f 3262 
388 967 +f 1838 243 3153 +f 260 1097 859 +f 859 1434 815 +f 414 3415 2388 +f 1120 2600 2777 +f 1764 565 2749 +f 2888 2753 3792 +f 565 1764 2796 +f 1686 3926 1309 +f 2112 2130 3914 +f 1388 534 1994 +f 816 3868 3518 +f 3449 2352 2735 +f 3449 2735 2761 +f 1853 2429 1199 +f 2225 2102 3733 +f 3592 2821 3355 +f 233 3524 2610 +f 233 2610 757 +f 833 3415 704 +f 3296 1775 3751 +f 2275 1083 3253 +f 3415 833 2388 +f 2281 2826 730 +f 2945 3249 1525 +f 2128 2939 2784 +f 2160 3595 3401 +f 165 1877 699 +f 1045 2812 99 +f 887 1880 1877 +f 1877 1088 887 +f 3243 3393 3038 +f 3518 3868 2088 +f 1776 624 3717 +f 3868 2281 730 +f 2719 1016 3250 +f 91 3211 734 +f 1776 3717 1452 +f 687 3278 210 +f 3154 3305 3929 +f 818 3897 2641 +f 1613 1142 3505 +f 1970 3352 3019 +f 1004 244 2315 +f 2954 765 827 +f 3254 227 3402 +f 3841 220 926 +f 1612 2211 1083 +f 2094 624 1776 +f 2094 1776 120 +f 1831 2783 1724 +f 3614 1831 1724 +f 73 1229 3866 +f 2811 994 3769 +f 3803 2938 3632 +f 184 3803 3632 +f 184 1165 3803 +f 2747 3846 2553 +f 2039 729 1192 +f 1131 2848 2124 +f 1743 3927 557 +f 1743 1419 3927 +f 3449 2761 2675 +f 3908 3449 2675 +f 70 1644 3908 +f 2567 595 2064 +f 2225 2736 2641 +f 589 3131 1720 +f 589 1720 743 +f 642 933 3092 +f 3438 3111 2130 +f 1465 2618 3605 +f 1375 1834 655 +f 3313 1603 3373 +f 2288 482 717 +f 2409 2281 1976 +f 1748 2281 312 +f 2289 2275 162 +f 1599 2838 3338 +f 2884 2116 2391 +f 2747 2553 3751 +f 2225 3733 3471 +f 693 642 3092 +f 860 3293 1676 +f 3871 537 2033 +f 2719 3250 2656 +f 91 734 3081 +f 2797 2099 1213 +f 1535 2797 1213 +f 476 3897 818 +f 855 3217 1105 +f 3217 3204 1105 +f 551 2958 2268 +f 3782 2477 37 +f 3617 2531 968 +f 3334 2245 880 +f 2062 1936 1158 +f 2552 2533 2471 +f 1107 2148 3499 +f 2533 2125 2471 +f 2300 1438 1685 +f 311 2596 2688 +f 3185 1406 653 +f 2300 1023 675 +f 2954 827 555 +f 2718 264 3667 +f 3909 2831 2250 +f 1180 2818 639 +f 3251 620 1180 +f 3251 976 620 +f 1476 3270 2890 +f 2209 264 1084 +f 248 135 3909 +f 248 2829 135 +f 366 2842 3003 +f 2304 2999 3095 +f 1759 3551 1055 +f 2304 2056 1759 +f 2999 2304 1759 +f 2304 3095 9 +f 2552 1674 2327 +f 3286 2199 2061 +f 3512 1972 2291 +f 3450 245 2073 +f 3494 3124 2933 +f 3450 2073 683 +f 1465 3066 2812 +f 2170 1051 464 +f 2830 464 2756 +f 286 1281 2470 +f 1875 2823 3351 +f 2585 2272 1612 +f 1509 811 1005 +f 601 3435 2558 +f 2909 3281 263 +f 1578 3883 3302 +f 545 1578 3302 +f 2698 3762 3172 +f 753 3705 1554 +f 851 2698 3172 +f 2169 1341 1706 +f 3745 2485 2293 +f 3291 3412 3649 +f 2016 2334 1422 +f 2016 1422 2872 +f 2239 1929 207 +f 1214 1551 3593 +f 1214 3593 1032 +f 1921 2026 1518 +f 326 2748 1627 +f 1955 1627 641 +f 1654 3885 1447 +f 1632 1322 915 +f 1322 1586 915 +f 3136 3146 1360 +f 2226 3397 810 +f 3831 1840 749 +f 488 3819 3831 +f 1830 2619 815 +f 1830 2990 2619 +f 2574 1434 1543 +f 1830 777 2990 +f 2427 2574 1543 +f 2584 327 2044 +f 857 2324 397 +f 2777 261 1120 +f 2668 2089 501 +f 501 2174 3294 +f 508 2438 261 +f 508 1862 2438 +f 736 2540 1608 +f 554 474 3231 +f 548 2972 2782 +f 1016 2719 3545 +f 3319 2878 3080 +f 928 1495 3461 +f 1838 422 2792 +f 3203 2195 1663 +f 3939 3376 1926 +f 3194 3723 3676 +f 2926 3361 3876 +f 758 1626 3418 +f 1160 3194 3547 +f 84 713 1233 +f 84 1233 1146 +f 3744 3076 2192 +f 3105 3288 2321 +f 3105 2101 3288 +f 3744 2192 1343 +f 1672 2101 809 +f 1242 2092 2876 +f 602 3813 3618 +f 707 2569 1757 +f 305 959 2687 +f 345 1984 975 +f 1757 3793 2420 +f 2373 3053 2634 +f 1550 3008 3477 +f 303 2278 633 +f 633 3857 303 +f 3396 1805 1122 +f 3396 1122 3564 +f 2396 2006 2495 +f 2375 3113 2111 +f 2375 1625 621 +f 
2375 2423 2983 +f 350 3579 445 +f 1814 47 3870 +f 3884 2686 445 +f 3406 1853 2062 +f 1868 3616 3115 +f 1008 1147 1172 +f 3874 2232 78 +f 2736 3504 2024 +f 2065 797 2956 +f 3193 582 722 +f 1135 282 2951 +f 1943 3388 830 +f 2409 1976 123 +f 3753 1606 3414 +f 66 364 114 +f 3072 295 3419 +f 12 2612 344 +f 1109 566 1162 +f 3499 2065 1335 +f 3902 345 1681 +f 1438 2300 675 +f 292 2315 2056 +f 2479 651 2238 +f 1848 3301 3862 +f 3776 2867 2834 +f 261 2811 1120 +f 3723 2926 3676 +f 1076 2123 2231 +f 1051 2756 464 +f 3087 3689 1977 +f 2954 1014 765 +f 2799 612 3793 +f 1241 2420 612 +f 3247 615 1476 +f 1318 2209 1084 +f 3124 989 2933 +f 3094 3355 3196 +f 3596 2718 3667 +f 340 430 459 +f 1543 859 3554 +f 862 1684 100 +f 2860 916 1785 +f 1876 3586 1551 +f 3291 3649 3662 +f 674 2587 1196 +f 2596 3521 2688 +f 3402 2404 3450 +f 2561 261 2777 +f 308 862 3097 +f 3402 227 2404 +f 40 1943 830 +f 2854 705 3002 +f 705 2854 1783 +f 3301 2607 3841 +f 3460 1547 971 +f 3637 2315 292 +f 2281 2409 312 +f 2552 1539 1674 +f 2056 2304 292 +f 2375 2111 1625 +f 1957 3000 3224 +f 3197 3543 1559 +f 3272 67 1223 +f 3272 2135 67 +f 3141 477 528 +f 750 2162 2303 +f 2965 3596 366 +f 95 2965 366 +f 1356 3930 998 +f 1356 2231 3930 +f 2635 935 500 +f 1796 3190 1799 +f 1785 820 1437 +f 1841 542 2366 +f 1679 1173 1938 +f 2801 1938 1173 +f 2576 1498 1413 +f 200 3705 753 +f 1578 2086 2106 +f 866 3291 247 +f 113 2689 786 +f 786 3419 113 +f 3291 2872 247 +f 3722 2204 2016 +f 3623 2136 2452 +f 1006 430 1501 +f 3799 2427 3128 +f 3785 2427 3496 +f 2044 327 2574 +f 1598 344 3453 +f 1052 2686 1939 +f 860 2182 2254 +f 414 3277 2600 +f 927 424 3650 +f 3277 1150 2782 +f 2751 2540 736 +f 1319 3376 1425 +f 479 1243 3526 +f 1918 3201 3683 +f 3942 1280 3015 +f 484 2075 391 +f 3201 2849 3683 +f 602 3618 3430 +f 3331 3399 1380 +f 1241 1994 2420 +f 707 1757 534 +f 534 1757 1994 +f 80 1258 1366 +f 1258 2257 1366 +f 945 2554 3120 +f 2314 434 93 +f 760 2596 5 +f 805 2615 9 +f 3119 2238 651 +f 1805 2576 1413 +f 3152 291 2442 +f 3886 760 5 +f 3320 500 935 +f 3119 842 2238 +f 26 2376 1256 +f 2603 2660 2993 +f 2603 1279 2932 +f 262 3702 290 +f 207 1929 52 +f 3277 2777 2600 +f 809 1379 1672 +f 621 1260 2423 +f 3940 3740 209 +f 1452 3717 312 +f 1004 2673 1323 +f 2660 3152 2993 +f 3094 2801 1173 +f 2281 3901 1976 +f 2281 3868 3901 +f 434 2314 694 +f 753 1554 2014 +f 459 430 1006 +f 1543 1434 859 +f 2044 2574 1550 +f 1757 2569 3793 +f 68 2231 269 +f 3702 262 2710 +f 1661 547 3185 +f 1403 3215 2831 +f 258 998 2699 +f 858 2470 2386 +f 3182 2391 2773 +f 1938 3794 3182 +f 1452 3322 1776 +f 263 1621 2698 +f 3684 1606 2401 +f 3623 1717 732 +f 1664 3545 3928 +f 1367 1275 1664 +f 746 2889 1561 +f 746 1319 2889 +f 418 1593 2581 +f 3489 2536 1153 +f 557 2554 1743 +f 2131 3723 645 +f 365 3370 1410 +f 1500 334 3928 +f 3514 2314 3137 +f 2864 251 2947 +f 3418 1626 484 +f 2554 60 1743 +f 2686 560 445 +f 295 1207 3419 +f 2576 2637 2349 +f 3577 2469 3720 +f 269 2231 1356 +f 691 3735 2616 +f 263 2698 2909 +f 1256 2376 263 +f 3545 1500 3928 +f 2864 2947 621 +f 96 2297 3047 +f 620 2803 2818 +f 1504 1507 258 +f 2161 2727 3225 +f 2405 1986 2285 +f 1108 3114 1436 +f 2617 2628 1193 +f 1734 1816 3370 +f 2869 758 3418 +f 1019 3418 391 +f 2062 1158 3616 +f 2512 390 3514 +f 3193 3702 582 +f 1667 760 3886 +f 2543 2339 2355 +f 2146 954 431 +f 3851 3726 1400 +f 1500 1734 3370 +f 334 1500 3370 +f 171 2835 3255 +f 1601 2316 2363 +f 2288 184 3632 +f 1085 2983 2423 +f 1776 3322 120 +f 1256 3281 2807 +f 3851 1256 2807 +f 1500 2719 1734 +f 3406 2062 3616 +f 656 1645 1478 +f 3684 2405 2285 +f 2349 
2637 3718 +f 2578 754 2664 +f 2576 2349 2094 +f 3081 257 691 +f 754 1234 2820 +f 2799 3793 3570 +f 8 503 56 +f 3324 924 1328 +f 412 3010 56 +f 2818 877 639 +f 2212 1478 2310 +f 1055 2999 1759 +f 2886 2808 2161 +f 2465 2490 198 +f 2282 917 1910 +f 1878 1308 2810 +f 1308 3946 2810 +f 3778 591 207 +f 2745 1803 3262 +f 3904 3054 3866 +f 1864 1272 1019 +f 3361 2926 1146 +f 2379 3877 1813 +f 173 1839 1238 +f 1020 1379 809 +f 954 2070 590 +f 1930 1774 2377 +f 1878 2205 1768 +f 1819 2680 1644 +f 1678 1560 524 +f 1374 731 3315 +f 2384 3873 2721 +f 1098 2886 2161 +f 3753 2351 2485 +f 346 3268 2476 +f 2131 2743 84 +f 346 2476 3335 +f 1280 2835 126 +f 2036 2649 2339 +f 1375 2164 1834 +f 871 69 880 +f 2651 2848 398 +f 472 1551 1873 +f 1739 574 2848 +f 263 3281 1256 +f 760 447 3521 +f 760 3521 2596 +f 2801 3094 1910 +f 2617 1193 150 +f 3374 1374 3315 +f 1667 3886 2588 +f 1146 2131 84 +f 2578 2664 3244 +f 1173 1720 3592 +f 2739 1234 754 +f 2799 3570 3338 +f 503 2847 56 +f 137 924 3324 +f 3094 1173 3592 +f 412 56 2847 +f 1170 3047 2297 +f 1308 2370 2690 +f 536 3760 230 +f 12 1928 2691 +f 1248 3928 1928 +f 2751 2612 2540 +f 721 3643 2195 +f 1293 1495 3826 +f 1479 3009 507 +f 1479 507 3260 +f 1768 2833 576 +f 926 3373 1603 +f 1223 67 3421 +f 2609 126 2835 +f 3418 484 391 +f 954 3923 431 +f 207 591 2239 +f 1763 3039 2297 +f 1502 3134 3368 +f 3917 2710 2729 +f 2295 1960 1502 +f 1763 2297 96 +f 3529 1065 3521 +f 555 908 2954 +f 579 1521 2778 +f 1645 656 2633 +f 2003 3000 2605 +f 2696 2310 1478 +f 1208 929 1559 +f 258 3242 998 +f 3095 2475 805 +f 182 202 2258 +f 562 3818 1350 +f 182 2258 1753 +f 3860 3640 2160 +f 3860 2460 3640 +f 3421 3860 1223 +f 2533 2552 446 +f 2951 282 2812 +f 2152 1281 1948 +f 3327 1829 3139 +f 11 1365 1682 +f 668 1365 11 +f 573 1365 1834 +f 655 1365 668 +f 1752 2945 493 +f 1924 249 3281 +f 1735 3638 693 +f 1066 944 1424 +f 195 41 1378 +f 2047 3502 1073 +f 1186 1929 2239 +f 3046 1770 581 +f 688 3699 2488 +f 3133 1586 2623 +f 1360 3146 2518 +f 3688 1548 2226 +f 951 1404 895 +f 3576 1983 1901 +f 1709 604 3084 +f 2089 2668 963 +f 3612 105 1113 +f 569 267 678 +f 1980 1111 478 +f 1243 758 2367 +f 3813 281 3618 +f 2194 1944 3858 +f 863 2591 3248 +f 1503 2715 1961 +f 1866 1184 782 +f 1148 773 3725 +f 2592 519 2108 +f 3410 2469 454 +f 3333 1650 1431 +f 3730 499 682 +f 2813 275 1225 +f 439 387 2055 +f 82 341 2276 +f 2887 2563 2516 +f 1188 1737 2144 +f 3366 3145 55 +f 3232 3112 3404 +f 3099 1685 5 +f 2596 311 3099 +f 1288 2096 1839 +f 202 2242 3162 +f 2867 2746 2820 +f 404 474 554 +f 1196 2579 634 +f 1526 2691 105 +f 1239 201 2877 +f 83 2913 211 +f 2548 3275 468 +f 1288 3809 2096 +f 3860 2160 1223 +f 1692 2275 3253 +f 543 784 432 +f 721 310 2264 +f 1358 3767 1600 +f 942 2033 537 +f 2897 1866 3432 +f 69 871 2405 +f 2867 2820 1234 +f 2929 1887 2252 +f 2396 2794 2006 +f 2 3672 1970 +f 339 2543 3605 +f 3046 581 268 +f 1188 3115 741 +f 3109 1503 2306 +f 3015 1775 1647 +f 1503 3109 3833 +f 83 211 3252 +f 1503 1961 1851 +f 3909 317 2831 +f 317 3238 2831 +f 2619 260 815 +f 1635 3373 2312 +f 749 2269 541 +f 3565 2548 468 +f 3373 1998 2312 +f 1603 216 1083 +f 3355 2821 1030 +f 2552 2327 446 +f 2729 1502 3368 +f 2250 2831 329 +f 779 2201 2630 +f 2460 611 2291 +f 3141 528 1521 +f 174 207 52 +f 2544 99 3266 +f 2469 1791 3720 +f 2363 2316 2307 +f 917 2756 1910 +f 1878 1768 2163 +f 2164 1375 3726 +f 3760 1034 230 +f 2609 2835 171 +f 3129 3039 311 +f 311 2369 3129 +f 311 2688 2369 +f 2729 2710 262 +f 3242 3423 269 +f 1352 96 893 +f 3165 1804 1065 +f 1295 3881 186 +f 3197 1559 255 +f 1208 1559 3543 +f 166 1306 3242 
+f 3032 2548 1075 +f 202 182 1844 +f 3544 202 1844 +f 2571 1375 655 +f 1915 2585 2289 +f 2380 196 1186 +f 603 3709 798 +f 3709 2903 1432 +f 605 2903 3709 +f 605 77 2903 +f 430 3925 1501 +f 3925 2720 1501 +f 1097 2783 2720 +f 2540 3612 1608 +f 404 2313 3103 +f 2540 1526 3612 +f 348 404 3103 +f 2905 362 1806 +f 1309 3486 1688 +f 3486 3546 1688 +f 2408 1060 3903 +f 1601 3808 3779 +f 3138 2597 385 +f 168 1793 959 +f 576 3404 1614 +f 34 3173 2395 +f 2077 2144 3511 +f 1473 1401 696 +f 2636 1811 3898 +f 3165 1065 3529 +f 3073 2571 655 +f 2661 605 1566 +f 404 1567 2313 +f 2408 1745 2381 +f 3051 1313 141 +f 3191 3318 383 +f 3672 2270 3352 +f 2548 3032 1761 +f 2238 842 917 +f 2123 2733 3930 +f 2881 2692 437 +f 1038 1196 2587 +f 1796 3032 854 +f 2562 3917 716 +f 2077 3511 122 +f 989 2846 282 +f 1880 2238 3428 +f 473 1207 1415 +f 1624 2160 3640 +f 248 2250 628 +f 820 2397 194 +f 2174 501 2089 +f 2373 2634 1284 +f 78 2232 3159 +f 2291 611 1390 +f 1645 1978 1478 +f 3516 1446 3099 +f 1735 693 949 +f 3709 1432 2241 +f 3333 1431 3484 +f 3556 3484 3239 +f 3556 3239 1644 +f 3099 5 2596 +f 3516 3099 3039 +f 1692 3253 1312 +f 1950 1600 2915 \ No newline at end of file diff --git a/utils/mesh_to_sdf_test.py b/utils/mesh_to_sdf_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5f2cb3f5dfc1d746aa2b156bce6c46c42709fc6e --- /dev/null +++ b/utils/mesh_to_sdf_test.py @@ -0,0 +1,59 @@ +import os +import numpy as np +import taichi as ti + +ti.init() + + +real = ti.f32 + + +def get_sdf_spatial_grad(): + sdf_sv_fn = "/data/xueyi/diffsim/NeuS/init_box_mesh.npy" + if not os.path.exists(sdf_sv_fn): + sdf_sv_fn = "/home/xueyi/diffsim/NeuS/init_box_mesh.npy" + sdf_sv = np.load(sdf_sv_fn, allow_pickle=True) + scene_sdf = sdf_sv + print(sdf_sv.shape) + + res = sdf_sv.shape[0] + + sdf_grad = np.zeros((res, res, res, 3), dtype=np.float32) + + + # scene_sdf = ti.field(dtype=real, shape=(self.sdf_res, self.sdf_res, self.sdf_res)) + for i_x in range(res): + print(f"Start processing i_x : {i_x}") + for i_y in range(res): + print(f"Start processing i_x : {i_x}, i_y : {i_y}") + for i_z in range(res): + cur_grad = np.zeros((3,), dtype=np.float32) + + if i_x > 0 and i_x < res - 1: + cur_grad[0] = (scene_sdf[i_x + 1, i_y, i_z] - scene_sdf[i_x - 1, i_y, i_z]) / 2.0 + elif i_x == 0: + cur_grad[0] = scene_sdf[i_x + 1, i_y, i_z] - scene_sdf[i_x, i_y, i_z] + elif i_x == res - 1: + cur_grad[0] = scene_sdf[i_x, i_y, i_z] - scene_sdf[i_x - 1, i_y, i_z] + + if i_y > 0 and i_y < res - 1: + cur_grad[1] = (scene_sdf[i_x, i_y + 1, i_z] - scene_sdf[i_x, i_y - 1, i_z]) / 2.0 + elif i_y == 0: + cur_grad[1] = scene_sdf[i_x, i_y + 1, i_z] - scene_sdf[i_x, i_y, i_z] + elif i_y == res - 1: + cur_grad[1] = scene_sdf[i_x, i_y, i_z] - scene_sdf[i_x, i_y - 1, i_z] + + if i_z > 0 and i_z < res - 1: + cur_grad[2] = (scene_sdf[i_x, i_y, i_z + 1] - scene_sdf[i_x, i_y, i_z - 1]) / 2.0 + elif i_z == 0: + cur_grad[2] = scene_sdf[i_x, i_y, i_z + 1] - scene_sdf[i_x, i_y, i_z] + elif i_z == res - 1: + cur_grad[2] = scene_sdf[i_x, i_y, i_z] - scene_sdf[i_x, i_y, i_z - 1] + + sdf_grad[i_x, i_y, i_z, :] = cur_grad[:] + + sdf_grad_sv_fn = "/home/xueyi/diffsim/NeuS/init_box_mesh_sdf_grad.npy" + np.save(sdf_grad_sv_fn, sdf_grad) + +if __name__ == '__main__': + get_sdf_spatial_grad() # \ No newline at end of file diff --git a/utils/taco_preprocessing.py b/utils/taco_preprocessing.py new file mode 100644 index 0000000000000000000000000000000000000000..0e3255a12866cf3d0cccffc9b6a6766f797f6cce --- /dev/null +++ b/utils/taco_preprocessing.py @@ -0,0 +1,293 
@@ + + +import numpy as np +import trimesh +import os +# try: +# import mesh2sdf +# except: +# pass +import mesh2sdf +import time +from scipy.spatial.transform import Rotation as R + +def extract_obj_meshes(sv_dict_fn): + # sv_fn = "/data2/datasets/sim/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # if not os.path.exists(sv_fn): + # sv_fn = "/data/xueyi/arctic/processed_sv_dicts/box_grab_01_extracted_dict.npy" + active_passive_sv_dict = np.load(sv_dict_fn, allow_pickle=True).item() # + + obj_verts = active_passive_sv_dict['obj_verts'] # object orientation # + obj_faces = active_passive_sv_dict['obj_faces'] + + obj_mesh = trimesh.Trimesh(obj_verts[0], obj_faces) + obj_mesh_sv_fn = os.path.join( + "/home/xueyi/diffsim/NeuS/utils", "init_box.obj" + ) + obj_mesh.export(obj_mesh_sv_fn) + + obj_verts_reversed = obj_verts[:, :, [1, 0, 2]] + obj_verts_reversed[:, :, 1] = -obj_verts_reversed[:, :, 1] + obj_mesh_reversed = trimesh.Trimesh(obj_verts_reversed[0], obj_faces) + obj_mesh_sv_fn_reversed = os.path.join( + "/home/xueyi/diffsim/NeuS/utils", "init_box_reversed.obj" + ) + obj_mesh_reversed.export(obj_mesh_sv_fn_reversed) + + +def extract_obj_meshes_boundingbox(sv_dict_fn): + # sv_fn = "/data2/datasets/sim/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # if not os.path.exists(sv_fn): + # sv_fn = "/data/xueyi/arctic/processed_sv_dicts/box_grab_01_extracted_dict.npy" + active_passive_sv_dict = np.load(sv_dict_fn, allow_pickle=True).item() # + + obj_verts = active_passive_sv_dict['obj_verts'] # object orientation # + obj_faces = active_passive_sv_dict['obj_faces'] + + init_obj_verts = obj_verts[0] + minn_box = np.min(init_obj_verts, axis=0, keepdims=True) + maxx_box = np.max(init_obj_verts, axis=0, keepdims=True) + # get the minn box and maxx box # + + + box_triangle_mesh_faces = np.array([ + [1, 2, 3], # Left face (triangle 1) + [2, 3, 4], # Left face (triangle 2) + [5, 6, 7], # Right face (triangle 1) + [6, 7, 8], # Right face (triangle 2) + [1, 3, 5], # Bottom face (triangle 1) + [3, 5, 7], # Bottom face (triangle 2) + [2, 4, 6], # Top face (triangle 1) + [4, 6, 8], # Top face (triangle 2) + [1, 2, 5], # Front face (triangle 1) + [2, 5, 6], # Front face (triangle 2) + [3, 4, 7], # Back face (triangle 1) + [4, 7, 8] # Back face (triangle 2) + ], dtype=np.int32) - 1 + + box_vertices = np.array([ + [-1, -1, -1], + [-1, -1, 1], + [-1, 1, -1], + [-1, 1, 1], + [1, -1, -1], + [1, -1, 1], + [1, 1, -1], + [1, 1, 1] + ], dtype=np.float32) + + box_vertices = (box_vertices - (-1)) / 2 + + + box_vertices = box_vertices * (maxx_box - minn_box) + minn_box + + box_mesh= trimesh.Trimesh(box_vertices, box_triangle_mesh_faces) + # + + # obj_mesh = trimesh.Trimesh(obj_verts[0], obj_faces) + obj_mesh_sv_fn = os.path.join( + "/home/xueyi/diffsim/NeuS/utils", "init_bounding_box.obj" + ) + box_mesh.export(obj_mesh_sv_fn) + + # obj_verts_reversed = obj_verts[:, :, [1, 0, 2]] + # obj_verts_reversed[:, :, 1] = -obj_verts_reversed[:, :, 1] + # obj_mesh_reversed = trimesh.Trimesh(obj_verts_reversed[0], obj_faces) + # obj_mesh_sv_fn_reversed = os.path.join( + # "/home/xueyi/diffsim/NeuS/utils", "init_box_reversed.obj" + # ) + # obj_mesh_reversed.export(obj_mesh_sv_fn_reversed) + +def extract_obj_meshes_taco(pkl_fn): + import pickle as pkl + + sv_dict = pkl.load(open(pkl_fn, "rb")) + + print(f"sv_dict: {sv_dict.keys()}") + + # maxx_ws = min(maxx_ws, len(sv_dict['obj_verts']) - start_idx) + + obj_pcs = sv_dict['obj_verts'] # [start_idx: start_idx + maxx_ws] + # 
obj_pcs = torch.from_numpy(obj_pcs).float().cuda() + + # self.obj_pcs = obj_pcs + # # obj_vertex_normals = sv_dict['obj_vertex_normals'] + # # obj_vertex_normals = torch.from_numpy(obj_vertex_normals).float().cuda() + # self.obj_normals = torch.zeros_like(obj_pcs[0]) ### get the obj naormal vectors ## + + object_pose = sv_dict['obj_pose'] # [start_idx: start_idx + maxx_ws] + # object_pose = torch.from_numpy(object_pose).float().cuda() ### nn_frames x 4 x 4 ### + object_global_orient_mtx = object_pose[:, :3, :3 ] ## nn_frames x 3 x 3 ## + object_transl = object_pose[:, :3, 3] ## nn_frmaes x 3 ## + + obj_faces = sv_dict['obj_faces'] + # obj_faces = torch.from_numpy(obj_faces).long().cuda() + # self.obj_faces = obj_faces # [0] ### obj faces ## + + # obj_verts = sv_dict['obj_verts'] + # minn_verts = np.min(obj_verts, axis=0) + # maxx_verts = np.max(obj_verts, axis=0) + # extent = maxx_verts - minn_verts + # center_ori = (maxx_verts + minn_verts) / 2 + # scale_ori = np.sqrt(np.sum(extent ** 2)) + # obj_verts = torch.from_numpy(obj_verts).float().cuda() + + init_obj_verts = obj_pcs[0] + init_obj_ornt_mtx = object_global_orient_mtx[0] + init_obj_transl = object_transl[0] + + canon_obj_verts = np.matmul( + init_obj_ornt_mtx.T, (init_obj_verts - init_obj_transl[None]).T + ).T + # self.obj_verts = canon_obj_verts.clone() + # obj_verts = canon_obj_verts.clone() + canon_obj_mesh = trimesh.Trimesh(vertices=canon_obj_verts, faces=obj_faces) + canon_obj_mesh_export_dir = "/".join(pkl_fn.split("/")[:-1]) + pkl_name = pkl_fn.split("/")[-1].split(".")[0] + canon_obj_mesh_sv_fn = f"{pkl_name}.obj" + canon_obj_mesh.export(os.path.join(canon_obj_mesh_export_dir, canon_obj_mesh_sv_fn)) + print(f"canon_obj_mesh_sv_fn: {canon_obj_mesh_sv_fn}") + +def compute_sdf(obj_file_name): + filename = obj_file_name + + # init_mesh_scale = 1.0 + init_mesh_scale = 0.8 + + mesh_scale = 0.8 + size = 128 + level = 2 / size + + mesh = trimesh.load(filename, force='mesh') + + # normalize mesh + vertices = mesh.vertices + vertices = vertices * init_mesh_scale + bbmin = vertices.min(0) # + bbmax = vertices.max(0) # + center = (bbmin + bbmax) * 0.5 + scale = 2.0 * mesh_scale / (bbmax - bbmin).max() # bounding box's max # # bbmax - bbmin # + vertices = (vertices - center) * scale # (vertices - center) * scale # + + scaled_bbmin = vertices.min(0) + scaled_bbmax = vertices.max(0) + print(f"scaled_bbmin: {scaled_bbmin}, scaled_bbmax: {scaled_bbmax}") + + + t0 = time.time() + sdf, mesh = mesh2sdf.compute( ## sdf and mesh ## + vertices, mesh.faces, size, fix=True, level=level, return_mesh=True) + t1 = time.time() + + print(f"sdf: {sdf.shape}, mesh: {mesh.vertices.shape}") + + mesh.vertices = mesh.vertices / scale + center + mesh.export(filename[:-4] + '.fixed.obj') ## .fixed.obj ## + np.save(filename[:-4] + '.npy', sdf) ## .npy ## + print('It takes %.4f seconds to process %s' % (t1-t0, filename)) + +def create_all_data_file(obj_idx): + + # ###### only for the grab dataset only currently ######## + GRAB_data_root = "/data1/xueyi/GRAB_extracted_test/train" + # # /data/xueyi/GRAB/GRAB_extracted_test/train/102_obj.npy + if not os.path.exists(GRAB_data_root): + GRAB_data_root = "/data/xueyi/GRAB/GRAB_extracted_test/train" + + # self.conf['model.obj_sdf_fn'] = os.path.join(GRAB_data_root, f"{self.obj_idx}_obj.npy") + # self.conf['model.kinematic_mano_gt_sv_fn'] = os.path.join(GRAB_data_root, f"{self.obj_idx}_sv_dict.npy") + # self.conf['model.scaled_obj_mesh_fn'] = os.path.join(GRAB_data_root, f"{self.obj_idx}_obj.obj") + # 
self.conf['model.ckpt_fn'] = "" + # self.conf['model.load_optimized_init_transformations'] = "" + + ## grab data root ## + + # obj_idx = + + obj_sdf_fn = os.path.join(GRAB_data_root, f"{obj_idx}_obj.npy") + kinematic_mano_gt_sv_fn = os.path.join(GRAB_data_root, f"{obj_idx}_sv_dict.npy") + scaled_obj_mesh_fn = os.path.join(GRAB_data_root, f"{obj_idx}_obj.obj") + + kinematics_sv_dict = np.load(kinematic_mano_gt_sv_fn, allow_pickle=True).item() + obj_sdf = np.load(obj_sdf_fn, allow_pickle=True) + all_data = { + 'sv_dict': kinematics_sv_dict, + 'obj_sdf': obj_sdf + } + os.makedirs("./data", exist_ok=True) + all_data_sv_fn = os.path.join("./data", f"{obj_idx}_grab_all_data.npy") + np.save(all_data_sv_fn, all_data) + + print(f"all data saved to {all_data_sv_fn}") + + # self.ckpt_fn = self.conf['model.ckpt_fn'] + # self.load_optimized_init_transformations = self.conf['model.load_optimized_init_transformations'] + + + pass + +if __name__=='__main__': + + obj_idx = 102 + create_all_data_file(obj_idx=obj_idx) + exit(0) + + # compute_sdf('/data/xueyi/taco/processed_data/20230917/right_20230917_032.obj') + # exit(0) + + pkl_fn = "/data3/datasets/xueyi/taco/processed_data/20230917/right_20230917_004.pkl" + pkl_fn = "/data3/datasets/xueyi/taco/processed_data/20230917/right_20230917_030.pkl" + pkl_fn = "/data3/datasets/xueyi/taco/processed_data/20230917/right_20230917_037.pkl" + pkl_fn = "/data/xueyi/taco/processed_data/20230917/right_20230917_037.pkl" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230917" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231010" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230919" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231104" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231105" + pkl_root_folder = "/data/xueyi/taco/processed_data/20230917" ## pkl folder + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231102" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230923" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230926" + pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20230927" + pkl_root_folder = "/data/xueyi/taco/processed_data/20230930" + pkl_root_folder = "/data2/datasets/xueyi/taco/processed_data/20231031" + # /data2/datasets/xueyi/taco/processed_data/20231027 + pkl_root_folder = "/data2/datasets/xueyi/taco/processed_data/20231027" + pkl_root_folder = "/data2/datasets/xueyi/taco/processed_data/20231026" + pkl_root_folder = "/data2/datasets/xueyi/taco/processed_data/20231024" + pkl_root_folder = "/data2/datasets/xueyi/taco/processed_data/20231020" + # pkl_root_folder = "/data2/datasets/xueyi/taco/processed_data/20230929" + # pkl_root_folder = "/data2/datasets/xueyi/taco/processed_data/20230930" + tot_pkl_fns = os.listdir(pkl_root_folder) + tot_pkl_fns = [fn for fn in tot_pkl_fns if fn.endswith(".pkl")] + # tot_pkl_fns = ['right_20230930_001.pkl'] + # tot_pkl_fns = ['/data/xueyi/taco/processed_data/20230917/right_20230917_032.obj'] + for i_fn, cur_pkl_fn in enumerate(tot_pkl_fns): + cur_full_pkl_fn = os.path.join(pkl_root_folder, cur_pkl_fn) + extract_obj_meshes_taco(cur_full_pkl_fn) + # compute_sdf(cur_full_fn) + # exit(0) + + # pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231104" + # pkl_root_folder = "/data3/datasets/xueyi/taco/processed_data/20231105" + # pkl_root_folder = "/data/xueyi/taco/processed_data/20230917" + # pkl_root_folder = 
"/data3/datasets/xueyi/taco/processed_data/20231102" + tot_fns = os.listdir(pkl_root_folder) + tot_fns = [fn for fn in tot_fns if fn.endswith(".obj")] + for cur_fn in tot_fns: + cur_full_fn = os.path.join(pkl_root_folder, cur_fn) + compute_sdf(cur_full_fn) + + # obj_mesh_fn = "/data3/datasets/xueyi/taco/processed_data/20231104/right_20231104_017.obj" + # compute_sdf(obj_mesh_fn) + exit(0) + + extract_obj_meshes_taco(pkl_fn) + exit(0) + + sv_dict_fn = "/data2/datasets/sim/arctic_processed_data/processed_sv_dicts/s01/box_grab_01_extracted_dict.npy" + # extract_obj_meshes(sv_dict_fn=sv_dict_fn) + + extract_obj_meshes_boundingbox(sv_dict_fn) + + diff --git a/utils/test_checkpoint_files.py b/utils/test_checkpoint_files.py new file mode 100644 index 0000000000000000000000000000000000000000..9a9d733cb279276aeaf59abee42ba490c1f33fee --- /dev/null +++ b/utils/test_checkpoint_files.py @@ -0,0 +1,16 @@ + +import torch + + +def check_checkpoint_file(ckpt_fn): + loaded_state_dict = torch.load(ckpt_fn, map_location='cpu') + mano_robot_init_states_dict = loaded_state_dict['mano_robot_init_states'] + print(type(mano_robot_init_states_dict)) + print(mano_robot_init_states_dict.keys()) + mano_init_states_weight = mano_robot_init_states_dict['weight'] + print(f"weight: {mano_init_states_weight.shape}, {type(mano_init_states_weight)}") + +if __name__=='__main__': + ckpt_fn = "/data3/datasets/diffsim/neus/exp/hand_test_routine_2_light_color_wtime_active_passive/wmask_reverse_value_totviews_tag_forces_rule_v18__thres_000_wglb_onlyhandtrack_sf_10000_/checkpoints/ckpt_052000.pth" + + check_checkpoint_file(ckpt_fn) diff --git a/utils/utils.py b/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c9cba8c99290453d4a06cda3dcef73b754278ef1 --- /dev/null +++ b/utils/utils.py @@ -0,0 +1,28 @@ +import os +import numpy as np + + + +def read_obj_file_ours(obj_fn, sub_one=False): + vertices = [] + faces = [] + with open(obj_fn, "r") as rf: + for line in rf: + items = line.strip().split(" ") + if items[0] == 'v': + cur_verts = items[1:] + cur_verts = [float(vv) for vv in cur_verts] + vertices.append(cur_verts) + elif items[0] == 'f': + cur_faces = items[1:] # faces + cur_face_idxes = [] + for cur_f in cur_faces: + try: + cur_f_idx = int(cur_f.split("/")[0]) + except: + cur_f_idx = int(cur_f.split("//")[0]) + cur_face_idxes.append(cur_f_idx if not sub_one else cur_f_idx - 1) + faces.append(cur_face_idxes) + rf.close() + vertices = np.array(vertices, dtype=np.float) + return vertices, faces diff --git a/utils/utils/__init__.py b/utils/utils/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/utils/utils/backup_dataset.py b/utils/utils/backup_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..5742ce7e833c6e266c13f75f73815d7a5fc332f8 --- /dev/null +++ b/utils/utils/backup_dataset.py @@ -0,0 +1,53 @@ +import os +import shutil + +def copy_folder_contents(source_folder, destination_folder): + # 检查目标文件夹是否存在,如果不存在则创建 + if not os.path.exists(destination_folder): + os.makedirs(destination_folder) + + # 遍历源文件夹中的所有文件和子文件夹 + for root, dirs, files in os.walk(source_folder): + # 构建目标文件夹的对应子文件夹路径 + relative_path = os.path.relpath(root, source_folder) + destination_dir = os.path.join(destination_folder, relative_path) + + # 检查目标文件夹是否存在,如果不存在则创建 + if not os.path.exists(destination_dir): + os.makedirs(destination_dir) + + # 复制文件到目标文件夹中 + for file in files: + source_file = os.path.join(root, file) + 
destination_file = os.path.join(destination_dir, file) + + # 检查目标文件是否存在,如果存在则跳过 + if not os.path.exists(destination_file): + shutil.copy2(source_file, destination_file) + +if __name__ == '__main__': + date_list = ['20230915', '20230917', '20230927', '20230928'] + file_list = ['mano', 'src'] + for date in date_list: + src_root = f'/share/hlyang/results/dataset/{date}' + dst_root = f'/share/hlyang/results/dataset_old/{date}' + os.makedirs(dst_root, exist_ok=True) + video_list = os.listdir(src_root) + video_list.sort() + + for video_id in video_list: + try: + src_video_root = os.path.join(src_root, video_id) + for file in file_list: + assert os.path.exists(os.path.join(src_video_root, file)) + + dst_video_root = os.path.join(dst_root, video_id) + os.makedirs(dst_video_root, exist_ok=True) + + for file in file_list: + src_dir = os.path.join(src_video_root, file) + dst_dir = os.path.join(dst_video_root, file) + copy_folder_contents(src_dir, dst_dir) + except: + continue + \ No newline at end of file diff --git a/utils/utils/cal_all_valid.py b/utils/utils/cal_all_valid.py new file mode 100644 index 0000000000000000000000000000000000000000..c8295a7b313252f44b0edbcf10e3908002baf5fb --- /dev/null +++ b/utils/utils/cal_all_valid.py @@ -0,0 +1,54 @@ +import os +import sys +sys.path.append('.') +from utils.hoi_io import get_valid_video_list, get_num_frame, get_num_frame_v2 +from utils.get_world_mesh_from_mano_params import load_world_meshes_acc +import argparse +import shutil +from tqdm import tqdm +import pickle +import traceback + +def add_a_line(path, line): + with open(path, 'a') as f: + f.write(line) + +if __name__ == "__main__": + + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + root = '/share/hlyang/results' + + # parser = argparse.ArgumentParser() + # date = '20230917' + date_list = ['20230917', '20230919', '20230923', '20230926', '20230927', '20230928', '20230929', '20231002', '20231005', '20231006', '20231010', '20231013', '20231015'] + cnt = 0 + + for date in date_list: + video_list = get_valid_video_list(date, remove_hand=True) + + for video_id in tqdm(video_list): + try: + num_frame = get_num_frame_v2(video_id) + start = 1 + end = num_frame + # frame_list = [str(frame).zfill(5) for frame in range(start, end + 1)] + last_frame = str(end).zfill(5) + + video_src_root = os.path.join(root, date, video_id) + os.path.exists(video_src_root) + + # hand mesh + mesh_exp_name = 'world_mesh_batch' + right_hand_mesh = load_world_meshes_acc(date, video_id, mesh_exp_name, [last_frame], right_hand_bool=True)[last_frame] + left_hand_mesh = load_world_meshes_acc(date, video_id, mesh_exp_name, [last_frame], right_hand_bool=False)[last_frame] + assert right_hand_mesh is not None + assert left_hand_mesh is not None + + cnt += 1 + + except Exception as err: + # traceback.print_exc() + # print(err) + continue + + print(cnt) \ No newline at end of file diff --git a/utils/utils/cal_computer_time_diff.py b/utils/utils/cal_computer_time_diff.py new file mode 100755 index 0000000000000000000000000000000000000000..b4abbda97ceaccb4fc3c296aa49d230bfd9ae270 --- /dev/null +++ b/utils/utils/cal_computer_time_diff.py @@ -0,0 +1,151 @@ +''' +解析录制时的{camera_id}__FrameTimeStamp.txt,根据timestamp丢弃录制时有丢失的帧,并将剩余的帧数据存为pkl。之后video2img.py等一系列程序将基于该pkl进行处理。 + +生成{date}_record.txt {date}_2m1.txt + +TODO:不同的相机从第一帧开始timestamp就不同,应该考虑这个误差。 +TODO:完善并实装该功能。目前仅仅统计每一个视角的frame数量。 + +example: +python utils/process_frame_loss.py 
--root_dir /share/datasets/HOI-mocap/20230904 --video_id 20230904_01 +''' + +import traceback +import argparse +import os +import pickle +from shutil import copy +from tqdm import tqdm +import numpy as np + +time_diff = None + +def cal_common_timestamps(timestamps_list, error_threshold=14): + timestamps_list = [np.array(timestamps) for timestamps in timestamps_list] + + common_timestamps = timestamps_list[0] + for t_idx, timestamps in enumerate(timestamps_list[1:]): + common_timestamps_ = [] + for timestamp in timestamps: + condition = (common_timestamps >= timestamp - error_threshold) & (common_timestamps <= timestamp + error_threshold) + within_range = common_timestamps[condition] + + if len(within_range) == 1: # 匹配上了 + res = within_range[0] + # 做个平滑 + modified_cm_ts = (timestamp + res) // 2 + common_timestamps_.append(modified_cm_ts) + elif len(within_range) == 0: # 没匹配上 + continue + else: # ??? + res = within_range[np.abs(within_range - timestamp).argmin()] + modified_cm_ts = (timestamp + res) // 2 + common_timestamps_.append(modified_cm_ts) + # raise ValueError(f'len(within_range) should be 0 or 1, but got {len(within_range)}') + + common_timestamps = np.array(common_timestamps_) + + return common_timestamps.tolist() + +def cal_computer_time_diff(camera_list, root_dir, video_id, error_threshold): + date = video_id[:8] + global time_diff + + time_diff_record_root = '/share/hlyang/results/record' + time_diff_reference_record_root = '/share/hlyang/results/reference_record' + time_diff_record_path = os.path.join(time_diff_reference_record_root, f'{date}_record.txt') + time_diff_data_path = os.path.join(time_diff_record_root, f'{date}_2m1.txt') + + assert os.path.exists(root_dir) + video_dir = os.path.join(root_dir, video_id, 'rgb') + assert os.path.exists(video_dir), video_dir + + # check if files exist + # for camera_id in camera_list: + # assert os.path.exists(os.path.join((video_dir, camera_id + '_FrameTimeStamp.txt')) + # assert os.path.exists(os.path.join((video_dir, camera_id + '.mp4')) + + computer1_camera_list = ['21218078', '22139906', '22139908', '22139910', '22139911', '22139913', '22139914', '22139946'] + computer2_camera_list = [camera for camera in camera_list if camera not in computer1_camera_list] + assert len(computer2_camera_list) == 4 + computer1_ids = [camera_list.index(camera) for camera in computer1_camera_list] + computer2_ids = [camera_list.index(camera) for camera in computer2_camera_list] + + timestamps_list = [] + for camera_id in camera_list: + timestamp_path = os.path.join(video_dir, camera_id + '_FrameTimeStamp.txt') + + if not os.path.exists(timestamp_path): + record_line = f'{video_id} lack timestamp file\n' + with open(time_diff_record_path, 'a') as f: + f.write(record_line) + return + + with open(timestamp_path, 'r') as f: + lines = f.readlines() + + timestamps = [] + cnt = 0 + for line in lines: + parts = line.strip().split() + if len(parts) == 2: + timestamp = parts[1] + timestamps.append(int(timestamp)) + cnt += 1 + + timestamps_list.append(timestamps) + + computer1_timestamps_list = [timestamps_list[idx] for idx in computer1_ids] + computer2_timestamps_list = [timestamps_list[idx] for idx in computer2_ids] + computer1_common_timestamps = cal_common_timestamps(computer1_timestamps_list, error_threshold) + computer2_common_timestamps = cal_common_timestamps(computer2_timestamps_list, error_threshold) + + len_computer1_common_timestamps = len(computer1_common_timestamps) + len_computer2_common_timestamps = len(computer2_common_timestamps) + # 
print(computer1_common_timestamps[0], computer2_common_timestamps[0]) + + record_line = f'{video_id} ' + for idx, camera in enumerate(camera_list): + record_line += f'{camera}: {len(timestamps_list[idx])} ' + record_line += f'computer1: {len_computer1_common_timestamps} computer2: {len_computer2_common_timestamps}\n' + + with open(time_diff_record_path, 'a') as f: + f.write(record_line) + + if len_computer1_common_timestamps == len_computer2_common_timestamps: + time_diff_2m1 = computer2_common_timestamps[0] - computer1_common_timestamps[0] + + with open(time_diff_data_path, 'a') as f: + f.write(f'{time_diff_2m1} {video_id}\n') + + time_diff = time_diff_2m1 + + if time_diff is not None: + assert abs(time_diff_2m1 - time_diff) <= error_threshold + + + return + # if len_computer1_common_timestamps != len_computer2_common_timestamps: + # return + +if __name__ == '__main__': + + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + + date = '20231015' + root_dir = f'/share/datasets/HOI-mocap/{date}' + + dir_list = os.listdir(root_dir) + video_list = [dir for dir in dir_list if dir != 'camera_params' and 'cali' not in dir and not dir.endswith('txt')] + video_list.sort() + + print(video_list) + + error_threshold = 15 + + for video_id in tqdm(video_list): + try: + cal_computer_time_diff(camera_list, root_dir, video_id, error_threshold) + except Exception as error: + traceback.print_exc() + continue diff --git a/utils/utils/cal_computer_time_diff2.py b/utils/utils/cal_computer_time_diff2.py new file mode 100755 index 0000000000000000000000000000000000000000..150a4d27bdae079fa2860a620e682f6bebcfd338 --- /dev/null +++ b/utils/utils/cal_computer_time_diff2.py @@ -0,0 +1,158 @@ +''' +解析录制时的{camera_id}__FrameTimeStamp.txt,根据timestamp丢弃录制时有丢失的帧,并将剩余的帧数据存为pkl。之后video2img.py等一系列程序将基于该pkl进行处理。 + +生成{date}_record.txt {date}_2m1.txt + +TODO:不同的相机从第一帧开始timestamp就不同,应该考虑这个误差。 +TODO:完善并实装该功能。目前仅仅统计每一个视角的frame数量。 + +example: +python utils/process_frame_loss.py --root_dir /share/datasets/HOI-mocap/20230904 --video_id 20230904_01 +''' + +import traceback +import argparse +import os +import pickle +from shutil import copy +from tqdm import tqdm +import numpy as np + +time_diff = None + +def cal_common_timestamps(timestamps_list, error_threshold=14): + timestamps_list = [np.array(timestamps) for timestamps in timestamps_list] + + common_timestamps = timestamps_list[0] + for t_idx, timestamps in enumerate(timestamps_list[1:]): + common_timestamps_ = [] + for timestamp in timestamps: + condition = (common_timestamps >= timestamp - error_threshold) & (common_timestamps <= timestamp + error_threshold) + within_range = common_timestamps[condition] + + if len(within_range) == 1: # 匹配上了 + res = within_range[0] + # 做个平滑 + modified_cm_ts = (timestamp + res) // 2 + common_timestamps_.append(modified_cm_ts) + elif len(within_range) == 0: # 没匹配上 + continue + else: # ??? 
+ res = within_range[np.abs(within_range - timestamp).argmin()] + modified_cm_ts = (timestamp + res) // 2 + common_timestamps_.append(modified_cm_ts) + # raise ValueError(f'len(within_range) should be 0 or 1, but got {len(within_range)}') + + common_timestamps = np.array(common_timestamps_) + + return common_timestamps.tolist() + +def cal_computer_time_diff(camera_list, date_root_dir, save_root, video_id, error_threshold): + date = video_id[:8] + global time_diff + + time_diff_record_root = os.path.join(save_root, 'record') + time_diff_reference_record_root = os.path.join(save_root, 'reference_record') + # time_diff_record_root = '/share/hlyang/results/record' + # time_diff_reference_record_root = '/share/hlyang/results/reference_record' + time_diff_record_path = os.path.join(time_diff_reference_record_root, f'{date}_record.txt') + time_diff_data_path = os.path.join(time_diff_record_root, f'{date}_2m1.txt') + + assert os.path.exists(date_root_dir) + video_dir = os.path.join(date_root_dir, video_id, 'rgb') + assert os.path.exists(video_dir), video_dir + + # check if files exist + # for camera_id in camera_list: + # assert os.path.exists(os.path.join((video_dir, camera_id + '_FrameTimeStamp.txt')) + # assert os.path.exists(os.path.join((video_dir, camera_id + '.mp4')) + + computer1_camera_list = ['21218078', '22139906', '22139908', '22139910', '22139911', '22139913', '22139914', '22139946'] + computer2_camera_list = [camera for camera in camera_list if camera not in computer1_camera_list] + assert len(computer2_camera_list) == 4 + computer1_ids = [camera_list.index(camera) for camera in computer1_camera_list] + computer2_ids = [camera_list.index(camera) for camera in computer2_camera_list] + + timestamps_list = [] + for camera_id in camera_list: + timestamp_path = os.path.join(video_dir, camera_id + '_FrameTimeStamp.txt') + + if not os.path.exists(timestamp_path): + record_line = f'{video_id} lack timestamp file\n' + with open(time_diff_record_path, 'a') as f: + f.write(record_line) + return + + with open(timestamp_path, 'r') as f: + lines = f.readlines() + + timestamps = [] + cnt = 0 + for line in lines: + parts = line.strip().split() + if len(parts) == 2: + timestamp = parts[1] + timestamps.append(int(timestamp)) + cnt += 1 + + timestamps_list.append(timestamps) + + computer1_timestamps_list = [timestamps_list[idx] for idx in computer1_ids] + computer2_timestamps_list = [timestamps_list[idx] for idx in computer2_ids] + computer1_common_timestamps = cal_common_timestamps(computer1_timestamps_list, error_threshold) + computer2_common_timestamps = cal_common_timestamps(computer2_timestamps_list, error_threshold) + + len_computer1_common_timestamps = len(computer1_common_timestamps) + len_computer2_common_timestamps = len(computer2_common_timestamps) + # print(computer1_common_timestamps[0], computer2_common_timestamps[0]) + + record_line = f'{video_id} ' + for idx, camera in enumerate(camera_list): + record_line += f'{camera}: {len(timestamps_list[idx])} ' + record_line += f'computer1: {len_computer1_common_timestamps} computer2: {len_computer2_common_timestamps}\n' + + with open(time_diff_record_path, 'a') as f: + f.write(record_line) + + if len_computer1_common_timestamps == len_computer2_common_timestamps: + time_diff_2m1 = computer2_common_timestamps[0] - computer1_common_timestamps[0] + + with open(time_diff_data_path, 'a') as f: + f.write(f'{time_diff_2m1} {video_id}\n') + + time_diff = time_diff_2m1 + + if time_diff is not None: + assert abs(time_diff_2m1 - time_diff) <= 
error_threshold + + + return + # if len_computer1_common_timestamps != len_computer2_common_timestamps: + # return + +if __name__ == '__main__': + + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + + date = '20230930' + # upload_root_dir = '/share/datasets/HOI-mocap' + upload_root_dir = '/data2/HOI-mocap' + upload_date_root = os.path.join(upload_root_dir, date) + + # save_root = '/share/hlyang/results' + save_root = '/data2/hlyang/results' + + dir_list = os.listdir(upload_date_root) + video_list = [dir for dir in dir_list if dir != 'camera_params' and 'cali' not in dir and not dir.endswith('txt')] + video_list.sort() + + print(video_list) + + error_threshold = 15 + + for video_id in tqdm(video_list): + try: + cal_computer_time_diff(camera_list, upload_date_root, save_root, video_id, error_threshold) + except Exception as error: + traceback.print_exc() + continue diff --git a/utils/utils/check_pipeline_results_acc_batch.py b/utils/utils/check_pipeline_results_acc_batch.py new file mode 100644 index 0000000000000000000000000000000000000000..177603227f2b3a00366172f739b10d86c51cbd7c --- /dev/null +++ b/utils/utils/check_pipeline_results_acc_batch.py @@ -0,0 +1,55 @@ +import os +import sys +sys.path.append('.') +from utils.hoi_io2 import get_valid_video_list, get_num_frame, get_num_frame_v2 +from utils.get_world_mesh_from_mano_params2 import load_world_meshes_acc +import argparse +import shutil +from tqdm import tqdm +import pickle +import traceback +from utils.organize_dataset import organize_record_file + +def add_a_line(path, line): + with open(path, 'a') as f: + f.write(line) + +if __name__ == "__main__": + + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + # root = '/share/hlyang/results' + root = '/data2/hlyang/results' + + date_list = ['20230930'] + + for date in date_list: + video_list = get_valid_video_list(root, date, consider_pipiline_failed=False, remove_hand=True, consider_nokov_failed=True) + + pipeline_results_check_failed_record_path = f'{root}/reference_record/{date}_results_check_failed.txt' + + for video_id in tqdm(video_list): + try: + num_frame = get_num_frame_v2(root, video_id) + start = 1 + end = num_frame + # frame_list = [str(frame).zfill(5) for frame in range(start, end + 1)] + last_frame = str(end).zfill(5) + + video_src_root = os.path.join(root, date, video_id) + os.path.exists(video_src_root) + + # hand mesh + mesh_exp_name = 'world_mesh_batch' + right_hand_mesh = load_world_meshes_acc(root, date, video_id, mesh_exp_name, [last_frame], right_hand_bool=True)[last_frame] + left_hand_mesh = load_world_meshes_acc(root, date, video_id, mesh_exp_name, [last_frame], right_hand_bool=False)[last_frame] + assert right_hand_mesh is not None + assert left_hand_mesh is not None + + except Exception as err: + # traceback.print_exc() + # print(err) + add_a_line(pipeline_results_check_failed_record_path, f'{video_id}\n') + continue + + # if os.path.exists(pipeline_results_check_failed_record_path): + # organize_record_file(pipeline_results_check_failed_record_path) \ No newline at end of file diff --git a/utils/utils/check_pipeline_results_acc_old.py b/utils/utils/check_pipeline_results_acc_old.py new file mode 100644 index 0000000000000000000000000000000000000000..e0ac47a46bd8969787594f430cc60d71a346eab7 --- /dev/null +++ 
b/utils/utils/check_pipeline_results_acc_old.py @@ -0,0 +1,64 @@ +import os +import sys +sys.path.append('.') +from utils.hoi_io import get_valid_video_list, get_num_frame +import argparse +import shutil +from tqdm import tqdm +import pickle +import traceback + +def add_a_line(path, line): + with open(path, 'a') as f: + f.write(line) + +if __name__ == "__main__": + + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + + # parser = argparse.ArgumentParser() + date = '20230929' + video_list = get_valid_video_list(date, consider_pipiline_failed=True) + + results_root = f'/share/hlyang/results/backup/{date}_backup' + # dataset_root = '/share/hlyang/results/dataset' + + # dataset_date_dir = os.path.join(dataset_root, date) + # os.makedirs(dataset_date_dir, exist_ok=True) + + pipeline_results_check_failed_record_path = f'/share/hlyang/results/record/{date}_results_check_failed.txt' + + for video_id in tqdm(video_list): + try: + # num_frame = get_num_frame(video_id) + # start = 1 + # end = num_frame + # frame_list = [str(frame).zfill(5) for frame in range(start, end + 1)] + + video_src_root = os.path.join(results_root, video_id) + assert os.path.exists(video_src_root) + + metadata_dir = os.path.join(video_src_root, 'metadata') + metadata_list = [filename for filename in os.listdir(metadata_dir) if filename.endswith('.pkl')] + assert len(metadata_list) > 0 + metadata_path = os.path.join(metadata_dir, metadata_list[0]) + + with open(metadata_path, 'rb') as f: + metadata = pickle.load(f) + num_frame = metadata['num_frame'] + + # hand mesh + right_hand_mesh_src_dir = os.path.join(video_src_root, 'get_world_mesh_from_mano_params', 'meshes', 'right_hand') + left_hand_mesh_src_dir = os.path.join(video_src_root, 'get_world_mesh_from_mano_params', 'meshes', 'left_hand') + + right_hand_mesh_last_path = os.path.join(right_hand_mesh_src_dir, f'hand_{str(num_frame).zfill(5)}.obj') + left_hand_mesh_last_path = os.path.join(left_hand_mesh_src_dir, f'hand_{str(num_frame).zfill(5)}.obj') + assert os.path.exists(right_hand_mesh_last_path), right_hand_mesh_last_path + assert os.path.exists(left_hand_mesh_last_path), left_hand_mesh_last_path + + except Exception as err: + traceback.print_exc() + print(err) + add_a_line(pipeline_results_check_failed_record_path, f'{video_id}\n') + continue + diff --git a/utils/utils/clean.py b/utils/utils/clean.py new file mode 100644 index 0000000000000000000000000000000000000000..6685487abf8bac6d5dc0c5e103f43687ed1f6e39 --- /dev/null +++ b/utils/utils/clean.py @@ -0,0 +1,46 @@ +import os +import sys +sys.path.append('.') +from shutil import copy, rmtree +from os.path import join +from hoi_io2 import get_valid_video_list +from tqdm import tqdm + +# if __name__ == '__main__': + +# date_list = ['20230923'] + +# for date in date_list: +# # video_list = [f'20230915_{str(i).zfill(3)}' for i in (4, 6, 8, 9, 10, 11, 12, 13, 15)] +# # video_list = get_valid_video_list(date) +# video_list = ['20230930_hand1'] + +# # clean_dir_list = ['4_mesh_vis'] +# clean_dir_list = ['4_mesh_vis_old', 'crop', 'fake_mask_track', 'fake_mask_track_failed', 'fit_hand_joint_ransac_batch_by_squence', 'get_world_mesh_from_mano_params', 'joint_ransac_every_joint_triangulation', 'mask', 'mmpose'] +# for video_id in tqdm(video_list): +# root = join('/share/hlyang/results', video_id) +# for dir in clean_dir_list: +# path = join(root, dir) +# if os.path.exists(path): +# print('clean dir', path) +# rmtree(path) + 
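+# Hypothetical helper (a minimal sketch, not called anywhere in this script):
+# the main block below irreversibly rmtree()s every directory in clean_dir_list
+# for each valid video, so a dry run like this can first list what would be
+# removed before actually deleting anything.
+def preview_clean(root_video, clean_dir_list):
+    # Return only the subdirectories that exist and would therefore be deleted.
+    return [d for d in clean_dir_list if os.path.exists(join(root_video, d))]
+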
+if __name__ == '__main__': + + root = '/data2/hlyang/results' + date_list = ['20231019'] + + for date in date_list: + # given_list = [f'20231013_{str(i).zfill(3)}' for i in range(132, 192)] + video_list = get_valid_video_list(root, date, remove_hand=True) + + # clean_dir_list = ['4_mesh_vis'] + # clean_dir_list = ['crop', 'crop_batch', 'fake_mask_track', 'fit_hand_batch', 'joint_opt_batch', 'mask', 'mmpose', 'mmpose_batch', 'ransac_batch', 'world_mesh_batch'] + clean_dir_list = ['sub_video'] + for video_id in tqdm(video_list): + root_video = join(root, date, video_id) + for dir in clean_dir_list: + path = join(root_video, dir) + if os.path.exists(path): + print('clean dir', path) + rmtree(path) diff --git a/utils/utils/correct_start_frame.py b/utils/utils/correct_start_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..90797ac204d2d434345344ee992dce3072390323 --- /dev/null +++ b/utils/utils/correct_start_frame.py @@ -0,0 +1,82 @@ +import os +import sys +sys.path.append('.') +from shutil import copy, rmtree, copytree +from os.path import join +from hoi_io2 import get_valid_video_list +from tqdm import tqdm +import pickle + +def load_valid_start_dict(root, date): + path = os.path.join(root, 'record', f'{date}_valid_start.txt') + valid_start = {} + + with open(path, 'r') as f: + lines = f.readlines() + + for line in lines: + parts = line.strip().split() + if len(parts) == 2: + valid_start[parts[0]] = parts[1] + + return valid_start + +def correct_metadata(metadata_dir, camera_list, valid_start): + cm_ts_path = os.path.join(metadata_dir, 'common_timestamp.txt') + valid_start_int = int(valid_start) + + cm_ts_list = [] + with open(cm_ts_path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip() + if len(parts) > 0: + cm_ts_list.append(line) + + with open(cm_ts_path, 'w') as f: + for line in cm_ts_list[valid_start_int-1:]: + f.write(line) + + for camera in camera_list: + metadata_path = os.path.join(metadata_dir, f'{camera}.pkl') + with open(metadata_path, 'rb') as f: + metadata = pickle.load(f) + + metadata['num_frame'] = metadata['num_frame'] - (valid_start_int-1) + metadata['cnt_list'] = metadata['cnt_list'][valid_start_int-1:] + + cnt2frame_id_dict = metadata['cnt2frame_id_dict'] + + cnt2frame_id_dict_new = {} + for cnt, frame in cnt2frame_id_dict.items(): + if int(frame) < valid_start_int: + continue + else: + cnt2frame_id_dict_new[cnt] = str(int(frame) - valid_start_int + 1).zfill(5) + metadata['cnt2frame_id_dict'] = cnt2frame_id_dict_new + metadata['frame_id2cnt_dict'] = {v: k for k, v in cnt2frame_id_dict_new.items()} + + with open(metadata_path, 'wb') as f: + pickle.dump(metadata, f) + +if __name__ == '__main__': + + root = '/data2/hlyang/results' + upload_root = '/data2/HOI-mocap' + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + date = '20231019' + + video_list = get_valid_video_list(root, date, consider_nokov_failed=True) + # video_list = ['20231019_001'] + valid_start_dict = load_valid_start_dict(root, date) + + for video_id in video_list: + valid_start = valid_start_dict[video_id] + + metadata_dir = os.path.join(root, date, video_id, 'metadata') + metadata_backup_dir = os.path.join(root, date, video_id, 'metadata_backup') + + copytree(metadata_dir, metadata_backup_dir) + + correct_metadata(metadata_dir, camera_list, valid_start) + \ No newline at end of file diff --git a/utils/utils/cp_dataset.py 
b/utils/utils/cp_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..4080ba07471987d93b6361ecf97dad5ba818efb2 --- /dev/null +++ b/utils/utils/cp_dataset.py @@ -0,0 +1,73 @@ +import traceback +import sys +sys.path.append('.') +import os +import shutil +from tqdm import tqdm + +def copy_file_or_dir(src_path, dst_path): + if os.path.isfile(src_path): + shutil.copy(src_path, dst_path) + else: + shutil.copytree(src_path, dst_path) + print(f'{src_path} -> {dst_path}') + +def load_sequence_names_from_organized_record(path: str, date: str): + organized_sequence_list = [] + with open(path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) > 0 and parts[0].startswith(date): + organized_sequence_list.append(parts[0]) + + organized_sequence_list = list(set(organized_sequence_list)) + organized_sequence_list.sort(key=lambda x:int(x)) + + return organized_sequence_list + +def get_organized_date_list(path: str): + organized_date_list = [] + + with open(path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) > 0: + organized_date_list.append(parts[0][:8]) + + organized_date_list = list(set(organized_date_list)) + organized_date_list.sort() + + return organized_date_list + +if __name__ == '__main__': + + # dataset_src_root = '/share/hlyang/results/dataset' + # dataset_dst_root = '/data2/hlyang/results/dataset' + + # organized_dataset_record_path = '/share/hlyang/results/dataset/organized_record.txt' + + dataset_src_root = '/data2/hlyang/results/dataset' + dataset_dst_root = '/data3/hlyang/results/dataset' + + organized_dataset_record_path = '/data2/hlyang/results/dataset/organized_record.txt' + + date_list = get_organized_date_list(organized_dataset_record_path) + for date in date_list: + video_list = load_sequence_names_from_organized_record(organized_dataset_record_path, date) + for video_id in tqdm(video_list): + sequence_src_dir = os.path.join(dataset_src_root, date, video_id) + sequence_dst_dir = os.path.join(dataset_dst_root, date, video_id) + os.makedirs(sequence_dst_dir, exist_ok=True) + + dir_list = ['interaction_field', 'mano_wo_contact', 'rgb', 'src', 'egocentric_rgb.mp4'] + for dir in dir_list: + src_dir = os.path.join(sequence_src_dir, dir) + dst_dir = os.path.join(sequence_dst_dir, dir) + + # if os.path.exists(dst_dir): + # continue + # shutil.copytree(src_dir, dst_dir) + # print(f'{src_dir} -> {dst_dir}') + copy_file_or_dir(src_dir, dst_dir) \ No newline at end of file diff --git a/utils/utils/cp_hand_shape.py b/utils/utils/cp_hand_shape.py new file mode 100644 index 0000000000000000000000000000000000000000..8630d17817bdfe8a99daedcf6097b98ea50ad2f1 --- /dev/null +++ b/utils/utils/cp_hand_shape.py @@ -0,0 +1,24 @@ +import os +import sys +sys.path.append('.') +from tqdm import tqdm +import shutil + +if __name__ == '__main__': + filename_list = ['right_hand_shape.pkl', 'left_hand_shape.pkl'] + + root_src = '/share/hlyang/results' + # root_src = '/data2/hlyang/results' + src_date = '20231010' + src_dir = f'{root_src}/{src_date}/{src_date}_hand2/src' + + root_dst = '/data2/hlyang/results' + dst_date = '20230930' + dst_dir_list = [f'{root_dst}/{dst_date}/{dst_date}_{str(i).zfill(3)}/src' for i in range(121, 153)] + + for dst_dir in tqdm(dst_dir_list): + if os.path.isdir(dst_dir): + for filename in filename_list: + src_path = os.path.join(src_dir, filename) + dst_path = os.path.join(dst_dir, filename) + shutil.copy(src_path, dst_path) \ No newline at end of file diff 
--git a/utils/utils/cp_metadata.py b/utils/utils/cp_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..ad460cfc5dae9153777b0ce87b13eeecb65d1306 --- /dev/null +++ b/utils/utils/cp_metadata.py @@ -0,0 +1,27 @@ +import os +import shutil +from tqdm import tqdm + +if __name__ == '__main__': + date_list = ['20230923', '20230926', '20230927', '20230928', '20230929', '20231002'] + + for date in date_list: + + src_root = os.path.join('/share/hlyang/results', f'{date}_backup') + dst_root = os.path.join('/share/hlyang/results', date) + os.makedirs(dst_root, exist_ok=True) + + video_id_list = os.listdir(src_root) + video_id_list.sort() + + dir_list = ['metadata', 'src'] + + for video_id in tqdm(video_id_list): + src_video_root = os.path.join(src_root, video_id) + dst_video_root = os.path.join(dst_root, video_id) + os.makedirs(dst_video_root, exist_ok=True) + + for dir in dir_list: + src_dir = os.path.join(src_video_root, dir) + dst_dir = os.path.join(dst_video_root, dir) + shutil.copytree(src_dir, dst_dir) \ No newline at end of file diff --git a/utils/utils/cp_vis.py b/utils/utils/cp_vis.py new file mode 100644 index 0000000000000000000000000000000000000000..2a7f295b0070a440ca2e3de736115a335f7bbf4f --- /dev/null +++ b/utils/utils/cp_vis.py @@ -0,0 +1,30 @@ +import os +import sys +sys.path.append('.') +from shutil import copy +from os.path import join +from tqdm import tqdm +from utils.organize_dataset import load_sequence_names_from_organized_record + +if __name__ == '__main__': + root = '/data3/hlyang/results' + organized_dataset_record_path = '/data3/hlyang/results/dataset/organized_record.txt' + + # video_list = [f'20230928_{str(i).zfill(3)}' for i in (20, 23, 26, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 47, 48, 49, 50, 51, 52)] + + date = '20231027' + # video_list = get_valid_video_list(root, date, remove_hand=True, consider_nokov_failed=True, consider_pipiline_failed=True) + video_list = load_sequence_names_from_organized_record(organized_dataset_record_path, date) + + save_exp_name = '4_mesh_vis_addition_test_3' + video_name_suffix = '_30fps' + dst_dir = os.path.join(root, 'vis_dataset_test', save_exp_name) + os.makedirs(dst_dir, exist_ok=True) + + for video_id in tqdm(video_list): + src_path = join(root, date, video_id, save_exp_name, f'{video_id}{video_name_suffix}.mp4') + if not os.path.exists(src_path): + print(f'Not exist: {src_path}') + continue + dst_path = join(dst_dir, f'{video_id}{video_name_suffix}.mp4') + copy(src_path, dst_path) \ No newline at end of file diff --git a/utils/utils/crop_resize_no_another_hand.py b/utils/utils/crop_resize_no_another_hand.py new file mode 100755 index 0000000000000000000000000000000000000000..bbe34be09475602d1483176747ea73bc33b8a676 --- /dev/null +++ b/utils/utils/crop_resize_no_another_hand.py @@ -0,0 +1,883 @@ +''' +只crop可扣除一只手的图,没有resize。 + +example: +python utils/crop_resize_no_another_hand.py --video_id 20230818_04_old + +TODO:检查为什么运行到后面会越来越慢。似乎是卡在了load_bg_img,可以测一下这一行的时间。 re:好像只是单纯有点慢 +''' + +import os +import sys +sys.path.append('.') +import cv2 +import numpy as np +import os.path as osp +from tqdm import tqdm +import pickle +import multiprocessing as mlp +from utils.hoi_io import get_downsampled_seg_infos_batch, get_seg_infos_batch3, load_bg_img, get_downsampled_seg_infos_batch_v2, read_init_crop, cal_represent_frame_list, get_downsampled_seg_infos_batch_v2_acc_batch +from utils.scandir import scandir +import argparse +from time import time +import json + +def crop_from_mask(video_id: str, camera_list: 
str, frame_list: list[str]): + ''' + 当前帧如果没有某个mask,则用上一帧的bbox进行crop。 + ''' + + seg, downsample_factor = get_downsampled_seg_infos_batch(video_id, frame_list, camera_list) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + last_right_min_x = None + last_right_max_x = None + last_right_min_y = None + last_right_max_y = None + last_right_meanh = None + last_right_meanw = None + + last_left_min_x = None + last_left_max_x = None + last_left_min_y = None + last_left_max_y = None + last_left_meanh = None + last_left_meanw = None + + MAX_HEIGHT = 4095 + MAX_WIDTH = 2999 + MARGIN_SIZE = 50 + + # TODO:以下的x和y定义反了,需要更改 + for camera_idx, camera_id in enumerate(camera_list): + # left_hand_crop_dir = osp.join('/share/hlyang/results', video_id, 'crop_imgs_left_hand', camera_id) + # right_hand_crop_dir = osp.join('/share/hlyang/results', video_id, 'crop_imgs_right_hand', camera_id) + left_hand_crop_dir = osp.join('/share/hlyang/results', video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = osp.join('/share/hlyang/results', video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # upsample + mask_right_hand = cv2.resize(mask_right_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR) + mask_left_hand = cv2.resize(mask_left_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR) + + # 读入图片 + # timer1 = time() + img = load_bg_img(video_id, camera_id, frame_id) + # timer2 = time() + # print(timer2 - timer1) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = max(right_maxh - right_minh, right_maxw - right_minw) + right_min_x = max(0, right_midh - right_length // 2 - MARGIN_SIZE) + right_max_x = min(MAX_WIDTH, right_midh + right_length // 2 + MARGIN_SIZE) + right_min_y = max(0, right_midw - right_length // 2 - MARGIN_SIZE) + right_max_y = min(MAX_HEIGHT, right_midw + right_length // 2 + MARGIN_SIZE) + + last_right_min_x = right_min_x + last_right_max_x = right_max_x + last_right_min_y = right_min_y + last_right_max_y = right_max_y + last_right_meanh = right_meanh + last_right_meanw = right_meanw + elif frame_idx != 0 and len(mask_right_hand_idx[0]) == 0: + right_min_x = last_right_min_x + right_max_x = last_right_max_x + right_min_y = last_right_min_y + right_max_y = last_right_max_y + right_meanh = last_right_meanh + right_meanw = last_right_meanw + # else: + # print('no hand mask in the first frame!') + # exit(1) + else: + print('no hand mask in the first frame of this sub_frame_list!') + print('right_hand', camera_id, frame_id) + if frame_idx == 0 and frame_id != '00001': + info_path = osp.join(right_hand_crop_dir, camera_id + '_' + str(int(frame_id)-1).zfill(5) + '_crop_info.pkl') + assert os.path.exists(info_path) + with open(info_path, 'rb') as f: + right_min_x, right_max_x, right_min_y, right_max_y, right_meanh, right_meanw = 
pickle.load(f) + + last_right_min_x = right_min_x + last_right_max_x = right_max_x + last_right_min_y = right_min_y + last_right_max_y = right_max_y + last_right_meanh = right_meanh + last_right_meanw = right_meanw + else: + print('GG, no hand mask in the first frame!') + exit(1) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_x: right_max_x, right_min_y: right_max_y] + right_crop_info = [right_min_x, right_max_x, right_min_y, right_max_y, right_meanh, right_meanw] + + with open(osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info, f) + + cv2.imwrite(osp.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + + # test_img = np.zeros((3000, 4096, 3)) + # for y, x in zip(mask_left_hand_idx[0], mask_left_hand_idx[1]): + # cv2.circle(test_img, (x, y), 5, (255, 0, 0), -1) + # cv2.imwrite('./test.png',test_img) + + if len(mask_left_hand_idx[0]) != 0: + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + left_min_x = max(0, left_midh - left_length // 2 - MARGIN_SIZE) + left_max_x = min(MAX_WIDTH, left_midh + left_length // 2 + MARGIN_SIZE) + left_min_y = max(0, left_midw - left_length // 2 - MARGIN_SIZE) + left_max_y = min(MAX_HEIGHT, left_midw + left_length // 2 + MARGIN_SIZE) + + last_left_min_x = left_min_x + last_left_max_x = left_max_x + last_left_min_y = left_min_y + last_left_max_y = left_max_y + last_left_meanh = left_meanh + last_left_meanw = left_meanw + elif frame_idx != 0 and len(mask_left_hand_idx[0]) == 0: + left_min_x = last_left_min_x + left_max_x = last_left_max_x + left_min_y = last_left_min_y + left_max_y = last_left_max_y + left_meanh = last_left_meanh + left_meanw = last_left_meanw + else: + print('no hand mask in the first frame of this sub_frame_list!') + print('left_hand', camera_id, frame_id) + if frame_idx == 0 and frame_id != '00001': + info_path = osp.join(left_hand_crop_dir, camera_id + '_' + str(int(frame_id)-1).zfill(5) + '_crop_info.pkl') + assert os.path.exists(info_path) + with open(info_path, 'rb') as f: + left_min_x, left_max_x, left_min_y, left_max_y, left_meanh, left_meanw = pickle.load(f) + + last_left_min_x = left_min_x + last_left_max_x = left_max_x + last_left_min_y = left_min_y + last_left_max_y = left_max_y + last_left_meanh = left_meanh + last_left_meanw = left_meanw + else: + print(frame_idx, frame_id) + print('GG, no hand mask in the first frame!') + exit(1) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_x: left_max_x, left_min_y: left_max_y] + left_crop_info = [left_min_x, left_max_x, left_min_y, left_max_y, left_meanh, left_meanw] + + with open(osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info, f) + + cv2.imwrite(osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png'), left_crop) + +def crop(img, min_h, max_h, min_w, max_w, black_min_h=None, black_max_h=None, black_min_w=None, black_max_w=None): + img = 
img.copy() + + mid_h = (min_h + max_h) // 2 + mid_w = (min_w + max_w) // 2 + length = max(max_h - min_h, max_w - min_w) + + if [black_min_h, black_max_h, black_min_w, black_max_w] == [not None, not None, not None, not None]: + img[black_min_h: black_max_h, black_min_w, black_max_w] = 255.0 + + crop = img[min_h: max_h, min_w: max_w] + + return crop + +def apply_factor_and_margin(min_h, max_h, min_w, max_w, factor, margin_size, MAX_H, MAX_W): + # # avoid overflow + # min_h_ = min_h.astype(np.uint16) + # max_h_ = max_h.astype(np.uint16) + # min_w_ = min_w.astype(np.uint16) + # max_w_ = max_w.astype(np.uint16) + # mid_h = ((min_h_ + max_h_) // 2).astype(np.uint8) + # mid_w = ((min_w_ + max_w_) // 2).astype(np.uint8) + + mid_h = (min_h + max_h) // 2 + mid_w = (min_w + max_w) // 2 + + length = max(max_h - min_h, max_w - min_w) + length = int(length * factor) + half_length = length // 2 + + min_h = max(0, mid_h - half_length - margin_size, 0) + max_h = min(MAX_H, mid_h + half_length + margin_size) + min_w = max(0, mid_w - half_length - margin_size, 0) + max_w = min(MAX_W, mid_w + half_length + margin_size) + + return min_h, max_h, min_w, max_w, mid_h, mid_w + +def crop_hand_from_pos(video_id: str, camera_id: str, frame_id: str, pos, crop_factor = 1, margin_size = 0): + H = 3000 + W = 4096 + MAX_HEIGHT = H - 1 + MAX_WIDTH = W - 1 + + assert pos.shape == (2, 4) + # assert np.all(pos[...] >= 0), print(pos) + # assert np.all(pos[..., [0, 1]] <= MAX_HEIGHT), print(pos) + # assert np.all(pos[..., [2, 3]] <= MAX_WIDTH), print(pos) + + if np.all(pos[...] >= 0) and np.all(pos[..., [0, 1]] <= MAX_HEIGHT) and np.all(pos[..., [2, 3]] <= MAX_WIDTH): + + left_hand_crop_dir = osp.join('/share/hlyang/results', video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = osp.join('/share/hlyang/results', video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + bg = load_bg_img(video_id, camera_id, frame_id) + + # crop info + right_crop_info_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + (right_min_h, right_max_h, right_min_w, right_max_w) = pos[0] + right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w = apply_factor_and_margin(right_min_h, right_max_h, right_min_w, right_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + right_crop_info = [right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w] + + with open(right_crop_info_path, 'wb') as f: + pickle.dump(right_crop_info, f) + + left_crop_info_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + (left_min_h, left_max_h, left_min_w, left_max_w) = pos[1] + left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w = apply_factor_and_margin(left_min_h, left_max_h, left_min_w, left_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + left_crop_info = [left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w] + with open(left_crop_info_path, 'wb') as f: + pickle.dump(left_crop_info, f) + + right_crop = crop(bg, right_min_h, right_max_h, right_min_w, right_max_w, left_min_h, left_max_h, left_min_w, left_max_w) + right_crop_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(right_crop_path, right_crop) + + # left hand + left_crop = crop(bg, left_min_h, left_max_h, left_min_w, left_max_w, right_min_h, right_max_h, right_min_w, right_max_w) + left_crop_path = osp.join(left_hand_crop_dir, 
camera_id + '_' + frame_id + '.png') + cv2.imwrite(left_crop_path, left_crop) + +def crop_hand_from_pos_acc(date: str, crop_save_exp_name, crop_info_save_exp_name, video_id: str, camera_list: str, frame_list: str, represent_frame_id, img_batch, pos_batch, crop_factor = 1, margin_size = 0): + H = 3000 + W = 4096 + MAX_HEIGHT = H - 1 + MAX_WIDTH = W - 1 + + # assert pos.shape == (2, 4) + # assert np.all(pos[...] >= 0), print(pos) + # assert np.all(pos[..., [0, 1]] <= MAX_HEIGHT), print(pos) + # assert np.all(pos[..., [2, 3]] <= MAX_WIDTH), print(pos) + + # TODO + right_crop_info_batch = {} + left_crop_info_batch = {} + + for f_idx, frame_id in enumerate(frame_list): + right_crop_info_batch[frame_id] = {} + left_crop_info_batch[frame_id] = {} + + for c_idx, camera_id in enumerate(camera_list): + if camera_id not in pos_batch[frame_id].keys(): + continue + pos = pos_batch[frame_id][camera_id] + + if np.all(pos[...] >= 0) and np.all(pos[..., [0, 1]] <= MAX_HEIGHT) and np.all(pos[..., [2, 3]] <= MAX_WIDTH): + + left_hand_crop_dir = osp.join('/share/hlyang/results', date, video_id, crop_save_exp_name, 'left_hand', camera_id) + right_hand_crop_dir = osp.join('/share/hlyang/results', date, video_id, crop_save_exp_name, 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + bg = img_batch[f_idx][c_idx].copy() + + # crop info + + (right_min_h, right_max_h, right_min_w, right_max_w) = pos[0] + right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w = apply_factor_and_margin(right_min_h, right_max_h, right_min_w, right_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + right_crop_info = [right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w] + right_crop_info_batch[frame_id][camera_id] = right_crop_info + + # right_crop_info_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + # with open(right_crop_info_path, 'wb') as f: + # pickle.dump(right_crop_info, f) + + + (left_min_h, left_max_h, left_min_w, left_max_w) = pos[1] + left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w = apply_factor_and_margin(left_min_h, left_max_h, left_min_w, left_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + left_crop_info = [left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w] + left_crop_info_batch[frame_id][camera_id] = left_crop_info + + # left_crop_info_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + # with open(left_crop_info_path, 'wb') as f: + # pickle.dump(left_crop_info, f) + + right_crop = crop(bg, right_min_h, right_max_h, right_min_w, right_max_w, left_min_h, left_max_h, left_min_w, left_max_w) + right_crop_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(right_crop_path, right_crop) + + # left hand + left_crop = crop(bg, left_min_h, left_max_h, left_min_w, left_max_w, right_min_h, right_max_h, right_min_w, right_max_w) + left_crop_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(left_crop_path, left_crop) + + right_crop_info_dir = osp.join('/share/hlyang/results', date, video_id, crop_info_save_exp_name, 'right_hand') + os.makedirs(right_crop_info_dir, exist_ok=True) + left_crop_info_dir = osp.join('/share/hlyang/results', date, video_id, crop_info_save_exp_name, 'left_hand') + os.makedirs(left_crop_info_dir, exist_ok=True) + + right_crop_info_path = osp.join(right_crop_info_dir, 
represent_frame_id + '_crop_info.pkl') + with open(right_crop_info_path, 'wb') as f: + pickle.dump(right_crop_info_batch, f) + left_crop_info_path = osp.join(left_crop_info_dir, represent_frame_id + '_crop_info.pkl') + with open(left_crop_info_path, 'wb') as f: + pickle.dump(left_crop_info_batch, f) + +def crop_from_init(video_id: str, camera_list: str, crop_factor = 1, margin_size = 0): + ''' + 手动标注第一二帧,从src中读取标注数据。 + crop一只手时,将另一只手的bbox所在区域全部置为黑。 + TODO: pos的shape有问题,要改 + ''' + frame_list = [str(i).zfill(5) for i in range(1, 3)] + + for camera_idx, camera_id in enumerate(camera_list): + + left_hand_crop_dir = osp.join('/share/hlyang/results', video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = osp.join('/share/hlyang/results', video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in enumerate(frame_list): + + pos = read_init_crop(video_id, camera_id, frame_id) + crop_hand_from_pos(video_id, camera_id, frame_id, pos, crop_factor, margin_size) + +def crop_from_mask_v2(video_id: str, from_exp_name, camera_list: str, frame_list: list[str], rgb_batch, crop_factor = 1, margin_size = 50): + + # for camera_id in camera_list: + # left_hand_crop_dir = os.path.join('/share/hlyang/results', video_id, 'crop', 'left_hand', camera_id) + # right_hand_crop_dir = os.path.join('/share/hlyang/results', video_id, 'crop', 'right_hand', camera_id) + # os.makedirs(left_hand_crop_dir, exist_ok=True) + # os.makedirs(right_hand_crop_dir, exist_ok=True) + + seg, downsample_factor = get_downsampled_seg_infos_batch_v2(video_id, from_exp_name, frame_list, camera_list) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + right_hand_valid_camera_dict = {} + left_hand_valid_camera_dict = {} + for frame in frame_list: + right_hand_valid_camera_dict[frame] = [] + left_hand_valid_camera_dict[frame] = [] + + for camera_idx, camera_id in enumerate(camera_list): + left_hand_crop_dir = os.path.join('/share/hlyang/results', video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = os.path.join('/share/hlyang/results', video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # 读入图片 + img = load_bg_img(video_id, camera_id, frame_id) + + # H = 3000 + # W = 4096 + (H, W) = img.shape[:2] + MAX_WIDTH = W - 1 + MAX_HEIGHT = H - 1 + + # upsample mask + mask_right_hand = cv2.resize(mask_right_hand, (W, H), interpolation=cv2.INTER_NEAREST) + mask_left_hand = cv2.resize(mask_left_hand, (W, H), interpolation=cv2.INTER_NEAREST) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_hand_valid_camera_dict[frame_id].append(camera_id) + + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = int(max(right_maxh - right_minh, right_maxw - right_minw) * crop_factor) + half_length = right_length // 2 
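+                # The crop is made square: side = max(mask height, mask width)
+                # scaled by crop_factor, centered on the mask midpoint, padded by
+                # margin_size on every side, then clamped to the image bounds below.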
+ right_min_y = max(0, right_midh - half_length - margin_size) + right_max_y = min(MAX_HEIGHT, right_midh + half_length + margin_size) + right_min_x = max(0, right_midw - half_length - margin_size) + right_max_x = min(MAX_WIDTH, right_midw + half_length + margin_size) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_y: right_max_y, right_min_x: right_max_x] + right_crop_info = [right_min_y, right_max_y, right_min_x, right_max_x, right_meanh, right_meanw] + + with open(os.path.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info, f) + + cv2.imwrite(os.path.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + if len(mask_left_hand_idx[0]) != 0: + left_hand_valid_camera_dict[frame_id].append(camera_id) + + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + half_length = left_length // 2 + left_min_y = max(0, left_midh - half_length - margin_size) + left_max_y = min(MAX_HEIGHT, left_midh + half_length + margin_size) + left_min_x = max(0, left_midw - half_length - margin_size) + left_max_x = min(MAX_WIDTH, left_midw + half_length + margin_size) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_y: left_max_y, left_min_x: left_max_x] + left_crop_info = [left_min_y, left_max_y, left_min_x, left_max_x, left_meanh, left_meanw] + + with open(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info, f) + + cv2.imwrite(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png'), left_crop) + + # save valid and invalid info + right_hand_invalid_camera_dict = {} + left_hand_invalid_camera_dict = {} + for frame in frame_list: + right_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in right_hand_valid_camera_dict[frame]] + left_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in left_hand_valid_camera_dict[frame]] + + log_path = os.path.join('/share/hlyang/results', video_id, 'crop', 'log.json') + if os.path.exists(log_path): + with open(log_path, 'r') as f: + log = json.load(f) + log['right_hand_valid_camera_dict'].update(right_hand_valid_camera_dict) + log['left_hand_valid_camera_dict'].update(left_hand_valid_camera_dict) + log['right_hand_invalid_camera_dict'].update(right_hand_invalid_camera_dict) + log['left_hand_invalid_camera_dict'].update(left_hand_invalid_camera_dict) + else: + log = {} + log['right_hand_valid_camera_dict'] = right_hand_valid_camera_dict + log['left_hand_valid_camera_dict'] = left_hand_valid_camera_dict + log['right_hand_invalid_camera_dict'] = right_hand_invalid_camera_dict + log['left_hand_invalid_camera_dict'] = left_hand_invalid_camera_dict + + with open(log_path, 'w') as f: + json.dump(log, f) + + return right_hand_valid_camera_dict, left_hand_valid_camera_dict, right_hand_invalid_camera_dict, left_hand_invalid_camera_dict + +def crop_from_mask_v3(video_id: str, from_exp_name, 
camera_list: str, frame_list: list[str], rgb_batch, crop_factor = 1, margin_size = 50): + ''' + 增加了rgb_batch,可以不用从本地再读图片。 + ''' + # for camera_id in camera_list: + # left_hand_crop_dir = os.path.join('/share/hlyang/results', video_id, 'crop', 'left_hand', camera_id) + # right_hand_crop_dir = os.path.join('/share/hlyang/results', video_id, 'crop', 'right_hand', camera_id) + # os.makedirs(left_hand_crop_dir, exist_ok=True) + # os.makedirs(right_hand_crop_dir, exist_ok=True) + + seg, downsample_factor = get_downsampled_seg_infos_batch_v2(video_id, from_exp_name, frame_list, camera_list) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + right_hand_valid_camera_dict = {} + left_hand_valid_camera_dict = {} + for frame in frame_list: + right_hand_valid_camera_dict[frame] = [] + left_hand_valid_camera_dict[frame] = [] + + for camera_idx, camera_id in enumerate(camera_list): + left_hand_crop_dir = os.path.join('/share/hlyang/results', video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = os.path.join('/share/hlyang/results', video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # 读入图片 + # img = rgb_batch[int(frame_id)-1,camera_idx].copy() + # img = rgb_batch[int(frame_id)-1][camera_idx].copy() + img = rgb_batch[frame_idx][camera_idx].copy() + # img = load_bg_img(video_id, camera_id, frame_id) + + # H = 3000 + # W = 4096 + (H, W) = img.shape[:2] + MAX_WIDTH = W - 1 + MAX_HEIGHT = H - 1 + + # upsample mask + mask_right_hand = cv2.resize(mask_right_hand, (W, H), interpolation=cv2.INTER_NEAREST) + mask_left_hand = cv2.resize(mask_left_hand, (W, H), interpolation=cv2.INTER_NEAREST) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_hand_valid_camera_dict[frame_id].append(camera_id) + + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = int(max(right_maxh - right_minh, right_maxw - right_minw) * crop_factor) + half_length = right_length // 2 + right_min_y = max(0, right_midh - half_length - margin_size) + right_max_y = min(MAX_HEIGHT, right_midh + half_length + margin_size) + right_min_x = max(0, right_midw - half_length - margin_size) + right_max_x = min(MAX_WIDTH, right_midw + half_length + margin_size) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_y: right_max_y, right_min_x: right_max_x] + right_crop_info = [right_min_y, right_max_y, right_min_x, right_max_x, right_meanh, right_meanw] + + with open(os.path.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info, f) + + cv2.imwrite(os.path.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + if len(mask_left_hand_idx[0]) != 0: + left_hand_valid_camera_dict[frame_id].append(camera_id) + + 
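+                # Left-hand branch mirrors the right-hand one above: take the mask
+                # extents, build a square crop around the midpoint with the same
+                # margin, white out the other hand, and save the crop plus its info.
+                # Note that crop_factor is applied only in the right-hand branch.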
left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + half_length = left_length // 2 + left_min_y = max(0, left_midh - half_length - margin_size) + left_max_y = min(MAX_HEIGHT, left_midh + half_length + margin_size) + left_min_x = max(0, left_midw - half_length - margin_size) + left_max_x = min(MAX_WIDTH, left_midw + half_length + margin_size) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_y: left_max_y, left_min_x: left_max_x] + left_crop_info = [left_min_y, left_max_y, left_min_x, left_max_x, left_meanh, left_meanw] + + with open(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info, f) + + cv2.imwrite(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png'), left_crop) + + # save valid and invalid info + right_hand_invalid_camera_dict = {} + left_hand_invalid_camera_dict = {} + for frame in frame_list: + right_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in right_hand_valid_camera_dict[frame]] + left_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in left_hand_valid_camera_dict[frame]] + + log_path = os.path.join('/share/hlyang/results', video_id, 'crop', 'log.json') + if os.path.exists(log_path): + with open(log_path, 'r') as f: + log = json.load(f) + log['right_hand_valid_camera_dict'].update(right_hand_valid_camera_dict) + log['left_hand_valid_camera_dict'].update(left_hand_valid_camera_dict) + log['right_hand_invalid_camera_dict'].update(right_hand_invalid_camera_dict) + log['left_hand_invalid_camera_dict'].update(left_hand_invalid_camera_dict) + else: + log = {} + log['right_hand_valid_camera_dict'] = right_hand_valid_camera_dict + log['left_hand_valid_camera_dict'] = left_hand_valid_camera_dict + log['right_hand_invalid_camera_dict'] = right_hand_invalid_camera_dict + log['left_hand_invalid_camera_dict'] = left_hand_invalid_camera_dict + + with open(log_path, 'w') as f: + json.dump(log, f) + + return right_hand_valid_camera_dict, left_hand_valid_camera_dict, right_hand_invalid_camera_dict, left_hand_invalid_camera_dict + +def crop_from_mask_v3_acc_batch(date, video_id: str, from_exp_name, camera_list: str, represent_frame_id, frame_list: list[str], BATCH_SIZE, rgb_batch, crop_factor = 1, margin_size = 50): + ''' + 增加了rgb_batch,可以不用从本地再读图片。 + 每一个batch的文件分左右手写在一起。 + ''' + + seg, downsample_factor = get_downsampled_seg_infos_batch_v2_acc_batch(date, video_id, from_exp_name, frame_list, camera_list, represent_frame_id=represent_frame_id) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + right_hand_valid_camera_dict = {} + left_hand_valid_camera_dict = {} + for frame in frame_list: + right_hand_valid_camera_dict[frame] = [] + left_hand_valid_camera_dict[frame] = [] + + left_hand_crop_dir = os.path.join('/share/hlyang/results', date, video_id, 'crop_batch', 'left_hand') + right_hand_crop_dir = os.path.join('/share/hlyang/results', date, video_id, 'crop_batch', 'right_hand') + os.makedirs(left_hand_crop_dir, exist_ok=True) + 
os.makedirs(right_hand_crop_dir, exist_ok=True) + + # represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list) + # represent_keys = list(represent_relation.keys()) + # assert len(represent_keys) == 1 + # represent_frame_id = represent_keys[0] + + left_crop_info_batch = {} + left_crop_batch = {} + right_crop_info_batch = {} + right_crop_batch = {} + + for camera_idx, camera_id in enumerate(camera_list): + left_hand_crop_dir = os.path.join('/share/hlyang/results', date, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = os.path.join('/share/hlyang/results', date, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # 读入图片 + img = rgb_batch[frame_idx][camera_idx].copy() + + # H = 3000 + # W = 4096 + (H, W) = img.shape[:2] + MAX_WIDTH = W - 1 + MAX_HEIGHT = H - 1 + + # upsample mask + mask_right_hand = cv2.resize(mask_right_hand, (W, H), interpolation=cv2.INTER_NEAREST) + mask_left_hand = cv2.resize(mask_left_hand, (W, H), interpolation=cv2.INTER_NEAREST) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_hand_valid_camera_dict[frame_id].append(camera_id) + + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = int(max(right_maxh - right_minh, right_maxw - right_minw) * crop_factor) + half_length = right_length // 2 + right_min_y = max(0, right_midh - half_length - margin_size) + right_max_y = min(MAX_HEIGHT, right_midh + half_length + margin_size) + right_min_x = max(0, right_midw - half_length - margin_size) + right_max_x = min(MAX_WIDTH, right_midw + half_length + margin_size) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_y: right_max_y, right_min_x: right_max_x] + right_crop_info = [right_min_y, right_max_y, right_min_x, right_max_x, right_meanh, right_meanw] + + if frame_id not in right_crop_info_batch: + right_crop_info_batch[frame_id] = {} + right_crop_info_batch[frame_id][camera_id] = right_crop_info + + if frame_id not in right_crop_batch: + right_crop_batch[frame_id] = {} + right_crop_batch[frame_id][camera_id] = right_crop + + # with open(os.path.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + # pickle.dump(right_crop_info, f) + + cv2.imwrite(os.path.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + if len(mask_left_hand_idx[0]) != 0: + left_hand_valid_camera_dict[frame_id].append(camera_id) + + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - 
left_minw) + half_length = left_length // 2 + left_min_y = max(0, left_midh - half_length - margin_size) + left_max_y = min(MAX_HEIGHT, left_midh + half_length + margin_size) + left_min_x = max(0, left_midw - half_length - margin_size) + left_max_x = min(MAX_WIDTH, left_midw + half_length + margin_size) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_y: left_max_y, left_min_x: left_max_x] + left_crop_info = [left_min_y, left_max_y, left_min_x, left_max_x, left_meanh, left_meanw] + + if frame_id not in left_crop_info_batch: + left_crop_info_batch[frame_id] = {} + left_crop_info_batch[frame_id][camera_id] = left_crop_info + + if frame_id not in left_crop_batch: + left_crop_batch[frame_id] = {} + left_crop_batch[frame_id][camera_id] = left_crop + + # with open(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + # pickle.dump(left_crop_info, f) + + cv2.imwrite(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png'), left_crop) + + + # save valid and invalid info + right_hand_invalid_camera_dict = {} + left_hand_invalid_camera_dict = {} + for frame in frame_list: + right_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in right_hand_valid_camera_dict[frame]] + left_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in left_hand_valid_camera_dict[frame]] + + left_hand_crop_dir = os.path.join('/share/hlyang/results', date, video_id, 'crop_batch', 'left_hand') + right_hand_crop_dir = os.path.join('/share/hlyang/results', date, video_id, 'crop_batch', 'right_hand') + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + with open(os.path.join(right_hand_crop_dir, represent_frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info_batch, f) + with open(os.path.join(right_hand_crop_dir, represent_frame_id + '_crop.pkl'), 'wb') as f: + pickle.dump(right_crop_batch, f) + + with open(os.path.join(left_hand_crop_dir, represent_frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info_batch, f) + with open(os.path.join(left_hand_crop_dir, represent_frame_id + '_crop.pkl'), 'wb') as f: + pickle.dump(left_crop_batch, f) + + # 寻思这log也没人看啊 + # log_path = os.path.join('/share/hlyang/results', video_id, 'crop', 'log.json') + # if os.path.exists(log_path): + # with open(log_path, 'r') as f: + # log = json.load(f) + # log['right_hand_valid_camera_dict'].update(right_hand_valid_camera_dict) + # log['left_hand_valid_camera_dict'].update(left_hand_valid_camera_dict) + # log['right_hand_invalid_camera_dict'].update(right_hand_invalid_camera_dict) + # log['left_hand_invalid_camera_dict'].update(left_hand_invalid_camera_dict) + # else: + # log = {} + # log['right_hand_valid_camera_dict'] = right_hand_valid_camera_dict + # log['left_hand_valid_camera_dict'] = left_hand_valid_camera_dict + # log['right_hand_invalid_camera_dict'] = right_hand_invalid_camera_dict + # log['left_hand_invalid_camera_dict'] = left_hand_invalid_camera_dict + + # with open(log_path, 'w') as f: + # json.dump(log, f) + + return right_hand_valid_camera_dict, left_hand_valid_camera_dict, right_hand_invalid_camera_dict, left_hand_invalid_camera_dict + +if __name__ == "__main__": + camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946'] + # camera_list = ['22139911'] + + parser = argparse.ArgumentParser() + 
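+    # The only CLI argument is --video_id; the frame list is derived below from the
+    # PNGs of the first camera, and one crop_from_mask process is spawned per camera.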
parser.add_argument('--video_id', required=True, type=str) + args = parser.parse_args() + video_id = args.video_id + + img_dir = osp.join('/share/hlyang/results', video_id, 'imgs', camera_list[0]) # 默认每个视角的frame数相同 + assert os.path.isdir(img_dir) + img_filename_list = list(scandir(img_dir, 'png')) + frame_list = [] + for img_filename in img_filename_list: + frame_id = img_filename[-9:-4] + frame_list.append(frame_id) + + procs = [] + for camera_id in camera_list: + args = (video_id, [camera_id], frame_list) + proc = mlp.Process(target=crop_from_mask, args=args) + + proc.start() + procs.append(proc) + + for i in range(len(procs)): + procs[i].join() + +if __name__ == '__main__': + camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139916', '22139946'] + frame_list = [str(i).zfill(5) for i in range(1,3)] + crop_from_init('20230904_01', camera_list, frame_list) \ No newline at end of file diff --git a/utils/utils/crop_resize_no_another_hand2.py b/utils/utils/crop_resize_no_another_hand2.py new file mode 100755 index 0000000000000000000000000000000000000000..709fd3be1adcfa7c62699f9bb9c785f4afb82958 --- /dev/null +++ b/utils/utils/crop_resize_no_another_hand2.py @@ -0,0 +1,885 @@ +''' +只crop可扣除一只手的图,没有resize。 + +example: +python utils/crop_resize_no_another_hand.py --video_id 20230818_04_old + +TODO:检查为什么运行到后面会越来越慢。似乎是卡在了load_bg_img,可以测一下这一行的时间。 re:好像只是单纯有点慢 +''' + +import os +import sys +sys.path.append('.') +import cv2 +import numpy as np +import os.path as osp +from tqdm import tqdm +import pickle +import multiprocessing as mlp +from utils.hoi_io2 import get_downsampled_seg_infos_batch, get_seg_infos_batch3, load_bg_img, get_downsampled_seg_infos_batch_v2, read_init_crop, cal_represent_frame_list, get_downsampled_seg_infos_batch_v2_acc_batch +from utils.scandir import scandir +import argparse +from time import time +import json + +def crop_from_mask(video_id: str, camera_list: str, frame_list: list[str]): + ''' + 当前帧如果没有某个mask,则用上一帧的bbox进行crop。 + ''' + + seg, downsample_factor = get_downsampled_seg_infos_batch(video_id, frame_list, camera_list) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + last_right_min_x = None + last_right_max_x = None + last_right_min_y = None + last_right_max_y = None + last_right_meanh = None + last_right_meanw = None + + last_left_min_x = None + last_left_max_x = None + last_left_min_y = None + last_left_max_y = None + last_left_meanh = None + last_left_meanw = None + + MAX_HEIGHT = 4095 + MAX_WIDTH = 2999 + MARGIN_SIZE = 50 + + # TODO:以下的x和y定义反了,需要更改 + for camera_idx, camera_id in enumerate(camera_list): + # left_hand_crop_dir = osp.join(root, video_id, 'crop_imgs_left_hand', camera_id) + # right_hand_crop_dir = osp.join(root, video_id, 'crop_imgs_right_hand', camera_id) + left_hand_crop_dir = osp.join(root, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = osp.join(root, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # upsample + mask_right_hand = cv2.resize(mask_right_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR) + mask_left_hand = cv2.resize(mask_left_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR) + + # 读入图片 + # timer1 = time() + img = load_bg_img(video_id, 
camera_id, frame_id) + # timer2 = time() + # print(timer2 - timer1) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = max(right_maxh - right_minh, right_maxw - right_minw) + right_min_x = max(0, right_midh - right_length // 2 - MARGIN_SIZE) + right_max_x = min(MAX_WIDTH, right_midh + right_length // 2 + MARGIN_SIZE) + right_min_y = max(0, right_midw - right_length // 2 - MARGIN_SIZE) + right_max_y = min(MAX_HEIGHT, right_midw + right_length // 2 + MARGIN_SIZE) + + last_right_min_x = right_min_x + last_right_max_x = right_max_x + last_right_min_y = right_min_y + last_right_max_y = right_max_y + last_right_meanh = right_meanh + last_right_meanw = right_meanw + elif frame_idx != 0 and len(mask_right_hand_idx[0]) == 0: + right_min_x = last_right_min_x + right_max_x = last_right_max_x + right_min_y = last_right_min_y + right_max_y = last_right_max_y + right_meanh = last_right_meanh + right_meanw = last_right_meanw + # else: + # print('no hand mask in the first frame!') + # exit(1) + else: + print('no hand mask in the first frame of this sub_frame_list!') + print('right_hand', camera_id, frame_id) + if frame_idx == 0 and frame_id != '00001': + info_path = osp.join(right_hand_crop_dir, camera_id + '_' + str(int(frame_id)-1).zfill(5) + '_crop_info.pkl') + assert os.path.exists(info_path) + with open(info_path, 'rb') as f: + right_min_x, right_max_x, right_min_y, right_max_y, right_meanh, right_meanw = pickle.load(f) + + last_right_min_x = right_min_x + last_right_max_x = right_max_x + last_right_min_y = right_min_y + last_right_max_y = right_max_y + last_right_meanh = right_meanh + last_right_meanw = right_meanw + else: + print('GG, no hand mask in the first frame!') + exit(1) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_x: right_max_x, right_min_y: right_max_y] + right_crop_info = [right_min_x, right_max_x, right_min_y, right_max_y, right_meanh, right_meanw] + + with open(osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info, f) + + cv2.imwrite(osp.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + + # test_img = np.zeros((3000, 4096, 3)) + # for y, x in zip(mask_left_hand_idx[0], mask_left_hand_idx[1]): + # cv2.circle(test_img, (x, y), 5, (255, 0, 0), -1) + # cv2.imwrite('./test.png',test_img) + + if len(mask_left_hand_idx[0]) != 0: + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + left_min_x = max(0, left_midh - left_length // 2 - MARGIN_SIZE) + left_max_x = min(MAX_WIDTH, left_midh + left_length // 2 + MARGIN_SIZE) + left_min_y = max(0, left_midw - left_length 
// 2 - MARGIN_SIZE) + left_max_y = min(MAX_HEIGHT, left_midw + left_length // 2 + MARGIN_SIZE) + + last_left_min_x = left_min_x + last_left_max_x = left_max_x + last_left_min_y = left_min_y + last_left_max_y = left_max_y + last_left_meanh = left_meanh + last_left_meanw = left_meanw + elif frame_idx != 0 and len(mask_left_hand_idx[0]) == 0: + left_min_x = last_left_min_x + left_max_x = last_left_max_x + left_min_y = last_left_min_y + left_max_y = last_left_max_y + left_meanh = last_left_meanh + left_meanw = last_left_meanw + else: + print('no hand mask in the first frame of this sub_frame_list!') + print('left_hand', camera_id, frame_id) + if frame_idx == 0 and frame_id != '00001': + info_path = osp.join(left_hand_crop_dir, camera_id + '_' + str(int(frame_id)-1).zfill(5) + '_crop_info.pkl') + assert os.path.exists(info_path) + with open(info_path, 'rb') as f: + left_min_x, left_max_x, left_min_y, left_max_y, left_meanh, left_meanw = pickle.load(f) + + last_left_min_x = left_min_x + last_left_max_x = left_max_x + last_left_min_y = left_min_y + last_left_max_y = left_max_y + last_left_meanh = left_meanh + last_left_meanw = left_meanw + else: + print(frame_idx, frame_id) + print('GG, no hand mask in the first frame!') + exit(1) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_x: left_max_x, left_min_y: left_max_y] + left_crop_info = [left_min_x, left_max_x, left_min_y, left_max_y, left_meanh, left_meanw] + + with open(osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info, f) + + cv2.imwrite(osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png'), left_crop) + +def crop(img, min_h, max_h, min_w, max_w, black_min_h=None, black_max_h=None, black_min_w=None, black_max_w=None): + img = img.copy() + + mid_h = (min_h + max_h) // 2 + mid_w = (min_w + max_w) // 2 + length = max(max_h - min_h, max_w - min_w) + + if [black_min_h, black_max_h, black_min_w, black_max_w] == [not None, not None, not None, not None]: + img[black_min_h: black_max_h, black_min_w, black_max_w] = 255.0 + + crop = img[min_h: max_h, min_w: max_w] + + return crop + +def apply_factor_and_margin(min_h, max_h, min_w, max_w, factor, margin_size, MAX_H, MAX_W): + # # avoid overflow + # min_h_ = min_h.astype(np.uint16) + # max_h_ = max_h.astype(np.uint16) + # min_w_ = min_w.astype(np.uint16) + # max_w_ = max_w.astype(np.uint16) + # mid_h = ((min_h_ + max_h_) // 2).astype(np.uint8) + # mid_w = ((min_w_ + max_w_) // 2).astype(np.uint8) + + mid_h = (min_h + max_h) // 2 + mid_w = (min_w + max_w) // 2 + + length = max(max_h - min_h, max_w - min_w) + length = int(length * factor) + half_length = length // 2 + + min_h = max(0, mid_h - half_length - margin_size, 0) + max_h = min(MAX_H, mid_h + half_length + margin_size) + min_w = max(0, mid_w - half_length - margin_size, 0) + max_w = min(MAX_W, mid_w + half_length + margin_size) + + return min_h, max_h, min_w, max_w, mid_h, mid_w + +def crop_hand_from_pos(root, video_id: str, camera_id: str, frame_id: str, pos, crop_factor = 1, margin_size = 0): + H = 3000 + W = 4096 + MAX_HEIGHT = H - 1 + MAX_WIDTH = W - 1 + + assert pos.shape == (2, 4) + # assert np.all(pos[...] >= 0), print(pos) + # assert np.all(pos[..., [0, 1]] <= MAX_HEIGHT), print(pos) + # assert np.all(pos[..., [2, 3]] <= MAX_WIDTH), print(pos) + + if np.all(pos[...] 
>= 0) and np.all(pos[..., [0, 1]] <= MAX_HEIGHT) and np.all(pos[..., [2, 3]] <= MAX_WIDTH): + + left_hand_crop_dir = osp.join(root, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = osp.join(root, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + bg = load_bg_img(video_id, camera_id, frame_id) + + # crop info + right_crop_info_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + (right_min_h, right_max_h, right_min_w, right_max_w) = pos[0] + right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w = apply_factor_and_margin(right_min_h, right_max_h, right_min_w, right_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + right_crop_info = [right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w] + + with open(right_crop_info_path, 'wb') as f: + pickle.dump(right_crop_info, f) + + left_crop_info_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + (left_min_h, left_max_h, left_min_w, left_max_w) = pos[1] + left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w = apply_factor_and_margin(left_min_h, left_max_h, left_min_w, left_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + left_crop_info = [left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w] + with open(left_crop_info_path, 'wb') as f: + pickle.dump(left_crop_info, f) + + right_crop = crop(bg, right_min_h, right_max_h, right_min_w, right_max_w, left_min_h, left_max_h, left_min_w, left_max_w) + right_crop_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(right_crop_path, right_crop) + + # left hand + left_crop = crop(bg, left_min_h, left_max_h, left_min_w, left_max_w, right_min_h, right_max_h, right_min_w, right_max_w) + left_crop_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(left_crop_path, left_crop) + +def crop_hand_from_pos_acc(root, date: str, crop_save_exp_name, crop_info_save_exp_name, video_id: str, camera_list: str, frame_list: str, represent_frame_id, img_batch, pos_batch, crop_factor = 1, margin_size = 0): + H = 3000 + W = 4096 + MAX_HEIGHT = H - 1 + MAX_WIDTH = W - 1 + + # assert pos.shape == (2, 4) + # assert np.all(pos[...] >= 0), print(pos) + # assert np.all(pos[..., [0, 1]] <= MAX_HEIGHT), print(pos) + # assert np.all(pos[..., [2, 3]] <= MAX_WIDTH), print(pos) + + # TODO + right_crop_info_batch = {} + left_crop_info_batch = {} + + for f_idx, frame_id in enumerate(frame_list): + right_crop_info_batch[frame_id] = {} + left_crop_info_batch[frame_id] = {} + + for c_idx, camera_id in enumerate(camera_list): + if camera_id not in pos_batch[frame_id].keys(): + continue + pos = pos_batch[frame_id][camera_id] + + if np.all(pos[...] 
>= 0) and np.all(pos[..., [0, 1]] <= MAX_HEIGHT) and np.all(pos[..., [2, 3]] <= MAX_WIDTH): + + left_hand_crop_dir = osp.join(root, date, video_id, crop_save_exp_name, 'left_hand', camera_id) + right_hand_crop_dir = osp.join(root, date, video_id, crop_save_exp_name, 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + bg = img_batch[f_idx][c_idx].copy() + + # crop info + + (right_min_h, right_max_h, right_min_w, right_max_w) = pos[0] + right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w = apply_factor_and_margin(right_min_h, right_max_h, right_min_w, right_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + right_crop_info = [right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w] + right_crop_info_batch[frame_id][camera_id] = right_crop_info + + # right_crop_info_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + # with open(right_crop_info_path, 'wb') as f: + # pickle.dump(right_crop_info, f) + + + (left_min_h, left_max_h, left_min_w, left_max_w) = pos[1] + left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w = apply_factor_and_margin(left_min_h, left_max_h, left_min_w, left_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + left_crop_info = [left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w] + left_crop_info_batch[frame_id][camera_id] = left_crop_info + + # left_crop_info_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + # with open(left_crop_info_path, 'wb') as f: + # pickle.dump(left_crop_info, f) + + right_crop = crop(bg, right_min_h, right_max_h, right_min_w, right_max_w, left_min_h, left_max_h, left_min_w, left_max_w) + right_crop_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(right_crop_path, right_crop) + + # left hand + left_crop = crop(bg, left_min_h, left_max_h, left_min_w, left_max_w, right_min_h, right_max_h, right_min_w, right_max_w) + left_crop_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(left_crop_path, left_crop) + + right_crop_info_dir = osp.join(root, date, video_id, crop_info_save_exp_name, 'right_hand') + os.makedirs(right_crop_info_dir, exist_ok=True) + left_crop_info_dir = osp.join(root, date, video_id, crop_info_save_exp_name, 'left_hand') + os.makedirs(left_crop_info_dir, exist_ok=True) + + right_crop_info_path = osp.join(right_crop_info_dir, represent_frame_id + '_crop_info.pkl') + with open(right_crop_info_path, 'wb') as f: + pickle.dump(right_crop_info_batch, f) + left_crop_info_path = osp.join(left_crop_info_dir, represent_frame_id + '_crop_info.pkl') + with open(left_crop_info_path, 'wb') as f: + pickle.dump(left_crop_info_batch, f) + +def crop_from_init(root, date, video_id: str, camera_list: str, crop_factor = 1, margin_size = 0): + ''' + 手动标注第一二帧,从src中读取标注数据。 + crop一只手时,将另一只手的bbox所在区域全部置为黑。 + TODO: pos的shape有问题,要改 + ''' + frame_list = [str(i).zfill(5) for i in range(1, 3)] + + for camera_idx, camera_id in enumerate(camera_list): + + left_hand_crop_dir = osp.join(root, date, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = osp.join(root, date, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in enumerate(frame_list): + + pos = read_init_crop(video_id, camera_id, frame_id) + 
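+            # Editor's note (not in the original source): based on how crop_hand_from_pos
+            # unpacks it above, `pos` is assumed to be a (2, 4) array holding
+            # [min_row, max_row, min_col, max_col] in pos[0] for the right hand and
+            # pos[1] for the left hand; the docstring's TODO about its shape still applies.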
crop_hand_from_pos(video_id, camera_id, frame_id, pos, crop_factor, margin_size) + +def crop_from_mask_v2(root, video_id: str, from_exp_name, camera_list: str, frame_list: list[str], rgb_batch, crop_factor = 1, margin_size = 50): + + # for camera_id in camera_list: + # left_hand_crop_dir = os.path.join(root, video_id, 'crop', 'left_hand', camera_id) + # right_hand_crop_dir = os.path.join(root, video_id, 'crop', 'right_hand', camera_id) + # os.makedirs(left_hand_crop_dir, exist_ok=True) + # os.makedirs(right_hand_crop_dir, exist_ok=True) + + seg, downsample_factor = get_downsampled_seg_infos_batch_v2(video_id, from_exp_name, frame_list, camera_list) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + right_hand_valid_camera_dict = {} + left_hand_valid_camera_dict = {} + for frame in frame_list: + right_hand_valid_camera_dict[frame] = [] + left_hand_valid_camera_dict[frame] = [] + + for camera_idx, camera_id in enumerate(camera_list): + left_hand_crop_dir = os.path.join(root, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = os.path.join(root, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # 读入图片 + img = load_bg_img(video_id, camera_id, frame_id) + + # H = 3000 + # W = 4096 + (H, W) = img.shape[:2] + MAX_WIDTH = W - 1 + MAX_HEIGHT = H - 1 + + # upsample mask + mask_right_hand = cv2.resize(mask_right_hand, (W, H), interpolation=cv2.INTER_NEAREST) + mask_left_hand = cv2.resize(mask_left_hand, (W, H), interpolation=cv2.INTER_NEAREST) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_hand_valid_camera_dict[frame_id].append(camera_id) + + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = int(max(right_maxh - right_minh, right_maxw - right_minw) * crop_factor) + half_length = right_length // 2 + right_min_y = max(0, right_midh - half_length - margin_size) + right_max_y = min(MAX_HEIGHT, right_midh + half_length + margin_size) + right_min_x = max(0, right_midw - half_length - margin_size) + right_max_x = min(MAX_WIDTH, right_midw + half_length + margin_size) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_y: right_max_y, right_min_x: right_max_x] + right_crop_info = [right_min_y, right_max_y, right_min_x, right_max_x, right_meanh, right_meanw] + + with open(os.path.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info, f) + + cv2.imwrite(os.path.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + if len(mask_left_hand_idx[0]) != 0: + left_hand_valid_camera_dict[frame_id].append(camera_id) + + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + 
left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + half_length = left_length // 2 + left_min_y = max(0, left_midh - half_length - margin_size) + left_max_y = min(MAX_HEIGHT, left_midh + half_length + margin_size) + left_min_x = max(0, left_midw - half_length - margin_size) + left_max_x = min(MAX_WIDTH, left_midw + half_length + margin_size) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_y: left_max_y, left_min_x: left_max_x] + left_crop_info = [left_min_y, left_max_y, left_min_x, left_max_x, left_meanh, left_meanw] + + with open(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info, f) + + cv2.imwrite(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png'), left_crop) + + # save valid and invalid info + right_hand_invalid_camera_dict = {} + left_hand_invalid_camera_dict = {} + for frame in frame_list: + right_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in right_hand_valid_camera_dict[frame]] + left_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in left_hand_valid_camera_dict[frame]] + + log_path = os.path.join(root, video_id, 'crop', 'log.json') + if os.path.exists(log_path): + with open(log_path, 'r') as f: + log = json.load(f) + log['right_hand_valid_camera_dict'].update(right_hand_valid_camera_dict) + log['left_hand_valid_camera_dict'].update(left_hand_valid_camera_dict) + log['right_hand_invalid_camera_dict'].update(right_hand_invalid_camera_dict) + log['left_hand_invalid_camera_dict'].update(left_hand_invalid_camera_dict) + else: + log = {} + log['right_hand_valid_camera_dict'] = right_hand_valid_camera_dict + log['left_hand_valid_camera_dict'] = left_hand_valid_camera_dict + log['right_hand_invalid_camera_dict'] = right_hand_invalid_camera_dict + log['left_hand_invalid_camera_dict'] = left_hand_invalid_camera_dict + + with open(log_path, 'w') as f: + json.dump(log, f) + + return right_hand_valid_camera_dict, left_hand_valid_camera_dict, right_hand_invalid_camera_dict, left_hand_invalid_camera_dict + +def crop_from_mask_v3(root, video_id: str, from_exp_name, camera_list: str, frame_list: list[str], rgb_batch, crop_factor = 1, margin_size = 50): + ''' + 增加了rgb_batch,可以不用从本地再读图片。 + ''' + # for camera_id in camera_list: + # left_hand_crop_dir = os.path.join(root, video_id, 'crop', 'left_hand', camera_id) + # right_hand_crop_dir = os.path.join(root, video_id, 'crop', 'right_hand', camera_id) + # os.makedirs(left_hand_crop_dir, exist_ok=True) + # os.makedirs(right_hand_crop_dir, exist_ok=True) + + seg, downsample_factor = get_downsampled_seg_infos_batch_v2(video_id, from_exp_name, frame_list, camera_list) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + right_hand_valid_camera_dict = {} + left_hand_valid_camera_dict = {} + for frame in frame_list: + right_hand_valid_camera_dict[frame] = [] + left_hand_valid_camera_dict[frame] = [] + + for camera_idx, camera_id in enumerate(camera_list): + left_hand_crop_dir = os.path.join(root, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = os.path.join(root, video_id, 'crop', 'right_hand', camera_id) 
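+        # Editor's note (not in the original source): each <camera_id>_<frame_id>_crop_info.pkl
+        # written below stores [min_row, max_row, min_col, max_col, mean_row, mean_col] of the
+        # square hand crop, matching the order in which the *_crop_info lists are built.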
+ os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # 读入图片 + # img = rgb_batch[int(frame_id)-1,camera_idx].copy() + # img = rgb_batch[int(frame_id)-1][camera_idx].copy() + img = rgb_batch[frame_idx][camera_idx].copy() + # img = load_bg_img(video_id, camera_id, frame_id) + + # H = 3000 + # W = 4096 + (H, W) = img.shape[:2] + MAX_WIDTH = W - 1 + MAX_HEIGHT = H - 1 + + # upsample mask + mask_right_hand = cv2.resize(mask_right_hand, (W, H), interpolation=cv2.INTER_NEAREST) + mask_left_hand = cv2.resize(mask_left_hand, (W, H), interpolation=cv2.INTER_NEAREST) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_hand_valid_camera_dict[frame_id].append(camera_id) + + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = int(max(right_maxh - right_minh, right_maxw - right_minw) * crop_factor) + half_length = right_length // 2 + right_min_y = max(0, right_midh - half_length - margin_size) + right_max_y = min(MAX_HEIGHT, right_midh + half_length + margin_size) + right_min_x = max(0, right_midw - half_length - margin_size) + right_max_x = min(MAX_WIDTH, right_midw + half_length + margin_size) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_y: right_max_y, right_min_x: right_max_x] + right_crop_info = [right_min_y, right_max_y, right_min_x, right_max_x, right_meanh, right_meanw] + + with open(os.path.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info, f) + + cv2.imwrite(os.path.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + if len(mask_left_hand_idx[0]) != 0: + left_hand_valid_camera_dict[frame_id].append(camera_id) + + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + half_length = left_length // 2 + left_min_y = max(0, left_midh - half_length - margin_size) + left_max_y = min(MAX_HEIGHT, left_midh + half_length + margin_size) + left_min_x = max(0, left_midw - half_length - margin_size) + left_max_x = min(MAX_WIDTH, left_midw + half_length + margin_size) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_y: left_max_y, left_min_x: left_max_x] + left_crop_info = [left_min_y, left_max_y, left_min_x, left_max_x, left_meanh, left_meanw] + + with open(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info, f) + + cv2.imwrite(os.path.join(left_hand_crop_dir, camera_id 
+ '_' + frame_id + '.png'), left_crop) + + # save valid and invalid info + right_hand_invalid_camera_dict = {} + left_hand_invalid_camera_dict = {} + for frame in frame_list: + right_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in right_hand_valid_camera_dict[frame]] + left_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in left_hand_valid_camera_dict[frame]] + + log_path = os.path.join(root, video_id, 'crop', 'log.json') + if os.path.exists(log_path): + with open(log_path, 'r') as f: + log = json.load(f) + log['right_hand_valid_camera_dict'].update(right_hand_valid_camera_dict) + log['left_hand_valid_camera_dict'].update(left_hand_valid_camera_dict) + log['right_hand_invalid_camera_dict'].update(right_hand_invalid_camera_dict) + log['left_hand_invalid_camera_dict'].update(left_hand_invalid_camera_dict) + else: + log = {} + log['right_hand_valid_camera_dict'] = right_hand_valid_camera_dict + log['left_hand_valid_camera_dict'] = left_hand_valid_camera_dict + log['right_hand_invalid_camera_dict'] = right_hand_invalid_camera_dict + log['left_hand_invalid_camera_dict'] = left_hand_invalid_camera_dict + + with open(log_path, 'w') as f: + json.dump(log, f) + + return right_hand_valid_camera_dict, left_hand_valid_camera_dict, right_hand_invalid_camera_dict, left_hand_invalid_camera_dict + +def crop_from_mask_v3_acc_batch(root, date, video_id: str, from_exp_name, camera_list: str, represent_frame_id, frame_list: list[str], BATCH_SIZE, rgb_batch, crop_factor = 1, margin_size = 50): + ''' + 增加了rgb_batch,可以不用从本地再读图片。 + 每一个batch的文件分左右手写在一起。 + ''' + + seg, downsample_factor = get_downsampled_seg_infos_batch_v2_acc_batch(root, date, video_id, from_exp_name, frame_list, camera_list, represent_frame_id=represent_frame_id) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + right_hand_valid_camera_dict = {} + left_hand_valid_camera_dict = {} + for frame in frame_list: + right_hand_valid_camera_dict[frame] = [] + left_hand_valid_camera_dict[frame] = [] + + left_hand_crop_dir = os.path.join(root, date, video_id, 'crop_batch', 'left_hand') + right_hand_crop_dir = os.path.join(root, date, video_id, 'crop_batch', 'right_hand') + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + # represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list) + # represent_keys = list(represent_relation.keys()) + # assert len(represent_keys) == 1 + # represent_frame_id = represent_keys[0] + + left_crop_info_batch = {} + left_crop_batch = {} + right_crop_info_batch = {} + right_crop_batch = {} + + for camera_idx, camera_id in enumerate(camera_list): + left_hand_crop_dir = os.path.join(root, date, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = os.path.join(root, date, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # 读入图片 + img = rgb_batch[frame_idx][camera_idx].copy() + + # H = 3000 + # W = 4096 + (H, W) = img.shape[:2] + MAX_WIDTH = W - 1 + MAX_HEIGHT = H - 1 + + # upsample mask + mask_right_hand = cv2.resize(mask_right_hand, (W, H), interpolation=cv2.INTER_NEAREST) + mask_left_hand = cv2.resize(mask_left_hand, (W, H), 
interpolation=cv2.INTER_NEAREST) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_hand_valid_camera_dict[frame_id].append(camera_id) + + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = int(max(right_maxh - right_minh, right_maxw - right_minw) * crop_factor) + half_length = right_length // 2 + right_min_y = max(0, right_midh - half_length - margin_size) + right_max_y = min(MAX_HEIGHT, right_midh + half_length + margin_size) + right_min_x = max(0, right_midw - half_length - margin_size) + right_max_x = min(MAX_WIDTH, right_midw + half_length + margin_size) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_y: right_max_y, right_min_x: right_max_x] + right_crop_info = [right_min_y, right_max_y, right_min_x, right_max_x, right_meanh, right_meanw] + + if frame_id not in right_crop_info_batch: + right_crop_info_batch[frame_id] = {} + right_crop_info_batch[frame_id][camera_id] = right_crop_info + + if frame_id not in right_crop_batch: + right_crop_batch[frame_id] = {} + right_crop_batch[frame_id][camera_id] = right_crop + + # with open(os.path.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + # pickle.dump(right_crop_info, f) + + cv2.imwrite(os.path.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + if len(mask_left_hand_idx[0]) != 0: + left_hand_valid_camera_dict[frame_id].append(camera_id) + + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + half_length = left_length // 2 + left_min_y = max(0, left_midh - half_length - margin_size) + left_max_y = min(MAX_HEIGHT, left_midh + half_length + margin_size) + left_min_x = max(0, left_midw - half_length - margin_size) + left_max_x = min(MAX_WIDTH, left_midw + half_length + margin_size) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_y: left_max_y, left_min_x: left_max_x] + left_crop_info = [left_min_y, left_max_y, left_min_x, left_max_x, left_meanh, left_meanw] + + if frame_id not in left_crop_info_batch: + left_crop_info_batch[frame_id] = {} + left_crop_info_batch[frame_id][camera_id] = left_crop_info + + if frame_id not in left_crop_batch: + left_crop_batch[frame_id] = {} + left_crop_batch[frame_id][camera_id] = left_crop + + # with open(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + # pickle.dump(left_crop_info, f) + + cv2.imwrite(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png'), left_crop) + + + # save valid and invalid info + right_hand_invalid_camera_dict = {} + left_hand_invalid_camera_dict = {} + for frame in frame_list: + 
right_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in right_hand_valid_camera_dict[frame]] + left_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in left_hand_valid_camera_dict[frame]] + + left_hand_crop_dir = os.path.join(root, date, video_id, 'crop_batch', 'left_hand') + right_hand_crop_dir = os.path.join(root, date, video_id, 'crop_batch', 'right_hand') + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + with open(os.path.join(right_hand_crop_dir, represent_frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info_batch, f) + with open(os.path.join(right_hand_crop_dir, represent_frame_id + '_crop.pkl'), 'wb') as f: + pickle.dump(right_crop_batch, f) + + with open(os.path.join(left_hand_crop_dir, represent_frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info_batch, f) + with open(os.path.join(left_hand_crop_dir, represent_frame_id + '_crop.pkl'), 'wb') as f: + pickle.dump(left_crop_batch, f) + + # 寻思这log也没人看啊 + # log_path = os.path.join(root, video_id, 'crop', 'log.json') + # if os.path.exists(log_path): + # with open(log_path, 'r') as f: + # log = json.load(f) + # log['right_hand_valid_camera_dict'].update(right_hand_valid_camera_dict) + # log['left_hand_valid_camera_dict'].update(left_hand_valid_camera_dict) + # log['right_hand_invalid_camera_dict'].update(right_hand_invalid_camera_dict) + # log['left_hand_invalid_camera_dict'].update(left_hand_invalid_camera_dict) + # else: + # log = {} + # log['right_hand_valid_camera_dict'] = right_hand_valid_camera_dict + # log['left_hand_valid_camera_dict'] = left_hand_valid_camera_dict + # log['right_hand_invalid_camera_dict'] = right_hand_invalid_camera_dict + # log['left_hand_invalid_camera_dict'] = left_hand_invalid_camera_dict + + # with open(log_path, 'w') as f: + # json.dump(log, f) + + return right_hand_valid_camera_dict, left_hand_valid_camera_dict, right_hand_invalid_camera_dict, left_hand_invalid_camera_dict + +if __name__ == "__main__": + camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946'] + # camera_list = ['22139911'] + + parser = argparse.ArgumentParser() + parser.add_argument('--video_id', required=True, type=str) + args = parser.parse_args() + video_id = args.video_id + root = '/share/datasets/HOI-mocap' + date = video_id[:8] + + img_dir = osp.join(root, date, video_id, 'imgs', camera_list[0]) # 默认每个视角的frame数相同 + assert os.path.isdir(img_dir) + img_filename_list = list(scandir(img_dir, 'png')) + frame_list = [] + for img_filename in img_filename_list: + frame_id = img_filename[-9:-4] + frame_list.append(frame_id) + + procs = [] + for camera_id in camera_list: + args = (video_id, [camera_id], frame_list) + proc = mlp.Process(target=crop_from_mask, args=args) + + proc.start() + procs.append(proc) + + for i in range(len(procs)): + procs[i].join() + +if __name__ == '__main__': + camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139916', '22139946'] + frame_list = [str(i).zfill(5) for i in range(1,3)] + crop_from_init('20230904_01', camera_list, frame_list) \ No newline at end of file diff --git a/utils/utils/crop_resize_no_another_hand3.py b/utils/utils/crop_resize_no_another_hand3.py new file mode 100755 index 0000000000000000000000000000000000000000..ed94f3d3dc203620fe46736193fc14995545e0c6 --- /dev/null +++ b/utils/utils/crop_resize_no_another_hand3.py @@ -0,0 +1,880 @@ +''' 
+只crop可扣除一只手的图,没有resize。 + +example: +python utils/crop_resize_no_another_hand.py --video_id 20230818_04_old + +TODO:检查为什么运行到后面会越来越慢。似乎是卡在了load_bg_img,可以测一下这一行的时间。 re:好像只是单纯有点慢 +''' + +import os +import sys +sys.path.append('.') +import cv2 +import numpy as np +import os.path as osp +from tqdm import tqdm +import pickle +import multiprocessing as mlp +from utils.hoi_io2 import get_downsampled_seg_infos_batch, get_seg_infos_batch3, load_bg_img, get_downsampled_seg_infos_batch_v2, read_init_crop, cal_represent_frame_list, get_downsampled_seg_infos_batch_v2_acc_batch +from utils.scandir import scandir +import argparse +from time import time +import json + +def crop_from_mask(video_id: str, camera_list: str, frame_list: list[str]): + ''' + 当前帧如果没有某个mask,则用上一帧的bbox进行crop。 + ''' + + seg, downsample_factor = get_downsampled_seg_infos_batch(video_id, frame_list, camera_list) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + last_right_min_x = None + last_right_max_x = None + last_right_min_y = None + last_right_max_y = None + last_right_meanh = None + last_right_meanw = None + + last_left_min_x = None + last_left_max_x = None + last_left_min_y = None + last_left_max_y = None + last_left_meanh = None + last_left_meanw = None + + MAX_HEIGHT = 4095 + MAX_WIDTH = 2999 + MARGIN_SIZE = 50 + + # TODO:以下的x和y定义反了,需要更改 + for camera_idx, camera_id in enumerate(camera_list): + # left_hand_crop_dir = osp.join(root, video_id, 'crop_imgs_left_hand', camera_id) + # right_hand_crop_dir = osp.join(root, video_id, 'crop_imgs_right_hand', camera_id) + left_hand_crop_dir = osp.join(root, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = osp.join(root, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # upsample + mask_right_hand = cv2.resize(mask_right_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR) + mask_left_hand = cv2.resize(mask_left_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR) + + # 读入图片 + # timer1 = time() + img = load_bg_img(video_id, camera_id, frame_id) + # timer2 = time() + # print(timer2 - timer1) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = max(right_maxh - right_minh, right_maxw - right_minw) + right_min_x = max(0, right_midh - right_length // 2 - MARGIN_SIZE) + right_max_x = min(MAX_WIDTH, right_midh + right_length // 2 + MARGIN_SIZE) + right_min_y = max(0, right_midw - right_length // 2 - MARGIN_SIZE) + right_max_y = min(MAX_HEIGHT, right_midw + right_length // 2 + MARGIN_SIZE) + + last_right_min_x = right_min_x + last_right_max_x = right_max_x + last_right_min_y = right_min_y + last_right_max_y = right_max_y + last_right_meanh = right_meanh + last_right_meanw = right_meanw + elif frame_idx != 0 and len(mask_right_hand_idx[0]) == 0: + right_min_x = last_right_min_x + right_max_x = 
last_right_max_x + right_min_y = last_right_min_y + right_max_y = last_right_max_y + right_meanh = last_right_meanh + right_meanw = last_right_meanw + # else: + # print('no hand mask in the first frame!') + # exit(1) + else: + print('no hand mask in the first frame of this sub_frame_list!') + print('right_hand', camera_id, frame_id) + if frame_idx == 0 and frame_id != '00001': + info_path = osp.join(right_hand_crop_dir, camera_id + '_' + str(int(frame_id)-1).zfill(5) + '_crop_info.pkl') + assert os.path.exists(info_path) + with open(info_path, 'rb') as f: + right_min_x, right_max_x, right_min_y, right_max_y, right_meanh, right_meanw = pickle.load(f) + + last_right_min_x = right_min_x + last_right_max_x = right_max_x + last_right_min_y = right_min_y + last_right_max_y = right_max_y + last_right_meanh = right_meanh + last_right_meanw = right_meanw + else: + print('GG, no hand mask in the first frame!') + exit(1) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_x: right_max_x, right_min_y: right_max_y] + right_crop_info = [right_min_x, right_max_x, right_min_y, right_max_y, right_meanh, right_meanw] + + with open(osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info, f) + + cv2.imwrite(osp.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + + # test_img = np.zeros((3000, 4096, 3)) + # for y, x in zip(mask_left_hand_idx[0], mask_left_hand_idx[1]): + # cv2.circle(test_img, (x, y), 5, (255, 0, 0), -1) + # cv2.imwrite('./test.png',test_img) + + if len(mask_left_hand_idx[0]) != 0: + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + left_min_x = max(0, left_midh - left_length // 2 - MARGIN_SIZE) + left_max_x = min(MAX_WIDTH, left_midh + left_length // 2 + MARGIN_SIZE) + left_min_y = max(0, left_midw - left_length // 2 - MARGIN_SIZE) + left_max_y = min(MAX_HEIGHT, left_midw + left_length // 2 + MARGIN_SIZE) + + last_left_min_x = left_min_x + last_left_max_x = left_max_x + last_left_min_y = left_min_y + last_left_max_y = left_max_y + last_left_meanh = left_meanh + last_left_meanw = left_meanw + elif frame_idx != 0 and len(mask_left_hand_idx[0]) == 0: + left_min_x = last_left_min_x + left_max_x = last_left_max_x + left_min_y = last_left_min_y + left_max_y = last_left_max_y + left_meanh = last_left_meanh + left_meanw = last_left_meanw + else: + print('no hand mask in the first frame of this sub_frame_list!') + print('left_hand', camera_id, frame_id) + if frame_idx == 0 and frame_id != '00001': + info_path = osp.join(left_hand_crop_dir, camera_id + '_' + str(int(frame_id)-1).zfill(5) + '_crop_info.pkl') + assert os.path.exists(info_path) + with open(info_path, 'rb') as f: + left_min_x, left_max_x, left_min_y, left_max_y, left_meanh, left_meanw = pickle.load(f) + + last_left_min_x = left_min_x + last_left_max_x = left_max_x + last_left_min_y = left_min_y + last_left_max_y = left_max_y + last_left_meanh = left_meanh + last_left_meanw = left_meanw + else: + print(frame_idx, frame_id) + print('GG, no hand mask in 
the first frame!')
+                    exit(1)
+
+            # paint the right hand white so only the left hand remains
+            left_crop = img.copy()
+            left_crop[mask_right_hand == 1] = 255.0
+
+            # center the crop on the left-hand bbox
+            left_crop = left_crop[left_min_x: left_max_x, left_min_y: left_max_y]
+            left_crop_info = [left_min_x, left_max_x, left_min_y, left_max_y, left_meanh, left_meanw]
+
+            with open(osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f:
+                pickle.dump(left_crop_info, f)
+
+            cv2.imwrite(osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png'), left_crop)
+
+def crop(img, min_h, max_h, min_w, max_w, black_min_h=None, black_max_h=None, black_min_w=None, black_max_w=None):
+    img = img.copy()
+
+    # if a second bbox is provided, blank (whiten) that region -- the other hand -- before cropping
+    if all(v is not None for v in (black_min_h, black_max_h, black_min_w, black_max_w)):
+        img[black_min_h: black_max_h, black_min_w: black_max_w] = 255.0
+
+    crop = img[min_h: max_h, min_w: max_w]
+
+    return crop
+
+def apply_factor_and_margin(min_h, max_h, min_w, max_w, factor, margin_size, MAX_H, MAX_W):
+    mid_h = (min_h + max_h) // 2
+    mid_w = (min_w + max_w) // 2
+
+    length = max(max_h - min_h, max_w - min_w)
+    length = int(length * factor)
+    half_length = length // 2
+
+    min_h = max(0, mid_h - half_length - margin_size)
+    max_h = min(MAX_H, mid_h + half_length + margin_size)
+    min_w = max(0, mid_w - half_length - margin_size)
+    max_w = min(MAX_W, mid_w + half_length + margin_size)
+
+    return min_h, max_h, min_w, max_w, mid_h, mid_w
+
+def crop_hand_from_pos(root, video_id: str, camera_id: str, frame_id: str, pos, crop_factor = 1, margin_size = 0):
+    H = 3000
+    W = 4096
+    MAX_HEIGHT = H - 1
+    MAX_WIDTH = W - 1
+
+    assert pos.shape == (2, 4)
+    # assert np.all(pos[...] >= 0), print(pos)
+    # assert np.all(pos[..., [0, 1]] <= MAX_HEIGHT), print(pos)
+    # assert np.all(pos[..., [2, 3]] <= MAX_WIDTH), print(pos)
+
+    if np.all(pos[...]
>= 0) and np.all(pos[..., [0, 1]] <= MAX_HEIGHT) and np.all(pos[..., [2, 3]] <= MAX_WIDTH): + + left_hand_crop_dir = osp.join(root, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = osp.join(root, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + bg = load_bg_img(video_id, camera_id, frame_id) + + # crop info + right_crop_info_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + (right_min_h, right_max_h, right_min_w, right_max_w) = pos[0] + right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w = apply_factor_and_margin(right_min_h, right_max_h, right_min_w, right_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + right_crop_info = [right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w] + + with open(right_crop_info_path, 'wb') as f: + pickle.dump(right_crop_info, f) + + left_crop_info_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + (left_min_h, left_max_h, left_min_w, left_max_w) = pos[1] + left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w = apply_factor_and_margin(left_min_h, left_max_h, left_min_w, left_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + left_crop_info = [left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w] + with open(left_crop_info_path, 'wb') as f: + pickle.dump(left_crop_info, f) + + right_crop = crop(bg, right_min_h, right_max_h, right_min_w, right_max_w, left_min_h, left_max_h, left_min_w, left_max_w) + right_crop_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(right_crop_path, right_crop) + + # left hand + left_crop = crop(bg, left_min_h, left_max_h, left_min_w, left_max_w, right_min_h, right_max_h, right_min_w, right_max_w) + left_crop_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(left_crop_path, left_crop) + +def crop_hand_from_pos_acc(root, local_root, date: str, crop_save_exp_name, crop_info_save_exp_name, video_id: str, camera_list: str, frame_list: str, represent_frame_id, img_batch, pos_batch, crop_factor = 1, margin_size = 0): + H = 3000 + W = 4096 + MAX_HEIGHT = H - 1 + MAX_WIDTH = W - 1 + + # assert pos.shape == (2, 4) + # assert np.all(pos[...] >= 0), print(pos) + # assert np.all(pos[..., [0, 1]] <= MAX_HEIGHT), print(pos) + # assert np.all(pos[..., [2, 3]] <= MAX_WIDTH), print(pos) + + # TODO + right_crop_info_batch = {} + left_crop_info_batch = {} + + for f_idx, frame_id in enumerate(frame_list): + right_crop_info_batch[frame_id] = {} + left_crop_info_batch[frame_id] = {} + + for c_idx, camera_id in enumerate(camera_list): + if camera_id not in pos_batch[frame_id].keys(): + continue + pos = pos_batch[frame_id][camera_id] + + if np.all(pos[...] 
>= 0) and np.all(pos[..., [0, 1]] <= MAX_HEIGHT) and np.all(pos[..., [2, 3]] <= MAX_WIDTH): + + left_hand_crop_dir = osp.join(local_root, date, video_id, crop_save_exp_name, 'left_hand', camera_id) + right_hand_crop_dir = osp.join(local_root, date, video_id, crop_save_exp_name, 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + bg = img_batch[f_idx][c_idx].copy() + + # crop info + + (right_min_h, right_max_h, right_min_w, right_max_w) = pos[0] + right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w = apply_factor_and_margin(right_min_h, right_max_h, right_min_w, right_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + right_crop_info = [right_min_h, right_max_h, right_min_w, right_max_w, right_mid_h, right_mid_w] + right_crop_info_batch[frame_id][camera_id] = right_crop_info + + # right_crop_info_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + # with open(right_crop_info_path, 'wb') as f: + # pickle.dump(right_crop_info, f) + + + (left_min_h, left_max_h, left_min_w, left_max_w) = pos[1] + left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w = apply_factor_and_margin(left_min_h, left_max_h, left_min_w, left_max_w, crop_factor, margin_size, MAX_HEIGHT, MAX_WIDTH) + + left_crop_info = [left_min_h, left_max_h, left_min_w, left_max_w, left_mid_h, left_mid_w] + left_crop_info_batch[frame_id][camera_id] = left_crop_info + + # left_crop_info_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl') + # with open(left_crop_info_path, 'wb') as f: + # pickle.dump(left_crop_info, f) + + right_crop = crop(bg, right_min_h, right_max_h, right_min_w, right_max_w, left_min_h, left_max_h, left_min_w, left_max_w) + right_crop_path = osp.join(right_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(right_crop_path, right_crop) + + # left hand + left_crop = crop(bg, left_min_h, left_max_h, left_min_w, left_max_w, right_min_h, right_max_h, right_min_w, right_max_w) + left_crop_path = osp.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png') + cv2.imwrite(left_crop_path, left_crop) + + right_crop_info_dir = osp.join(local_root, date, video_id, crop_info_save_exp_name, 'right_hand') + os.makedirs(right_crop_info_dir, exist_ok=True) + left_crop_info_dir = osp.join(local_root, date, video_id, crop_info_save_exp_name, 'left_hand') + os.makedirs(left_crop_info_dir, exist_ok=True) + + right_crop_info_path = osp.join(right_crop_info_dir, represent_frame_id + '_crop_info.pkl') + with open(right_crop_info_path, 'wb') as f: + pickle.dump(right_crop_info_batch, f) + left_crop_info_path = osp.join(left_crop_info_dir, represent_frame_id + '_crop_info.pkl') + with open(left_crop_info_path, 'wb') as f: + pickle.dump(left_crop_info_batch, f) + +def crop_from_init(root, date, video_id: str, camera_list: str, crop_factor = 1, margin_size = 0): + ''' + 手动标注第一二帧,从src中读取标注数据。 + crop一只手时,将另一只手的bbox所在区域全部置为黑。 + TODO: pos的shape有问题,要改 + ''' + frame_list = [str(i).zfill(5) for i in range(1, 3)] + + for camera_idx, camera_id in enumerate(camera_list): + + left_hand_crop_dir = osp.join(root, date, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = osp.join(root, date, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in enumerate(frame_list): + + pos = read_init_crop(video_id, camera_id, frame_id) 
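+            # Editor's note (not in the original source): crop_hand_from_pos is defined above as
+            # crop_hand_from_pos(root, video_id, camera_id, frame_id, pos, crop_factor, margin_size),
+            # so the call below appears to omit `root`; if this path is used, the intended call is
+            # probably crop_hand_from_pos(root, video_id, camera_id, frame_id, pos, crop_factor, margin_size).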
+ crop_hand_from_pos(video_id, camera_id, frame_id, pos, crop_factor, margin_size) + +def crop_from_mask_v2(root, video_id: str, from_exp_name, camera_list: str, frame_list: list[str], rgb_batch, crop_factor = 1, margin_size = 50): + + # for camera_id in camera_list: + # left_hand_crop_dir = os.path.join(root, video_id, 'crop', 'left_hand', camera_id) + # right_hand_crop_dir = os.path.join(root, video_id, 'crop', 'right_hand', camera_id) + # os.makedirs(left_hand_crop_dir, exist_ok=True) + # os.makedirs(right_hand_crop_dir, exist_ok=True) + + seg, downsample_factor = get_downsampled_seg_infos_batch_v2(video_id, from_exp_name, frame_list, camera_list) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + right_hand_valid_camera_dict = {} + left_hand_valid_camera_dict = {} + for frame in frame_list: + right_hand_valid_camera_dict[frame] = [] + left_hand_valid_camera_dict[frame] = [] + + for camera_idx, camera_id in enumerate(camera_list): + left_hand_crop_dir = os.path.join(root, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = os.path.join(root, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # 读入图片 + img = load_bg_img(video_id, camera_id, frame_id) + + # H = 3000 + # W = 4096 + (H, W) = img.shape[:2] + MAX_WIDTH = W - 1 + MAX_HEIGHT = H - 1 + + # upsample mask + mask_right_hand = cv2.resize(mask_right_hand, (W, H), interpolation=cv2.INTER_NEAREST) + mask_left_hand = cv2.resize(mask_left_hand, (W, H), interpolation=cv2.INTER_NEAREST) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_hand_valid_camera_dict[frame_id].append(camera_id) + + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = int(max(right_maxh - right_minh, right_maxw - right_minw) * crop_factor) + half_length = right_length // 2 + right_min_y = max(0, right_midh - half_length - margin_size) + right_max_y = min(MAX_HEIGHT, right_midh + half_length + margin_size) + right_min_x = max(0, right_midw - half_length - margin_size) + right_max_x = min(MAX_WIDTH, right_midw + half_length + margin_size) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_y: right_max_y, right_min_x: right_max_x] + right_crop_info = [right_min_y, right_max_y, right_min_x, right_max_x, right_meanh, right_meanw] + + with open(os.path.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info, f) + + cv2.imwrite(os.path.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + if len(mask_left_hand_idx[0]) != 0: + left_hand_valid_camera_dict[frame_id].append(camera_id) + + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + 
left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + half_length = left_length // 2 + left_min_y = max(0, left_midh - half_length - margin_size) + left_max_y = min(MAX_HEIGHT, left_midh + half_length + margin_size) + left_min_x = max(0, left_midw - half_length - margin_size) + left_max_x = min(MAX_WIDTH, left_midw + half_length + margin_size) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_y: left_max_y, left_min_x: left_max_x] + left_crop_info = [left_min_y, left_max_y, left_min_x, left_max_x, left_meanh, left_meanw] + + with open(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info, f) + + cv2.imwrite(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png'), left_crop) + + # save valid and invalid info + right_hand_invalid_camera_dict = {} + left_hand_invalid_camera_dict = {} + for frame in frame_list: + right_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in right_hand_valid_camera_dict[frame]] + left_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in left_hand_valid_camera_dict[frame]] + + log_path = os.path.join(root, video_id, 'crop', 'log.json') + if os.path.exists(log_path): + with open(log_path, 'r') as f: + log = json.load(f) + log['right_hand_valid_camera_dict'].update(right_hand_valid_camera_dict) + log['left_hand_valid_camera_dict'].update(left_hand_valid_camera_dict) + log['right_hand_invalid_camera_dict'].update(right_hand_invalid_camera_dict) + log['left_hand_invalid_camera_dict'].update(left_hand_invalid_camera_dict) + else: + log = {} + log['right_hand_valid_camera_dict'] = right_hand_valid_camera_dict + log['left_hand_valid_camera_dict'] = left_hand_valid_camera_dict + log['right_hand_invalid_camera_dict'] = right_hand_invalid_camera_dict + log['left_hand_invalid_camera_dict'] = left_hand_invalid_camera_dict + + with open(log_path, 'w') as f: + json.dump(log, f) + + return right_hand_valid_camera_dict, left_hand_valid_camera_dict, right_hand_invalid_camera_dict, left_hand_invalid_camera_dict + +def crop_from_mask_v3(root, video_id: str, from_exp_name, camera_list: str, frame_list: list[str], rgb_batch, crop_factor = 1, margin_size = 50): + ''' + 增加了rgb_batch,可以不用从本地再读图片。 + ''' + # for camera_id in camera_list: + # left_hand_crop_dir = os.path.join(root, video_id, 'crop', 'left_hand', camera_id) + # right_hand_crop_dir = os.path.join(root, video_id, 'crop', 'right_hand', camera_id) + # os.makedirs(left_hand_crop_dir, exist_ok=True) + # os.makedirs(right_hand_crop_dir, exist_ok=True) + + seg, downsample_factor = get_downsampled_seg_infos_batch_v2(video_id, from_exp_name, frame_list, camera_list) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + right_hand_valid_camera_dict = {} + left_hand_valid_camera_dict = {} + for frame in frame_list: + right_hand_valid_camera_dict[frame] = [] + left_hand_valid_camera_dict[frame] = [] + + for camera_idx, camera_id in enumerate(camera_list): + left_hand_crop_dir = os.path.join(root, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = os.path.join(root, video_id, 'crop', 'right_hand', camera_id) 
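+        # Editor's note (not in the original source): in the per-frame loop below, right_length is
+        # scaled by crop_factor but left_length is not; if both hands are meant to use the same
+        # factor, left_length should probably also be
+        # int(max(left_maxh - left_minh, left_maxw - left_minw) * crop_factor).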
+ os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # 读入图片 + # img = rgb_batch[int(frame_id)-1,camera_idx].copy() + # img = rgb_batch[int(frame_id)-1][camera_idx].copy() + img = rgb_batch[frame_idx][camera_idx].copy() + # img = load_bg_img(video_id, camera_id, frame_id) + + # H = 3000 + # W = 4096 + (H, W) = img.shape[:2] + MAX_WIDTH = W - 1 + MAX_HEIGHT = H - 1 + + # upsample mask + mask_right_hand = cv2.resize(mask_right_hand, (W, H), interpolation=cv2.INTER_NEAREST) + mask_left_hand = cv2.resize(mask_left_hand, (W, H), interpolation=cv2.INTER_NEAREST) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_hand_valid_camera_dict[frame_id].append(camera_id) + + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = int(max(right_maxh - right_minh, right_maxw - right_minw) * crop_factor) + half_length = right_length // 2 + right_min_y = max(0, right_midh - half_length - margin_size) + right_max_y = min(MAX_HEIGHT, right_midh + half_length + margin_size) + right_min_x = max(0, right_midw - half_length - margin_size) + right_max_x = min(MAX_WIDTH, right_midw + half_length + margin_size) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_y: right_max_y, right_min_x: right_max_x] + right_crop_info = [right_min_y, right_max_y, right_min_x, right_max_x, right_meanh, right_meanw] + + with open(os.path.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info, f) + + cv2.imwrite(os.path.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + if len(mask_left_hand_idx[0]) != 0: + left_hand_valid_camera_dict[frame_id].append(camera_id) + + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + half_length = left_length // 2 + left_min_y = max(0, left_midh - half_length - margin_size) + left_max_y = min(MAX_HEIGHT, left_midh + half_length + margin_size) + left_min_x = max(0, left_midw - half_length - margin_size) + left_max_x = min(MAX_WIDTH, left_midw + half_length + margin_size) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_y: left_max_y, left_min_x: left_max_x] + left_crop_info = [left_min_y, left_max_y, left_min_x, left_max_x, left_meanh, left_meanw] + + with open(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info, f) + + cv2.imwrite(os.path.join(left_hand_crop_dir, camera_id 
+ '_' + frame_id + '.png'), left_crop) + + # save valid and invalid info + right_hand_invalid_camera_dict = {} + left_hand_invalid_camera_dict = {} + for frame in frame_list: + right_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in right_hand_valid_camera_dict[frame]] + left_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in left_hand_valid_camera_dict[frame]] + + log_path = os.path.join(root, video_id, 'crop', 'log.json') + if os.path.exists(log_path): + with open(log_path, 'r') as f: + log = json.load(f) + log['right_hand_valid_camera_dict'].update(right_hand_valid_camera_dict) + log['left_hand_valid_camera_dict'].update(left_hand_valid_camera_dict) + log['right_hand_invalid_camera_dict'].update(right_hand_invalid_camera_dict) + log['left_hand_invalid_camera_dict'].update(left_hand_invalid_camera_dict) + else: + log = {} + log['right_hand_valid_camera_dict'] = right_hand_valid_camera_dict + log['left_hand_valid_camera_dict'] = left_hand_valid_camera_dict + log['right_hand_invalid_camera_dict'] = right_hand_invalid_camera_dict + log['left_hand_invalid_camera_dict'] = left_hand_invalid_camera_dict + + with open(log_path, 'w') as f: + json.dump(log, f) + + return right_hand_valid_camera_dict, left_hand_valid_camera_dict, right_hand_invalid_camera_dict, left_hand_invalid_camera_dict + +def crop_from_mask_v3_acc_batch(root, local_root, date, video_id: str, from_exp_name, camera_list: str, represent_frame_id, frame_list: list[str], BATCH_SIZE, rgb_batch, crop_factor = 1, margin_size = 50): + ''' + 增加了rgb_batch,可以不用从本地再读图片。 + 每一个batch的文件分左右手写在一起。 + ''' + + seg, downsample_factor = get_downsampled_seg_infos_batch_v2_acc_batch(root, date, video_id, from_exp_name, frame_list, camera_list, represent_frame_id=represent_frame_id) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + + right_hand_valid_camera_dict = {} + left_hand_valid_camera_dict = {} + for frame in frame_list: + right_hand_valid_camera_dict[frame] = [] + left_hand_valid_camera_dict[frame] = [] + + # represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list) + # represent_keys = list(represent_relation.keys()) + # assert len(represent_keys) == 1 + # represent_frame_id = represent_keys[0] + + left_crop_info_batch = {} + left_crop_batch = {} + right_crop_info_batch = {} + right_crop_batch = {} + + for camera_idx, camera_id in enumerate(camera_list): + left_hand_crop_dir = os.path.join(local_root, date, video_id, 'crop', 'left_hand', camera_id) + right_hand_crop_dir = os.path.join(local_root, date, video_id, 'crop', 'right_hand', camera_id) + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + for frame_idx, frame_id in tqdm(enumerate(frame_list)): + mask_right_hand = right_hand_seg[frame_idx, camera_idx] + mask_left_hand = left_hand_seg[frame_idx, camera_idx] + + # 读入图片 + img = rgb_batch[frame_idx][camera_idx].copy() + + # H = 3000 + # W = 4096 + (H, W) = img.shape[:2] + MAX_WIDTH = W - 1 + MAX_HEIGHT = H - 1 + + # upsample mask + mask_right_hand = cv2.resize(mask_right_hand, (W, H), interpolation=cv2.INTER_NEAREST) + mask_left_hand = cv2.resize(mask_left_hand, (W, H), interpolation=cv2.INTER_NEAREST) + + # 以下为右手 + mask_right_hand_idx = np.nonzero(mask_right_hand) + if len(mask_right_hand_idx[0]) != 0: + right_hand_valid_camera_dict[frame_id].append(camera_id) + + right_minh = np.min(mask_right_hand_idx[0]) + right_maxh = 
np.max(mask_right_hand_idx[0]) + right_minw = np.min(mask_right_hand_idx[1]) + right_maxw = np.max(mask_right_hand_idx[1]) + right_midh = (right_maxh + right_minh) // 2 + right_midw = (right_maxw + right_minw) // 2 + right_meanh = np.mean(mask_right_hand_idx[0]) + right_meanw = np.mean(mask_right_hand_idx[1]) + + right_length = int(max(right_maxh - right_minh, right_maxw - right_minw) * crop_factor) + half_length = right_length // 2 + right_min_y = max(0, right_midh - half_length - margin_size) + right_max_y = min(MAX_HEIGHT, right_midh + half_length + margin_size) + right_min_x = max(0, right_midw - half_length - margin_size) + right_max_x = min(MAX_WIDTH, right_midw + half_length + margin_size) + + # 将左手变成白色 + right_crop = img.copy() + right_crop[mask_left_hand == 1] = 255.0 + + # 居中 + right_crop = right_crop[right_min_y: right_max_y, right_min_x: right_max_x] + right_crop_info = [right_min_y, right_max_y, right_min_x, right_max_x, right_meanh, right_meanw] + + if frame_id not in right_crop_info_batch: + right_crop_info_batch[frame_id] = {} + right_crop_info_batch[frame_id][camera_id] = right_crop_info + + if frame_id not in right_crop_batch: + right_crop_batch[frame_id] = {} + right_crop_batch[frame_id][camera_id] = right_crop + + # with open(os.path.join(right_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + # pickle.dump(right_crop_info, f) + + cv2.imwrite(os.path.join(right_hand_crop_dir, camera_id + '_' +frame_id + '.png'), right_crop) + + # 以下为左手 + mask_left_hand_idx = np.nonzero(mask_left_hand) + if len(mask_left_hand_idx[0]) != 0: + left_hand_valid_camera_dict[frame_id].append(camera_id) + + left_minh = np.min(mask_left_hand_idx[0]) + left_maxh = np.max(mask_left_hand_idx[0]) + left_minw = np.min(mask_left_hand_idx[1]) + left_maxw = np.max(mask_left_hand_idx[1]) + left_midh = (left_maxh + left_minh) // 2 + left_midw = (left_maxw + left_minw) // 2 + left_meanh = np.mean(mask_left_hand_idx[0]) + left_meanw = np.mean(mask_left_hand_idx[1]) + + left_length = max(left_maxh - left_minh, left_maxw - left_minw) + half_length = left_length // 2 + left_min_y = max(0, left_midh - half_length - margin_size) + left_max_y = min(MAX_HEIGHT, left_midh + half_length + margin_size) + left_min_x = max(0, left_midw - half_length - margin_size) + left_max_x = min(MAX_WIDTH, left_midw + half_length + margin_size) + + # 将右手变成白色 + left_crop = img.copy() + left_crop[mask_right_hand == 1] = 255.0 + + # 居中 + left_crop = left_crop[left_min_y: left_max_y, left_min_x: left_max_x] + left_crop_info = [left_min_y, left_max_y, left_min_x, left_max_x, left_meanh, left_meanw] + + if frame_id not in left_crop_info_batch: + left_crop_info_batch[frame_id] = {} + left_crop_info_batch[frame_id][camera_id] = left_crop_info + + if frame_id not in left_crop_batch: + left_crop_batch[frame_id] = {} + left_crop_batch[frame_id][camera_id] = left_crop + + # with open(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '_crop_info.pkl'), 'wb') as f: + # pickle.dump(left_crop_info, f) + + cv2.imwrite(os.path.join(left_hand_crop_dir, camera_id + '_' + frame_id + '.png'), left_crop) + + + # save valid and invalid info + right_hand_invalid_camera_dict = {} + left_hand_invalid_camera_dict = {} + for frame in frame_list: + right_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in right_hand_valid_camera_dict[frame]] + left_hand_invalid_camera_dict[frame] = [camera for camera in camera_list if camera not in left_hand_valid_camera_dict[frame]] + + left_hand_crop_dir 
= os.path.join(local_root, date, video_id, 'crop_batch', 'left_hand') + right_hand_crop_dir = os.path.join(local_root, date, video_id, 'crop_batch', 'right_hand') + os.makedirs(left_hand_crop_dir, exist_ok=True) + os.makedirs(right_hand_crop_dir, exist_ok=True) + + with open(os.path.join(right_hand_crop_dir, represent_frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(right_crop_info_batch, f) + with open(os.path.join(right_hand_crop_dir, represent_frame_id + '_crop.pkl'), 'wb') as f: + pickle.dump(right_crop_batch, f) + + with open(os.path.join(left_hand_crop_dir, represent_frame_id + '_crop_info.pkl'), 'wb') as f: + pickle.dump(left_crop_info_batch, f) + with open(os.path.join(left_hand_crop_dir, represent_frame_id + '_crop.pkl'), 'wb') as f: + pickle.dump(left_crop_batch, f) + + # 寻思这log也没人看啊 + # log_path = os.path.join(root, video_id, 'crop', 'log.json') + # if os.path.exists(log_path): + # with open(log_path, 'r') as f: + # log = json.load(f) + # log['right_hand_valid_camera_dict'].update(right_hand_valid_camera_dict) + # log['left_hand_valid_camera_dict'].update(left_hand_valid_camera_dict) + # log['right_hand_invalid_camera_dict'].update(right_hand_invalid_camera_dict) + # log['left_hand_invalid_camera_dict'].update(left_hand_invalid_camera_dict) + # else: + # log = {} + # log['right_hand_valid_camera_dict'] = right_hand_valid_camera_dict + # log['left_hand_valid_camera_dict'] = left_hand_valid_camera_dict + # log['right_hand_invalid_camera_dict'] = right_hand_invalid_camera_dict + # log['left_hand_invalid_camera_dict'] = left_hand_invalid_camera_dict + + # with open(log_path, 'w') as f: + # json.dump(log, f) + + return right_hand_valid_camera_dict, left_hand_valid_camera_dict, right_hand_invalid_camera_dict, left_hand_invalid_camera_dict + +if __name__ == "__main__": + camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946'] + # camera_list = ['22139911'] + + parser = argparse.ArgumentParser() + parser.add_argument('--video_id', required=True, type=str) + args = parser.parse_args() + video_id = args.video_id + root = '/share/datasets/HOI-mocap' + date = video_id[:8] + + img_dir = osp.join(root, date, video_id, 'imgs', camera_list[0]) # 默认每个视角的frame数相同 + assert os.path.isdir(img_dir) + img_filename_list = list(scandir(img_dir, 'png')) + frame_list = [] + for img_filename in img_filename_list: + frame_id = img_filename[-9:-4] + frame_list.append(frame_id) + + procs = [] + for camera_id in camera_list: + args = (video_id, [camera_id], frame_list) + proc = mlp.Process(target=crop_from_mask, args=args) + + proc.start() + procs.append(proc) + + for i in range(len(procs)): + procs[i].join() + +if __name__ == '__main__': + camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139916', '22139946'] + frame_list = [str(i).zfill(5) for i in range(1,3)] + crop_from_init('20230904_01', camera_list, frame_list) \ No newline at end of file diff --git a/utils/utils/downsample_mesh.py b/utils/utils/downsample_mesh.py new file mode 100755 index 0000000000000000000000000000000000000000..6386f6e177dbeb1e9ea62a662887e53908a903dc --- /dev/null +++ b/utils/utils/downsample_mesh.py @@ -0,0 +1,48 @@ +''' +读取obj文件,并且通过o3d将mesh进行downsample以减少计算量,指定将其downsample到某个顶点数量。 + +TODO: 在20230715_15中mask == 3是左手操控的物体,与后面的标准相反。此处暂时将左手操作的物体记作object1。 + +example: +python utils/downsample_mesh.py --object_src_path /share/datasets/HOI-mocap/object_models/0802-zsz-object004/object004_cm.obj --video_id 20230715_15 --object_label 1 +''' + 
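# A minimal helper sketch (editorial; `square_crop_box` is a hypothetical name, not defined in
# this patch) of the square-crop computation that the left-hand/right-hand branches of the
# crop_from_mask* functions above essentially repeat: take the nonzero extent of a
# full-resolution binary mask, centre a square of side max(extent) * crop_factor on it,
# pad by margin_size and clamp to the image bounds.
import numpy as np

def square_crop_box(mask, crop_factor=1.0, margin_size=50):
    """Return [min_y, max_y, min_x, max_x, mean_h, mean_w], or None if the mask is empty."""
    idx = np.nonzero(mask)
    if len(idx[0]) == 0:
        return None
    H, W = mask.shape[:2]
    min_h, max_h = np.min(idx[0]), np.max(idx[0])
    min_w, max_w = np.min(idx[1]), np.max(idx[1])
    mid_h, mid_w = (min_h + max_h) // 2, (min_w + max_w) // 2
    half_length = int(max(max_h - min_h, max_w - min_w) * crop_factor) // 2
    return [max(0, mid_h - half_length - margin_size),
            min(H - 1, mid_h + half_length + margin_size),
            max(0, mid_w - half_length - margin_size),
            min(W - 1, mid_w + half_length + margin_size),
            float(np.mean(idx[0])), float(np.mean(idx[1]))]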
+import open3d as o3d +import argparse +import os + +if __name__ == '__main__': + + parser = argparse.ArgumentParser() + parser.add_argument('--object_src_path', required=True, type=str) + parser.add_argument('--video_id', required=True, type=str) + parser.add_argument('--object_label', required=True, type=int) + parser.add_argument('--num_verts', default=2000, required=False, type=int) + args = parser.parse_args() + + object_src_path = args.object_src_path + video_id = args.video_id + object_label = args.object_label + num_verts = args.num_verts + + assert os.path.exists(object_src_path) + + mesh = o3d.io.read_triangle_mesh(object_src_path) + simplified_mesh = mesh.simplify_quadric_decimation(num_verts) # 指定要保留的顶点数量 + + save_dir = os.path.join('/share/hlyang/results', video_id, 'src') + os.makedirs(save_dir, exist_ok=True) + if object_label == 1: + save_path = save_dir = os.path.join(save_dir, 'object1.obj') + else: + save_path = save_dir = os.path.join(save_dir, 'object2.obj') + o3d.io.write_triangle_mesh(save_path, simplified_mesh) + +# # 加载网格 +# mesh = o3d.io.read_triangle_mesh('../src/Scan.obj') + +# # 进行网格下采样 +# simplified_mesh = mesh.simplify_quadric_decimation(2000) # 指定要保留的顶点数量 + +# # 保存下采样后的网格 +# o3d.io.write_triangle_mesh('../src/bottle.obj', simplified_mesh) \ No newline at end of file diff --git a/utils/utils/get_bbox_from_mano_params.py b/utils/utils/get_bbox_from_mano_params.py new file mode 100755 index 0000000000000000000000000000000000000000..de92fd791a8ea97c5e6a6ce3c7da4f5cc5a23162 --- /dev/null +++ b/utils/utils/get_bbox_from_mano_params.py @@ -0,0 +1,216 @@ +import os +import sys +current_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.join(current_dir, '..')) +from utils.hoi_io import render_from_mano_params, load_bg_img, cvt_multiview_imgs2mp4, load_mano_info_batch, get_camera_params +import torch +import numpy as np +import cv2 +import pickle +from manopth.manopth.manolayer import ManoLayer +from pytorch3d.structures import Meshes +from pytorch3d.renderer import (PerspectiveCameras, PointLights, RasterizationSettings, MeshRenderer, MeshRasterizer, SoftPhongShader, SoftSilhouetteShader, SoftPhongShader, TexturesVertex) +from tqdm import tqdm + +def get_bbox_from_hand_params(video_id, from_exp_name, camera_list, frame_list, right_hand_bool, enlarge_factor, device): + ''' + return: + bbox: shape: [num_frame, num_camera, 4] [min_h, max_h, min_w, max_w] + ''' + device = torch.device(device) + num_frame = len(frame_list) + num_camera = len(camera_list) + + rendered_image_batch = render_from_mano_params(video_id, from_exp_name, camera_list, frame_list, right_hand_bool, device) + rendered_image_batch = rendered_image_batch.numpy() + + MAX_HEIGHT = 4095 + MAX_WIDTH = 2999 + + # TODO:试试批处理能不能更快,似乎太大了后np.nonzero会卡住 + bbox_list = [] + for i in range(num_frame): + bbox_list_camera = [] + for j in range(num_camera): + non_white_pixels = np.all(rendered_image_batch[i, j, ...] 
!= [1., 1., 1.], axis=-1) + hand_idx = np.nonzero(non_white_pixels) + min_h = np.min(hand_idx[0]) + max_h = np.max(hand_idx[0]) + mid_h = (min_h + max_h) // 2 + + min_w = np.min(hand_idx[1]) + max_w = np.max(hand_idx[1]) + mid_w = (min_w + max_w) // 2 + + length = int(max(max_h - min_h, max_w - min_w) * enlarge_factor) + + half_length = length // 2 + min_h_ = max(0, mid_h - half_length) + max_h_ = min(MAX_WIDTH, mid_h + half_length) + min_w_ = max(0, mid_w - half_length) + max_w_ = min(MAX_HEIGHT, mid_w + half_length) + # bbox_list_camera.append(torch.IntTensor([min_h_, max_h_, min_w_, max_w_])) + + # numpy 顺序 左上角x,左上角y,右下角x,右下角y + # bbox_list_camera.append(np.array([min_h_, max_h_, min_w_, max_w_])) + bbox_list_camera.append(np.array([min_w_, min_h_, max_w_, max_h_])) + + bbox_list_camera = np.stack(bbox_list_camera) + bbox_list.append(bbox_list_camera) + bbox_list = np.stack(bbox_list) + return bbox_list + +def get_bbox_from_hand_params_light_memory_use(video_id, from_exp_name, camera_list, frame_list, right_hand_bool, enlarge_factor, device): + ''' + TODO: 增加从文件读取shape参数 + ''' + + hand_trans_batch, hand_pose_batch, _ = load_mano_info_batch(video_id, from_exp_name, frame_list, right_hand_bool) + num_frame = len(frame_list) + + device = torch.device(device) + + calibration_info_path = os.path.join('/share/hlyang/results', video_id, 'src', 'calibration.json') + assert os.path.join(calibration_info_path) + R, R_inverse, T, K, focal_length, principal_point, image_size = get_camera_params(calibration_info_path, camera_list) + + use_pca = False + ncomps = 45 + if right_hand_bool: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='right', center_idx=0) + else: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='left', center_idx=0) + + lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]]) + camera = PerspectiveCameras(device=device, R=R_inverse, T=T, image_size=image_size, in_ndc=False, focal_length=-focal_length, principal_point=principal_point) + raster_settings = RasterizationSettings( + image_size=(3000, 4096), + blur_radius=0, + faces_per_pixel=1, + ) + renderer = MeshRenderer(rasterizer=MeshRasterizer(cameras=camera, raster_settings=raster_settings), shader=SoftPhongShader(device=device, cameras=camera, lights=lights)) + faces_idx = mano_layer.th_faces.detach().clone().to(device) + + MAX_HEIGHT = 4095 + MAX_WIDTH = 2999 + + bbox_list = [] + for i, frame in tqdm(enumerate(frame_list)): + bbox_list_camera = [] + + hand_pose = hand_pose_batch[i, ...] + hand_trans = hand_trans_batch[i, ...] + + if len(hand_pose.shape) == 1: + hand_pose = hand_pose.unsqueeze(0) + verts, _, _ = mano_layer(hand_pose) + verts = verts.squeeze() + verts = verts / 1000.0 + verts += hand_trans + verts = verts.to(device) + + + mesh = Meshes(verts=[verts], faces=[faces_idx]) + color = torch.ones(1, verts.size(0), 3, device=device) + color[:, :, 2] = 255 + mesh.textures = TexturesVertex(verts_features=color) + mesh = mesh.extend(R.shape[0]) + + images = renderer(mesh)[..., :3].squeeze().cpu().numpy() + + for j, camera in enumerate(camera_list): + non_white_pixels = np.all(images[j, ...] 
!= [1., 1., 1.], axis=-1) + hand_idx = np.nonzero(non_white_pixels) + min_h = np.min(hand_idx[0]) + max_h = np.max(hand_idx[0]) + mid_h = (min_h + max_h) // 2 + + min_w = np.min(hand_idx[1]) + max_w = np.max(hand_idx[1]) + mid_w = (min_w + max_w) // 2 + + length = int(max(max_h - min_h, max_w - min_w) * enlarge_factor) + + half_length = length // 2 + min_h_ = max(0, mid_h - half_length) + max_h_ = min(MAX_WIDTH, mid_h + half_length) + min_w_ = max(0, mid_w - half_length) + max_w_ = min(MAX_HEIGHT, mid_w + half_length) + + length = int(max(max_h - min_h, max_w - min_w) * enlarge_factor) + + half_length = length // 2 + min_h_ = max(0, mid_h - half_length) + max_h_ = min(MAX_WIDTH, mid_h + half_length) + min_w_ = max(0, mid_w - half_length) + max_w_ = min(MAX_HEIGHT, mid_w + half_length) + + bbox_list_camera.append(np.array([min_w_, min_h_, max_w_, max_h_])) + bbox_list_camera = np.stack(bbox_list_camera) + bbox_list.append(bbox_list_camera) + bbox_list = np.stack(bbox_list) + return bbox_list + +def draw_bbox_from_hand_params(video_id, from_exp_name, camera_list, frame_list, right_hand_bool, enlarge_factor, resize_bool, resize_resolution, device): + device = torch.device(device) + # bbox = get_bbox_from_hand_params(video_id, from_exp_name, camera_list, frame_list, right_hand_bool, enlarge_factor, device) + bbox = get_bbox_from_hand_params_light_memory_use(video_id, from_exp_name, camera_list, frame_list, right_hand_bool, enlarge_factor, device) + + # 每一帧用前一帧的结果画bbox + point_color = (0, 255, 0) + thickness = 20 + lineType = 4 + + for frame_idx, frame in enumerate(frame_list[1:]): # frame_idx从0开始,frame从1开始 + for camera_idx, camera in enumerate(camera_list): + if right_hand_bool: + save_dir = os.path.join('/share/hlyang/results', video_id, exp_name, 'vis', 'right_hand', camera) + else: + save_dir = os.path.join('/share/hlyang/results', video_id, exp_name, 'vis', 'left_hand', camera) + + os.makedirs(save_dir, exist_ok=True) + bg = load_bg_img(video_id, camera, frame) + cv2.rectangle(bg, bbox[frame_idx, camera_idx, 0:2], bbox[frame_idx, camera_idx, 2:4], point_color, thickness, lineType) + if resize_bool: + cv2.resize(bg, resize_resolution, interpolation=cv2.INTER_LINEAR) + cv2.imwrite(os.path.join(save_dir, f'{camera}_{frame}.png'), bg) + + +if __name__ == '__main__': + exp_name = 'get_bbox_from_hand_params' + resize_bool = True + resize_resolution = (1024, 750) + factor = 1.4 + device = 7 + + camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139916', '22139946'] + + video_id_list = [f'20230818_0{i}' for i in ('3', '4', '5', '6', '7')] + for video_id in video_id_list: + + metadata_path = os.path.join('/share/hlyang/results', video_id, 'metadata', f'{camera_list[0]}.pkl') + assert os.path.exists(metadata_path) + with open(metadata_path, 'rb') as f: + metadata = pickle.load(f) + start = 1 + end = metadata['num_frame'] + frame_list = [str(frame).zfill(5) for frame in range(start, end + 1)] + + # frame_list = [str(i).zfill(5) for i in range(1,5)] + # video_id = '20230818_03' + + draw_bbox_from_hand_params(video_id, 'fit_hand_joint_ransac_batch_by_squence', camera_list, frame_list, True, factor, resize_bool, resize_resolution, device) + + img_dir = os.path.join('/share/hlyang/results', video_id, exp_name, 'vis', 'right_hand') + save_video_path = os.path.join('/share/hlyang/results', video_id, exp_name, 'vis', 'right_hand', 'right_hand.mp4') + cvt_multiview_imgs2mp4(img_dir, save_video_path, 1, camera_list, frame_list[1:]) + save_video_path = 
os.path.join('/share/hlyang/results', video_id, exp_name, 'vis', 'right_hand', 'right_hand_30fps.mp4') + cvt_multiview_imgs2mp4(img_dir, save_video_path, 30, camera_list, frame_list[1:]) + + draw_bbox_from_hand_params(video_id, 'fit_hand_joint_ransac_batch_by_squence', camera_list, frame_list, False, factor, resize_bool, resize_resolution, device) + + img_dir = os.path.join('/share/hlyang/results', video_id, exp_name, 'vis', 'left_hand') + save_video_path = os.path.join('/share/hlyang/results', video_id, exp_name, 'vis', 'left_hand', 'left_hand.mp4') + cvt_multiview_imgs2mp4(img_dir, save_video_path, 1, camera_list, frame_list[1:]) + save_video_path = os.path.join('/share/hlyang/results', video_id, exp_name, 'vis', 'left_hand', 'left_hand_30fps.mp4') + cvt_multiview_imgs2mp4(img_dir, save_video_path, 30, camera_list, frame_list[1:]) diff --git a/utils/utils/get_nokov_success_record.py b/utils/utils/get_nokov_success_record.py new file mode 100644 index 0000000000000000000000000000000000000000..89ca5fd8586e6bc16b10525696eb827c4ee30209 --- /dev/null +++ b/utils/utils/get_nokov_success_record.py @@ -0,0 +1,31 @@ +import os +import sys +sys.path.append('.') +from utils.hoi_io2 import get_valid_video_list, get_num_frame, get_num_frame_v2 +from utils.organize_dataset import add_a_line, organize_record_file + +if __name__ == "__main__": + + # date_list = ['20230917', '20230919', '20230923', '20230926', '20230927', '20230928', '20230929', '20231002', '20231005', '20231006', '20231010', '20231013', '20231015'] + # date_list = ['20231019', '20231020', '20231026', '20231027', '20231031', '20231102'] + # date_list = ['20231103', '20231104', '20231105'] + date_list = ['20230930'] + # date_list = ['20231024'] + + # video_record_root = '/share/hlyang/results' + video_record_root = '/data2/hlyang/results' + record_root = '/data2/hlyang/results' + upload_root = '/data2/HOI-mocap' + + for date in date_list: + video_list = get_valid_video_list(video_record_root, date, remove_hand=True) + nokov_success_record_path = os.path.join(record_root, 'record', f'{date}_nokov_succeed.txt') + + for video_id in video_list: + obj_pose_dir = os.path.join(upload_root, 'HO_poses', date, video_id, 'objpose') + invalid_path = os.path.join(upload_root, 'HO_poses', date, video_id, 'invalid') + if os.path.isdir(obj_pose_dir) and not os.path.isfile(invalid_path): + add_a_line(nokov_success_record_path, f'{video_id}') + + if os.path.exists(nokov_success_record_path): + organize_record_file(nokov_success_record_path) \ No newline at end of file diff --git a/utils/utils/get_world_mesh_from_mano_params.py b/utils/utils/get_world_mesh_from_mano_params.py new file mode 100644 index 0000000000000000000000000000000000000000..6fcd2587dfa345b76a4d805bf22e3d1905bd7be0 --- /dev/null +++ b/utils/utils/get_world_mesh_from_mano_params.py @@ -0,0 +1,217 @@ +''' +python utils/get_world_mesh_from_mano_params.py --video_id 20230904_01 +''' +import os +import sys +current_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.join(current_dir, '..')) +import pickle +from utils.hoi_io import load_mano_info_batch, load_mano_info_batch_acc, cal_represent_frame_list +from manopth.manopth.manolayer import ManoLayer +from tqdm import tqdm +from utils.save_obj import save_obj +import argparse +from utils.scandir import scandir +import torch +import trimesh + +def save_world_mesh_from_mano_params(date, video_id, from_exp_name, save_exp_name, frame_list, right_hand_bool): + num_frame = len(frame_list) + + hand_trans, hand_pose, _ = 
load_mano_info_batch(video_id, from_exp_name, frame_list, right_hand_bool) + + if right_hand_bool: + shape_path = os.path.join('/share/hlyang/results', video_id, 'src', 'right_hand_shape.pkl') + else: + shape_path = os.path.join('/share/hlyang/results', video_id, 'src', 'left_hand_shape.pkl') + + if os.path.exists(shape_path): + with open(shape_path, 'rb') as f: + shape_data = pickle.load(f) + hand_shape = shape_data['hand_shape'] + else: + hand_shape = torch.zeros((10)) + hand_shape = hand_shape.unsqueeze(0).expand(num_frame, -1) + + use_pca = False + ncomps = 45 + if right_hand_bool: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='right', center_idx=0) + else: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='left', center_idx=0) + + faces_idx = mano_layer.th_faces.detach() + + verts, _, _ = mano_layer(hand_pose, hand_shape) + verts = verts + verts = verts / 1000.0 + verts += hand_trans.unsqueeze(1) + + if right_hand_bool: + save_dir = os.path.join('/share/hlyang/results', video_id, save_exp_name, 'meshes', 'right_hand') + else: + save_dir = os.path.join('/share/hlyang/results', video_id, save_exp_name, 'meshes', 'left_hand') + os.makedirs(save_dir, exist_ok=True) + for i, frame in tqdm(enumerate(frame_list)): + save_path = os.path.join(save_dir, f'hand_{frame}.obj') + save_obj(save_path, verts[i], faces_idx) + +def save_world_mesh_from_mano_params2(date, video_id, from_exp_name, save_exp_name, frame_list, right_hand_bool): + num_frame = len(frame_list) + + hand_trans, hand_pose, _ = load_mano_info_batch_acc(date, video_id, from_exp_name, frame_list, right_hand_bool, represent_frame_id=frame_list[0]) + + if right_hand_bool: + shape_path = os.path.join('/share/hlyang/results', date, video_id, 'src', 'right_hand_shape.pkl') + else: + shape_path = os.path.join('/share/hlyang/results', date, video_id, 'src', 'left_hand_shape.pkl') + + if os.path.exists(shape_path): + with open(shape_path, 'rb') as f: + shape_data = pickle.load(f) + hand_shape = shape_data['hand_shape'] + else: + hand_shape = torch.zeros((10)) + hand_shape = hand_shape.unsqueeze(0).expand(num_frame, -1) + + use_pca = False + ncomps = 45 + if right_hand_bool: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='right', center_idx=0) + else: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='left', center_idx=0) + + faces_idx = mano_layer.th_faces.detach() + + verts, _, _ = mano_layer(hand_pose, hand_shape) + verts = verts + verts = verts / 1000.0 + verts += hand_trans.unsqueeze(1) + + if right_hand_bool: + save_dir = os.path.join('/share/hlyang/results', date, video_id, save_exp_name, 'meshes', 'right_hand') + else: + save_dir = os.path.join('/share/hlyang/results', date, video_id, save_exp_name, 'meshes', 'left_hand') + os.makedirs(save_dir, exist_ok=True) + for i, frame in tqdm(enumerate(frame_list)): + save_path = os.path.join(save_dir, f'hand_{frame}.obj') + save_obj(save_path, verts[i], faces_idx) + +def load_world_meshes_acc(date, video_id, from_exp_name, frame_list, right_hand_bool, BATCH_SIZE = 20, represent_frame_id = None): + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + trimesh_mesh_dict = {} + + if right_hand_bool: + save_dir = os.path.join('/share/hlyang/results', date, video_id, 
from_exp_name, 'meshes_batch', 'right_hand') + else: + save_dir = os.path.join('/share/hlyang/results', date, video_id, from_exp_name, 'meshes_batch', 'left_hand') + + for represent_frame_id, represented_frame_list in represent_relation.items(): + path = os.path.join(save_dir, f'hand_{represent_frame_id}.obj') + with open(path, 'rb') as f: + data = pickle.load(f) + + f = data['f'] + for f_idx, frame_id in enumerate(represented_frame_list): + v = data[frame_id] + mesh = trimesh.Trimesh(vertices=v, faces=f) + trimesh_mesh_dict[frame_id] = mesh + + return trimesh_mesh_dict + + + +def save_world_mesh_from_mano_params_acc(date, video_id, from_exp_name, save_exp_name, frame_list, represent_frame_id, right_hand_bool): + num_frame = len(frame_list) + + hand_trans, hand_pose, _ = load_mano_info_batch_acc(date, video_id, from_exp_name, frame_list, right_hand_bool) + + if right_hand_bool: + shape_path = os.path.join('/share/hlyang/results', date, video_id, 'src', 'right_hand_shape.pkl') + else: + shape_path = os.path.join('/share/hlyang/results', date, video_id, 'src', 'left_hand_shape.pkl') + + if os.path.exists(shape_path): + with open(shape_path, 'rb') as f: + shape_data = pickle.load(f) + hand_shape = shape_data['hand_shape'] + else: + hand_shape = torch.zeros((10)) + hand_shape = hand_shape.unsqueeze(0).expand(num_frame, -1) + + use_pca = False + ncomps = 45 + if right_hand_bool: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='right', center_idx=0) + else: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='left', center_idx=0) + + faces_idx = mano_layer.th_faces.detach() + + verts, _, _ = mano_layer(hand_pose, hand_shape) + verts = verts + verts = verts / 1000.0 + verts += hand_trans.unsqueeze(1) + + if right_hand_bool: + save_dir = os.path.join('/share/hlyang/results', date, video_id, save_exp_name, 'meshes_batch', 'right_hand') + else: + save_dir = os.path.join('/share/hlyang/results', date, video_id, save_exp_name, 'meshes_batch', 'left_hand') + os.makedirs(save_dir, exist_ok=True) + + save_data = {} + save_data['f'] = faces_idx.numpy() + for i, frame in tqdm(enumerate(frame_list)): + save_data[frame] = verts[i].numpy() + + save_path = os.path.join(save_dir, f'hand_{represent_frame_id}.obj') + with open(save_path, 'wb') as f: + pickle.dump(save_data, f) + +if __name__ == '__main__': + exp_name = 'get_world_mesh_from_mano_params_joint' + # video_id_list = [f'20230904_{str(i).zfill(2)}' for i in range(2,3)] + # video_id_list = ['20230904_01'] + + parser = argparse.ArgumentParser() + parser.add_argument('--video_id', required=True, type=str) + # parser.add_argument('--start', required=False, type=int, default=0) + # parser.add_argument('--end', required=False, type=int, default=0) + args = parser.parse_args() + video_id = args.video_id + + # # start和end要么同时输入,要么使用默认,从metadata中取数 + # start = args.start + # end = args.end + # assert (start == 0 and end == 0) or (start > 0 and start <= end) + # if start == 0 and end == 0: + # start = 1 + # metadata_path = os.path.join('/share/hlyang/results', video_id, 'metadata', f'{camera_list[0]}.pkl') + # assert os.path.exists(metadata_path) + # with open(metadata_path, 'rb') as f: + # metadata = pickle.load(f) + # end = metadata['num_frame'] + + # for video_id in video_id_list: + + # frame_list = [str(frame).zfill(5) for frame in range(start, end + 1)] + + hand_info_exp_name = 'joint_opt' + + res_dir = os.path.join('/share/hlyang/results', video_id, 
hand_info_exp_name, 'res', 'right_hand') + fn_list = list(scandir(res_dir, '.pkl')) + frame_list = [fn.split('_')[1].split('.')[0] for fn in fn_list] + frame_list.sort() + + save_world_mesh_from_mano_params(video_id, hand_info_exp_name, exp_name, frame_list, right_hand_bool=True) + + res_dir = os.path.join('/share/hlyang/results', video_id, hand_info_exp_name, 'res', 'left_hand') + fn_list = list(scandir(res_dir, '.pkl')) + frame_list = [fn.split('_')[1].split('.')[0] for fn in fn_list] + frame_list.sort() + + save_world_mesh_from_mano_params(video_id, hand_info_exp_name, exp_name, frame_list, right_hand_bool=False) \ No newline at end of file diff --git a/utils/utils/get_world_mesh_from_mano_params2.py b/utils/utils/get_world_mesh_from_mano_params2.py new file mode 100644 index 0000000000000000000000000000000000000000..b2c26bc06a1195f1fbb540bb11d55c58ada7e131 --- /dev/null +++ b/utils/utils/get_world_mesh_from_mano_params2.py @@ -0,0 +1,134 @@ +''' +python utils/get_world_mesh_from_mano_params.py --video_id 20230904_01 +''' +import os +import sys +current_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.join(current_dir, '..')) +import pickle +from utils.hoi_io2 import load_mano_info_batch, load_mano_info_batch_acc, cal_represent_frame_list +from manopth.manopth.manolayer import ManoLayer +from tqdm import tqdm +from utils.save_obj import save_obj +import argparse +from utils.scandir import scandir +import torch +import trimesh + +def load_world_meshes_acc(root, date, video_id, from_exp_name, frame_list, right_hand_bool, BATCH_SIZE = 20, represent_frame_id = None): + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + trimesh_mesh_dict = {} + + if right_hand_bool: + save_dir = os.path.join(root, date, video_id, from_exp_name, 'meshes_batch', 'right_hand') + else: + save_dir = os.path.join(root, date, video_id, from_exp_name, 'meshes_batch', 'left_hand') + + for represent_frame_id, represented_frame_list in represent_relation.items(): + path = os.path.join(save_dir, f'hand_{represent_frame_id}.obj') + with open(path, 'rb') as f: + data = pickle.load(f) + + f = data['f'] + for f_idx, frame_id in enumerate(represented_frame_list): + v = data[frame_id] + mesh = trimesh.Trimesh(vertices=v, faces=f) + trimesh_mesh_dict[frame_id] = mesh + + return trimesh_mesh_dict + +def save_world_mesh_from_mano_params_acc(root, date, video_id, from_exp_name, save_exp_name, frame_list, represent_frame_id, right_hand_bool): + num_frame = len(frame_list) + + hand_trans, hand_pose, _ = load_mano_info_batch_acc(root, date, video_id, from_exp_name, frame_list, right_hand_bool) + + if right_hand_bool: + shape_path = os.path.join(root, date, video_id, 'src', 'right_hand_shape.pkl') + else: + shape_path = os.path.join(root, date, video_id, 'src', 'left_hand_shape.pkl') + + if os.path.exists(shape_path): + with open(shape_path, 'rb') as f: + shape_data = pickle.load(f) + hand_shape = shape_data['hand_shape'] + else: + hand_shape = torch.zeros((10)) + hand_shape = hand_shape.unsqueeze(0).expand(num_frame, -1) + + use_pca = False + ncomps = 45 + if right_hand_bool: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='right', center_idx=0) + else: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='left', center_idx=0) + + faces_idx = mano_layer.th_faces.detach() + + 
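+    # MANO (built with center_idx=0) returns vertices in millimetres relative to the root
+    # joint; the lines below convert them to metres and add the per-frame world translation
+    # before the batch is pickled under the representative frame id.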
verts, _, _ = mano_layer(hand_pose, hand_shape) + verts = verts + verts = verts / 1000.0 + verts += hand_trans.unsqueeze(1) + + if right_hand_bool: + save_dir = os.path.join(root, date, video_id, save_exp_name, 'meshes_batch', 'right_hand') + else: + save_dir = os.path.join(root, date, video_id, save_exp_name, 'meshes_batch', 'left_hand') + os.makedirs(save_dir, exist_ok=True) + + save_data = {} + save_data['f'] = faces_idx.numpy() + for i, frame in tqdm(enumerate(frame_list)): + save_data[frame] = verts[i].numpy() + + save_path = os.path.join(save_dir, f'hand_{represent_frame_id}.obj') + with open(save_path, 'wb') as f: + pickle.dump(save_data, f) + +if __name__ == '__main__': + exp_name = 'get_world_mesh_from_mano_params_joint' + # video_id_list = [f'20230904_{str(i).zfill(2)}' for i in range(2,3)] + # video_id_list = ['20230904_01'] + + parser = argparse.ArgumentParser() + parser.add_argument('--video_id', required=True, type=str) + # parser.add_argument('--start', required=False, type=int, default=0) + # parser.add_argument('--end', required=False, type=int, default=0) + args = parser.parse_args() + video_id = args.video_id + + # # start和end要么同时输入,要么使用默认,从metadata中取数 + # start = args.start + # end = args.end + # assert (start == 0 and end == 0) or (start > 0 and start <= end) + # if start == 0 and end == 0: + # start = 1 + # metadata_path = os.path.join('/share/hlyang/results', video_id, 'metadata', f'{camera_list[0]}.pkl') + # assert os.path.exists(metadata_path) + # with open(metadata_path, 'rb') as f: + # metadata = pickle.load(f) + # end = metadata['num_frame'] + + # for video_id in video_id_list: + + # frame_list = [str(frame).zfill(5) for frame in range(start, end + 1)] + + hand_info_exp_name = 'joint_opt' + root = '/share/hlyang/results' + + res_dir = os.path.join('/share/hlyang/results', video_id, hand_info_exp_name, 'res', 'right_hand') + fn_list = list(scandir(res_dir, '.pkl')) + frame_list = [fn.split('_')[1].split('.')[0] for fn in fn_list] + frame_list.sort() + + save_world_mesh_from_mano_params(root, video_id, hand_info_exp_name, exp_name, frame_list, right_hand_bool=True) + + res_dir = os.path.join('/share/hlyang/results', video_id, hand_info_exp_name, 'res', 'left_hand') + fn_list = list(scandir(res_dir, '.pkl')) + frame_list = [fn.split('_')[1].split('.')[0] for fn in fn_list] + frame_list.sort() + + save_world_mesh_from_mano_params(root, video_id, hand_info_exp_name, exp_name, frame_list, right_hand_bool=False) \ No newline at end of file diff --git a/utils/utils/hoi_io.py b/utils/utils/hoi_io.py new file mode 100755 index 0000000000000000000000000000000000000000..7e4043537fe78feec08787fae6b268546bcf1720 --- /dev/null +++ b/utils/utils/hoi_io.py @@ -0,0 +1,1431 @@ +import os +# from utils.scandir import scandir +import numpy as np +import cv2 +from tqdm import tqdm +import pickle +import torch +import json +from time import time, sleep +from manopth.manopth.manolayer import ManoLayer +from pytorch3d.structures import Meshes +from pytorch3d.renderer import (PerspectiveCameras, PointLights, RasterizationSettings, MeshRenderer, MeshRasterizer, SoftPhongShader, SoftSilhouetteShader, SoftPhongShader, TexturesVertex) +import open3d as o3d +import shutil +from collections import OrderedDict + +def load_bg_img_test(camera_id, frame_id): + path = os.path.join('/home/hlyang/utils/HOI/img', camera_id, 'imgs', camera_id + '_' + frame_id + '.png') + assert os.path.exists(path) + img = cv2.imread(path, cv2.IMREAD_COLOR) + return img + +def load_bg_img(video_id, camera_id, 
frame_id): + img_root = os.path.join('/share/hlyang/results', video_id, 'imgs') + # os.path.exists(img_root) + path = os.path.join(img_root, camera_id, camera_id + '_' + frame_id + '.png') + # assert os.path.exists(path), path + img = cv2.imread(path, cv2.IMREAD_COLOR) + return img + +def cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len = 2): + + represent_relation = OrderedDict() + # represent_order = [] # 防止低版本python的字典是无序的,但似乎用OrderedDict就好了 + for frame in frame_list: + num = int(frame) + if num <= first_batch_len: + key = str(1).zfill(5) + else: + key = str(((num - (first_batch_len + 1)) // BATCH_SIZE) * BATCH_SIZE + first_batch_len + 1).zfill(5) + if key not in represent_relation: + represent_relation[key] = [] + represent_relation[key].append(frame) + + return represent_relation + +def load_bg_img_acc(video_id, camera_id, BATCH_SIZE, frame_list): + date = video_id[:8] + sub_video_root = os.path.join('/share/hlyang/results', date, video_id, 'sub_video') + + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list) + + full_img_list = [] + for represent_frame_id, represented_frame_list in represent_relation.items(): + + video_path = os.path.join(sub_video_root, camera_id, camera_id + '_' + represent_frame_id + '.mp4') + + cap = cv2.VideoCapture(video_path) + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + cap.set(cv2.CAP_PROP_FOURCC, fourcc) + # fps = cap.get(cv2.CAP_PROP_FPS) + # W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + # H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + suc = cap.isOpened() + img_list = [] + for i in range(BATCH_SIZE): + suc, img = cap.read() + if not suc: + break + img_list.append(img) + + # num_img = len(img_list) + + img_list_ = [] + for idx in [int(frame) - int(represent_frame_id) for frame in represented_frame_list]: + # assert idx >= 0 and idx < num_img + img_list_.append(img_list[idx]) + img_list = img_list_ + + for img in img_list: + full_img_list.append(img) + + return full_img_list + +def load_bg_imgs(video_id, frame_list, camera_list, BATCH_SIZE): + + rgb_batch = [[] for _ in range(len(frame_list))] + + for c_idx, camera in enumerate(camera_list): + img_list = load_bg_img_acc(video_id, camera, BATCH_SIZE, frame_list) + for f_idx, bg in enumerate(img_list): + rgb_batch[f_idx].append(bg) + + return rgb_batch + +def load_crop_info_test(camera_id, frame_id, is_righthand): + ''' + + return: + + ''' + if is_righthand: + crop_info_path = os.path.join('/home/hlyang/utils/HOI/img/', camera_id, 'crop_imgs_righthand', camera_id + '_' + frame_id + '_crop_info.pkl') + else: + crop_info_path = os.path.join('/home/hlyang/utils/HOI/img/', camera_id, 'crop_imgs_lefthand', camera_id + '_' + frame_id + '_crop_info.pkl') + assert os.path.exists(crop_info_path) + + with open(crop_info_path, 'rb') as f: + crop_info = pickle.load(f) + h_min, h_max, w_min, w_max, h_mean, w_mean = crop_info + return h_min, h_max, w_min, w_max, h_mean, w_mean + +def load_crop_info(video_id, camera_id, frame_id, is_righthand): + ''' + return: + + ''' + if is_righthand: + crop_info_path = os.path.join('/share/hlyang/results', video_id, 'crop', 'right_hand', camera_id, camera_id + '_' + frame_id + '_crop_info.pkl') + else: + crop_info_path = os.path.join('/share/hlyang/results', video_id, 'crop', 'left_hand', camera_id, camera_id + '_' + frame_id + '_crop_info.pkl') + assert os.path.exists(crop_info_path) + + with open(crop_info_path, 'rb') as f: + crop_info = pickle.load(f) + h_min, h_max, w_min, w_max, h_mean, w_mean = crop_info + return h_min, h_max, w_min, w_max, h_mean, 
w_mean + +def load_crop_info_v2(date, video_id, from_exp_name, camera_list, frame_list, right_hand_bool, BATCH_SIZE = 20, represent_frame_id = None): + ''' + TODO: 考虑缺失的情况,但应该不会有缺失的情况。现在的处理方案:只载入读到了,空的跳过 + + ''' + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + crop_info_batch = {} + + for represent_frame_id, represented_frame_list in represent_relation.items(): + if right_hand_bool: + path = os.path.join('/share/hlyang/results', date, video_id, from_exp_name, 'right_hand', represent_frame_id + '_crop_info.pkl') + else: + path = os.path.join('/share/hlyang/results', date, video_id, from_exp_name, 'left_hand', represent_frame_id + '_crop_info.pkl') + with open(path, 'rb') as f: + crop_info_ = pickle.load(f) + + crop_info_batch.update(crop_info_) + + # for f_idx, frame_id in enumerate(represented_frame_list): + # crop_info_batch[frame_id] = {} + + # crop_info_frame_ = crop_info_.get(frame_id, None) + # if crop_info_frame_ is None: + # continue + + # for c_idx, camera_id in enumerate(camera_list): + # crop_info_camera_ = crop_info_frame_.get(camera_id, None) + # if crop_info_camera_ is not None: + # crop_info_batch[frame_id][camera_id] = crop_info_camera_ + + return crop_info_batch + + + + + h_min, h_max, w_min, w_max, h_mean, w_mean = crop_info + return h_min, h_max, w_min, w_max, h_mean, w_mean + +def get_seg_infos_batch_test(seg_path_prefix: str, frame_list: list[str], camera_list: list[str]): + ''' + TODO:这个过程非常慢,也许需要加速 + + return: + object_seg, left_hand_seg, right_hand_seg 形如[batch_size, num_camera, h, w] + ''' + object_seg_batch = [] + left_hand_seg_batch = [] + right_hand_seg_batch = [] + for frame_id in frame_list: + object_seg_cameras = [] + left_hand_seg_cameras = [] + right_hand_seg_cameras = [] + for camera_id in camera_list: + # path = os.path.join(seg_path_prefix, camera_id, 'denoised_mask_imgs', camera_id + '_' + frame_id + '_mask_denoised.png') + path = os.path.join(seg_path_prefix, camera_id, 'mask_imgs', camera_id + '_' + frame_id + '_mask.png') + assert os.path.exists(path) + seg_img = cv2.imread(path, cv2.IMREAD_COLOR) # BGR + seg_img = torch.from_numpy(seg_img) + # object_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] > 50) & (seg_img[:, :, 2] > 50), True, False) + # left_hand_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] > 50) & (seg_img[:, :, 2] < 50), True, False) + # right_hand_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] < 50) & (seg_img[:, :, 2] > 50), True, False) + object_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] > 50) & (seg_img[:, :, 2] > 50), 1.0, 0.0) + left_hand_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] > 50) & (seg_img[:, :, 2] < 50), 1.0, 0.0) + right_hand_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] < 50) & (seg_img[:, :, 2] > 50), 1.0, 0.0) + object_seg_cameras.append(object_seg) + left_hand_seg_cameras.append(left_hand_seg) + right_hand_seg_cameras.append(right_hand_seg) + object_seg_cameras = torch.stack(object_seg_cameras) + left_hand_seg_cameras = torch.stack(left_hand_seg_cameras) + right_hand_seg_cameras = torch.stack(right_hand_seg_cameras) + object_seg_batch.append(object_seg_cameras) + left_hand_seg_batch.append(left_hand_seg_cameras) + right_hand_seg_batch.append(right_hand_seg_cameras) + object_seg_batch = torch.stack(object_seg_batch) + left_hand_seg_batch = torch.stack(left_hand_seg_batch) + 
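# Worked example (editorial) of the representative-frame batching convention from
# cal_represent_frame_list, which the batched *_acc / *_v2 loaders in this file rely on:
# with BATCH_SIZE = 20 and first_batch_len = 2, frames '00001'-'00002' map to the
# representative key '00001', frames '00003'-'00022' to '00003', frames '00023'-'00042'
# to '00023', and so on; each group is read from a single per-batch file named after its
# representative frame id, e.g.
#   cal_represent_frame_list(20, [str(i).zfill(5) for i in range(1, 25)])
#   -> OrderedDict([('00001', ['00001', '00002']),
#                   ('00003', ['00003', ..., '00022']),
#                   ('00023', ['00023', '00024'])])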
right_hand_seg_batch = torch.stack(right_hand_seg_batch) + return object_seg_batch, left_hand_seg_batch, right_hand_seg_batch + +def get_seg_infos_batch(video_id: str, frame_list: list[str], camera_list: list[str]): + ''' + TODO:不知为何性能非常差,frame_list不要传太长,可能是触发了Linux的内存保护机制。 + + return: + left_hand_seg, right_hand_seg, object1_seg, object2_seg 形如[batch_size, num_camera, h, w] + ''' + left_hand_seg_batch = [] + right_hand_seg_batch = [] + object1_seg_batch = [] + object2_seg_batch = [] + + mask_dict = {} + for camera_id in camera_list: + path = os.path.join('/share/hlyang/results', video_id, 'anno_results', video_id+'|'+camera_id+'.npy') + mask = np.load(path) + mask_dict[camera_id] = torch.from_numpy(mask) + + #如果frame_list是连续的,可以直接索引少一层循环,速度应该会快得多。 + frame_idx_list = [int(frame_id)-1 for frame_id in frame_list] + + continuous_bool = True + for i in range(len(frame_idx_list)-1): + if frame_idx_list[i] + 1 != frame_idx_list[i+1]: + continuous_bool = False + break + with torch.no_grad(): + if continuous_bool: + frame_start = frame_idx_list[0] + frame_end = frame_idx_list[-1] + + for camera_id in camera_list: + mask = mask_dict[camera_id] + + left_hand_seg = torch.where(mask[frame_start: frame_end + 1, ...] == 1, 1., 0.) + right_hand_seg = torch.where(mask[frame_start: frame_end + 1, ...] == 2, 1., 0.) + object1_seg = torch.where(mask[frame_start: frame_end + 1, ...] == 3, 1., 0.) + object2_seg = torch.where(mask[frame_start: frame_end + 1, ...] == 4, 1., 0.) + + left_hand_seg_batch.append(left_hand_seg) + right_hand_seg_batch.append(right_hand_seg) + object1_seg_batch.append(object1_seg) + object2_seg_batch.append(object2_seg) + + left_hand_seg_batch = torch.stack(left_hand_seg_batch).transpose(0, 1) + right_hand_seg_batch = torch.stack(right_hand_seg_batch).transpose(0, 1) + object1_seg_batch = torch.stack(object1_seg_batch).transpose(0, 1) + object2_seg_batch = torch.stack(object2_seg_batch).transpose(0, 1) + else: + for frame_id in frame_list: + frame_idx = int(frame_id) - 1 + left_hand_seg_cameras = [] + right_hand_seg_cameras = [] + object1_seg_cameras = [] + object2_seg_cameras = [] + for camera_id in camera_list: + mask = mask_dict[camera_id] + left_hand_seg = torch.where(mask[frame_idx, ...] == 1, 1., 0.) + right_hand_seg = torch.where(mask[frame_idx, ...] == 2, 1., 0.) + object1_seg = torch.where(mask[frame_idx, ...] == 3, 1., 0.) + object2_seg = torch.where(mask[frame_idx, ...] == 4, 1., 0.) 
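+                    # mask labels decoded above: 1 = left hand, 2 = right hand,
+                    # 3 = object1, 4 = object2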
+ + left_hand_seg_cameras.append(left_hand_seg) + right_hand_seg_cameras.append(right_hand_seg) + object1_seg_cameras.append(object1_seg) + object2_seg_cameras.append(object2_seg) + + left_hand_seg_cameras = torch.stack(left_hand_seg_cameras) + right_hand_seg_cameras = torch.stack(right_hand_seg_cameras) + object1_seg_cameras = torch.stack(object1_seg_cameras) + object2_seg_cameras = torch.stack(object2_seg_cameras) + + left_hand_seg_batch.append(left_hand_seg_cameras) + right_hand_seg_batch.append(right_hand_seg_cameras) + object1_seg_batch.append(object1_seg_cameras) + object2_seg_batch.append(object2_seg_cameras) + + left_hand_seg_batch = torch.stack(left_hand_seg_batch) + right_hand_seg_batch = torch.stack(right_hand_seg_batch) + object1_seg_batch = torch.stack(object1_seg_batch) + object2_seg_batch = torch.stack(object2_seg_batch) + + return left_hand_seg_batch, right_hand_seg_batch, object1_seg_batch, object2_seg_batch + +def get_seg_infos_batch2(video_id: str, frame_list: list[str], camera_list: list[str]): + ''' + get_seg_infos_batch1的numpy实现 + TODO:不知为何性能非常差 + + return: + left_hand_seg, right_hand_seg, object1_seg, object2_seg 形如[batch_size, num_camera, h, w] + ''' + left_hand_seg_batch = [] + right_hand_seg_batch = [] + object1_seg_batch = [] + object2_seg_batch = [] + + mask_dict = {} + for camera_id in camera_list: + path = os.path.join('/share/hlyang/results', video_id, 'anno_results', video_id+'|'+camera_id+'.npy') + mask = np.load(path) + mask_dict[camera_id] = mask + + #如果frame_list是连续的,可以直接索引少一层循环,速度应该会快得多。 + frame_idx_list = [int(frame_id)-1 for frame_id in frame_list] + + continuous_bool = True + for i in range(len(frame_idx_list)-1): + if frame_idx_list[i] + 1 != frame_idx_list[i+1]: + continuous_bool = False + break + + if continuous_bool: + frame_start = frame_idx_list[0] + frame_end = frame_idx_list[-1] + + for camera_id in camera_list: + print('hello_' + camera_id) + mask = mask_dict[camera_id] + + left_hand_seg = np.where(mask[frame_start: frame_end + 1, ...] == 1, 1., 0.) + right_hand_seg = np.where(mask[frame_start: frame_end + 1, ...] == 2, 1., 0.) + object1_seg = np.where(mask[frame_start: frame_end + 1, ...] == 3, 1., 0.) + object2_seg = np.where(mask[frame_start: frame_end + 1, ...] == 4, 1., 0.) + + left_hand_seg_batch.append(left_hand_seg) + right_hand_seg_batch.append(right_hand_seg) + object1_seg_batch.append(object1_seg) + object2_seg_batch.append(object2_seg) + + left_hand_seg_batch = np.stack(left_hand_seg_batch).transpose(0, 1) + right_hand_seg_batch = np.stack(right_hand_seg_batch).transpose(0, 1) + object1_seg_batch = np.stack(object1_seg_batch).transpose(0, 1) + object2_seg_batch = np.stack(object2_seg_batch).transpose(0, 1) + else: + for frame_id in frame_list: + frame_idx = int(frame_id) - 1 + left_hand_seg_cameras = [] + right_hand_seg_cameras = [] + object1_seg_cameras = [] + object2_seg_cameras = [] + for camera_id in camera_list: + mask = mask_dict[camera_id] + left_hand_seg = np.where(mask[frame_idx, ...] == 1, 1., 0.) + right_hand_seg = np.where(mask[frame_idx, ...] == 2, 1., 0.) + object1_seg = np.where(mask[frame_idx, ...] == 3, 1., 0.) + object2_seg = np.where(mask[frame_idx, ...] == 4, 1., 0.) 
+
+                left_hand_seg_cameras.append(left_hand_seg)
+                right_hand_seg_cameras.append(right_hand_seg)
+                object1_seg_cameras.append(object1_seg)
+                object2_seg_cameras.append(object2_seg)
+
+            left_hand_seg_cameras = np.stack(left_hand_seg_cameras)
+            right_hand_seg_cameras = np.stack(right_hand_seg_cameras)
+            object1_seg_cameras = np.stack(object1_seg_cameras)
+            object2_seg_cameras = np.stack(object2_seg_cameras)
+
+            left_hand_seg_batch.append(left_hand_seg_cameras)
+            right_hand_seg_batch.append(right_hand_seg_cameras)
+            object1_seg_batch.append(object1_seg_cameras)
+            object2_seg_batch.append(object2_seg_cameras)
+
+        left_hand_seg_batch = np.stack(left_hand_seg_batch)
+        right_hand_seg_batch = np.stack(right_hand_seg_batch)
+        object1_seg_batch = np.stack(object1_seg_batch)
+        object2_seg_batch = np.stack(object2_seg_batch)
+
+    return left_hand_seg_batch, right_hand_seg_batch, object1_seg_batch, object2_seg_batch
+
+def get_seg_infos_batch3(video_id: str, frame_list: list[str], camera_list: list[str]):
+    '''
+
+    return:
+        seg: shaped [batch_size, num_camera, h, w]; values 1, 2, 3 and 4 denote the left hand, right hand, object1 and object2 respectively
+    '''
+    seg_batch = []
+
+    mask_dict = {}
+    for camera_id in camera_list:
+        path = os.path.join('/share/hlyang/results', video_id, 'anno_results', video_id+'|'+camera_id+'.npy')
+        mask = np.load(path)
+        mask_dict[camera_id] = mask
+
+    # If frame_list is contiguous, the mask can be sliced directly and one loop level dropped, which should be much faster.
+    frame_idx_list = [int(frame_id)-1 for frame_id in frame_list]
+
+    continuous_bool = True
+    for i in range(len(frame_idx_list)-1):
+        if frame_idx_list[i] + 1 != frame_idx_list[i+1]:
+            continuous_bool = False
+            break
+
+    if continuous_bool:
+        frame_start = frame_idx_list[0]
+        frame_end = frame_idx_list[-1] + 1
+
+        for camera_id in camera_list:
+            mask = mask_dict[camera_id]
+
+            seg = mask[frame_start: frame_end, ...]
+            seg_batch.append(seg)
+
+        seg_batch = np.stack(seg_batch)
+        seg_batch = seg_batch.swapaxes(0, 1)
+    else:
+        for frame_idx in frame_idx_list:
+            seg_cameras = []
+            for camera_id in camera_list:
+                mask = mask_dict[camera_id]
+                seg = mask[frame_idx, ...]
+ + seg_cameras.append(seg) + + seg_cameras = np.stack(seg_cameras) + + seg_batch.append(seg_cameras) + + seg_batch = np.stack(seg_batch) + + return seg_batch + +def get_downsampled_seg_infos_batch(video_id: str, frame_list: list[str], camera_list: list[str]): + ''' + + return: (seg, downsample_factor) + seg: 形如[batch_size, num_camera, h, w],其中值为1 2 3 4分别代表右手、左手、object1(右手操作的物体)和object2(左手操作的物体), h为750, w为1024 + downsample_factor: 4 + ''' + seg_batch = [] + + for frame_id in frame_list: + seg_camera = [] + for camera_id in camera_list: + path = os.path.join('/share/hlyang/results', video_id, 'anno_results', camera_id, camera_id + '_' + frame_id+'.npy') + assert os.path.exists(path) + seg = np.load(path) + seg_camera.append(seg) + seg_camera = np.stack(seg_camera) + seg_batch.append(seg_camera) + seg_batch = np.stack(seg_batch) + + return seg_batch, 4 + +def get_downsampled_seg_infos_batch_v2(video_id: str, from_exp_name, frame_list: list[str], camera_list: list[str]): + ''' + 某些视角会track失败,得不到mask:如果path不存在,就生成一个全0。 + + return: (seg, downsample_factor) + seg: 形如[batch_size, num_camera, h, w],其中值为1 2 3 4分别代表右手、左手、object1(右手操作的物体)和object2(左手操作的物体), h为750, w为1024 + downsample_factor: 4 + ''' + seg_batch = [] + + for frame_id in frame_list: + seg_camera = [] + for camera_id in camera_list: + path = os.path.join('/share/hlyang/results', video_id, from_exp_name, 'res', camera_id, camera_id + '_' + frame_id+'.npy') + if os.path.exists(path): + seg = np.load(path) + else: + seg = np.zeros(shape=(750, 1024), dtype=np.uint8) + seg_camera.append(seg) + seg_camera = np.stack(seg_camera) + seg_batch.append(seg_camera) + seg_batch = np.stack(seg_batch) + + return seg_batch, 4 + +def get_downsampled_seg_infos_batch_v2_acc_batch(date, video_id: str, from_exp_name, frame_list: list[str], camera_list: list[str], BATCH_SIZE = 20, represent_frame_id = None): + ''' + 某些视角会track失败,得不到mask:如果path不存在,就生成一个全0。 + + return: (full_mask, downsample_factor) + full_mask: 形如[batch_size, num_camera, h, w],其中值为1 2 3 4分别代表右手、左手、object1(右手操作的物体)和object2(左手操作的物体), h为750, w为1024 + downsample_factor: 4 + ''' + + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + seg_batch = [] + + for represent_frame_id, represented_frame_list in represent_relation.items(): + path = os.path.join('/share/hlyang/results', date, video_id, from_exp_name, 'res', represent_frame_id+'.npy') + mask = np.load(path) + + for f_idx, frame_id in enumerate(represented_frame_list): + seg_camera = [] + for c_idx, camera_id in enumerate(camera_list): + try: + seg = mask[f_idx, c_idx] + except: + seg = np.zeros(shape=(750, 1024), dtype=np.uint8) + seg_camera.append(seg) + seg_camera = np.stack(seg_camera) + seg_batch.append(seg_camera) + + seg_batch = np.stack(seg_batch) + + return seg_batch, 4 + +def cvt_multiview_imgs2mp4(img_dir:str, save_path, save_fps, camera_list, frame_list, height = 3000, width = 4096, downsample_factor = 4): + # camera_img_dict = {} + # paths = list(scandir(img_prefix, suffix='.png', recursive=True, full_path=True)) + # for path in paths: + # dirname, basename = os.path.split(path) + # camera_id = basename.split('_')[0] + # if not camera_id in camera_img_dict: + # camera_img_dict[camera_id] = [] + width = width // downsample_factor + height = height // downsample_factor + + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + videoWriter = cv2.VideoWriter(save_path, fourcc, save_fps, (width * 4, height * 3)) + 
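+    # Each written frame is a 3-row x 4-column mosaic of the downsampled camera views:
+    # camera_idx // 4 picks the row, camera_idx % 4 the column, and any view whose image
+    # is missing on disk is left as a white tile.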
+ for frame_id in tqdm(frame_list): + saved_img = np.zeros((height * 3, width * 4, 3)).astype(np.uint8) + for camera_idx, camera_id in enumerate(camera_list): + img_path = os.path.join(img_dir, camera_id, camera_id+'_'+frame_id+'.png') + # assert os.path.exists(img_path) + if os.path.exists(img_path): + img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) + img = cv2.resize(img, (width, height)) + cv2.putText(img, f'{frame_id} {camera_id}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=2) + else: + img = np.full((height, width, 3), 255, dtype=np.uint8) + saved_img[height*(camera_idx//4) : height*((camera_idx//4)+1), width*(camera_idx%4) : width*((camera_idx%4)+1)] = img + videoWriter.write(saved_img) + videoWriter.release() + +def load_mmpose_joints(video_id, camera_list, frame_id, is_righthand): + joints = [] + for camera_id in camera_list: + if is_righthand: + joints_path = os.path.join('/share/hlyang/results', video_id, 'mmpose_right_hand','predictions', camera_id + '_' + frame_id + '.json') + else: + joints_path = os.path.join('/share/hlyang/results', video_id, 'mmpose_left_hand','predictions', camera_id + '_' + frame_id + '.json') + assert os.path.exists(joints_path) + with open(joints_path, 'r') as f: + joints_info = json.load(f) + + h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info(video_id, camera_id, frame_id, is_righthand) + + # 注意这里是[w_min, h_min] + joints.append(torch.stack([torch.FloatTensor(x) + torch.FloatTensor([w_min, h_min]) for x in joints_info[0]['keypoints']], dim=0)) + joints = torch.stack(joints, dim=0) + return joints + +def load_mmpose_joints_test(camera_list, frame_id, is_righthand): + joints_path_prefix = '/home/hlyang/HOI/mmpose/test/output_both_hands/predictions' + + joints = [] + for camera_id in camera_list: + if is_righthand: + joints_path = os.path.join(joints_path_prefix, camera_id + '_' + frame_id + '_crop_no_lefthand.json') + else: + joints_path = os.path.join(joints_path_prefix, camera_id + '_' + frame_id + '_crop_no_righthand.json') + assert os.path.exists(joints_path) + with open(joints_path, 'r') as f: + joints_info = json.load(f) + + h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info(camera_id, frame_id, is_righthand) + + # 注意这里是[w_min, h_min] + joints.append(torch.stack([torch.FloatTensor(x) + torch.FloatTensor([w_min, h_min]) for x in joints_info[0]['keypoints']], dim=0)) + joints = torch.stack(joints, dim=0) + return joints + +def load_mmpose_joints_batch(video_id, camera_list, frame_list, is_righthand): + joints = [] + for frame_id in frame_list: + joints_frame = [] + for camera_id in camera_list: + if is_righthand: + joints_path = os.path.join('/share/hlyang/results', video_id, 'mmpose', 'right_hand','predictions', camera_id + '_' + frame_id + '.json') + else: + joints_path = os.path.join('/share/hlyang/results', video_id, 'mmpose', 'left_hand','predictions', camera_id + '_' + frame_id + '.json') + assert os.path.exists(joints_path) + with open(joints_path, 'r') as f: # TODO: 如果joints_path不存在(或者mmpose没预测出来), 构建一个格式相同的joints_info, 把数值都填成1e9 + joints_info = json.load(f) + + h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info(video_id, camera_id, frame_id, is_righthand) + # 注意这里是[w_min, h_min] + joints_frame.append(torch.stack([torch.FloatTensor(x) + torch.FloatTensor([w_min, h_min]) for x in joints_info[0]['keypoints']], dim=0)) + + joints_frame = torch.stack(joints_frame, dim=0) + joints.append(joints_frame) + # print('------------') + # print(joints) + return torch.stack(joints, dim=0) + 
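# A minimal sketch (editorial; `crop_keypoints_to_full_image` is a hypothetical helper, not
# part of this patch) of the crop-to-image coordinate shift used by the loaders above and
# below: mmpose keypoints are (x, y) pairs in crop space, so they are offset by the crop
# origin as [w_min, h_min] rather than [h_min, w_min].
import torch

def crop_keypoints_to_full_image(keypoints_xy: torch.Tensor, h_min: float, w_min: float) -> torch.Tensor:
    """keypoints_xy: [21, 2] (x, y) in crop coordinates -> (x, y) in full-image coordinates."""
    return keypoints_xy + torch.tensor([w_min, h_min], dtype=keypoints_xy.dtype)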
+def load_mmpose_joints_batch_v2(video_id, camera_list, frame_list, is_righthand): + ''' + 允许一部分mmpose的结果找不到,某一帧可能存在某些视角缺失。 + ''' + num_frame = len(frame_list) + num_camera = len(camera_list) + invalid_exists_bool = False + valid_mask = torch.full((num_frame, num_camera), 1., dtype=torch.float32) + + joints = [] + for frame_idx, frame_id in enumerate(frame_list): + joints_frame = [] + for camera_idx, camera_id in enumerate(camera_list): + if is_righthand: + joints_path = os.path.join('/share/hlyang/results', video_id, 'mmpose', 'right_hand','predictions', camera_id + '_' + frame_id + '.json') + else: + joints_path = os.path.join('/share/hlyang/results', video_id, 'mmpose', 'left_hand','predictions', camera_id + '_' + frame_id + '.json') + + if os.path.exists(joints_path): # mmpose结果存在 + with open(joints_path, 'r') as f: + joints_info = json.load(f) + + h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info(video_id, camera_id, frame_id, is_righthand) + # 注意这里是[w_min, h_min] + joints_frame.append(torch.stack([torch.FloatTensor(xy) + torch.FloatTensor([w_min, h_min]) for xy in joints_info[0]['keypoints']], dim=0)) + else: # TODO: 如果joints_path不存在(或者mmpose没预测出来), 构建一个格式相同的joints_info, 把数值都填成1e9 + invalid_exists_bool = True + valid_mask[frame_idx, camera_idx] = 0. + joints_frame.append(torch.full((21,2), 1e9, dtype=torch.float32)) + + joints_frame = torch.stack(joints_frame, dim=0) + joints.append(joints_frame) + return torch.stack(joints, dim=0), invalid_exists_bool, valid_mask + +def load_mmpose_joints_batch_v3(date, video_id, crop_info_from_exp_name, camera_list, frame_list, represent_frame_id, is_righthand): + ''' + 允许一部分mmpose的结果找不到,某一帧可能存在某些视角缺失。 + ''' + num_frame = len(frame_list) + num_camera = len(camera_list) + invalid_exists_bool = False + valid_mask = torch.full((num_frame, num_camera), 1., dtype=torch.float32) + + crop_info_batch = load_crop_info_v2(date, video_id, crop_info_from_exp_name, camera_list, frame_list, is_righthand, represent_frame_id=represent_frame_id) + joints = [] + for frame_idx, frame_id in enumerate(frame_list): + joints_frame = [] + for camera_idx, camera_id in enumerate(camera_list): + if is_righthand: + joints_path = os.path.join('/share/hlyang/results', date, video_id, 'mmpose', 'right_hand','predictions', camera_id + '_' + frame_id + '.json') + else: + joints_path = os.path.join('/share/hlyang/results', date, video_id, 'mmpose', 'left_hand','predictions', camera_id + '_' + frame_id + '.json') + + if os.path.exists(joints_path): # mmpose结果存在 + with open(joints_path, 'r') as f: + joints_info = json.load(f) + + # h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info_v2(date, video_id, camera_id, frame_id, is_righthand) + h_min, h_max, w_min, w_max, h_mean, w_mean = crop_info_batch[frame_id][camera_id] + # 注意这里是[w_min, h_min] + joints_frame.append(torch.stack([torch.FloatTensor(xy) + torch.FloatTensor([w_min, h_min]) for xy in joints_info[0]['keypoints']], dim=0)) + else: # TODO: 如果joints_path不存在(或者mmpose没预测出来), 构建一个格式相同的joints_info, 把数值都填成1e9 + invalid_exists_bool = True + valid_mask[frame_idx, camera_idx] = 0. 
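+                # Missing predictions get 1e9 placeholder joints (appended below) and a 0
+                # entry in valid_mask, presumably so that downstream fitting can weight
+                # these views out of the 2D joint error.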
+ joints_frame.append(torch.full((21,2), 1e9, dtype=torch.float32)) + + joints_frame = torch.stack(joints_frame, dim=0) + joints.append(joints_frame) + return torch.stack(joints, dim=0), invalid_exists_bool, valid_mask + +def load_mmpose_joints_batch_v3_acc(date, video_id, from_exp_name, camera_list, frame_list, right_hand_bool, BATCH_SIZE = 20, represent_frame_id = None): + ''' + 允许一部分mmpose的结果找不到,某一帧可能存在某些视角缺失。 + ''' + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + invalid_exists_bool = True + + hand_joints_2d_gt_batch = [] + mmpose_valid_mask_batch = [] + for represent_frame_id, represented_frame_list in represent_relation.items(): + if right_hand_bool: + path = os.path.join('/share/hlyang/results', date, video_id, from_exp_name, 'right_hand','predictions', f'hand_{represent_frame_id}.pkl') + else: + path = os.path.join('/share/hlyang/results', date, video_id, from_exp_name, 'left_hand','predictions', f'hand_{represent_frame_id}.pkl') + with open(path, 'rb') as f: + data_batch = pickle.load(f) + hand_joints_2d_gt, mmpose_valid_mask = data_batch + + for frame_id in represented_frame_list: + idx = int(frame_id) - int(represent_frame_id) + hand_joints_2d_gt_batch.append(hand_joints_2d_gt[idx]) + mmpose_valid_mask_batch.append(mmpose_valid_mask[idx]) + + hand_joints_2d_gt_batch = torch.stack(hand_joints_2d_gt_batch) + mmpose_valid_mask_batch = torch.stack(mmpose_valid_mask_batch) + + return hand_joints_2d_gt_batch, mmpose_valid_mask_batch + +def load_mmpose_joints_batch_test(camera_list, frame_list, is_righthand): + joints_path_prefix = '/home/hlyang/HOI/mmpose/test/output_both_hands/predictions' + joints = [] + for frame_id in frame_list: + joints_frame = [] + for camera_id in camera_list: + if is_righthand: + joints_path = os.path.join(joints_path_prefix, camera_id + '_' + frame_id + '_crop_no_lefthand.json') + else: + joints_path = os.path.join(joints_path_prefix, camera_id + '_' + frame_id + '_crop_no_righthand.json') + assert os.path.exists(joints_path) + with open(joints_path, 'r') as f: + joints_info = json.load(f) + + h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info(camera_id, frame_id, is_righthand) + # 注意这里是[w_min, h_min] + joints_frame.append(torch.stack([torch.FloatTensor(x) + torch.FloatTensor([w_min, h_min]) for x in joints_info[0]['keypoints']], dim=0)) + joints_frame = torch.stack(joints_frame, dim=0) + joints.append(joints_frame) + return torch.stack(joints, dim=0) + +def seg_downsample_2_set_test(seg, factor): + ''' + 将seg转换成set以计算CD Loss,使用downsample来降低集合中元素中个数。 + + input: + seg: shape如[bs, num_camera, h, w] + + return: + seg_set: shape如[bs, num_camera, num_verts_in_this_camera, 2] + num_verts_in_this_camera是不定的,所以实际上是个二维list,元素类型为tensor + ''' + + batch_size, num_camera, h, w = seg.shape + assert factor >= 0 + + seg = torch.nn.functional.interpolate(seg, size=[h // factor, w // factor]) + + seg_set = [] + for batch in range(batch_size): + seg_set_batch = [] + for camera in range(num_camera): + # seg_set_camera = [] + idx = torch.flip(torch.nonzero(seg[batch, camera]), dims=[-1]) + # for x, y in idx[:]: + # seg_set_camera.append(torch.FloatTensor([y * factor, x * factor])) + # seg_set_camera = torch.stack(seg_set_camera) + seg_set_batch.append(idx * factor) + seg_set.append(seg_set_batch) + + return seg_set + +def seg2set(seg, factor): + ''' + 将seg转换成set以计算CD Loss。 + + input: + seg: shape如[bs, num_camera, h, w] + + 
return: + seg_set: shape如[bs, num_camera, num_verts_in_this_camera, 2] + num_verts_in_this_camera是不定的,所以实际上是个二维list,元素类型为tensor + ''' + + batch_size, num_camera, h, w = seg.shape + assert factor >= 0 + + seg_set = [] + for batch in range(batch_size): + seg_set_batch = [] + for camera in range(num_camera): + # seg_set_camera = [] + idx = torch.flip(torch.nonzero(seg[batch, camera]), dims=[-1]) + # for x, y in idx[:]: + # seg_set_camera.append(torch.FloatTensor([y * factor, x * factor])) + # seg_set_camera = torch.stack(seg_set_camera) + seg_set_batch.append(idx * factor) + seg_set.append(seg_set_batch) + + return seg_set + +def load_mano_info_batch(video_id: str, from_exp_name: str, frame_list: str, right_hand_bool: bool): + if right_hand_bool: + dir = os.path.join('/share/hlyang/results', video_id, from_exp_name, 'res', 'right_hand') + else: + dir = os.path.join('/share/hlyang/results', video_id, from_exp_name, 'res', 'left_hand') + + hand_trans_batch = [] + hand_pose_batch = [] + mask_batch = [] + for frame_id in frame_list: + path = os.path.join(dir, 'hand_' + frame_id + '.pkl') + assert os.path.exists(path), path + with open(path, 'rb') as f: + data = pickle.load(f) + hand_trans = data['hand_trans'] + hand_pose = data['hand_pose'] + mask = data.get('joints_mask', None) + hand_trans_batch.append(hand_trans) + hand_pose_batch.append(hand_pose) + if mask is not None: + mask_batch.append(mask) + hand_trans_batch = torch.stack(hand_trans_batch) + hand_pose_batch = torch.stack(hand_pose_batch) + + if len(mask_batch) == 0: + return hand_trans_batch, hand_pose_batch, None + else: + mask_batch = torch.stack(mask_batch) + return hand_trans_batch, hand_pose_batch, mask_batch + +def load_mano_info_batch_acc(date, video_id: str, from_exp_name: str, frame_list: str, right_hand_bool: bool, BATCH_SIZE = 20, represent_frame_id = None): + if right_hand_bool: + dir = os.path.join('/share/hlyang/results', date, video_id, from_exp_name, 'res', 'right_hand') + else: + dir = os.path.join('/share/hlyang/results', date, video_id, from_exp_name, 'res', 'left_hand') + + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + hand_trans_batch = [] + hand_pose_batch = [] + mask_batch = [] + + for represent_frame_id, represented_frame_list in represent_relation.items(): + # print(represent_frame_id, represented_frame_list) + path = os.path.join(dir, 'hand_' + represent_frame_id + '.pkl') + with open(path, 'rb') as f: + data_batch = pickle.load(f) + + for frame_id in represented_frame_list: + data = data_batch[frame_id] + + hand_trans = data['hand_trans'] + hand_pose = data['hand_pose'] + mask = data.get('joints_mask', None) + + hand_trans_batch.append(hand_trans) + hand_pose_batch.append(hand_pose) + if mask is not None: + mask_batch.append(mask) + + hand_trans_batch = torch.stack(hand_trans_batch) + hand_pose_batch = torch.stack(hand_pose_batch) + + if len(mask_batch) == 0: + return hand_trans_batch, hand_pose_batch, None + else: + mask_batch = torch.stack(mask_batch) + return hand_trans_batch, hand_pose_batch, mask_batch + +def get_mano_info_batch_test(mano_info_path_prefix: str, frame_list: str): + hand_trans_batch = [] + hand_pose_batch = [] + mask_batch = [] + for frame_id in frame_list: + path = os.path.join(mano_info_path_prefix, 'hand_' + frame_id + '.pkl') + assert os.path.exists(path) + with open(path, 'rb') as f: + data = pickle.load(f) + hand_trans = data['hand_trans'] + 
hand_pose = data['hand_pose'] + mask = data.get('joints_mask', None) + hand_trans_batch.append(hand_trans) + hand_pose_batch.append(hand_pose) + if mask is not None: + mask_batch.append(mask) + hand_trans_batch = torch.stack(hand_trans_batch) + hand_pose_batch = torch.stack(hand_pose_batch) + + if len(mask_batch) == 0: + return hand_trans_batch, hand_pose_batch, None + else: + mask_batch = torch.stack(mask_batch) + return hand_trans_batch, hand_pose_batch, mask_batch + +# def liuyun_convert_axangle_to_euler_2(): +# from transforms3d.euler import euler2axangle +# from transforms3d.axangles import axangle2euler +# a = np.float32([0.1, 0.2, 0.3]) # axangle +# ai, aj, ak = axangle2euler(a) # axes="sxyz" +# a_euler = np.float32([ai, aj, ak]) +# a_axangle = euler2axangle(ai, aj, ak) +# assert a == a_axangle + #nlopt + +def load_joint_ransac_batch(video_id, frame_list, mask_type, right_hand_bool): + ''' + 从文件中读取joint ransac得到的hand_trans_3d和joints_mask,并且打成batch。 + + return hand_trans_3d, hand_joints_mask + ''' + assert mask_type in ('joints_mask', 'ransac_mask', 'final_mask') + + if right_hand_bool: + join_ransac_res_dir = os.path.join('/share/hlyang/results', video_id, 'joint_ransac_every_joint_triangulation', 'res', 'right_hand') + else: + join_ransac_res_dir = os.path.join('/share/hlyang/results', video_id, 'joint_ransac_every_joint_triangulation', 'res', 'left_hand') + assert os.path.exists(join_ransac_res_dir) + + trans_batch = [] + mask_batch = [] + for frame_id in frame_list: + path = os.path.join(join_ransac_res_dir, f'hand_{frame_id}.pkl') + assert os.path.exists(path) + with open(path, 'rb') as f: + data = pickle.load(f) + trans_batch.append(data['joints_trans']) + mask_batch.append(data[mask_type]) + trans_batch = torch.stack(trans_batch) + mask_batch = torch.stack(mask_batch) + return trans_batch, mask_batch + +def load_joint_ransac_batch_acc(date, video_id, from_exp_name, frame_list, mask_type, right_hand_bool, BATCH_SIZE = 20, represent_frame_id = None): + ''' + 从文件中读取joint ransac得到的hand_trans_3d和joints_mask,并且打成batch。 + + return hand_trans_3d, hand_joints_mask + ''' + assert mask_type in ('joints_mask', 'ransac_mask', 'final_mask') + + if right_hand_bool: + join_ransac_res_dir = os.path.join('/share/hlyang/results', date, video_id, from_exp_name, 'res', 'right_hand') + else: + join_ransac_res_dir = os.path.join('/share/hlyang/results', date, video_id, from_exp_name, 'res', 'left_hand') + + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + trans_batch = [] + mask_batch = [] + + for represent_frame_id, represented_frame_list in represent_relation.items(): + path = os.path.join(join_ransac_res_dir, 'hand_' + represent_frame_id + '.pkl') + with open(path, 'rb') as f: + data_batch = pickle.load(f) + for frame_id in represented_frame_list: + data = data_batch[frame_id] + trans_batch.append(data['joints_trans']) + mask_batch.append(data[mask_type]) + trans_batch = torch.stack(trans_batch) + mask_batch = torch.stack(mask_batch) + return trans_batch, mask_batch + +def world2camera_batch_cam(verts_world, R, T): + ''' + TODO:移除fit_hand_model.py中的world2camera_batch_cam + verts: [bs, num_verts, 3] + R: [num_cameras, 3, 3] + T: [num_cameras, 1, 3] + + return: + verts_camera: [bs, num_cameras, num_verts, 3] + ''' + # batch_size = verts_world.shape[0] + num_camera = R.shape[0] + verts_camera = torch.einsum('cij, bnj -> bcni', R, verts_world) + verts_camera = 
verts_camera + T.reshape(1, num_camera, 1, 3)
+    return verts_camera
+
+def camera2pixel_batch_cam(verts_camera, K):
+    '''
+    TODO: remove the duplicate camera2pixel_batch_cam in fit_hand_model.py
+    verts_camera: [bs, num_cameras, num_verts, 3]
+    K: [num_cameras, 3, 3]
+    '''
+    verts_pixel = torch.einsum('cij, bcnj -> bcni', K, verts_camera)
+    ret1 = verts_pixel[..., 0] / verts_pixel[..., 2]
+    ret2 = verts_pixel[..., 1] / verts_pixel[..., 2]
+    verts_pixel = torch.stack([ret1, ret2], dim=-1)
+    return verts_pixel
+
+def get_camera_params(calibration_info_path: str, camera_list):
+    '''
+    TODO: remove the duplicate get_camera_params in fit_hand_model.py
+    '''
+
+    assert os.path.exists(calibration_info_path)
+    with open(calibration_info_path) as f:
+        cali_data = json.load(f)
+
+    R_list = []
+    R_inverse_list = []
+    T_list = []
+    K_list = []
+    focal_length_list = []
+    principal_point_list = []
+
+    for camera_id in camera_list:
+        R = torch.tensor(cali_data[camera_id]['R']).reshape(1, 3, 3)
+        T = torch.tensor(cali_data[camera_id]['T']).reshape(1, 3)
+        K = torch.tensor(cali_data[camera_id]['K']).reshape(1, 3, 3)
+        fx = K[0, 0, 0]
+        fy = K[0, 1, 1]
+        px = K[0, 0, 2]
+        py = K[0, 1, 2]
+
+        R_list.append(R)
+        R_inverse_list.append(R.inverse())
+        T_list.append(T)
+        K_list.append(K)
+        focal_length_list.append(torch.tensor([fx, fy]).unsqueeze(0))
+        principal_point_list.append(torch.tensor([px, py]).unsqueeze(0))
+
+    R = torch.concatenate(R_list, dim=0)
+    R_inverse = torch.concatenate(R_inverse_list, dim=0)
+    T = torch.concatenate(T_list, dim=0)
+    K = torch.concatenate(K_list, dim=0)
+    focal_length = torch.concatenate(focal_length_list, dim=0)
+    principal_point = torch.concatenate(principal_point_list, dim=0)
+
+    image_size = torch.tensor([3000, 4096]).unsqueeze(0).repeat(len(camera_list), 1)
+
+    return R, R_inverse, T, K, focal_length, principal_point, image_size
+
+def render_from_mano_params(video_id, exp_name, camera_list, frame_list, right_hand_bool, device):
+    '''
+    Render the results of one experiment (stored as MANO parameters); the rendered images are kept on the CPU.
+
+    Warning: this can easily exhaust memory; this much data should not be held in RAM.
+    '''
+    hand_trans_batch, hand_pose_batch, _ = load_mano_info_batch(video_id, exp_name, frame_list, right_hand_bool)
+    num_frame = len(frame_list)
+
+    device = torch.device(device)
+
+    calibration_info_path = os.path.join('/share/hlyang/results', video_id, 'src', 'calibration.json')
+    assert os.path.exists(calibration_info_path)
+    R, R_inverse, T, K, focal_length, principal_point, image_size = get_camera_params(calibration_info_path, camera_list)
+
+    use_pca = False
+    ncomps = 45
+    if right_hand_bool:
+        mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='right', center_idx=0)
+    else:
+        mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='left', center_idx=0)
+
+    lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])
+    camera = PerspectiveCameras(device=device, R=R_inverse, T=T, image_size=image_size, in_ndc=False, focal_length=-focal_length, principal_point=principal_point)
+    raster_settings = RasterizationSettings(
+        image_size=(3000, 4096),
+        blur_radius=0,
+        faces_per_pixel=1,
+    )
+    renderer = MeshRenderer(rasterizer=MeshRasterizer(cameras=camera, raster_settings=raster_settings), shader=SoftPhongShader(device=device, cameras=camera, lights=lights))
+    faces_idx = mano_layer.th_faces.detach().clone().to(device)
+
+    rendered_image_list = []
+
+    for i in tqdm(range(num_frame)):
+        hand_pose = hand_pose_batch[i, ...]
+        hand_trans = hand_trans_batch[i, ...]
+ + if len(hand_pose.shape) == 1: + hand_pose = hand_pose.unsqueeze(0) + verts, _, _ = mano_layer(hand_pose) + verts = verts.squeeze() + verts = verts / 1000.0 + verts += hand_trans + verts = verts.to(device) + + + mesh = Meshes(verts=[verts], faces=[faces_idx]) + color = torch.ones(1, verts.size(0), 3, device=device) + color[:, :, 2] = 255 + mesh.textures = TexturesVertex(verts_features=color) + mesh = mesh.extend(R.shape[0]) + + images = renderer(mesh)[..., :3].squeeze() + rendered_image_list.append(images.cpu()) + rendered_image_batch = torch.stack(rendered_image_list) + + return rendered_image_batch + +def denoise(mask, half_length = 60, threshold = 900, return_rate = False): + rows, cols = mask.shape + cnt = np.zeros(mask.shape) + idx = np.nonzero(mask) + + if return_rate: + len1 = len(idx[0]) + + min_x = np.maximum(0, idx[0] - half_length) + max_x = np.minimum(rows - 1, idx[0] + half_length + 1) + min_y = np.maximum(0, idx[1] - half_length) + max_y = np.minimum(cols - 1, idx[1] + half_length + 1) + # 能再次批处理优化吗? + for x, y, x1, x2, y1, y2 in zip(idx[0], idx[1], min_x, max_x, min_y, max_y): + cnt[x, y] = mask[x1:x2, y1:y2].sum() + valid = mask & (cnt > threshold) + + if return_rate: + len2 = valid.sum() + rate = len2/len1 + return valid, rate + else: + return valid + +def denoise_mask(mask, half_length = 60, threshold = 900): + ''' + input: + mask: shape: [height, width] + ''' + assert len(mask.shape) == 2 + + right_hand_mask = np.where(mask == 1, True, False) + left_hand_mask = np.where(mask == 2, True, False) + object1_mask = np.where(mask == 3, True, False) + object2_mask = np.where(mask == 4, True, False) + right_hand_mask = denoise(right_hand_mask, half_length, threshold) + left_hand_mask = denoise(left_hand_mask, half_length, threshold) + + denoised_mask = np.zeros_like(mask).astype(np.uint8) + denoised_mask[right_hand_mask] = 1 + denoised_mask[left_hand_mask] = 2 + denoised_mask[object1_mask] = 3 + denoised_mask[object2_mask] = 4 + + return denoised_mask + +def read_init_crop(video_id, camera_id, frame_id): + ''' + 第一排为右手,第二排为左手。 + 每一排分别是 min_h, max_h, min_w, max_w + + return shape: [2, 4] + ''' + path = os.path.join('/share/hlyang/results', video_id, 'src', 'init_crop', f'{camera_id}_{frame_id}.txt') + assert os.path.exists(path) + crop_info = np.loadtxt(path).astype(np.uint32) + + return crop_info + +def get_obj_mesh_path(obj_id: str): + obj_dir = '/share/datasets/HOI-mocap/object_models_final' + obj_filenames = os.listdir(obj_dir) + valid_filename = [filename for filename in obj_filenames if (filename.endswith(f'object{obj_id}') or ((obj_id[0] == '0') and (filename.endswith(f'object{obj_id[1:]}'))))] # 加上object防止多个匹配结果 + assert len(valid_filename) == 1, f'obj {obj_id} match failed' + obj_path = None + for mesh_name in os.listdir(os.path.join(obj_dir, valid_filename[0])): + # if "cm.obj" in mesh_name: + if ("m.obj" in mesh_name) and (not "cm.obj" in mesh_name): + assert obj_path is None + obj_path = os.path.join(obj_dir, valid_filename[0], mesh_name) + assert os.path.exists(obj_path) + return obj_path + +def verts_apply_pose_batch(verts, pose_batch): + rotation_batch = pose_batch[:, :3, :3] # [num_frame, 3, 3] + translation_batch = pose_batch[:, :3, 3] # [num_frame, 1, 3] + + verts = torch.einsum('fij, vj -> fvi', rotation_batch, verts) # [num_frame, num_verts, 3] + verts = verts + translation_batch.unsqueeze(1) + return verts + +def load_nokov_objs_mesh(video_id, frame_list): + ''' + + return: tool_verts_batch: [num_frame, num_verts, 3] + tool_faces + obj_verts_batch: 
[num_frame, num_verts, 3] + obj_faces + ''' + date = video_id[:8] + original_nokov_data_dir = os.path.join('/share/datasets/HOI-mocap', date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir) + nokov_data_filenames = os.listdir(original_nokov_data_dir) + + frame_idx_list = [(int(frame)-1) for frame in frame_list] + + tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + tool_mesh_path = get_obj_mesh_path(obj_id=tool_id) + tool_mesh = o3d.io.read_triangle_mesh(tool_mesh_path) + tool_mesh = tool_mesh.simplify_quadric_decimation(2000) + tool_verts = np.asarray(tool_mesh.vertices) + # tool_verts = torch.from_numpy(tool_verts / 100.).float() + tool_verts = torch.from_numpy(tool_verts).float() + tool_faces = torch.from_numpy(np.asarray(tool_mesh.triangles)) + + tool_pose_path = os.path.join('/share/datasets/HOI-mocap/HO_poses', date, video_id, 'objpose', f'{tool_id}.npy') + assert os.path.exists(tool_pose_path) + tool_pose_batch = torch.from_numpy(np.load(tool_pose_path))[frame_idx_list, ...] + tool_verts_batch = verts_apply_pose_batch(tool_verts, tool_pose_batch) + + obj_mesh_path = get_obj_mesh_path(obj_id=obj_id) + obj_mesh = o3d.io.read_triangle_mesh(obj_mesh_path) + obj_mesh = obj_mesh.simplify_quadric_decimation(2000) + obj_verts = np.asarray(obj_mesh.vertices) + # obj_verts = torch.from_numpy(obj_verts / 100.).float() + obj_verts = torch.from_numpy(obj_verts).float() + obj_faces = torch.from_numpy(np.asarray(obj_mesh.triangles)) + + obj_pose_path = os.path.join('/share/datasets/HOI-mocap/HO_poses', date, video_id, 'objpose', f'{obj_id}.npy') + assert os.path.exists(obj_pose_path) + obj_pose_batch = torch.from_numpy(np.load(obj_pose_path))[frame_idx_list, ...] + obj_verts_batch = verts_apply_pose_batch(obj_verts, obj_pose_batch) + + return tool_verts_batch, tool_faces, obj_verts_batch, obj_faces + +def check_nokov_exists(video_id): + date = video_id[:8] + original_nokov_data_dir = os.path.join('/share/datasets/HOI-mocap', date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir) + + nokov_data_filenames = os.listdir(original_nokov_data_dir) + tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + try: + tool_mesh_path = get_obj_mesh_path(obj_id=tool_id) + obj_mesh_path = get_obj_mesh_path(obj_id=obj_id) + tool_pose_path = os.path.join('/share/datasets/HOI-mocap/HO_poses', date, video_id, 'objpose', f'{tool_id}.npy') + obj_pose_path = os.path.join('/share/datasets/HOI-mocap/HO_poses', date, video_id, 'objpose', f'{obj_id}.npy') + + if not os.path.exists(tool_pose_path): + return False + if not os.path.exists(obj_pose_path): + return False + except: + return False + return True + + +def cal_normals(verts, faces): + assert len(verts.shape) == 3 + bs = verts.shape[0] + + + with torch.no_grad(): + verts = verts.detach().clone().cpu() + verts = torch.unbind(verts, dim=0) + verts = [tensor.numpy() for tensor in verts] + + faces = faces.detach().clone().cpu().numpy() + + normals_list = [] + for i in range(bs): + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector(verts[i]) + mesh.triangles = o3d.utility.Vector3iVector(faces) + mesh.compute_vertex_normals() + normals = mesh.vertex_normals + normals_list.append(torch.from_numpy(np.asarray(normals))) + + normals = torch.stack(normals_list, dim=0) + + return normals + +def get_valid_video_list(date: str, consider_pipiline_failed = False, consider_nokov_failed = False, 
given_list = None, remove_hand = True): + valid_record_path = os.path.join('/share/hlyang/results/record', f'{date}_valid_video_id.txt') + assert os.path.exists(valid_record_path), valid_record_path + + with open(valid_record_path, 'r') as f: + lines = f.readlines() + + valid_video_list = [] + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + valid_video_list.append(parts[0]) + + if remove_hand: + valid_video_list = [id for id in valid_video_list if 'hand' not in id] + + valid_video_list = list(set(valid_video_list)) + valid_video_list.sort() + + if given_list is not None: + # valid_video_list_ = [id for id in given_list if id in valid_video_list] + # valid_video_list = valid_video_list_ + valid_video_list = [id for id in valid_video_list if id in given_list] + valid_video_list.sort() + + if consider_pipiline_failed: + pipeline_failed_record_path = os.path.join('/share/hlyang/results/record', f'{date}_pipeline_failed.txt') + if os.path.exists(pipeline_failed_record_path): + failed_video_list = [] + with open(pipeline_failed_record_path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + failed_video_list.append(parts[0]) + valid_video_list = [video_id for video_id in valid_video_list if video_id not in failed_video_list] + valid_video_list.sort() + + if consider_nokov_failed: + nokov_failed_record_path = os.path.join('/share/hlyang/results/record', f'{date}_nokov_failed.txt') + if os.path.exists(nokov_failed_record_path): + failed_video_list = [] + with open(nokov_failed_record_path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + failed_video_list.append(int(parts[0])) + + failed_video_list = [f'{date}_{str(i).zfill(3)}' for i in failed_video_list] + + valid_video_list = [video_id for video_id in valid_video_list if video_id not in failed_video_list] + valid_video_list.sort() + + return valid_video_list + +def get_time_diff(date, error_threshold = 16, valid_threshold = 3): + ''' + 仅适用于20230930及之后的time_diff文件 + ''' + + time_diff_record_root = '/share/hlyang/results/record' + time_diff_data_path = os.path.join(time_diff_record_root, f'{date}_2m1.txt') + + if not os.path.exists(time_diff_data_path): + return {} + + time_diff_data = {} + with open(time_diff_data_path, 'r') as f: + lines = f.readlines() + + for line in lines: + parts = line.strip().split() + if len(parts) == 2: + time_diff_data[parts[1]] = int(parts[0]) + + # denoise + time_diff_array = np.array([v for k, v in time_diff_data.items()]) + invalid_time_diff_list = [] + for time_diff in time_diff_array: + cnt = np.sum((time_diff_array >= time_diff - error_threshold) & (time_diff_array <= time_diff + error_threshold)) + if cnt <= 3: + invalid_time_diff_list.append(time_diff) + keys_to_remove = [k for k, v in time_diff_data.items() if v in invalid_time_diff_list] + for k in keys_to_remove: + del time_diff_data[k] + + return time_diff_data + + +def get_pipeline_failed_video_list(date: str, rename_bool = True): + peline_failed_record_path = os.path.join('/share/hlyang/results/record', f'{date}_pipeline_failed.txt') + assert os.path.exists(peline_failed_record_path), peline_failed_record_path + + with open(peline_failed_record_path, 'r') as f: + lines = f.readlines() + + pipeline_failed_video_list = [] + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + pipeline_failed_video_list.append(parts[0]) + + + pipeline_failed_video_list = list(set(pipeline_failed_video_list)) 
+ pipeline_failed_video_list.sort() + + if rename_bool: + time1 = time() + peline_failed_record_dst_path = os.path.join('/share/hlyang/results/record', f'{date}_pipeline_failed_{time1}.txt') + shutil.copy(peline_failed_record_path, peline_failed_record_dst_path) + os.remove(peline_failed_record_path) + + return pipeline_failed_video_list + +def get_num_frame(video_id): + metadata_dir = os.path.join('/share/hlyang/results', video_id, 'metadata') + assert os.path.exists(metadata_dir) + metadata_list = [filename for filename in os.listdir(metadata_dir) if filename.endswith('.pkl')] + assert len(metadata_list) > 0 + metadata_path = os.path.join('/share/hlyang/results', video_id, 'metadata', metadata_list[0]) + + with open(metadata_path, 'rb') as f: + metadata = pickle.load(f) + num_frame = metadata['num_frame'] + + return num_frame + +def get_num_frame_v2(video_id): + data = video_id[:8] + metadata_dir = os.path.join('/share/hlyang/results', data, video_id, 'metadata') + assert os.path.exists(metadata_dir) + metadata_list = [filename for filename in os.listdir(metadata_dir) if filename.endswith('.pkl')] + assert len(metadata_list) > 0 + metadata_path = os.path.join('/share/hlyang/results', data, video_id, 'metadata', metadata_list[0]) + + with open(metadata_path, 'rb') as f: + metadata = pickle.load(f) + num_frame = metadata['num_frame'] + + return num_frame + +def load_hand_info_batch_acc(hand_info_dir, frame_list, BATCH_SIZE = 20, represent_frame_id = None): + ''' + 与hoi_io中的load_mano_info_batch功能类似,考虑去重 + ''' + + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + pose_batch = [] + trans_batch = [] + + for represent_frame_id, represented_frame_list in represent_relation.items(): + path = os.path.join(hand_info_dir, 'hand_' + represent_frame_id + '.pkl') + with open(path, 'rb') as f: + data_batch = pickle.load(f) + + for frame_id in represented_frame_list: + data = data_batch[frame_id] + pose = data['hand_pose'] + trans = data['hand_trans'] + pose_batch.append(pose) + trans_batch.append(trans) + + pose_batch = torch.stack(pose_batch) + trans_batch = torch.stack(trans_batch) + return pose_batch, trans_batch \ No newline at end of file diff --git a/utils/utils/hoi_io2.py b/utils/utils/hoi_io2.py new file mode 100755 index 0000000000000000000000000000000000000000..5d8157caf8b5f82cc089426c10c004de5963cd2e --- /dev/null +++ b/utils/utils/hoi_io2.py @@ -0,0 +1,1689 @@ +import os +# from utils.scandir import scandir +import numpy as np +import cv2 +from tqdm import tqdm +import pickle +import torch +import json +from time import time, sleep +from manopth.manolayer import ManoLayer +# from pytorch3d.structures import Meshes +# from pytorch3d.renderer import (PerspectiveCameras, PointLights, RasterizationSettings, MeshRenderer, MeshRasterizer, SoftPhongShader, SoftSilhouetteShader, SoftPhongShader, TexturesVertex) +import open3d as o3d +import shutil +from collections import OrderedDict + +def load_simplied_nokov_objs_mesh(root, video_id, frame_list, use_cm = True): + ''' + + return: tool_verts_batch: [num_frame, num_verts, 3] + tool_faces + obj_verts_batch: [num_frame, num_verts, 3] + obj_faces + ''' + date = video_id[:8] + original_nokov_data_dir = os.path.join(root, date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir) + nokov_data_filenames = os.listdir(original_nokov_data_dir) + + frame_idx_list = [(int(frame)-1) for frame in frame_list] + 
+ tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + if use_cm: + tool_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{tool_id}_cm.obj') + else: + tool_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{tool_id}_m.obj') + tool_mesh = o3d.io.read_triangle_mesh(tool_mesh_path) + tool_verts = np.asarray(tool_mesh.vertices) + if use_cm: + tool_verts = torch.from_numpy(tool_verts / 100.).float() + else: + tool_verts = torch.from_numpy(tool_verts).float() + tool_faces = torch.from_numpy(np.asarray(tool_mesh.triangles)) + + tool_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{tool_id}.npy') + assert os.path.exists(tool_pose_path) + tool_pose_batch = torch.from_numpy(np.load(tool_pose_path))[frame_idx_list, ...] + tool_verts_batch = verts_apply_pose_batch(tool_verts, tool_pose_batch) + + if use_cm: + obj_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{obj_id}_cm.obj') + else: + obj_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{obj_id}_m.obj') + obj_mesh = o3d.io.read_triangle_mesh(obj_mesh_path) + obj_verts = np.asarray(obj_mesh.vertices) + if use_cm: + obj_verts = torch.from_numpy(obj_verts / 100.).float() + else: + obj_verts = torch.from_numpy(obj_verts).float() + obj_faces = torch.from_numpy(np.asarray(obj_mesh.triangles)) + + obj_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{obj_id}.npy') + assert os.path.exists(obj_pose_path) + obj_pose_batch = torch.from_numpy(np.load(obj_pose_path))[frame_idx_list, ...] + obj_verts_batch = verts_apply_pose_batch(obj_verts, obj_pose_batch) + + return tool_verts_batch, tool_faces, obj_verts_batch, obj_faces + +def load_simplied_objs_mesh(root, video_id, use_cm = True): + ''' + templete + + return: tool_verts: [num_frame, num_verts, 3] + tool_faces + obj_verts: [num_frame, num_verts, 3] + obj_faces + ''' + date = video_id[:8] + original_nokov_data_dir = os.path.join(root, date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir) + nokov_data_filenames = os.listdir(original_nokov_data_dir) + + tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + if use_cm: + tool_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{tool_id}_cm.obj') + else: + tool_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{tool_id}_m.obj') + tool_mesh = o3d.io.read_triangle_mesh(tool_mesh_path) + tool_verts = np.asarray(tool_mesh.vertices) + if use_cm: + tool_verts = torch.from_numpy(tool_verts / 100.).float() + else: + tool_verts = torch.from_numpy(tool_verts).float() + tool_faces = torch.from_numpy(np.asarray(tool_mesh.triangles)) + + if use_cm: + obj_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{obj_id}_cm.obj') + else: + obj_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{obj_id}_m.obj') + obj_mesh = o3d.io.read_triangle_mesh(obj_mesh_path) + obj_verts = np.asarray(obj_mesh.vertices) + if use_cm: + obj_verts = torch.from_numpy(obj_verts / 100.).float() + else: + obj_verts = torch.from_numpy(obj_verts).float() + obj_faces = torch.from_numpy(np.asarray(obj_mesh.triangles)) + + return tool_verts, tool_faces, obj_verts, obj_faces + +def load_objs_params(root, video_id, frame_list): + ''' + + return: tool_verts_batch: [num_frame, num_verts, 3] + tool_faces + obj_verts_batch: [num_frame, num_verts, 3] + obj_faces + ''' + date = video_id[:8] + 
original_nokov_data_dir = os.path.join(root, date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir) + nokov_data_filenames = os.listdir(original_nokov_data_dir) + + frame_idx_list = [(int(frame)-1) for frame in frame_list] + + tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + tool_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{tool_id}.npy') + assert os.path.exists(tool_pose_path) + tool_pose_batch = torch.from_numpy(np.load(tool_pose_path))[frame_idx_list, ...] + tool_rot_batch = tool_pose_batch[:, :3, :3] # [num_frame, 3, 3] + tool_trans_batch = tool_pose_batch[:, :3, 3] # [num_frame, 1, 3] + + obj_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{obj_id}.npy') + assert os.path.exists(obj_pose_path) + obj_pose_batch = torch.from_numpy(np.load(obj_pose_path))[frame_idx_list, ...] + obj_rot_batch = obj_pose_batch[:, :3, :3] # [num_frame, 3, 3] + obj_trans_batch = obj_pose_batch[:, :3, 3] # [num_frame, 1, 3] + + return tool_rot_batch, tool_trans_batch, obj_rot_batch, obj_trans_batch + + +def load_bg_img(root, video_id, camera_id, frame_id): + img_root = os.path.join(root, video_id, 'imgs') + # os.path.exists(img_root) + path = os.path.join(img_root, camera_id, camera_id + '_' + frame_id + '.png') + # assert os.path.exists(path), path + img = cv2.imread(path, cv2.IMREAD_COLOR) + return img + +def cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len = 2): + + represent_relation = OrderedDict() + # represent_order = [] # 防止低版本python的字典是无序的,但似乎用OrderedDict就好了 + for frame in frame_list: + num = int(frame) + if num <= first_batch_len: + key = str(1).zfill(5) + else: + key = str(((num - (first_batch_len + 1)) // BATCH_SIZE) * BATCH_SIZE + first_batch_len + 1).zfill(5) + if key not in represent_relation: + represent_relation[key] = [] + represent_relation[key].append(frame) + + return represent_relation + +def load_bg_img_acc(root, video_id, camera_id, BATCH_SIZE, frame_list): + date = video_id[:8] + sub_video_root = os.path.join(root, date, video_id, 'sub_video') + + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list) + + full_img_list = [] + for represent_frame_id, represented_frame_list in represent_relation.items(): + + video_path = os.path.join(sub_video_root, camera_id, camera_id + '_' + represent_frame_id + '.mp4') + + cap = cv2.VideoCapture(video_path) + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + cap.set(cv2.CAP_PROP_FOURCC, fourcc) + # fps = cap.get(cv2.CAP_PROP_FPS) + # W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + # H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + suc = cap.isOpened() + img_list = [] + for i in range(BATCH_SIZE): + suc, img = cap.read() + if not suc: + break + img_list.append(img) + + # num_img = len(img_list) + + img_list_ = [] + for idx in [int(frame) - int(represent_frame_id) for frame in represented_frame_list]: + # assert idx >= 0 and idx < num_img + img_list_.append(img_list[idx]) + img_list = img_list_ + + for img in img_list: + full_img_list.append(img) + + return full_img_list + +def load_bg_imgs(root, video_id, frame_list, camera_list, BATCH_SIZE): + + rgb_batch = [[] for _ in range(len(frame_list))] + + for c_idx, camera in enumerate(camera_list): + img_list = load_bg_img_acc(root, video_id, camera, BATCH_SIZE, frame_list) + for f_idx, bg in enumerate(img_list): + rgb_batch[f_idx].append(bg) + + return rgb_batch + +def load_crop_info_test(camera_id, frame_id, is_righthand): + ''' + + return: + + ''' + 
if is_righthand:
+        crop_info_path = os.path.join('/home/hlyang/utils/HOI/img/', camera_id, 'crop_imgs_righthand', camera_id + '_' + frame_id + '_crop_info.pkl')
+    else:
+        crop_info_path = os.path.join('/home/hlyang/utils/HOI/img/', camera_id, 'crop_imgs_lefthand', camera_id + '_' + frame_id + '_crop_info.pkl')
+    assert os.path.exists(crop_info_path)
+
+    with open(crop_info_path, 'rb') as f:
+        crop_info = pickle.load(f)
+    h_min, h_max, w_min, w_max, h_mean, w_mean = crop_info
+    return h_min, h_max, w_min, w_max, h_mean, w_mean
+
+def load_crop_info(root, video_id, camera_id, frame_id, is_righthand):
+    '''
+    return:
+        h_min, h_max, w_min, w_max, h_mean, w_mean of the stored crop box
+    '''
+    if is_righthand:
+        crop_info_path = os.path.join(root, video_id, 'crop', 'right_hand', camera_id, camera_id + '_' + frame_id + '_crop_info.pkl')
+    else:
+        crop_info_path = os.path.join(root, video_id, 'crop', 'left_hand', camera_id, camera_id + '_' + frame_id + '_crop_info.pkl')
+    assert os.path.exists(crop_info_path)
+
+    with open(crop_info_path, 'rb') as f:
+        crop_info = pickle.load(f)
+    h_min, h_max, w_min, w_max, h_mean, w_mean = crop_info
+    return h_min, h_max, w_min, w_max, h_mean, w_mean
+
+def load_crop_info_v2(root, date, video_id, from_exp_name, camera_list, frame_list, right_hand_bool, BATCH_SIZE = 20, represent_frame_id = None):
+    '''
+    TODO: handle missing entries, although there should not be any. Current strategy: only load what can be read and skip what is empty.
+
+    '''
+    if represent_frame_id is None:
+        represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2)
+    else:
+        represent_relation = {represent_frame_id: frame_list}
+
+    crop_info_batch = {}
+
+    for represent_frame_id, represented_frame_list in represent_relation.items():
+        if right_hand_bool:
+            path = os.path.join(root, date, video_id, from_exp_name, 'right_hand', represent_frame_id + '_crop_info.pkl')
+        else:
+            path = os.path.join(root, date, video_id, from_exp_name, 'left_hand', represent_frame_id + '_crop_info.pkl')
+        with open(path, 'rb') as f:
+            crop_info_ = pickle.load(f)
+
+        crop_info_batch.update(crop_info_)
+
+        # for f_idx, frame_id in enumerate(represented_frame_list):
+        #     crop_info_batch[frame_id] = {}
+
+        #     crop_info_frame_ = crop_info_.get(frame_id, None)
+        #     if crop_info_frame_ is None:
+        #         continue
+
+        #     for c_idx, camera_id in enumerate(camera_list):
+        #         crop_info_camera_ = crop_info_frame_.get(camera_id, None)
+        #         if crop_info_camera_ is not None:
+        #             crop_info_batch[frame_id][camera_id] = crop_info_camera_
+
+    return crop_info_batch
+
+def get_seg_infos_batch_test(seg_path_prefix, frame_list, camera_list):
+    '''
+    return:
+        object_seg, left_hand_seg, right_hand_seg: each of shape [batch_size, num_camera, h, w]
+    '''
+    object_seg_batch = []
+    left_hand_seg_batch = []
+    right_hand_seg_batch = []
+    for frame_id in frame_list:
+        object_seg_cameras = []
+        left_hand_seg_cameras = []
+        right_hand_seg_cameras = []
+        for camera_id in camera_list:
+            # path = os.path.join(seg_path_prefix, camera_id, 'denoised_mask_imgs', camera_id + '_' + frame_id + '_mask_denoised.png')
+            path = os.path.join(seg_path_prefix, camera_id, 'mask_imgs', camera_id + '_' + frame_id + '_mask.png')
+            assert os.path.exists(path)
+            seg_img = cv2.imread(path, cv2.IMREAD_COLOR) # BGR
+            seg_img = torch.from_numpy(seg_img)
+            # object_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] > 50) & (seg_img[:, :, 2] > 50), True, False)
+            # left_hand_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] > 50) & (seg_img[:, :, 2] < 50), True, False)
+            # 
right_hand_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] < 50) & (seg_img[:, :, 2] > 50), True, False) + object_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] > 50) & (seg_img[:, :, 2] > 50), 1.0, 0.0) + left_hand_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] > 50) & (seg_img[:, :, 2] < 50), 1.0, 0.0) + right_hand_seg = torch.where((seg_img[:, :, 0] < 50) & (seg_img[:, :, 1] < 50) & (seg_img[:, :, 2] > 50), 1.0, 0.0) + object_seg_cameras.append(object_seg) + left_hand_seg_cameras.append(left_hand_seg) + right_hand_seg_cameras.append(right_hand_seg) + object_seg_cameras = torch.stack(object_seg_cameras) + left_hand_seg_cameras = torch.stack(left_hand_seg_cameras) + right_hand_seg_cameras = torch.stack(right_hand_seg_cameras) + object_seg_batch.append(object_seg_cameras) + left_hand_seg_batch.append(left_hand_seg_cameras) + right_hand_seg_batch.append(right_hand_seg_cameras) + object_seg_batch = torch.stack(object_seg_batch) + left_hand_seg_batch = torch.stack(left_hand_seg_batch) + right_hand_seg_batch = torch.stack(right_hand_seg_batch) + return object_seg_batch, left_hand_seg_batch, right_hand_seg_batch + +def get_seg_infos_batch(root, video_id, frame_list, camera_list): + ''' + return: + left_hand_seg, right_hand_seg, object1_seg, object2_seg 形如[batch_size, num_camera, h, w] + ''' + left_hand_seg_batch = [] + right_hand_seg_batch = [] + object1_seg_batch = [] + object2_seg_batch = [] + + mask_dict = {} + for camera_id in camera_list: + path = os.path.join(root, video_id, 'anno_results', video_id+'|'+camera_id+'.npy') + mask = np.load(path) + mask_dict[camera_id] = torch.from_numpy(mask) + + #如果frame_list是连续的,可以直接索引少一层循环,速度应该会快得多。 + frame_idx_list = [int(frame_id)-1 for frame_id in frame_list] + + continuous_bool = True + for i in range(len(frame_idx_list)-1): + if frame_idx_list[i] + 1 != frame_idx_list[i+1]: + continuous_bool = False + break + with torch.no_grad(): + if continuous_bool: + frame_start = frame_idx_list[0] + frame_end = frame_idx_list[-1] + + for camera_id in camera_list: + mask = mask_dict[camera_id] + + left_hand_seg = torch.where(mask[frame_start: frame_end + 1, ...] == 1, 1., 0.) + right_hand_seg = torch.where(mask[frame_start: frame_end + 1, ...] == 2, 1., 0.) + object1_seg = torch.where(mask[frame_start: frame_end + 1, ...] == 3, 1., 0.) + object2_seg = torch.where(mask[frame_start: frame_end + 1, ...] == 4, 1., 0.) + + left_hand_seg_batch.append(left_hand_seg) + right_hand_seg_batch.append(right_hand_seg) + object1_seg_batch.append(object1_seg) + object2_seg_batch.append(object2_seg) + + left_hand_seg_batch = torch.stack(left_hand_seg_batch).transpose(0, 1) + right_hand_seg_batch = torch.stack(right_hand_seg_batch).transpose(0, 1) + object1_seg_batch = torch.stack(object1_seg_batch).transpose(0, 1) + object2_seg_batch = torch.stack(object2_seg_batch).transpose(0, 1) + else: + for frame_id in frame_list: + frame_idx = int(frame_id) - 1 + left_hand_seg_cameras = [] + right_hand_seg_cameras = [] + object1_seg_cameras = [] + object2_seg_cameras = [] + for camera_id in camera_list: + mask = mask_dict[camera_id] + left_hand_seg = torch.where(mask[frame_idx, ...] == 1, 1., 0.) + right_hand_seg = torch.where(mask[frame_idx, ...] == 2, 1., 0.) + object1_seg = torch.where(mask[frame_idx, ...] == 3, 1., 0.) + object2_seg = torch.where(mask[frame_idx, ...] == 4, 1., 0.) 
+ + left_hand_seg_cameras.append(left_hand_seg) + right_hand_seg_cameras.append(right_hand_seg) + object1_seg_cameras.append(object1_seg) + object2_seg_cameras.append(object2_seg) + + left_hand_seg_cameras = torch.stack(left_hand_seg_cameras) + right_hand_seg_cameras = torch.stack(right_hand_seg_cameras) + object1_seg_cameras = torch.stack(object1_seg_cameras) + object2_seg_cameras = torch.stack(object2_seg_cameras) + + left_hand_seg_batch.append(left_hand_seg_cameras) + right_hand_seg_batch.append(right_hand_seg_cameras) + object1_seg_batch.append(object1_seg_cameras) + object2_seg_batch.append(object2_seg_cameras) + + left_hand_seg_batch = torch.stack(left_hand_seg_batch) + right_hand_seg_batch = torch.stack(right_hand_seg_batch) + object1_seg_batch = torch.stack(object1_seg_batch) + object2_seg_batch = torch.stack(object2_seg_batch) + + return left_hand_seg_batch, right_hand_seg_batch, object1_seg_batch, object2_seg_batch + +def get_seg_infos_batch2(root, video_id, frame_list, camera_list): + ''' + get_seg_infos_batch1的numpy实现 + TODO:不知为何性能非常差 + + return: + left_hand_seg, right_hand_seg, object1_seg, object2_seg 形如[batch_size, num_camera, h, w] + ''' + left_hand_seg_batch = [] + right_hand_seg_batch = [] + object1_seg_batch = [] + object2_seg_batch = [] + + mask_dict = {} + for camera_id in camera_list: + path = os.path.join(root, video_id, 'anno_results', video_id+'|'+camera_id+'.npy') + mask = np.load(path) + mask_dict[camera_id] = mask + + #如果frame_list是连续的,可以直接索引少一层循环,速度应该会快得多。 + frame_idx_list = [int(frame_id)-1 for frame_id in frame_list] + + continuous_bool = True + for i in range(len(frame_idx_list)-1): + if frame_idx_list[i] + 1 != frame_idx_list[i+1]: + continuous_bool = False + break + + if continuous_bool: + frame_start = frame_idx_list[0] + frame_end = frame_idx_list[-1] + + for camera_id in camera_list: + print('hello_' + camera_id) + mask = mask_dict[camera_id] + + left_hand_seg = np.where(mask[frame_start: frame_end + 1, ...] == 1, 1., 0.) + right_hand_seg = np.where(mask[frame_start: frame_end + 1, ...] == 2, 1., 0.) + object1_seg = np.where(mask[frame_start: frame_end + 1, ...] == 3, 1., 0.) + object2_seg = np.where(mask[frame_start: frame_end + 1, ...] == 4, 1., 0.) + + left_hand_seg_batch.append(left_hand_seg) + right_hand_seg_batch.append(right_hand_seg) + object1_seg_batch.append(object1_seg) + object2_seg_batch.append(object2_seg) + + left_hand_seg_batch = np.stack(left_hand_seg_batch).transpose(0, 1) + right_hand_seg_batch = np.stack(right_hand_seg_batch).transpose(0, 1) + object1_seg_batch = np.stack(object1_seg_batch).transpose(0, 1) + object2_seg_batch = np.stack(object2_seg_batch).transpose(0, 1) + else: + for frame_id in frame_list: + frame_idx = int(frame_id) - 1 + left_hand_seg_cameras = [] + right_hand_seg_cameras = [] + object1_seg_cameras = [] + object2_seg_cameras = [] + for camera_id in camera_list: + mask = mask_dict[camera_id] + left_hand_seg = np.where(mask[frame_idx, ...] == 1, 1., 0.) + right_hand_seg = np.where(mask[frame_idx, ...] == 2, 1., 0.) + object1_seg = np.where(mask[frame_idx, ...] == 3, 1., 0.) + object2_seg = np.where(mask[frame_idx, ...] == 4, 1., 0.) 
+
+                left_hand_seg_cameras.append(left_hand_seg)
+                right_hand_seg_cameras.append(right_hand_seg)
+                object1_seg_cameras.append(object1_seg)
+                object2_seg_cameras.append(object2_seg)
+
+            left_hand_seg_cameras = np.stack(left_hand_seg_cameras)
+            right_hand_seg_cameras = np.stack(right_hand_seg_cameras)
+            object1_seg_cameras = np.stack(object1_seg_cameras)
+            object2_seg_cameras = np.stack(object2_seg_cameras)
+
+            left_hand_seg_batch.append(left_hand_seg_cameras)
+            right_hand_seg_batch.append(right_hand_seg_cameras)
+            object1_seg_batch.append(object1_seg_cameras)
+            object2_seg_batch.append(object2_seg_cameras)
+
+        left_hand_seg_batch = np.stack(left_hand_seg_batch)
+        right_hand_seg_batch = np.stack(right_hand_seg_batch)
+        object1_seg_batch = np.stack(object1_seg_batch)
+        object2_seg_batch = np.stack(object2_seg_batch)
+
+    return left_hand_seg_batch, right_hand_seg_batch, object1_seg_batch, object2_seg_batch
+
+def get_seg_infos_batch3(root, video_id: str, frame_list, camera_list):
+    '''
+
+    return:
+        seg: shape [batch_size, num_camera, h, w]; the values 1, 2, 3 and 4 denote the left hand, right hand, object1 and object2 respectively
+    '''
+    seg_batch = []
+
+    mask_dict = {}
+    for camera_id in camera_list:
+        path = os.path.join(root, video_id, 'anno_results', video_id+'|'+camera_id+'.npy')
+        mask = np.load(path)
+        mask_dict[camera_id] = mask
+
+    # If frame_list is contiguous we can slice the mask directly and drop one level of looping, which should be much faster.
+    frame_idx_list = [int(frame_id)-1 for frame_id in frame_list]
+
+    continuous_bool = True
+    for i in range(len(frame_idx_list)-1):
+        if frame_idx_list[i] + 1 != frame_idx_list[i+1]:
+            continuous_bool = False
+            break
+
+    if continuous_bool:
+        frame_start = frame_idx_list[0]
+        frame_end = frame_idx_list[-1] + 1
+
+        for camera_id in camera_list:
+            mask = mask_dict[camera_id]
+
+            seg = mask[frame_start: frame_end, ...]
+            seg_batch.append(seg)
+
+        seg_batch = np.stack(seg_batch)
+        seg_batch = seg_batch.swapaxes(0, 1)
+    else:
+        for frame_idx in frame_idx_list:
+            seg_cameras = []
+            for camera_id in camera_list:
+                mask = mask_dict[camera_id]
+                seg = mask[frame_idx, ...]
+ + seg_cameras.append(seg) + + seg_cameras = np.stack(seg_cameras) + + seg_batch.append(seg_cameras) + + seg_batch = np.stack(seg_batch) + + return seg_batch + +def get_downsampled_seg_infos_batch(root, video_id: str, frame_list, camera_list): + ''' + + return: (seg, downsample_factor) + seg: 形如[batch_size, num_camera, h, w],其中值为1 2 3 4分别代表右手、左手、object1(右手操作的物体)和object2(左手操作的物体), h为750, w为1024 + downsample_factor: 4 + ''' + seg_batch = [] + + for frame_id in frame_list: + seg_camera = [] + for camera_id in camera_list: + path = os.path.join(root, video_id, 'anno_results', camera_id, camera_id + '_' + frame_id+'.npy') + assert os.path.exists(path) + seg = np.load(path) + seg_camera.append(seg) + seg_camera = np.stack(seg_camera) + seg_batch.append(seg_camera) + seg_batch = np.stack(seg_batch) + + return seg_batch, 4 + +def get_downsampled_seg_infos_batch_v2(root, video_id: str, from_exp_name, frame_list, camera_list): + ''' + 某些视角会track失败,得不到mask:如果path不存在,就生成一个全0。 + + return: (seg, downsample_factor) + seg: 形如[batch_size, num_camera, h, w],其中值为1 2 3 4分别代表右手、左手、object1(右手操作的物体)和object2(左手操作的物体), h为750, w为1024 + downsample_factor: 4 + ''' + seg_batch = [] + + for frame_id in frame_list: + seg_camera = [] + for camera_id in camera_list: + path = os.path.join(root, video_id, from_exp_name, 'res', camera_id, camera_id + '_' + frame_id+'.npy') + if os.path.exists(path): + seg = np.load(path) + else: + seg = np.zeros(shape=(750, 1024), dtype=np.uint8) + seg_camera.append(seg) + seg_camera = np.stack(seg_camera) + seg_batch.append(seg_camera) + seg_batch = np.stack(seg_batch) + + return seg_batch, 4 + +def get_downsampled_seg_infos_batch_v2_acc_batch(root, date, video_id: str, from_exp_name, frame_list, camera_list, BATCH_SIZE = 20, represent_frame_id = None): + ''' + 某些视角会track失败,得不到mask:如果path不存在,就生成一个全0。 + + return: (full_mask, downsample_factor) + full_mask: 形如[batch_size, num_camera, h, w],其中值为1 2 3 4分别代表右手、左手、object1(右手操作的物体)和object2(左手操作的物体), h为750, w为1024 + downsample_factor: 4 + ''' + + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + seg_batch = [] + + for represent_frame_id, represented_frame_list in represent_relation.items(): + path = os.path.join(root, date, video_id, from_exp_name, 'res', represent_frame_id+'.npy') + mask = np.load(path) + + for f_idx, frame_id in enumerate(represented_frame_list): + seg_camera = [] + for c_idx, camera_id in enumerate(camera_list): + try: + seg = mask[f_idx, c_idx] + except: + seg = np.zeros(shape=(750, 1024), dtype=np.uint8) + seg_camera.append(seg) + seg_camera = np.stack(seg_camera) + seg_batch.append(seg_camera) + + seg_batch = np.stack(seg_batch) + + return seg_batch, 4 + +def get_downsampled_2d_mask(root, video_id: str, mask_type: str, frame_list, camera_list): + ''' + 用于fit_object_model中对物体进行监督 + 20240129 + + return: (full_mask, downsample_factor) + full_mask: 形如[batch_size, num_camera, h, w],其中值为1 2 3 4分别代表右手、左手、object1(右手操作的物体)和object2(左手操作的物体), h为750, w为1024 + downsample_factor: 4 + ''' + + assert mask_type in ['masks', 'fake_masks'] + date = video_id[:8] + + dir = os.path.join('/share1/datasets/HOI-mocap/2Dmask_results_final', date, video_id) + + frame_idx_list = [(int(frame)-1) for frame in frame_list] + seg_batch = [] + for camera in camera_list: + + path = os.path.join(dir, f'{camera}_{mask_type}.npy') + mask = np.load(path) + + seg_batch.append(mask[frame_idx_list]) + + seg_batch = 
np.stack(seg_batch) + seg_batch = np.swapaxes(seg_batch, 0, 1) + + return seg_batch, 4 + +def cvt_multiview_imgs2mp4(img_dir:str, save_path, save_fps, camera_list, frame_list, height = 3000, width = 4096, downsample_factor = 4): + # camera_img_dict = {} + # paths = list(scandir(img_prefix, suffix='.png', recursive=True, full_path=True)) + # for path in paths: + # dirname, basename = os.path.split(path) + # camera_id = basename.split('_')[0] + # if not camera_id in camera_img_dict: + # camera_img_dict[camera_id] = [] + width = width // downsample_factor + height = height // downsample_factor + + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + videoWriter = cv2.VideoWriter(save_path, fourcc, save_fps, (width * 4, height * 3)) + + for frame_id in tqdm(frame_list): + saved_img = np.zeros((height * 3, width * 4, 3)).astype(np.uint8) + for camera_idx, camera_id in enumerate(camera_list): + img_path = os.path.join(img_dir, camera_id, camera_id+'_'+frame_id+'.png') + # assert os.path.exists(img_path) + if os.path.exists(img_path): + img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) + img = cv2.resize(img, (width, height)) + cv2.putText(img, f'{frame_id} {camera_id}', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=(0, 255, 0), thickness=2) + else: + img = np.full((height, width, 3), 255, dtype=np.uint8) + saved_img[height*(camera_idx//4) : height*((camera_idx//4)+1), width*(camera_idx%4) : width*((camera_idx%4)+1)] = img + videoWriter.write(saved_img) + videoWriter.release() + +def load_mmpose_joints(root, video_id, camera_list, frame_id, is_righthand): + joints = [] + for camera_id in camera_list: + if is_righthand: + joints_path = os.path.join(root, video_id, 'mmpose_right_hand','predictions', camera_id + '_' + frame_id + '.json') + else: + joints_path = os.path.join(root, video_id, 'mmpose_left_hand','predictions', camera_id + '_' + frame_id + '.json') + assert os.path.exists(joints_path) + with open(joints_path, 'r') as f: + joints_info = json.load(f) + + h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info(video_id, camera_id, frame_id, is_righthand) + + # 注意这里是[w_min, h_min] + joints.append(torch.stack([torch.FloatTensor(x) + torch.FloatTensor([w_min, h_min]) for x in joints_info[0]['keypoints']], dim=0)) + joints = torch.stack(joints, dim=0) + return joints + +def load_mmpose_joints_test(camera_list, frame_id, is_righthand): + joints_path_prefix = '/home/hlyang/HOI/mmpose/test/output_both_hands/predictions' + + joints = [] + for camera_id in camera_list: + if is_righthand: + joints_path = os.path.join(joints_path_prefix, camera_id + '_' + frame_id + '_crop_no_lefthand.json') + else: + joints_path = os.path.join(joints_path_prefix, camera_id + '_' + frame_id + '_crop_no_righthand.json') + assert os.path.exists(joints_path) + with open(joints_path, 'r') as f: + joints_info = json.load(f) + + h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info(camera_id, frame_id, is_righthand) + + # 注意这里是[w_min, h_min] + joints.append(torch.stack([torch.FloatTensor(x) + torch.FloatTensor([w_min, h_min]) for x in joints_info[0]['keypoints']], dim=0)) + joints = torch.stack(joints, dim=0) + return joints + +def load_mmpose_joints_batch(root, video_id, camera_list, frame_list, is_righthand): + joints = [] + for frame_id in frame_list: + joints_frame = [] + for camera_id in camera_list: + if is_righthand: + joints_path = os.path.join(root, video_id, 'mmpose', 'right_hand','predictions', camera_id + '_' + frame_id + '.json') + else: + joints_path = os.path.join(root, video_id, 'mmpose', 
'left_hand','predictions', camera_id + '_' + frame_id + '.json') + assert os.path.exists(joints_path) + with open(joints_path, 'r') as f: # TODO: 如果joints_path不存在(或者mmpose没预测出来), 构建一个格式相同的joints_info, 把数值都填成1e9 + joints_info = json.load(f) + + h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info(video_id, camera_id, frame_id, is_righthand) + # 注意这里是[w_min, h_min] + joints_frame.append(torch.stack([torch.FloatTensor(x) + torch.FloatTensor([w_min, h_min]) for x in joints_info[0]['keypoints']], dim=0)) + + joints_frame = torch.stack(joints_frame, dim=0) + joints.append(joints_frame) + # print('------------') + # print(joints) + return torch.stack(joints, dim=0) + +def load_mmpose_joints_batch_v2(root, video_id, camera_list, frame_list, is_righthand): + ''' + 允许一部分mmpose的结果找不到,某一帧可能存在某些视角缺失。 + ''' + num_frame = len(frame_list) + num_camera = len(camera_list) + invalid_exists_bool = False + valid_mask = torch.full((num_frame, num_camera), 1., dtype=torch.float32) + + joints = [] + for frame_idx, frame_id in enumerate(frame_list): + joints_frame = [] + for camera_idx, camera_id in enumerate(camera_list): + if is_righthand: + joints_path = os.path.join(root, video_id, 'mmpose', 'right_hand','predictions', camera_id + '_' + frame_id + '.json') + else: + joints_path = os.path.join(root, video_id, 'mmpose', 'left_hand','predictions', camera_id + '_' + frame_id + '.json') + + if os.path.exists(joints_path): # mmpose结果存在 + with open(joints_path, 'r') as f: + joints_info = json.load(f) + + h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info(video_id, camera_id, frame_id, is_righthand) + # 注意这里是[w_min, h_min] + joints_frame.append(torch.stack([torch.FloatTensor(xy) + torch.FloatTensor([w_min, h_min]) for xy in joints_info[0]['keypoints']], dim=0)) + else: # TODO: 如果joints_path不存在(或者mmpose没预测出来), 构建一个格式相同的joints_info, 把数值都填成1e9 + invalid_exists_bool = True + valid_mask[frame_idx, camera_idx] = 0. 
+ joints_frame.append(torch.full((21,2), 1e9, dtype=torch.float32)) + + joints_frame = torch.stack(joints_frame, dim=0) + joints.append(joints_frame) + return torch.stack(joints, dim=0), invalid_exists_bool, valid_mask + +def load_mmpose_joints_batch_v3(root, date, video_id, crop_info_from_exp_name, camera_list, frame_list, represent_frame_id, is_righthand): + ''' + 允许一部分mmpose的结果找不到,某一帧可能存在某些视角缺失。 + ''' + num_frame = len(frame_list) + num_camera = len(camera_list) + invalid_exists_bool = False + valid_mask = torch.full((num_frame, num_camera), 1., dtype=torch.float32) + + crop_info_batch = load_crop_info_v2(root, date, video_id, crop_info_from_exp_name, camera_list, frame_list, is_righthand, represent_frame_id=represent_frame_id) + joints = [] + for frame_idx, frame_id in enumerate(frame_list): + joints_frame = [] + for camera_idx, camera_id in enumerate(camera_list): + if is_righthand: + joints_path = os.path.join(root, date, video_id, 'mmpose', 'right_hand','predictions', camera_id + '_' + frame_id + '.json') + else: + joints_path = os.path.join(root, date, video_id, 'mmpose', 'left_hand','predictions', camera_id + '_' + frame_id + '.json') + + if os.path.exists(joints_path): # mmpose结果存在 + with open(joints_path, 'r') as f: + joints_info = json.load(f) + + # h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info_v2(date, video_id, camera_id, frame_id, is_righthand) + h_min, h_max, w_min, w_max, h_mean, w_mean = crop_info_batch[frame_id][camera_id] + # 注意这里是[w_min, h_min] + joints_frame.append(torch.stack([torch.FloatTensor(xy) + torch.FloatTensor([w_min, h_min]) for xy in joints_info[0]['keypoints']], dim=0)) + else: # TODO: 如果joints_path不存在(或者mmpose没预测出来), 构建一个格式相同的joints_info, 把数值都填成1e9 + invalid_exists_bool = True + valid_mask[frame_idx, camera_idx] = 0. 
+ joints_frame.append(torch.full((21,2), 1e9, dtype=torch.float32)) + + joints_frame = torch.stack(joints_frame, dim=0) + joints.append(joints_frame) + return torch.stack(joints, dim=0), invalid_exists_bool, valid_mask + +def load_mmpose_joints_batch_v3_acc(root, date, video_id, from_exp_name, camera_list, frame_list, right_hand_bool, BATCH_SIZE = 20, represent_frame_id = None): + ''' + 允许一部分mmpose的结果找不到,某一帧可能存在某些视角缺失。 + ''' + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + invalid_exists_bool = True + + hand_joints_2d_gt_batch = [] + mmpose_valid_mask_batch = [] + for represent_frame_id, represented_frame_list in represent_relation.items(): + if right_hand_bool: + path = os.path.join(root, date, video_id, from_exp_name, 'right_hand','predictions', f'hand_{represent_frame_id}.pkl') + else: + path = os.path.join(root, date, video_id, from_exp_name, 'left_hand','predictions', f'hand_{represent_frame_id}.pkl') + with open(path, 'rb') as f: + data_batch = pickle.load(f) + hand_joints_2d_gt, mmpose_valid_mask = data_batch + + for frame_id in represented_frame_list: + idx = int(frame_id) - int(represent_frame_id) + hand_joints_2d_gt_batch.append(hand_joints_2d_gt[idx]) + mmpose_valid_mask_batch.append(mmpose_valid_mask[idx]) + + hand_joints_2d_gt_batch = torch.stack(hand_joints_2d_gt_batch) + mmpose_valid_mask_batch = torch.stack(mmpose_valid_mask_batch) + + return hand_joints_2d_gt_batch, mmpose_valid_mask_batch + +def load_mmpose_joints_batch_test(camera_list, frame_list, is_righthand): + joints_path_prefix = '/home/hlyang/HOI/mmpose/test/output_both_hands/predictions' + joints = [] + for frame_id in frame_list: + joints_frame = [] + for camera_id in camera_list: + if is_righthand: + joints_path = os.path.join(joints_path_prefix, camera_id + '_' + frame_id + '_crop_no_lefthand.json') + else: + joints_path = os.path.join(joints_path_prefix, camera_id + '_' + frame_id + '_crop_no_righthand.json') + assert os.path.exists(joints_path) + with open(joints_path, 'r') as f: + joints_info = json.load(f) + + h_min, h_max, w_min, w_max, h_mean, w_mean = load_crop_info(camera_id, frame_id, is_righthand) + # 注意这里是[w_min, h_min] + joints_frame.append(torch.stack([torch.FloatTensor(x) + torch.FloatTensor([w_min, h_min]) for x in joints_info[0]['keypoints']], dim=0)) + joints_frame = torch.stack(joints_frame, dim=0) + joints.append(joints_frame) + return torch.stack(joints, dim=0) + +def seg_downsample_2_set_test(seg, downsample_factor, adjust_factor): + ''' + 将seg转换成set以计算CD Loss,使用downsample来降低集合中元素中个数。 + + input: + seg: shape如[bs, num_camera, h, w] + + return: + seg_set: shape如[bs, num_camera, num_verts_in_this_camera, 2] + num_verts_in_this_camera是不定的,所以实际上是个二维list,元素类型为tensor + ''' + + batch_size, num_camera, h, w = seg.shape + assert downsample_factor >= 0 + + seg = torch.nn.functional.interpolate(seg.float(), size=[h // downsample_factor, w // downsample_factor], mode='nearest') + + seg_set = [] + for batch in range(batch_size): + seg_set_batch = [] + for camera in range(num_camera): + idx = torch.flip(torch.nonzero(seg[batch, camera]), dims=[-1]) + # 注意第一个相机的分辨率是别的相机的1/2 + if camera == 0: + seg_set_batch.append(idx * downsample_factor * adjust_factor / 2) + else: + seg_set_batch.append(idx * downsample_factor * adjust_factor) + seg_set.append(seg_set_batch) + + return seg_set + +def seg2set(seg, factor): + ''' + 将seg转换成set以计算CD Loss。 + + input: + seg: shape如[bs, 
num_camera, h, w] + + return: + seg_set: shape如[bs, num_camera, num_verts_in_this_camera, 2] + num_verts_in_this_camera是不定的,所以实际上是个二维list,元素类型为tensor + ''' + + batch_size, num_camera, h, w = seg.shape + assert factor >= 0 + + seg_set = [] + for batch in range(batch_size): + seg_set_batch = [] + for camera in range(num_camera): + # seg_set_camera = [] + idx = torch.flip(torch.nonzero(seg[batch, camera]), dims=[-1]) + # for x, y in idx[:]: + # seg_set_camera.append(torch.FloatTensor([y * factor, x * factor])) + # seg_set_camera = torch.stack(seg_set_camera) + seg_set_batch.append(idx * factor) + seg_set.append(seg_set_batch) + + return seg_set + +def load_mano_info_batch(root, video_id: str, from_exp_name: str, frame_list: str, right_hand_bool: bool): + if right_hand_bool: + dir = os.path.join(root, video_id, from_exp_name, 'res', 'right_hand') + else: + dir = os.path.join(root, video_id, from_exp_name, 'res', 'left_hand') + + hand_trans_batch = [] + hand_pose_batch = [] + mask_batch = [] + for frame_id in frame_list: + path = os.path.join(dir, 'hand_' + frame_id + '.pkl') + assert os.path.exists(path), path + with open(path, 'rb') as f: + data = pickle.load(f) + hand_trans = data['hand_trans'] + hand_pose = data['hand_pose'] + mask = data.get('joints_mask', None) + hand_trans_batch.append(hand_trans) + hand_pose_batch.append(hand_pose) + if mask is not None: + mask_batch.append(mask) + hand_trans_batch = torch.stack(hand_trans_batch) + hand_pose_batch = torch.stack(hand_pose_batch) + + if len(mask_batch) == 0: + return hand_trans_batch, hand_pose_batch, None + else: + mask_batch = torch.stack(mask_batch) + return hand_trans_batch, hand_pose_batch, mask_batch + +def load_mano_info_batch_acc(root, date, video_id: str, from_exp_name: str, frame_list: str, right_hand_bool: bool, BATCH_SIZE = 20, represent_frame_id = None, first_batch_len = 2): + if right_hand_bool: + dir = os.path.join(root, date, video_id, from_exp_name, 'res', 'right_hand') + else: + dir = os.path.join(root, date, video_id, from_exp_name, 'res', 'left_hand') + + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=first_batch_len) + else: + represent_relation = {represent_frame_id: frame_list} + + hand_trans_batch = [] + hand_pose_batch = [] + mask_batch = [] + + for represent_frame_id, represented_frame_list in represent_relation.items(): + # print(represent_frame_id, represented_frame_list) + path = os.path.join(dir, 'hand_' + represent_frame_id + '.pkl') + with open(path, 'rb') as f: + data_batch = pickle.load(f) + + for frame_id in represented_frame_list: + data = data_batch[frame_id] + + hand_trans = data['hand_trans'] + hand_pose = data['hand_pose'] + mask = data.get('joints_mask', None) + + hand_trans_batch.append(hand_trans) + hand_pose_batch.append(hand_pose) + if mask is not None: + mask_batch.append(mask) + + hand_trans_batch = torch.stack(hand_trans_batch) + hand_pose_batch = torch.stack(hand_pose_batch) + + if len(mask_batch) == 0: + return hand_trans_batch, hand_pose_batch, None + else: + mask_batch = torch.stack(mask_batch) + return hand_trans_batch, hand_pose_batch, mask_batch + +def load_obj_info_batch_acc(root, date, video_id: str, from_exp_name: str, frame_list: str, load_obj_type: str, BATCH_SIZE = 20, represent_frame_id = None, first_batch_len = 2): + + assert load_obj_type in ('tool', 'obj') + + if load_obj_type == 'tool': + dir = os.path.join(root, date, video_id, from_exp_name, 'res', 'tool') + else: + dir = os.path.join(root, date, 
video_id, from_exp_name, 'res', 'obj') + + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=first_batch_len) + else: + represent_relation = {represent_frame_id: frame_list} + + obj_trans_batch = [] + obj_pose_batch = [] + + for represent_frame_id, represented_frame_list in represent_relation.items(): + # print(represent_frame_id, represented_frame_list) + path = os.path.join(dir, 'obj_' + represent_frame_id + '.pkl') + with open(path, 'rb') as f: + data_batch = pickle.load(f) + + for frame_id in represented_frame_list: + data = data_batch[frame_id] + + hand_pose = data['obj_pose'] + hand_trans = data['obj_trans'] + + obj_pose_batch.append(hand_pose) + obj_trans_batch.append(hand_trans) + + obj_pose_batch = torch.stack(obj_pose_batch) + obj_trans_batch = torch.stack(obj_trans_batch) + + return obj_pose_batch, obj_trans_batch + +def get_mano_info_batch_test(mano_info_path_prefix: str, frame_list: str): + hand_trans_batch = [] + hand_pose_batch = [] + mask_batch = [] + for frame_id in frame_list: + path = os.path.join(mano_info_path_prefix, 'hand_' + frame_id + '.pkl') + assert os.path.exists(path) + with open(path, 'rb') as f: + data = pickle.load(f) + hand_trans = data['hand_trans'] + hand_pose = data['hand_pose'] + mask = data.get('joints_mask', None) + hand_trans_batch.append(hand_trans) + hand_pose_batch.append(hand_pose) + if mask is not None: + mask_batch.append(mask) + hand_trans_batch = torch.stack(hand_trans_batch) + hand_pose_batch = torch.stack(hand_pose_batch) + + if len(mask_batch) == 0: + return hand_trans_batch, hand_pose_batch, None + else: + mask_batch = torch.stack(mask_batch) + return hand_trans_batch, hand_pose_batch, mask_batch + +# def liuyun_convert_axangle_to_euler_2(): +# from transforms3d.euler import euler2axangle +# from transforms3d.axangles import axangle2euler +# a = np.float32([0.1, 0.2, 0.3]) # axangle +# ai, aj, ak = axangle2euler(a) # axes="sxyz" +# a_euler = np.float32([ai, aj, ak]) +# a_axangle = euler2axangle(ai, aj, ak) +# assert a == a_axangle + #nlopt + +def load_joint_ransac_batch(root, video_id, frame_list, mask_type, right_hand_bool): + ''' + 从文件中读取joint ransac得到的hand_trans_3d和joints_mask,并且打成batch。 + + return hand_trans_3d, hand_joints_mask + ''' + assert mask_type in ('joints_mask', 'ransac_mask', 'final_mask') + + if right_hand_bool: + join_ransac_res_dir = os.path.join(root, video_id, 'joint_ransac_every_joint_triangulation', 'res', 'right_hand') + else: + join_ransac_res_dir = os.path.join(root, video_id, 'joint_ransac_every_joint_triangulation', 'res', 'left_hand') + assert os.path.exists(join_ransac_res_dir) + + trans_batch = [] + mask_batch = [] + for frame_id in frame_list: + path = os.path.join(join_ransac_res_dir, f'hand_{frame_id}.pkl') + assert os.path.exists(path) + with open(path, 'rb') as f: + data = pickle.load(f) + trans_batch.append(data['joints_trans']) + mask_batch.append(data[mask_type]) + trans_batch = torch.stack(trans_batch) + mask_batch = torch.stack(mask_batch) + return trans_batch, mask_batch + +def load_joint_ransac_batch_acc(root, date, video_id, from_exp_name, frame_list, mask_type, right_hand_bool, BATCH_SIZE = 20, represent_frame_id = None): + ''' + 从文件中读取joint ransac得到的hand_trans_3d和joints_mask,并且打成batch。 + + return hand_trans_3d, hand_joints_mask + ''' + assert mask_type in ('joints_mask', 'ransac_mask', 'final_mask') + + if right_hand_bool: + join_ransac_res_dir = os.path.join(root, date, video_id, from_exp_name, 'res', 'right_hand') + else: + 
join_ransac_res_dir = os.path.join(root, date, video_id, from_exp_name, 'res', 'left_hand')
+
+ if represent_frame_id is None:
+ represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2)
+ else:
+ represent_relation = {represent_frame_id: frame_list}
+
+ trans_batch = []
+ mask_batch = []
+
+ for represent_frame_id, represented_frame_list in represent_relation.items():
+ path = os.path.join(join_ransac_res_dir, 'hand_' + represent_frame_id + '.pkl')
+ with open(path, 'rb') as f:
+ data_batch = pickle.load(f)
+ for frame_id in represented_frame_list:
+ data = data_batch[frame_id]
+ trans_batch.append(data['joints_trans'])
+ mask_batch.append(data[mask_type])
+ trans_batch = torch.stack(trans_batch)
+ mask_batch = torch.stack(mask_batch)
+ return trans_batch, mask_batch
+
+def world2camera_batch_cam(verts_world, R, T):
+ '''
+ TODO: remove the duplicate world2camera_batch_cam from fit_hand_model.py
+ verts_world: [bs, num_verts, 3]
+ R: [num_cameras, 3, 3]
+ T: [num_cameras, 1, 3]
+
+ return:
+ verts_camera: [bs, num_cameras, num_verts, 3]
+ '''
+ # batch_size = verts_world.shape[0]
+ num_camera = R.shape[0]
+ verts_camera = torch.einsum('cij, bnj -> bcni', R, verts_world)
+ verts_camera = verts_camera + T.reshape(1, num_camera, 1, 3)
+ return verts_camera
+
+def camera2pixel_batch_cam(verts_camera, K):
+ '''
+ TODO: remove the duplicate camera2pixel_batch_cam from fit_hand_model.py
+ verts_camera: [bs, num_cameras, num_verts, 3]
+ K: [num_cameras, 3, 3]
+ '''
+ verts_pixel = torch.einsum('cij, bcnj -> bcni', K, verts_camera)
+ ret1 = verts_pixel[..., 0] / verts_pixel[..., 2]
+ ret2 = verts_pixel[..., 1] / verts_pixel[..., 2]
+ verts_pixel = torch.stack([ret1, ret2], dim=-1)
+ return verts_pixel
+
+def get_camera_params(calibration_info_path: str, camera_list):
+ '''
+ TODO: remove the duplicate get_camera_params from fit_hand_model.py
+ '''
+
+ assert os.path.exists(calibration_info_path)
+ with open(calibration_info_path) as f:
+ cali_data = json.load(f)
+
+ R_list = []
+ R_inverse_list = []
+ T_list = []
+ K_list = []
+ focal_length_list = []
+ principal_point_list = []
+
+ for camera_id in camera_list:
+ R = torch.tensor(cali_data[camera_id]['R']).reshape(1, 3, 3)
+ T = torch.tensor(cali_data[camera_id]['T']).reshape(1, 3)
+ K = torch.tensor(cali_data[camera_id]['K']).reshape(1, 3, 3)
+ fx = K[0, 0, 0]
+ fy = K[0, 1, 1]
+ px = K[0, 0, 2]
+ py = K[0, 1, 2]
+
+ R_list.append(R)
+ R_inverse_list.append(R.inverse())
+ T_list.append(T)
+ K_list.append(K)
+ focal_length_list.append(torch.tensor([fx, fy]).unsqueeze(0))
+ principal_point_list.append(torch.tensor([px, py]).unsqueeze(0))
+
+ R = torch.concatenate(R_list, dim=0)
+ R_inverse = torch.concatenate(R_inverse_list, dim=0)
+ T = torch.concatenate(T_list, dim=0)
+ K = torch.concatenate(K_list, dim=0)
+ focal_length = torch.concatenate(focal_length_list, dim=0)
+ principal_point = torch.concatenate(principal_point_list, dim=0)
+
+ image_size = torch.tensor([3000, 4096]).unsqueeze(0).repeat(len(camera_list), 1)
+
+ return R, R_inverse, T, K, focal_length, principal_point, image_size
+
+def render_from_mano_params(root, video_id, exp_name, camera_list, frame_list, right_hand_bool, device):
+ '''
+ Render the results of one experiment (stored as MANO params); the final renders are kept on the CPU.
+
+ Warning: this can easily run out of memory; this much data should not be kept in RAM.
+ '''
+ hand_trans_batch, hand_pose_batch, _ = load_mano_info_batch(root, video_id, exp_name, frame_list, right_hand_bool)
+ num_frame = len(frame_list)
+
+ device = torch.device(device)
+
+ calibration_info_path = os.path.join(root, video_id, 'src', 'calibration.json')
+ assert os.path.exists(calibration_info_path)
+ R,
R_inverse, T, K, focal_length, principal_point, image_size = get_camera_params(calibration_info_path, camera_list) + + use_pca = False + ncomps = 45 + if right_hand_bool: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='right', center_idx=0) + else: + mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='left', center_idx=0) + + lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]]) + camera = PerspectiveCameras(device=device, R=R_inverse, T=T, image_size=image_size, in_ndc=False, focal_length=-focal_length, principal_point=principal_point) + raster_settings = RasterizationSettings( + image_size=(3000, 4096), + blur_radius=0, + faces_per_pixel=1, + ) + renderer = MeshRenderer(rasterizer=MeshRasterizer(cameras=camera, raster_settings=raster_settings), shader=SoftPhongShader(device=device, cameras=camera, lights=lights)) + faces_idx = mano_layer.th_faces.detach().clone().to(device) + + rendered_image_list = [] + + for i in tqdm(range(num_frame)): + hand_pose = hand_pose_batch[i, ...] + hand_trans = hand_trans_batch[i, ...] + + if len(hand_pose.shape) == 1: + hand_pose = hand_pose.unsqueeze(0) + verts, _, _ = mano_layer(hand_pose) + verts = verts.squeeze() + verts = verts / 1000.0 + verts += hand_trans + verts = verts.to(device) + + + mesh = Meshes(verts=[verts], faces=[faces_idx]) + color = torch.ones(1, verts.size(0), 3, device=device) + color[:, :, 2] = 255 + mesh.textures = TexturesVertex(verts_features=color) + mesh = mesh.extend(R.shape[0]) + + images = renderer(mesh)[..., :3].squeeze() + rendered_image_list.append(images.cpu()) + rendered_image_batch = torch.stack(rendered_image_list) + + return rendered_image_batch + +def denoise(mask, half_length = 60, threshold = 900, return_rate = False): + rows, cols = mask.shape + cnt = np.zeros(mask.shape) + idx = np.nonzero(mask) + + if return_rate: + len1 = len(idx[0]) + + min_x = np.maximum(0, idx[0] - half_length) + max_x = np.minimum(rows - 1, idx[0] + half_length + 1) + min_y = np.maximum(0, idx[1] - half_length) + max_y = np.minimum(cols - 1, idx[1] + half_length + 1) + # 能再次批处理优化吗? 
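+ # The comment above asks whether this neighbour count can be batch-optimised. One possible
+ # vectorised sketch using a box filter (an assumption, not verified against the loop's exact
+ # behaviour at the image borders, where the window is clamped rather than reflected; cv2 is
+ # already used elsewhere in this module):
+ #   k = 2 * half_length + 1
+ #   cnt_all = cv2.boxFilter(mask.astype(np.float32), -1, (k, k), normalize=False)
+ #   valid = mask & (cnt_all > threshold)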
+ for x, y, x1, x2, y1, y2 in zip(idx[0], idx[1], min_x, max_x, min_y, max_y): + cnt[x, y] = mask[x1:x2, y1:y2].sum() + valid = mask & (cnt > threshold) + + if return_rate: + len2 = valid.sum() + rate = len2/len1 + return valid, rate + else: + return valid + +def denoise_mask(mask, half_length = 60, threshold = 900): + ''' + input: + mask: shape: [height, width] + ''' + assert len(mask.shape) == 2 + + right_hand_mask = np.where(mask == 1, True, False) + left_hand_mask = np.where(mask == 2, True, False) + object1_mask = np.where(mask == 3, True, False) + object2_mask = np.where(mask == 4, True, False) + right_hand_mask = denoise(right_hand_mask, half_length, threshold) + left_hand_mask = denoise(left_hand_mask, half_length, threshold) + + denoised_mask = np.zeros_like(mask).astype(np.uint8) + denoised_mask[right_hand_mask] = 1 + denoised_mask[left_hand_mask] = 2 + denoised_mask[object1_mask] = 3 + denoised_mask[object2_mask] = 4 + + return denoised_mask + +def read_init_crop(root, video_id, camera_id, frame_id): + ''' + 第一排为右手,第二排为左手。 + 每一排分别是 min_h, max_h, min_w, max_w + + return shape: [2, 4] + ''' + path = os.path.join(root, video_id, 'src', 'init_crop', f'{camera_id}_{frame_id}.txt') + assert os.path.exists(path) + crop_info = np.loadtxt(path).astype(np.uint32) + + return crop_info + +def get_obj_mesh_path(root, obj_id: str): + obj_dir = os.path.join(root, 'object_models_final') + obj_filenames = os.listdir(obj_dir) + valid_filename = [filename for filename in obj_filenames if (filename.endswith(f'object{obj_id}') or ((obj_id[0] == '0') and (filename.endswith(f'object{obj_id[1:]}'))))] # 加上object防止多个匹配结果 + assert len(valid_filename) == 1, f'obj {obj_id} match failed' + obj_path = None + for mesh_name in os.listdir(os.path.join(obj_dir, valid_filename[0])): + # if "cm.obj" in mesh_name: + if ("m.obj" in mesh_name) and (not "cm.obj" in mesh_name): + assert obj_path is None + obj_path = os.path.join(obj_dir, valid_filename[0], mesh_name) + assert os.path.exists(obj_path) + return obj_path + +def verts_apply_pose_batch(verts, pose_batch): + rotation_batch = pose_batch[:, :3, :3] # [num_frame, 3, 3] + translation_batch = pose_batch[:, :3, 3] # [num_frame, 1, 3] + + verts = torch.einsum('fij, vj -> fvi', rotation_batch, verts) # [num_frame, num_verts, 3] + verts = verts + translation_batch.unsqueeze(1) + return verts + +def load_nokov_objs_mesh(root, video_id, frame_list): + ''' + + return: tool_verts_batch: [num_frame, num_verts, 3] + tool_faces + obj_verts_batch: [num_frame, num_verts, 3] + obj_faces + ''' + date = video_id[:8] + original_nokov_data_dir = os.path.join(root, date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir) + nokov_data_filenames = os.listdir(original_nokov_data_dir) + + frame_idx_list = [(int(frame)-1) for frame in frame_list] + + tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + tool_mesh_path = get_obj_mesh_path(root, obj_id=tool_id) + tool_mesh = o3d.io.read_triangle_mesh(tool_mesh_path) + tool_mesh = tool_mesh.simplify_quadric_decimation(2000) + tool_verts = np.asarray(tool_mesh.vertices) + # tool_verts = torch.from_numpy(tool_verts / 100.).float() + tool_verts = torch.from_numpy(tool_verts).float() + tool_faces = torch.from_numpy(np.asarray(tool_mesh.triangles)) + + tool_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{tool_id}.npy') + assert os.path.exists(tool_pose_path) + tool_pose_batch = torch.from_numpy(np.load(tool_pose_path))[frame_idx_list, ...] 
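+ # verts_apply_pose_batch (defined above) applies a per-frame rigid transform v' = R @ v + t,
+ # with R = pose[:, :3, :3] and t = pose[:, :3, 3]. A minimal shape sketch (assumed toy values):
+ #   verts = torch.zeros(100, 3)                 # [num_verts, 3]
+ #   poses = torch.eye(4).repeat(5, 1, 1)        # [num_frame, 4, 4], identity rotation
+ #   poses[:, 0, 3] = 1.0                        # translate 1 unit along x
+ #   out = verts_apply_pose_batch(verts, poses)  # [5, 100, 3], every x-coordinate == 1.0
+ # The call below does exactly this with the NOKOV tool poses loaded above.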
+ tool_verts_batch = verts_apply_pose_batch(tool_verts, tool_pose_batch) + + obj_mesh_path = get_obj_mesh_path(root, obj_id=obj_id) + obj_mesh = o3d.io.read_triangle_mesh(obj_mesh_path) + obj_mesh = obj_mesh.simplify_quadric_decimation(2000) + obj_verts = np.asarray(obj_mesh.vertices) + # obj_verts = torch.from_numpy(obj_verts / 100.).float() + obj_verts = torch.from_numpy(obj_verts).float() + obj_faces = torch.from_numpy(np.asarray(obj_mesh.triangles)) + + obj_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{obj_id}.npy') + assert os.path.exists(obj_pose_path) + obj_pose_batch = torch.from_numpy(np.load(obj_pose_path))[frame_idx_list, ...] + obj_verts_batch = verts_apply_pose_batch(obj_verts, obj_pose_batch) + + return tool_verts_batch, tool_faces, obj_verts_batch, obj_faces + +def check_nokov_exists(root, video_id): + date = video_id[:8] + original_nokov_data_dir = os.path.join(root, date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir), original_nokov_data_dir + + nokov_data_filenames = os.listdir(original_nokov_data_dir) + tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + try: + tool_mesh_path = get_obj_mesh_path(root, obj_id=tool_id) + obj_mesh_path = get_obj_mesh_path(root, obj_id=obj_id) + tool_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{tool_id}.npy') + obj_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{obj_id}.npy') + + if not os.path.exists(tool_pose_path): + return False + if not os.path.exists(obj_pose_path): + return False + except: + return False + return True + + +def cal_normals(verts, faces): + assert len(verts.shape) == 3 + bs = verts.shape[0] + + + with torch.no_grad(): + verts = verts.detach().clone().cpu() + verts = torch.unbind(verts, dim=0) + verts = [tensor.numpy() for tensor in verts] + + faces = faces.detach().clone().cpu().numpy() + + normals_list = [] + for i in range(bs): + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector(verts[i]) + mesh.triangles = o3d.utility.Vector3iVector(faces) + mesh.compute_vertex_normals() + normals = mesh.vertex_normals + normals_list.append(torch.from_numpy(np.asarray(normals))) + + normals = torch.stack(normals_list, dim=0) + + return normals + +def load_nokov_succeed_list(root, date): + nokov_succeed_record_path = os.path.join(root ,'record', f'{date}_nokov_succeed.txt') + nokov_succeed_video_list = [] + with open(nokov_succeed_record_path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + nokov_succeed_video_list.append(parts[0]) + + return nokov_succeed_video_list + +def get_valid_video_list(root: str, date: str, consider_pipiline_failed = False, consider_nokov_failed = False, given_list = None, remove_hand = True): + valid_record_path = os.path.join(root ,'record', f'{date}_valid_video_id.txt') + assert os.path.exists(valid_record_path), valid_record_path + + with open(valid_record_path, 'r') as f: + lines = f.readlines() + + valid_video_list = [] + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + valid_video_list.append(parts[0]) + + if remove_hand: + valid_video_list = [id for id in valid_video_list if 'hand' not in id] + + valid_video_list = list(set(valid_video_list)) + valid_video_list.sort() + + if given_list is not None: + # valid_video_list_ = [id for id in given_list if id in valid_video_list] + # valid_video_list = valid_video_list_ + 
valid_video_list = [id for id in valid_video_list if id in given_list] + valid_video_list.sort() + + if consider_pipiline_failed: + pipeline_failed_record_path = os.path.join(root ,'record', f'{date}_pipeline_failed.txt') + if os.path.exists(pipeline_failed_record_path): + failed_video_list = [] + with open(pipeline_failed_record_path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + failed_video_list.append(parts[0]) + valid_video_list = [video_id for video_id in valid_video_list if video_id not in failed_video_list] + valid_video_list.sort() + + if consider_nokov_failed: + nokov_failed_record_path = os.path.join(root ,'record', f'{date}_nokov_failed.txt') + if os.path.exists(nokov_failed_record_path): + failed_video_list = [] + with open(nokov_failed_record_path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + failed_video_list.append(int(parts[0])) + + failed_video_list = [f'{date}_{str(i).zfill(3)}' for i in failed_video_list] + else: + failed_video_list = [] + + nokov_succeed_video_list = load_nokov_succeed_list(root, date) + + valid_video_list = [video_id for video_id in valid_video_list if video_id not in failed_video_list and video_id in nokov_succeed_video_list] + valid_video_list.sort() + + return valid_video_list + +def get_time_diff(root, date, error_threshold = 16, valid_threshold = 3): + ''' + 仅适用于20230930及之后的time_diff文件 + ''' + + time_diff_record_root = os.path.join(root, 'record') + time_diff_data_path = os.path.join(time_diff_record_root, f'{date}_2m1.txt') + + if not os.path.exists(time_diff_data_path): + return {} + + time_diff_data = {} + with open(time_diff_data_path, 'r') as f: + lines = f.readlines() + + for line in lines: + parts = line.strip().split() + if len(parts) == 2: + time_diff_data[parts[1]] = int(parts[0]) + + # denoise + time_diff_array = np.array([v for k, v in time_diff_data.items()]) + invalid_time_diff_list = [] + for time_diff in time_diff_array: + cnt = np.sum((time_diff_array >= time_diff - error_threshold) & (time_diff_array <= time_diff + error_threshold)) + if cnt <= 3: + invalid_time_diff_list.append(time_diff) + keys_to_remove = [k for k, v in time_diff_data.items() if v in invalid_time_diff_list] + for k in keys_to_remove: + del time_diff_data[k] + + return time_diff_data + + +def get_pipeline_failed_video_list(root, date: str, rename_bool = True): + peline_failed_record_path = os.path.join(root, 'record', f'{date}_pipeline_failed.txt') + assert os.path.exists(peline_failed_record_path), peline_failed_record_path + + with open(peline_failed_record_path, 'r') as f: + lines = f.readlines() + + pipeline_failed_video_list = [] + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + pipeline_failed_video_list.append(parts[0]) + + + pipeline_failed_video_list = list(set(pipeline_failed_video_list)) + pipeline_failed_video_list.sort() + + if rename_bool: + time1 = time() + peline_failed_record_dst_path = os.path.join(root, 'record', f'{date}_pipeline_failed_{time1}.txt') + shutil.copy(peline_failed_record_path, peline_failed_record_dst_path) + os.remove(peline_failed_record_path) + + return pipeline_failed_video_list + +def get_num_frame(root, video_id): + metadata_dir = os.path.join(root, video_id, 'metadata') + assert os.path.exists(metadata_dir) + metadata_list = [filename for filename in os.listdir(metadata_dir) if filename.endswith('.pkl')] + assert len(metadata_list) > 0 + metadata_path = 
os.path.join(root, video_id, 'metadata', metadata_list[0]) + + with open(metadata_path, 'rb') as f: + metadata = pickle.load(f) + num_frame = metadata['num_frame'] + + return num_frame + +def get_num_frame_v2(root, video_id): + data = video_id[:8] + metadata_dir = os.path.join(root, data, video_id, 'metadata') + assert os.path.exists(metadata_dir), metadata_dir + metadata_list = [filename for filename in os.listdir(metadata_dir) if filename.endswith('.pkl')] + assert len(metadata_list) > 0 + metadata_path = os.path.join(root, data, video_id, 'metadata', metadata_list[0]) + + with open(metadata_path, 'rb') as f: + metadata = pickle.load(f) + num_frame = metadata['num_frame'] + + return num_frame + +def load_hand_info_batch_acc(hand_info_dir, frame_list, BATCH_SIZE = 20, represent_frame_id = None): + ''' + 与hoi_io中的load_mano_info_batch功能类似,考虑去重 + ''' + + if represent_frame_id is None: + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list, first_batch_len=2) + else: + represent_relation = {represent_frame_id: frame_list} + + pose_batch = [] + trans_batch = [] + + for represent_frame_id, represented_frame_list in represent_relation.items(): + path = os.path.join(hand_info_dir, 'hand_' + represent_frame_id + '.pkl') + with open(path, 'rb') as f: + data_batch = pickle.load(f) + + for frame_id in represented_frame_list: + data = data_batch[frame_id] + pose = data['hand_pose'] + trans = data['hand_trans'] + pose_batch.append(pose) + trans_batch.append(trans) + + pose_batch = torch.stack(pose_batch) + trans_batch = torch.stack(trans_batch) + return pose_batch, trans_batch + +def load_bg_img_with_resize(root, video_id, camera_id, BATCH_SIZE, frame_list, width = None, height = None): + + resize_bool = True if (width is not None and height is not None) else False + + date = video_id[:8] + sub_video_root = os.path.join(root, date, video_id, 'sub_video') + + represent_relation = cal_represent_frame_list(BATCH_SIZE, frame_list) + + full_img_list = [] + for represent_frame_id, represented_frame_list in represent_relation.items(): + + video_path = os.path.join(sub_video_root, camera_id, camera_id + '_' + represent_frame_id + '.mp4') + + cap = cv2.VideoCapture(video_path) + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + cap.set(cv2.CAP_PROP_FOURCC, fourcc) + # fps = cap.get(cv2.CAP_PROP_FPS) + # W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + # H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + suc = cap.isOpened() + img_list = [] + for i in range(BATCH_SIZE): + suc, img = cap.read() + if not suc: + break + if resize_bool: + img = cv2.resize(img, (width, height)) + img_list.append(img) + + # num_img = len(img_list) + + img_list_ = [] + for idx in [int(frame) - int(represent_frame_id) for frame in represented_frame_list]: + # assert idx >= 0 and idx < num_img + img_list_.append(img_list[idx]) + img_list = img_list_ + + for img in img_list: + full_img_list.append(img) + + return full_img_list + +def load_bg_imgs_with_resize(root, video_id, frame_list, camera_list, BATCH_SIZE, width = None, height = None): + + rgb_batch = [] + for c_idx, camera in enumerate(camera_list): + imgs = load_bg_img_with_resize(root, video_id, camera, BATCH_SIZE, frame_list, width, height) + imgs = np.stack(imgs) + rgb_batch.append(imgs) + rgb_batch = np.stack(rgb_batch) + rgb_batch = np.swapaxes(rgb_batch, 0, 1) + + return rgb_batch \ No newline at end of file diff --git a/utils/utils/inference_both_hands.py b/utils/utils/inference_both_hands.py new file mode 100755 index 
0000000000000000000000000000000000000000..8a8fed0e64f54d786e007963c7fc438be0b03311 --- /dev/null +++ b/utils/utils/inference_both_hands.py @@ -0,0 +1,136 @@ +''' +TODO:根据device再进行细分 + +python utils/inference_both_hands.py --video_id 20230715_15 +''' + +from mmpose.apis import MMPoseInferencer +import os.path as osp +import os +from tqdm import tqdm +import argparse +import numpy as np +import multiprocessing as mlp +import torch + +def scandir(dir_path, suffix=None, recursive=False, full_path=False): + if (suffix is not None) and not isinstance(suffix, (str, tuple)): + raise TypeError('"suffix" must be a string or tuple of strings') + + root = dir_path + + def _scandir(dir_path, suffix, recursive): + for entry in os.scandir(dir_path): + if not entry.name.startswith('.') and entry.is_file(): + if full_path: + return_path = entry.path + else: + return_path = osp.relpath(entry.path, root) + + if suffix is None: + yield return_path + elif return_path.endswith(suffix): + yield return_path + else: + if recursive: + yield from _scandir(entry.path, suffix=suffix, recursive=recursive) + else: + continue + + return _scandir(dir_path, suffix=suffix, recursive=recursive) + +def hand_detect(img_path_list, result_dir, gpu): + + device = torch.device(gpu) # 选择要使用的 GPU 设备 + torch.cuda.set_device(device) + + # 使用模型别名创建推断器 + inferencer = MMPoseInferencer('hand') + + # MMPoseInferencer采用了惰性推断方法,在给定输入时创建一个预测生成器 + result_generator = inferencer(img_path_list, out_dir=result_dir) + for path in tqdm(img_path_list): + next(result_generator) + +def handle_divide_by_gpu_capacity(video_id, img_filename_list, save_dir, gpu): + MAX_GPU_CAPACITY = 5 + num_img = len(img_filename_list) + num_img_per_process = np.ceil(num_img / MAX_GPU_CAPACITY).astype(np.uint32) + + procs = [] + for i in range(MAX_GPU_CAPACITY): + start_frame_idx = i * num_img_per_process + end_frame_idx = min(start_frame_idx + num_img_per_process, num_img) # 不包含 + img_filename_list_sub = img_filename_list[start_frame_idx:end_frame_idx] + + args = (img_filename_list_sub, save_dir, gpu) + proc = mlp.Process(target=hand_detect, args=args) + + proc.start() + procs.append(proc) + + for i in range(len(procs)): + procs[i].join() + +def handle_divide_by_num_gpu(video_id, right_hand_bool): + if right_hand_bool: + # hand_dir = os.path.join('/share/hlyang/results', video_id, 'crop_imgs_right_hand') + # save_dir = os.path.join('/share/hlyang/results', video_id, 'mmpose_right_hand') + hand_dir = os.path.join('/share/hlyang/results', video_id, 'crop', 'right_hand') + save_dir = os.path.join('/share/hlyang/results', video_id, 'mmpose', 'right_hand') + else: + # hand_dir = os.path.join('/share/hlyang/results', video_id, 'crop_imgs_left_hand') + # save_dir = os.path.join('/share/hlyang/results', video_id, 'mmpose_left_hand') + hand_dir = os.path.join('/share/hlyang/results', video_id, 'crop', 'left_hand') + save_dir = os.path.join('/share/hlyang/results', video_id, 'mmpose', 'left_hand') + + img_filename_list = list(scandir(hand_dir, suffix='png', recursive=True, full_path=True)) + num_img = len(img_filename_list) + + GPU_list = [1, 2, 3, 4, 6, 7] + NUM_GPU = len(GPU_list) + num_img_per_gpu = np.ceil(num_img / NUM_GPU).astype(np.uint32) + + procs = [] + for i in range(NUM_GPU): + start_frame_idx = i * num_img_per_gpu + end_frame_idx = min(start_frame_idx + num_img_per_gpu, num_img) # 不包含 + img_filename_list_sub = img_filename_list[start_frame_idx:end_frame_idx] + + # print(img_filename_list_sub) + + args = (video_id, img_filename_list_sub, save_dir, GPU_list[i]) + proc = 
mlp.Process(target=handle_divide_by_gpu_capacity, args=args) + + proc.start() + procs.append(proc) + + for i in range(len(procs)): + procs[i].join() + + +if __name__ == "__main__": + torch.multiprocessing.set_start_method('spawn') + + # parser = argparse.ArgumentParser() + # parser.add_argument('--video_id', required=True, type=str) + # args = parser.parse_args() + # video_id = args.video_id + + video_list = [f'20230818_0{i}_old' for i in ('4', '5', '6', '7')] + + for video_id in video_list: + procs = [] + + args = (video_id, True) + proc = mlp.Process(target=handle_divide_by_num_gpu, args=args) + proc.start() + procs.append(proc) + + args = (video_id, False) + proc = mlp.Process(target=handle_divide_by_num_gpu, args=args) + proc.start() + procs.append(proc) + + for i in range(len(procs)): + procs[i].join() \ No newline at end of file diff --git a/utils/utils/interpenetration_volume.py b/utils/utils/interpenetration_volume.py new file mode 100644 index 0000000000000000000000000000000000000000..3a22a649ceb445a919bb9897a4b036d51035f03f --- /dev/null +++ b/utils/utils/interpenetration_volume.py @@ -0,0 +1,94 @@ +import sys +sys.path.append('.') +import numpy as np +import open3d as o3d + +def cal_num_intersection_vox(mesh_x, mesh_y, voxel_size=0.001): + voxel_x = o3d.geometry.VoxelGrid.create_from_triangle_mesh(mesh_x, voxel_size) + voxel_y = o3d.geometry.VoxelGrid.create_from_triangle_mesh(mesh_y, voxel_size) + voxel_list_x = voxel_x.get_voxels() + grid_indexs_x = np.stack([voxel.grid_index for voxel in voxel_list_x]) + voxel_list_y = voxel_y.get_voxels() + grid_indexs_y = np.stack([voxel.grid_index for voxel in voxel_list_y]) + + set_x = {tuple(row) for row in grid_indexs_x} + set_y = {tuple(row) for row in grid_indexs_y} + intersection_set = set_x.intersection(set_y) + intersection_set = np.array(list(intersection_set)) + + num_x = grid_indexs_x.shape[0] + num_y = grid_indexs_y.shape[0] + num_intersection = intersection_set.shape[0] + return num_x, num_y, num_intersection + +def construct_mesh_from_verts_and_faces(verts, faces): + mesh = o3d.geometry.TriangleMesh() + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(verts) + mesh.vertices = pcd.points + mesh.triangles = o3d.utility.Vector3iVector(faces) + return mesh + +if __name__ == "__main__": + from utils.seal_mano import seal_mano_mesh + from manopth.manopth.manolayer import ManoLayer + import torch + + use_pca = False + ncomps = 45 + left_hand_mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='left', center_idx = 0) + right_hand_mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='right', center_idx = 0) + + right_hand_verts, _, _ = right_hand_mano_layer(torch.zeros(1, 48)) + right_hand_faces = right_hand_mano_layer.th_faces.detach() + left_hand_verts, _, _ = left_hand_mano_layer(torch.zeros(1, 48)) + left_hand_faces = left_hand_mano_layer.th_faces.detach() + + right_hand_sealed_vertices, right_hand_faces = seal_mano_mesh(right_hand_verts / 1000.0, right_hand_faces, True) + left_hand_sealed_vertices, left_hand_faces = seal_mano_mesh(left_hand_verts / 1000.0, left_hand_faces, False) + + # right_hand_mesh = trimesh.Trimesh(right_hand_sealed_vertices[0], right_hand_faces) + # left_hand_mesh = trimesh.Trimesh(left_hand_sealed_vertices[0], left_hand_faces) + + # right_hand_vox = right_hand_mesh.voxelized(0.001) + # left_hand_vox = left_hand_mesh.voxelized(0.001) + + # right_hand_mesh = o3d.geometry.TriangleMesh() + # 
right_hand_pcd = o3d.geometry.PointCloud() + # right_hand_pcd.points = o3d.utility.Vector3dVector(right_hand_sealed_vertices.numpy()[0]) + # right_hand_mesh.vertices = right_hand_pcd.points + # right_hand_mesh.triangles = o3d.utility.Vector3iVector(right_hand_faces) + # left_hand_mesh = o3d.geometry.TriangleMesh() + # left_hand_pcd = o3d.geometry.PointCloud() + # left_hand_pcd.points = o3d.utility.Vector3dVector(left_hand_sealed_vertices.numpy()[0]) + # left_hand_mesh.vertices = left_hand_pcd.points + # left_hand_mesh.triangles = o3d.utility.Vector3iVector(left_hand_faces) + + right_hand_mesh = construct_mesh_from_verts_and_faces(right_hand_sealed_vertices.numpy()[0], right_hand_faces) + left_hand_mesh = construct_mesh_from_verts_and_faces(left_hand_sealed_vertices.numpy()[0], left_hand_faces) + + # voxel_size = 0.001 + # right_hand_vox = o3d.geometry.VoxelGrid.create_from_triangle_mesh(right_hand_mesh, voxel_size) + # right_voxel_list = right_hand_vox.get_voxels() + # right_grid_indexs = np.stack([voxel.grid_index for voxel in right_voxel_list]) + # left_hand_vox = o3d.geometry.VoxelGrid.create_from_triangle_mesh(left_hand_mesh, voxel_size) + # left_voxel_list = left_hand_vox.get_voxels() + # left_grid_indexs = np.stack([voxel.grid_index for voxel in left_voxel_list]) + + # right_set = {tuple(row) for row in right_grid_indexs} + # left_set = {tuple(row) for row in left_grid_indexs} + # intersection_set = right_set.intersection(left_set) + # intersection_set = np.array(list(intersection_set)) + + # num_right_hand_vox = right_grid_indexs.shape[0] + # num_left_hand_vox = left_grid_indexs.shape[0] + # num_intersection_vox = intersection_set.shape[0] + # print(num_right_hand_vox, num_left_hand_vox, num_intersection_vox) + # print() + + num_right_hand, num_left_hand, num_intersection = cal_num_intersection_vox(right_hand_mesh, left_hand_mesh) + print(num_right_hand, num_left_hand, num_intersection) + + # o3d.io.write_triangle_mesh("/home/hlyang/HOI/HOI/tmp/right_T_pose.obj", right_hand_mesh) + # o3d.io.write_triangle_mesh("/home/hlyang/HOI/HOI/tmp/left_T_pose.obj", left_hand_mesh) + \ No newline at end of file diff --git a/utils/utils/match_rgb_nokov_timestamp.py b/utils/utils/match_rgb_nokov_timestamp.py new file mode 100644 index 0000000000000000000000000000000000000000..4bab7fea3463e8c7cf0d7fe415987df2938cd7ce --- /dev/null +++ b/utils/utils/match_rgb_nokov_timestamp.py @@ -0,0 +1,99 @@ +import os +import sys +sys.path.append('.') +import numpy as np +from utils.process_frame_loss2 import cal_common_timestamps + +def load_Luster_timestamps(file_path): + ts = [] + with open(file_path, "r") as f: + for line in f: + line = line.strip() + if len(line) == 0: + continue + ts.append(int(line.split(" ")[-1])) + return ts + +def parse_trc(trc_paths): + data_list = [] + for trc_path in trc_paths: + cnt = 0 + data = { + "timestamps": [], + "markers": [], + } + N_marker = None + with open(trc_path, "r") as f: + for line in f: + cnt += 1 + line = line.strip() + if cnt == 4: + while line.find("\t\t") > -1: + line = line.replace("\t\t", "\t") + N_marker = len(line.split("\t")) - 3 + if N_marker == 0: + N_marker = 10 + # print("[parse_trc] file {}: N_marker = {}".format(trc_path, N_marker)) + if cnt <= 6: + continue + # assert N_marker >= 3 + values = line.split("\t") + data["timestamps"].append(int(values[2])) + markers = np.ones((N_marker, 3)).astype(np.float32) * 10000 + for i in range(3, len(values), 3): + x, y, z = values[i : i + 3] + if len(x) > 0: + markers[(i//3) - 1, 0] = float(x) / 1000 + 
markers[(i//3) - 1, 1] = float(y) / 1000 + markers[(i//3) - 1, 2] = float(z) / 1000 + data["markers"].append(markers) + + data["timestamps"] = np.uint64(data["timestamps"]) + data["markers"] = np.float32(data["markers"]) + + data_list.append(data) + + return data_list + +def trc_to_timestamps(trc_data): + NOKOV_ts = trc_data[0]["timestamps"] + day_after_20230829 = (NOKOV_ts.min() - 1693238400000) // 86400000 + NOKOV_ts = list((NOKOV_ts - 1693238400000 - day_after_20230829 * 86400000).astype(np.int64)) + return NOKOV_ts + +if __name__ == '__main__': + + # upload_root = '/data2/HOI-mocap' + upload_root = '/share/datasets/HOI-mocap' + date = '20230930' + + # for i in range(1, 153): + # try: + # trc_video_id = f'{date}_{str(i).zfill(3)}' + # rgb_video_id = f'{date}_{str(i).zfill(3)}' + + trc_video_id = '20230930_002' + + nokov_dir = os.path.join(upload_root, date, trc_video_id, 'nokov') + trc_path = os.path.join(nokov_dir, [file for file in os.listdir(nokov_dir) if file.endswith('.trc')][0]) + # trc_path = '/data2/HOI-mocap/20231027/20231027_002/nokov/20231027_178_039_1-obj_039.trc' + + trc_data = parse_trc([trc_path]) + # print(trc_data[0]['timestamps']) + nokov_ts = trc_to_timestamps(trc_data) + + rgb_video_id = '20230930_001' + rgb_ts_path = os.path.join(upload_root, date, rgb_video_id, 'src', 'common_timestamp.txt') + rgb_ts = load_Luster_timestamps(rgb_ts_path) + # print(rgb_ts) + + ts_list = [nokov_ts, rgb_ts] + + error_threshold = 16 + common_timestamps = cal_common_timestamps(ts_list, error_threshold) + + print(trc_video_id, len(common_timestamps)) + # print(common_timestamps) + # except: + # continue + \ No newline at end of file diff --git a/utils/utils/organize_dataset.py b/utils/utils/organize_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..9343157b618b5b1b7857af5a18eb2d2e17f394e9 --- /dev/null +++ b/utils/utils/organize_dataset.py @@ -0,0 +1,437 @@ +import os +import sys +sys.path.append('.') +import cv2 +import numpy as np +import pickle +from os.path import join +from transforms3d.quaternions import quat2mat, mat2quat +# from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle +import torch +import open3d as o3d + +from utils.hoi_io2 import verts_apply_pose_batch + +def organize_record_file(path): + video_id_list = [] + with open(path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + video_id_list.append(parts[0]) + + video_id_list = list(set(video_id_list)) + video_id_list.sort(key=lambda x:int(x)) + + with open(path, 'w') as f: + for id in video_id_list: + f.write(f'{id}\n') + +def add_a_line(path, line): + with open(path, 'a') as f: + f.write(f'{line}\n') + +def mp42imgs(video_path, return_rgb = True, max_cnt=None, width = None, height = None): + cap = cv2.VideoCapture(video_path) + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + cap.set(cv2.CAP_PROP_FOURCC, fourcc) + fps = cap.get(cv2.CAP_PROP_FPS) + W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + + imgs = [] + + resize_bool = True if (width is not None and height is not None) else False + + suc = cap.isOpened() + cnt = 1 # 原视频第{n}帧,从1开始计数 + while True: + suc, img = cap.read() + if not suc: + break + if return_rgb: + if resize_bool: + imgs.append(cv2.resize(img, (width, height))) + else: + imgs.append(img) + cnt += 1 + if (not max_cnt is None) and (cnt > max_cnt): # 最多取到第max_cnt帧 + break + + cap.release() + + if return_rgb: + return imgs, fps, W, H, cnt - 1 + else: + return 
None, fps, W, H, cnt - 1 + +def txt2intrinsic(txt_path): + intrin = np.eye(3) + dist = np.zeros(5) + cnt = -1 + with open(txt_path, 'r') as f: + for line in f: + cnt += 1 + line = line.strip().split(',') + values = np.float32([float(v) for v in line]) + if cnt <= 2: + intrin[cnt] = values + else: + dist = values + return intrin, dist + +def get_ego_rigid_xrs_path(NOKOV_data_dir): + xrs_path = None + for fn in os.listdir(NOKOV_data_dir): + if (fn[-4:] == ".xrs") and ("helmet" in fn): + assert xrs_path is None + xrs_path = join(NOKOV_data_dir, fn) + return xrs_path + +def parse_xrs(xrs_paths): + + data_list = [] + + for xrs_path in xrs_paths: + cnt = 0 + data = { + "poses": [], + } + with open(xrs_path, "r") as f: + for line in f: + cnt += 1 + line = line.strip() + if cnt <= 11: + continue + values = line.split("\t") + + assert len(values) == 17 + + pose = np.eye(4).astype(np.float32) + pose[:3, 3] = 10000 + t = np.float32([float(values[2]), float(values[3]), float(values[4])]) / 1000 + pose[:3, 3] = t + q = np.float32([float(values[8]), float(values[5]), float(values[6]), float(values[7])]) # xyzw -> wxyz + R = quat2mat(q) + pose[:3, :3] = R + + data["poses"].append(pose) + + data["poses"] = np.float32(data["poses"]) # (N, 4, 4) + + data_list.append(data) + + return data_list + +def load_nokov_objs_params_arctic_style(root, video_id, frame_list): + ''' + + return: tool_verts_batch: [num_frame, num_verts, 3] + tool_faces + obj_verts_batch: [num_frame, num_verts, 3] + obj_faces + ''' + date = video_id[:8] + original_nokov_data_dir = os.path.join(root, date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir) + nokov_data_filenames = os.listdir(original_nokov_data_dir) + + frame_idx_list = [(int(frame)-1) for frame in frame_list] + + tool_id = nokov_data_filenames[0].split('_')[1] # obj1 + obj_id = nokov_data_filenames[0].split('_')[2] # obj2 + + tool_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{tool_id}.npy') + assert os.path.exists(tool_pose_path) + tool_pose_batch = torch.from_numpy(np.load(tool_pose_path))[frame_idx_list, ...] + tool_rot_batch = tool_pose_batch[:, :3, :3] # [num_frame, 3, 3] + tool_anxis_angle_batch = matrix_to_axis_angle(tool_rot_batch) # [num_frame, 3] + tool_trans_batch = tool_pose_batch[:, :3, 3] # [num_frame, 3] + tool_params = torch.cat([tool_anxis_angle_batch, tool_trans_batch], dim=1) + + obj_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{obj_id}.npy') + assert os.path.exists(obj_pose_path) + obj_pose_batch = torch.from_numpy(np.load(obj_pose_path))[frame_idx_list, ...] + obj_rot_batch = obj_pose_batch[:, :3, :3] # [num_frame, 3, 3] + obj_anxis_angle_batch = matrix_to_axis_angle(obj_rot_batch) # [num_frame, 3] + obj_trans_batch = obj_pose_batch[:, :3, 3] # [num_frame, 3] + obj_params = torch.cat([obj_anxis_angle_batch, obj_trans_batch], dim=1) + + return tool_params.numpy(), obj_params.numpy() + +def cvt_mano_info_2_arctic_style(hand_pose, hand_trans, hand_shape): + num_frame = hand_pose.shape[0] + + data = {} + data['rot'] = hand_pose[..., :3].numpy() + data['pose'] = hand_pose[..., 3:].numpy() + data['trans'] = hand_trans.numpy() + if hand_shape is not None: + data['shape'] = hand_shape.numpy() + else: + data['shape'] = np.zeros((10)) + + # data['fitting_err'] = [0. for i in range(num_frame)] + data['fitting_err'] = [0. 
for i in range(num_frame)] + return data + +def load_simplied_nokov_objs_mesh(root, video_id, frame_list = None, use_cm = True): + ''' + + return: tool_verts_batch: [num_frame, num_verts, 3] + tool_faces + obj_verts_batch: [num_frame, num_verts, 3] + obj_faces + ''' + date = video_id[:8] + original_nokov_data_dir = os.path.join(root, date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir), original_nokov_data_dir + nokov_data_filenames = os.listdir(original_nokov_data_dir) + + if frame_list is not None: + frame_idx_list = [(int(frame)-1) for frame in frame_list] + + tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + if use_cm: + tool_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{tool_id}_cm.obj') + else: + tool_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{tool_id}_m.obj') + tool_mesh = o3d.io.read_triangle_mesh(tool_mesh_path) + tool_verts = np.asarray(tool_mesh.vertices) + if use_cm: + tool_verts = torch.from_numpy(tool_verts / 100.).float() + else: + tool_verts = torch.from_numpy(tool_verts).float() + tool_faces = torch.from_numpy(np.asarray(tool_mesh.triangles)) + + tool_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{tool_id}.npy') + assert os.path.exists(tool_pose_path) + if frame_list is None: + tool_pose_batch = torch.from_numpy(np.load(tool_pose_path)) + else: + tool_pose_batch = torch.from_numpy(np.load(tool_pose_path))[frame_idx_list, ...] + tool_verts_batch = verts_apply_pose_batch(tool_verts, tool_pose_batch) + + if use_cm: + obj_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{obj_id}_cm.obj') + else: + obj_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{obj_id}_m.obj') + obj_mesh = o3d.io.read_triangle_mesh(obj_mesh_path) + obj_verts = np.asarray(obj_mesh.vertices) + if use_cm: + obj_verts = torch.from_numpy(obj_verts / 100.).float() + else: + obj_verts = torch.from_numpy(obj_verts).float() + obj_faces = torch.from_numpy(np.asarray(obj_mesh.triangles)) + + obj_pose_path = os.path.join(root, 'HO_poses', date, video_id, 'objpose', f'{obj_id}.npy') + assert os.path.exists(obj_pose_path) + if frame_list is None: + obj_pose_batch = torch.from_numpy(np.load(obj_pose_path)) + else: + obj_pose_batch = torch.from_numpy(np.load(obj_pose_path))[frame_idx_list, ...] 
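+ # Unit note: '*_cm.obj' meshes are stored in centimetres and divided by 100 above to obtain metres
+ # (e.g. a vertex at (12.0, 3.5, 0.8) cm becomes (0.12, 0.035, 0.008) m); '*_m.obj' meshes are used
+ # as-is. Illustrative call (the dataset root and sequence id below are taken from elsewhere in this
+ # repo and are only an assumption here; frame_list=None loads poses for every frame):
+ #   tool_v, tool_f, obj_v, obj_f = load_simplied_nokov_objs_mesh('/share/datasets/HOI-mocap', '20230930_002')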
+ obj_verts_batch = verts_apply_pose_batch(obj_verts, obj_pose_batch) + + return tool_verts_batch, tool_faces, obj_verts_batch, obj_faces + +def load_sequence_names_from_organized_record(path: str, date: str): + organized_sequence_list = [] + with open(path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) > 0 and parts[0].startswith(date): + organized_sequence_list.append(parts[0]) + + organized_sequence_list = list(set(organized_sequence_list)) + organized_sequence_list.sort(key=lambda x:int(x)) + + return organized_sequence_list + +def load_dates_from_organized_record(path: str): + organized_date_list = [] + with open(path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) > 0: + organized_date_list.append(parts[0][:8]) + + organized_date_list = list(set(organized_date_list)) + organized_date_list.sort(key=lambda x:int(x)) + + return organized_date_list + +def load_organized_mano_info(dataset_root, date, video_id, frame_list = None, mano_dirname = 'mano_wo_contact', right_hand_bool = True): + ''' + return:(hand_pose, hand_trans, hand_shape) + hand_pose: torch tensor, shape [num_frame, 48] + hand_trans: torch tensor, shape [num_frame, 3] + hand_shape: None or torch tensor, shape [10] + ''' + + if right_hand_bool: + path = os.path.join(dataset_root, date, video_id, mano_dirname, 'right_hand.pkl') + else: + path = os.path.join(dataset_root, date, video_id, mano_dirname, 'left_hand.pkl') + + with open(path, 'rb') as f: + data = pickle.load(f) + + if frame_list is None: + frame_list = list(data.keys()) + frame_list.sort() + hand_pose = [] + hand_trans = [] + for frame in frame_list: + hand_pose.append(data[frame]['hand_pose']) + hand_trans.append(data[frame]['hand_trans']) + + hand_pose = torch.stack(hand_pose) + hand_trans = torch.stack(hand_trans) + + if right_hand_bool: + shape_path = os.path.join(dataset_root, date, video_id, 'src', 'right_hand_shape.pkl') + else: + shape_path = os.path.join(dataset_root, date, video_id, 'src', 'left_hand_shape.pkl') + + if os.path.exists(shape_path): + with open(shape_path, 'rb') as f: + hand_shape = pickle.load(f)['hand_shape'] + else: + hand_shape = None + + return (hand_pose, hand_trans, hand_shape) + +def load_zero_nokov_objs_mesh(root, video_id, use_cm = True): + ''' + + return: tool_verts_batch: [num_verts, 3] + tool_faces + obj_verts_batch: [num_verts, 3] + obj_faces + ''' + date = video_id[:8] + original_nokov_data_dir = os.path.join(root, date, video_id, 'nokov') + assert os.path.exists(original_nokov_data_dir) + nokov_data_filenames = os.listdir(original_nokov_data_dir) + + tool_id = nokov_data_filenames[0].split("_")[1] # obj1 + obj_id = nokov_data_filenames[0].split("_")[2] # obj2 + + if use_cm: + tool_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{tool_id}_cm.obj') + else: + tool_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{tool_id}_m.obj') + tool_mesh = o3d.io.read_triangle_mesh(tool_mesh_path) + tool_verts = np.asarray(tool_mesh.vertices) + if use_cm: + tool_verts = torch.from_numpy(tool_verts / 100.).float() + else: + tool_verts = torch.from_numpy(tool_verts).float() + tool_faces = torch.from_numpy(np.asarray(tool_mesh.triangles)) + + if use_cm: + obj_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{obj_id}_cm.obj') + else: + obj_mesh_path = os.path.join(root, 'object_models_final_simplied', f'{obj_id}_m.obj') + obj_mesh = o3d.io.read_triangle_mesh(obj_mesh_path) + obj_verts = 
np.asarray(obj_mesh.vertices) + if use_cm: + obj_verts = torch.from_numpy(obj_verts / 100.).float() + else: + obj_verts = torch.from_numpy(obj_verts).float() + obj_faces = torch.from_numpy(np.asarray(obj_mesh.triangles)) + return tool_verts, tool_faces, obj_verts, obj_faces + +def load_organized_rgb_batch_with_resize(dataset_root, date, video_id, frame_list, camera_list, width, height): + + frame_idx_list = [(int(frame)-1) for frame in frame_list] + + rgb_batch = [] + for camera in camera_list: + path = os.path.join(dataset_root, date, video_id, 'rgb', f'{camera}.mp4') + imgs, _, _, _, cnt = mp42imgs(path, return_rgb=True, width=width, height=height) + imgs = np.stack(imgs) + rgb_batch.append(imgs) + + rgb_batch = np.stack(rgb_batch) + rgb_batch = rgb_batch[:, frame_idx_list, ...] + rgb_batch = np.swapaxes(rgb_batch, 0, 1) + + return rgb_batch + +def load_organized_ego_rgb_with_resize(dataset_root, date, video_id, frame_list, width, height): + + if frame_list is not None: + frame_idx_list = [(int(frame)-1) for frame in frame_list] + + path = os.path.join(dataset_root, date, video_id, 'egocentric_rgb.mp4') + imgs, _, _, _, cnt = mp42imgs(path, return_rgb=True, width=width, height=height) + imgs = np.stack(imgs) + if frame_list is not None: + imgs = imgs[frame_idx_list, ...] + + return imgs + +def load_interaction_field(root, date, video_id): + save_path = os.path.join(root, 'interaction_field', date, f'{video_id}.pkl') + with open(save_path, 'rb') as f: + data = pickle.load(f) + + F_right_hand_2_tool = data['F_right_hand_2_tool'] + F_right_hand_2_tool_idxs = data['F_right_hand_2_tool_idxs'] + F_tool_2_right_hand = data['F_tool_2_right_hand'] + F_tool_2_right_hand_idxs = data['F_tool_2_right_hand_idxs'] + F_left_hand_2_obj = data['F_left_hand_2_obj'] + F_left_hand_2_obj_idxs = data['F_left_hand_2_obj_idxs'] + F_obj_2_left_hand = data['F_obj_2_left_hand'] + F_obj_2_left_hand_idxs = data['F_obj_2_left_hand_idxs'] + F_tool_2_obj = data['F_tool_2_obj'] + F_tool_2_obj_idxs = data['F_tool_2_obj_idxs'] + F_obj_2_tool = data['F_obj_2_tool'] + F_obj_2_tool_idxs = data['F_obj_2_tool_idxs'] + + return F_right_hand_2_tool, F_right_hand_2_tool_idxs, F_tool_2_right_hand, F_tool_2_right_hand_idxs, F_left_hand_2_obj, F_left_hand_2_obj_idxs, F_obj_2_left_hand, F_obj_2_left_hand_idxs, F_tool_2_obj, F_tool_2_obj_idxs, F_obj_2_tool, F_obj_2_tool_idxs + +def cvt_interaction_field_2_colors(interaction_field_list): + + colors = [] + + for interaction_field in interaction_field_list: + + device = interaction_field.device + # interaction_field = interaction_field.exp() + # interaction_field = torch.pow(1000000.0, interaction_field) + interaction_field = torch.clip(interaction_field, 0., 0.01) + + # min_weight = interaction_field.min() + # max_weight = interaction_field.max() + min_weight = 0. 
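+ # With the fixed range [0., 0.01] set here, each clipped field value v is mapped linearly to an
+ # RGB colour (r, r, b) with r = (0.01 - v) / 0.01 and b = v / 0.01, for example:
+ #   v = 0.0   -> (1.0, 1.0, 0.0)
+ #   v = 0.005 -> (0.5, 0.5, 0.5)
+ #   v = 0.01  -> (0.0, 0.0, 1.0)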
+ max_weight = 0.01 + + red = (max_weight - interaction_field) / (max_weight - min_weight) + blue = (interaction_field - min_weight) / (max_weight - min_weight) + color = torch.stack((red, red, blue)).transpose(0, 1) # RGB + # color = torch.stack((red, torch.zeros_like(red, device=device), blue)).transpose(0, 1) # RGB + # color = torch.stack((blue, torch.zeros_like(red, device=device), red)).transpose(0, 1) # BRG + + + colors.append(color) + + return colors + +def load_nokov_succeed_list(root, date): + nokov_succeed_record_path = os.path.join(root ,'record', f'{date}_nokov_succeed.txt') + nokov_succeed_video_list = [] + with open(nokov_succeed_record_path, 'r') as f: + lines = f.readlines() + for line in lines: + parts = line.strip().split() + if len(parts) == 1: + nokov_succeed_video_list.append(parts[0]) + + return nokov_succeed_video_list \ No newline at end of file diff --git a/utils/utils/process_frame_loss.py b/utils/utils/process_frame_loss.py new file mode 100755 index 0000000000000000000000000000000000000000..61afcb5ee5f98572c4a629bc4c50d231a49ab9b0 --- /dev/null +++ b/utils/utils/process_frame_loss.py @@ -0,0 +1,384 @@ +''' +解析录制时的{camera_id}__FrameTimeStamp.txt,根据timestamp丢弃录制时有丢失的帧,并将剩余的帧数据存为pkl。之后video2img.py等一系列程序将基于该pkl进行处理。 + +在/share/hlyang/results/record生成{date}_invalid_video_id.txt, {date}_match_failed_video_id.txt, {date}_valid_video_id.txt等 + +example: +python utils/process_frame_loss.py --root_dir /share/datasets/HOI-mocap/20230904 --video_id 20230904_01 +''' +import traceback + +import os +import sys +sys.path.append('.') +import argparse +import pickle +from shutil import copy +import numpy as np +from tqdm import tqdm +from utils.hoi_io import get_valid_video_list, get_time_diff +from time import time + +def is_monotonic(seq): + if len(seq) <= 1: + return True # 空序列或只有一个元素的序列被认为是单调递增的 + + for i in range(1, len(seq)): + if seq[i] < seq[i - 1]: + return False + + return True + +def get_computer_time_diff(date, error_threshold): + time_diff_record_root = '/share/hlyang/results/record' + time_diff_data_path = os.path.join(time_diff_record_root, f'{date}_2m1.txt') + + assert os.path.exists(time_diff_data_path), time_diff_data_path + with open(time_diff_data_path, 'r') as f: + lines = f.readlines() + time_diff_list = [] + for line in lines: + parts = line.strip().split() + if len(parts) > 0: + time_diff = parts[0] + time_diff_list.append(int(time_diff)) + + # TODO: 解决帧数相同,但两台电脑算出的时间戳之差相差太大 + assert len(time_diff_list) > 0 + + # time_diff_mean = np.array(time_diff_list).mean().astype(np.int32) + + time_diff_array = np.array(time_diff_list) + cnt_list = [] + for time_diff in time_diff_array: + cnt = np.sum((time_diff_array >= time_diff - error_threshold) & (time_diff_array <= time_diff + error_threshold)) + cnt_list.append(cnt) + cnt_array = np.array(cnt_list) + max_idx = np.argmax(cnt_array) + reasonable_time_diff = time_diff_array[max_idx] + + # for time_diff in time_diff_list: + # assert abs(time_diff - time_diff_mean) <= error_threshold + + return reasonable_time_diff + +def cal_common_timestamps(timestamps_list, error_threshold=16): + + # TODO 各个视角视角戳数量一致,就直接算平均数作为common_ts,并且短路 + timestamps_list = [np.array(timestamps) for timestamps in timestamps_list] + + common_timestamps = timestamps_list[0] + for t_idx, timestamps in enumerate(timestamps_list[1:]): + common_timestamps_ = [] + for timestamp in timestamps: + condition = (common_timestamps >= timestamp - error_threshold) & (common_timestamps <= timestamp + error_threshold) + within_range = 
common_timestamps[condition] + + if len(within_range) == 1: # 匹配上了 + res = within_range[0] + # 做个平滑 + modified_cm_ts = (timestamp + res) // 2 + common_timestamps_.append(modified_cm_ts) + elif len(within_range) == 0: # 没匹配上 + continue + else: # ??? + # print(camera_list[t_idx + 1], within_range) + res = within_range[np.abs(within_range - timestamp).argmin()] + modified_cm_ts = (timestamp + res) // 2 + common_timestamps_.append(modified_cm_ts) + # raise ValueError(f'len(within_range) should be 0 or 1, but got {len(within_range)}') + + common_timestamps = np.array(common_timestamps_) + + return common_timestamps.tolist() + +def process_frame_loss(camera_list, root_dir, video_id, time_diff, error_threshold): + + date = video_id[:8] + + assert os.path.exists(root_dir) + video_dir = os.path.join(root_dir, video_id, 'rgb') + assert os.path.exists(video_dir), video_dir + + # check if files exist + for camera_id in camera_list: + timestamp_path = os.path.join(video_dir, camera_id + '_FrameTimeStamp.txt') + video_path = os.path.join(video_dir, camera_id + '.mp4') + if not os.path.exists(timestamp_path) or not os.path.exists(video_path): + date = video_id[:8] + valid_record_path = os.path.join('/share/hlyang/results/reference_record', f'{date}_invalid_video_id.txt') + with open(valid_record_path, 'a') as f: + f.write(f'{video_id}\n') + return + + computer1_camera_list = ['21218078', '22139906', '22139908', '22139910', '22139911', '22139913', '22139914', '22139946'] + computer2_camera_list = [camera for camera in camera_list if camera not in computer1_camera_list] + assert len(computer2_camera_list) == 4 + computer1_ids = [camera_list.index(camera) for camera in computer1_camera_list] + computer2_ids = [camera_list.index(camera) for camera in computer2_camera_list] + + timestamps_list = [] + for camera_id in camera_list: + timestamp_path = os.path.join(video_dir, camera_id + '_FrameTimeStamp.txt') + + with open(timestamp_path, 'r') as f: + lines = f.readlines() + + timestamps = [] + cnt = 0 + for line in lines: + parts = line.strip().split() + if len(parts) == 2: + timestamp = parts[1] + timestamps.append(int(timestamp)) + cnt += 1 + + timestamps_list.append(timestamps) + + # 使用计算出的误差 + modified_timestamps_list = [] + for idx, timestamps in enumerate(timestamps_list): + if idx in computer2_ids: + modified_timestamps = [timestamp - time_diff for timestamp in timestamps] + modified_timestamps_list.append(modified_timestamps) + else: + modified_timestamps_list.append(timestamps) + + common_timestamps = cal_common_timestamps(modified_timestamps_list, error_threshold) + num_common_frame = len(common_timestamps) + + if num_common_frame <= 0: + match_failed_record_path = os.path.join('/share/hlyang/results/reference_record', f'{date}_match_failed_video_id.txt') + with open(match_failed_record_path, 'a') as f: + f.write(f'{video_id}\n') + return + + valid_record_path = os.path.join('/share/hlyang/results/record', f'{date}_valid_video_id.txt') + with open(valid_record_path, 'a') as f: + f.write(f'{video_id}\n') + + result_dir = os.path.join('/share/hlyang/results', date, video_id) + os.makedirs(result_dir, exist_ok=True) + metadata_dir = os.path.join(result_dir, 'metadata') + os.makedirs(metadata_dir, exist_ok=True) + + common_frame_record_path = os.path.join(metadata_dir, 'common_timestamp.txt') + with open(common_frame_record_path, 'w') as f: + for timestamp in common_timestamps: + f.write(f'{timestamp}\n') + + for idx, camera_id in enumerate(camera_list): + cnt2frame_id_dict = {} + frame_id2cnt_dict = {} + 
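+ # For every common timestamp, the loop below finds the matching frame in this camera's own timestamp list: cnt is the 1-based frame index in the raw recording, frame_id the zero-padded 1-based index in the synchronised sequence; both mappings are stored in the per-camera metadata pickle.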
cnt_list = [] + for idx2, cm_ts in enumerate(common_timestamps): + for original_ts_idx, timestamp in enumerate(modified_timestamps_list[idx]): + if abs(cm_ts - timestamp) <= error_threshold: + idx_in_original_ts_list = original_ts_idx + break + + # frame_id从00001开始记录 + cnt = idx_in_original_ts_list + 1 + frame_id = str(idx2 + 1).zfill(5) + cnt_list.append(cnt) + cnt2frame_id_dict[cnt] = frame_id + frame_id2cnt_dict[frame_id] = cnt + + assert len(cnt_list) == num_common_frame + metadata = {'num_frame': num_common_frame, 'original_num_frame': len(modified_timestamps_list[idx]), 'cnt_list':cnt_list, 'cnt2frame_id_dict': cnt2frame_id_dict, 'frame_id2cnt_dict': frame_id2cnt_dict} + + metadata_path = os.path.join(metadata_dir, camera_id+'.pkl') + with open(metadata_path, 'wb') as f: + pickle.dump(metadata, f) + +def get_reasonable_time_diff(video_id, time_diff_data): + # if video_id in time_diff_data.keys(): + # return time_diff_data[video_id] + # else: + video_id_list_with_time_diff = list(time_diff_data.keys()) + + # 如果就在数据中 + if video_id in video_id_list_with_time_diff: + return time_diff_data[video_id] + + # 如果不在数据中,那就匹配 + if video_id[-3:].isdigit(): + abs_arr = np.array([abs(int(video_id[-3:]) - int(k[-3:])) for k in video_id_list_with_time_diff if k[-3:].isdigit()]) + min_idx = np.argmin(abs_arr) + reasonable_video_id = video_id_list_with_time_diff[min_idx] + else: # 人手怎么办,暂定是用第一个 + reasonable_video_id = video_id_list_with_time_diff[0] + + return time_diff_data[reasonable_video_id] + +def modify_timestamps(video_id, time_diff_data, timestamps_list, computer2_ids): + modified_timestamps_list = [] + + time_diff = get_reasonable_time_diff(video_id, time_diff_data) + + for idx, timestamps in enumerate(timestamps_list): + if idx in computer2_ids: + modified_timestamps = [timestamp - time_diff for timestamp in timestamps] + modified_timestamps_list.append(modified_timestamps) + else: + modified_timestamps_list.append(timestamps) + + return modified_timestamps_list + +def process_frame_loss2(camera_list, root_dir, time_diff_data, video_id, error_threshold): + + date = video_id[:8] + + assert os.path.exists(root_dir) + video_dir = os.path.join(root_dir, video_id, 'rgb') + assert os.path.exists(video_dir), video_dir + + # check if files exist + for camera_id in camera_list: + timestamp_path = os.path.join(video_dir, camera_id + '_FrameTimeStamp.txt') + video_path = os.path.join(video_dir, camera_id + '.mp4') + if not os.path.exists(timestamp_path) or not os.path.exists(video_path): + date = video_id[:8] + valid_record_path = os.path.join('/share/hlyang/results/reference_record', f'{date}_invalid_video_id.txt') + with open(valid_record_path, 'a') as f: + f.write(f'{video_id}\n') + return + + computer1_camera_list = ['21218078', '22139906', '22139908', '22139910', '22139911', '22139913', '22139914', '22139946'] + computer2_camera_list = [camera for camera in camera_list if camera not in computer1_camera_list] + assert len(computer2_camera_list) == 4 + computer1_ids = [camera_list.index(camera) for camera in computer1_camera_list] + computer2_ids = [camera_list.index(camera) for camera in computer2_camera_list] + + timestamps_list = [] + for camera_id in camera_list: + timestamp_path = os.path.join(video_dir, camera_id + '_FrameTimeStamp.txt') + + with open(timestamp_path, 'r') as f: + lines = f.readlines() + + timestamps = [] + cnt = 0 + for line in lines: + parts = line.strip().split() + if len(parts) == 2: + timestamp = parts[1] + timestamps.append(int(timestamp)) + cnt += 1 + + 
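+ # one timestamp list per camera, in camera_list order; entries recorded on the second computer are shifted by its clock offset in modify_timestamps() below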
timestamps_list.append(timestamps) + + # # 使用计算出的误差 + # modified_timestamps_list = [] + # for idx, timestamps in enumerate(timestamps_list): + # if idx in computer2_ids: + # modified_timestamps = [timestamp - time_diff for timestamp in timestamps] + # modified_timestamps_list.append(modified_timestamps) + # else: + # modified_timestamps_list.append(timestamps) + + modified_timestamps_list = modify_timestamps(video_id, time_diff_data, timestamps_list, computer2_ids) + # for mtsl in modified_timestamps_list: + # print(mtsl[0]) + + common_timestamps = cal_common_timestamps(modified_timestamps_list, error_threshold) + num_common_frame = len(common_timestamps) + + if num_common_frame <= 0: + match_failed_record_path = os.path.join('/share/hlyang/results/reference_record', f'{date}_match_failed_video_id.txt') + with open(match_failed_record_path, 'a') as f: + f.write(f'{video_id}\n') + return + + valid_record_path = os.path.join('/share/hlyang/results/record', f'{date}_valid_video_id.txt') + with open(valid_record_path, 'a') as f: + f.write(f'{video_id}\n') + + result_dir = os.path.join('/share/hlyang/results', date, video_id) + # result_dir = os.path.join('/share/hlyang/results', video_id) + + os.makedirs(result_dir, exist_ok=True) + metadata_dir = os.path.join(result_dir, 'metadata') + os.makedirs(metadata_dir, exist_ok=True) + + common_frame_record_path = os.path.join(metadata_dir, 'common_timestamp.txt') + with open(common_frame_record_path, 'w') as f: + for timestamp in common_timestamps: + f.write(f'{timestamp}\n') + + line = f'{video_id}: {num_common_frame} ' + for idx, camera_id in enumerate(camera_list): + cnt2frame_id_dict = {} + frame_id2cnt_dict = {} + cnt_list = [] + for idx2, cm_ts in enumerate(common_timestamps): + for original_ts_idx, timestamp in enumerate(modified_timestamps_list[idx]): + if abs(cm_ts - timestamp) <= error_threshold: + idx_in_original_ts_list = original_ts_idx + break + + # frame_id从00001开始记录 + cnt = idx_in_original_ts_list + 1 + frame_id = str(idx2 + 1).zfill(5) + cnt_list.append(cnt) + cnt2frame_id_dict[cnt] = frame_id + frame_id2cnt_dict[frame_id] = cnt + + assert len(cnt_list) == num_common_frame + metadata = {'num_frame': num_common_frame, 'original_num_frame': len(modified_timestamps_list[idx]), 'cnt_list':cnt_list, 'cnt2frame_id_dict': cnt2frame_id_dict, 'frame_id2cnt_dict': frame_id2cnt_dict} + + metadata_path = os.path.join(metadata_dir, camera_id+'.pkl') + with open(metadata_path, 'wb') as f: + pickle.dump(metadata, f) + + line += f'{camera_id}: {len(modified_timestamps_list[idx])} ' + line += '\n' + match_record_path = f'/share/hlyang/results/reference_record/{date}_match_record.txt' + with open(match_record_path, 'a') as f: + f.write(line) + +if __name__ == '__main__': + + # camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946'] + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + + + date = '20230919' + root_dir = f'/share/datasets/HOI-mocap/{date}' + + result_root = os.path.join('/share/hlyang/results', date) + os.makedirs(result_root, exist_ok=True) + + dir_list = os.listdir(root_dir) + video_list = [dir for dir in dir_list if dir != 'camera_params' and 'cali' not in dir and not dir.endswith('txt')] + + video_list.sort() + + print(video_list) + + error_threshold = 17 + + # timestamp_diff_2m1_mean = get_computer_time_diff(date, error_threshold) + + # time_diff_data = 
get_time_diff(date) + + # for video_id in tqdm(video_list): + # try: + # # process_frame_loss(camera_list, root_dir, video_id, timestamp_diff_2m1_mean, error_threshold) + # process_frame_loss2(camera_list, root_dir, time_diff_data, video_id, error_threshold) + + # except Exception as error: + # traceback.print_exc() + # print(error) + # continue + + video_list = get_valid_video_list(date, remove_hand=False) + # video_list = [id for id in video_list if 'hand' in id] + for video_id in tqdm(video_list): + # cp calibration file + src_cali_path = os.path.join(root_dir, video_id, 'src', 'calibration.json') + # dst_src_dir = os.path.join('/share/hlyang/results', video_id, 'src') + dst_src_dir = os.path.join('/share/hlyang/results', date, video_id, 'src') + os.makedirs(dst_src_dir, exist_ok=True) + dst_cali_path = os.path.join(dst_src_dir, 'calibration.json') + copy(src_cali_path, dst_cali_path) \ No newline at end of file diff --git a/utils/utils/process_frame_loss2.py b/utils/utils/process_frame_loss2.py new file mode 100755 index 0000000000000000000000000000000000000000..730c8498eeaa39bfa6fc536ac849cc1e9388cb7d --- /dev/null +++ b/utils/utils/process_frame_loss2.py @@ -0,0 +1,248 @@ +''' +解析录制时的{camera_id}__FrameTimeStamp.txt,根据timestamp丢弃录制时有丢失的帧,并将剩余的帧数据存为pkl。之后video2img.py等一系列程序将基于该pkl进行处理。 + +在/share/hlyang/results/record生成{date}_invalid_video_id.txt, {date}_match_failed_video_id.txt, {date}_valid_video_id.txt等 + +example: +python utils/process_frame_loss.py --root_dir /share/datasets/HOI-mocap/20230904 --video_id 20230904_01 +''' +import traceback + +import os +import sys +sys.path.append('.') +import argparse +import pickle +from shutil import copy +import numpy as np +from tqdm import tqdm +from utils.hoi_io2 import get_valid_video_list, get_time_diff +from time import time + +def is_monotonic(seq): + if len(seq) <= 1: + return True # 空序列或只有一个元素的序列被认为是单调递增的 + + for i in range(1, len(seq)): + if seq[i] < seq[i - 1]: + return False + + return True + +def cal_common_timestamps(timestamps_list, error_threshold=16): + + # TODO 各个视角视角戳数量一致,就直接算平均数作为common_ts,并且短路 + timestamps_list = [np.array(timestamps) for timestamps in timestamps_list] + + common_timestamps = timestamps_list[0] + for t_idx, timestamps in enumerate(timestamps_list[1:]): + common_timestamps_ = [] + for timestamp in timestamps: + condition = (common_timestamps >= timestamp - error_threshold) & (common_timestamps <= timestamp + error_threshold) + within_range = common_timestamps[condition] + + if len(within_range) == 1: # 匹配上了 + res = within_range[0] + # 做个平滑 + modified_cm_ts = (timestamp + res) // 2 + common_timestamps_.append(modified_cm_ts) + elif len(within_range) == 0: # 没匹配上 + continue + else: # ??? 
+ # print(camera_list[t_idx + 1], within_range) + res = within_range[np.abs(within_range - timestamp).argmin()] + modified_cm_ts = (timestamp + res) // 2 + common_timestamps_.append(modified_cm_ts) + # raise ValueError(f'len(within_range) should be 0 or 1, but got {len(within_range)}') + + common_timestamps = np.array(common_timestamps_) + + return common_timestamps.tolist() + +def get_reasonable_time_diff(video_id, time_diff_data): + # if video_id in time_diff_data.keys(): + # return time_diff_data[video_id] + # else: + video_id_list_with_time_diff = list(time_diff_data.keys()) + + # 如果就在数据中 + if video_id in video_id_list_with_time_diff: + return time_diff_data[video_id] + + # 如果不在数据中,那就匹配 + if video_id[-3:].isdigit(): + abs_arr = np.array([abs(int(video_id[-3:]) - int(k[-3:])) for k in video_id_list_with_time_diff if k[-3:].isdigit()]) + min_idx = np.argmin(abs_arr) + reasonable_video_id = video_id_list_with_time_diff[min_idx] + else: # 人手怎么办,暂定是用第一个 + reasonable_video_id = video_id_list_with_time_diff[0] + + return time_diff_data[reasonable_video_id] + +def modify_timestamps(video_id, time_diff_data, timestamps_list, computer2_ids): + modified_timestamps_list = [] + + time_diff = get_reasonable_time_diff(video_id, time_diff_data) + + for idx, timestamps in enumerate(timestamps_list): + if idx in computer2_ids: + modified_timestamps = [timestamp - time_diff for timestamp in timestamps] + modified_timestamps_list.append(modified_timestamps) + else: + modified_timestamps_list.append(timestamps) + + return modified_timestamps_list + +def process_frame_loss2(camera_list, upload_date_root, save_root, time_diff_data, video_id, error_threshold): + + date = video_id[:8] + + assert os.path.exists(upload_date_root) + video_dir = os.path.join(upload_date_root, video_id, 'rgb') + assert os.path.exists(video_dir), video_dir + + # check if files exist + for camera_id in camera_list: + timestamp_path = os.path.join(video_dir, camera_id + '_FrameTimeStamp.txt') + video_path = os.path.join(video_dir, camera_id + '.mp4') + if not os.path.exists(timestamp_path) or not os.path.exists(video_path): + date = video_id[:8] + valid_record_path = os.path.join(save_root, 'reference_record', f'{date}_invalid_video_id.txt') + with open(valid_record_path, 'a') as f: + f.write(f'{video_id}\n') + return + + computer1_camera_list = ['21218078', '22139906', '22139908', '22139910', '22139911', '22139913', '22139914', '22139946'] + computer2_camera_list = [camera for camera in camera_list if camera not in computer1_camera_list] + assert len(computer2_camera_list) == 4 + computer1_ids = [camera_list.index(camera) for camera in computer1_camera_list] + computer2_ids = [camera_list.index(camera) for camera in computer2_camera_list] + + timestamps_list = [] + for camera_id in camera_list: + timestamp_path = os.path.join(video_dir, camera_id + '_FrameTimeStamp.txt') + + with open(timestamp_path, 'r') as f: + lines = f.readlines() + + timestamps = [] + cnt = 0 + for line in lines: + parts = line.strip().split() + if len(parts) == 2: + timestamp = parts[1] + timestamps.append(int(timestamp)) + cnt += 1 + + timestamps_list.append(timestamps) + + # # 使用计算出的误差 + # modified_timestamps_list = [] + # for idx, timestamps in enumerate(timestamps_list): + # if idx in computer2_ids: + # modified_timestamps = [timestamp - time_diff for timestamp in timestamps] + # modified_timestamps_list.append(modified_timestamps) + # else: + # modified_timestamps_list.append(timestamps) + + modified_timestamps_list = modify_timestamps(video_id, 
time_diff_data, timestamps_list, computer2_ids) + # for mtsl in modified_timestamps_list: + # print(mtsl[0]) + + common_timestamps = cal_common_timestamps(modified_timestamps_list, error_threshold) + num_common_frame = len(common_timestamps) + + if num_common_frame <= 0: + match_failed_record_path = os.path.join(save_root, 'reference_record', f'{date}_match_failed_video_id.txt') + with open(match_failed_record_path, 'a') as f: + f.write(f'{video_id}\n') + return + + valid_record_path = os.path.join(save_root, 'record', f'{date}_valid_video_id.txt') + with open(valid_record_path, 'a') as f: + f.write(f'{video_id}\n') + + result_dir = os.path.join(save_root, date, video_id) + + os.makedirs(result_dir, exist_ok=True) + metadata_dir = os.path.join(result_dir, 'metadata') + os.makedirs(metadata_dir, exist_ok=True) + + common_frame_record_path = os.path.join(metadata_dir, 'common_timestamp.txt') + with open(common_frame_record_path, 'w') as f: + for timestamp in common_timestamps: + f.write(f'{timestamp}\n') + + line = f'{video_id}: {num_common_frame} ' + for idx, camera_id in enumerate(camera_list): + cnt2frame_id_dict = {} + frame_id2cnt_dict = {} + cnt_list = [] + for idx2, cm_ts in enumerate(common_timestamps): + for original_ts_idx, timestamp in enumerate(modified_timestamps_list[idx]): + if abs(cm_ts - timestamp) <= error_threshold: + idx_in_original_ts_list = original_ts_idx + break + + # frame_id从00001开始记录 + cnt = idx_in_original_ts_list + 1 + frame_id = str(idx2 + 1).zfill(5) + cnt_list.append(cnt) + cnt2frame_id_dict[cnt] = frame_id + frame_id2cnt_dict[frame_id] = cnt + + assert len(cnt_list) == num_common_frame + # print(camera_id, len(modified_timestamps_list[idx]), cnt2frame_id_dict) + metadata = {'num_frame': num_common_frame, 'original_num_frame': len(modified_timestamps_list[idx]), 'cnt_list':cnt_list, 'cnt2frame_id_dict': cnt2frame_id_dict, 'frame_id2cnt_dict': frame_id2cnt_dict} + + metadata_path = os.path.join(metadata_dir, camera_id+'.pkl') + with open(metadata_path, 'wb') as f: + pickle.dump(metadata, f) + + line += f'{camera_id}: {len(modified_timestamps_list[idx])} ' + line += '\n' + match_record_path = os.path.join(save_root, 'reference_record', f'{date}_match_record.txt') + with open(match_record_path, 'a') as f: + f.write(line) + +if __name__ == '__main__': + + # camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946'] + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + + date = '20230930' + upload_root_dir = '/data2/HOI-mocap' + upload_date_root = os.path.join(upload_root_dir, date) + + save_root = '/data2/hlyang/results' + save_date_root = os.path.join(save_root, date) + os.makedirs(save_date_root, exist_ok=True) + + dir_list = os.listdir(upload_date_root) + video_list = [dir for dir in dir_list if dir != 'camera_params' and 'cali' not in dir and not dir.endswith('txt')] + + video_list.sort() + + print(video_list) + + error_threshold = 17 + + time_diff_data = get_time_diff(save_root, date) + + for video_id in tqdm(video_list): + try: + process_frame_loss2(camera_list, upload_date_root, save_root, time_diff_data, video_id, error_threshold) + + except Exception as error: + traceback.print_exc() + print(error) + continue + + video_list = get_valid_video_list(save_root, date, remove_hand=False) + for video_id in tqdm(video_list): + # cp calibration file + src_cali_path = os.path.join(upload_date_root, 
video_id, 'src', 'calibration.json') + dst_src_dir = os.path.join(save_date_root, video_id, 'src') + os.makedirs(dst_src_dir, exist_ok=True) + dst_cali_path = os.path.join(dst_src_dir, 'calibration.json') + copy(src_cali_path, dst_cali_path) \ No newline at end of file diff --git a/utils/utils/process_frame_loss_global.py b/utils/utils/process_frame_loss_global.py new file mode 100755 index 0000000000000000000000000000000000000000..78f0f4d549806e1840fe44c4cc60a00b7a862b4c --- /dev/null +++ b/utils/utils/process_frame_loss_global.py @@ -0,0 +1,223 @@ +''' +解析录制时的{camera_id}__FrameTimeStamp.txt,根据timestamp丢弃录制时有丢失的帧,并将剩余的帧数据存为pkl。之后video2img.py等一系列程序将基于该pkl进行处理。 + +在/share/hlyang/results/record生成{date}_invalid_video_id.txt, {date}_match_failed_video_id.txt, {date}_valid_video_id.txt等 + +example: +python utils/process_frame_loss.py --root_dir /share/datasets/HOI-mocap/20230904 --video_id 20230904_01 +''' +import traceback + +import os +import sys +sys.path.append('.') +import argparse +import pickle +from shutil import copy +import numpy as np +from tqdm import tqdm +from utils.hoi_io import get_valid_video_list +from time import time + +def is_monotonic(seq): + if len(seq) <= 1: + return True # 空序列或只有一个元素的序列被认为是单调递增的 + + for i in range(1, len(seq)): + if seq[i] < seq[i - 1]: + return False + + return True + +def get_computer_time_diff(date, error_threshold): + time_diff_record_root = '/share/hlyang/results/record' + time_diff_data_path = os.path.join(time_diff_record_root, f'{date}_2m1.txt') + + assert os.path.exists(time_diff_data_path), time_diff_data_path + with open(time_diff_data_path, 'r') as f: + lines = f.readlines() + time_diff_list = [] + for line in lines: + parts = line.strip().split() + if len(parts) >= 1: + time_diff = parts[0] + time_diff_list.append(int(time_diff)) + + # TODO: 解决帧数相同,但两台电脑算出的时间戳之差相差太大 + assert len(time_diff_list) > 0 + + # time_diff_mean = np.array(time_diff_list).mean().astype(np.int32) + + time_diff_array = np.array(time_diff_list) + cnt_list = [] + for time_diff in time_diff_array: + cnt = np.sum((time_diff_array >= time_diff - error_threshold) & (time_diff_array <= time_diff + error_threshold)) + cnt_list.append(cnt) + cnt_array = np.array(cnt_list) + max_idx = np.argmax(cnt_array) + reasonable_time_diff = time_diff_array[max_idx] + + # for time_diff in time_diff_list: + # assert abs(time_diff - time_diff_mean) <= error_threshold + + return reasonable_time_diff + +def cal_common_timestamps(timestamps_list, error_threshold=14): + timestamps_list = [np.array(timestamps) for timestamps in timestamps_list] + + common_timestamps = timestamps_list[0] + for t_idx, timestamps in enumerate(timestamps_list[1:]): + common_timestamps_ = [] + for timestamp in timestamps: + condition = (common_timestamps >= timestamp - error_threshold) & (common_timestamps <= timestamp + error_threshold) + within_range = common_timestamps[condition] + + if len(within_range) == 1: # 匹配上了 + res = within_range[0] + # 做个平滑 + modified_cm_ts = (timestamp + res) // 2 + common_timestamps_.append(modified_cm_ts) + elif len(within_range) == 0: # 没匹配上 + continue + else: # 多个匹配项? 
+ print(camera_list[t_idx + 1], within_range) + res = within_range[0] + modified_cm_ts = (timestamp + res) // 2 + common_timestamps_.append(modified_cm_ts) + # raise ValueError(f'len(within_range) should be 0 or 1, but got {len(within_range)}') + + common_timestamps = np.array(common_timestamps_) + + return common_timestamps.tolist() + +def divide_timestamps_list(timestamps, divide_threshold = 35): + ''' + 将匹配出的长的公共时间戳序列,根据间隔切成多个不同视频所属的子公共时间戳序列。 + ''' + timestamps_list = [] + + last_idx = 0 + for i in range(1, len(timestamps)): + if abs(timestamps[i] - timestamps[i-1]) >= divide_threshold: + timestamps_list.append(timestamps[last_idx:i]) + last_idx = i + + timestamps_list.append(timestamps[last_idx:]) + return timestamps_list + +def process_frame_loss_global(camera_list, root_dir, video_list, time_diff, error_threshold): + date = video_list[0][:8] + + assert os.path.exists(root_dir) + for video_id in video_list: + video_dir = os.path.join(root_dir, video_id, 'rgb') + assert os.path.exists(video_dir), video_dir + + file_integrity_video_list = [] + # check if files exist + for video_id in tqdm(video_list): + integrity_bool = True + video_dir = os.path.join(root_dir, video_id, 'rgb') + + for camera_id in camera_list: + timestamp_path = os.path.join(video_dir, camera_id + '_FrameTimeStamp.txt') + video_path = os.path.join(video_dir, camera_id + '.mp4') + if not os.path.exists(timestamp_path) or not os.path.exists(video_path): + integrity_bool = False + break + + if integrity_bool: + file_integrity_video_list.append(video_id) + else: + pass + # valid_record_path = os.path.join('/share/hlyang/results/record', f'{date}_invalid_video_id.txt') + # with open(valid_record_path, 'a') as f: + # f.write(f'{video_id}\n') + + # timestamps_list = [[]] * len(camera_list) + timestamps_list = [ [] for _ in range(len(camera_list))] + + for video_id in tqdm(file_integrity_video_list): + video_dir = os.path.join(root_dir, video_id, 'rgb') + for c_idx, camera_id in enumerate(camera_list): + timestamp_path = os.path.join(video_dir, camera_id + '_FrameTimeStamp.txt') + + with open(timestamp_path, 'r') as f: + lines = f.readlines() + + timestamps = [] + cnt = 0 + for line in lines: + parts = line.strip().split() + if len(parts) == 2: + timestamp = parts[1] + timestamps.append(int(timestamp)) + cnt += 1 + + timestamps_list[c_idx] += timestamps + + computer1_camera_list = ['21218078', '22139906', '22139908', '22139910', '22139911', '22139913', '22139914', '22139946'] + computer2_camera_list = [camera for camera in camera_list if camera not in computer1_camera_list] + assert len(computer2_camera_list) == 4 + computer1_ids = [camera_list.index(camera) for camera in computer1_camera_list] + computer2_ids = [camera_list.index(camera) for camera in computer2_camera_list] + + # 使用计算出的误差 + modified_timestamps_list = [] + for idx, timestamps in enumerate(timestamps_list): + if idx in computer2_ids: + modified_timestamps = [timestamp - time_diff for timestamp in timestamps] + modified_timestamps_list.append(modified_timestamps) + else: + modified_timestamps_list.append(timestamps) + + # 验证是不是单调递增,结果:不是!TODO:检查为什么不是 + # for timestamps in modified_timestamps_list: + # assert is_monotonic(timestamps) + + common_timestamps = cal_common_timestamps(modified_timestamps_list, error_threshold) + + common_timestamps_list = divide_timestamps_list(common_timestamps, divide_threshold=1000) + for common_ts in common_timestamps_list: + print(common_ts) + print(len(common_timestamps_list)) + +if __name__ == '__main__': + + # camera_list = 
['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946'] + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + + + date = '20231010' + root_dir = f'/share/datasets/HOI-mocap/{date}' + + dir_list = os.listdir(root_dir) + video_list = [dir for dir in dir_list if dir != 'camera_params' and 'cali' not in dir and not dir.endswith('txt')] + + video_list.sort() + print(video_list) + + error_threshold = 14 + + timestamp_diff_2m1_mean = get_computer_time_diff(date, error_threshold) + + process_frame_loss_global(camera_list, root_dir, video_list, timestamp_diff_2m1_mean, error_threshold) + + # for video_id in tqdm(video_list): + # try: + # process_frame_loss_global(camera_list, root_dir, video_id, timestamp_diff_2m1_mean, error_threshold) + # except Exception as error: + # traceback.print_exc() + # print(error) + # continue + + # video_list = get_valid_video_list(date) + # for video_id in tqdm(video_list): + # # cp calibration file + # src_cali_path = os.path.join(root_dir, video_id, 'src', 'calibration.json') + # os.path.exists(src_cali_path) + # dst_src_dir = os.path.join('/share/hlyang/results', video_id, 'src') + # os.makedirs(dst_src_dir, exist_ok=True) + # dst_src_path = os.path.join(dst_src_dir, 'calibration.json') + + # time2 = time() \ No newline at end of file diff --git a/utils/utils/process_mask_loss.py b/utils/utils/process_mask_loss.py new file mode 100755 index 0000000000000000000000000000000000000000..10b00f4c73848e05cd04fcbc1c2a133ba9ceb650 --- /dev/null +++ b/utils/utils/process_mask_loss.py @@ -0,0 +1,76 @@ +''' +有一些帧缺少了某一种mask,所以在metadata中加上key:frame_without_left_hand, frame_without_right_hand, frame_without_object1, frame_without_object2。以后的一些操作会跳过这里面的帧。 + +TODO:预计被跳过的帧全部使用前一帧的结果做平滑处理。 + +example: +python utils/process_mask_loss.py --video_id 20230715_15 +''' +import os +import sys +current_dir = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(current_dir) +import argparse +import pickle +import numpy as np +from hoi_io import get_downsampled_seg_infos_batch + + +if __name__ == '__main__': + camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946'] + + parser = argparse.ArgumentParser() + parser.add_argument('--video_id', required=True, type=str) + args = parser.parse_args() + video_id = args.video_id + + video_meta_dir = os.path.join('/share/hlyang/results', video_id, 'metadata') + assert os.path.exists(video_meta_dir) + + for camera in camera_list: + metadata_path = os.path.join(video_meta_dir, f'{camera}.pkl') + assert os.path.exists(metadata_path) + with open(metadata_path, 'rb') as f: + data = pickle.load(f) + num_frame = data['num_frame'] + frame_list = [str(i).zfill(5) for i in range(1, num_frame + 1)] + + seg, downsample_factor = get_downsampled_seg_infos_batch(video_id, frame_list, [camera]) + right_hand_seg = np.where(seg == 1, 1, 0).astype(np.uint8) + left_hand_seg = np.where(seg == 2, 1, 0).astype(np.uint8) + object1_seg = np.where(seg == 3, 1, 0).astype(np.uint8) + object2_seg = np.where(seg == 4, 1, 0).astype(np.uint8) + + frame_without_right_hand = [] + frame_without_left_hand = [] + frame_without_object1 = [] + frame_without_object2 = [] + for i, frame_id in enumerate(frame_list): + if np.count_nonzero(right_hand_seg[i, 0, ...]) == 0: + frame_without_right_hand.append(frame_id) + if np.count_nonzero(left_hand_seg[i, 0, ...]) == 0: + 
frame_without_left_hand.append(frame_id) + if np.count_nonzero(object1_seg[i, 0, ...]) == 0: + frame_without_object1.append(frame_id) + if np.count_nonzero(object2_seg[i, 0, ...]) == 0: + frame_without_object2.append(frame_id) + print(camera) + print(frame_without_right_hand) + print(frame_without_left_hand) + print(frame_without_object1) + print(frame_without_object2) + data['frame_without_right_hand'] = frame_without_right_hand + data['frame_without_left_hand'] = frame_without_left_hand + data['frame_without_object1'] = frame_without_object1 + data['frame_without_object2'] = frame_without_object2 + + with open(metadata_path, 'wb') as f: + pickle.dump(data, f) + + + + + + + + \ No newline at end of file diff --git a/utils/utils/project.py b/utils/utils/project.py new file mode 100644 index 0000000000000000000000000000000000000000..2dc6fa434cd03e4153a60fe6aedf1f2da1dd1c6f --- /dev/null +++ b/utils/utils/project.py @@ -0,0 +1,308 @@ +import os +import sys +sys.path.append('.') +import torch +import math + +def project_point_to_face(point, face_vertices): + # compute the face normal + edge1 = face_vertices[1] - face_vertices[0] + edge2 = face_vertices[2] - face_vertices[0] + face_normal = torch.cross(edge1, edge2) + + # take an arbitrary point on the face (here its first vertex) + face_point = face_vertices[0] + + # vector from the query point to the face + point_to_face = face_point - point + + # project the point onto the face plane + projection = point + (torch.dot(point_to_face, face_normal) / torch.dot(face_normal, face_normal)) * face_normal + + return projection + +def project_point_to_face_batch(point, face_vertices_batch): + # compute the face normals + edge1_batch = face_vertices_batch[:, 1, :] - face_vertices_batch[:, 0, :] + edge2_batch = face_vertices_batch[:, 2, :] - face_vertices_batch[:, 0, :] + face_normal_batch = torch.cross(edge1_batch, edge2_batch, dim=-1) + + # take an arbitrary point on each face (here its first vertex) + face_point_batch = face_vertices_batch[:, 0, :] + + # vectors from the query point to the faces + point_to_face_batch = face_point_batch - point.unsqueeze(0) + + # project the point onto each face plane + projection_batch = point.unsqueeze(0) + (torch.sum(point_to_face_batch * face_normal_batch, dim=-1) / torch.sum(face_normal_batch * face_normal_batch, dim=-1)).unsqueeze(1) * face_normal_batch + + return projection_batch + +def is_point_in_face(point, face_vertices): + # edge vectors of the triangle + edge1 = face_vertices[1] - face_vertices[0] + edge2 = face_vertices[2] - face_vertices[1] + edge3 = face_vertices[0] - face_vertices[2] + + # vectors from the triangle vertices to the query point + to_vertex1 = point - face_vertices[0] + to_vertex2 = point - face_vertices[1] + to_vertex3 = point - face_vertices[2] + + # cross products of each edge with the corresponding vertex-to-point vector + normal1 = torch.cross(edge1, to_vertex1) + normal2 = torch.cross(edge2, to_vertex2) + normal3 = torch.cross(edge3, to_vertex3) # note: orientation must stay consistent with the other two + + # the point is inside iff all three cross products point the same way + is_inside = (torch.dot(normal1, normal2) >= 0) and (torch.dot(normal2, normal3) >= 0) and (torch.dot(normal3, normal1) >= 0) + + return is_inside + +def is_point_in_face_batch(point_batch, face_vertices_batch): + # edge vectors of the triangles + edge1_batch = face_vertices_batch[:, 1, :] - face_vertices_batch[:, 0, :] + edge2_batch = face_vertices_batch[:, 2, :] - face_vertices_batch[:, 1, :] + edge3_batch = face_vertices_batch[:, 0, :] - face_vertices_batch[:, 2, :] + + # vectors from the triangle vertices to the query points + to_vertex1_batch = point_batch - face_vertices_batch[:, 0, :] + to_vertex2_batch = point_batch - face_vertices_batch[:, 1, :] + to_vertex3_batch = point_batch - face_vertices_batch[:, 2, :] + + # cross products of each edge with the corresponding vertex-to-point vector + normal1_batch = torch.cross(edge1_batch, to_vertex1_batch) + normal2_batch = torch.cross(edge2_batch, to_vertex2_batch) + normal3_batch = torch.cross(edge3_batch, 
to_vertex3_batch) # 注意取反以保持方向一致 + + # 检查法向量是否同向 + judge1 = torch.sum(normal1_batch * normal2_batch, dim=-1) >= 0 + judge2 = torch.sum(normal2_batch * normal3_batch, dim=-1) >= 0 + judge3 = torch.sum(normal3_batch * normal1_batch, dim=-1) >= 0 + is_inside_batch = judge1 & judge2 & judge3 + + return is_inside_batch + +def is_point_inside_faces_projection(vertices, faces, point): + + mask_list = [] + + for face_indices in faces: + face_vertices = vertices[face_indices] + projection = project_point_to_face(point, face_vertices) + mask_list.append(is_point_in_face(projection, face_vertices)) + + mask = torch.tensor(mask_list) + return mask + +def cal_dist_from_point_to_face(point, face_vertices): + # 计算面的法线 + edge1 = face_vertices[1] - face_vertices[0] + edge2 = face_vertices[2] - face_vertices[0] + face_normal = torch.cross(edge1, edge2) + + # 计算面上的任意一点(这里使用面的第一个顶点) + face_point = face_vertices[0] + + # 计算点到面的向量 + point_to_face = face_point - point + + # 计算点到面的垂线长度 + dist = torch.abs(torch.dot(point_to_face, face_normal) / torch.norm(face_normal)) + return dist + +def cal_dist_from_point_to_face_batch(point, face_vertices_batch): + # 计算面的法线 + edge1_batch = face_vertices_batch[:, 1] - face_vertices_batch[:, 0] + edge2_batch = face_vertices_batch[:, 2] - face_vertices_batch[:, 0] + face_normal_batch = torch.cross(edge1_batch, edge2_batch, dim=-1) + + # 标准化face_normal_batch + face_normal_batch = face_normal_batch / torch.norm(face_normal_batch, dim=-1).unsqueeze(1) + + # 计算面上的任意一点(这里使用面的第一个顶点) + face_point_batch = face_vertices_batch[:, 0, :] + + # 计算点到面的向量 + point_to_face_batch = face_point_batch - point.unsqueeze(0) + + # 计算点到面的垂线长度 + dist_batch = torch.sum(point_to_face_batch*face_normal_batch, dim=-1) + dist_batch_abs = torch.abs(dist_batch) + + # assert torch.any(dist_batch_abs == torch.inf) == False, 'dist_batch_abs should not contain inf' + + # 计算垂点 + point_stroke_batch = point.unsqueeze(0) + dist_batch.unsqueeze(1) * face_normal_batch + + return dist_batch_abs, point_stroke_batch + +def get_min_dist_batch(vertices, faces, point): + # TODO 考虑mask全为False的情况 + + mask_batch = is_point_inside_faces_projection(vertices, faces, point) + + dist_batch = [] + for idx, mask in enumerate(mask_batch): + if mask == True: + face_vertices = vertices[faces[idx]] + dist = cal_dist_from_point_to_face(point, face_vertices) + dist_batch.append(dist) + else: + dist_batch.append(torch.inf) + dist_batch = torch.tensor(dist_batch) + min_dist, min_idx = torch.min(dist_batch, 0) + return min_dist, min_idx + +def get_vertical_direction_from_point_to_face(point, face_vertices): + projection = project_point_to_face(point, face_vertices) + # 检查投影点是否与原始点相等 + if torch.all(torch.eq(projection, point)): + # 如果相等,返回法向量作为方向 + edge1 = face_vertices[1] - face_vertices[0] + edge2 = face_vertices[2] - face_vertices[0] + face_normal = torch.cross(edge1, edge2) + direction = face_normal / torch.norm(face_normal) + else: + vertical = projection - point + direction = vertical / torch.norm(vertical) + return direction + +def get_intersection_points(point, direction, point_stroke_batch, dist_batch): + perpendicular_batch = point_stroke_batch - point.unsqueeze(0) + projection_len_batch = torch.sum(perpendicular_batch * direction.unsqueeze(0), dim=-1) / torch.norm(perpendicular_batch, dim=-1) + projection_factor_batch = dist_batch / projection_len_batch + + point_to_intersection_points = projection_factor_batch.unsqueeze(1) * direction.unsqueeze(0) + len_of_point_to_intersection_points = torch.norm(point_to_intersection_points, dim=-1) 
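+ # dividing the perpendicular distance by the cosine between the unit `direction` and the point-to-foot vector gives the signed length along `direction` at which the ray from `point` meets each face's supporting plane; the offsets are added below to obtain the intersection points, and their norms feed the distance-threshold test in the caller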
+ intersection_points = point.unsqueeze(0) + point_to_intersection_points + return intersection_points, len_of_point_to_intersection_points + +def get_nearest_intersection_point(vertices, faces, points, valid_vert_indices, dist_threshold): + # TODO:考虑找不到direction的情况 + # TODO: 考虑dist_batch的值全部为torch.inf的情况 + device = vertices.device + + intersection_point_list = [] + intersection_point_valid_mask = [] + + for point in points: + + projection_batch = project_point_to_face_batch(point, vertices[faces]) + mask_batch = is_point_in_face_batch(projection_batch, vertices[faces]) + + # dist_batch = [] + # for idx, mask in enumerate(mask_batch): + # if mask == True: + # face_vertices = vertices[faces[idx]] + # dist = cal_dist_from_point_to_face(point, face_vertices) + # dist_batch.append(dist) + # else: + # dist_batch.append(torch.inf) + # dist_batch = torch.tensor(dist_batch) + + + # TODO 有可能所选的点都不在面内 + dist_batch, point_stroke_batch = cal_dist_from_point_to_face_batch(point, vertices[faces]) + # dist_batch中mask_batch为False的索引值赋为inf + dist_batch[~mask_batch] = torch.inf + assert torch.any(dist_batch != torch.inf) + min_dist, min_idx = torch.min(dist_batch, 0) + direction = get_vertical_direction_from_point_to_face(point, vertices[faces[min_idx]]) + + print(point_stroke_batch[min_idx]) + + # TODO 交点求的有问题 + # intersection_point_batch = point.unsqueeze(0) + dist_batch.unsqueeze(1) * direction + intersection_point_batch, len_of_point_to_intersection_points = get_intersection_points(point, direction, point_stroke_batch, dist_batch) + print(intersection_point_batch[min_idx]) + is_intersection_point_in_faces_batch = is_point_in_face_batch(intersection_point_batch, vertices[faces]) + + # print(intersection_point_batch[is_intersection_point_in_faces_batch]) + + + face_valid_mask = [] + for idx, face_indices in enumerate(faces): + face_valid_mask.append(torch.all(torch.isin(face_indices, valid_vert_indices))) + face_valid_mask = torch.tensor(face_valid_mask).to(device) + + # print(is_intersection_point_in_faces_batch.sum(), face_valid_mask[is_intersection_point_in_faces_batch], len_of_point_to_intersection_points[is_intersection_point_in_faces_batch]) + + # print((torch.norm(dist_batch.unsqueeze(1) * direction, dim=-1) <= dist_threshold).sum()) + + judge = face_valid_mask & is_intersection_point_in_faces_batch & (len_of_point_to_intersection_points <= dist_threshold) + if torch.any(judge) == False: + intersection_point_list.append(torch.tensor([0, 0, 0], device=device)) + intersection_point_valid_mask.append(False) + continue + + # 好像上面的判断多余了,如果全是torch.inf,那么min_dist也是torch.inf + + # dist_batch中面非法的索引值赋为inf + dist_batch[~face_valid_mask] = torch.inf + + # dist_batch中交点不在面内部的索引值赋为inf + dist_batch[~is_intersection_point_in_faces_batch] = torch.inf + + # dist_batch中值超过阈值的索引值赋为inf + dist_batch[len_of_point_to_intersection_points > dist_threshold] = torch.inf + + min_dist, min_idx = torch.min(dist_batch, 0) + intersection_point = intersection_point_batch[min_idx] + # print(face_valid_mask[min_idx], is_intersection_point_in_faces_batch[min_idx], len_of_point_to_intersection_points[min_idx] <= dist_threshold) + + intersection_point_list.append(intersection_point) + intersection_point_valid_mask.append(True) + + # intersection_points = [] + # dist_batch = [] + # face_indices_batch = [] + # for face_indices in faces: + # if torch.all(torch.isin(face_indices, valid_vert_indices)): + # face_vertices = vertices[face_indices] + # dist = cal_dist_from_point_to_face(point, face_vertices) + # intersection_point = point + 
dist * direction + + # # 判断intersection_point是否在三角形内部 + # if is_point_in_face(intersection_point, face_vertices) and torch.norm(dist * direction) < dist_threshold: + # intersection_points.append(intersection_point) + # dist_batch.append(torch.norm(dist * direction)) + # face_indices_batch.append(face_indices) + + # # assert len(intersection_points) > 0, 'len(intersection_points) should be greater than 0' + # if len(intersection_points) == 0: + # intersection_point_list.append(torch.tensor([0, 0, 0])) + # intersection_point_valid_mask.append(False) + # continue + + # dist_batch = torch.tensor(dist_batch) + # min_dist, min_idx = torch.min(dist_batch, 0) + # intersection_point = intersection_points[min_idx] + # face_indices = face_indices_batch[min_idx] + + # intersection_point_list.append(intersection_point) + # intersection_point_valid_mask.append(True) + + intersection_point_list = torch.stack(intersection_point_list) + intersection_point_valid_mask = torch.tensor(intersection_point_valid_mask) + + return intersection_point_list, intersection_point_valid_mask + +def get_nearest_intersection_point_batch(vertices_batch, faces, points_batch, valid_vert_indices, dist_threshold): + num_frames = vertices_batch.shape[0] + + intersection_point_batch = [] + intersection_point_valid_mask_batch = [] + + for i in range(num_frames): + intersection_point_list, intersection_point_valid_mask = get_nearest_intersection_point(vertices_batch[i], faces, points_batch[i], valid_vert_indices, dist_threshold) + intersection_point_batch.append(intersection_point_list) + intersection_point_valid_mask_batch.append(intersection_point_valid_mask) + print(intersection_point_list) + print(intersection_point_valid_mask) + print('----------------------------------') + + intersection_point_batch = torch.stack(intersection_point_batch) + intersection_point_valid_mask_batch = torch.stack(intersection_point_valid_mask_batch) + + return intersection_point_batch, intersection_point_valid_mask_batch \ No newline at end of file diff --git a/utils/utils/sanity_check.py b/utils/utils/sanity_check.py new file mode 100755 index 0000000000000000000000000000000000000000..67f75d327aed4a4bf4f222745e55c4fe6672e24f --- /dev/null +++ b/utils/utils/sanity_check.py @@ -0,0 +1,97 @@ +''' +按处理顺序检查文件完整性。 + +example: +python utils/sanity_check.py --video_id 20230715_15 +''' + +import os +from tqdm import tqdm +import argparse +import pickle +import multiprocessing as mlp + +if __name__ == "__main__": + # camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946'] + camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946'] + + # parser = argparse.ArgumentParser() + # parser.add_argument('--video_id', required=True, type=str) + # args = parser.parse_args() + # video_id = args.video_id + + video_list = [f'20230915_{str(i).zfill(3)}' for i in range(4,16)] + for video_id in video_list: + + num_frame = None + for camera_id in camera_list: + metadata_path = os.path.join('/share/hlyang/results', video_id, 'metadata', camera_id+'.pkl') + if os.path.exists(metadata_path): + with open(metadata_path, 'rb') as f: + data = pickle.load(f) + num_frame = data['num_frame'] + else: + print('metadata sanity check failed!') + exit(1) + + frame_list = [str(i+1).zfill(5) for i in range(num_frame)] + + video_res_dir = os.path.join('/share/hlyang/results', video_id) + + for camera_id in camera_list: + for frame_id in 
frame_list: + # imgs + path = os.path.join(video_res_dir, 'imgs', camera_id, camera_id + '_' + frame_id + '.png') + if not os.path.exists(path): + print('sanity check failed!', path) + exit(1) + + # results + path = os.path.join(video_res_dir, 'fit_hand_joint_ransac_batch_by_squence', 'res', 'left_hand', f'hand_{frame_id}.pkl') + if not os.path.exists(path): + print('sanity check failed!', path) + path = os.path.join(video_res_dir, 'fit_hand_joint_ransac_batch_by_squence', 'res', 'right_hand', f'hand_{frame_id}.pkl') + if not os.path.exists(path): + print('sanity check failed!', path) + + # results + path = os.path.join(video_res_dir, 'get_world_mesh_from_mano_params', 'meshes', 'left_hand', f'hand_{frame_id}.obj') + if not os.path.exists(path): + print('sanity check failed!', path) + path = os.path.join(video_res_dir, 'get_world_mesh_from_mano_params', 'meshes', 'right_hand', f'hand_{frame_id}.obj') + if not os.path.exists(path): + print('sanity check failed!', path) + + # anno_results + # path = os.path.join(video_res_dir, 'anno_results', camera_id, camera_id + '_' + frame_id + '.npy') + # if not os.path.exists(path): + # print('sanity check failed!', path) + # exit(1) + + # crop + # path = os.path.join(video_res_dir, 'crop_imgs_left_hand', camera_id, camera_id + '_' + frame_id + '_crop_info.pkl') + # if not os.path.exists(path): + # print('sanity check failed!', path) + # exit(1) + # path = os.path.join(video_res_dir, 'crop_imgs_right_hand', camera_id, camera_id + '_' + frame_id + '_crop_info.pkl') + # if not os.path.exists(path): + # print('sanity check failed!', path) + # exit(1) + # path = os.path.join(video_res_dir, 'crop_imgs_left_hand', camera_id, camera_id + '_' + frame_id + '.png') + # if not os.path.exists(path): + # print('sanity check failed!', path) + # exit(1) + # path = os.path.join(video_res_dir, 'crop_imgs_right_hand', camera_id, camera_id + '_' + frame_id + '.png') + # if not os.path.exists(path): + # print('sanity check failed!', path) + # exit(1) + + # mmpose + # path = os.path.join(video_res_dir, 'mmpose_left_hand', 'predictions', camera_id + '_' + frame_id + '.json') + # if not os.path.exists(path): + # print('sanity check failed!', path) + # exit(1) + # path = os.path.join(video_res_dir, 'mmpose_right_hand', 'predictions', camera_id + '_' + frame_id + '.json') + # if not os.path.exists(path): + # print('sanity check failed!', path) + # exit(1) \ No newline at end of file diff --git a/utils/utils/save_obj.py b/utils/utils/save_obj.py new file mode 100755 index 0000000000000000000000000000000000000000..d267e9edbf10e6c2578919fb79986f2d70b74808 --- /dev/null +++ b/utils/utils/save_obj.py @@ -0,0 +1,10 @@ +def save_obj(path, v, f, c=None): + with open(path, 'w') as file: + if c is None: + for i in range(v.shape[0]): + file.write('v {} {} {}\n'.format(v[i, 0], v[i, 1], v[i, 2])) + else: + for i in range(v.shape[0]): + file.write('v {} {} {} {} {} {}\n'.format(v[i, 0], v[i, 1], v[i, 2], c[i, 0], c[i, 1], c[i, 2])) + for i in range(f.shape[0]): + file.write('f {} {} {}\n'.format(f[i, 0] + 1, f[i, 1] + 1, f[i, 2] + 1)) \ No newline at end of file diff --git a/utils/utils/save_time_diff.py b/utils/utils/save_time_diff.py new file mode 100755 index 0000000000000000000000000000000000000000..a5965538f8dbfe159fac0c14d1c868ee84d0ceee --- /dev/null +++ b/utils/utils/save_time_diff.py @@ -0,0 +1,80 @@ +''' +解析录制时的{camera_id}__FrameTimeStamp.txt,根据timestamp丢弃录制时有丢失的帧,并将剩余的帧数据存为pkl。之后video2img.py等一系列程序将基于该pkl进行处理。 + +在/share/hlyang/results/record生成{date}_invalid_video_id.txt, 
{date}_match_failed_video_id.txt, {date}_valid_video_id.txt等 + +example: +python utils/process_frame_loss.py --root_dir /share/datasets/HOI-mocap/20230904 --video_id 20230904_01 +''' +import traceback + +import os +import sys +sys.path.append('.') +import argparse +import pickle +from shutil import copy +import numpy as np +from tqdm import tqdm +from time import time +from loguru import logger + +from utils.hoi_io2 import get_valid_video_list, get_time_diff +from utils.process_frame_loss import get_computer_time_diff +from utils.process_frame_loss2 import get_reasonable_time_diff +from utils.organize_dataset import load_sequence_names_from_organized_record, txt2intrinsic, load_simplied_nokov_objs_mesh, load_organized_mano_info, load_dates_from_organized_record + + +if __name__ == '__main__': + + root = '/data3/hlyang/results' + dataset_root = os.path.join(root, 'dataset') + hand_pose_organized_record_path = os.path.join(dataset_root, 'organized_record.txt') + date_list = load_dates_from_organized_record(hand_pose_organized_record_path) + new_form_date_list = ['20230919', '20230930', '20231005', '20231006', '20231010', '20231013', '20231015', '20231019', '20231020', '20231024', '20231026', '20231027', '20231031', '20231102', '20231103', '20231104', '20231105'] + old_form_date_list = [date for date in date_list if date not in new_form_date_list] + + save_root = '/data3/hlyang/results' + + upload_root_dir = '/data2/HOI-mocap' + tot_time_diff = {} + + for date in date_list: + upload_date_root = os.path.join(upload_root_dir, date) + dir_list = os.listdir(upload_date_root) + video_list = [dir for dir in dir_list if dir != 'camera_params' and 'cali' not in dir and not dir.endswith('txt')] + video_list.sort() + + if date in new_form_date_list: + time_diff_data = get_time_diff(save_root, date) + + pbar = tqdm(video_list) + for video_id in pbar: + try: + pbar.set_description(f'Processing {video_id}') + time_diff = get_reasonable_time_diff(video_id, time_diff_data) + tot_time_diff[video_id] = time_diff + + except Exception as error: + traceback.print_exc() + print(error) + continue + + else: + error_threshold = 17 + time_diff = get_computer_time_diff(date, error_threshold) + pbar = tqdm(video_list) + for video_id in pbar: + try: + pbar.set_description(f'Processing {video_id}') + tot_time_diff[video_id] = time_diff + + except Exception as error: + traceback.print_exc() + print(error) + continue + + + save_path = os.path.join(save_root, 'dataset', 'time_diff.pkl') + with open(save_path, 'wb') as f: + pickle.dump(tot_time_diff, f) diff --git a/utils/utils/scandir.py b/utils/utils/scandir.py new file mode 100755 index 0000000000000000000000000000000000000000..add1b1341dd66d688d080a8c446479650852995b --- /dev/null +++ b/utils/utils/scandir.py @@ -0,0 +1,43 @@ +import os +import os.path as osp + +def scandir(dir_path, suffix=None, recursive=False, full_path=False): + """Scan a directory to find the interested files. + + Args: + dir_path (str): Path of the directory. + suffix (str | tuple(str), optional): File suffix that we are + interested in. Default: None. + recursive (bool, optional): If set to True, recursively scan the + directory. Default: False. + full_path (bool, optional): If set to True, include the dir_path. + Default: False. + + Returns: + A generator for all the interested files with relative paths. 
+ """ + + if (suffix is not None) and not isinstance(suffix, (str, tuple)): + raise TypeError('"suffix" must be a string or tuple of strings') + + root = dir_path + + def _scandir(dir_path, suffix, recursive): + for entry in os.scandir(dir_path): + if not entry.name.startswith('.') and entry.is_file(): + if full_path: + return_path = entry.path + else: + return_path = osp.relpath(entry.path, root) + + if suffix is None: + yield return_path + elif return_path.endswith(suffix): + yield return_path + else: + if recursive: + yield from _scandir(entry.path, suffix=suffix, recursive=recursive) + else: + continue + + return _scandir(dir_path, suffix=suffix, recursive=recursive) \ No newline at end of file diff --git a/utils/utils/seal_mano.py b/utils/utils/seal_mano.py new file mode 100644 index 0000000000000000000000000000000000000000..be50bc57e73f89bc9e66a61799a905a12a191cd4 --- /dev/null +++ b/utils/utils/seal_mano.py @@ -0,0 +1,99 @@ +import sys +sys.path.append('.') +import numpy as np +import torch +from manopth.manopth.manolayer import ManoLayer +import open3d as o3d + +SEAL_FACES_R = [ + [120, 108, 778], + [108, 79, 778], + [79, 78, 778], + [78, 121, 778], + [121, 214, 778], + [214, 215, 778], + [215, 279, 778], + [279, 239, 778], + [239, 234, 778], + [234, 92, 778], + [92, 38, 778], + [38, 122, 778], + [122, 118, 778], + [118, 117, 778], + [117, 119, 778], + [119, 120, 778], +] + +# vertex ids around the ring of the wrist +CIRCLE_V_ID = np.array( + [108, 79, 78, 121, 214, 215, 279, 239, 234, 92, 38, 122, 118, 117, 119, 120], + dtype=np.int64, +) + +def seal_mano_mesh(v3d, faces, is_rhand): + # v3d: B, 778, 3 + # faces: 1538, 3 + # output: v3d(B, 779, 3); faces (1554, 3) + + seal_faces = torch.LongTensor(np.array(SEAL_FACES_R)).to(faces.device) + if not is_rhand: + # left hand + seal_faces = seal_faces[:, np.array([1, 0, 2])] # invert face normal + centers = v3d[:, CIRCLE_V_ID].mean(dim=1)[:, None, :] + sealed_vertices = torch.cat((v3d, centers), dim=1) + faces = torch.cat((faces, seal_faces), dim=0) + return sealed_vertices, faces + +if __name__ == "__main__": + use_pca = False + ncomps = 45 + left_hand_mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='left', center_idx = 0) + right_hand_mano_layer = ManoLayer(mano_root='./manopth/mano/models', use_pca=use_pca, ncomps=ncomps, side='right', center_idx = 0) + + right_hand_verts, _, _ = right_hand_mano_layer(torch.zeros(1, 48)) + right_hand_faces = right_hand_mano_layer.th_faces.detach() + left_hand_verts, _, _ = left_hand_mano_layer(torch.zeros(1, 48)) + left_hand_faces = left_hand_mano_layer.th_faces.detach() + + right_hand_sealed_vertices, right_hand_faces = seal_mano_mesh(right_hand_verts / 1000.0, right_hand_faces, True) + left_hand_sealed_vertices, left_hand_faces = seal_mano_mesh(left_hand_verts / 1000.0, left_hand_faces, False) + + # right_hand_mesh = trimesh.Trimesh(right_hand_sealed_vertices[0], right_hand_faces) + # left_hand_mesh = trimesh.Trimesh(left_hand_sealed_vertices[0], left_hand_faces) + + # right_hand_vox = right_hand_mesh.voxelized(0.001) + # left_hand_vox = left_hand_mesh.voxelized(0.001) + + right_hand_mesh = o3d.geometry.TriangleMesh() + right_hand_pcd = o3d.geometry.PointCloud() + right_hand_pcd.points = o3d.utility.Vector3dVector(right_hand_sealed_vertices.numpy()[0]) + right_hand_mesh.vertices = right_hand_pcd.points + right_hand_mesh.triangles = o3d.utility.Vector3iVector(right_hand_faces) + left_hand_mesh = o3d.geometry.TriangleMesh() + left_hand_pcd = 
o3d.geometry.PointCloud() + left_hand_pcd.points = o3d.utility.Vector3dVector(left_hand_sealed_vertices.numpy()[0]) + left_hand_mesh.vertices = left_hand_pcd.points + left_hand_mesh.triangles = o3d.utility.Vector3iVector(left_hand_faces) + + voxel_size = 0.001 + right_hand_vox = o3d.geometry.VoxelGrid.create_from_triangle_mesh(right_hand_mesh, voxel_size) + right_voxel_list = right_hand_vox.get_voxels() + right_grid_indexs = np.stack([voxel.grid_index for voxel in right_voxel_list]) + left_hand_vox = o3d.geometry.VoxelGrid.create_from_triangle_mesh(left_hand_mesh, voxel_size) + left_voxel_list = left_hand_vox.get_voxels() + left_grid_indexs = np.stack([voxel.grid_index for voxel in left_voxel_list]) + + right_set = {tuple(row) for row in right_grid_indexs} + left_set = {tuple(row) for row in left_grid_indexs} + intersection_set = right_set.intersection(left_set) + intersection_set = np.array(list(intersection_set)) + + num_right_hand_vox = right_grid_indexs.shape[0] + num_left_hand_vox = left_grid_indexs.shape[0] + num_intersection_vox = intersection_set.shape[0] + print(num_right_hand_vox, num_left_hand_vox, num_intersection_vox) + print() + + o3d.io.write_triangle_mesh("/home/hlyang/HOI/HOI/tmp/right_T_pose.obj", right_hand_mesh) + o3d.io.write_triangle_mesh("/home/hlyang/HOI/HOI/tmp/left_T_pose.obj", left_hand_mesh) + \ No newline at end of file diff --git a/utils/utils/slice_mask.py b/utils/utils/slice_mask.py new file mode 100755 index 0000000000000000000000000000000000000000..6e963bfa54e9a99485e3cc5f73565c59e4c1c428 --- /dev/null +++ b/utils/utils/slice_mask.py @@ -0,0 +1,76 @@ +''' +将标注工具的标注结果切成每个视角每一帧单独一个npy,减少后续读取时的内存负担。 +需要upsample! + +example: +python utils/slice_mask.py --video_id 20230818_03 +''' + +import numpy as np +import cv2 +import argparse +import os +import multiprocessing as mlp +from tqdm import tqdm + +def slice_mask(video_id, camera_id): + save_dir = os.path.join('/share/hlyang/results', video_id, 'anno_results', camera_id) + os.makedirs(save_dir, exist_ok=True) + + path = os.path.join('/share/hlyang/results', video_id, 'original_anno_results', video_id+'|'+camera_id+'.npy') + mask = np.load(path) # [num_frame, height, width] + + num_frame = mask.shape[0] + for i in tqdm(range(num_frame)): + + # left_hand = np.where(mask[i, ...] == 1, 1, 0).astype(np.uint8) + # left_hand = cv2.resize(left_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR) + # right_hand = np.where(mask[i, ...] == 2, 1, 0).astype(np.uint8) + # right_hand = cv2.resize(right_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR) + # object1 = np.where(mask[i, ...] == 3, 1, 0).astype(np.uint8) + # object1 = cv2.resize(object1, (4096, 3000), interpolation=cv2.INTER_LINEAR) + # object2 = np.where(mask[i, ...] 
diff --git a/utils/utils/slice_mask.py b/utils/utils/slice_mask.py
new file mode 100755
index 0000000000000000000000000000000000000000..6e963bfa54e9a99485e3cc5f73565c59e4c1c428
--- /dev/null
+++ b/utils/utils/slice_mask.py
@@ -0,0 +1,76 @@
+'''
+Slice the annotation-tool results into one npy file per camera view and per frame,
+so that later stages do not need to load the whole sequence into memory.
+The masks still need to be upsampled!
+
+example:
+python utils/slice_mask.py --video_id 20230818_03
+'''
+
+import numpy as np
+import cv2
+import argparse
+import os
+import multiprocessing as mlp
+from tqdm import tqdm
+
+def slice_mask(video_id, camera_id):
+    save_dir = os.path.join('/share/hlyang/results', video_id, 'anno_results', camera_id)
+    os.makedirs(save_dir, exist_ok=True)
+
+    path = os.path.join('/share/hlyang/results', video_id, 'original_anno_results', video_id+'|'+camera_id+'.npy')
+    mask = np.load(path)  # [num_frame, height, width]
+
+    num_frame = mask.shape[0]
+    for i in tqdm(range(num_frame)):
+
+        # left_hand = np.where(mask[i, ...] == 1, 1, 0).astype(np.uint8)
+        # left_hand = cv2.resize(left_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        # right_hand = np.where(mask[i, ...] == 2, 1, 0).astype(np.uint8)
+        # right_hand = cv2.resize(right_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        # object1 = np.where(mask[i, ...] == 3, 1, 0).astype(np.uint8)
+        # object1 = cv2.resize(object1, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        # object2 = np.where(mask[i, ...] == 4, 1, 0).astype(np.uint8)
+        # object2 = cv2.resize(object2, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        # upsampled_mask_ = np.zeros((3000, 4096), dtype=np.uint8)
+        # upsampled_mask_[left_hand == 1] = 1
+        # upsampled_mask_[right_hand == 1] = 2
+        # upsampled_mask_[object1 == 1] = 3
+        # upsampled_mask_[object2 == 1] = 4
+
+        save_path = os.path.join(save_dir, camera_id+'_'+str(i+1).zfill(5)+'.npy')
+        np.save(save_path, mask[i])
+        # np.save(save_path, upsampled_mask_)
+
+
+if __name__ == '__main__':
+    camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946']
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--video_id', required=True, type=str)
+    args = parser.parse_args()
+    video_id = args.video_id
+
+    os.makedirs(os.path.join('/share/hlyang/results', video_id, 'anno_results'), exist_ok=True)
+
+    procs = []
+    for camera_id in camera_list:
+        args = (video_id, camera_id)
+        proc = mlp.Process(target=slice_mask, args=args)
+        proc.start()
+        procs.append(proc)
+
+    for i in range(len(procs)):
+        procs[i].join()
+
+    # data = np.load('./20230715_15|22070938.npy')
+    # data = data[0]
+    # img = np.zeros((750,1024,3))
+
+    # data = np.where(data == 1, 1, 0).astype('uint8')
+    # interpolated_img = cv2.resize(data, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+    # print(interpolated_img.shape)
+
+    # color_img = np.zeros((3000, 4096, 3), dtype=np.uint8)
+    # color_img[interpolated_img == 1] = [0, 0, 255]
+
+    # cv2.imwrite('./test.jpg', color_img)
diff --git a/utils/utils/test_seg.py b/utils/utils/test_seg.py
new file mode 100755
index 0000000000000000000000000000000000000000..903226b214dc0ac1738e792c8b7864c6b683cd85
--- /dev/null
+++ b/utils/utils/test_seg.py
@@ -0,0 +1,51 @@
+'''
+Only crops a patch from which a single hand can be cut out; no resize is applied.
+
+example:
+python utils/test_seg.py
+'''
+
+import os
+import sys
+current_dir = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(current_dir)
+import cv2
+import numpy as np
+import os.path as osp
+from tqdm import tqdm
+import pickle
+import multiprocessing as mlp
+from hoi_io import get_seg_infos_batch3, load_bg_img
+from scandir import scandir
+import argparse
+
+if __name__ == '__main__':
+    video_id = '20230715_15'
+    frame_list = [str(i+1).zfill(5) for i in range(1)]
+    camera_list = ['22070938']
+    seg = get_seg_infos_batch3(video_id, frame_list, camera_list)
+
+    left_hand_seg = np.where(seg[0, 0] == 1, 1, 0)
+    right_hand_seg = np.where(seg[0, 0] == 2, 1, 0)
+
+    left = np.zeros((3000, 4096, 3), dtype=np.uint8)
+    right = np.zeros((3000, 4096, 3), dtype=np.uint8)
+
+    left[left_hand_seg == 1] = [255, 0, 0]
+    right[right_hand_seg == 1] = [255, 0, 0]
+
+    cv2.imwrite('./left.png', left)
+    cv2.imwrite('./right.png', right)
+
+    # seg = np.load('/share/hlyang/results/20230715_15/original_anno_results/20230715_15|22070938.npy')
+    # left_hand_seg = np.where(seg[0] == 1, 1, 0)
+    # right_hand_seg = np.where(seg[0] == 2, 1, 0)
+
+    # left = np.zeros((750, 1024, 3), dtype=np.uint8)
+    # right = np.zeros((750, 1024, 3), dtype=np.uint8)
+
+    # left[left_hand_seg == 1] = [255,0,0]
+    # right[right_hand_seg == 1] = [255,0,0]
+
+    # cv2.imwrite('./left.png', left)
+    # cv2.imwrite('./right.png', right)
\ No newline at end of file
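A small sketch of reading back one of the per-frame masks written by `slice_mask.py`; the concrete ids are placeholders, and the label convention (1/2 for the hands, 3/4 for the objects) follows the comments in that script.

```python
import os
import numpy as np

video_id, camera_id, frame_id = '20230818_03', '22070938', '00001'
mask_path = os.path.join('/share/hlyang/results', video_id, 'anno_results',
                         camera_id, f'{camera_id}_{frame_id}.npy')
mask = np.load(mask_path)                      # (H, W) uint8 label map at annotation resolution
left_hand = (mask == 1).astype(np.uint8)       # binary mask for a single label
```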
diff --git a/utils/utils/upsample_mask.py b/utils/utils/upsample_mask.py
new file mode 100755
index 0000000000000000000000000000000000000000..4674ced0a6c3b486a612865d9eab58b16f21c70a
--- /dev/null
+++ b/utils/utils/upsample_mask.py
@@ -0,0 +1,73 @@
+'''
+The annotation tool works on downsampled images, so the annotation results have to be upsampled first.
+
+example:
+python utils/upsample_mask.py --video_id 20230715_15
+'''
+
+import numpy as np
+import cv2
+import argparse
+import os
+import multiprocessing as mlp
+from tqdm import tqdm
+
+def upsample_mask(video_id, camera_id):
+    path = os.path.join('/share/hlyang/results', video_id, 'original_anno_results', video_id+'|'+camera_id+'.npy')
+    mask = np.load(path)  # [num_frame, height, width]
+    upsampled_mask_list = []
+    for idx in tqdm(range(mask.shape[0])):
+        # Interpolating the raw label map is wrong: labels 0 and 4 can blend into a 2,
+        # so each label is resized as its own binary mask instead.
+        # upsampled_mask_ = cv2.resize(mask[idx, ...], (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        left_hand = np.where(mask[idx, ...] == 1, 1, 0).astype(np.uint8)
+        left_hand = cv2.resize(left_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        right_hand = np.where(mask[idx, ...] == 2, 1, 0).astype(np.uint8)
+        right_hand = cv2.resize(right_hand, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        object1 = np.where(mask[idx, ...] == 3, 1, 0).astype(np.uint8)
+        object1 = cv2.resize(object1, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        object2 = np.where(mask[idx, ...] == 4, 1, 0).astype(np.uint8)
+        object2 = cv2.resize(object2, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        upsampled_mask_ = np.zeros((3000, 4096), dtype=np.uint8)
+        upsampled_mask_[left_hand == 1] = 1
+        upsampled_mask_[right_hand == 1] = 2
+        upsampled_mask_[object1 == 1] = 3
+        upsampled_mask_[object2 == 1] = 4
+
+        upsampled_mask_list.append(upsampled_mask_)
+    upsampled_mask = np.stack(upsampled_mask_list, axis=0)
+    print(upsampled_mask.shape)
+    save_path = os.path.join('/share/hlyang/results', video_id, 'anno_results', video_id+'|'+camera_id+'.npy')
+    np.save(save_path, upsampled_mask)
+
+if __name__ == '__main__':
+    camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946']
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--video_id', required=True, type=str)
+    args = parser.parse_args()
+    video_id = args.video_id
+
+    os.makedirs(os.path.join('/share/hlyang/results', video_id, 'anno_results'), exist_ok=True)
+
+    procs = []
+    for camera_id in camera_list:
+        args = (video_id, camera_id)
+        proc = mlp.Process(target=upsample_mask, args=args)
+        proc.start()
+        procs.append(proc)
+
+    for i in range(len(procs)):
+        procs[i].join()
+
+    # data = np.load('./20230715_15|22070938.npy')
+    # data = data[0]
+    # img = np.zeros((750,1024,3))
+
+    # data = np.where(data == 1, 1, 0).astype('uint8')
+    # interpolated_img = cv2.resize(data, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+    # print(interpolated_img.shape)
+
+    # color_img = np.zeros((3000, 4096, 3), dtype=np.uint8)
+    # color_img[interpolated_img == 1] = [0, 0, 255]
+
+    # cv2.imwrite('./test.jpg', color_img)
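The per-label resize above avoids the label-blending problem mentioned in the comment (bilinear interpolation of the raw label map can invent labels, e.g. 0 and 4 averaging to 2). A nearest-neighbor resize is a simpler alternative that also never blends labels; this is only a sketch, not what `upsample_mask.py` does.

```python
import cv2
import numpy as np

def upsample_labels_nearest(label_map: np.ndarray, size=(4096, 3000)) -> np.ndarray:
    # cv2.resize expects (width, height); INTER_NEAREST copies labels instead of averaging them.
    return cv2.resize(label_map.astype(np.uint8), size, interpolation=cv2.INTER_NEAREST)
```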
diff --git a/utils/utils/video2img.py b/utils/utils/video2img.py
new file mode 100755
index 0000000000000000000000000000000000000000..8fbda1f650269c4f6677005952145e254bfab409
--- /dev/null
+++ b/utils/utils/video2img.py
@@ -0,0 +1,120 @@
+'''
+Read the per-camera pkl metadata to get the number of frames of each video, then decode every video into individual images.
+
+TODO: pack the images of a video into a single npy file to reduce IO? Re: definitely not, it uses far too much memory.
+
+example:
+python utils/video2img.py --root_dir /share/datasets/HOI-mocap/20230904 --video_id 20230904_01
+'''
+
+import os
+import sys
+sys.path.append('.')
+import cv2
+from tqdm import tqdm
+import argparse
+import pickle
+import multiprocessing as mlp
+from utils.hoi_io import get_valid_video_list
+
+def mp42img(video_path, img_dir, num_frame, original_num_frame, cnt2frame_id_dict, res_prefix='', res_suffix=''):
+    os.makedirs(img_dir, exist_ok=True)
+    assert os.path.exists(video_path)
+
+    cap = cv2.VideoCapture(video_path)
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+    cap.set(cv2.CAP_PROP_FOURCC, fourcc)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+    # print(fps, W, H)
+
+    suc = cap.isOpened()
+
+    # for frame_cnt in tqdm(range(1, num_frame + 1)):
+    #     suc, img = cap.read()
+    #     assert suc
+    #     cv2.imwrite(os.path.join(img_dir, res_prefix + str(frame_cnt).zfill(5) + res_suffix + ".png"), img)
+    cnt = 0
+    while True:
+        suc, img = cap.read()
+        if not suc:
+            break
+        cnt += 1
+        frame_id = cnt2frame_id_dict.get(cnt, None)
+        if frame_id is not None:
+            cv2.imwrite(os.path.join(img_dir, res_prefix + frame_id + res_suffix + ".png"), img)
+    assert cnt == original_num_frame, f'cnt: {cnt}, original_num_frame: {original_num_frame}'
+
+    cap.release()
+
+if __name__ == "__main__":
+    # camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946']
+    camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946']
+
+    parser = argparse.ArgumentParser()
+    # parser.add_argument('--root_dir', required=True, type=str)
+    # parser.add_argument('--video_id', required=True, type=str)
+    args = parser.parse_args()
+
+    # root_dir = args.root_dir
+    # video_id = args.video_id
+    date = '20231005'
+    root_dir = f'/share/datasets/HOI-mocap/{date}'
+
+    # video_list = [f'{date}_{str(i).zfill(3)}' for i in range(1, 54)]
+
+    # dir_list = os.listdir(root_dir)
+    # video_list = [dir for dir in dir_list if dir != 'camera_params' and 'cali' not in dir and not dir.endswith('txt')]
+    # video_list.sort()
+
+    video_list = get_valid_video_list(date)
+
+    print(video_list)
+
+    for video_id in tqdm(video_list):
+        assert os.path.exists(root_dir)
+        video_dir = os.path.join(root_dir, video_id)
+        assert os.path.exists(video_dir)
+
+        # metadata_dir = os.path.join('/share/hlyang/results', video_id, 'metadata')
+        metadata_dir = os.path.join('/share/hlyang/results', date, video_id, 'metadata')
+
+        # some recordings are broken and have no metadata; skip them
+        if not os.path.exists(metadata_dir) or len(os.listdir(metadata_dir)) == 0:
+            print(f'{video_id} is partially broken: no metadata')
+            continue
+
+        # img_root = os.path.join('/share/hlyang/results', video_id, 'imgs')
+        img_root = os.path.join('/share/hlyang/results', date, video_id, 'imgs')
+
+        # skip videos that have already been processed
+        # if os.path.exists(img_root):
+        #     continue
+
+        os.makedirs(img_root, exist_ok=True)
+
+        procs = []
+
+        for camera_id in camera_list:
+            metadata_path = os.path.join(metadata_dir, camera_id + '.pkl')
+            with open(metadata_path, 'rb') as f:
+                metadata = pickle.load(f)
+            num_frame = metadata['num_frame']
+            original_num_frame = metadata['original_num_frame']
+            cnt2frame_id_dict = metadata['cnt2frame_id_dict']
+
+            video_path = os.path.join(video_dir, 'rgb', camera_id + '.mp4')
+            img_dir = os.path.join(img_root, camera_id)
+            os.makedirs(img_dir, exist_ok=True)
+
+            # mp42img(video_path, img_dir, num_frame, res_prefix = camera_id + '_')
+            args = (video_path, img_dir, num_frame, original_num_frame, cnt2frame_id_dict, camera_id + '_')
+            proc = mlp.Process(target=mp42img, args=args)
+            proc.start()
+            procs.append(proc)
+
+        for i in range(len(procs)):
+            procs[i].join()
\ No newline at end of file
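A sketch of inspecting the per-camera metadata that `video2img.py` relies on; the dictionary keys match the script above, while the date, video and camera ids are placeholders.

```python
import os
import pickle

metadata_path = os.path.join('/share/hlyang/results', '20231005', '20231005_001', 'metadata', '22070938.pkl')
with open(metadata_path, 'rb') as f:
    metadata = pickle.load(f)

cnt2frame_id = metadata['cnt2frame_id_dict']   # decoded-frame index -> synchronized frame id (zero-padded string)
print(metadata['num_frame'], metadata['original_num_frame'])
print(cnt2frame_id.get(1))                     # e.g. '00001' if the first decoded frame was kept
```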
diff --git a/utils/utils/video2sub_video.py b/utils/utils/video2sub_video.py
new file mode 100755
index 0000000000000000000000000000000000000000..34d5aad10c19a08e6e606986ae9c8152a168ca36
--- /dev/null
+++ b/utils/utils/video2sub_video.py
@@ -0,0 +1,151 @@
+'''
+Read the per-camera pkl metadata to get the number of frames of each video, then decode every video frame by frame.
+
+Each batch of frames is packed into one mp4 sub-video, named after the first frame of the batch.
+
+example:
+python utils/video2img.py --root_dir /share/datasets/HOI-mocap/20230904 --video_id 20230904_01
+'''
+import traceback
+import os
+import sys
+sys.path.append('.')
+import cv2
+from tqdm import tqdm
+import argparse
+import pickle
+import multiprocessing as mlp
+from utils.hoi_io import get_valid_video_list
+
+def divide_mp4(video_path, sub_video_dir, num_frame, original_num_frame, cnt2frame_id_dict, res_prefix='', res_suffix=''):
+    os.makedirs(sub_video_dir, exist_ok=True)
+    assert os.path.exists(video_path)
+
+    cap = cv2.VideoCapture(video_path)
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+    cap.set(cv2.CAP_PROP_FOURCC, fourcc)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+    # print(fps, W, H)
+
+    suc = cap.isOpened()
+    BATCH_SIZE = 20
+
+    cnt = 0  # index of the frame in the original video
+    represent_frame_id = str(1).zfill(5)
+    img_list = []
+    while True:
+        suc, img = cap.read()
+        if not suc:
+            break
+        cnt += 1
+        frame_id = cnt2frame_id_dict.get(cnt, None)  # index of the frame in the matched (synchronized) ordering
+        if frame_id is not None:
+            if int(frame_id) % BATCH_SIZE == 3:
+
+                # write out the current sub-video
+                fourcc2 = cv2.VideoWriter_fourcc(*'mp4v')
+                sub_video_save_path = os.path.join(sub_video_dir, res_prefix + represent_frame_id + res_suffix + '.mp4')
+                videoWriter = cv2.VideoWriter(sub_video_save_path, fourcc2, fps, (W, H))
+                for img in img_list:
+                    videoWriter.write(img)
+                videoWriter.release()
+
+                # reset the state for the next chunk
+                img_list = []
+                represent_frame_id = frame_id
+
+            img_list.append(img)
+
+    # save the last chunk
+    fourcc2 = cv2.VideoWriter_fourcc(*'mp4v')
+    sub_video_save_path = os.path.join(sub_video_dir, res_prefix + represent_frame_id + res_suffix + '.mp4')
+    videoWriter = cv2.VideoWriter(sub_video_save_path, fourcc2, fps, (W, H))
+    for img in img_list:
+        videoWriter.write(img)
+    videoWriter.release()
+
+    if cnt != original_num_frame:
+        print(f'video: {video_path} cnt: {cnt}, original_num_frame: {original_num_frame}')
+        # assert cnt == original_num_frame, f'video: {video_path} cnt: {cnt}, original_num_frame: {original_num_frame}'
+
+    cap.release()
+
+
+if __name__ == "__main__":
+    camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946']
+
+    parser = argparse.ArgumentParser()
+    # parser.add_argument('--root_dir', required=True, type=str)
+    # parser.add_argument('--video_id', required=True, type=str)
+    args = parser.parse_args()
+
+    # root_dir = args.root_dir
+    # video_id = args.video_id
+
+    date_list = ['20231015']
+
+    for date in date_list:
+        # date = '20231002'
+        root_dir = f'/share/datasets/HOI-mocap/{date}'
+
+        # video_list = [f'{date}_{str(i).zfill(3)}' for i in range(1, 54)]
+
+        # dir_list = os.listdir(root_dir)
+        # video_list = [dir for dir in dir_list if dir != 'camera_params' and 'cali' not in dir and not dir.endswith('txt')]
+        # video_list.sort()
+
+        video_list = get_valid_video_list(date, remove_hand=False)
+        # video_list = [id for id in video_list if 'hand' in id]
+
+        print(video_list)
+
+        for video_id in tqdm(video_list):
+            try:
+                assert os.path.exists(root_dir)
+                video_dir = os.path.join(root_dir, video_id)
+                assert os.path.exists(video_dir)
+
+                metadata_dir = os.path.join('/share/hlyang/results', date, video_id, 'metadata')
+
+                # some recordings are broken and have no metadata; skip them
+                if not os.path.exists(metadata_dir) or len(os.listdir(metadata_dir)) == 0:
+                    print(f'{video_id} is partially broken: no metadata')
+                    continue
+
+                sub_video_root = os.path.join('/share/hlyang/results', date, video_id, 'sub_video')
+
+                # skip videos that have already been processed
+                # if os.path.exists(sub_video_root):
+                #     continue
+
+                os.makedirs(sub_video_root, exist_ok=True)
+
+                procs = []
+
+                for camera_id in camera_list:
+                    metadata_path = os.path.join(metadata_dir, camera_id + '.pkl')
+                    with open(metadata_path, 'rb') as f:
+                        metadata = pickle.load(f)
+                    num_frame = metadata['num_frame']
+                    original_num_frame = metadata['original_num_frame']
+                    cnt2frame_id_dict = metadata['cnt2frame_id_dict']
+
+                    video_path = os.path.join(video_dir, 'rgb', camera_id + '.mp4')
+                    sub_video_dir = os.path.join(sub_video_root, camera_id)
+                    os.makedirs(sub_video_dir, exist_ok=True)
+
+                    # mp42img(video_path, img_dir, num_frame, res_prefix = camera_id + '_')
+                    args = (video_path, sub_video_dir, num_frame, original_num_frame, cnt2frame_id_dict, camera_id + '_')
+                    proc = mlp.Process(target=divide_mp4, args=args)
+                    proc.start()
+                    procs.append(proc)
+
+                for i in range(len(procs)):
+                    procs[i].join()
+            except Exception as err:
+                traceback.print_exc()
+                print(err)
+                print(f'{video_id} failed!')
\ No newline at end of file
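Given the chunking rule above (BATCH_SIZE = 20, a new chunk starts whenever the synchronized frame id satisfies `frame_id % 20 == 3`, and the leading frames stay in chunk '00001'), the sub-video holding a given frame can be located as sketched below; this helper is not part of the script and assumes no frames are missing from `cnt2frame_id_dict`.

```python
def representative_frame_id(frame_id: int, batch_size: int = 20) -> str:
    # Returns the zero-padded id of the first frame of the chunk containing frame_id.
    if frame_id < 3:
        return str(1).zfill(5)
    return str(frame_id - (frame_id - 3) % batch_size).zfill(5)

# Frame 57 falls in the chunk starting at frame 43, i.e. the clip <camera_id>_00043.mp4.
assert representative_frame_id(57) == '00043'
```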
diff --git a/utils/utils/video2sub_video2.py b/utils/utils/video2sub_video2.py
new file mode 100755
index 0000000000000000000000000000000000000000..07b1f66bf12aaf731a5213a4433df5a19af04f21
--- /dev/null
+++ b/utils/utils/video2sub_video2.py
@@ -0,0 +1,144 @@
+'''
+Read the per-camera pkl metadata to get the number of frames of each video, then decode every video frame by frame.
+
+Each batch of frames is packed into one mp4 sub-video, named after the first frame of the batch.
+
+example:
+python utils/video2img.py --root_dir /share/datasets/HOI-mocap/20230904 --video_id 20230904_01
+'''
+import traceback
+import os
+import sys
+sys.path.append('.')
+import cv2
+from tqdm import tqdm
+import argparse
+import pickle
+import multiprocessing as mlp
+from utils.hoi_io2 import get_valid_video_list
+
+def divide_mp4(video_path, sub_video_dir, num_frame, original_num_frame, cnt2frame_id_dict, res_prefix='', res_suffix=''):
+    os.makedirs(sub_video_dir, exist_ok=True)
+    assert os.path.exists(video_path)
+
+    cap = cv2.VideoCapture(video_path)
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+    cap.set(cv2.CAP_PROP_FOURCC, fourcc)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+    # print(fps, W, H)
+
+    suc = cap.isOpened()
+    BATCH_SIZE = 20
+
+    cnt = 0  # index of the frame in the original video
+    represent_frame_id = str(1).zfill(5)
+    img_list = []
+    while True:
+        suc, img = cap.read()
+        if not suc:
+            break
+        cnt += 1
+        frame_id = cnt2frame_id_dict.get(cnt, None)  # index of the frame in the matched (synchronized) ordering
+        if frame_id is not None:
+            if int(frame_id) % BATCH_SIZE == 3:
+
+                # write out the current sub-video
+                fourcc2 = cv2.VideoWriter_fourcc(*'mp4v')
+                sub_video_save_path = os.path.join(sub_video_dir, res_prefix + represent_frame_id + res_suffix + '.mp4')
+                videoWriter = cv2.VideoWriter(sub_video_save_path, fourcc2, fps, (W, H))
+                for img in img_list:
+                    videoWriter.write(img)
+                videoWriter.release()
+
+                # reset the state for the next chunk
+                img_list = []
+                represent_frame_id = frame_id
+
+            img_list.append(img)
+
+    # save the last chunk
+    fourcc2 = cv2.VideoWriter_fourcc(*'mp4v')
+    sub_video_save_path = os.path.join(sub_video_dir, res_prefix + represent_frame_id + res_suffix + '.mp4')
+    videoWriter = cv2.VideoWriter(sub_video_save_path, fourcc2, fps, (W, H))
+    for img in img_list:
+        videoWriter.write(img)
+    videoWriter.release()
+
+    if cnt != original_num_frame:
+        print(f'video: {video_path} cnt: {cnt}, original_num_frame: {original_num_frame}')
+        # assert cnt == original_num_frame, f'video: {video_path} cnt: {cnt}, original_num_frame: {original_num_frame}'
+
+    cap.release()
+
+
+if __name__ == "__main__":
+    camera_list = ['21218078', '22070938', '22139905', '22139906', '22139908', '22139909', '22139910', '22139911', '22139913', '22139914', '22139916', '22139946']
+
+    parser = argparse.ArgumentParser()
+    args = parser.parse_args()
+
+    # date_list = ['20231103', '20231104', '20231105']
+    date_list = ['20230930']
+
+    for date in date_list:
+        # date = '20231002'
+        upload_root_dir = '/data2/HOI-mocap'
+        upload_date_root = os.path.join(upload_root_dir, date)
+
+        save_root = '/data2/hlyang/results'
+
+        video_list = get_valid_video_list(save_root, date, remove_hand=False)
+        # video_list = [id for id in video_list if 'hand' in id]
+
+        print(video_list)
+
+        for video_id in tqdm(video_list):
+            try:
+                assert os.path.exists(upload_date_root)
+                video_dir = os.path.join(upload_date_root, video_id)
+                assert os.path.exists(video_dir)
+
+                metadata_dir = os.path.join(save_root, date, video_id, 'metadata')
+
+                # some recordings are broken and have no metadata; skip them
+                if not os.path.exists(metadata_dir) or len(os.listdir(metadata_dir)) == 0:
+                    print(f'{video_id} is partially broken: no metadata')
+                    continue
+
+                sub_video_root = os.path.join(save_root, date, video_id, 'sub_video')
+
+                # skip videos that have already been processed
+                # if os.path.exists(sub_video_root):
+                #     continue
+
+                os.makedirs(sub_video_root, exist_ok=True)
+
+                procs = []
+
+                for camera_id in camera_list:
+                    metadata_path = os.path.join(metadata_dir, camera_id + '.pkl')
+                    with open(metadata_path, 'rb') as f:
+                        metadata = pickle.load(f)
+                    num_frame = metadata['num_frame']
+                    original_num_frame = metadata['original_num_frame']
+                    cnt2frame_id_dict = metadata['cnt2frame_id_dict']
+
+                    video_path = os.path.join(video_dir, 'rgb', camera_id + '.mp4')
+                    sub_video_dir = os.path.join(sub_video_root, camera_id)
+                    os.makedirs(sub_video_dir, exist_ok=True)
+
+                    # mp42img(video_path, img_dir, num_frame, res_prefix = camera_id + '_')
+                    args = (video_path, sub_video_dir, num_frame, original_num_frame, cnt2frame_id_dict, camera_id + '_')
+                    proc = mlp.Process(target=divide_mp4, args=args)
+                    proc.start()
+                    procs.append(proc)
+
+                for i in range(len(procs)):
+                    procs[i].join()
+            except Exception as err:
+                traceback.print_exc()
+                print(err)
+                print(f'{video_id} failed!')
\ No newline at end of file
diff --git a/utils/utils/vis_mask.py b/utils/utils/vis_mask.py
new file mode 100755
index 0000000000000000000000000000000000000000..58b6a1aeca01021d427c1cd76de612e13bd33c42
--- /dev/null
+++ b/utils/utils/vis_mask.py
@@ -0,0 +1,90 @@
+'''
+Visualize the annotation results after interpolating them up to (3000, 4096).
+
+example:
+python utils/vis_mask.py --video_id 20230715_15
+'''
+
+import os
+import sys
+current_dir = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(current_dir)
+import cv2
+from tqdm import tqdm
+import argparse
+import pickle
+import multiprocessing as mlp
+import numpy as np
+from hoi_io import load_bg_img
+
+def vis_mask(camera_id, video_id, img_root, anno_results_dir, save_root, res_prefix='', res_suffix=''):
+
+    assert os.path.exists(img_root)
+    assert os.path.exists(anno_results_dir)
+    os.makedirs(save_root, exist_ok=True)
+    save_dir = os.path.join(save_root, camera_id)
+    os.makedirs(save_dir, exist_ok=True)
+
+    anno_path = os.path.join(anno_results_dir, video_id+'|'+camera_id+'.npy')
+    assert os.path.exists(anno_path)
+    anno_res = np.load(anno_path)
+
+    num_frame = anno_res.shape[0]
+    for i in tqdm(range(num_frame)):
+        frame_cnt = i + 1
+        frame_id = str(frame_cnt).zfill(5)
+        mask = anno_res[i]
+        bg = load_bg_img(video_id, camera_id, frame_id)
+
+        left_hand_mask = np.where(mask == 1, 1, 0).astype('uint8')
+        interpolated_img = cv2.resize(left_hand_mask, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        bg[interpolated_img == 1, 0] = 255
+        bg[interpolated_img == 1, 1:] = bg[interpolated_img == 1, 1:] / 2
+
+        right_hand_mask = np.where(mask == 2, 1, 0).astype('uint8')
+        interpolated_img = cv2.resize(right_hand_mask, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        bg[interpolated_img == 1, 1] = 255
+        bg[interpolated_img == 1, 0] = bg[interpolated_img == 1, 0] / 2
+        bg[interpolated_img == 1, 2] = bg[interpolated_img == 1, 2] / 2
+
+        object1_mask = np.where(mask == 3, 1, 0).astype('uint8')
+        interpolated_img = cv2.resize(object1_mask, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        bg[interpolated_img == 1, 2] = 255
+        bg[interpolated_img == 1, 0:2] = bg[interpolated_img == 1, 0:2] / 2
+
+        object2_mask = np.where(mask == 4, 1, 0).astype('uint8')
+        interpolated_img = cv2.resize(object2_mask, (4096, 3000), interpolation=cv2.INTER_LINEAR)
+        bg[interpolated_img == 1, 1:2] = 255
+        bg[interpolated_img == 1, 0] = bg[interpolated_img == 1, 0] / 2
+
+        img_save_path = os.path.join(save_dir, res_prefix + frame_id + res_suffix + '.png')
+        cv2.imwrite(img_save_path, bg)
+
+if __name__ == "__main__":
+    camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946']
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--video_id', required=True, type=str)
+    args = parser.parse_args()
+    video_id = args.video_id
+
+    img_root = os.path.join('.', 'results', video_id, 'imgs')
+    assert os.path.exists(img_root)
+
+    mask_root = os.path.join('/share/hlyang/results', video_id, 'mask')
+    os.makedirs(mask_root, exist_ok=True)
+
+    anno_results_dir = os.path.join('.', 'results', video_id, 'original_anno_results')
+    assert os.path.exists(anno_results_dir)
+
+    procs = []
+
+    for camera_id in camera_list:
+        args = (camera_id, video_id, img_root, anno_results_dir, mask_root, camera_id+'_')
+        proc = mlp.Process(target=vis_mask, args=args)
+        proc.start()
+        procs.append(proc)
+
+    for i in range(len(procs)):
+        procs[i].join()
\ No newline at end of file
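The per-label blending in `vis_mask.py` can be factored into a small helper; this is only an equivalent sketch for the single-channel (hand) case, where the chosen channel is saturated and the other two are halved wherever the upsampled binary mask is set.

```python
import numpy as np

def blend_mask(bg: np.ndarray, binary_mask: np.ndarray, channel: int) -> np.ndarray:
    # bg: (H, W, 3) uint8 image; binary_mask: (H, W) with values in {0, 1}.
    out = bg.copy()
    on = binary_mask == 1
    for c in range(3):
        out[on, c] = 255 if c == channel else out[on, c] // 2
    return out
```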
diff --git a/utils/utils/vis_mask2.py b/utils/utils/vis_mask2.py
new file mode 100755
index 0000000000000000000000000000000000000000..590786a17d329cee93c2015d74ae1a4a9136fa90
--- /dev/null
+++ b/utils/utils/vis_mask2.py
@@ -0,0 +1,95 @@
+'''
+Visualize the annotation results at the downsampled resolution.
+
+example:
+python utils/vis_mask2.py --video_id 20230715_15
+'''
+
+import os
+import sys
+current_dir = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(current_dir)
+import cv2
+from tqdm import tqdm
+import argparse
+import pickle
+import multiprocessing as mlp
+import numpy as np
+from hoi_io import load_bg_img, get_downsampled_seg_infos_batch
+from scandir import scandir
+
+def vis_mask(camera_id, video_id, anno_results_dir, save_root, res_prefix='', res_suffix=''):
+    assert os.path.exists(anno_results_dir)
+    os.makedirs(save_root, exist_ok=True)
+    save_dir = os.path.join(save_root, camera_id)
+    os.makedirs(save_dir, exist_ok=True)
+
+    metadata_path = os.path.join('/share/hlyang/results', video_id, 'metadata', camera_id + '.pkl')
+    assert os.path.exists(metadata_path)
+    with open(metadata_path, 'rb') as f:
+        metadata = pickle.load(f)
+    num_frame = metadata['num_frame']
+
+    frame_list = [str(i+1).zfill(5) for i in range(num_frame)]
+    seg, downsample_factor = get_downsampled_seg_infos_batch(video_id, frame_list, [camera_id])
+    downsampled_width = 4096 // downsample_factor
+    downsampled_height = 3000 // downsample_factor
+    # seg_right_hand = np.where(seg == 1, 1, 0)
+    # seg_left_hand = np.where(seg == 2, 1, 0)
+    # seg_object1 = np.where(seg == 3, 1, 0)
+    # seg_object2 = np.where(seg == 4, 1, 0)
+
+    num_frame = seg.shape[0]
+    for i in tqdm(range(num_frame)):
+        frame_cnt = i + 1
+        frame_id = str(frame_cnt).zfill(5)
+        mask = seg[i, 0]
+        bg = load_bg_img(video_id, camera_id, frame_id)
+        bg = cv2.resize(bg, (downsampled_width, downsampled_height), interpolation=cv2.INTER_LINEAR)
+
+        right_hand_mask = np.where(mask == 1, 1, 0).astype('uint8')
+        bg[right_hand_mask == 1, 2] = 255
+        bg[right_hand_mask == 1, 0] = bg[right_hand_mask == 1, 0] / 2
+        bg[right_hand_mask == 1, 1] = bg[right_hand_mask == 1, 1] / 2
+
+        left_hand_mask = np.where(mask == 2, 1, 0).astype('uint8')
+        bg[left_hand_mask == 1, 0] = 255
+        bg[left_hand_mask == 1, 1:] = bg[left_hand_mask == 1, 1:] / 2
+
+        object1_mask = np.where(mask == 3, 1, 0).astype('uint8')
+        bg[object1_mask == 1, 0] = 255
+        bg[object1_mask == 1, 2] = 255
+        bg[object1_mask == 1, 1] = bg[object1_mask == 1, 1] / 2
+
+        object2_mask = np.where(mask == 4, 1, 0).astype('uint8')
+        bg[object2_mask == 1, 1] = 255
+        bg[object2_mask == 1, 0] = bg[object2_mask == 1, 0] / 2
+        bg[object2_mask == 1, 2] = bg[object2_mask == 1, 2] / 2
+
+        img_save_path = os.path.join(save_dir, res_prefix + frame_id + res_suffix + '.png')
+        cv2.imwrite(img_save_path, bg)
+
+if __name__ == "__main__":
+    camera_list = ['22070938', '22139905', '22139909', '22139910', '22139911', '22139913', '22139916', '22139946']
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--video_id', required=True, type=str)
+    args = parser.parse_args()
+    video_id = args.video_id
+
+    mask_root = os.path.join('/share/hlyang/results', video_id, 'mask')
+    os.makedirs(mask_root, exist_ok=True)
+
+    anno_results_dir = os.path.join('.', 'results', video_id, 'anno_results')
+    assert os.path.exists(anno_results_dir)
+
+    procs = []
+
+    for camera_id in camera_list:
+        args = (camera_id, video_id, anno_results_dir, mask_root, camera_id+'_')
+        proc = mlp.Process(target=vis_mask, args=args)
+        proc.start()
+        procs.append(proc)
+
+    for i in range(len(procs)):
+        procs[i].join()
\ No newline at end of file
diff --git a/utils/utils/wide_crop.py b/utils/utils/wide_crop.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c36623e497439a2c42c4b4e895429b1e9ea7f73
--- /dev/null
+++ b/utils/utils/wide_crop.py
@@ -0,0 +1,5 @@
+
+CROP_INFO_21218078 = [300, 1000, 500, 1500]
+CROP_INFO_22139906 = [600, 1750, 1215, 2785]
+CROP_INFO_22139908 = [700, 2400, 1700, 3360]
+CROP_INFO_22139914 = [900, 2100, 750, 2350]
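A sketch of applying the per-camera crop windows above. The meaning of the four numbers is an assumption here (read as [row_min, row_max, col_min, col_max] in pixel coordinates) and should be checked against the images before use.

```python
import cv2
from utils.utils.wide_crop import CROP_INFO_21218078

def apply_wide_crop(img, crop_info):
    # Assumes crop_info = [row_min, row_max, col_min, col_max].
    r0, r1, c0, c1 = crop_info
    return img[r0:r1, c0:c1]

# cropped = apply_wide_crop(cv2.imread('frame_00001.png'), CROP_INFO_21218078)
```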