Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- external/peract_bimanual/agents/act_bc_lang/detr/LICENSE +201 -0
- external/peract_bimanual/agents/act_bc_lang/detr/models/__init__.py +9 -0
- external/peract_bimanual/agents/act_bc_lang/detr/models/backbone.py +120 -0
- external/peract_bimanual/agents/act_bc_lang/detr/models/detr_vae.py +276 -0
- external/peract_bimanual/agents/act_bc_lang/detr/models/position_encoding.py +91 -0
- external/peract_bimanual/agents/act_bc_lang/detr/models/transformer.py +312 -0
- external/peract_bimanual/agents/act_bc_lang/detr/util/misc.py +468 -0
- external/peract_bimanual/agents/arm/__init__.py +1 -0
- external/pyrep/.gitignore +102 -0
- external/pyrep/LICENSE +21 -0
- external/pyrep/README.md +284 -0
- external/pyrep/pyrep/__init__.py +5 -0
- external/pyrep/pyrep/const.py +110 -0
- external/pyrep/pyrep/errors.py +30 -0
- external/pyrep/pyrep/pyrep.py +332 -0
- external/pyrep/pyrep/robots/__init__.py +0 -0
- external/pyrep/pyrep/robots/arms/lbr_iiwa_14_r820.py +7 -0
- external/pyrep/pyrep/robots/arms/lbr_iiwa_7_r800.py +7 -0
- external/pyrep/pyrep/robots/arms/locobot_arm.py +7 -0
- external/pyrep/pyrep/robots/arms/mico.py +7 -0
- external/pyrep/pyrep/robots/arms/panda.py +7 -0
- external/pyrep/pyrep/robots/arms/sawyer.py +7 -0
- external/pyrep/pyrep/robots/arms/ur10.py +7 -0
- external/pyrep/pyrep/robots/arms/ur3.py +7 -0
- external/pyrep/pyrep/robots/arms/ur5.py +7 -0
- external/pyrep/pyrep/robots/arms/xarm7.py +7 -0
- external/pyrep/pyrep/robots/arms/youBot.py +7 -0
- external/pyrep/pyrep/robots/configuration_paths/__init__.py +0 -0
- external/pyrep/pyrep/robots/configuration_paths/arm_configuration_path.py +199 -0
- external/pyrep/pyrep/robots/configuration_paths/configuration_path.py +18 -0
- external/pyrep/pyrep/robots/configuration_paths/holonomic_configuration_path.py +71 -0
- external/pyrep/pyrep/robots/configuration_paths/mobile_configuration_path.py +114 -0
- external/pyrep/pyrep/robots/configuration_paths/nonholonomic_configuration_path.py +55 -0
- external/pyrep/pyrep/robots/end_effectors/__init__.py +0 -0
- external/pyrep/pyrep/robots/end_effectors/baxter_gripper.py +9 -0
- external/pyrep/pyrep/robots/end_effectors/baxter_suction_cup.py +7 -0
- external/pyrep/pyrep/robots/end_effectors/dobot_suction_cup.py +7 -0
- external/pyrep/pyrep/robots/end_effectors/dual_panda_gripper.py +22 -0
- external/pyrep/pyrep/robots/end_effectors/gripper.py +149 -0
- external/pyrep/pyrep/robots/end_effectors/jaco_gripper.py +10 -0
- external/pyrep/pyrep/robots/end_effectors/locobot_gripper.py +8 -0
- external/pyrep/pyrep/robots/end_effectors/mico_gripper.py +9 -0
- external/pyrep/pyrep/robots/end_effectors/panda_gripper.py +8 -0
- external/pyrep/pyrep/robots/end_effectors/robotiq85_gripper.py +58 -0
- external/pyrep/pyrep/robots/end_effectors/suction_cup.py +57 -0
- external/pyrep/pyrep/robots/end_effectors/xarm_gripper.py +13 -0
- external/pyrep/pyrep/robots/mobiles/holonomic_base.py +215 -0
- external/pyrep/pyrep/robots/mobiles/line_tracer.py +6 -0
- external/pyrep/pyrep/robots/mobiles/locobot.py +6 -0
- external/pyrep/pyrep/robots/mobiles/mobile_base.py +202 -0
external/peract_bimanual/agents/act_bc_lang/detr/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright 2020 - present, Facebook, Inc
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
external/peract_bimanual/agents/act_bc_lang/detr/models/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
from .detr_vae import build as build_vae
|
| 3 |
+
from .detr_vae import build_cnnmlp as build_cnnmlp
|
| 4 |
+
|
| 5 |
+
def build_ACT_model(args):
|
| 6 |
+
return build_vae(args)
|
| 7 |
+
|
| 8 |
+
def build_CNNMLP_model(args):
|
| 9 |
+
return build_cnnmlp(args)
|
external/peract_bimanual/agents/act_bc_lang/detr/models/backbone.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Backbone modules.
|
| 4 |
+
"""
|
| 5 |
+
from collections import OrderedDict
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn.functional as F
|
| 9 |
+
import torchvision
|
| 10 |
+
from torch import nn
|
| 11 |
+
from torchvision.models._utils import IntermediateLayerGetter
|
| 12 |
+
from typing import Dict, List
|
| 13 |
+
|
| 14 |
+
from agents.act_bc_lang.detr.util.misc import NestedTensor, is_main_process
|
| 15 |
+
|
| 16 |
+
from .position_encoding import build_position_encoding
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class FrozenBatchNorm2d(torch.nn.Module):
|
| 20 |
+
"""
|
| 21 |
+
BatchNorm2d where the batch statistics and the affine parameters are fixed.
|
| 22 |
+
|
| 23 |
+
Copy-paste from torchvision.misc.ops with added eps before rqsrt,
|
| 24 |
+
without which any other policy_models than torchvision.policy_models.resnet[18,34,50,101]
|
| 25 |
+
produce nans.
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
def __init__(self, n):
|
| 29 |
+
super(FrozenBatchNorm2d, self).__init__()
|
| 30 |
+
self.register_buffer("weight", torch.ones(n))
|
| 31 |
+
self.register_buffer("bias", torch.zeros(n))
|
| 32 |
+
self.register_buffer("running_mean", torch.zeros(n))
|
| 33 |
+
self.register_buffer("running_var", torch.ones(n))
|
| 34 |
+
|
| 35 |
+
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
|
| 36 |
+
missing_keys, unexpected_keys, error_msgs):
|
| 37 |
+
num_batches_tracked_key = prefix + 'num_batches_tracked'
|
| 38 |
+
if num_batches_tracked_key in state_dict:
|
| 39 |
+
del state_dict[num_batches_tracked_key]
|
| 40 |
+
|
| 41 |
+
super(FrozenBatchNorm2d, self)._load_from_state_dict(
|
| 42 |
+
state_dict, prefix, local_metadata, strict,
|
| 43 |
+
missing_keys, unexpected_keys, error_msgs)
|
| 44 |
+
|
| 45 |
+
def forward(self, x):
|
| 46 |
+
# move reshapes to the beginning
|
| 47 |
+
# to make it fuser-friendly
|
| 48 |
+
w = self.weight.reshape(1, -1, 1, 1)
|
| 49 |
+
b = self.bias.reshape(1, -1, 1, 1)
|
| 50 |
+
rv = self.running_var.reshape(1, -1, 1, 1)
|
| 51 |
+
rm = self.running_mean.reshape(1, -1, 1, 1)
|
| 52 |
+
eps = 1e-5
|
| 53 |
+
scale = w * (rv + eps).rsqrt()
|
| 54 |
+
bias = b - rm * scale
|
| 55 |
+
return x * scale + bias
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class BackboneBase(nn.Module):
|
| 59 |
+
|
| 60 |
+
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
|
| 61 |
+
super().__init__()
|
| 62 |
+
# for name, parameter in backbone.named_parameters(): # only train later layers # TODO do we want this?
|
| 63 |
+
# if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
|
| 64 |
+
# parameter.requires_grad_(False)
|
| 65 |
+
if return_interm_layers:
|
| 66 |
+
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
|
| 67 |
+
else:
|
| 68 |
+
return_layers = {'layer4': "0"}
|
| 69 |
+
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
|
| 70 |
+
self.num_channels = num_channels
|
| 71 |
+
|
| 72 |
+
def forward(self, tensor):
|
| 73 |
+
xs = self.body(tensor)
|
| 74 |
+
return xs
|
| 75 |
+
# out: Dict[str, NestedTensor] = {}
|
| 76 |
+
# for name, x in xs.items():
|
| 77 |
+
# m = tensor_list.mask
|
| 78 |
+
# assert m is not None
|
| 79 |
+
# mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
|
| 80 |
+
# out[name] = NestedTensor(x, mask)
|
| 81 |
+
# return out
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class Backbone(BackboneBase):
|
| 85 |
+
"""ResNet backbone with frozen BatchNorm."""
|
| 86 |
+
def __init__(self, name: str,
|
| 87 |
+
train_backbone: bool,
|
| 88 |
+
return_interm_layers: bool,
|
| 89 |
+
dilation: bool):
|
| 90 |
+
backbone = getattr(torchvision.models, name)(
|
| 91 |
+
replace_stride_with_dilation=[False, False, dilation],
|
| 92 |
+
pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d) # pretrained # TODO do we want frozen batch_norm??
|
| 93 |
+
num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
|
| 94 |
+
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class Joiner(nn.Sequential):
|
| 98 |
+
def __init__(self, backbone, position_embedding):
|
| 99 |
+
super().__init__(backbone, position_embedding)
|
| 100 |
+
|
| 101 |
+
def forward(self, tensor_list: NestedTensor):
|
| 102 |
+
xs = self[0](tensor_list)
|
| 103 |
+
out: List[NestedTensor] = []
|
| 104 |
+
pos = []
|
| 105 |
+
for name, x in xs.items():
|
| 106 |
+
out.append(x)
|
| 107 |
+
# position encoding
|
| 108 |
+
pos.append(self[1](x).to(x.dtype))
|
| 109 |
+
|
| 110 |
+
return out, pos
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def build_backbone(args):
|
| 114 |
+
position_embedding = build_position_encoding(args)
|
| 115 |
+
train_backbone = args.lr_backbone > 0
|
| 116 |
+
return_interm_layers = args.masks
|
| 117 |
+
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
|
| 118 |
+
model = Joiner(backbone, position_embedding)
|
| 119 |
+
model.num_channels = backbone.num_channels
|
| 120 |
+
return model
|
external/peract_bimanual/agents/act_bc_lang/detr/models/detr_vae.py
ADDED
|
@@ -0,0 +1,276 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
DETR model and criterion classes.
|
| 4 |
+
"""
|
| 5 |
+
import torch
|
| 6 |
+
from torch import nn
|
| 7 |
+
from torch.autograd import Variable
|
| 8 |
+
from .backbone import build_backbone
|
| 9 |
+
from .transformer import build_transformer, TransformerEncoder, TransformerEncoderLayer
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def reparametrize(mu, logvar):
|
| 15 |
+
std = logvar.div(2).exp()
|
| 16 |
+
eps = Variable(std.data.new(std.size()).normal_())
|
| 17 |
+
return mu + std * eps
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def get_sinusoid_encoding_table(n_position, d_hid):
|
| 21 |
+
def get_position_angle_vec(position):
|
| 22 |
+
return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
|
| 23 |
+
|
| 24 |
+
sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
|
| 25 |
+
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
|
| 26 |
+
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
|
| 27 |
+
|
| 28 |
+
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class DETRVAE(nn.Module):
|
| 32 |
+
""" This is the DETR module that performs object detection """
|
| 33 |
+
def __init__(self, backbones, transformer, encoder, state_dim,
|
| 34 |
+
num_queries, camera_names, input_dim):
|
| 35 |
+
""" Initializes the model.
|
| 36 |
+
Parameters:
|
| 37 |
+
backbones: torch module of the backbone to be used. See backbone.py
|
| 38 |
+
transformer: torch module of the transformer architecture. See transformer.py
|
| 39 |
+
state_dim: robot state dimension of the environment
|
| 40 |
+
num_queries: number of object queries, ie detection slot. This is the maximal number of objects
|
| 41 |
+
DETR can detect in a single image. For COCO, we recommend 100 queries.
|
| 42 |
+
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
|
| 43 |
+
"""
|
| 44 |
+
super().__init__()
|
| 45 |
+
self.num_queries = num_queries
|
| 46 |
+
self.camera_names = camera_names
|
| 47 |
+
self.transformer = transformer
|
| 48 |
+
self.encoder = encoder
|
| 49 |
+
hidden_dim = transformer.d_model
|
| 50 |
+
self.input_dim = input_dim
|
| 51 |
+
self.action_head = nn.Linear(hidden_dim, state_dim)
|
| 52 |
+
self.is_pad_head = nn.Linear(hidden_dim, 1)
|
| 53 |
+
self.query_embed = nn.Embedding(num_queries, hidden_dim)
|
| 54 |
+
if backbones is not None:
|
| 55 |
+
self.input_proj = nn.Conv2d(backbones[0].num_channels, hidden_dim, kernel_size=1)
|
| 56 |
+
self.backbones = nn.ModuleList(backbones)
|
| 57 |
+
self.input_proj_robot_state = nn.Linear(self.input_dim, hidden_dim)
|
| 58 |
+
else:
|
| 59 |
+
# input_dim + 7 # robot_state + env_state
|
| 60 |
+
self.input_proj_robot_state = nn.Linear(self.input_dim, hidden_dim)
|
| 61 |
+
self.input_proj_env_state = nn.Linear(self.input_dim, hidden_dim)
|
| 62 |
+
self.pos = torch.nn.Embedding(2, hidden_dim)
|
| 63 |
+
self.backbones = None
|
| 64 |
+
|
| 65 |
+
# encoder extra parameters
|
| 66 |
+
self.latent_dim = 32 # final size of latent z # TODO tune
|
| 67 |
+
self.cls_embed = nn.Embedding(1, hidden_dim) # extra cls token embedding
|
| 68 |
+
self.encoder_proj = nn.Linear(self.input_dim, hidden_dim) # project state to embedding
|
| 69 |
+
self.latent_proj = nn.Linear(hidden_dim, self.latent_dim*2) # project hidden state to latent std, var
|
| 70 |
+
self.register_buffer('pos_table', get_sinusoid_encoding_table(num_queries+1, hidden_dim))
|
| 71 |
+
|
| 72 |
+
# decoder extra parameters
|
| 73 |
+
self.latent_out_proj = nn.Linear(self.latent_dim, hidden_dim) # project latent sample to embedding
|
| 74 |
+
self.additional_pos_embed = nn.Embedding(2, hidden_dim) # learned position embedding for proprio and latent
|
| 75 |
+
|
| 76 |
+
def forward(self, qpos, image, env_state, actions=None, is_pad=None):
|
| 77 |
+
"""
|
| 78 |
+
qpos: batch, qpos_dim
|
| 79 |
+
image: batch, num_cam, channel, height, width
|
| 80 |
+
env_state: None
|
| 81 |
+
actions: batch, seq, action_dim
|
| 82 |
+
"""
|
| 83 |
+
is_training = actions is not None # train or val
|
| 84 |
+
bs, _ = qpos.shape
|
| 85 |
+
### Obtain latent z from action sequence
|
| 86 |
+
if is_training:
|
| 87 |
+
# project action sequence to embedding dim, and concat with a CLS token
|
| 88 |
+
action_embed = self.encoder_proj(actions) # (bs, seq, hidden_dim)
|
| 89 |
+
cls_embed = self.cls_embed.weight # (1, hidden_dim)
|
| 90 |
+
cls_embed = torch.unsqueeze(cls_embed, axis=0).repeat(bs, 1, 1) # (bs, 1, hidden_dim)
|
| 91 |
+
encoder_input = torch.cat([cls_embed, action_embed], axis=1) # (bs, seq+1, hidden_dim)
|
| 92 |
+
encoder_input = encoder_input.permute(1, 0, 2) # (seq+1, bs, hidden_dim)
|
| 93 |
+
# do not mask cls token
|
| 94 |
+
cls_is_pad = torch.full((bs, 1), False).to(qpos.device) # False: not a padding
|
| 95 |
+
is_pad = torch.cat([cls_is_pad, is_pad], axis=1) # (bs, seq+1)
|
| 96 |
+
# obtain position embedding
|
| 97 |
+
pos_embed = self.pos_table.clone().detach()
|
| 98 |
+
pos_embed = pos_embed.permute(1, 0, 2) # (seq+1, 1, hidden_dim)
|
| 99 |
+
# query model
|
| 100 |
+
encoder_output = self.encoder(encoder_input, pos=pos_embed, src_key_padding_mask=is_pad)
|
| 101 |
+
encoder_output = encoder_output[0] # take cls output only
|
| 102 |
+
latent_info = self.latent_proj(encoder_output)
|
| 103 |
+
mu = latent_info[:, :self.latent_dim]
|
| 104 |
+
logvar = latent_info[:, self.latent_dim:]
|
| 105 |
+
latent_sample = reparametrize(mu, logvar)
|
| 106 |
+
latent_input = self.latent_out_proj(latent_sample)
|
| 107 |
+
else:
|
| 108 |
+
mu = logvar = None
|
| 109 |
+
latent_sample = torch.zeros([bs, self.latent_dim], dtype=torch.float32).to(qpos.device)
|
| 110 |
+
latent_input = self.latent_out_proj(latent_sample)
|
| 111 |
+
|
| 112 |
+
if self.backbones is not None:
|
| 113 |
+
# Image observation features and position embeddings
|
| 114 |
+
all_cam_features = []
|
| 115 |
+
all_cam_pos = []
|
| 116 |
+
for cam_id, cam_name in enumerate(self.camera_names):
|
| 117 |
+
features, pos = self.backbones[cam_id](image[:, cam_id])
|
| 118 |
+
features = features[0] # take the last layer feature
|
| 119 |
+
pos = pos[0]
|
| 120 |
+
all_cam_features.append(self.input_proj(features))
|
| 121 |
+
all_cam_pos.append(pos)
|
| 122 |
+
# proprioception features
|
| 123 |
+
proprio_input = self.input_proj_robot_state(qpos)
|
| 124 |
+
# fold camera dimension into width dimension
|
| 125 |
+
src = torch.cat(all_cam_features, axis=3)
|
| 126 |
+
pos = torch.cat(all_cam_pos, axis=3)
|
| 127 |
+
hs = self.transformer(src, None, self.query_embed.weight, pos, latent_input, proprio_input, self.additional_pos_embed.weight)[0]
|
| 128 |
+
else:
|
| 129 |
+
qpos = self.input_proj_robot_state(qpos)
|
| 130 |
+
env_state = self.input_proj_env_state(env_state)
|
| 131 |
+
transformer_input = torch.cat([qpos, env_state], axis=1) # seq length = 2
|
| 132 |
+
hs = self.transformer(transformer_input, None, self.query_embed.weight, self.pos.weight)[0]
|
| 133 |
+
a_hat = self.action_head(hs)
|
| 134 |
+
is_pad_hat = self.is_pad_head(hs)
|
| 135 |
+
return a_hat, is_pad_hat, [mu, logvar]
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
class CNNMLP(nn.Module):
    """CNN + MLP baseline policy: per-camera conv features are flattened,
    concatenated with the robot state, and mapped to an action by an MLP.
    """

    def __init__(self, backbones, state_dim, camera_names):
        """ Initializes the model.
        Parameters:
            backbones: list of torch backbone modules, one per camera. See backbone.py
            state_dim: robot state dimension of the environment
            camera_names: camera identifiers; only their count/order is used here
        """
        super().__init__()
        self.camera_names = camera_names
        self.action_head = nn.Linear(1000, state_dim) # TODO add more
        if backbones is None:
            # A state-only (no image) variant is not implemented for this baseline.
            raise NotImplementedError
        self.backbones = nn.ModuleList(backbones)
        # One small conv stack per camera that shrinks the backbone channels
        # down to 32 feature maps.
        self.backbone_down_projs = nn.ModuleList(
            [
                nn.Sequential(
                    nn.Conv2d(bb.num_channels, 128, kernel_size=5),
                    nn.Conv2d(128, 64, kernel_size=5),
                    nn.Conv2d(64, 32, kernel_size=5),
                )
                for bb in backbones
            ]
        )

        # 768 = flattened per-camera feature size, 14 = qpos dim
        # (presumably tied to the training image resolution -- TODO confirm).
        self.mlp = mlp(
            input_dim=768 * len(backbones) + 14,
            hidden_dim=1024,
            output_dim=14,
            hidden_depth=2,
        )

    def forward(self, qpos, image, env_state, actions=None):
        """
        qpos: batch, qpos_dim
        image: batch, num_cam, channel, height, width
        env_state: None
        actions: batch, seq, action_dim
        """
        is_training = actions is not None # train or val (currently unused)
        bs, _ = qpos.shape
        # Image observation features (position embeddings are discarded).
        per_cam = []
        for idx, _name in enumerate(self.camera_names):
            feats, pos = self.backbones[idx](image[:, idx])
            feats = feats[0]  # take the last layer feature
            pos = pos[0]  # not used
            per_cam.append(self.backbone_down_projs[idx](feats))
        # Flatten every camera map and fold in the proprioceptive state.
        flat = torch.cat([f.reshape([bs, -1]) for f in per_cam], axis=1)  # 768 each
        combined = torch.cat([flat, qpos], axis=1)  # qpos: 14
        a_hat = self.mlp(combined)
        return a_hat
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def mlp(input_dim, hidden_dim, output_dim, hidden_depth):
    """Build a plain MLP as an ``nn.Sequential``.

    ``hidden_depth == 0`` yields a single linear layer; otherwise the network
    has ``hidden_depth`` hidden layers of width ``hidden_dim`` with in-place
    ReLU activations, followed by a linear output layer.
    """
    if hidden_depth == 0:
        return nn.Sequential(nn.Linear(input_dim, output_dim))
    layers = [nn.Linear(input_dim, hidden_dim), nn.ReLU(inplace=True)]
    for _ in range(hidden_depth - 1):
        layers.extend([nn.Linear(hidden_dim, hidden_dim), nn.ReLU(inplace=True)])
    layers.append(nn.Linear(hidden_dim, output_dim))
    return nn.Sequential(*layers)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def build_encoder(args):
    """Construct the standalone transformer encoder (the CVAE-style encoder).

    Reads ``hidden_dim``, ``dropout``, ``nheads``, ``dim_feedforward``,
    ``enc_layers`` and ``pre_norm`` from ``args``; activation is fixed to ReLU.
    """
    layer = TransformerEncoderLayer(
        args.hidden_dim,       # d_model, e.g. 256
        args.nheads,           # e.g. 8
        args.dim_feedforward,  # e.g. 2048
        args.dropout,          # e.g. 0.1
        "relu",
        args.pre_norm,         # False by default
    )
    # A trailing LayerNorm is only required by the pre-norm variant.
    final_norm = nn.LayerNorm(args.hidden_dim) if args.pre_norm else None
    # NOTE: enc_layers (e.g. 4) is shared with the VAE decoder -- TODO split.
    return TransformerEncoder(layer, args.enc_layers, final_norm)
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
def build(args):
    """Build the DETR-VAE (ACT) policy from parsed command-line arguments."""
    # From image: one backbone per camera.
    # (A from-state variant would set backbones to None.)
    backbones = [build_backbone(args) for _ in args.camera_names]

    model = DETRVAE(
        backbones,
        build_transformer(args),  # main encoder-decoder used by the policy
        build_encoder(args),      # CVAE latent-variable encoder
        state_dim=args.input_dim,
        num_queries=args.num_queries,
        camera_names=args.camera_names,
        input_dim=args.input_dim,
    )

    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("number of parameters: %.2fM" % (n_parameters/1e6,))

    return model
|
| 254 |
+
|
| 255 |
+
def build_cnnmlp(args):
    """Build the CNNMLP baseline policy from parsed command-line arguments."""
    # From image: one backbone per camera.
    # (A from-state variant would set backbones to None.)
    backbones = [build_backbone(args) for _ in args.camera_names]

    model = CNNMLP(
        backbones,
        state_dim=args.input_dim,
        camera_names=args.camera_names,
    )

    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("number of parameters: %.2fM" % (n_parameters/1e6,))

    return model
|
| 276 |
+
|
external/peract_bimanual/agents/act_bc_lang/detr/models/position_encoding.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Various positional encodings for the transformer.
|
| 4 |
+
"""
|
| 5 |
+
import math
|
| 6 |
+
import torch
|
| 7 |
+
from torch import nn
|
| 8 |
+
|
| 9 |
+
from agents.act_bc_lang.detr.util.misc import NestedTensor
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        self.scale = 2 * math.pi if scale is None else scale

    def forward(self, tensor):
        """Return a (1, 2*num_pos_feats, H, W) embedding for ``tensor`` (N, C, H, W)."""
        x = tensor
        # No padding mask is used here: every pixel counts as valid
        # (the original DETR derived this from a NestedTensor mask).
        ones = torch.ones_like(x[0, [0]])
        y_embed = ones.cumsum(1, dtype=torch.float32)
        x_embed = ones.cumsum(2, dtype=torch.float32)
        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        # Geometric frequency ladder shared by paired sin/cos channels.
        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)

        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        # Interleave sin (even indices) and cos (odd indices) channels.
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        # Channel-first layout: y-codes first, then x-codes.
        return torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class PositionEmbeddingLearned(nn.Module):
    """
    Absolute pos embedding, learned.
    """

    def __init__(self, num_pos_feats=256):
        super().__init__()
        # 50 x 50 is the maximum supported feature-map size.
        self.row_embed = nn.Embedding(50, num_pos_feats)
        self.col_embed = nn.Embedding(50, num_pos_feats)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize both embedding tables uniformly."""
        nn.init.uniform_(self.row_embed.weight)
        nn.init.uniform_(self.col_embed.weight)

    def forward(self, tensor_list: NestedTensor):
        """Return a (N, 2*num_pos_feats, H, W) embedding for the wrapped tensor."""
        feat = tensor_list.tensors
        h, w = feat.shape[-2:]
        cols = self.col_embed(torch.arange(w, device=feat.device))
        rows = self.row_embed(torch.arange(h, device=feat.device))
        # Broadcast the 1-D row/col codes to a full (H, W, 2*num_pos_feats) grid.
        grid = torch.cat(
            [
                cols.unsqueeze(0).repeat(h, 1, 1),
                rows.unsqueeze(1).repeat(1, w, 1),
            ],
            dim=-1,
        )
        return grid.permute(2, 0, 1).unsqueeze(0).repeat(feat.shape[0], 1, 1, 1)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def build_position_encoding(args):
    """Select the sine or learned position encoding from ``args.position_embedding``."""
    # Each spatial axis (x, y) gets half of the model width.
    N_steps = args.hidden_dim // 2
    if args.position_embedding in ('v2', 'sine'):
        # TODO find a better way of exposing other arguments
        return PositionEmbeddingSine(N_steps, normalize=True)
    if args.position_embedding in ('v3', 'learned'):
        return PositionEmbeddingLearned(N_steps)
    raise ValueError(f"not supported {args.position_embedding}")
|
external/peract_bimanual/agents/act_bc_lang/detr/models/transformer.py
ADDED
|
@@ -0,0 +1,312 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
DETR Transformer class.
|
| 4 |
+
|
| 5 |
+
Copy-paste from torch.nn.Transformer with modifications:
|
| 6 |
+
* positional encodings are passed in MHattention
|
| 7 |
+
* extra LN at the end of encoder is removed
|
| 8 |
+
* decoder returns a stack of activations from all decoding layers
|
| 9 |
+
"""
|
| 10 |
+
import copy
|
| 11 |
+
from typing import Optional, List
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
import torch.nn.functional as F
|
| 15 |
+
from torch import nn, Tensor
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class Transformer(nn.Module):
    """DETR-style encoder-decoder transformer, extended so the forward pass can
    prepend latent / proprioception tokens to the flattened image features
    (used by the ACT policy)."""

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False):
        super().__init__()

        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        # A final encoder LayerNorm is only needed in the pre-norm configuration.
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                          return_intermediate=return_intermediate_dec)

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier-initialize every weight matrix; 1-D params (biases, norms) keep defaults.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, query_embed, pos_embed, latent_input=None, proprio_input=None, additional_pos_embed=None):
        """Encode ``src`` and decode ``query_embed`` queries against the memory.

        src: (N, C, H, W) image features, or (N, HW, C) pre-flattened tokens.
            The 4-D path additionally expects latent_input, proprio_input and
            additional_pos_embed to be provided (they become two extra tokens).
        mask: key-padding mask forwarded to encoder and decoder (may be None).
        Returns hs of shape (num_decoder_outputs, N, num_queries, d_model).
        """
        # TODO flatten only when input has H and W
        if len(src.shape) == 4: # has H and W
            # flatten NxCxHxW to HWxNxC
            bs, c, h, w = src.shape
            src = src.flatten(2).permute(2, 0, 1)
            pos_embed = pos_embed.flatten(2).permute(2, 0, 1).repeat(1, bs, 1)
            query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
            # mask = mask.flatten(1)

            # Positions for the two extra (latent, proprio) tokens go first,
            # matching the token order built below.
            additional_pos_embed = additional_pos_embed.unsqueeze(1).repeat(1, bs, 1) # seq, bs, dim
            pos_embed = torch.cat([additional_pos_embed, pos_embed], axis=0)

            # Prepend latent and proprioception tokens to the image tokens.
            addition_input = torch.stack([latent_input, proprio_input], axis=0)
            src = torch.cat([addition_input, src], axis=0)
        else:
            assert len(src.shape) == 3
            # flatten NxHWxC to HWxNxC
            bs, hw, c = src.shape
            src = src.permute(1, 0, 2)
            pos_embed = pos_embed.unsqueeze(1).repeat(1, bs, 1)
            query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)

        # Decoder queries start as zeros; query_embed supplies their identity.
        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,
                          pos=pos_embed, query_pos=query_embed)
        # (layers, seq, batch, dim) -> (layers, batch, seq, dim)
        hs = hs.transpose(1, 2)
        return hs
|
| 76 |
+
|
| 77 |
+
class TransformerEncoder(nn.Module):
    """Stack of identical encoder layers with an optional final norm."""

    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        # Deep-copied clones, so each layer owns independent weights.
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        """Run ``src`` through every layer in order, then the final norm (if any)."""
        out = src
        for layer in self.layers:
            out = layer(out, src_mask=mask,
                        src_key_padding_mask=src_key_padding_mask, pos=pos)
        return out if self.norm is None else self.norm(out)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class TransformerDecoder(nn.Module):
    """Stack of decoder layers; can return the output of every layer."""

    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        """Decode ``tgt`` against ``memory``.

        Returns a (num_layers, ...) stack of per-layer (normed) outputs when
        ``return_intermediate`` is set, otherwise the final output with a
        leading singleton dimension.
        """
        out = tgt
        per_layer = []

        for layer in self.layers:
            out = layer(out, memory, tgt_mask=tgt_mask,
                        memory_mask=memory_mask,
                        tgt_key_padding_mask=tgt_key_padding_mask,
                        memory_key_padding_mask=memory_key_padding_mask,
                        pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                per_layer.append(self.norm(out))

        if self.norm is not None:
            out = self.norm(out)
            if self.return_intermediate:
                # Replace the last entry so the final output is normed exactly once.
                per_layer.pop()
                per_layer.append(out)

        if self.return_intermediate:
            return torch.stack(per_layer)

        return out.unsqueeze(0)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
class TransformerEncoderLayer(nn.Module):
    """Single encoder layer: self-attention + feed-forward, pre- or post-norm."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positions are added to queries/keys only, never to values.
        return tensor + pos if pos is not None else tensor

    def forward_post(self,
                     src,
                     src_mask: Optional[Tensor] = None,
                     src_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None):
        """Post-norm variant: sublayer -> residual add -> LayerNorm."""
        qk = self.with_pos_embed(src, pos)
        attn_out = self.self_attn(qk, qk, value=src, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = self.norm1(src + self.dropout1(attn_out))
        ff_out = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = self.norm2(src + self.dropout2(ff_out))
        return src

    def forward_pre(self, src,
                    src_mask: Optional[Tensor] = None,
                    src_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None):
        """Pre-norm variant: LayerNorm -> sublayer -> residual add."""
        normed = self.norm1(src)
        qk = self.with_pos_embed(normed, pos)
        attn_out = self.self_attn(qk, qk, value=normed, attn_mask=src_mask,
                                  key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(attn_out)
        normed = self.norm2(src)
        ff_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return src + self.dropout2(ff_out)

    def forward(self, src,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None):
        """Dispatch to the pre- or post-norm implementation."""
        branch = self.forward_pre if self.normalize_before else self.forward_post
        return branch(src, src_mask, src_key_padding_mask, pos)
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
class TransformerDecoderLayer(nn.Module):
    """Single decoder layer: self-attention over the queries, cross-attention
    to the encoder memory, then a feed-forward block; pre- or post-norm."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        # Positions are added to queries/keys only, never to values.
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,
                     tgt_mask: Optional[Tensor] = None,
                     memory_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        """Post-norm path: each sublayer is followed by residual add + LayerNorm."""
        # 1) self-attention among the decoder queries
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        # 2) cross-attention: queries attend to the encoder memory
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        # 3) position-wise feed-forward block
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(self, tgt, memory,
                    tgt_mask: Optional[Tensor] = None,
                    memory_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        """Pre-norm path: LayerNorm precedes each sublayer; residual adds after."""
        # 1) self-attention among the decoder queries
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        # 2) cross-attention: queries attend to the encoder memory
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                   key=self.with_pos_embed(memory, pos),
                                   value=memory, attn_mask=memory_mask,
                                   key_padding_mask=memory_key_padding_mask)[0]
        tgt = tgt + self.dropout2(tgt2)
        # 3) position-wise feed-forward block
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        """Dispatch to the pre- or post-norm implementation."""
        if self.normalize_before:
            return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
                                    tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, tgt_mask, memory_mask,
                                 tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
def _get_clones(module, N):
|
| 288 |
+
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
def build_transformer(args):
    """Create the main Transformer from parsed command-line arguments."""
    return Transformer(
        d_model=args.hidden_dim,
        nhead=args.nheads,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        dim_feedforward=args.dim_feedforward,
        dropout=args.dropout,
        normalize_before=args.pre_norm,
        # Expose every decoder layer's output (used for auxiliary losses).
        return_intermediate_dec=True,
    )
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def _get_activation_fn(activation):
|
| 305 |
+
"""Return an activation function given a string"""
|
| 306 |
+
if activation == "relu":
|
| 307 |
+
return F.relu
|
| 308 |
+
if activation == "gelu":
|
| 309 |
+
return F.gelu
|
| 310 |
+
if activation == "glu":
|
| 311 |
+
return F.glu
|
| 312 |
+
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
|
external/peract_bimanual/agents/act_bc_lang/detr/util/misc.py
ADDED
|
@@ -0,0 +1,468 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Misc functions, including distributed helpers.
|
| 4 |
+
|
| 5 |
+
Mostly copy-paste from torchvision references.
|
| 6 |
+
"""
|
| 7 |
+
import os
|
| 8 |
+
import subprocess
|
| 9 |
+
import time
|
| 10 |
+
from collections import defaultdict, deque
|
| 11 |
+
import datetime
|
| 12 |
+
import pickle
|
| 13 |
+
from packaging import version
|
| 14 |
+
from typing import Optional, List
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
import torch.distributed as dist
|
| 18 |
+
from torch import Tensor
|
| 19 |
+
|
| 20 |
+
# needed due to empty tensor bug in pytorch and torchvision 0.5
|
| 21 |
+
import torchvision
|
| 22 |
+
if version.parse(torchvision.__version__) < version.parse('0.7'):
|
| 23 |
+
from torchvision.ops import _new_empty_tensor
|
| 24 |
+
from torchvision.ops.misc import _output_size
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)  # last window_size raw values
        self.total = 0.0   # running sum over the whole series
        self.count = 0     # number of samples over the whole series
        self.fmt = fmt

    def update(self, value, n=1):
        """Record ``value`` (a statistic covering ``n`` samples)."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        stats = stats.tolist()
        self.count = int(stats[0])
        self.total = stats[1]

    @property
    def median(self):
        """Median over the current window."""
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        """Mean over the current window."""
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        """Mean over the entire series (not just the window)."""
        return self.total / self.count

    @property
    def max(self):
        """Largest value in the current window."""
        return max(self.deque)

    @property
    def value(self):
        """Most recently recorded value."""
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # Serialize the payload into a byte tensor on the GPU.
    payload = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(payload)
    tensor = torch.ByteTensor(storage).to("cuda")

    # Exchange payload sizes so every rank knows the largest one.
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(s.item()) for s in size_list]
    max_size = max(size_list)

    # torch.distributed.all_gather only handles equal-shaped tensors,
    # so pad every payload up to the largest size before gathering.
    tensor_list = [
        torch.empty((max_size,), dtype=torch.uint8, device="cuda")
        for _ in size_list
    ]
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,),
                              dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    # Strip the padding and unpickle each rank's payload.
    data_list = []
    for size, received in zip(size_list, tensor_list):
        raw = received.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(raw))

    return data_list
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single process: nothing to reduce.
        return input_dict
    with torch.no_grad():
        # Iterate keys in sorted order so every rank stacks values identically.
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.all_reduce(stacked)
        if average:
            stacked /= world_size
        return dict(zip(keys, stacked))
|
| 157 |
+
|
| 158 |
+
|
| 159 |
+
class MetricLogger(object):
    """Collects named SmoothedValue meters and pretty-prints them during training."""

    def __init__(self, delimiter="\t"):
        # Meters are created lazily on first update of each name.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one scalar per keyword argument (tensors are unwrapped first)."""
        for name, val in kwargs.items():
            if isinstance(val, torch.Tensor):
                val = val.item()
            assert isinstance(val, (float, int))
            self.meters[name].update(val)

    def __getattr__(self, attr):
        # Expose meters as attributes, e.g. logger.loss.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        parts = ["{}: {}".format(name, str(meter))
                 for name, meter in self.meters.items()]
        return self.delimiter.join(parts)

    def synchronize_between_processes(self):
        """All-reduce each meter's totals across processes (windows stay local)."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from *iterable*, printing timing/meter stats every
        *print_freq* steps (and on the last step)."""
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Width of the step counter matches the total number of steps.
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        fields = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}',
        ]
        if torch.cuda.is_available():
            # Only report GPU memory when CUDA is present.
            fields.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(fields)
        MB = 1024.0 * 1024.0
        i = 0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def get_sha():
    """Return a one-line summary of the git state: commit sha, dirty/clean status, branch."""
    cwd = os.path.dirname(os.path.abspath(__file__))

    def _run(command):
        return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()

    sha, diff, branch = 'N/A', "clean", 'N/A'
    try:
        sha = _run(['git', 'rev-parse', 'HEAD'])
        subprocess.check_output(['git', 'diff'], cwd=cwd)
        diff = _run(['git', 'diff-index', 'HEAD'])
        diff = "has uncommited changes" if diff else "clean"
        branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    except Exception:
        # Not a git checkout (or git missing): keep the 'N/A' placeholders.
        pass
    return f"sha: {sha}, status: {diff}, branch: {branch}"
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def collate_fn(batch):
    """Collate samples: pad+stack images into a NestedTensor; pass targets through."""
    transposed = list(zip(*batch))
    transposed[0] = nested_tensor_from_tensor_list(transposed[0])
    return tuple(transposed)
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def _max_by_axis(the_list):
|
| 276 |
+
# type: (List[List[int]]) -> List[int]
|
| 277 |
+
maxes = the_list[0]
|
| 278 |
+
for sublist in the_list[1:]:
|
| 279 |
+
for index, item in enumerate(sublist):
|
| 280 |
+
maxes[index] = max(maxes[index], item)
|
| 281 |
+
return maxes
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
class NestedTensor(object):
    """A batched tensor together with its padding mask.

    NOTE(review): in nested_tensor_from_tensor_list the mask is True at
    padded pixels and False at real ones.
    """

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        """Return a new NestedTensor with tensors (and mask, if any) on *device*."""
        moved = self.tensors.to(device)
        if self.mask is None:
            moved_mask = None
        else:
            moved_mask = self.mask.to(device)
        return NestedTensor(moved, moved_mask)

    def decompose(self):
        """Return the (tensors, mask) pair."""
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    """Pad a list of CHW images to a common size and wrap them in a NestedTensor.

    The returned mask is True at padded pixels and False at real ones.
    """
    # TODO make this more general
    if tensor_list[0].ndim != 3:
        raise ValueError('not supported')
    if torchvision._is_tracing():
        # nested_tensor_from_tensor_list() does not export well to ONNX;
        # call _onnx_nested_tensor_from_tensor_list() instead.
        return _onnx_nested_tensor_from_tensor_list(tensor_list)

    # TODO make it support different-sized images
    max_size = _max_by_axis([list(img.shape) for img in tensor_list])
    # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
    batch_shape = [len(tensor_list)] + max_size
    b, c, h, w = batch_shape
    dtype = tensor_list[0].dtype
    device = tensor_list[0].device
    tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
    mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
    for img, pad_img, m in zip(tensor_list, tensor, mask):
        # Copy each image into the top-left corner of its padded slot and
        # mark the copied region as real (False) in the mask.
        pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
        m[: img.shape[1], : img.shape[2]] = False
    return NestedTensor(tensor, mask)
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
# _onnx_nested_tensor_from_tensor_list() is an implementation of
|
| 333 |
+
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
|
| 334 |
+
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
    """ONNX-traceable variant of nested_tensor_from_tensor_list()."""
    # Per-dimension maximum shape, kept as int64 tensors for tracing.
    max_size = []
    for dim in range(tensor_list[0].dim()):
        dim_max = torch.max(
            torch.stack([img.shape[dim] for img in tensor_list]).to(torch.float32)
        ).to(torch.int64)
        max_size.append(dim_max)
    max_size = tuple(max_size)

    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx: build each padded image/mask with
    # torch.nn.functional.pad and stack them at the end.
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        padding = [(full - actual) for full, actual in zip(max_size, tuple(img.shape))]
        padded_img = torch.nn.functional.pad(
            img, (0, padding[2], 0, padding[1], 0, padding[0]))
        padded_imgs.append(padded_img)

        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        padded_mask = torch.nn.functional.pad(
            m, (0, padding[2], 0, padding[1]), "constant", 1)
        padded_masks.append(padded_mask.to(torch.bool))

    tensor = torch.stack(padded_imgs)
    mask = torch.stack(padded_masks)

    return NestedTensor(tensor, mask=mask)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def setup_for_distributed(is_master):
    """Monkey-patch builtins.print so only the master process prints.

    Non-master ranks stay silent unless the caller passes ``force=True``.
    """
    import builtins as __builtin__
    original_print = __builtin__.print

    def print(*args, **kwargs):
        # 'force' is always popped so it never leaks through to the real print.
        should_print = kwargs.pop('force', False) or is_master
        if should_print:
            original_print(*args, **kwargs)

    __builtin__.print = print
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
def is_dist_avail_and_initialized():
    """True only when torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def get_world_size():
    """Number of distributed processes (1 when not running distributed)."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
def get_rank():
    """Rank of this process (0 when not running distributed)."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
|
| 396 |
+
|
| 397 |
+
|
| 398 |
+
def is_main_process():
    """True on rank 0 (or when not running distributed at all)."""
    return get_rank() == 0
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
def save_on_master(*args, **kwargs):
    """torch.save(), but only on the main process, so ranks don't clobber each other."""
    if is_main_process():
        torch.save(*args, **kwargs)
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def init_distributed_mode(args):
    """Initialize torch.distributed from launcher environment variables.

    Mutates *args* in place, setting ``rank``, ``world_size``, ``gpu``,
    ``distributed`` and ``dist_backend``. Expects ``args.dist_url`` to be
    set by the caller when a launcher environment is detected.
    Falls back to single-process mode (``args.distributed = False``) when
    no launcher variables are present.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        # torchrun / torch.distributed.launch style environment.
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        # SLURM: derive the local GPU index from the global proc id.
        # NOTE(review): args.world_size is not set on this branch — presumably
        # the caller provides it; confirm before relying on it here.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    # Synchronize all ranks before continuing, then silence non-master prints.
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
|
| 430 |
+
|
| 431 |
+
|
| 432 |
+
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) class scores.
        target: (batch,) integer class labels.
        topk: iterable of k values to report.
    Returns:
        List of scalar tensors, one per k, each the top-k accuracy in percent.
    """
    if target.numel() == 0:
        # No targets: report a single zero rather than dividing by zero.
        return [torch.zeros([], device=output.device)]
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # Fix: use reshape(-1) instead of view(-1) — the slice of `correct`
        # is not guaranteed contiguous, and view() raises in that case on
        # recent PyTorch versions (same fix as the upstream ImageNet example).
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, and this
    class can go away.
    """
    if version.parse(torchvision.__version__) < version.parse('0.7'):
        if input.numel() > 0:
            # Non-empty input: the native op works fine even on old torchvision.
            return torch.nn.functional.interpolate(
                input, size, scale_factor, mode, align_corners
            )

        # Empty batch: old torchvision cannot interpolate it, so compute the
        # output spatial shape by hand and return an empty tensor of that shape.
        output_shape = _output_size(2, input, size, scale_factor)
        output_shape = list(input.shape[:-2]) + list(output_shape)
        return _new_empty_tensor(input, output_shape)
    else:
        # torchvision >= 0.7 handles empty batches natively.
        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
|
external/peract_bimanual/agents/arm/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
import agents.arm.launch_utils
|
external/pyrep/.gitignore
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
./idea
|
| 7 |
+
.idea/
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
*.egg-info/
|
| 24 |
+
.installed.cfg
|
| 25 |
+
*.egg
|
| 26 |
+
|
| 27 |
+
# PyInstaller
|
| 28 |
+
# Usually these files are written by a python script from a template
|
| 29 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 30 |
+
*.manifest
|
| 31 |
+
*.spec
|
| 32 |
+
|
| 33 |
+
# Installer logs
|
| 34 |
+
pip-log.txt
|
| 35 |
+
pip-delete-this-directory.txt
|
| 36 |
+
|
| 37 |
+
# Unit test / coverage reports
|
| 38 |
+
htmlcov/
|
| 39 |
+
.tox/
|
| 40 |
+
.coverage
|
| 41 |
+
.coverage.*
|
| 42 |
+
.cache
|
| 43 |
+
nosetests.xml
|
| 44 |
+
coverage.xml
|
| 45 |
+
*.cover
|
| 46 |
+
.hypothesis/
|
| 47 |
+
|
| 48 |
+
# Translations
|
| 49 |
+
*.mo
|
| 50 |
+
*.pot
|
| 51 |
+
|
| 52 |
+
# Django stuff:
|
| 53 |
+
*.log
|
| 54 |
+
local_settings.py
|
| 55 |
+
|
| 56 |
+
# Flask stuff:
|
| 57 |
+
instance/
|
| 58 |
+
.webassets-cache
|
| 59 |
+
|
| 60 |
+
# Scrapy stuff:
|
| 61 |
+
.scrapy
|
| 62 |
+
|
| 63 |
+
# Sphinx documentation
|
| 64 |
+
docs/_build/
|
| 65 |
+
|
| 66 |
+
# PyBuilder
|
| 67 |
+
target/
|
| 68 |
+
|
| 69 |
+
# Jupyter Notebook
|
| 70 |
+
.ipynb_checkpoints
|
| 71 |
+
|
| 72 |
+
# pyenv
|
| 73 |
+
.python-version
|
| 74 |
+
|
| 75 |
+
# celery beat schedule file
|
| 76 |
+
celerybeat-schedule
|
| 77 |
+
|
| 78 |
+
# SageMath parsed files
|
| 79 |
+
*.sage.py
|
| 80 |
+
|
| 81 |
+
# Environments
|
| 82 |
+
.env
|
| 83 |
+
.venv
|
| 84 |
+
env/
|
| 85 |
+
venv/
|
| 86 |
+
ENV/
|
| 87 |
+
|
| 88 |
+
# Spyder project settings
|
| 89 |
+
.spyderproject
|
| 90 |
+
.spyproject
|
| 91 |
+
|
| 92 |
+
# Rope project settings
|
| 93 |
+
.ropeproject
|
| 94 |
+
|
| 95 |
+
# mkdocs documentation
|
| 96 |
+
/site
|
| 97 |
+
|
| 98 |
+
# mypy
|
| 99 |
+
.mypy_cache/
|
| 100 |
+
|
| 101 |
+
pyrep/backend/_sim_cffi*
|
| 102 |
+
|
external/pyrep/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2019 Stephen James
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
external/pyrep/README.md
ADDED
|
@@ -0,0 +1,284 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# PyRep [](https://github.com/stepjam/PyRep/actions) [](https://discord.gg/eTMsa5Y)
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
__PyRep is a toolkit for robot learning research, built on top of [CoppeliaSim](http://www.coppeliarobotics.com/) (previously called V-REP).__
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
- [Install](#install)
|
| 8 |
+
- [Running Headless](#running-headless)
|
| 9 |
+
- [Getting Started](#getting-started)
|
| 10 |
+
- [Usage](#usage)
|
| 11 |
+
- [Supported Robots](#supported-robots)
|
| 12 |
+
- [Adding Robots](#adding-robots)
|
| 13 |
+
- [Contributing](#contributing)
|
| 14 |
+
- [Projects Using PyRep](#projects-using-pyrep)
|
| 15 |
+
- [What Happened to V-REP?](#what-happened-to-v-rep)
|
| 16 |
+
- [Citation](#citation)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
## Install
|
| 20 |
+
|
| 21 |
+
PyRep requires version **4.1** of CoppeliaSim. Download:
|
| 22 |
+
- [Ubuntu 16.04](https://www.coppeliarobotics.com/files/CoppeliaSim_Edu_V4_1_0_Ubuntu16_04.tar.xz)
|
| 23 |
+
- [Ubuntu 18.04](https://www.coppeliarobotics.com/files/CoppeliaSim_Edu_V4_1_0_Ubuntu18_04.tar.xz)
|
| 24 |
+
- [Ubuntu 20.04](https://www.coppeliarobotics.com/files/CoppeliaSim_Edu_V4_1_0_Ubuntu20_04.tar.xz)
|
| 25 |
+
|
| 26 |
+
Once you have downloaded CoppeliaSim, you can pull PyRep from git:
|
| 27 |
+
|
| 28 |
+
```bash
|
| 29 |
+
git clone https://github.com/stepjam/PyRep.git
|
| 30 |
+
cd PyRep
|
| 31 |
+
```
|
| 32 |
+
|
| 33 |
+
Add the following to your *~/.bashrc* file: (__NOTE__: the 'EDIT ME' in the first line)
|
| 34 |
+
|
| 35 |
+
```bash
|
| 36 |
+
export COPPELIASIM_ROOT=EDIT/ME/PATH/TO/COPPELIASIM/INSTALL/DIR
|
| 37 |
+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$COPPELIASIM_ROOT
|
| 38 |
+
export QT_QPA_PLATFORM_PLUGIN_PATH=$COPPELIASIM_ROOT
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
__Remember to source your bashrc (`source ~/.bashrc`) or
|
| 42 |
+
zshrc (`source ~/.zshrc`) after this.__
|
| 43 |
+
|
| 44 |
+
Finally install the python library:
|
| 45 |
+
|
| 46 |
+
```bash
|
| 47 |
+
pip3 install -r requirements.txt
|
| 48 |
+
pip3 install .
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
You should be good to go!
|
| 52 |
+
Try running one of the examples in the *examples/* folder.
|
| 53 |
+
|
| 54 |
+
_Although you can use CoppeliaSim on any platform, communication via PyRep is currently only supported on Linux._
|
| 55 |
+
|
| 56 |
+
#### Troubleshooting
|
| 57 |
+
|
| 58 |
+
Below are some problems you may encounter during installation. If none of these solve your problem, please raise an issue.
|
| 59 |
+
|
| 60 |
+
- ModuleNotFoundError: No module named 'pyrep.backend._v_rep_cffi'
|
| 61 |
+
- If you are getting this error, then please check that you are not running the interpreter from the project root. If you are, then your Python interpreter will try to import those files rather the installed files.
|
| 62 |
+
- error: command 'x86_64-linux-gnu-gcc' failed
|
| 63 |
+
- You may be missing packages needed for building python extensions. Try: `sudo apt-get install python3-dev`, and then re-run the installation.
|
| 64 |
+
|
| 65 |
+
## Running Headless
|
| 66 |
+
|
| 67 |
+
You can run PyRep/CoppeliaSim headlessly with VirtualGL. VirtualGL is an open source toolkit that gives any Unix or Linux remote display software the ability to run OpenGL applications **with full 3D hardware acceleration**.
|
| 68 |
+
First ensure that you have the NVIDIA proprietary driver installed, i.e. you should get an output when running `nvidia-smi`. Now run the following commands:
|
| 69 |
+
```bash
|
| 70 |
+
sudo apt-get install xorg libxcb-randr0-dev libxrender-dev libxkbcommon-dev libxkbcommon-x11-0 libavcodec-dev libavformat-dev libswscale-dev
|
| 71 |
+
sudo nvidia-xconfig -a --use-display-device=None --virtual=1280x1024
|
| 72 |
+
# Install VirtualGL
|
| 73 |
+
wget https://sourceforge.net/projects/virtualgl/files/2.5.2/virtualgl_2.5.2_amd64.deb/download -O virtualgl_2.5.2_amd64.deb
|
| 74 |
+
sudo dpkg -i virtualgl*.deb
|
| 75 |
+
rm virtualgl*.deb
|
| 76 |
+
```
|
| 77 |
+
You will now need to reboot, and then start the X server:
|
| 78 |
+
```bash
|
| 79 |
+
sudo reboot
|
| 80 |
+
nohup sudo X &
|
| 81 |
+
```
|
| 82 |
+
Now we are good to go! To render the application with the first GPU, you can do the following:
|
| 83 |
+
```bash
|
| 84 |
+
export DISPLAY=:0.0
|
| 85 |
+
python my_pyrep_app.py
|
| 86 |
+
```
|
| 87 |
+
To render with the second GPU, you will instead set the display as: `export DISPLAY=:0.1`, and so on.
|
| 88 |
+
|
| 89 |
+
**Acknowledgement**: Special thanks to Boyuan Chen (UC Berkeley) for bringing VirtualGL to my attention!
|
| 90 |
+
|
| 91 |
+
## Getting Started
|
| 92 |
+
|
| 93 |
+
1. First take a look at [Usage](#usage) and the examples in the *examples/* folder to see if PyRep might be able to accelerate your research.
|
| 94 |
+
2. Take a look at the CoppeliaSim [tutorials](http://www.coppeliarobotics.com/helpFiles/en/tutorials.htm).
|
| 95 |
+
|
| 96 |
+
## Usage
|
| 97 |
+
|
| 98 |
+
The best way to see how PyRep can help in your research is to look at the examples in the *examples/* folder!
|
| 99 |
+
|
| 100 |
+
#### Launching the simulation
|
| 101 |
+
|
| 102 |
+
```python
|
| 103 |
+
from pyrep import PyRep
|
| 104 |
+
|
| 105 |
+
pr = PyRep()
|
| 106 |
+
# Launch the application with a scene file in headless mode
|
| 107 |
+
pr.launch('scene.ttt', headless=True)
|
| 108 |
+
pr.start() # Start the simulation
|
| 109 |
+
|
| 110 |
+
# Do some stuff
|
| 111 |
+
|
| 112 |
+
pr.stop() # Stop the simulation
|
| 113 |
+
pr.shutdown() # Close the application
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
#### Modifying the Scene
|
| 118 |
+
|
| 119 |
+
```python
|
| 120 |
+
from pyrep.objects.shape import Shape
|
| 121 |
+
from pyrep.const import PrimitiveShape
|
| 122 |
+
|
| 123 |
+
object = Shape.create(type=PrimitiveShape.CYLINDER,
|
| 124 |
+
color=[r,g,b], size=[w, h, d],
|
| 125 |
+
position=[x, y, z])
|
| 126 |
+
object.set_color([r, g, b])
|
| 127 |
+
object.set_position([x, y, z])
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
#### Using Robots
|
| 131 |
+
|
| 132 |
+
Robots are designed to be modular; arms are treated separately to grippers.
|
| 133 |
+
|
| 134 |
+
Use the robot ttm files defined in robots/ttms. These have been altered slightly from the original ones shipped with CoppeliaSim to allow them to be used with motion planning out of the box.
|
| 135 |
+
The 'tip' of the robot may not be where you want it, so feel free to play around with this.
|
| 136 |
+
|
| 137 |
+
```python
|
| 138 |
+
from pyrep import PyRep
|
| 139 |
+
from pyrep.robots.arms.panda import Panda
|
| 140 |
+
from pyrep.robots.end_effectors.panda_gripper import PandaGripper
|
| 141 |
+
|
| 142 |
+
pr = PyRep()
|
| 143 |
+
# Launch the application with a scene file that contains a robot
|
| 144 |
+
pr.launch('scene_with_panda.ttt')
|
| 145 |
+
pr.start() # Start the simulation
|
| 146 |
+
|
| 147 |
+
arm = Panda() # Get the panda from the scene
|
| 148 |
+
gripper = PandaGripper() # Get the panda gripper from the scene
|
| 149 |
+
|
| 150 |
+
velocities = [.1, .2, .3, .4, .5, .6, .7]
|
| 151 |
+
arm.set_joint_target_velocities(velocities)
|
| 152 |
+
pr.step() # Step physics simulation
|
| 153 |
+
|
| 154 |
+
done = False
|
| 155 |
+
# Open the gripper halfway at a velocity of 0.04.
|
| 156 |
+
while not done:
|
| 157 |
+
done = gripper.actuate(0.5, velocity=0.04)
|
| 158 |
+
pr.step()
|
| 159 |
+
|
| 160 |
+
pr.stop() # Stop the simulation
|
| 161 |
+
pr.shutdown() # Close the application
|
| 162 |
+
```
|
| 163 |
+
|
| 164 |
+
We recommend constructing your robot in a dictionary or a small structure, e.g.
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
```python
|
| 168 |
+
class MyRobot(object):
|
| 169 |
+
def __init__(self, arm, gripper):
|
| 170 |
+
self.arm = arm
|
| 171 |
+
self.gripper = gripper
|
| 172 |
+
|
| 173 |
+
arm = Panda() # Get the panda from the scene
|
| 174 |
+
gripper = PandaGripper() # Get the panda gripper from the scene
|
| 175 |
+
|
| 176 |
+
# Create robot structure
|
| 177 |
+
my_robot_1 = MyRobot(arm, gripper)
|
| 178 |
+
# OR
|
| 179 |
+
my_robot_2 = {
|
| 180 |
+
'arm': arm,
|
| 181 |
+
'gripper': gripper
|
| 182 |
+
}
|
| 183 |
+
```
|
| 184 |
+
|
| 185 |
+
#### Running Multiple PyRep Instances
|
| 186 |
+
|
| 187 |
+
Each PyRep instance needs its own process. This can be achieved using Pythons [multiprocessing](https://docs.python.org/3.6/library/multiprocessing.html) module. Here is a simple example:
|
| 188 |
+
|
| 189 |
+
```python
|
| 190 |
+
from multiprocessing import Process
|
| 191 |
+
|
| 192 |
+
PROCESSES = 10
|
| 193 |
+
|
| 194 |
+
def run():
|
| 195 |
+
pr = PyRep()
|
| 196 |
+
pr.launch('my_scene.ttt', headless=True)
|
| 197 |
+
pr.start()
|
| 198 |
+
# Do stuff...
|
| 199 |
+
pr.stop()
|
| 200 |
+
pr.shutdown()
|
| 201 |
+
|
| 202 |
+
processes = [Process(target=run, args=()) for i in range(PROCESSES)]
|
| 203 |
+
[p.start() for p in processes]
|
| 204 |
+
[p.join() for p in processes]
|
| 205 |
+
```
|
| 206 |
+
|
| 207 |
+
## Supported Robots
|
| 208 |
+
|
| 209 |
+
Here is a list of robots currently supported by PyRep:
|
| 210 |
+
|
| 211 |
+
#### Arms
|
| 212 |
+
|
| 213 |
+
- Kinova Mico
|
| 214 |
+
- Kinova Jaco
|
| 215 |
+
- Rethink Baxter
|
| 216 |
+
- Rethink Sawyer
|
| 217 |
+
- Franka Emika Panda
|
| 218 |
+
- Kuka LBR iiwa 7 R800
|
| 219 |
+
- Kuka LBR iiwa 14 R820
|
| 220 |
+
- Universal Robots UR3
|
| 221 |
+
- Universal Robots UR5
|
| 222 |
+
- Universal Robots UR10
|
| 223 |
+
|
| 224 |
+
#### Grippers
|
| 225 |
+
|
| 226 |
+
- Kinova Mico Hand
|
| 227 |
+
- Kinova Jaco Hand
|
| 228 |
+
- Rethink Baxter Gripper
|
| 229 |
+
- Franka Emika Panda Gripper
|
| 230 |
+
|
| 231 |
+
#### Mobile Robots
|
| 232 |
+
|
| 233 |
+
- Kuka YouBot
|
| 234 |
+
- Turtle Bot
|
| 235 |
+
- Line Tracer
|
| 236 |
+
|
| 237 |
+
Feel free to send pull requests for new robots!
|
| 238 |
+
|
| 239 |
+
## Adding Robots
|
| 240 |
+
|
| 241 |
+
If the robot you want is not currently supported, then why not add it in!
|
| 242 |
+
|
| 243 |
+
[Here is a tutorial for adding robots.](tutorials/adding_robots.md)
|
| 244 |
+
|
| 245 |
+
## Contributing
|
| 246 |
+
|
| 247 |
+
We want to make PyRep the best tool for rapid robot learning research. If you would like to get involved, then please [get in contact](https://www.doc.ic.ac.uk/~slj12/)!
|
| 248 |
+
|
| 249 |
+
Pull requests welcome for bug fixes!
|
| 250 |
+
|
| 251 |
+
## Projects Using PyRep
|
| 252 |
+
|
| 253 |
+
If you use PyRep in your work, then get in contact and we can add you to the list!
|
| 254 |
+
|
| 255 |
+
- [RLBench: The Robot Learning Benchmark & Learning Environment, arxiv 2019](https://arxiv.org/abs/1909.12271)
|
| 256 |
+
- [Learning One-Shot Imitation from Humans without Humans, arxiv 2019](https://arxiv.org/abs/1911.01103)
|
| 257 |
+
- [Task-Embedded Control Networks for Few-Shot Imitation Learning, CoRL 2018](https://arxiv.org/abs/1810.03237)
|
| 258 |
+
- [Transferring End-to-End Visuomotor Control from Simulation to Real World for a Multi-Stage Task
|
| 259 |
+
, CoRL 2017](https://arxiv.org/abs/1707.02267)
|
| 260 |
+
|
| 261 |
+
## Acknowledgements
|
| 262 |
+
|
| 263 |
+
- Georges Nomicos (Imperial College London) for the addition of mobile platforms.
|
| 264 |
+
- Boyuan Chen (UC Berkeley) for bringing VirtualGL to my attention.
|
| 265 |
+
|
| 266 |
+
## What Happened to V-REP?
|
| 267 |
+
|
| 268 |
+
Coppelia Robotics discontinued development of __V-REP__. Instead, they now focus
|
| 269 |
+
their efforts on __CoppeliaSim__. CoppeliaSim is 100% compatible with V-REP.
|
| 270 |
+
See more information [here](http://coppeliarobotics.com/helpFiles/en/versionInfo.htm#coppeliaSim4.0.0).
|
| 271 |
+
|
| 272 |
+
PyRep is fully compatible with both V-REP and CoppeliaSim.
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
## Citation
|
| 276 |
+
|
| 277 |
+
```
|
| 278 |
+
@article{james2019pyrep,
|
| 279 |
+
title={PyRep: Bringing V-REP to Deep Robot Learning},
|
| 280 |
+
author={James, Stephen and Freese, Marc and Davison, Andrew J.},
|
| 281 |
+
journal={arXiv preprint arXiv:1906.11176},
|
| 282 |
+
year={2019}
|
| 283 |
+
}
|
| 284 |
+
```
|
external/pyrep/pyrep/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Version of this PyRep distribution.
__version__ = '4.1.0.3'

# Global test-mode flag (read elsewhere in the package — confirm usages).
testing = False

# Re-export the main entry-point class at package level.
from .pyrep import PyRep
|
external/pyrep/pyrep/const.py
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.backend import sim
|
| 2 |
+
from enum import Enum
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class PrimitiveShape(Enum):
    """Primitive shape types that can be created in the scene."""
    CUBOID = 0
    SPHERE = 1
    CYLINDER = 2
    CONE = 3
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class ObjectType(Enum):
    """Scene object types, mirroring CoppeliaSim's ``sim_object_*`` constants."""
    ALL = sim.sim_handle_all
    SHAPE = sim.sim_object_shape_type
    JOINT = sim.sim_object_joint_type
    DUMMY = sim.sim_object_dummy_type
    PROXIMITY_SENSOR = sim.sim_object_proximitysensor_type
    GRAPH = sim.sim_object_graph_type
    CAMERA = sim.sim_object_camera_type
    PATH = sim.sim_object_path_type
    VISION_SENSOR = sim.sim_object_visionsensor_type
    VOLUME = sim.sim_object_volume_type
    MILl = sim.sim_object_mill_type  # Misspelled; kept for backward compatibility.
    FORCE_SENSOR = sim.sim_object_forcesensor_type
    LIGHT = sim.sim_object_light_type
    MIRROR = sim.sim_object_mirror_type
    OCTREE = sim.sim_object_octree_type
    # Correctly-spelled alias for the historical 'MILl' member above.
    # Duplicate Enum values become aliases, so ObjectType.MILL is ObjectType.MILl.
    MILL = sim.sim_object_mill_type
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class JointType(Enum):
    """Joint subtypes, mirroring CoppeliaSim's ``sim_joint_*_subtype`` constants."""
    REVOLUTE = sim.sim_joint_revolute_subtype
    PRISMATIC = sim.sim_joint_prismatic_subtype
    SPHERICAL = sim.sim_joint_spherical_subtype
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class JointMode(Enum):
    """Joint operation modes, mirroring CoppeliaSim's ``sim_jointmode_*`` constants."""
    PASSIVE = sim.sim_jointmode_passive
    IK = sim.sim_jointmode_ik
    IK_DEPENDENT = sim.sim_jointmode_ikdependent
    DEPENDENT = sim.sim_jointmode_dependent
    FORCE = sim.sim_jointmode_force
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class ConfigurationPathAlgorithms(Enum):
    """Names of sampling-based path-planning algorithms.

    The string values are passed verbatim to the planning backend
    (presumably OMPL planner identifiers — confirm against the plugin docs).
    """
    BiTRRT = 'BiTRRT'
    BITstar = 'BITstar'
    BKPIECE1 = 'BKPIECE1'
    CForest = 'CForest'
    EST = 'EST'
    FMT = 'FMT'
    KPIECE1 = 'KPIECE1'
    LazyPRM = 'LazyPRM'
    LazyPRMstar = 'LazyPRMstar'
    LazyRRT = 'LazyRRT'
    LBKPIECE1 = 'LBKPIECE1'
    LBTRRT = 'LBTRRT'
    PDST = 'PDST'
    PRM = 'PRM'
    PRMstar = 'PRMstar'
    pRRT = 'pRRT'
    pSBL = 'pSBL'
    RRT = 'RRT'
    RRTConnect = 'RRTConnect'
    RRTstar = 'RRTstar'
    SBL = 'SBL'
    SPARS = 'SPARS'
    SPARStwo = 'SPARStwo'
    STRIDE = 'STRIDE'
    TRRT = 'TRRT'
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class TextureMappingMode(Enum):
    """Projection modes used when mapping a texture onto a shape."""
    PLANE = sim.sim_texturemap_plane
    CYLINDER = sim.sim_texturemap_cylinder
    SPHERE = sim.sim_texturemap_sphere
    CUBE = sim.sim_texturemap_cube
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class PerspectiveMode(Enum):
    """Projection mode: orthographic or perspective."""
    ORTHOGRAPHIC = 0
    PERSPECTIVE = 1
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class RenderMode(Enum):
    """Render modes, mirroring CoppeliaSim's ``sim_rendermode_*`` constants."""
    OPENGL = sim.sim_rendermode_opengl
    OPENGL_AUXILIARY = sim.sim_rendermode_auxchannels
    OPENGL_COLOR_CODED = sim.sim_rendermode_colorcoded
    POV_RAY = sim.sim_rendermode_povray
    EXTERNAL = sim.sim_rendermode_extrenderer
    EXTERNAL_WINDOWED = sim.sim_rendermode_extrendererwindowed
    OPENGL3 = sim.sim_rendermode_opengl3
    OPENGL3_WINDOWED = sim.sim_rendermode_opengl3windowed
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
class Verbosity(Enum):
    """Console verbosity levels accepted by CoppeliaSim (string identifiers)."""
    NONE = 'none'
    ERRORS = 'errors'
    WARNINGS = 'warnings'
    LOAD_INFOS = 'loadinfos'
    SCRIPT_ERRORS = 'scripterrors'
    SCRIPT_WARNINGS = 'scriptwarnings'
    SCRIPT_INFOS = 'scriptinfos'
    INFOS = 'infos'
    DEBUG = 'debug'
    TRACE = 'trace'
    TRACE_LUA = 'tracelua'
    TYRACE_ALL = 'traceall'  # Misspelled; kept for backward compatibility.
    # Correctly-spelled alias for the historical 'TYRACE_ALL' member above.
    # Duplicate Enum values become aliases of the first definition.
    TRACE_ALL = 'traceall'
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
# Script type constant used when calling functions in PyRep's embedded
# add-on script (see utils.script_call usages — confirm against backend).
PYREP_SCRIPT_TYPE = sim.sim_scripttype_addonscript
|
external/pyrep/pyrep/errors.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class PyRepError(Exception):
    """General PyRep failure (e.g. launch or environment problems)."""
    pass


class WrongObjectTypeError(Exception):
    """An object in the scene was not of the expected type."""
    pass


class ObjectAlreadyRemovedError(Exception):
    """An operation was attempted on an object that has been removed."""
    pass


class ObjectIsNotModelError(Exception):
    """A model-only operation was attempted on a non-model object."""
    pass


class ConfigurationError(Exception):
    """A valid (joint) configuration could not be found."""
    pass


class ConfigurationPathError(Exception):
    """A configuration-space path could not be found or executed."""
    pass


class GripperError(Exception):
    """An error relating to gripper actuation."""
    pass


class IKError(Exception):
    """Inverse kinematics failed to find a solution."""
    pass
|
external/pyrep/pyrep/pyrep.py
ADDED
|
@@ -0,0 +1,332 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from contextlib import contextmanager
|
| 3 |
+
from pyrep.backend import sim, utils
|
| 4 |
+
from pyrep.const import Verbosity
|
| 5 |
+
from pyrep.objects.object import Object
|
| 6 |
+
from pyrep.objects.shape import Shape
|
| 7 |
+
from pyrep.textures.texture import Texture
|
| 8 |
+
from pyrep.errors import PyRepError
|
| 9 |
+
from pyrep.backend import sim
|
| 10 |
+
import os
|
| 11 |
+
import sys
|
| 12 |
+
import time
|
| 13 |
+
import threading
|
| 14 |
+
from typing import Tuple, List
|
| 15 |
+
import warnings
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class PyRep(object):
    """Used for interfacing with the CoppeliaSim simulation.

    Can be used for starting, stopping, and stepping the simulation. As well
    as getting, and creating scene objects and robots.
    """

    def __init__(self):
        # Whether the physics simulation is currently running.
        self.running = False
        self._process = None
        self._robot_to_count = {}
        self.connected = False

        self._ui_thread = None
        self._responsive_ui_thread = None

        self._init_thread_id = None
        self._shutting_down = False

        self._handles_to_objects = {}

        if 'COPPELIASIM_ROOT' not in os.environ:
            raise PyRepError(
                'COPPELIASIM_ROOT not defined. See installation instructions.')
        self._vrep_root = os.environ['COPPELIASIM_ROOT']
        if not os.path.exists(self._vrep_root):
            raise PyRepError(
                'COPPELIASIM_ROOT was not a correct path. '
                'See installation instructions')

    def _run_ui_thread(self, scene_file: str, headless: bool,
                       verbosity: Verbosity) -> None:
        """Entry point of the UI thread: launches the CoppeliaSim UI loop."""
        # Need this otherwise extensions will not be loaded
        os.chdir(self._vrep_root)
        options = sim.sim_gui_headless if headless else sim.sim_gui_all
        sim.simSetStringParameter(
            sim.sim_stringparam_verbosity, verbosity.value)
        sim.simExtLaunchUIThread(
            options=options, scene=scene_file, pyrep_root=self._vrep_root)

    def _run_responsive_ui_thread(self) -> None:
        """Keeps the UI responsive by stepping it while physics is stopped."""
        while True:
            if not self.running:
                with utils.step_lock:
                    if self._shutting_down or sim.simExtGetExitRequest():
                        break
                    sim.simExtStep(False)
            time.sleep(0.01)
        # If the exit request was from the UI, then call shutdown, otherwise
        # shutdown caused this thread to terminate.
        if not self._shutting_down:
            self.shutdown()

    def launch(self, scene_file: str = "", headless: bool = False,
               responsive_ui: bool = False, blocking: bool = False,
               verbosity: Verbosity = Verbosity.NONE) -> None:
        """Launches CoppeliaSim.

        Launches the UI thread, waits until the UI thread has finished, this
        results in the current thread becoming the simulation thread.

        :param scene_file: The scene file to load. Empty string for empty scene.
        :param headless: Run CoppeliaSim in simulation mode.
        :param responsive_ui: If True, then a separate thread will be created to
            asynchronously step the UI of CoppeliaSim. Note, that will reduce
            the responsiveness of the simulation thread.
        :param blocking: Causes CoppeliaSim to launch as if running the default
            c++ client application. This is causes the function to block.
            For most users, this will be set to False.
        :param verbosity: The verbosity level for CoppeliaSim.
            Usually Verbosity.NONE or Verbosity.LOAD_INFOS.
        :raises PyRepError: If the given scene file does not exist.
        """
        abs_scene_file = os.path.abspath(scene_file)
        if len(scene_file) > 0 and not os.path.isfile(abs_scene_file):
            raise PyRepError('Scene file does not exist: %s' % scene_file)
        cwd = os.getcwd()
        self._ui_thread = threading.Thread(
            target=self._run_ui_thread,
            args=(abs_scene_file, headless, verbosity))
        self._ui_thread.daemon = True
        self._ui_thread.start()

        # Wait for the UI thread to be ready before initialising the
        # simulation thread.
        while not sim.simExtCanInitSimThread():
            time.sleep(0.1)

        sim.simExtSimThreadInit()
        time.sleep(0.2)  # Stops CoppeliaSim crashing if restarted too quickly.

        if blocking:
            while not sim.simExtGetExitRequest():
                sim.simExtStep()
            self.shutdown()
        elif responsive_ui:
            self._responsive_ui_thread = threading.Thread(
                target=self._run_responsive_ui_thread)
            self._responsive_ui_thread.daemon = True
            try:
                self._responsive_ui_thread.start()
            except (KeyboardInterrupt, SystemExit):
                if not self._shutting_down:
                    self.shutdown()
                sys.exit()
            self.step()
        else:
            self.step()
        os.chdir(cwd)  # Go back to the previous cwd

    def script_call(self, function_name_at_script_name: str,
                    script_handle_or_type: int,
                    ints=(), floats=(), strings=(), bytes='') -> (
            Tuple[List[int], List[float], List[str], str]):
        """Calls a script function (from a plugin, the main client application,
        or from another script). This represents a callback inside of a script.

        :param function_name_at_script_name: A string representing the function
            name and script name, e.g. myFunctionName@theScriptName. When the
            script is not associated with an object, then just specify the
            function name.
        :param script_handle_or_type: The handle of the script, otherwise the
            type of the script.
        :param ints: The input ints to the script.
        :param floats: The input floats to the script.
        :param strings: The input strings to the script.
        :param bytes: The input bytes to the script (as a string).
        :return: Any number of return values from the called Lua function.
        """
        return utils.script_call(
            function_name_at_script_name, script_handle_or_type, ints, floats,
            strings, bytes)

    def shutdown(self) -> None:
        """Shuts down the CoppeliaSim simulation.

        :raises PyRepError: If CoppeliaSim was never launched.
        """
        if self._ui_thread is None:
            raise PyRepError(
                'CoppeliaSim has not been launched. Call launch first.')
        if self._ui_thread is not None:
            self._shutting_down = True
            self.stop()
            self.step_ui()
            sim.simExtPostExitRequest()
            sim.simExtSimThreadDestroy()
            self._ui_thread.join()
            if self._responsive_ui_thread is not None:
                self._responsive_ui_thread.join()
            # CoppeliaSim crashes if new instance opened too quickly after shutdown.
            # TODO: A small sleep stops this for now.
            time.sleep(0.1)
        self._ui_thread = None
        self._shutting_down = False

    def start(self) -> None:
        """Starts the physics simulation if it is not already running.

        :raises PyRepError: If CoppeliaSim was never launched.
        """
        if self._ui_thread is None:
            raise PyRepError(
                'CoppeliaSim has not been launched. Call launch first.')
        if not self.running:
            sim.simStartSimulation()
            self.running = True

    def stop(self) -> None:
        """Stops the physics simulation if it is running.

        :raises PyRepError: If CoppeliaSim was never launched.
        """
        if self._ui_thread is None:
            raise PyRepError(
                'CoppeliaSim has not been launched. Call launch first.')
        if self.running:
            sim.simStopSimulation()
            self.running = False

            # Need this so the UI updates
            [self.step() for _ in range(5)]  # type: ignore

    def step(self) -> None:
        """Execute the next simulation step.

        If the physics simulation is not running, then this will only update
        the UI.
        """
        with utils.step_lock:
            sim.simExtStep()

    def step_ui(self) -> None:
        """Update the UI.

        This will not execute the next simulation step, even if the physics
        simulation is running.
        This is only applicable when PyRep was launched without a responsive UI.
        """
        with utils.step_lock:
            sim.simExtStep(False)

    def set_simulation_timestep(self, dt: float) -> None:
        """Sets the simulation time step. Default is 0.05.

        :param dt: The time step value in seconds.
        """
        sim.simSetFloatParameter(sim.sim_floatparam_simulation_time_step, dt)
        if not np.allclose(self.get_simulation_timestep(), dt):
            warnings.warn('Could not change simulation timestep. You may need '
                          'to change it to "custom dt" using simulation '
                          'settings dialog.')

    def get_simulation_timestep(self) -> float:
        """Gets the simulation time step.

        :return: The time step value in seconds.
        """
        return sim.simGetSimulationTimeStep()

    def set_configuration_tree(self, config_tree: bytes) -> None:
        """Restores configuration information previously retrieved.

        Configuration information (object relative positions/orientations,
        joint/path values) can be retrieved with
        :py:meth:`Object.get_configuration_tree`. Dynamically simulated
        objects will implicitly be reset before the command is applied
        (i.e. similar to calling :py:meth:`Object.reset_dynamic_object` just
        before).

        :param config_tree: The configuration tree to restore.
        """
        sim.simSetConfigurationTree(config_tree)

    def group_objects(self, objects: List[Shape]) -> Shape:
        """Groups several shapes into a compound shape (or simple shape).

        :param objects: The list of shapes to group.
        :return: A single grouped shape.
        """
        handles = [o.get_handle() for o in objects]
        handle = sim.simGroupShapes(handles)
        return Shape(handle)

    def merge_objects(self, objects: List[Shape]) -> Shape:
        """Merges several shapes into a compound shape (or simple shape).

        :param objects: The list of shapes to group.
        :return: A single merged shape.
        """
        handles = [o.get_handle() for o in objects]
        # FIXME: sim.simGroupShapes(merge=True) won't return correct handle,
        # so we use name to find correct handle of the merged shape.
        name = objects[-1].get_name()
        sim.simGroupShapes(handles, merge=True)
        return Shape(name)

    def export_scene(self, filename: str) -> None:
        """Saves the current scene.

        :param filename: scene filename. The filename extension is required
            ("ttt").
        """
        sim.simSaveScene(filename)

    def import_model(self, filename: str) -> Object:
        """ Loads a previously saved model.

        :param filename: model filename. The filename extension is required
            ("ttm"). An optional "@copy" can be appended to the filename, in
            which case the model's objects will be named/renamed as if an
            associated script was attached to the model.
        :return: The imported model.
        """
        handle = sim.simLoadModel(filename)
        return utils.to_type(handle)

    def create_texture(self, filename: str, interpolate=True, decal_mode=False,
                       repeat_along_u=False, repeat_along_v=False
                       ) -> Tuple[Shape, Texture]:
        """Creates a planar shape that is textured.

        :param filename: Path to the texture to load.
        :param interpolate: Adjacent texture pixels are not interpolated.
        :param decal_mode: Texture is applied as a decal (its appearance
            won't be influenced by light conditions).
        :param repeat_along_u: Texture will be repeated along the U direction.
        :param repeat_along_v: Texture will be repeated along the V direction.
        :return: A tuple containing the textured plane and the texture.
        """
        options = 0
        if not interpolate:
            options |= 1
        if decal_mode:
            options |= 2
        # Bug fix: per CoppeliaSim's simCreateTexture, repeat-along-U is
        # bit 2 (value 4) and repeat-along-V is bit 3 (value 8). The previous
        # values (3 and 4) overlapped the interpolate/decal bits, so enabling
        # repeat_along_u also disabled interpolation and enabled decal mode.
        if repeat_along_u:
            options |= 4
        if repeat_along_v:
            options |= 8
        handle = sim.simCreateTexture(filename, options)
        s = Shape(handle)
        return s, s.get_texture()

    def get_objects_in_tree(self, root_object=None, *args, **kwargs
                            ) -> List[Object]:
        """Retrieves the objects in a given hierarchy tree.

        :param root_object: The root object in the tree. Pass None to retrieve
            all objects in the configuration tree. :py:class:`Object` or `int`.
        :param object_type: The object type to retrieve.
            One of :py:class:`.ObjectType`.
        :param exclude_base: Exclude the tree base from the returned list.
        :param first_generation_only: Include in the returned list only the
            object's first children. Otherwise, entire hierarchy is returned.
        :return: A list of objects in the hierarchy tree.
        """
        return Object._get_objects_in_tree(root_object, *args, **kwargs)

    def get_collection_handle_by_name(self, collection_name: str) -> int:
        """Retrieves the integer handle for a given collection.

        :param collection_name: Name of the collection to retrieve the integer handle for
        :return: An integer handle for the collection
        """
        return sim.simGetCollectionHandle(collection_name)
|
external/pyrep/pyrep/robots/__init__.py
ADDED
|
File without changes
|
external/pyrep/pyrep/robots/arms/lbr_iiwa_14_r820.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class LBRIwaa14R820(Arm):
    """KUKA LBR iiwa 14 R820 arm (7 joints)."""

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'LBR_iiwa_14_R820', 7)
|
external/pyrep/pyrep/robots/arms/lbr_iiwa_7_r800.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class LBRIwaa7R800(Arm):
    """KUKA LBR iiwa 7 R800 arm (7 joints)."""

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'LBR_iiwa_7_R800', 7)
|
external/pyrep/pyrep/robots/arms/locobot_arm.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class LoCoBotArm(Arm):
    """LoCoBot arm (5 joints)."""

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'LoCoBotArm', 5)
|
external/pyrep/pyrep/robots/arms/mico.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class Mico(Arm):
    """Kinova Mico arm (6 joints)."""

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'Mico', 6)
|
external/pyrep/pyrep/robots/arms/panda.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class Panda(Arm):
    """Franka Emika Panda arm (7 joints)."""

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'Panda', 7)
|
external/pyrep/pyrep/robots/arms/sawyer.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class Sawyer(Arm):
    """Rethink Robotics Sawyer arm (7 joints)."""

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'Sawyer', 7)
|
external/pyrep/pyrep/robots/arms/ur10.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class UR10(Arm):
    """Universal Robots UR10 arm (6 joints)."""

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'UR10', 6)
|
external/pyrep/pyrep/robots/arms/ur3.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class UR3(Arm):
    """Universal Robots UR3 arm (6 joints)."""

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'UR3', 6)
|
external/pyrep/pyrep/robots/arms/ur5.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class UR5(Arm):
    """Universal Robots UR5 arm (6 joints)."""

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'UR5', 6)
|
external/pyrep/pyrep/robots/arms/xarm7.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class XArm7(Arm):
    """UFactory xArm 7 arm (7 joints)."""

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'xarm', 7)
|
external/pyrep/pyrep/robots/arms/youBot.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.arms.arm import Arm
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class youBot(Arm):
    """KUKA youBot arm (5 joints).

    NOTE: class name is lowercase for historical/back-compat reasons;
    renaming would break existing imports.
    """

    def __init__(self, count: int = 0):
        # 'count' selects between identically-named robots in the scene.
        super().__init__(count, 'YouBot', 5)
|
external/pyrep/pyrep/robots/configuration_paths/__init__.py
ADDED
|
File without changes
|
external/pyrep/pyrep/robots/configuration_paths/arm_configuration_path.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.backend import sim
|
| 2 |
+
from pyrep.robots.configuration_paths.configuration_path import (
|
| 3 |
+
ConfigurationPath)
|
| 4 |
+
import numpy as np
|
| 5 |
+
from typing import List, Optional, Union
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ArmConfigurationPath(ConfigurationPath):
|
| 9 |
+
"""A path expressed in joint configuration space.
|
| 10 |
+
|
| 11 |
+
Paths are retrieved from an :py:class:`Arm`, and are associated with the
|
| 12 |
+
arm that generated the path.
|
| 13 |
+
|
| 14 |
+
This class is used for executing motion along a path via the
|
| 15 |
+
Reflexxes Motion Library type II or IV. The Reflexxes Motion Library
|
| 16 |
+
provides instantaneous trajectory generation capabilities for motion
|
| 17 |
+
control systems.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
def __init__(self, arm: 'Arm', # type: ignore
|
| 21 |
+
path_points: Union[List[float], np.ndarray]):
|
| 22 |
+
self._arm = arm
|
| 23 |
+
self._path_points = np.asarray(path_points)
|
| 24 |
+
self._rml_handle: Optional[int] = None
|
| 25 |
+
self._drawing_handle = None
|
| 26 |
+
self._path_done = False
|
| 27 |
+
self._num_joints = arm.get_joint_count()
|
| 28 |
+
self._joint_position_action = None
|
| 29 |
+
|
| 30 |
+
def __len__(self):
|
| 31 |
+
return len(self._path_points) // self._num_joints
|
| 32 |
+
|
| 33 |
+
def __getitem__(self, i):
|
| 34 |
+
path_points = self._path_points.reshape(-1, self._num_joints)
|
| 35 |
+
path_points = path_points[i].flatten()
|
| 36 |
+
return self.__class__(arm=self._arm, path_points=path_points)
|
| 37 |
+
|
| 38 |
+
def step(self) -> bool:
|
| 39 |
+
"""Makes a step along the trajectory.
|
| 40 |
+
|
| 41 |
+
This function steps forward a trajectory generation algorithm from
|
| 42 |
+
Reflexxes Motion Library.
|
| 43 |
+
NOTE: This does not step the physics engine. This is left to the user.
|
| 44 |
+
|
| 45 |
+
:return: If the end of the trajectory has been reached.
|
| 46 |
+
"""
|
| 47 |
+
if self._path_done:
|
| 48 |
+
raise RuntimeError('This path has already been completed. '
|
| 49 |
+
'If you want to re-run, then call set_to_start.')
|
| 50 |
+
if self._rml_handle is None:
|
| 51 |
+
self._rml_handle = self._get_rml_handle()
|
| 52 |
+
done = self._step_motion() == 1
|
| 53 |
+
self._path_done = done
|
| 54 |
+
return done
|
| 55 |
+
|
| 56 |
+
def set_to_start(self, disable_dynamics=False) -> None:
|
| 57 |
+
"""Sets the arm to the beginning of this path.
|
| 58 |
+
"""
|
| 59 |
+
start_config = self._path_points[:len(self._arm.joints)]
|
| 60 |
+
self._arm.set_joint_positions(start_config, disable_dynamics=disable_dynamics)
|
| 61 |
+
self._path_done = False
|
| 62 |
+
|
| 63 |
+
def set_to_end(self, disable_dynamics=False) -> None:
|
| 64 |
+
"""Sets the arm to the end of this path.
|
| 65 |
+
"""
|
| 66 |
+
final_config = self._path_points[-len(self._arm.joints):]
|
| 67 |
+
self._arm.set_joint_positions(final_config, disable_dynamics=disable_dynamics)
|
| 68 |
+
|
| 69 |
+
def visualize(self) -> None:
|
| 70 |
+
"""Draws a visualization of the path in the scene.
|
| 71 |
+
|
| 72 |
+
The visualization can be removed
|
| 73 |
+
with :py:meth:`ConfigurationPath.clear_visualization`.
|
| 74 |
+
"""
|
| 75 |
+
if len(self._path_points) <= 0:
|
| 76 |
+
raise RuntimeError("Can't visualise a path with no points.")
|
| 77 |
+
|
| 78 |
+
tip = self._arm.get_tip()
|
| 79 |
+
self._drawing_handle = sim.simAddDrawingObject(
|
| 80 |
+
objectType=sim.sim_drawing_lines, size=3, duplicateTolerance=0,
|
| 81 |
+
parentObjectHandle=-1, maxItemCount=99999,
|
| 82 |
+
ambient_diffuse=[1, 0, 1])
|
| 83 |
+
sim.simAddDrawingObjectItem(self._drawing_handle, None)
|
| 84 |
+
init_angles = self._arm.get_joint_positions()
|
| 85 |
+
self._arm.set_joint_positions(
|
| 86 |
+
self._path_points[0: len(self._arm.joints)])
|
| 87 |
+
prev_point = list(tip.get_position())
|
| 88 |
+
|
| 89 |
+
for i in range(len(self._arm.joints), len(self._path_points),
|
| 90 |
+
len(self._arm.joints)):
|
| 91 |
+
points = self._path_points[i:i + len(self._arm.joints)]
|
| 92 |
+
self._arm.set_joint_positions(points)
|
| 93 |
+
p = list(tip.get_position())
|
| 94 |
+
sim.simAddDrawingObjectItem(self._drawing_handle, prev_point + p)
|
| 95 |
+
prev_point = p
|
| 96 |
+
|
| 97 |
+
# Set the arm back to the initial config
|
| 98 |
+
self._arm.set_joint_positions(init_angles)
|
| 99 |
+
|
| 100 |
+
def clear_visualization(self) -> None:
|
| 101 |
+
"""Clears/removes a visualization of the path in the scene.
|
| 102 |
+
"""
|
| 103 |
+
if self._drawing_handle is not None:
|
| 104 |
+
sim.simAddDrawingObjectItem(self._drawing_handle, None)
|
| 105 |
+
|
| 106 |
+
    def get_executed_joint_position_action(self) -> np.ndarray:
        """Returns the joint-position target applied by the most recent step.

        May be ``None``: ``_step_motion`` resets it to ``None`` at the start
        of each step and only assigns a value when a target was actually set.
        """
        return self._joint_position_action
|
| 108 |
+
|
| 109 |
+
    def _get_rml_handle(self) -> int:
        """Create an RML (Reflexxes) trajectory handle for this path.

        Dry-runs the 1-DoF trajectory over the path's cumulative arc length
        and, if any joint would exceed its upper velocity limit, scales the
        maximum velocity down and retries until all joints stay within limits.

        :return: A handle to the configured RML position-based object.
        """
        dt = sim.simGetSimulationTimeStep()
        limits = np.array(self._arm.get_joint_upper_velocity_limits())
        vel_correction = 1.0
        max_vel = self._arm.max_velocity
        max_accel = self._arm.max_acceleration
        max_jerk = self._arm.max_jerk
        lengths = self._get_path_point_lengths()
        # Drive a scalar position from 0 to the total path length, ending at rest.
        target_pos_vel = [lengths[-1],0]
        # NOTE(review): arithmetic below (p2 - p1, qs - previous_q) assumes
        # _path_points is a numpy array — TODO confirm.
        previous_q = self._path_points[0:len(self._arm.joints)]

        while True:
            pos_vel_accel = [0, 0, 0]
            rMax = 0
            rml_handle = sim.simRMLPos(
                1, 0.0001, -1, pos_vel_accel,
                [max_vel * vel_correction, max_accel, max_jerk],
                [1], target_pos_vel)
            state = 0
            while state == 0:
                state, pos_vel_accel = sim.simRMLStep(rml_handle, dt, 1)
                if state >= 0:
                    pos = pos_vel_accel[0]
                    # Find the path segment containing the current arc length
                    # and interpolate the joint configuration within it.
                    for i in range(len(lengths)-1):
                        if lengths[i] <= pos <= lengths[i + 1]:
                            t = (pos - lengths[i]) / (lengths[i + 1] - lengths[i])
                            # For each joint
                            # NOTE(review): self._num_joints is presumably
                            # len(self._arm.joints) — verify.
                            offset = len(self._arm.joints) * i
                            p1 = self._path_points[
                                offset:offset + self._num_joints]
                            offset = len(self._arm.joints) * (i + 1)
                            p2 = self._path_points[
                                offset:offset + self._num_joints]
                            dx = p2 - p1
                            qs = p1 + dx * t
                            dq = qs - previous_q
                            previous_q = qs
                            # Ratio of required joint speed to its limit; track
                            # the worst joint over the whole trajectory.
                            r = np.abs(dq / dt) / limits
                            m = np.max(r)
                            if m > rMax:
                                rMax = m
                            break
            sim.simRMLRemove(rml_handle)
            if rMax > 1.001:
                # Some joint would exceed its limit: slow down and retry.
                vel_correction = vel_correction / rMax
            else:
                break
        # Build the final handle with the corrected velocity for actual use.
        pos_vel_accel = [0, 0, 0]
        rml_handle = sim.simRMLPos(
            1, 0.0001, -1, pos_vel_accel,
            [max_vel*vel_correction, max_accel, max_jerk], [1], target_pos_vel)
        return rml_handle
|
| 161 |
+
|
| 162 |
+
def _step_motion(self) -> int:
|
| 163 |
+
self._joint_position_action = None
|
| 164 |
+
dt = sim.simGetSimulationTimeStep()
|
| 165 |
+
lengths = self._get_path_point_lengths()
|
| 166 |
+
state, posVelAccel = sim.simRMLStep(self._rml_handle, dt, 1)
|
| 167 |
+
if state >= 0:
|
| 168 |
+
pos = posVelAccel[0]
|
| 169 |
+
for i in range(len(lengths) - 1):
|
| 170 |
+
if lengths[i] <= pos <= lengths[i + 1]:
|
| 171 |
+
t = (pos - lengths[i]) / (lengths[i + 1] - lengths[i])
|
| 172 |
+
# For each joint
|
| 173 |
+
offset = len(self._arm.joints) * i
|
| 174 |
+
p1 = self._path_points[
|
| 175 |
+
offset:offset + len(self._arm.joints)]
|
| 176 |
+
offset = self._arm._num_joints * (i + 1)
|
| 177 |
+
p2 = self._path_points[
|
| 178 |
+
offset:offset + len(self._arm.joints)]
|
| 179 |
+
dx = p2 - p1
|
| 180 |
+
qs = p1 + dx * t
|
| 181 |
+
self._joint_position_action = qs
|
| 182 |
+
self._arm.set_joint_target_positions(qs)
|
| 183 |
+
break
|
| 184 |
+
if state == 1:
|
| 185 |
+
sim.simRMLRemove(self._rml_handle)
|
| 186 |
+
return state
|
| 187 |
+
|
| 188 |
+
def _get_path_point_lengths(self) -> List[float]:
|
| 189 |
+
path_points = self._path_points
|
| 190 |
+
prev_points = path_points[0:len(self._arm.joints)]
|
| 191 |
+
dists = [0.]
|
| 192 |
+
d = 0
|
| 193 |
+
for i in range(len(self._arm.joints), len(self._path_points),
|
| 194 |
+
len(self._arm.joints)):
|
| 195 |
+
points = path_points[i:i + len(self._arm.joints)]
|
| 196 |
+
d += np.sqrt(np.sum(np.square(prev_points - points)))
|
| 197 |
+
dists.append(d)
|
| 198 |
+
prev_points = points
|
| 199 |
+
return dists
|
external/pyrep/pyrep/robots/configuration_paths/configuration_path.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class ConfigurationPath(object):
    """Base class for paths expressed in joint configuration space.

    Concrete subclasses must implement every method below.
    """

    def step(self) -> bool:
        """Advance along the path by one step. Must be overridden."""
        raise NotImplementedError()

    def set_to_start(self) -> None:
        """Jump the robot to the first configuration. Must be overridden."""
        raise NotImplementedError()

    def set_to_end(self) -> None:
        """Jump the robot to the final configuration. Must be overridden."""
        raise NotImplementedError()

    def visualize(self) -> None:
        """Draw the path in the scene. Must be overridden."""
        raise NotImplementedError()

    def clear_visualization(self) -> None:
        """Remove a previously drawn path. Must be overridden."""
        raise NotImplementedError()
|
external/pyrep/pyrep/robots/configuration_paths/holonomic_configuration_path.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.backend import sim, utils
|
| 2 |
+
from pyrep.robots.configuration_paths.mobile_configuration_path import (
|
| 3 |
+
MobileConfigurationPath)
|
| 4 |
+
from pyrep.const import PYREP_SCRIPT_TYPE
|
| 5 |
+
from math import sqrt
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class HolonomicConfigurationPath(MobileConfigurationPath):
    """A path expressed in joint configuration space.

    Paths are retrieved from an :py:class:`Mobile`, and are associated with the
    mobile base that generated the path.

    This class is used for executing motion along a path via the
    _get_base_actuation function employing a proportional controller.
    """

    def step(self) -> bool:
        """Make a step along the trajectory.

        Step forward by calling _get_base_actuation to get the velocity needed
        to be applied at the wheels.

        NOTE: This does not step the physics engine. This is left to the user.

        :return: If the end of the trajectory has been reached.
        """
        if self._path_done:
            raise RuntimeError('This path has already been completed. '
                               'If you want to re-run, then call set_to_start.')

        # Position of the intermediate target expressed in the base's frame.
        pos_inter = self._mobile.intermediate_target_base.get_position(
            relative_to=self._mobile)

        if len(self._path_points) > 2:  # Non-linear path
            if self.inter_done:
                # Advance to the next intermediate waypoint along the path.
                self._next_i_path()
                self._set_inter_target(self.i_path)
                self.inter_done = False

                handleBase = self._mobile.get_handle()
                handleInterTargetBase = self._mobile.intermediate_target_base.get_handle()

                # Adjust the intermediate target's pose/heading via the
                # embedded PyRep helper script in the scene.
                __, ret_floats, _, _ = utils.script_call(
                    'getBoxAdjustedMatrixAndFacingAngle@PyRep',
                    PYREP_SCRIPT_TYPE,
                    ints=[handleBase, handleInterTargetBase])

                m = ret_floats[:-1]
                angle = ret_floats[-1]
                self._mobile.intermediate_target_base.set_position(
                    [m[3], m[7], self._mobile.target_z])
                self._mobile.intermediate_target_base.set_orientation(
                    [0, 0, angle])

            # Within 0.1 m of the intermediate target: stop and mark it done.
            if sqrt((pos_inter[0]) ** 2 + (pos_inter[1]) ** 2) < 0.1:
                self.inter_done = True
                actuation = [0, 0, 0]
            else:
                actuation, _ = self._mobile.get_base_actuation()

            self._mobile.set_base_angular_velocites(actuation)

            if self.i_path == len(self._path_points) - 1:
                self._path_done = True
        else:
            # Linear (two-point) path: drive straight at the final target.
            actuation, self._path_done = self._mobile.get_base_actuation()
            self._mobile.set_base_angular_velocites(actuation)

        return self._path_done
|
external/pyrep/pyrep/robots/configuration_paths/mobile_configuration_path.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.backend import sim, utils
|
| 2 |
+
from pyrep.robots.configuration_paths.configuration_path import (
|
| 3 |
+
ConfigurationPath)
|
| 4 |
+
from pyrep.robots.mobiles.mobile_base import MobileBase
|
| 5 |
+
from pyrep.const import PYREP_SCRIPT_TYPE
|
| 6 |
+
from math import sqrt
|
| 7 |
+
from typing import List
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class MobileConfigurationPath(ConfigurationPath):
    """A path expressed in joint configuration space.

    Paths are retrieved from an :py:class:`Mobile`, and are associated with the
    mobile base that generated the path.

    This class is used for executing motion along a path via the
    _get_base_actuation function employing a proportional controller.
    """

    def __init__(self, mobile: MobileBase, path_points: List[List[float]]):
        # Each path point holds at least (x, y, angle); the last element of a
        # point is accumulated as a distance in _next_i_path — presumably the
        # distance to the next waypoint. TODO confirm against the planner.
        self._mobile = mobile
        self._path_points = path_points
        self._drawing_handle = None
        self._path_done = False
        self.i_path = -1        # Index of the current intermediate waypoint.
        self.inter_done = True  # True once the intermediate target is reached.
        self._num_joints = mobile.get_joint_count()

        self.set_to_start()

        # Paths with more than two points use an intermediate moving target.
        if len(self._path_points) > 2:
            self._set_inter_target(0)

    def step(self) -> bool:
        """Make a step along the trajectory.

        Step forward by calling _get_base_actuation to get the velocity needed
        to be applied at the wheels.

        NOTE: This does not step the physics engine. This is left to the user.

        :return: If the end of the trajectory has been reached.
        """
        raise NotImplementedError()

    def set_to_start(self) -> None:
        """Sets the mobile base to the beginning of this path.
        """
        start_config = self._path_points[0]
        self._mobile.set_2d_pose(start_config[:3])
        self._path_done = False

    def set_to_end(self) -> None:
        """Sets the mobile base to the end of this path.
        """
        final_config = self._path_points[-1]
        self._mobile.set_2d_pose(final_config[:3])

    def visualize(self) -> None:
        """Draws a visualization of the path in the scene.

        The visualization can be removed
        with :py:meth:`ConfigurationPath.clear_visualization`.

        NOTE: This temporarily drives the base through every waypoint and
        restores the initial pose afterwards.
        """
        if len(self._path_points) <= 0:
            raise RuntimeError("Can't visualise a path with no points.")

        tip = self._mobile
        self._drawing_handle = sim.simAddDrawingObject(
            objectType=sim.sim_drawing_lines, size=3, duplicateTolerance=0,
            parentObjectHandle=-1, maxItemCount=99999,
            ambient_diffuse=[1, 0, 1])
        # Passing None clears any previous items on the drawing object.
        sim.simAddDrawingObjectItem(self._drawing_handle, None)
        init_pose = self._mobile.get_2d_pose()
        self._mobile.set_2d_pose(self._path_points[0][:3])
        prev_point = list(tip.get_position())

        for i in range(len(self._path_points)):
            points = self._path_points[i]
            self._mobile.set_2d_pose(points[:3])
            p = list(tip.get_position())
            # Draw a segment between consecutive base positions.
            sim.simAddDrawingObjectItem(self._drawing_handle, prev_point + p)
            prev_point = p

        # Set the base back to the initial pose
        self._mobile.set_2d_pose(init_pose[:3])

    def clear_visualization(self) -> None:
        """Clears/removes a visualization of the path in the scene.
        """
        if self._drawing_handle is not None:
            sim.simAddDrawingObjectItem(self._drawing_handle, None)

    def _next_i_path(self):
        # Advance i_path until at least `incr` of path distance has been
        # accumulated, clamping at the final waypoint.
        incr = 0.01
        dist_to_next = 0
        while dist_to_next < incr:
            self.i_path += 1
            if self.i_path == len(self._path_points) - 1:
                self.i_path = len(self._path_points) - 1
                break
            dist_to_next += self._path_points[self.i_path][-1]

    def _set_inter_target(self, i):
        # Place the intermediate target at waypoint i's (x, y) at the base's
        # fixed target height, oriented to the waypoint's angle.
        self._mobile.intermediate_target_base.set_position(
            [self._path_points[i][0], self._path_points[i][1],
             self._mobile.target_z])
        self._mobile.intermediate_target_base.set_orientation(
            [0, 0, self._path_points[i][2]])
|
external/pyrep/pyrep/robots/configuration_paths/nonholonomic_configuration_path.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.configuration_paths.mobile_configuration_path import (
|
| 2 |
+
MobileConfigurationPath)
|
| 3 |
+
from math import sqrt
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class NonHolonomicConfigurationPath(MobileConfigurationPath):
    """A path expressed in joint configuration space.

    Paths are retrieved from an :py:class:`Mobile`, and are associated with the
    mobile base that generated the path.

    This class is used for executing motion along a path via the
    _get_base_actuation function employing a proportional controller.
    """

    def step(self) -> bool:
        """Make a step along the trajectory.

        Step forward by calling _get_base_actuation to get the velocity needed
        to be applied at the wheels.

        NOTE: This does not step the physics engine. This is left to the user.

        :return: If the end of the trajectory has been reached.
        """
        if self._path_done:
            raise RuntimeError('This path has already been completed. '
                               'If you want to re-run, then call set_to_start.')

        # Position of the intermediate target expressed in the base's frame.
        pos_inter = self._mobile.intermediate_target_base.get_position(
            relative_to=self._mobile)

        if len(self._path_points) > 2:  # Non-linear path
            if self.inter_done:
                self._next_i_path()
                self._set_inter_target(self.i_path)
                self.inter_done = False

            # NOTE(review): both branches call get_base_actuation identically;
            # the near-target branch only differs by marking inter_done.
            if sqrt((pos_inter[0]) ** 2 + (pos_inter[1]) ** 2) < 0.1:
                self.inter_done = True
                actuation, _ = self._mobile.get_base_actuation()
            else:
                actuation, _ = self._mobile.get_base_actuation()

            self._mobile.set_joint_target_velocities(actuation)

            if self.i_path == len(self._path_points) - 1:
                self._path_done = True

        else:
            # Linear (two-point) path: drive straight at the final target.
            actuation, self._path_done = self._mobile.get_base_actuation()
            self._mobile.set_joint_target_velocities(actuation)

        return self._path_done
|
external/pyrep/pyrep/robots/end_effectors/__init__.py
ADDED
|
File without changes
|
external/pyrep/pyrep/robots/end_effectors/baxter_gripper.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.end_effectors.gripper import Gripper
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class BaxterGripper(Gripper):
    """Parallel-jaw gripper of the Baxter robot."""

    def __init__(self, count: int = 0):
        jaw_joints = ['BaxterGripper_leftJoint', 'BaxterGripper_rightJoint']
        super().__init__(count, 'BaxterGripper', jaw_joints)
|
external/pyrep/pyrep/robots/end_effectors/baxter_suction_cup.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.end_effectors.suction_cup import SuctionCup
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class BaxterSuctionCup(SuctionCup):
    """Suction-cup end-effector for the Baxter robot."""

    def __init__(self, count: int = 0):
        super().__init__(count, 'BaxterSuctionCup')
|
external/pyrep/pyrep/robots/end_effectors/dobot_suction_cup.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.end_effectors.suction_cup import SuctionCup
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class DobotSuctionCup(SuctionCup):
    """Suction-cup end-effector for the Dobot robot."""

    def __init__(self, count: int = 0):
        super().__init__(count, 'Dobot_suctionCup')
|
external/pyrep/pyrep/robots/end_effectors/dual_panda_gripper.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.end_effectors.gripper import Gripper
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class PandaGripperRight(Gripper):
    """Gripper on the right arm of a dual-arm Panda setup."""

    def __init__(self, count: int = 0):
        prefix = 'Panda_rightArm_gripper'
        super().__init__(count, prefix,
                         ['%s_joint1' % prefix, '%s_joint2' % prefix])
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class PandaGripperLeft(Gripper):
    """Gripper on the left arm of a dual-arm Panda setup."""

    def __init__(self, count: int = 0):
        prefix = 'Panda_leftArm_gripper'
        super().__init__(count, prefix,
                         ['%s_joint1' % prefix, '%s_joint2' % prefix])
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
|
external/pyrep/pyrep/robots/end_effectors/gripper.py
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Union
|
| 2 |
+
from pyrep.objects.object import Object
|
| 3 |
+
from pyrep.objects.proximity_sensor import ProximitySensor
|
| 4 |
+
from pyrep.objects.force_sensor import ForceSensor
|
| 5 |
+
from pyrep.robots.robot_component import RobotComponent
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
import logging
|
| 9 |
+
|
| 10 |
+
POSITION_ERROR = 0.001
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class Gripper(RobotComponent):
    """Represents all types of end-effectors, e.g. grippers.
    """

    def __init__(self, count: int, name: str, joint_names: List[str]):
        """
        :param count: Scene-copy index of the gripper (0 for the original;
            copy k is suffixed '#<k-1>' in the scene).
        :param name: Name of the gripper model in the scene.
        :param joint_names: Names of the actuated gripper joints.
        """
        super().__init__(count, name, joint_names)
        suffix = '' if count == 0 else '#%d' % (count - 1)
        prox_name = '%s_attachProxSensor%s' % (name, suffix)
        attach_name = '%s_attachPoint%s' % (name, suffix)
        self._proximity_sensor = ProximitySensor(prox_name)
        self._attach_point = ForceSensor(attach_name)
        self._old_parents: List[Object] = []
        self._grasped_objects: List[Object] = []
        self._prev_positions = [None] * len(joint_names)
        self._prev_vels = [None] * len(joint_names)  # Used to stop oscillating

        # Collect any optional touch sensors named '<name>_touchSensor<i>'.
        self._touch_sensors = []
        i = 0
        while True:
            fname = '%s_touchSensor%d%s' % (name, i, suffix)
            if not ForceSensor.exists(fname):
                break
            self._touch_sensors.append(ForceSensor(fname))
            i += 1

    def grasp(self, obj: Object) -> bool:
        """Grasp object if it is detected.

        Note: This does not actuate the gripper, but instead simply attaches
        the detected object to the gripper to 'fake' a grasp.

        :param obj: The object to grasp if detected.
        :return: True if the object was detected/grasped.
        """
        detected = self._proximity_sensor.is_detected(obj)
        # Check if detected and that we are not already grasping it.
        if detected and obj not in self._grasped_objects:
            self._grasped_objects.append(obj)
            logging.debug("Grasping object with name: %s. Object is being attached to gripper.", obj.get_name())
            self._old_parents.append(obj.get_parent())  # type: ignore
            obj.set_parent(self._attach_point, keep_in_place=True)
        return detected

    def release(self) -> None:
        """Release any grasped objects.

        Note: This does not actuate the gripper, but instead simply detaches
        any grasped objects.
        """
        for grasped_obj, old_parent in zip(
                self._grasped_objects, self._old_parents):
            # Check if the object still exists
            if grasped_obj.still_exists():
                grasped_obj.set_parent(old_parent, keep_in_place=True)
        self._grasped_objects = []
        self._old_parents = []

    def get_grasped_objects(self) -> List[Object]:
        """Gets the objects that are currently grasped.

        :return: A list of grasped objects.
        """
        return self._grasped_objects

    def actuate(self, amount: float, velocity: float) -> bool:
        """Actuate the gripper, but return after each simulation step.

        The functions attempts to open/close the gripper according to 'amount',
        where 1 represents open, and 0 represents close. The user should
        iteratively call this function until it returns True.

        This is a convenience method. If you would like direct control of the
        gripper, use the :py:class:`RobotComponent` methods instead.

        For some grippers, this method will need to be overridden.

        :param amount: A float between 0 and 1 representing the gripper open
            state. 1 means open, whilst 0 means closed.
        :param velocity: The velocity to apply to the gripper joints.

        :raises: ValueError if 'amount' is not between 0 and 1.

        :return: True if the gripper has reached its open/closed limits, or if
            the 'max_force' has been exerted.
        """
        if not (0.0 <= amount <= 1.0):
            # Fixed: the message previously referenced a non-existent
            # parameter ('open_amount') and had a stray trailing quote.
            raise ValueError("'amount' should be between 0 and 1.")
        _, joint_intervals_list = self.get_joint_intervals()
        joint_intervals = np.array(joint_intervals_list)

        # Decide on if we need to open or close
        joint_range = joint_intervals[:, 1] - joint_intervals[:, 0]
        target_pos = joint_intervals[:, 0] + (joint_range * amount)

        current_positions = self.get_joint_positions()
        done = True
        for i, (j, target, cur, prev) in enumerate(zip(
                self.joints, target_pos, current_positions,
                self._prev_positions)):
            # Check if the joint has moved much
            not_moving = (prev is not None and
                          np.fabs(cur - prev) < POSITION_ERROR)
            reached_target = np.fabs(target - cur) < POSITION_ERROR
            vel = -velocity if cur - target > 0 else velocity
            # A sign flip versus the previous call means we overshot.
            oscillating = (self._prev_vels[i] is not None and
                           vel != self._prev_vels[i])
            if not_moving or reached_target or oscillating:
                j.set_joint_target_velocity(0)
                continue
            done = False
            self._prev_vels[i] = vel  # type: ignore
            j.set_joint_target_velocity(vel)
        self._prev_positions = current_positions  # type: ignore
        if done:
            # Reset bookkeeping so the next actuation starts fresh.
            self._prev_positions = [None] * self._num_joints
            self._prev_vels = [None] * self._num_joints
            self.set_joint_target_velocities([0.0] * self._num_joints)
        return done

    def get_open_amount(self) -> List[float]:
        """Gets the gripper open state. 1 means open, whilst 0 means closed.

        :return: A list of floats between 0 and 1 representing the gripper open
            state for each joint. 1 means open, whilst 0 means closed.
        """
        _, joint_intervals_list = self.get_joint_intervals()
        joint_intervals = np.array(joint_intervals_list)
        joint_range = joint_intervals[:, 1] - joint_intervals[:, 0]
        return list(np.clip((np.array(
            self.get_joint_positions()) - joint_intervals[:, 0]) /
            joint_range, 0.0, 1.0))

    def get_touch_sensor_forces(self) -> List[List[float]]:
        """Reads every optional touch sensor attached to this gripper.

        :raises: NotImplementedError if the gripper has no touch sensors.
        :return: The force reading of each touch sensor.
        """
        if len(self._touch_sensors) == 0:
            raise NotImplementedError('No touch sensors for this robot!')
        return [ts.read()[0] for ts in self._touch_sensors]
|
external/pyrep/pyrep/robots/end_effectors/jaco_gripper.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.end_effectors.gripper import Gripper
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class JacoGripper(Gripper):
    """Three-fingered Kinova Jaco hand."""

    def __init__(self, count: int = 0):
        finger_joints = ['JacoHand_joint1_finger%d' % n for n in (1, 2, 3)]
        super().__init__(count, 'JacoHand', finger_joints)
|
external/pyrep/pyrep/robots/end_effectors/locobot_gripper.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.end_effectors.gripper import Gripper
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class LoCoBotGripper(Gripper):
    """Two-jointed gripper of the LoCoBot robot."""

    def __init__(self, count: int = 0):
        gripper_joints = ['LoCoBotGripper_joint%d' % n for n in (1, 2)]
        super().__init__(count, 'LoCoBotGripper', gripper_joints)
|
external/pyrep/pyrep/robots/end_effectors/mico_gripper.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.end_effectors.gripper import Gripper
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class MicoGripper(Gripper):
    """Kinova Mico hand (actuated fingers 1 and 3)."""

    def __init__(self, count: int = 0):
        finger_joints = ['MicoHand_joint1_finger%d' % n for n in (1, 3)]
        super().__init__(count, 'MicoHand', finger_joints)
|
external/pyrep/pyrep/robots/end_effectors/panda_gripper.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.end_effectors.gripper import Gripper
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class PandaGripper(Gripper):
    """Parallel-jaw gripper of the Franka Emika Panda."""

    def __init__(self, count: int = 0):
        gripper_joints = ['Panda_gripper_joint%d' % n for n in (1, 2)]
        super().__init__(count, 'Panda_gripper', gripper_joints)
|
external/pyrep/pyrep/robots/end_effectors/robotiq85_gripper.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
|
| 3 |
+
from pyrep.objects import Joint
|
| 4 |
+
from pyrep.robots.end_effectors.gripper import Gripper, POSITION_ERROR
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
POSITION_ERROR = 0.00001
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class Robotiq85Gripper(Gripper):
    """Robotiq 2F-85 gripper.

    Only fully-open / fully-closed actuation is supported; the open amount is
    read from two passive query joints rather than the actuated ones.
    """

    def __init__(self, count: int = 0):
        super().__init__(count, 'ROBOTIQ_85', ['ROBOTIQ_85_active1',
                                               'ROBOTIQ_85_active2'])
        self._open_amount_query_joints = [
            Joint(jname) for jname in [
                'ROBOTIQ_85_Rjoint1', 'ROBOTIQ_85_Ljoint1']]

    def actuate(self, amount: float, velocity: float) -> bool:
        """Drive the gripper fully open (1.0) or fully closed (0.0).

        :param amount: Must be exactly 0.0 or 1.0.
        :param velocity: Magnitude of the joint velocity to apply.
        :raises: ValueError for any other value of 'amount'.
        :return: True once every joint has stopped moving.
        """
        if amount != 0.0 and amount != 1.0:
            raise ValueError(
                'This gripper currently only supports fully open or closed.')

        current_positions = self.get_joint_positions()
        # Positive velocity opens, negative closes.
        vel = velocity if amount == 1.0 else -velocity
        done = True
        for i, (j, cur, prev) in enumerate(zip(
                self.joints, current_positions,
                self._prev_positions)):
            # Check if the joint has moved much
            not_moving = (prev is not None and
                          np.fabs(cur - prev) < POSITION_ERROR)
            if not_moving:
                j.set_joint_target_velocity(0)
                continue
            done = False
            self._prev_vels[i] = vel  # type: ignore
            j.set_joint_target_velocity(vel)
        self._prev_positions = current_positions  # type: ignore
        if done:
            # Reset bookkeeping so the next actuation starts fresh.
            self._prev_positions = [None] * self._num_joints
            self._prev_vels = [None] * self._num_joints
            self.set_joint_target_velocities([0.0] * self._num_joints)
        return done

    def get_open_amount(self) -> List[float]:
        """Gets the gripper open state. 1 means open, whilst 0 means closed.

        :return: A list of floats between 0 and 1 representing the gripper open
            state for each joint. 1 means open, whilst 0 means closed.
        """
        joint_intervals = np.array([j.get_joint_interval()[1]
                                    for j in self._open_amount_query_joints])
        joint_range = joint_intervals[:, 1] - joint_intervals[:, 0]
        # Flip open amount
        return list(1. - np.clip((np.array(
            [j.get_joint_position() for j in self._open_amount_query_joints]
        ) - joint_intervals[:, 0]) / joint_range, 0.0, 1.0))
|
external/pyrep/pyrep/robots/end_effectors/suction_cup.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
from pyrep.objects.object import Object
|
| 3 |
+
from pyrep.objects.shape import Shape
|
| 4 |
+
from pyrep.objects.proximity_sensor import ProximitySensor
|
| 5 |
+
from pyrep.objects.force_sensor import ForceSensor
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class SuctionCup(Shape):
    """Represents all suction cups.
    """

    def __init__(self, count: int, name: str, base_name: str = None):
        # count selects the scene copy: copy k is suffixed '#<k-1>'.
        suffix = '' if count == 0 else '#%d' % (count - 1)
        super().__init__(name + suffix if base_name is None else base_name + suffix)
        self._proximity_sensor = ProximitySensor('%s_sensor%s' % (name, suffix))
        self._attach_point = ForceSensor('%s_connection%s' % (name, suffix))
        self._old_parents: List[Object] = []
        self._grasped_objects: List[Object] = []

    def grasp(self, obj: Object) -> bool:
        """Attach the object to the suction cup if it is detected.

        Note: This does not move the object up to the suction cup. Therefore,
        the proximity sensor should have a short range in order for the
        suction grasp to look realistic.

        :param obj: The object to grasp if detected.
        :return: True if the object was detected/grasped.
        """
        detected = self._proximity_sensor.is_detected(obj)
        # Check if detected and that we are not already grasping it.
        if detected and obj not in self._grasped_objects:
            self._grasped_objects.append(obj)
            self._old_parents.append(obj.get_parent())  # type: ignore
            obj.set_parent(self._attach_point, keep_in_place=True)
        return detected

    def release(self) -> None:
        """Release any objects that have been sucked.

        Note: This does not actuate the gripper, but instead simply detaches
        any grasped objects.
        """
        for grasped_obj, old_parent in zip(
                self._grasped_objects, self._old_parents):
            # Check if the object still exists
            if grasped_obj.still_exists():
                grasped_obj.set_parent(old_parent, keep_in_place=True)
        self._grasped_objects = []
        self._old_parents = []

    def get_grasped_objects(self) -> List[Object]:
        """Gets the objects that are currently in the suction cup.

        :return: A list of grasped objects.
        """
        return self._grasped_objects
|
external/pyrep/pyrep/robots/end_effectors/xarm_gripper.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.end_effectors.gripper import Gripper
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class XArmGripper(Gripper):
    """The xArm gripper, actuated through six named joints."""

    def __init__(self, count: int = 0):
        # Joint names must match the model definition in the scene file.
        joint_names = [
            'xarm_left_inner_knuckle_joint',
            'xarm_right_inner_knuckle_joint',
            'xarm_drive_joint',
            'xarm_left_finger_joint',
            'xarm_right_outer_knuckle_joint',
            'xarm_right_finger_joint',
        ]
        super().__init__(count, 'xarm_gripper', joint_names)
|
external/pyrep/pyrep/robots/mobiles/holonomic_base.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.mobiles.mobile_base import MobileBase
|
| 2 |
+
from pyrep.robots.configuration_paths.holonomic_configuration_path import (
|
| 3 |
+
HolonomicConfigurationPath)
|
| 4 |
+
from pyrep.backend import utils
|
| 5 |
+
from pyrep.const import PYREP_SCRIPT_TYPE
|
| 6 |
+
from pyrep.const import ConfigurationPathAlgorithms as Algos
|
| 7 |
+
from pyrep.errors import ConfigurationPathError
|
| 8 |
+
from typing import List
|
| 9 |
+
from pyrep.objects.joint import Joint
|
| 10 |
+
from math import pi, sqrt
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class HolonomicBase(MobileBase):
    """A mobile base with omnidirectional (holonomic) wheels.

    Adds holonomic path types and a proportional controller on top of
    :class:`MobileBase`.
    """

    def __init__(self,
                 count: int,
                 num_wheels: int,
                 distance_from_target: float,
                 name: str,
                 max_velocity: float = 4,
                 max_velocity_rotation: float = 6,
                 max_acceleration: float = 0.035):
        """Init.

        :param count: used for multiple copies of robots.
        :param num_wheels: number of actuated wheels.
        :param distance_from_target: offset from target.
        :param name: string with robot name (same as base in vrep model).
        :param max_velocity: bounds x,y velocity for motion planning.
        :param max_velocity_rotation: bounds yaw velocity for motion planning.
        :param max_acceleration: bounds acceleration for motion planning.
        """

        super().__init__(count, num_wheels, name)

        suffix = '' if count == 0 else '#%d' % (count - 1)

        # Proportional gains: paramP scales position error, paramO scales
        # orientation error (see get_base_actuation).
        self.paramP = 20
        self.paramO = 10
        # self.paramO = 30
        # Previous commanded velocities, kept for acceleration limiting.
        self.previous_forw_back_vel = 0
        self.previous_left_right_vel = 0
        self.previous_rot_vel = 0
        self.accelF = max_acceleration
        self.maxV = max_velocity
        self.max_v_rot = max_velocity_rotation
        self.dist1 = distance_from_target

        # Slipping joints expected to exist in the model for each wheel.
        joint_slipping_names = [
            '%s_slipping_m_joint%s%s' % (name, str(i + 1), suffix) for i in
            range(self.num_wheels)]
        self.joints_slipping = [Joint(jsname)
                                for jsname in joint_slipping_names]

    def set_base_angular_velocites(self, velocity: List[float]):
        """Calls required functions to achieve desired omnidirectional effect.

        :param velocity: A List with forwardBackward, leftRight and rotation
            velocity (in radian/s)
        """
        # NOTE(review): the mix below always produces 4 wheel velocities,
        # which presumably assumes a 4-wheel mecanum-style base — confirm
        # for other wheel counts.
        # self._reset_wheel()
        fBVel = velocity[0]
        lRVel = velocity[1]
        rVel = velocity[2]
        self.set_joint_target_velocities(
            [-fBVel - lRVel - rVel, -fBVel + lRVel - rVel,
             -fBVel - lRVel + rVel, -fBVel + lRVel + rVel])

    def get_linear_path(self, position: List[float],
                        angle=0) -> HolonomicConfigurationPath:
        """Initialize linear path and check for collision along it.

        :param position: The x, y position of the target.
        :param angle: The z orientation of the target (in radians).
        :raises: ConfigurationPathError if no path could be created.

        :return: A linear path in the 2d space.
        """
        position_base = self.get_position()
        angle_base = self.get_orientation()[-1]

        self.target_base.set_position(
            [position[0], position[1], self.target_z])
        self.target_base.set_orientation([0, 0, angle])

        # Ask the embedded PyRep script for an adjusted target matrix and
        # the facing angle toward the target.
        handle_base = self.get_handle()
        handle_target_base = self.target_base.get_handle()
        _, ret_floats, _, _ = utils.script_call(
            'getBoxAdjustedMatrixAndFacingAngle@PyRep', PYREP_SCRIPT_TYPE,
            ints=[handle_base, handle_target_base])

        # ret_floats: a flattened transform matrix followed by the angle.
        m = ret_floats[:-1]
        angle = ret_floats[-1]
        # Place the intermediate target dist1 short of the goal along the
        # facing direction encoded in the matrix columns.
        self.intermediate_target_base.set_position(
            [m[3] - m[0] * self.dist1, m[7] - m[4] * self.dist1,
             self.target_z])
        self.intermediate_target_base.set_orientation([0, 0, angle])
        self.target_base.set_orientation([0, 0, angle])

        path = [[position_base[0], position_base[1], angle_base],
                [position[0], position[1], angle]]

        # Side effect: this sweeps the base along the path (see MobileBase).
        if self._check_collision_linear_path(path):
            raise ConfigurationPathError(
                'Could not create path. '
                'An object was detected on the linear path.')

        return HolonomicConfigurationPath(self, path)

    def get_nonlinear_path(self, position: List[float],
                           angle=0,
                           boundaries=2,
                           path_pts=600,
                           ignore_collisions=False,
                           algorithm=Algos.RRTConnect
                           ) -> HolonomicConfigurationPath:
        """Gets a non-linear (planned) configuration path given a target pose.

        :param position: The x, y, z position of the target.
        :param angle: The z orientation of the target (in radians).
        :param boundaries: A float defining the path search in x and y direction
            [[-boundaries,boundaries],[-boundaries,boundaries]].
        :param path_pts: The number of sampled points returned from the
            computed path
        :param ignore_collisions: If collision checking should be disabled.
        :param algorithm: Algorithm used to compute path
        :raises: ConfigurationPathError if no path could be created.

        :return: A non-linear path (x,y,angle) in the xy configuration space.
        """

        path = self._get_nonlinear_path_points(
            position, angle, boundaries, path_pts, ignore_collisions, algorithm)

        return HolonomicConfigurationPath(self, path)

    def get_base_actuation(self):
        """Proportional controller.

        Drives toward the intermediate target, clamping speed to
        maxV/max_v_rot and limiting per-call velocity change (acceleration)
        to accelF of the respective maximum. Stateful: updates the
        previous_* velocities each call.

        :return: A list with forward/backward, left/right and rotational
            velocity, and a bool representing target is reached.
        """

        handleBase = self.get_handle()
        handle_inter_target_base = self.intermediate_target_base.get_handle()
        pos_v = self.target_base.get_position(relative_to=self)
        or_v = self.target_base.get_orientation(relative_to=self)

        pos_inter = self.intermediate_target_base.get_position(
            relative_to=self)
        or_inter = self.intermediate_target_base.get_orientation(
            relative_to=self)

        # Within 1 mm of the stand-off distance and 0.1 deg of the target
        # yaw: report "reached" and hold the previous command.
        if (sqrt((pos_v[0]) ** 2 + (
                pos_v[1]) ** 2) - self.dist1) < 0.001 and or_v[-1] < 0.1 * pi / 180:
            return [self.previous_forw_back_vel, self.previous_left_right_vel,
                    self.previous_rot_vel], True

        # Proportional terms on position / orientation error.
        forw_back_vel = pos_inter[1] * self.paramP
        left_right_vel = pos_inter[0] * self.paramP
        rot_vel = - or_inter[2] * self.paramO

        # Clamp the planar speed magnitude to maxV, preserving direction.
        v = sqrt(forw_back_vel * forw_back_vel +
                 left_right_vel * left_right_vel)
        if v > self.maxV:
            forw_back_vel = forw_back_vel * self.maxV / v
            left_right_vel = left_right_vel * self.maxV / v

        if (abs(rot_vel) > self.max_v_rot):
            rot_vel = self.max_v_rot * rot_vel / abs(rot_vel)

        # Limit per-call change (acceleration) on each component.
        df = forw_back_vel - self.previous_forw_back_vel
        ds = left_right_vel - self.previous_left_right_vel
        dr = rot_vel - self.previous_rot_vel

        if abs(df) > self.maxV * self.accelF:
            df = abs(df) * (self.maxV * self.accelF) / df

        if abs(ds) > self.maxV * self.accelF:
            ds = abs(ds) * (self.maxV * self.accelF) / ds

        if abs(dr) > self.max_v_rot * self.accelF:
            dr = abs(dr) * (self.max_v_rot * self.accelF) / dr

        forw_back_vel = self.previous_forw_back_vel + df
        left_right_vel = self.previous_left_right_vel + ds
        rot_vel = self.previous_rot_vel + dr

        # Remember the commanded velocities for the next call.
        self.previous_forw_back_vel = forw_back_vel
        self.previous_left_right_vel = left_right_vel
        self.previous_rot_vel = rot_vel

        return [forw_back_vel, left_right_vel, rot_vel], False

    def _reset_wheel(self):
        """Required to achieve desired omnidirectional wheel effect.

        Resets each wheel's dynamics and re-poses the slipping joints and
        wheel shapes relative to their drive joints.
        """
        [j.reset_dynamic_object() for j in self.wheels]

        # Per-wheel slipping-joint orientations; four entries, indexed by
        # wheel number below.
        p = [[-pi / 4, 0, 0], [pi / 4, 0, pi], [-pi / 4, 0, 0], [pi / 4, 0, pi]]

        for i in range(self.num_wheels):
            self.joints_slipping[i].set_position([0, 0, 0],
                                                 relative_to=self.joints[i],
                                                 reset_dynamics=False)
            self.joints_slipping[i].set_orientation(p[i],
                                                    relative_to=self.joints[i],
                                                    reset_dynamics=False)
            self.wheels[i].set_position([0, 0, 0], relative_to=self.joints[i],
                                        reset_dynamics=False)
            self.wheels[i].set_orientation([0, 0, 0],
                                           relative_to=self.joints[i],
                                           reset_dynamics=False)
|
external/pyrep/pyrep/robots/mobiles/line_tracer.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.mobiles.mobile_base import MobileBase
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class LineTracer(MobileBase):
    """The two-wheeled LineTracer mobile base."""

    def __init__(self, count: int = 0):
        # Model base is named 'LineTracer' in the scene file.
        wheel_count = 2
        super().__init__(count, wheel_count, 'LineTracer')
|
external/pyrep/pyrep/robots/mobiles/locobot.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.robots.mobiles.nonholonomic_base import NonHolonomicBase
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class LoCoBot(NonHolonomicBase):
    """The two-wheeled, non-holonomic LoCoBot mobile base."""

    def __init__(self, count: int = 0):
        # Model base is named 'LoCoBot' in the scene file.
        wheel_count = 2
        super().__init__(count, wheel_count, 'LoCoBot')
|
external/pyrep/pyrep/robots/mobiles/mobile_base.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pyrep.backend import sim, utils
|
| 2 |
+
from pyrep.objects.dummy import Dummy
|
| 3 |
+
from pyrep.objects.shape import Shape
|
| 4 |
+
from pyrep.robots.robot_component import RobotComponent
|
| 5 |
+
from pyrep.const import ConfigurationPathAlgorithms as Algos
|
| 6 |
+
from pyrep.errors import ConfigurationPathError
|
| 7 |
+
from pyrep.const import PYREP_SCRIPT_TYPE
|
| 8 |
+
from typing import List, Union
|
| 9 |
+
from math import sqrt
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class MobileBase(RobotComponent):
    """Base class representing a robot mobile base with path planning support.

    Holds the wheel shapes, the planning dummies (`target_base`,
    `intermediate_target_base`) and the collision collection used for
    collision checks.
    """

    def __init__(self, count: int, num_wheels: int, name: str):
        """Count is used for when we have multiple copies of mobile bases.

        :param count: used for multiple copies of robots
        :param num_wheels: number of actuated wheels
        :param name: string with robot name (same as base in vrep model).
        """

        joint_names = ['%s_m_joint%s' % (name, str(i + 1)) for i in
                       range(num_wheels)]
        super().__init__(count, name, joint_names)

        self.num_wheels = num_wheels
        suffix = '' if count == 0 else '#%d' % (count - 1)

        wheel_names = ['%s_wheel%s%s' % (name, str(i + 1), suffix) for i in
                       range(self.num_wheels)]
        # Distinct loop variable so the `name` parameter is not shadowed.
        self.wheels = [Shape(wheel_name) for wheel_name in wheel_names]

        # Motion planning handles
        self.intermediate_target_base = Dummy(
            '%s_intermediate_target_base%s' % (name, suffix))
        self.target_base = Dummy('%s_target_base%s' % (name, suffix))

        self._collision_collection = sim.simGetCollectionHandle(
            '%s_base%s' % (name, suffix))

        # Robot parameters and handle
        self.z_pos = self.get_position()[2]
        self.target_z = self.target_base.get_position()[-1]
        self.wheel_radius = self.wheels[0].get_bounding_box()[5]  # Z
        # NOTE(review): assumes at least two wheels; measured between the
        # first two wheel shapes.
        self.wheel_distance = np.linalg.norm(
            np.array(self.wheels[0].get_position()) -
            np.array(self.wheels[1].get_position()))

        # Make sure dummies are orphan if loaded with ttm
        self.intermediate_target_base.set_parent(None)
        self.target_base.set_parent(None)

    def get_2d_pose(self) -> np.ndarray:
        """Gets the 2D (top-down) pose of the robot [x, y, yaw].

        :return: An array containing the x, y, yaw (in radians).
        """
        return np.r_[self.get_position()[:2], self.get_orientation()[-1:]]

    def set_2d_pose(self, pose: Union[List[float], np.ndarray]) -> None:
        """Sets the 2D (top-down) pose of the robot [x, y, yaw].

        :param pose: A List containing the x, y, yaw (in radians).
        """
        x, y, yaw = pose
        self.set_position([x, y, self.z_pos])
        self.set_orientation([0, 0, yaw])

    def assess_collision(self) -> bool:
        """Silent detection of the robot base with all other entities
        present in the scene.

        :return: True if collision is detected
        """
        return sim.simCheckCollision(self._collision_collection,
                                     sim.sim_handle_all) == 1

    def set_cartesian_position(self, position: List[float]):
        """Set a delta target position (x,y) and rotation position.

        :param position: length 3 list containing x and y position, and
            angle position

        NOTE: not supported for nonholonomic mobile bases.
        """
        vel = [0, 0, 0]
        vel[-1] = position[-1]
        for i in range(2):
            vel[i] = position[i] / (0.05 * self.wheel_radius / 2)  # "0.05 is dt"

        self.set_base_angular_velocites(vel)

    def set_base_angular_velocites(self, velocity: List[float]):
        """Subclass hook for commanding the base velocities.

        :param velocity: for two wheels robot: each wheel velocity, for
            omnidirectional robot forwardBackward, leftRight and rotation
            velocity
        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError()

    def _get_nonlinear_path_points(self, position: List[float],
                                   angle=0,
                                   boundaries=2,
                                   path_pts=600,
                                   ignore_collisions=False,
                                   algorithm=Algos.RRTConnect) -> List[List[float]]:
        """Gets a non-linear (planned) configuration path given a target pose.

        :param position: The x, y, z position of the target.
        :param angle: The z orientation of the target (in radians).
        :param boundaries: A float defining the path search in x and y
            direction [[-boundaries,boundaries],[-boundaries,boundaries]].
        :param path_pts: number of sampled points returned from the
            computed path
        :param ignore_collisions: If collision checking should be disabled.
        :param algorithm: Algorithm used to compute path
        :raises: ConfigurationPathError if no path could be created.

        :return: A non-linear path (x,y,angle,dist_change) in the xy
            configuration space, where dist_change is the euclidean
            distance to the previous sample.
        """
        self.target_base.set_position([position[0], position[1], self.target_z])
        self.target_base.set_orientation([0, 0, angle])

        handle_base = self.get_handle()
        handle_target_base = self.target_base.get_handle()

        # Delegate planning to the embedded PyRep script.
        _, ret_floats, _, _ = utils.script_call(
            'getNonlinearPathMobile@PyRep', PYREP_SCRIPT_TYPE,
            ints=[handle_base, handle_target_base,
                  self._collision_collection,
                  int(ignore_collisions), path_pts], floats=[boundaries],
            strings=[algorithm.value])

        if len(ret_floats) == 0:
            raise ConfigurationPathError('Could not create path.')

        # ret_floats is a flat list of (x, y, yaw) triples; annotate each
        # sample with the distance travelled since the previous one.
        path = []
        prev_inst = None
        for i in range(0, len(ret_floats) // 3):
            inst = ret_floats[3 * i:3 * i + 3]
            if prev_inst is None:
                dist_change = 0
            else:
                dist_change = sqrt((inst[0] - prev_inst[0]) ** 2 +
                                   (inst[1] - prev_inst[1]) ** 2)
            inst.append(dist_change)
            path.append(inst)
            prev_inst = inst

        return path

    def _check_collision_linear_path(self, path):
        """Check for collision on a linear path from start to goal.

        The base is stepped through 50 poses sampled along the segment and
        a collision check is performed at each pose. Note: this physically
        moves the base as a side effect.

        :param path: A list containing start and goal as [x,y,yaw]
        :return: A bool, True if collision was detected
        """
        start = path[0]
        end = path[1]

        # Interpolate parametrically rather than via y = m*x + b: the slope
        # form divides by (end_x - start_x), which fails for vertical
        # segments and loses resolution for short ones (the old step size
        # was rounded to 3 decimals and could round to zero).
        n_samples = 50
        status_collision = False
        for k in range(n_samples):
            t = (k + 1) / n_samples
            x = start[0] + t * (end[0] - start[0])
            y = start[1] + t * (end[1] - start[1])
            # Hold the start yaw for most of the sweep, switch to the goal
            # yaw for the final samples (matches the original 46/50 split).
            self.set_2d_pose([x, y, start[-1] if k < 46 else end[-1]])
            status_collision = self.assess_collision()
            if status_collision:
                break

        return status_collision

    def get_base_actuation(self):
        """Controller for mobile bases.

        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError()

    def copy(self) -> RobotComponent:
        """Copy the mobile base.

        The planning dummies are temporarily re-parented under the base so
        they are duplicated with it, then orphaned again afterwards.

        :return: The copied robot component.
        """
        self.intermediate_target_base.set_parent(self)
        self.target_base.set_parent(self)
        copied = super().copy()
        self.intermediate_target_base.set_parent(None)
        self.target_base.set_parent(None)
        return copied