file_path
stringlengths 21
207
| content
stringlengths 5
1.02M
| size
int64 5
1.02M
| lang
stringclasses 9
values | avg_line_length
float64 2.5
98.5
| max_line_length
int64 5
993
| alphanum_fraction
float64 0.27
0.91
|
---|---|---|---|---|---|---|
gazebosim/gz-omni/source/ignition_live/Scene.hpp | /*
* Copyright (C) 2021 Open Source Robotics Foundation
*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_SCENE_HPP
#define IGNITION_OMNIVERSE_SCENE_HPP
#include "Error.hpp"
#include "ThreadSafe.hpp"
#include <ignition/utils/ImplPtr.hh>
#include <ignition/math/Pose3.hh>
#include <ignition/msgs/joint.pb.h>
#include <ignition/msgs/link.pb.h>
#include <ignition/msgs/model.pb.h>
#include <ignition/msgs/pose.pb.h>
#include <ignition/msgs/pose_v.pb.h>
#include <ignition/msgs/scene.pb.h>
#include <ignition/msgs/vector3d.pb.h>
#include <ignition/msgs/visual.pb.h>
#include <ignition/transport.hh>
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usdGeom/sphere.h>
#include <pxr/usd/usdGeom/capsule.h>
#include <pxr/usd/usdGeom/cube.h>
#include <pxr/usd/usdGeom/cylinder.h>
#include <pxr/usd/usdGeom/mesh.h>
#include <pxr/usd/usdShade/material.h>
#include <pxr/usd/usdGeom/xformCommonAPI.h>
#include <cstdint>
#include <memory>
#include <string>
#include <thread>
#include <unordered_map>
namespace ignition
{
namespace omniverse
{
/// \brief Simulator that acts as the source of truth for entity poses.
enum class Simulator : int { Ignition, IsaacSim };

/// \brief Owns a USD stage that mirrors an Ignition world and keeps it
/// updated from simulation messages.
class Scene
{
public:
  /// \brief Constructor.
  /// \param[in] _worldName Name of the Ignition world to mirror.
  /// \param[in] _stageUrl URL of the USD stage to create or open.
  /// \param[in] _simulatorPoses Simulator that provides the pose updates.
  Scene(
    const std::string &_worldName,
    const std::string &_stageUrl,
    Simulator _simulatorPoses);

  /// \brief Initialize the scene and subscribes for updates. This blocks until
  /// the scene is initialized.
  /// \return true if success
  bool Init();

  /// \brief Equivalent to `scene.Stage().Lock()->Save()`.
  void Save();

  /// \brief Access the underlying USD stage.
  /// \return Thread-safe wrapper around the stage reference.
  std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> &Stage();

  /// \internal
  /// \brief Private data pointer
  IGN_UTILS_UNIQUE_IMPL_PTR(dataPtr)
};
} // namespace omniverse
} // namespace ignition
#endif
| 2,162 | C++ | 24.75 | 80 | 0.726179 |
gazebosim/gz-omni/source/ignition_live/Mesh.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_MESH_HPP
#define IGNITION_OMNIVERSE_MESH_HPP
#include <ignition/msgs/meshgeom.pb.h>
#include <pxr/usd/usd/stage.h>
#include <pxr/usd/usdGeom/mesh.h>
namespace ignition
{
namespace omniverse
{
/// \brief Create or update a USD mesh prim from an Ignition mesh message.
/// \param[in] _meshMsg Mesh message describing the geometry to load.
/// \param[in] _path Stage path where the mesh prim is defined.
/// \param[in] _stage Stage in which the prim is created.
/// \return The resulting UsdGeomMesh; an invalid (default) mesh on failure.
pxr::UsdGeomMesh UpdateMesh(const ignition::msgs::MeshGeom& _meshMsg,
                            const std::string& _path,
                            const pxr::UsdStageRefPtr& _stage);
}
} // namespace ignition
#endif
| 1,067 | C++ | 28.666666 | 75 | 0.705717 |
gazebosim/gz-omni/source/ignition_live/Material.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "Material.hpp"
#include <ignition/common/Console.hh>
#include <ignition/math/Color.hh>
#include <pxr/usd/usd/tokens.h>
#include <pxr/usd/usdGeom/gprim.h>
#include <pxr/usd/usdShade/material.h>
#include <pxr/usd/usdShade/materialBindingAPI.h>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <OmniClient.h>
namespace ignition
{
namespace omniverse
{
/// \brief Return the full path of an URL.
/// If the resource is a Fuel http/https URI, map it to the local Fuel cache
/// under $HOME/.ignition/fuel and register the path with the system paths.
/// \param[in] _fullPath URI of the resource.
/// \return The local path of the resource, or an empty string when the HOME
/// environment variable is not defined.
std::string checkURI(const std::string _fullPath)
{
  // TODO (ahcorde): This code is duplicated is the USD converter (sdformat)
  ignition::common::URI uri(_fullPath);
  std::string fullPath = _fullPath;
  std::string home;
  if (!ignition::common::env("HOME", home, false))
  {
    ignwarn << "The HOME environment variable was not defined, "
            << "so the resource [" << fullPath << "] could not be found\n";
    return "";
  }
  if (uri.Scheme() == "http" || uri.Scheme() == "https")
  {
    auto systemPaths = ignition::common::systemPaths();
    std::vector<std::string> tokens =
        ignition::common::split(uri.Path().Str(), "/");
    // A Fuel URI path is expected to look like:
    //   <server>/<version>/<owner>/<type>/<model name>/<model version>/...
    // Guard against malformed URIs with fewer components instead of
    // indexing out of bounds.
    if (tokens.size() < 6)
    {
      ignwarn << "Unexpected URI format [" << _fullPath << "]\n";
      return fullPath;
    }
    std::string server = tokens[0];
    std::string owner = ignition::common::lowercase(tokens[2]);
    std::string type = ignition::common::lowercase(tokens[3]);
    std::string modelName = ignition::common::lowercase(tokens[4]);
    std::string modelVersion = ignition::common::lowercase(tokens[5]);
    fullPath = ignition::common::joinPaths(
        home, ".ignition", "fuel", server, owner, type, modelName,
        modelVersion);
    systemPaths->AddFilePaths(fullPath);
    // Append the remaining path components (files inside the model dir),
    // registering each intermediate directory as a search path.
    for (std::size_t i = 7; i < tokens.size(); i++)
    {
      fullPath = ignition::common::joinPaths(
          fullPath, ignition::common::lowercase(tokens[i]));
      systemPaths->AddFilePaths(fullPath);
    }
  }
  return fullPath;
}
/// \brief Copy a file in a directory
/// \param[in] _path path where the copy will be located
/// \param[in] _fullPath name of the file to copy
/// \param[in] _stageDirUrl stage directory URL to copy materials if required
bool copyMaterial(
const std::string &_path,
const std::string &_fullPath,
const std::string &_stageDirUrl)
{
if (!_path.empty() && !_fullPath.empty())
{
///
auto fileName = ignition::common::basename(_path);
auto filePathIndex = _path.rfind(fileName);
auto filePath = _path.substr(0, filePathIndex);
if (!omniClientWaitFor(omniClientCopy(
_fullPath.c_str(),
std::string(_stageDirUrl + "/" + _path).c_str(),
nullptr,
nullptr), 1000))
{
ignerr << "omniClientCopy timeout. Not able to copy file ["
<< _fullPath.c_str() << "]" << "in nucleus ["
<< std::string(_stageDirUrl + "/" + _path) << "]." ;
}
}
return false;
}
/// \brief Create the path to copy the material
/// \param[in] _uri full path of the file to copy
/// \return A relative path to save the material, the path looks like:
/// materials/textures/<filename with extension>
std::string getMaterialCopyPath(const std::string &_uri)
{
  // Keep only the file name and relocate it under ./materials/textures.
  const std::string fileName = ignition::common::basename(_uri);
  return ignition::common::joinPaths(".", "materials", "textures", fileName);
}
/// \brief Fill Material shader attributes and properties
/// \param[in] _prim USD primitive
/// \param[in] _name Name of the field attribute or property
/// \param[in] _vType Type of the field
/// \param[in] _value Value of the field
/// \param[in] _customData Custom data to set the field
/// \param[in] _displayName Display name
/// \param[in] _displayGroup Display group
/// \param[in] _doc Documentation of the field
/// \param[in] _colorSpace if the material is a texture, we can specify the
/// colorSpace of the image
template <typename T>
void CreateMaterialInput(
    const pxr::UsdPrim &_prim, const std::string &_name,
    const pxr::SdfValueTypeName &_vType, T _value,
    const std::map<pxr::TfToken, pxr::VtValue> &_customData,
    const pxr::TfToken &_displayName = pxr::TfToken(""),
    const pxr::TfToken &_displayGroup = pxr::TfToken(""),
    const std::string &_doc = "",
    const pxr::TfToken &_colorSpace = pxr::TfToken(""))
{
  auto shader = pxr::UsdShadeShader(_prim);
  if (!shader)
  {
    ignerr << "Not able to convert the prim to a UsdShadeShader" << std::endl;
    return;
  }
  // Normalize the declared type to its scalar/array form. Fall back to the
  // type as given so the input is never created with an invalid
  // (default-constructed) type name.
  pxr::SdfValueTypeName vTypeName = _vType;
  if (_vType.IsScalar())
  {
    vTypeName = _vType.GetScalarType();
  }
  else if (_vType.IsArray())
  {
    vTypeName = _vType.GetArrayType();
  }
  auto surfaceInput = shader.CreateInput(pxr::TfToken(_name), vTypeName);
  surfaceInput.Set(_value);
  auto attr = surfaceInput.GetAttr();
  // Attach UI metadata (defaults, ranges, …) as custom data on the attribute.
  for (const auto &[key, customValue] : _customData)
  {
    attr.SetCustomDataByKey(key, customValue);
  }
  if (!_displayName.GetString().empty())
  {
    attr.SetDisplayName(_displayName);
  }
  if (!_displayGroup.GetString().empty())
  {
    attr.SetDisplayGroup(_displayGroup);
  }
  if (!_doc.empty())
  {
    attr.SetDocumentation(_doc);
  }
  if (!_colorSpace.GetString().empty())
  {
    attr.SetColorSpace(_colorSpace);
  }
}
/// \brief Define an OmniPBR MDL material from the visual message's material
/// description and bind it to the given geometry prim.
/// \param[in] _gprim Geometry prim that receives the material binding.
/// \param[in] _visualMsg Visual message carrying the material description.
/// \param[in] _stage Stage in which the material and shader prims are defined.
/// \param[in] _stageDirUrl stage directory URL to copy materials if required
/// \return true on success (also when the message carries no material).
bool SetMaterial(const pxr::UsdGeomGprim &_gprim,
                 const ignition::msgs::Visual &_visualMsg,
                 const pxr::UsdStageRefPtr &_stage,
                 const std::string &_stageDirUrl)
{
  // Nothing to do when the visual has no material description.
  if (!_visualMsg.has_material())
  {
    return true;
  }
  // Material path is made unique with the visual's name and id.
  const std::string mtlPath = "/Looks/Material_" + _visualMsg.name() + "_" +
      std::to_string(_visualMsg.id());
  pxr::UsdShadeMaterial material =
      pxr::UsdShadeMaterial::Define(_stage, pxr::SdfPath(mtlPath));
  auto usdShader =
      pxr::UsdShadeShader::Define(_stage, pxr::SdfPath(mtlPath + "/Shader"));
  auto shaderPrim = usdShader.GetPrim();
  // Wire the shader's "out" into the material's surface/volume/displacement
  // outputs under the "mdl" render context.
  auto shaderOut =
      pxr::UsdShadeConnectableAPI(shaderPrim)
          .CreateOutput(pxr::TfToken("out"), pxr::SdfValueTypeNames->Token);
  material.CreateSurfaceOutput(pxr::TfToken("mdl")).ConnectToSource(shaderOut);
  material.CreateVolumeOutput(pxr::TfToken("mdl")).ConnectToSource(shaderOut);
  material.CreateDisplacementOutput(pxr::TfToken("mdl"))
      .ConnectToSource(shaderOut);
  // The shader sources the OmniPBR MDL shipped with Omniverse.
  usdShader.GetImplementationSourceAttr().Set(pxr::UsdShadeTokens->sourceAsset);
  usdShader.SetSourceAsset(pxr::SdfAssetPath("OmniPBR.mdl"),
                           pxr::TfToken("mdl"));
  usdShader.SetSourceAssetSubIdentifier(pxr::TfToken("OmniPBR"),
                                        pxr::TfToken("mdl"));
  // Diffuse (base) color.
  std::map<pxr::TfToken, pxr::VtValue> customDataDiffuse = {
      {pxr::TfToken("default"), pxr::VtValue(pxr::GfVec3f(0.2, 0.2, 0.2))},
      {pxr::TfToken("range:max"),
       pxr::VtValue(pxr::GfVec3f(100000, 100000, 100000))},
      {pxr::TfToken("range:min"), pxr::VtValue(pxr::GfVec3f(0, 0, 0))}};
  ignition::math::Color diffuse(
      _visualMsg.material().diffuse().r(), _visualMsg.material().diffuse().g(),
      _visualMsg.material().diffuse().b(), _visualMsg.material().diffuse().a());
  CreateMaterialInput<pxr::GfVec3f>(
      shaderPrim, "diffuse_color_constant", pxr::SdfValueTypeNames->Color3f,
      pxr::GfVec3f(diffuse.R(), diffuse.G(), diffuse.B()), customDataDiffuse,
      pxr::TfToken("Base Color"), pxr::TfToken("Albedo"),
      "This is the base color");
  // Emissive color; emission is enabled/scaled by the emissive alpha below.
  std::map<pxr::TfToken, pxr::VtValue> customDataEmissive = {
      {pxr::TfToken("default"), pxr::VtValue(pxr::GfVec3f(1, 0.1, 0.1))},
      {pxr::TfToken("range:max"),
       pxr::VtValue(pxr::GfVec3f(100000, 100000, 100000))},
      {pxr::TfToken("range:min"), pxr::VtValue(pxr::GfVec3f(0, 0, 0))}};
  ignition::math::Color emissive(_visualMsg.material().emissive().r(),
                                 _visualMsg.material().emissive().g(),
                                 _visualMsg.material().emissive().b(),
                                 _visualMsg.material().emissive().a());
  CreateMaterialInput<pxr::GfVec3f>(
      shaderPrim, "emissive_color", pxr::SdfValueTypeNames->Color3f,
      pxr::GfVec3f(emissive.R(), emissive.G(), emissive.B()),
      customDataEmissive, pxr::TfToken("Emissive Color"),
      pxr::TfToken("Emissive"), "The emission color");
  std::map<pxr::TfToken, pxr::VtValue> customDataEnableEmission = {
      {pxr::TfToken("default"), pxr::VtValue(0)}};
  CreateMaterialInput<bool>(
      shaderPrim, "enable_emission", pxr::SdfValueTypeNames->Bool,
      emissive.A() > 0, customDataEnableEmission,
      pxr::TfToken("Enable Emissive"), pxr::TfToken("Emissive"),
      "Enables the emission of light from the material");
  // The emissive alpha channel doubles as the emission intensity.
  std::map<pxr::TfToken, pxr::VtValue> customDataIntensity = {
      {pxr::TfToken("default"), pxr::VtValue(40)},
      {pxr::TfToken("range:max"), pxr::VtValue(100000)},
      {pxr::TfToken("range:min"), pxr::VtValue(0)}};
  CreateMaterialInput<float>(
      shaderPrim, "emissive_intensity", pxr::SdfValueTypeNames->Float,
      emissive.A(), customDataIntensity, pxr::TfToken("Emissive Intensity"),
      pxr::TfToken("Emissive"), "Intensity of the emission");
  // Optional PBR parameters: constants plus up to four texture maps. Each
  // texture is copied next to the stage (see copyMaterial) and referenced by
  // its relative copy path.
  if (_visualMsg.material().has_pbr())
  {
    auto pbr = _visualMsg.material().pbr();
    std::map<pxr::TfToken, pxr::VtValue> customDataMetallicConstant =
    {
      {pxr::TfToken("default"), pxr::VtValue(0.5)},
      {pxr::TfToken("range:max"), pxr::VtValue(1)},
      {pxr::TfToken("range:min"), pxr::VtValue(0)}
    };
    CreateMaterialInput<float>(
        shaderPrim,
        "metallic_constant",
        pxr::SdfValueTypeNames->Float,
        pbr.metalness(),
        customDataMetallicConstant,
        pxr::TfToken("Metallic Amount"),
        pxr::TfToken("Reflectivity"),
        "Metallic Material");
    std::map<pxr::TfToken, pxr::VtValue> customDataRoughnessConstant =
    {
      {pxr::TfToken("default"), pxr::VtValue(0.5)},
      {pxr::TfToken("range:max"), pxr::VtValue(1)},
      {pxr::TfToken("range:min"), pxr::VtValue(0)}
    };
    CreateMaterialInput<float>(
        shaderPrim,
        "reflection_roughness_constant",
        pxr::SdfValueTypeNames->Float,
        pbr.roughness(),
        customDataRoughnessConstant,
        pxr::TfToken("Roughness Amount"),
        pxr::TfToken("Reflectivity"),
        "Higher roughness values lead to more blurry reflections");
    if (!pbr.albedo_map().empty())
    {
      std::map<pxr::TfToken, pxr::VtValue> customDataDiffuseTexture =
      {
        {pxr::TfToken("default"), pxr::VtValue(pxr::SdfAssetPath())},
      };
      std::string copyPath = getMaterialCopyPath(pbr.albedo_map());
      // NOTE(review): only the albedo map goes through checkURI; the other
      // texture maps below are resolved directly — confirm whether that
      // asymmetry is intended.
      std::string albedoMapURI = checkURI(pbr.albedo_map());
      std::string fullnameAlbedoMap =
          ignition::common::findFile(
              ignition::common::basename(albedoMapURI));
      if (fullnameAlbedoMap.empty())
      {
        fullnameAlbedoMap = pbr.albedo_map();
      }
      copyMaterial(copyPath, fullnameAlbedoMap, _stageDirUrl);
      CreateMaterialInput<pxr::SdfAssetPath>(
          shaderPrim,
          "diffuse_texture",
          pxr::SdfValueTypeNames->Asset,
          pxr::SdfAssetPath(copyPath),
          customDataDiffuseTexture,
          pxr::TfToken("Base Map"),
          pxr::TfToken("Albedo"),
          "",
          pxr::TfToken("auto"));
    }
    if (!pbr.metalness_map().empty())
    {
      std::map<pxr::TfToken, pxr::VtValue> customDataMetallnessTexture =
      {
        {pxr::TfToken("default"), pxr::VtValue(pxr::SdfAssetPath())},
      };
      std::string copyPath = getMaterialCopyPath(pbr.metalness_map());
      std::string fullnameMetallnessMap =
          ignition::common::findFile(
              ignition::common::basename(pbr.metalness_map()));
      if (fullnameMetallnessMap.empty())
      {
        fullnameMetallnessMap = pbr.metalness_map();
      }
      copyMaterial(copyPath, fullnameMetallnessMap, _stageDirUrl);
      CreateMaterialInput<pxr::SdfAssetPath>(
          shaderPrim,
          "metallic_texture",
          pxr::SdfValueTypeNames->Asset,
          pxr::SdfAssetPath(copyPath),
          customDataMetallnessTexture,
          pxr::TfToken("Metallic Map"),
          pxr::TfToken("Reflectivity"),
          "",
          pxr::TfToken("raw"));
    }
    if (!pbr.normal_map().empty())
    {
      std::map<pxr::TfToken, pxr::VtValue> customDataNormalTexture =
      {
        {pxr::TfToken("default"), pxr::VtValue(pxr::SdfAssetPath())},
      };
      std::string copyPath = getMaterialCopyPath(pbr.normal_map());
      std::string fullnameNormalMap =
          ignition::common::findFile(
              ignition::common::basename(pbr.normal_map()));
      if (fullnameNormalMap.empty())
      {
        fullnameNormalMap = pbr.normal_map();
      }
      copyMaterial(copyPath, fullnameNormalMap, _stageDirUrl);
      CreateMaterialInput<pxr::SdfAssetPath>(
          shaderPrim,
          "normalmap_texture",
          pxr::SdfValueTypeNames->Asset,
          pxr::SdfAssetPath(copyPath),
          customDataNormalTexture,
          pxr::TfToken("Normal Map"),
          pxr::TfToken("Normal"),
          "",
          pxr::TfToken("raw"));
    }
    if (!pbr.roughness_map().empty())
    {
      std::map<pxr::TfToken, pxr::VtValue> customDataRoughnessTexture =
      {
        {pxr::TfToken("default"), pxr::VtValue(pxr::SdfAssetPath())},
      };
      std::string copyPath = getMaterialCopyPath(pbr.roughness_map());
      std::string fullnameRoughnessMap =
          ignition::common::findFile(
              ignition::common::basename(pbr.roughness_map()));
      if (fullnameRoughnessMap.empty())
      {
        fullnameRoughnessMap = pbr.roughness_map();
      }
      copyMaterial(copyPath, fullnameRoughnessMap, _stageDirUrl);
      CreateMaterialInput<pxr::SdfAssetPath>(
          shaderPrim,
          "reflectionroughness_texture",
          pxr::SdfValueTypeNames->Asset,
          pxr::SdfAssetPath(copyPath),
          customDataRoughnessTexture,
          pxr::TfToken("RoughnessMap Map"),
          pxr::TfToken("RoughnessMap"),
          "",
          pxr::TfToken("raw"));
      // Tell the shader to prefer the roughness texture over the constant.
      std::map<pxr::TfToken, pxr::VtValue>
          customDataRoughnessTextureInfluence =
      {
        {pxr::TfToken("default"), pxr::VtValue(0)},
        {pxr::TfToken("range:max"), pxr::VtValue(1)},
        {pxr::TfToken("range:min"), pxr::VtValue(0)}
      };
      CreateMaterialInput<bool>(
          shaderPrim,
          "reflection_roughness_texture_influence",
          pxr::SdfValueTypeNames->Bool,
          true,
          customDataRoughnessTextureInfluence,
          pxr::TfToken("Roughness Map Influence"),
          pxr::TfToken("Reflectivity"),
          "",
          pxr::TfToken("raw"));
    }
  }
  // Finally bind the material to the geometry prim.
  pxr::UsdShadeMaterialBindingAPI(_gprim).Bind(material);
  return true;
}
} // namespace omniverse
} // namespace ignition
| 15,400 | C++ | 33.224444 | 85 | 0.633701 |
gazebosim/gz-omni/source/ignition_live/FUSDNoticeListener.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "FUSDNoticeListener.hpp"
#include "GetOp.hpp"
#include <ignition/common/Console.hh>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/usd/notice.h>
#include <pxr/usd/usd/primRange.h>
#include <pxr/usd/usdGeom/sphere.h>
#include <pxr/usd/usdGeom/cube.h>
#include <pxr/usd/usdGeom/cylinder.h>
#include <ignition/transport/Node.hh>
#include <ignition/msgs/model.pb.h>
#include <sdf/Collision.hh>
#include <sdf/Geometry.hh>
#include <sdf/Root.hh>
#include <sdf/Link.hh>
#include <sdf/Model.hh>
#include <sdf/Sphere.hh>
#include <sdf/Visual.hh>
namespace ignition
{
namespace omniverse
{
/// \brief Private data for FUSDNoticeListener. Converts USD prims that appear
/// on the stage into SDF entities and caches joint state updates.
class FUSDNoticeListener::Implementation
{
public:
  /// \brief Convert a USD cube prim into a box geometry on _link
  /// (currently a stub — see the definition below).
  void ParseCube(const pxr::UsdPrim &_prim, sdf::Link &_link);

  /// \brief Convert a USD cylinder prim into a cylinder geometry on _link
  /// (currently a stub — see the definition below).
  void ParseCylinder(const pxr::UsdPrim &_prim, sdf::Link &_link);

  /// \brief Add a sphere visual and collision from a USD sphere prim to
  /// _link.
  void ParseSphere(const pxr::UsdPrim &_prim, sdf::Link &_link);

  /// \brief Try to convert _prim's geometry into _link.
  /// \return true when the prim was fully handled.
  /// NOTE(review): the cylinder branch parses but still returns false
  /// (unlike the sphere branch) — confirm whether that fall-through is
  /// intended; it makes CreateSDF keep recursing after a cylinder.
  bool ParsePrim(const pxr::UsdPrim &_prim, sdf::Link &_link)
  {
    if (_prim.IsA<pxr::UsdGeomSphere>())
    {
      ParseSphere(_prim, _link);
      return true;
    }
    else if (_prim.IsA<pxr::UsdGeomCylinder>())
    {
      ParseCylinder(_prim, _link);
    }
    return false;
  }

  /// \brief Depth-first walk of _prim's subtree that stops at the first prim
  /// ParsePrim can convert into _link.
  void CreateSDF(sdf::Link &_link, const pxr::UsdPrim &_prim)
  {
    if (!_prim)
      return;
    if (ParsePrim(_prim, _link))
    {
      return;
    }
    else
    {
      auto children = _prim.GetChildren();
      for (const pxr::UsdPrim &childPrim : children)
      {
        if (ParsePrim(childPrim, _link))
        {
          return;
        }
        else
        {
          CreateSDF(_link, childPrim);
        }
      }
    }
  }

  /// \brief Callback for the "/joint_states" topic; caches joint positions
  /// into jointStateMap.
  void jointStateCb(const ignition::msgs::Model &_msg);

  /// \brief Thread-safe handle to the USD stage being watched.
  std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> stage;

  /// \brief Name of the Ignition world receiving create/set_pose requests.
  std::string worldName;

  /// \brief Publishers (one per joint command topic) used to command
  /// revolute joints.
  std::unordered_map<std::string, transport::Node::Publisher> revoluteJointPublisher;

  /// \brief Ignition communication node.
  public: transport::Node node;

  /// \brief Which simulator is the source of pose updates.
  Simulator simulatorPoses;

  /// \brief Protects jointStateMap.
  std::mutex jointStateMsgMutex;

  /// \brief Latest known position of each joint, keyed by joint name.
  std::unordered_map<std::string, double> jointStateMap;

  /// \brief Known entity names/ids; owned by the caller of the constructor.
  std::unordered_map<std::string, uint32_t> * entitiesByName;
};
/// \brief Stub: intended to convert a USD cube prim into a box geometry on
/// _link. The conversion is not implemented yet, so this is a no-op.
void FUSDNoticeListener::Implementation::ParseCube(
    const pxr::UsdPrim &_prim, sdf::Link &_link)
{
  // double size;
  // auto variant_cylinder = pxr::UsdGeomCube(_prim);
  // variant_cylinder.GetSizeAttr().Get(&size);
}
/// \brief Stub: intended to convert a USD cylinder prim into a cylinder
/// geometry on _link. The conversion is not implemented yet, so this is a
/// no-op.
void FUSDNoticeListener::Implementation::ParseCylinder(
    const pxr::UsdPrim &_prim, sdf::Link &_link)
{
  // auto variant_cylinder = pxr::UsdGeomCylinder(_prim);
  // double radius;
  // double height;
  // variant_cylinder.GetRadiusAttr().Get(&radius);
  // variant_cylinder.GetHeightAttr().Get(&height);
}
/// \brief Add a sphere visual and a sphere collision, built from the USD
/// sphere prim's radius, to the given SDF link.
/// \param[in] _prim USD sphere prim to read the radius from.
/// \param[out] _link Link that receives the new visual and collision.
void FUSDNoticeListener::Implementation::ParseSphere(
    const pxr::UsdPrim &_prim, sdf::Link &_link)
{
  // Read the radius authored on the USD sphere prim.
  double sphereRadius;
  auto usdSphere = pxr::UsdGeomSphere(_prim);
  usdSphere.GetRadiusAttr().Get(&sphereRadius);

  // One sphere geometry is shared by both the visual and the collision.
  sdf::Sphere sphereShape;
  sphereShape.SetRadius(sphereRadius);
  sdf::Geometry geometry;
  geometry.SetType(sdf::GeometryType::SPHERE);
  geometry.SetSphereShape(sphereShape);

  sdf::Visual visual;
  visual.SetName("sphere_visual");
  visual.SetGeom(geometry);

  sdf::Collision collision;
  collision.SetName("sphere_collision");
  collision.SetGeom(geometry);

  _link.AddVisual(visual);
  _link.AddCollision(collision);
}
/// \brief Construct the listener and subscribe to joint state updates.
/// \param[in] _stage Thread-safe handle to the USD stage to watch.
/// \param[in] _worldName Name of the Ignition world the listener talks to.
/// \param[in] _simulatorPoses Which simulator is the source of poses.
/// \param[in] _entitiesByName Known entity names/ids, owned by the caller and
/// referenced (not copied) by this listener.
FUSDNoticeListener::FUSDNoticeListener(
    std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> &_stage,
    const std::string &_worldName,
    Simulator _simulatorPoses,
    std::unordered_map<std::string, uint32_t> &_entitiesByName)
    : dataPtr(ignition::utils::MakeUniqueImpl<Implementation>())
{
  auto &impl = *this->dataPtr;
  impl.stage = _stage;
  impl.worldName = _worldName;
  impl.simulatorPoses = _simulatorPoses;
  impl.entitiesByName = &_entitiesByName;

  // Joint positions arrive on this topic and are cached by jointStateCb.
  const std::string jointStateTopic{"/joint_states"};
  impl.node.Subscribe(
      jointStateTopic,
      &FUSDNoticeListener::Implementation::jointStateCb,
      this->dataPtr.get());
}
/// \brief Cache the latest position of every joint reported in the message.
/// \param[in] _msg Model message whose joints carry axis1 positions.
void FUSDNoticeListener::Implementation::jointStateCb(
    const ignition::msgs::Model &_msg)
{
  std::lock_guard<std::mutex> lock(this->jointStateMsgMutex);
  const int jointCount = _msg.joint_size();
  for (int idx = 0; idx < jointCount; ++idx)
  {
    const auto &joint = _msg.joint(idx);
    this->jointStateMap[joint.name()] = joint.axis1().position();
  }
}
/// \brief React to USD stage change notices: spawn newly-added prims as
/// Ignition models (resynced paths), and — in IsaacSim mode — publish joint
/// commands and forward translate edits back to the Ignition world as pose
/// updates (changed-info-only paths).
void FUSDNoticeListener::Handle(
    const class pxr::UsdNotice::ObjectsChanged &ObjectsChanged)
{
  // Hold the stage lock for the duration of the notice handling.
  auto stage = this->dataPtr->stage->Lock();

  // Resynced paths: prims that were added/removed/structurally changed.
  for (const pxr::SdfPath &objectsChanged : ObjectsChanged.GetResyncedPaths())
  {
    ignmsg << "Resynced Path: " << objectsChanged.GetText() << std::endl;
    auto modelUSD = stage->GetPrimAtPath(objectsChanged);
    std::string primName = modelUSD.GetName();
    // Skip infrastructure prims that are not simulation models.
    if (primName.find("ROS_") != std::string::npos ||
        primName.find("PhysicsScene") != std::string::npos)
    {
      continue;
    }
    if (modelUSD)
    {
      std::string strPath = objectsChanged.GetText();
      // NOTE(review): this aborts the whole handler (`return`, not
      // `continue`) for link/visual/geometry sub-prims — confirm intended.
      if (strPath.find("_link") != std::string::npos
          || strPath.find("_visual") != std::string::npos
          || strPath.find("geometry") != std::string::npos) {
        return;
      }
      // Already known to the world: nothing to create.
      auto it = this->dataPtr->entitiesByName->find(modelUSD.GetName().GetString());
      if (it != this->dataPtr->entitiesByName->end())
      {
        continue;
      }
      // NOTE(review): this loop's `continue` only affects the inner loop,
      // so the traversal has no observable effect — confirm whether a
      // duplicate-name check was intended here.
      auto range = pxr::UsdPrimRange::Stage(*stage);
      for (auto const &prim : range)
      {
        if (prim.GetName().GetString() == primName)
        {
          continue;
        }
      }
      // Build a minimal SDF model (one link, geometry filled from the USD
      // subtree) for the new prim.
      sdf::Root root;
      sdf::Model model;
      model.SetName(modelUSD.GetPath().GetName());
      model.SetRawPose(ignition::math::Pose3d());
      sdf::Link link;
      link.SetName(modelUSD.GetPath().GetName());
      this->dataPtr->CreateSDF(link, modelUSD);
      model.AddLink(link);
      root.SetModel(model);
      // Prepare the input parameters.
      ignition::msgs::EntityFactory req;
      req.set_sdf(root.ToElement()->ToString(""));
      req.set_name(modelUSD.GetPath().GetName());
      req.set_allow_renaming(false);
      igndbg << "root.ToElement()->ToString("") "
             << root.ToElement()->ToString("") << '\n';
      // Ask the Ignition world to create the entity (blocking request).
      ignition::msgs::Boolean rep;
      bool result;
      unsigned int timeout = 5000;
      bool executed = this->dataPtr->node.Request(
          "/world/" + this->dataPtr->worldName + "/create",
          req, timeout, rep, result);
      if (executed)
      {
        if (rep.data())
        {
          igndbg << "Model was inserted [" << modelUSD.GetPath().GetName()
                 << "]" << '\n';
        }
        else
        {
          igndbg << "Error model was not inserted" << '\n';
        }
      }
    }
  }

  // Pose updates accumulated below and sent in one set_pose_vector request.
  ignition::msgs::Pose_V req;
  if (this->dataPtr->simulatorPoses == Simulator::IsaacSim)
  {
    // this loop checks all paths to find revolute joints
    // if there is some, we get the body0 and body1 and calculate the
    // joint angle.
    auto range = pxr::UsdPrimRange::Stage(*stage);
    {
      std::lock_guard<std::mutex> lock(this->dataPtr->jointStateMsgMutex);
      for (auto const &prim : range)
      {
        std::string primType = prim.GetPrimTypeInfo().GetTypeName().GetText();
        if (primType == std::string("PhysicsRevoluteJoint"))
        {
          // NOTE(review): the model name "panda" is hard-coded in the
          // command topic — confirm this is a known limitation.
          std::string topic = transport::TopicUtils::AsValidTopic(
              std::string("/model/") + std::string("panda") +
              std::string("/joint/") + prim.GetPath().GetName() +
              std::string("/0/cmd_pos"));
          auto pub = this->dataPtr->revoluteJointPublisher.find(topic);
          if (pub == this->dataPtr->revoluteJointPublisher.end())
          {
            // First sighting: advertise; the command is published from the
            // next notice onward.
            this->dataPtr->revoluteJointPublisher[topic] =
                this->dataPtr->node.Advertise<msgs::Double>(topic);
          }
          else
          {
            msgs::Double cmd;
            float pos = this->dataPtr->jointStateMap[prim.GetName()];
            cmd.set_data(pos);
            pub->second.Publish(cmd);
          }
        }
      }
    }
    // Changed-info-only paths: attribute edits (radius, translate, ...).
    for (const pxr::SdfPath &objectsChanged :
         ObjectsChanged.GetChangedInfoOnlyPaths())
    {
      if (std::string(objectsChanged.GetText()) == "/")
        continue;
      igndbg << "path " << objectsChanged.GetText() << std::endl;
      auto modelUSD = stage->GetPrimAtPath(objectsChanged.GetParentPath());
      auto property = modelUSD.GetPropertyAtPath(objectsChanged);
      std::string strProperty = property.GetBaseName().GetText();
      if (strProperty == "radius")
      {
        // NOTE(review): the radius is read but never used — confirm whether
        // a geometry-update request is missing here.
        double radius;
        auto attribute = modelUSD.GetAttributeAtPath(objectsChanged);
        attribute.Get(&radius);
      }
      if (strProperty == "translate")
      {
        auto xform = pxr::UsdGeomXformable(modelUSD);
        auto transforms = GetOp(xform);
        auto currentPrim = modelUSD;
        ignition::math::Quaterniond q(
            transforms.rotXYZ[0],
            transforms.rotXYZ[1],
            transforms.rotXYZ[2]);
        // Accumulate the transform up the geometry -> visual -> link ->
        // model chain so the pose sent to Ignition is the model pose.
        if (currentPrim.GetName() == "geometry")
        {
          currentPrim = currentPrim.GetParent();
          auto visualXform = pxr::UsdGeomXformable(currentPrim);
          auto visualOp = GetOp(visualXform);
          transforms.position += visualOp.position;
          ignition::math::Quaterniond qX, qY, qZ;
          ignition::math::Angle angleX(IGN_DTOR(visualOp.rotXYZ[0]));
          ignition::math::Angle angleY(IGN_DTOR(visualOp.rotXYZ[1]));
          ignition::math::Angle angleZ(IGN_DTOR(visualOp.rotXYZ[2]));
          qX = ignition::math::Quaterniond(angleX.Normalized().Radian(), 0, 0);
          qY = ignition::math::Quaterniond(0, angleY.Normalized().Radian(), 0);
          qZ = ignition::math::Quaterniond(0, 0, angleZ.Normalized().Radian());
          q = ((q * qX) * qY) * qZ;
          transforms.scale = pxr::GfVec3f(
              transforms.scale[0] * visualOp.scale[0],
              transforms.scale[1] * visualOp.scale[1],
              transforms.scale[2] * visualOp.scale[2]);
        }
        auto currentPrimName = currentPrim.GetName().GetString();
        // If the current prim is a "*_visual", fold in the parent link's
        // transform as well.
        int substrIndex = currentPrimName.size() - std::string("_visual").size();
        if (substrIndex >= 0 && substrIndex < currentPrimName.size())
        {
          if (currentPrimName.substr(substrIndex).find("_visual") !=
              std::string::npos)
          {
            currentPrim = currentPrim.GetParent();
            auto linkXform = pxr::UsdGeomXformable(currentPrim);
            auto linkOp = GetOp(linkXform);
            transforms.position += linkOp.position;
            ignition::math::Quaterniond qX, qY, qZ;
            ignition::math::Angle angleX(IGN_DTOR(linkOp.rotXYZ[0]));
            ignition::math::Angle angleY(IGN_DTOR(linkOp.rotXYZ[1]));
            ignition::math::Angle angleZ(IGN_DTOR(linkOp.rotXYZ[2]));
            qX = ignition::math::Quaterniond(angleX.Normalized().Radian(), 0, 0);
            qY = ignition::math::Quaterniond(0, angleY.Normalized().Radian(), 0);
            qZ = ignition::math::Quaterniond(0, 0, angleZ.Normalized().Radian());
            q = ((q * qX) * qY) * qZ;
            transforms.scale = pxr::GfVec3f(
                transforms.scale[0] * linkOp.scale[0],
                transforms.scale[1] * linkOp.scale[1],
                transforms.scale[2] * linkOp.scale[2]);
          }
        }
        currentPrimName = currentPrim.GetName().GetString();
        // If the current prim is a "*_link", fold in the parent model's
        // transform as well.
        substrIndex = currentPrimName.size() - std::string("_link").size();
        if (substrIndex >= 0 && substrIndex < currentPrimName.size())
        {
          if (currentPrimName.substr(substrIndex).find("_link") !=
              std::string::npos)
          {
            currentPrim = currentPrim.GetParent();
            auto modelXform = pxr::UsdGeomXformable(currentPrim);
            auto modelOp = GetOp(modelXform);
            transforms.position += modelOp.position;
            ignition::math::Quaterniond qX, qY, qZ;
            ignition::math::Angle angleX(IGN_DTOR(modelOp.rotXYZ[0]));
            ignition::math::Angle angleY(IGN_DTOR(modelOp.rotXYZ[1]));
            ignition::math::Angle angleZ(IGN_DTOR(modelOp.rotXYZ[2]));
            qX = ignition::math::Quaterniond(angleX.Normalized().Radian(), 0, 0);
            qY = ignition::math::Quaterniond(0, angleY.Normalized().Radian(), 0);
            qZ = ignition::math::Quaterniond(0, 0, angleZ.Normalized().Radian());
            q = ((q * qX) * qY) * qZ;
            transforms.scale = pxr::GfVec3f(
                transforms.scale[0] * modelOp.scale[0],
                transforms.scale[1] * modelOp.scale[1],
                transforms.scale[2] * modelOp.scale[2]);
          }
        }
        // Only publish poses for model-level prims, not links/visuals.
        std::size_t found = std::string(currentPrim.GetName()).find("_link");
        if (found != std::string::npos)
          continue;
        found = std::string(currentPrim.GetName()).find("_visual");
        if (found != std::string::npos)
          continue;
        auto poseMsg = req.add_pose();
        poseMsg->set_name(currentPrim.GetName());
        poseMsg->mutable_position()->set_x(transforms.position[0]);
        poseMsg->mutable_position()->set_y(transforms.position[1]);
        poseMsg->mutable_position()->set_z(transforms.position[2]);
        poseMsg->mutable_orientation()->set_x(q.X());
        poseMsg->mutable_orientation()->set_y(q.Y());
        poseMsg->mutable_orientation()->set_z(q.Z());
        poseMsg->mutable_orientation()->set_w(q.W());
      }
    }
    // Send all accumulated poses to the world in one request.
    if (req.pose_size() > 0)
    {
      bool result;
      ignition::msgs::Boolean rep;
      unsigned int timeout = 100;
      bool executed = this->dataPtr->node.Request(
          "/world/" + this->dataPtr->worldName + "/set_pose_vector",
          req, timeout, rep, result);
      if (executed)
      {
        if (!result)
          ignerr << "Service call failed" << std::endl;
      }
      else
        ignerr << "Service [/world/" << this->dataPtr->worldName
               << "/set_pose_vector] call timed out" << std::endl;
    }
  }
}
} // namespace omniverse
} // namespace ignition
| 14,498 | C++ | 31.582022 | 85 | 0.608222 |
gazebosim/gz-omni/source/ignition_live/Joint.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_JOINT_HPP
#define IGNITION_OMNIVERSE_JOINT_HPP
#include <ignition/msgs/joint.pb.h>
#include <pxr/usd/usd/prim.h>
#include <pxr/usd/usd/stage.h>
namespace ignition::omniverse
{
/// \brief Create a fixed-joint USD prim at the given stage path.
/// \param[in] _path Prim path where the joint will be defined.
/// \param[in] _stage Stage in which to create the prim.
/// \return The newly defined joint prim.
pxr::UsdPrim CreateFixedJoint(const std::string& _path,
                              const pxr::UsdStageRefPtr& _stage);

/// \brief Create a revolute-joint USD prim at the given stage path.
/// \param[in] _path Prim path where the joint will be defined.
/// \param[in] _stage Stage in which to create the prim.
/// \return The newly defined joint prim.
pxr::UsdPrim CreateRevoluteJoint(const std::string& _path,
                                 const pxr::UsdStageRefPtr& _stage);
} // namespace ignition::omniverse
#endif
| 1,123 | C++ | 31.114285 | 75 | 0.705254 |
gazebosim/gz-omni/source/ignition_live/Mesh.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "Mesh.hpp"
#include <ignition/common/Console.hh>
#include <ignition/common/Mesh.hh>
#include <ignition/common/MeshManager.hh>
#include <ignition/common/SubMesh.hh>
#include <ignition/common/URI.hh>
#include <ignition/common/Util.hh>
#include <pxr/usd/usdGeom/xformCommonAPI.h>
namespace ignition::omniverse
{
/// \brief Check whether _str ends with the given suffix.
/// \param[in] str String to inspect.
/// \param[in] suffix Candidate trailing substring.
/// \return true if suffix is a trailing substring of str.
bool endsWith(const std::string_view &str, const std::string_view &suffix)
{
  if (suffix.size() > str.size())
    return false;
  const auto tailStart = str.size() - suffix.size();
  return str.compare(tailStart, suffix.size(), suffix) == 0;
}
/// \brief Replace every '-' in the input with '_'.
/// \param[in] _str String to convert.
/// \return A copy of _str with all dashes turned into underscores.
inline std::string removeDash(const std::string &_str)
{
  std::string converted = _str;
  for (char &ch : converted)
  {
    if (ch == '-')
      ch = '_';
  }
  return converted;
}
/// \brief Create a USD mesh prim at _path from an ignition mesh message.
///
/// The file referenced by _meshMsg.filename() is resolved either from the
/// local fuel cache under $HOME/.ignition/fuel (for http/https URLs) or via
/// ignition::common::findFile(), loaded through the MeshManager, and the
/// matching submesh's geometry (points, indices, UVs, normals) is copied
/// into a UsdGeomMesh.
///
/// \param[in] _meshMsg Mesh message carrying the filename and scale.
/// \param[in] _path USD path at which the mesh prim is defined.
/// \param[in] _stage Stage on which to define the prim.
/// \return The created UsdGeomMesh, or an invalid (default-constructed)
/// UsdGeomMesh on failure (unresolvable file, unsupported primitive type).
pxr::UsdGeomMesh UpdateMesh(const ignition::msgs::MeshGeom &_meshMsg,
                            const std::string &_path,
                            const pxr::UsdStageRefPtr &_stage)
{
  ignition::common::URI uri(_meshMsg.filename());
  std::string fullname;
  std::string home;
  if (!ignition::common::env("HOME", home, false))
  {
    ignerr << "The HOME environment variable was not defined, "
           << "so the resource [" << fullname << "] could not be found\n";
    return pxr::UsdGeomMesh();
  }
  if (uri.Scheme() == "https" || uri.Scheme() == "http")
  {
    // Fuel URL: map it onto the local fuel cache under ~/.ignition/fuel.
    auto systemPaths = ignition::common::systemPaths();
    std::vector<std::string> tokens =
        ignition::common::split(uri.Path().Str(), "/");
    // Guard against malformed URLs: tokens[0]..tokens[5] are indexed below.
    if (tokens.size() < 6)
    {
      ignerr << "Unable to parse the mesh URL [" << _meshMsg.filename()
             << "]\n";
      return pxr::UsdGeomMesh();
    }
    std::string server = tokens[0];
    std::string owner = ignition::common::lowercase(tokens[2]);
    std::string type = ignition::common::lowercase(tokens[3]);
    std::string modelName = ignition::common::lowercase(tokens[4]);
    std::string modelVersion = ignition::common::lowercase(tokens[5]);
    fullname = ignition::common::joinPaths(
        home, ".ignition", "fuel", server, owner, type, modelName, modelVersion);
    systemPaths->AddFilePaths(fullname);
    // Append the remaining path components. tokens[6] is intentionally
    // skipped (presumably the "files" segment of the URL -- TODO confirm).
    for (std::size_t i = 7; i < tokens.size(); i++)
    {
      fullname = ignition::common::joinPaths(
          fullname, ignition::common::lowercase(tokens[i]));
      systemPaths->AddFilePaths(fullname);
    }
  }
  else
  {
    fullname = ignition::common::findFile(_meshMsg.filename());
  }
  auto ignMesh = ignition::common::MeshManager::Instance()->Load(fullname);
  if (nullptr == ignMesh)
  {
    // Load() returns nullptr when the file cannot be found or parsed;
    // without this check the loops below dereference a null pointer.
    ignerr << "Unable to load mesh [" << fullname << "]\n";
    return pxr::UsdGeomMesh();
  }
  // Some Meshes are splited in some submeshes, this loop check if the name
  // of the path is the same as the name of the submesh. In this case
  // we create a USD mesh per submesh.
  bool isUSDPathInSubMeshName = false;
  for (unsigned int i = 0; i < ignMesh->SubMeshCount(); ++i)
  {
    auto subMesh = ignMesh->SubMeshByIndex(i).lock();
    if (!subMesh)
      continue;
    if (ignMesh->SubMeshCount() != 1)
    {
      std::string pathLowerCase = ignition::common::lowercase(_path);
      std::string subMeshLowerCase =
          ignition::common::lowercase(subMesh->Name());
      if (pathLowerCase.find(subMeshLowerCase) != std::string::npos)
      {
        isUSDPathInSubMeshName = true;
        break;
      }
    }
  }
  for (unsigned int i = 0; i < ignMesh->SubMeshCount(); ++i)
  {
    pxr::VtArray<pxr::GfVec3f> meshPoints;
    pxr::VtArray<pxr::GfVec2f> uvs;
    pxr::VtArray<pxr::GfVec3f> normals;
    pxr::VtArray<int> faceVertexIndices;
    pxr::VtArray<int> faceVertexCounts;
    auto subMesh = ignMesh->SubMeshByIndex(i).lock();
    if (!subMesh)
    {
      ignerr << "Unable to get a shared pointer to submesh at index [" << i
             << "] of parent mesh [" << ignMesh->Name() << "]" << std::endl;
      return pxr::UsdGeomMesh();
    }
    if (isUSDPathInSubMeshName)
    {
      if (ignMesh->SubMeshCount() != 1)
      {
        // Only convert the submesh whose name appears in the USD path.
        std::string pathLowerCase = ignition::common::lowercase(_path);
        std::string subMeshLowerCase =
            ignition::common::lowercase(subMesh->Name());
        if (pathLowerCase.find(subMeshLowerCase) == std::string::npos)
        {
          continue;
        }
      }
    }
    // copy the submesh's vertices to the usd mesh's "points" array
    for (unsigned int v = 0; v < subMesh->VertexCount(); ++v)
    {
      const auto &vertex = subMesh->Vertex(v);
      meshPoints.push_back(pxr::GfVec3f(vertex.X(), vertex.Y(), vertex.Z()));
    }
    // copy the submesh's indices to the usd mesh's "faceVertexIndices" array
    for (unsigned int j = 0; j < subMesh->IndexCount(); ++j)
      faceVertexIndices.push_back(subMesh->Index(j));
    // copy the submesh's texture coordinates (V is flipped to USD convention)
    for (unsigned int j = 0; j < subMesh->TexCoordCount(); ++j)
    {
      const auto &uv = subMesh->TexCoord(j);
      uvs.push_back(pxr::GfVec2f(uv[0], 1 - uv[1]));
    }
    // copy the submesh's normals
    for (unsigned int j = 0; j < subMesh->NormalCount(); ++j)
    {
      const auto &normal = subMesh->Normal(j);
      normals.push_back(pxr::GfVec3f(normal[0], normal[1], normal[2]));
    }
    // set the usd mesh's "faceVertexCounts" array according to
    // the submesh primitive type
    // TODO(adlarkin) support all primitive types. The computations are more
    // involved for LINESTRIPS, TRIFANS, and TRISTRIPS. I will need to spend
    // some time deriving what the number of faces for these primitive types
    // are, given the number of indices. The "faceVertexCounts" array will
    // also not have the same value for every element in the array for these
    // more complex primitive types (see the TODO note in the for loop below)
    unsigned int verticesPerFace = 0;
    unsigned int numFaces = 0;
    switch (subMesh->SubMeshPrimitiveType())
    {
      case ignition::common::SubMesh::PrimitiveType::POINTS:
        verticesPerFace = 1;
        numFaces = subMesh->IndexCount();
        break;
      case ignition::common::SubMesh::PrimitiveType::LINES:
        verticesPerFace = 2;
        numFaces = subMesh->IndexCount() / 2;
        break;
      case ignition::common::SubMesh::PrimitiveType::TRIANGLES:
        verticesPerFace = 3;
        numFaces = subMesh->IndexCount() / 3;
        break;
      case ignition::common::SubMesh::PrimitiveType::LINESTRIPS:
      case ignition::common::SubMesh::PrimitiveType::TRIFANS:
      case ignition::common::SubMesh::PrimitiveType::TRISTRIPS:
      default:
        ignerr << "Submesh " << subMesh->Name()
               << " has a primitive type that is not supported." << std::endl;
        return pxr::UsdGeomMesh();
    }
    // TODO(adlarkin) update this loop to allow for varying element
    // values in the array (see TODO note above). Right now, the
    // array only allows for all elements to have one value, which in
    // this case is "verticesPerFace"
    for (unsigned int n = 0; n < numFaces; ++n)
      faceVertexCounts.push_back(verticesPerFace);
    std::string primName = _path + "/" + subMesh->Name();
    primName = removeDash(primName);
    if (endsWith(primName, "/"))
    {
      primName.erase(primName.size() - 1);
    }
    // NOTE(review): primName is computed above but the prim is defined at
    // _path. This works because the function returns after converting the
    // first surviving submesh, so only one prim is ever defined per call --
    // confirm before reusing this loop to emit multiple submeshes.
    auto usdMesh = pxr::UsdGeomMesh::Define(_stage, pxr::SdfPath(_path));
    usdMesh.CreatePointsAttr().Set(meshPoints);
    usdMesh.CreateFaceVertexIndicesAttr().Set(faceVertexIndices);
    usdMesh.CreateFaceVertexCountsAttr().Set(faceVertexCounts);
    auto coordinates = usdMesh.CreatePrimvar(
        pxr::TfToken("st"), pxr::SdfValueTypeNames->Float2Array,
        pxr::UsdGeomTokens->vertex);
    coordinates.Set(uvs);
    usdMesh.CreateNormalsAttr().Set(normals);
    usdMesh.SetNormalsInterpolation(pxr::TfToken("vertex"));
    usdMesh.CreateSubdivisionSchemeAttr(pxr::VtValue(pxr::TfToken("none")));
    // Extent is the axis-aligned bound of the whole parent mesh.
    const auto &meshMin = ignMesh->Min();
    const auto &meshMax = ignMesh->Max();
    pxr::VtArray<pxr::GfVec3f> extentBounds;
    extentBounds.push_back(pxr::GfVec3f(meshMin.X(), meshMin.Y(), meshMin.Z()));
    extentBounds.push_back(pxr::GfVec3f(meshMax.X(), meshMax.Y(), meshMax.Z()));
    usdMesh.CreateExtentAttr().Set(extentBounds);
    // TODO (ahcorde): Material inside the submesh
    int materialIndex = subMesh->MaterialIndex();
    if (materialIndex != -1)
    {
      auto material = ignMesh->MaterialByIndex(materialIndex);
      // sdf::Material materialSdf = sdf::usd::convert(material);
      // auto materialUSD = ParseSdfMaterial(&materialSdf, _stage);
      // if(materialSdf.Emissive() != ignition::math::Color(0, 0, 0, 1)
      //     || materialSdf.Specular() != ignition::math::Color(0, 0, 0, 1)
      //     || materialSdf.PbrMaterial())
      // {
      //   if (materialUSD)
      //   {
      //     pxr::UsdShadeMaterialBindingAPI(usdMesh).Bind(materialUSD);
      //   }
      // }
    }
    // Apply the scale requested by the message.
    pxr::UsdGeomXformCommonAPI meshXformAPI(usdMesh);
    meshXformAPI.SetScale(pxr::GfVec3f(
        _meshMsg.scale().x(), _meshMsg.scale().y(), _meshMsg.scale().z()));
    return usdMesh;
  }
  return pxr::UsdGeomMesh();
}
} // namespace ignition::omniverse
| 9,358 | C++ | 34.721374 | 85 | 0.643941 |
gazebosim/gz-omni/source/ignition_live/main.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "GetOp.hpp"
#include "OmniverseConnect.hpp"
#include "Scene.hpp"
#include "SetOp.hpp"
#include "ThreadSafe.hpp"
#include <ignition/common/Console.hh>
#include <ignition/common/SystemPaths.hh>
#include <ignition/common/StringUtils.hh>
#include <ignition/utils/cli.hh>
#include <pxr/usd/sdf/path.h>
#include <pxr/usd/usd/prim.h>
#include <pxr/usd/usdGeom/xformCommonAPI.h>
#include <string>
using namespace ignition::omniverse;
// Target frequency of the main synchronization loop; kUpdateRate is the
// matching per-iteration period used to throttle the loop in main().
constexpr double kTargetFps = 60;
constexpr std::chrono::duration<double> kUpdateRate(1 / kTargetFps);
/// \brief Entry point: parse CLI options, connect to Omniverse, open or
/// create the USD stage, and run the live synchronization loop forever.
/// \return -1 on startup failure; the main loop never exits normally.
int main(int argc, char* argv[])
{
  CLI::App app("Ignition omniverse connector");

  // Required: URL of the omniverse stage to mirror.
  std::string destinationPath;
  app.add_option("-p,--path", destinationPath,
                 // clang-format off
    "Location of the omniverse stage. e.g. \"omniverse://localhost/Users/ignition/stage.usd\"")
      // clang-format on
      ->required();

  // Required: name of the ignition world to connect to.
  std::string worldName;
  ignition::omniverse::Simulator simulatorPoses{
      ignition::omniverse::Simulator::Ignition};
  app.add_option("-w,--world", worldName, "Name of the ignition world")
      ->required();

  // Required: which simulator is authoritative for object poses.
  std::map<std::string, ignition::omniverse::Simulator> map{
      {"ignition", ignition::omniverse::Simulator::Ignition},
      {"isaacsim", ignition::omniverse::Simulator::IsaacSim}};
  app.add_option("--pose", simulatorPoses, "Which simulator will handle the poses")
      ->required()
      ->transform(CLI::CheckedTransformer(map, CLI::ignore_case));

  app.add_flag_callback("-v,--verbose",
                        []() { ignition::common::Console::SetVerbosity(4); });

  CLI11_PARSE(app, argc, argv);

  // Make every path in IGN_GAZEBO_RESOURCE_PATH discoverable by findFile().
  std::string ignGazeboResourcePath;
  auto systemPaths = ignition::common::systemPaths();
  ignition::common::env("IGN_GAZEBO_RESOURCE_PATH", ignGazeboResourcePath);
  for (const auto& resourcePath :
       ignition::common::Split(ignGazeboResourcePath, ':'))
  {
    systemPaths->AddFilePaths(resourcePath);
  }

  // Connect with omniverse
  if (!StartOmniverse())
  {
    ignerr << "Not able to start Omniverse" << std::endl;
    return -1;
  }

  // Open the USD model in Omniverse
  const std::string stageUrl = [&]()
  {
    auto result = CreateOmniverseModel(destinationPath);
    if (!result)
    {
      ignerr << result.Error() << std::endl;
      exit(-1);
    }
    return result.Value();
  }();

  // Enable live-sync mode so edits propagate without explicit saves.
  omniUsdLiveSetModeForUrl(stageUrl.c_str(),
                           OmniUsdLiveMode::eOmniUsdLiveModeEnabled);

  PrintConnectedUsername(stageUrl);

  Scene scene(worldName, stageUrl, simulatorPoses);
  if (!scene.Init())
  {
    return -1;
  }

  auto lastUpdate = std::chrono::steady_clock::now();
  // don't spam the console, show the fps only once a sec
  auto nextShowFps =
      lastUpdate.time_since_epoch() + std::chrono::duration<double>(1);

  // Main loop: throttle to kTargetFps, push scene changes and pump the
  // omniverse live-update queue each iteration. Runs until killed.
  while (true)
  {
    std::this_thread::sleep_for((lastUpdate + kUpdateRate) -
                                std::chrono::steady_clock::now());
    auto now = std::chrono::steady_clock::now();
    if (now.time_since_epoch() > nextShowFps)
    {
      double curFps =
          1 / std::chrono::duration<double>(now - lastUpdate).count();
      nextShowFps = now.time_since_epoch() + std::chrono::duration<double>(1);
      igndbg << "fps: " << curFps << std::endl;
    }
    lastUpdate = now;
    scene.Save();
    omniUsdLiveProcess();
  }
  return 0;
}
| 3,968 | C++ | 29.068182 | 108 | 0.662802 |
gazebosim/gz-omni/source/ignition_live/OmniClientpp.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* C++ wrappers for various omniclient apis
*/
#ifndef IGNITION_OMNIVERSE_OMNICLIENTPP_HPP
#define IGNITION_OMNIVERSE_OMNICLIENTPP_HPP
#include "Error.hpp"
#include <OmniClient.h>
#include <ostream>
#include <string>
namespace ignition::omniverse
{
/// \brief RAII wrapper to omniClientLock and omniClientUnlock.
/// Locks the given URL for the lifetime of the object; because the lock is
/// tied to this lifetime, the type is non-copyable and non-movable.
class OmniverseLock
{
 public:
  /// \brief Acquire the lock on _url.
  OmniverseLock(const std::string& _url);

  /// \brief Release the lock.
  ~OmniverseLock();

  OmniverseLock(const OmniverseLock&) = delete;
  OmniverseLock(OmniverseLock&&) = delete;
  OmniverseLock& operator=(const OmniverseLock&) = delete;

 private:
  /// \brief URL whose lock this object holds.
  const std::string url;
};
/// \brief Synchronous API for omniverse
class OmniverseSync
{
 public:
  /// \brief Convenience alias that fixes the error type to OmniClientResult.
  /// NOTE(review): the member name intentionally shadows the class template
  /// MaybeError from Error.hpp that it refers to -- confirm this compiles on
  /// all supported toolchains before renaming either side.
  template <typename T>
  using MaybeError = MaybeError<T, OmniClientResult>;

  /// \brief Stat the given URL, blocking until a result is available.
  /// \return The list entry on success, or the OmniClientResult error code.
  static MaybeError<OmniClientListEntry> Stat(const std::string& url) noexcept;
};
} // namespace ignition::omniverse
#endif
| 1,507 | C++ | 23.721311 | 79 | 0.741871 |
gazebosim/gz-omni/source/ignition_live/FUSDLayerNoticeListener.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_FUSDLAYERNOTICELISTENER_HPP
#define IGNITION_OMNIVERSE_FUSDLAYERNOTICELISTENER_HPP
#include "Scene.hpp"
#include "ThreadSafe.hpp"
#include <pxr/usd/usd/stage.h>
#include <ignition/common/Console.hh>
#include <ignition/utils/ImplPtr.hh>
namespace ignition
{
namespace omniverse
{
/// \brief Receives SdfLayer change/reload notices for the USD stage that
/// mirrors an ignition world.
class FUSDLayerNoticeListener : public pxr::TfWeakBase
{
 public:
  /// \brief Constructor.
  /// \param[in] _stage Thread-safe handle to the stage being watched.
  /// \param[in] _worldName Name of the associated ignition world.
  FUSDLayerNoticeListener(
      std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> &_stage,
      const std::string& _worldName);

  /// \brief Handler invoked when a layer's content is fully reloaded.
  void HandleGlobalLayerReload(const pxr::SdfNotice::LayerDidReloadContent& n);

  /// \brief Handler for per-layer change notices.
  /// Print some interesting info about the LayerNotice.
  /// \param[in] _layerNotice The per-layer change description.
  /// \param[in] _sender Layer that emitted the notice.
  void HandleRootOrSubLayerChange(
      const class pxr::SdfNotice::LayersDidChangeSentPerLayer& _layerNotice,
      const pxr::TfWeakPtr<pxr::SdfLayer>& _sender);

  /// \internal
  /// \brief Private data pointer
  IGN_UTILS_UNIQUE_IMPL_PTR(dataPtr)
};
} // namespace omniverse
} // namespace ignition
#endif
| 1,577 | C++ | 27.178571 | 79 | 0.749524 |
gazebosim/gz-omni/source/ignition_live/Joint.cpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "Joint.hpp"
// FIX: these definitions must live in ignition::omniverse to match the
// declarations in Joint.hpp; at global scope they define different
// functions, leaving the declared ones undefined at link time.
namespace ignition::omniverse
{
/// \brief Define a "PhysicsFixedJoint" prim at the given stage path.
/// \param[in] _path Stage path where the joint prim is created.
/// \param[in] _stage Stage on which to define the prim.
/// \return The newly defined prim.
pxr::UsdPrim CreateFixedJoint(const std::string& _path,
                              const pxr::UsdStageRefPtr& _stage)
{
  pxr::TfToken usdPrimTypeName("PhysicsFixedJoint");
  return _stage->DefinePrim(pxr::SdfPath(_path), usdPrimTypeName);
}

/// \brief Define a "PhysicsRevoluteJoint" prim at the given stage path.
/// \param[in] _path Stage path where the joint prim is created.
/// \param[in] _stage Stage on which to define the prim.
/// \return The newly defined prim.
pxr::UsdPrim CreateRevoluteJoint(const std::string& _path,
                                 const pxr::UsdStageRefPtr& _stage)
{
  pxr::TfToken usdPrimTypeName("PhysicsRevoluteJoint");
  return _stage->DefinePrim(pxr::SdfPath(_path), usdPrimTypeName);
}
}  // namespace ignition::omniverse
| 1,145 | C++ | 33.727272 | 75 | 0.703057 |
gazebosim/gz-omni/source/ignition_live/FUSDNoticeListener.hpp | /*
* Copyright (C) 2022 Open Source Robotics Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef IGNITION_OMNIVERSE_FUSDNOTICELISTENER_HPP
#define IGNITION_OMNIVERSE_FUSDNOTICELISTENER_HPP
#include <memory>
#include <string>
#include "ThreadSafe.hpp"
#include "Scene.hpp"
#include <pxr/usd/usd/notice.h>
namespace ignition
{
namespace omniverse
{
/// \brief Receives UsdNotice::ObjectsChanged notices for the USD stage that
/// mirrors an ignition world.
class FUSDNoticeListener : public pxr::TfWeakBase
{
 public:
  /// \brief Constructor.
  /// \param[in] _stage Thread-safe handle to the stage being watched.
  /// \param[in] _worldName Name of the associated ignition world.
  /// \param[in] _simulatorPoses Which simulator is authoritative for poses.
  /// \param[in] entitiesByName Map from entity name to ignition entity id.
  FUSDNoticeListener(
      std::shared_ptr<ThreadSafe<pxr::UsdStageRefPtr>> &_stage,
      const std::string &_worldName,
      Simulator _simulatorPoses,
      std::unordered_map<std::string, uint32_t> &entitiesByName);

  /// \brief Handler invoked when objects on the stage change.
  void Handle(const class pxr::UsdNotice::ObjectsChanged &ObjectsChanged);

  /// \internal
  /// \brief Private data pointer
  IGN_UTILS_UNIQUE_IMPL_PTR(dataPtr)
};
} // namespace omniverse
} // namespace ignition
#endif
| 1,385 | C++ | 26.17647 | 75 | 0.740072 |
NVlabs/ACID/README.md | [![NVIDIA Source Code License](https://img.shields.io/badge/license-NSCL-blue.svg)](https://github.com/NVlabs/ACID/blob/master/LICENSE)
![Python 3.7](https://img.shields.io/badge/python-3.7-green.svg)
# ACID: Action-Conditional Implicit Visual Dynamics for Deformable Object Manipulation
### [Project Page](https://b0ku1.github.io/acid/) | [Paper](https://arxiv.org/abs/2203.06856)
<div style="text-align: center">
<img src="_media/model_figure.png" width="600"/>
</div>
This repository contains the codebase used in [**ACID: Action-Conditional Implicit Visual Dynamics for Deformable Object Manipulation**](https://b0ku1.github.io/acid/), which will appear in [RSS 2022](https://roboticsconference.org/program/papers/) and is nominated for Best Student Paper Award. Specifically, the repo contains code for:
* [**PlushSim**](./PlushSim/), the simulation environment used to generate all manipulation data.
* [**ACID model**](./ACID/), the implicit visual dynamics model's model and training code.
If you find our code or paper useful, please consider citing
```bibtex
@article{shen2022acid,
title={ACID: Action-Conditional Implicit Visual Dynamics for Deformable Object Manipulation},
author={Shen, Bokui and Jiang, Zhenyu and Choy, Christopher and J. Guibas, Leonidas and Savarese, Silvio and Anandkumar, Anima and Zhu, Yuke},
journal={Robotics: Science and Systems (RSS)},
year={2022}
}
```
# ACID model
Please see the [README](./ACID/README.md) for more detailed information.
# PlushSim
Please see the [README](./PlushSim/README.md) for more detailed information.
# License
Please check the [LICENSE](./LICENSE) file. ACID may be used non-commercially, meaning for research or evaluation purposes only. For business inquiries, please contact researchinquiries@nvidia.com.
| 1,794 | Markdown | 48.86111 | 337 | 0.758082 |
NVlabs/ACID/PlushSim/README.md | [![NVIDIA Source Code License](https://img.shields.io/badge/license-NSCL-blue.svg)](https://github.com/NVlabs/ACID/blob/master/LICENSE)
![Python 3.7](https://img.shields.io/badge/python-3.7-green.svg)
# PlushSim
<div style="text-align: center">
<img src="../_media/plushsim.png" width="600"/>
</div>
Our PlushSim simulation environment is based on [Omniverse Kit](https://docs.omniverse.nvidia.com/prod_kit/prod_kit.html). This codebase contains the docker image and the code to simulate and manipulate deformable objects.
## Prerequisites
Omniverse Kit has a set of hardware requirements. Specifically, it requires an RTX GPU (e.g. RTX 2080, RTX 30x0, Titan RTX, etc.). Also, 16GB+ of memory is recommended.
The codebase is tested on Linux Ubuntu 20.04.
## Getting the Docker Image
First, you need to install [Docker](https://docs.docker.com/engine/install/ubuntu/) and [NVIDIA Container Toolkit](https://github.com/NVIDIA/nvidia-docker) before proceeding.
After you have installed Docker and NVIDIA container toolkit, you can obtain the PlushSim Docker image from DockerHub, with command:
```
docker pull b0ku1/acid-docker:cleaned
```
## Preparing Simulation Assets
You can download the simulation assets `raw_assets.zip` at: [Google Drive](https://drive.google.com/file/d/1OO8Wi0PHF3ROmW8088JNOMJn4EcDLDPB/view?usp=sharing).
After you download it, unzip the assets within this directory. You should have a folder structure like:
```
PlushSim/
assets/
animals/
...
attic_clean/
...
```
## Generating Manipulation Trajectories
Generating manipulation data consists of two steps:
1. Start Docker image, and mount the correct directory.
2. Run script
To start the docker image with an interactive session, run the following command inside `PlushSim/`:
```
export PLUSHSIM_ROOT=$(pwd)
docker run -it -v $PLUSHSIM_ROOT:/result --gpus all b0ku1/acid-docker:cleaned bash
```
After entering the interactive session, you can run the following command to start generating manipulation trajectories:
```
./python.sh /result/scripts/data_gen_attic.py
```
The above scripts will generate sample interaction sequences in `PlushSim/interaction_sequence`. There are various command line arguments that you can give to `data_gen_attic.py`. Please see documentation of the python script.
## Visualizing the assets in GUI
To visualize the assets in Omniverse GUI, you need to download and install [Omniverse](https://docs.omniverse.nvidia.com/prod_install-guide/prod_install-guide.html). The link contains NVIDIA's official instruction for installation.
After you install Omniverse, you can open the `.usda` files in the assets folder. To run PlushSim's scripts outside of Docker (e.g. with your native Omniverse installation), you can find more information at [Omniverse Kit's Python Manual](https://docs.omniverse.nvidia.com/py/kit/index.html). For questions regarding Omniverse usage, please visit [NVIDIA developer forum](https://forums.developer.nvidia.com/c/omniverse/300).
## License
Please check the [LICENSE](../LICENSE) file. ACID may be used non-commercially, meaning for research or evaluation purposes only. For business inquiries, please contact researchinquiries@nvidia.com.
If you find our code or paper useful, please consider citing
```bibtex
@article{shen2022acid,
title={ACID: Action-Conditional Implicit Visual Dynamics for Deformable Object Manipulation},
author={Shen, Bokui and Jiang, Zhenyu and Choy, Christopher and J. Guibas, Leonidas and Savarese, Silvio and Anandkumar, Anima and Zhu, Yuke},
journal={Robotics: Science and Systems (RSS)},
year={2022}
}
``` | 3,640 | Markdown | 48.876712 | 425 | 0.765659 |
NVlabs/ACID/PlushSim/scripts/python_app.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import carb
import omni.kit.app
import omni.kit
import os
import sys
import time
import asyncio
import argparse
# Default OmniKitHelper launch configuration; see OmniKitHelper.__init__ for
# the documented meaning of every entry.
DEFAULT_CONFIG = {
    "width": 1024,
    "height": 800,
    "renderer": "PathTracing",  # Can also be RayTracedLighting
    "anti_aliasing": 3,  # 3 for dlss, 2 for fxaa, 1 for taa, 0 to disable aa
    "samples_per_pixel_per_frame": 64,  # PathTracing only
    "denoiser": True,  # AI denoising (PathTracing)
    "subdiv_refinement_level": 0,
    "headless": True,
    "max_bounces": 4,  # PathTracing only
    "max_specular_transmission_bounces": 6,  # PathTracing only
    "max_volume_bounces": 4,  # PathTracing only
    "sync_loads": False,  # pause rendering until all assets are loaded
    # Kit experience file used to launch the app; requires EXP_PATH to be set.
    "experience": f'{os.environ["EXP_PATH"]}/omni.bloky.python.kit',
}
class OmniKitHelper:
"""Helper class for launching OmniKit from a Python environment.
Launches and configures OmniKit and exposes useful functions.
Typical usage example:
.. highlight:: python
.. code-block:: python
config = {'width': 800, 'height': 600, 'renderer': 'PathTracing'}
kit = OmniKitHelper(config) # Start omniverse kit
# <Code to generate or load a scene>
kit.update() # Render a single frame"""
    def __init__(self, config=DEFAULT_CONFIG):
        """The config variable is a dictionary containing the following entries
        Args:
            width (int): Width of the viewport and generated images. Defaults to 1024
            height (int): Height of the viewport and generated images. Defaults to 800
            renderer (str): Rendering mode, can be `RayTracedLighting` or `PathTracing`. Defaults to `PathTracing`
            samples_per_pixel_per_frame (int): The number of samples to render per frame, used for `PathTracing` only. Defaults to 64
            denoiser (bool): Enable this to use AI denoising to improve image quality. Defaults to True
            subdiv_refinement_level (int): Number of subdivisons to perform on supported geometry. Defaults to 0
            headless (bool): Disable UI when running. Defaults to True
            max_bounces (int): Maximum number of bounces, used for `PathTracing` only. Defaults to 4
            max_specular_transmission_bounces(int): Maximum number of bounces for specular or transmission, used for `PathTracing` only. Defaults to 6
            max_volume_bounces(int): Maximum number of bounces for volumetric, used for `PathTracing` only. Defaults to 4
            sync_loads (bool): When enabled, will pause rendering until all assets are loaded. Defaults to False
            experience (str): The config json used to launch the application.
        """
        # only import custom loop runner if we create this object
        # from omni.kit.loop import _loop
        # initialize vars
        self._exiting = False
        self._is_dirty_instance_mappings = True
        self._previous_physics_dt = 1.0 / 60.0
        # NOTE(review): this aliases the module-level DEFAULT_CONFIG dict, so
        # the update() below mutates it globally -- confirm this is intended.
        self.config = DEFAULT_CONFIG
        if config is not None:
            self.config.update(config)
        # Load app plugin
        self._framework = carb.get_framework()
        print(os.environ["CARB_APP_PATH"])
        self._framework.load_plugins(
            loaded_file_wildcards=["omni.kit.app.plugin"],
            search_paths=[os.path.abspath(f'{os.environ["CARB_APP_PATH"]}/kit/plugins')],
        )
        print(DEFAULT_CONFIG)
        # launch kit
        self.last_update_t = time.time()
        self.app = omni.kit.app.get_app()
        self.kit_settings = None
        self._start_app()
        self.carb_settings = carb.settings.acquire_settings_interface()
        self.setup_renderer(mode="default")  # set rtx-defaults settings
        self.setup_renderer(mode="non-default")  # set rtx settings
        self.timeline = omni.timeline.get_timeline_interface()
        # Wait for new stage to open, pumping the app loop while it loads
        new_stage_task = asyncio.ensure_future(omni.usd.get_context().new_stage_async())
        print("OmniKitHelper Starting up ...")
        while not new_stage_task.done():
            time.sleep(0.001)  # This sleep prevents a deadlock in certain cases
            self.update()
        self.update()
        # Dock windows if they exist
        main_dockspace = omni.ui.Workspace.get_window("DockSpace")
        def dock_window(space, name, location):
            # Dock the window called `name` into `space` at `location`,
            # silently skipping windows that do not exist.
            window = omni.ui.Workspace.get_window(name)
            if window and space:
                window.dock_in(space, location)
            return window
        view = dock_window(main_dockspace, "Viewport", omni.ui.DockPosition.TOP)
        self.update()
        console = dock_window(view, "Console", omni.ui.DockPosition.BOTTOM)
        prop = dock_window(view, "Property", omni.ui.DockPosition.RIGHT)
        dock_window(view, "Main ToolBar", omni.ui.DockPosition.LEFT)
        self.update()
        dock_window(prop, "Render Settings", omni.ui.DockPosition.SAME)
        self.update()
        print("OmniKitHelper Startup Complete")
def _start_app(self):
args = [
os.path.abspath(__file__),
f'{self.config["experience"]}',
"--/persistent/app/viewport/displayOptions=0", # hide extra stuff in viewport
# Forces kit to not render until all USD files are loaded
f'--/rtx/materialDb/syncLoads={self.config["sync_loads"]}',
f'--/rtx/hydra/materialSyncLoads={self.config["sync_loads"]}'
f'--/omni.kit.plugin/syncUsdLoads={self.config["sync_loads"]}',
"--/app/content/emptyStageOnStart=False", # This is required due to a infinite loop but results in errors on launch
"--/app/hydraEngine/waitIdle=True",
"--/app/asyncRendering=False",
f'--/app/renderer/resolution/width={self.config["width"]}',
f'--/app/renderer/resolution/height={self.config["height"]}',
]
args.append(f"--portable")
args.append(f"--no-window")
args.append(f"--allow-root")
print(args)
self.app.startup("kit", f'{os.environ["CARB_APP_PATH"]}/kit', args)
def __del__(self):
if self._exiting is False and sys.meta_path is None:
print(
"\033[91m"
+ "ERROR: Python exiting while OmniKitHelper was still running, Please call shutdown() on the OmniKitHelper object to exit cleanly"
+ "\033[0m"
)
    def shutdown(self):
        """Shut down the kit app and unload all carb plugins.

        Blocks until any in-flight USD loads complete (to avoid a deadlock)
        before tearing the application down.
        """
        self._exiting = True
        print("Shutting Down OmniKitHelper...")
        # We are exiting but something is still loading, wait for it to load to avoid a deadlock
        if self.is_loading():
            print("   Waiting for USD resource operations to complete (this may take a few seconds)")
            while self.is_loading():
                self.app.update()
        self.app.shutdown()
        self._framework.unload_all_plugins()
        print("Shutting Down Complete")
    def get_stage(self):
        """Returns the current USD stage of the active omni.usd context."""
        return omni.usd.get_context().get_stage()
def set_setting(self, setting, value):
"""Convenience function to set settings.
Args:
setting (str): string representing the setting being changed
value: new value for the setting being changed, the type of this value must match its repsective setting
"""
if isinstance(value, str):
self.carb_settings.set_string(setting, value)
elif isinstance(value, bool):
self.carb_settings.set_bool(setting, value)
elif isinstance(value, int):
self.carb_settings.set_int(setting, value)
elif isinstance(value, float):
self.carb_settings.set_float(setting, value)
else:
raise ValueError(f"Value of type {type(value)} is not supported.")
    def set_physics_dt(self, physics_dt: float = 1.0 / 150.0, physics_substeps: int = 1):
        """Specify the physics step size to use when simulating, default is 1/60.
        Note that a physics scene has to be in the stage for this to do anything
        Args:
            physics_dt (float): Use this value for physics step
            physics_substeps (int): Number of physics substeps; values <= 1 (or None) mean no substepping
        """
        # No-op when there is no stage or the step size is unchanged.
        if self.get_stage() is None:
            return
        if physics_dt == self._previous_physics_dt:
            return
        if physics_substeps is None or physics_substeps <= 1:
            physics_substeps = 1
        self._previous_physics_dt = physics_dt
        # Imported lazily so pxr is only required once a step is actually set.
        from pxr import UsdPhysics, PhysxSchema
        steps_per_second = int(1.0 / physics_dt)
        min_steps = int(steps_per_second / physics_substeps)
        # Apply the rate to every physics scene found on the stage.
        physxSceneAPI = None
        for prim in self.get_stage().Traverse():
            if prim.IsA(UsdPhysics.Scene):
                physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(prim)
        if physxSceneAPI is not None:
            physxSceneAPI.GetTimeStepsPerSecondAttr().Set(steps_per_second)
        settings = carb.settings.get_settings()
        settings.set_int("persistent/simulation/minFrameRate", min_steps)
    def update(self, dt=0.0, physics_dt=None, physics_substeps=None):
        """Render one frame. Optionally specify dt in seconds, specify None to use wallclock.
        Specify physics_dt and physics_substeps to decouple the physics step size from rendering
        For example: to render with a dt of 1/30 and simulate physics at 1/120 use:
            - dt = 1/30.0
            - physics_dt = 1/120.0
            - physics_substeps = 4
        Args:
            dt (float): The step size used for the overall update, set to None to use wallclock
            physics_dt (float, optional): If specified use this value for physics step
            physics_substeps (int, optional): Maximum number of physics substeps to perform
        """
        # dont update if exit was called
        if self._exiting:
            return
        # a physics dt was specified and is > 0
        if physics_dt is not None and physics_dt > 0.0:
            self.set_physics_dt(physics_dt, physics_substeps)
        # a dt was specified and is > 0
        if dt is not None and dt > 0.0:
            # if physics dt was not specified, use rendering dt
            if physics_dt is None:
                self.set_physics_dt(dt)
            # self.loop_runner.set_runner_dt(dt)
            self.app.update()
        else:
            # dt not specified, run in realtime: derive dt from wallclock and
            # fall back to a fixed 1/60 physics step with 4 substeps
            time_now = time.time()
            dt = time_now - self.last_update_t
            if physics_dt is None:
                self.set_physics_dt(1.0 / 60.0, 4)
            self.last_update_t = time_now
            # self.loop_runner.set_runner_dt(dt)
            self.app.update()
    def play(self):
        """Starts the editor physics simulation (renders a frame before and after starting the timeline)"""
        self.update()
        self.timeline.play()
        self.update()
    def pause(self):
        """Pauses the editor physics simulation (renders a frame before and after pausing the timeline)"""
        self.update()
        self.timeline.pause()
        self.update()
    def stop(self):
        """Stops the editor physics simulation (renders a frame before and after stopping the timeline)"""
        self.update()
        self.timeline.stop()
        self.update()
    def get_status(self):
        """Get the status of the renderer to see if anything is loading.

        Returns:
            tuple: (message, loaded, loading) as reported by the USD context.
        """
        return omni.usd.get_context().get_stage_loading_status()
def is_loading(self):
"""convenience function to see if any files are being loaded
Returns:
bool: True if loading, False otherwise
"""
message, loaded, loading = self.get_status()
return loading > 0
    def is_exiting(self):
        """get current exit status for this object
        Returns:
            bool: True if exit() was called previously, False otherwise
        """
        return self._exiting
    def execute(self, *args, **kwargs):
        """Allow use of omni.kit.commands interface; forwards all arguments to omni.kit.commands.execute"""
        omni.kit.commands.execute(*args, **kwargs)
def setup_renderer(self, mode="non-default"):
rtx_mode = "/rtx-defaults" if mode == "default" else "/rtx"
"""Reset render settings to those in config. This should be used in case a new stage is opened and the desired config needs to be re-applied"""
self.set_setting(rtx_mode + "/rendermode", self.config["renderer"])
# Raytrace mode settings
self.set_setting(rtx_mode + "/post/aa/op", self.config["anti_aliasing"])
self.set_setting(rtx_mode + "/directLighting/sampledLighting/enabled", True)
# self.set_setting(rtx_mode + "/ambientOcclusion/enabled", True)
# Pathtrace mode settings
self.set_setting(rtx_mode + "/pathtracing/spp", self.config["samples_per_pixel_per_frame"])
self.set_setting(rtx_mode + "/pathtracing/totalSpp", self.config["samples_per_pixel_per_frame"])
self.set_setting(rtx_mode + "/pathtracing/clampSpp", self.config["samples_per_pixel_per_frame"])
self.set_setting(rtx_mode + "/pathtracing/maxBounces", self.config["max_bounces"])
self.set_setting(
rtx_mode + "/pathtracing/maxSpecularAndTransmissionBounces",
self.config["max_specular_transmission_bounces"],
)
self.set_setting(rtx_mode + "/pathtracing/maxVolumeBounces", self.config["max_volume_bounces"])
self.set_setting(rtx_mode + "/pathtracing/optixDenoiser/enabled", self.config["denoiser"])
self.set_setting(rtx_mode + "/hydra/subdivision/refinementLevel", self.config["subdiv_refinement_level"])
# Experimental, forces kit to not render until all USD files are loaded
self.set_setting(rtx_mode + "/materialDb/syncLoads", self.config["sync_loads"])
self.set_setting(rtx_mode + "/hydra/materialSyncLoads", self.config["sync_loads"])
self.set_setting("/omni.kit.plugin/syncUsdLoads", self.config["sync_loads"])
def create_prim(
self, path, prim_type, translation=None, rotation=None, scale=None, ref=None, semantic_label=None, attributes={}
):
"""Create a prim, apply specified transforms, apply semantic label and
set specified attributes.
args:
path (str): The path of the new prim.
prim_type (str): Prim type name
translation (tuple(float, float, float), optional): prim translation (applied last)
rotation (tuple(float, float, float), optional): prim rotation in radians with rotation
order ZYX.
scale (tuple(float, float, float), optional): scaling factor in x, y, z.
ref (str, optional): Path to the USD that this prim will reference.
semantic_label (str, optional): Semantic label.
attributes (dict, optional): Key-value pairs of prim attributes to set.
"""
from pxr import UsdGeom, Semantics
prim = self.get_stage().DefinePrim(path, prim_type)
for k, v in attributes.items():
prim.GetAttribute(k).Set(v)
xform_api = UsdGeom.XformCommonAPI(prim)
if ref:
prim.GetReferences().AddReference(ref)
if semantic_label:
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(semantic_label)
if rotation:
xform_api.SetRotate(rotation, UsdGeom.XformCommonAPI.RotationOrderXYZ)
if scale:
xform_api.SetScale(scale)
if translation:
xform_api.SetTranslate(translation)
return prim
def set_up_axis(self, axis):
"""Change the up axis of the current stage
Args:
axis: valid values are `UsdGeom.Tokens.y`, or `UsdGeom.Tokens.z`
"""
from pxr import UsdGeom, Usd
stage = self.get_stage()
rootLayer = stage.GetRootLayer()
rootLayer.SetPermissionToEdit(True)
with Usd.EditContext(stage, rootLayer):
UsdGeom.SetStageUpAxis(stage, axis)
| 16,266 | Python | 42.034391 | 151 | 0.624185 |
NVlabs/ACID/PlushSim/scripts/data_gen_attic.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import time
import argparse
import json
from utils import *
# CLI configuration for the attic interaction-dataset generation run.
parser = argparse.ArgumentParser("Dataset generation")
################################################################
# save to args
parser.add_argument("--save_dir", type=str, default="/result/interaction_sequence")
parser.add_argument("--img_subdir", type=str, default='img')
parser.add_argument("--geom_subdir", type=str, default='geom')
parser.add_argument("--info_subdir", type=str, default='info')
# frame interval between periodic observation/geometry dumps
parser.add_argument("--save_every", type=int, default=25)
################################################################
# interaction args
parser.add_argument("--num_interaction", type=int, default=18)
parser.add_argument("--reset_every", type=int, default=6)
################################################################
# scene args
parser.add_argument("--asset_root", type=str, default="/result/assets")
parser.add_argument("--scene_path", type=str, default="attic_lean/Attic_clean_v2.usda")
parser.add_argument("--plush_path", type=str, default="animals/teddy/teddy_scaled/teddy_scaled.usda")
parser.add_argument("--skip_layout_randomization", action="store_true", default=False)
parser.add_argument("--skip_lights_randomization", action="store_true", default=False)
args = parser.parse_args()
# Create the output directory tree (images, geometry, metadata).
os.makedirs(args.save_dir, exist_ok=True)
os.makedirs(os.path.join(args.save_dir, args.img_subdir), exist_ok=True)
os.makedirs(os.path.join(args.save_dir, args.geom_subdir), exist_ok=True)
os.makedirs(os.path.join(args.save_dir, args.info_subdir), exist_ok=True)
img_dir = os.path.join(args.save_dir, args.img_subdir)
geom_dir = os.path.join(args.save_dir, args.geom_subdir)
info_dir = os.path.join(args.save_dir, args.info_subdir)
def main():
    """Run the attic interaction-sampling loop and write the dataset to disk.

    Builds the attic scene, then for each reset samples grasp/drag/release
    interactions with the plush, periodically dumping observations (images)
    and plush geometry, plus per-reset collider and interaction metadata.
    Side effects: writes into the module-level img_dir/geom_dir/info_dir.
    """
    from attic_scene import attic_scene

    def save_snapshot(scene, reset):
        # One observation + geometry dump, keyed by reset index and frame number.
        tag = f"{reset:04d}_{scene.frame:06d}"
        save_frame(tag, scene.get_observations(), img_dir)
        np.savez_compressed(
            os.path.join(geom_dir, f"{tag}.npz"),
            **scene.get_scene_state_plush(convert_to=np.float16))

    scene_path = os.path.join(args.asset_root, args.scene_path)
    plush_path = os.path.join(args.asset_root, args.plush_path)
    scene = attic_scene(
        scene_path,
        plush_path,
        RESET_STATIC=True,
        RAND_LAYOUT=not args.skip_layout_randomization,
        RAND_LIGHTS=not args.skip_lights_randomization,)
    start_time = time.time()
    # save scene overall info
    with open(os.path.join(info_dir, "scene_meta.json"), 'w') as fp:
        json.dump(scene.get_scene_metadata(), fp)
    # number of resets (ceil division of interactions over reset_every)
    num_resets = (args.num_interaction + args.reset_every - 1) // args.reset_every
    for reset in range(num_resets):
        # save scene reset collider info
        np.savez_compressed(os.path.join(info_dir, f"clutter_info_{reset:04d}.npz"), **scene.get_scene_background_state())
        num_steps = min(args.num_interaction, (reset + 1) * args.reset_every) - reset * args.reset_every
        # per-reset interaction bookkeeping
        actions = {
            'grasp_points':[],
            'target_points':[],
            'grasp_pixels':[],
            'start_frames':[],
            'release_frames':[],
            'static_frames':[], }
        # save start frame
        save_snapshot(scene, reset)
        for interaction in range(num_steps):
            # stop simulating while the next grasp is sampled
            scene.kit.pause()
            action = scene.sample_action()
            if action is None:
                scene.kit.play()
                continue
            grasp_point, target_point, grasp_pixel = action
            actions['grasp_points'].append(np.array(grasp_point,np.float16))
            actions['target_points'].append(np.array(target_point,np.float16))
            actions['grasp_pixels'].append(np.array(grasp_pixel,np.uint16))
            actions['start_frames'].append(np.array(scene.frame,np.uint16))
            save_snapshot(scene, reset)
            scene.kit.play()
            init_traj = scene.gripper.plan_trajectory(scene.gripper.eef_default_loc, grasp_point)
            # move the gripper to the grasp point
            for pos in init_traj:
                scene.step()
                scene.gripper.set_translation(tuple(pos))
                if scene.frame % args.save_every == args.save_every - 1:
                    save_snapshot(scene, reset)
            scene.kit.pause()
            scene.gripper.grasp(scene.plush)
            scene.kit.play()
            traj = scene.gripper.plan_trajectory(grasp_point, target_point)
            # drag the plush toward the target point
            for pos in traj:
                scene.step()
                scene.gripper.set_translation(tuple(pos))
                if scene.frame % args.save_every == args.save_every - 1:
                    save_snapshot(scene, reset)
            # wait until stable
            for ff in range(scene.FALL_MAX):
                scene.step()
                if scene.check_scene_static():
                    print(f"grasp reaching a resting state after {ff} steps")
                    break
            save_snapshot(scene, reset)
            actions['release_frames'].append(np.array(scene.frame,np.uint16))
            # release
            scene.kit.pause()
            scene.gripper.ungrasp()
            # TODO: delete gripper collider
            scene.kit.play()
            for ff in range(scene.FALL_MAX+scene.DROP_MIN):
                scene.step()
                if scene.frame % args.save_every == args.save_every - 1:
                    save_snapshot(scene, reset)
                # always let the plush fall for at least DROP_MIN steps
                if ff < scene.DROP_MIN:
                    continue
                if scene.check_scene_static():
                    print(f"release reaching a resting state after {ff} steps")
                    break
            scene.gripper.reset_translation()
            save_snapshot(scene, reset)
            actions['static_frames'].append(np.array(scene.frame,np.uint16))
        np.savez_compressed(os.path.join(info_dir, f"interaction_info_{reset:04d}.npz"), **actions)
        end_time = time.time()
        from datetime import timedelta
        time_str = str(timedelta(seconds=end_time - start_time))
        print(f'Sampling {num_steps} interactions takes: {time_str}')
        scene.reset()
    # cleanup
    scene.kit.shutdown()
# Script entry point: run dataset generation when executed directly.
if __name__ == "__main__":
    main()
| 8,282 | Python | 43.05851 | 122 | 0.588747 |
NVlabs/ACID/PlushSim/scripts/syntheticdata.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Helper class for obtaining groundtruth data from OmniKit.
Support provided for RGB, Depth, Bounding Box (2D Tight, 2D Loose, 3D),
segmentation (instance and semantic), and camera parameters.
Typical usage example:
kit = OmniKitHelper() # Start omniverse kit
sd_helper = SyntheticDataHelper()
gt = sd_helper.get_groundtruth(('rgb', 'depth', 'boundingBox2DTight'))
"""
import math
import carb
import omni
import time
from pxr import UsdGeom, Semantics, Gf
import numpy as np
class SyntheticDataHelper:
    """Wrapper around omni.syntheticdata for creating sensors and reading
    back groundtruth (RGB, depth, segmentation, bounding boxes, camera
    parameters, and object poses) from a viewport.
    """

    def __init__(self):
        # Enable the synthetic-data extension before importing its modules.
        self.app = omni.kit.app.get_app_interface()
        ext_manager = self.app.get_extension_manager()
        ext_manager.set_extension_enabled("omni.syntheticdata", True)
        from omni.syntheticdata import sensors, helpers
        import omni.syntheticdata._syntheticdata as sd  # Must be imported after getting app interface
        self.sd = sd
        self.sd_interface = self.sd.acquire_syntheticdata_interface()
        self.viewport = omni.kit.viewport.get_viewport_interface()
        self.carb_settings = carb.settings.acquire_settings_interface()
        self.sensor_helper_lib = sensors
        self.generic_helper_lib = helpers
        # NOTE(review): `mode` appears unused below — confirm before removing.
        mode = "numpy"
        # Dispatch table: sensor name -> callable(viewport) returning its data.
        self.sensor_helpers = {
            "rgb": sensors.get_rgb,
            "depth": sensors.get_depth_linear,
            "depthLinear": self.get_depth_linear,
            "instanceSegmentation": sensors.get_instance_segmentation,
            "semanticSegmentation": self.get_semantic_segmentation,
            "boundingBox2DTight": sensors.get_bounding_box_2d_tight,
            "boundingBox2DLoose": sensors.get_bounding_box_2d_loose,
            "boundingBox3D": sensors.get_bounding_box_3d,
            "camera": self.get_camera_params,
            "pose": self.get_pose,
        }
        # Sensor name -> native SensorType used for creation/initialization.
        self.sensor_types = {
            "rgb": self.sd.SensorType.Rgb,
            "depth": self.sd.SensorType.DepthLinear,
            "depthLinear": self.sd.SensorType.DepthLinear,
            "instanceSegmentation": self.sd.SensorType.InstanceSegmentation,
            "semanticSegmentation": self.sd.SensorType.SemanticSegmentation,
            "boundingBox2DTight": self.sd.SensorType.BoundingBox2DTight,
            "boundingBox2DLoose": self.sd.SensorType.BoundingBox2DLoose,
            "boundingBox3D": self.sd.SensorType.BoundingBox3D,
        }
        self.sensor_state = {s: False for s in list(self.sensor_helpers.keys())}

    def get_depth_linear(self, viewport):
        """ Get Depth Linear sensor output.

        Args:
            viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor.

        Return:
            (numpy.ndarray): A float32 array of shape (height, width, 1).
        """
        sensor = self.sensor_helper_lib.create_or_retrieve_sensor(viewport, self.sd.SensorType.DepthLinear)
        data = self.sd_interface.get_sensor_host_float_texture_array(sensor)
        h, w = data.shape[:2]
        return np.frombuffer(data, np.float32).reshape(h, w, -1)

    def get_semantic_segmentation(self, viewport):
        # Collapse instance segmentation into a binary semantic mask:
        # every mapped instance id is labeled 1, background stays 0.
        instance_data, instance_mappings = self.sensor_helpers['instanceSegmentation'](viewport, return_mapping=True)
        ins_to_sem = np.zeros(np.max(instance_data)+1,dtype=np.uint8)
        # Iterate mappings in reverse so earlier entries win on overlap.
        for im in instance_mappings[::-1]:
            for i in im["instanceIds"]:
                # Guard against instance ids beyond the lookup table size.
                if i >= len(ins_to_sem):
                    continue
                ins_to_sem[i] = 1 #if im['semanticLabel'] == 'teddy' else 2
        return np.take(ins_to_sem, instance_data)

    def get_camera_params(self, viewport):
        """Get active camera intrinsic and extrinsic parameters.

        Returns:
            A dict of the active camera's parameters.

            pose (numpy.ndarray): camera position in world coordinates,
            fov (float): horizontal field of view in radians
            focal_length (float)
            horizontal_aperture (float)
            view_projection_matrix (numpy.ndarray(dtype=float64, shape=(4, 4)))
            resolution (dict): resolution as a dict with 'width' and 'height'.
            clipping_range (tuple(float, float)): Near and Far clipping values.
        """
        stage = omni.usd.get_context().get_stage()
        prim = stage.GetPrimAtPath(viewport.get_active_camera())
        prim_tf = UsdGeom.Camera(prim).GetLocalTransformation()
        focal_length = prim.GetAttribute("focalLength").Get()
        horiz_aperture = prim.GetAttribute("horizontalAperture").Get()
        # Horizontal FOV from the pinhole model: 2*atan(aperture / 2f).
        fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
        x_min, y_min, x_max, y_max = viewport.get_viewport_rect()
        width, height = x_max - x_min, y_max - y_min
        aspect_ratio = width / height
        near, far = prim.GetAttribute("clippingRange").Get()
        view_proj_mat = self.generic_helper_lib.get_view_proj_mat(prim, aspect_ratio, near, far)
        return {
            "pose": np.array(prim_tf),
            "fov": fov,
            "focal_length": focal_length,
            "horizontal_aperture": horiz_aperture,
            "view_projection_matrix": view_proj_mat,
            "resolution": {"width": width, "height": height},
            "clipping_range": (near, far),
        }

    def get_pose(self):
        """Get pose of all objects with a semantic label.
        """
        stage = omni.usd.get_context().get_stage()
        mappings = self.generic_helper_lib.get_instance_mappings()
        pose = []
        # Each mapping row: (prim_path, instance id, semantic label, ...).
        for m in mappings:
            prim_path = m[0]
            prim = stage.GetPrimAtPath(prim_path)
            prim_tf = UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(0.0)
            pose.append((str(prim_path), m[1], str(m[2]), np.array(prim_tf)))
        return pose

    async def initialize_async(self, viewport, sensor_types, timeout=10):
        """ Initialize sensors in the list provided.

        Args:
            viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor.
            sensor_types (list of omni.syntheticdata._syntheticdata.SensorType): List of sensor types to initialize.
            timeout (int): Maximum time in seconds to attempt to initialize sensors.

        Raises:
            TimeoutError: If any sensor is still uninitialized after `timeout`.
        """
        start = time.time()
        is_initialized = False
        # Poll once per app frame until every sensor reports initialized.
        while not is_initialized and time.time() < (start + timeout):
            sensors = []
            for sensor_type in sensor_types:
                sensors.append(self.sensor_helper_lib.create_or_retrieve_sensor(viewport, sensor_type))
            await omni.kit.app.get_app_interface().next_update_async()
            is_initialized = not any([not self.sd_interface.is_sensor_initialized(s) for s in sensors])
        if not is_initialized:
            unititialized = [s for s in sensors if not self.sd_interface.is_sensor_initialized(s)]
            raise TimeoutError(f"Unable to initialized sensors: [{unititialized}] within {timeout} seconds.")
        await omni.kit.app.get_app_interface().next_update_async()  # Extra frame required to prevent access violation error

    def get_groundtruth(self, gt_sensors, viewport, verify_sensor_init=True):
        """Get groundtruth from specified gt_sensors.

        Args:
            gt_sensors (list): List of strings of sensor names. Valid sensors names: rgb, depth,
                instanceSegmentation, semanticSegmentation, boundingBox2DTight,
                boundingBox2DLoose, boundingBox3D, camera
            viewport (omni.kit.viewport._viewport.IViewportWindow): Viewport from which to retrieve/create sensor.
            verify_sensor_init (bool): Additional check to verify creation and initialization of sensors.

        Returns:
            Dict of sensor outputs
        """
        if isinstance(gt_sensors, str):
            gt_sensors = (gt_sensors,)

        # Create and initialize sensors
        # NOTE(review): this loop renders frames until every requested sensor
        # initializes — it has no timeout and can spin indefinitely.
        while verify_sensor_init:
            flag = 0
            # Render frame
            self.app.update()
            for sensor_name in gt_sensors:
                if sensor_name != "camera" and sensor_name != "pose":
                    current_sensor = self.sensor_helper_lib.create_or_retrieve_sensor(
                        viewport, self.sensor_types[sensor_name]
                    )
                    if not self.sd_interface.is_sensor_initialized(current_sensor):
                        flag = 1
            # Render frame
            self.app.update()
            self.app.update()
            if flag == 0:
                break

        gt = {}
        sensor_state = {}
        # Process non-RT-only sensors
        for sensor in gt_sensors:
            if sensor not in ["camera", "pose"]:
                if sensor == "instanceSegmentation":
                    gt[sensor] = self.sensor_helpers[sensor](viewport, parsed=True, return_mapping=True)
                elif sensor == "boundingBox3D":
                    gt[sensor] = self.sensor_helpers[sensor](viewport, parsed=True, return_corners=True)
                else:
                    gt[sensor] = self.sensor_helpers[sensor](viewport)
                current_sensor = self.sensor_helper_lib.create_or_retrieve_sensor(viewport, self.sensor_types[sensor])
                current_sensor_state = self.sd_interface.is_sensor_initialized(current_sensor)
                sensor_state[sensor] = current_sensor_state
            else:
                gt[sensor] = self.sensor_helpers[sensor](viewport)
        gt["state"] = sensor_state

        return gt
| 9,968 | Python | 42.532751 | 124 | 0.623596 |
NVlabs/ACID/PlushSim/scripts/attic_scene.py | import os
import cv2
import time
import random
import asyncio
import numpy as np
from python_app import OmniKitHelper
import omni
import carb
from utils import *
# Square render resolution (pixels) used for the viewport and all sensors.
RESOLUTION=720
# specify a custom config
CUSTOM_CONFIG = {
    "width": RESOLUTION,
    "height": RESOLUTION,
    "anti_aliasing": 3,  # 3 for dlss, 2 for fxaa, 1 for taa, 0 to disable aa
    "renderer": "RayTracedLighting",
    "samples_per_pixel_per_frame": 128,
    "max_bounces": 10,
    "max_specular_transmission_bounces": 6,
    "max_volume_bounces": 4,
    "subdiv_refinement_level": 2,
    "headless": True,  # run Kit without a UI window
    "sync_loads": True,
    "experience": f'{os.environ["EXP_PATH"]}/omni.bloky.kit',
}
"""
plush animal material: /Root/physics/stuff_animal
magic gripper: /Root/physics/magic_gripper
real object group: /Root/physics/real_objects
magic object group: /Root/physics/magic_objects
"""
class attic_scene(object):
    def __init__(self,
            SCENE_PATH,
            PLUSH_ANIMAL_PATH,
            PLUSH_SCALE=4,
            FALL_MAX=300,
            REST_THRESHOLD=8,
            PHYSX_DT=1/150.,
            SAVE_EVERY=25,
            DROP_MIN=20,
            RESET_STATIC=True,
            RAND_LAYOUT=True,
            RAND_LIGHTS=True,
            ROBOT_SPEED=1.):
        # Boot OmniKit, load the attic stage, import the plush deformable,
        # set up the gripper and cameras, then perform an initial reset.
        # Promote every constructor argument to a same-named instance attribute.
        for k,v in locals().items():
            if k != 'self':
                self.__dict__[k] = v
        # Well-known prim paths inside the attic stage.
        self.plush_animal_mat = "/Root/physics/stuff_animal"
        self.magic_gripper = "/Root/physics/magic_gripper"
        self.fingerL = "/Root/physics/magic_gripper/fingerL"
        self.fingerR = "/Root/physics/magic_gripper/fingerR"
        self.real_object_group = "/Root/physics/real_objects"
        self.magic_object_group = "/Root/physics/magic_objects"
        self.front_path = "/Root/scene_front"
        self.back_path = "/Root/scene_back"
        # Working volumes (world units); drop_range scales with the plush size.
        self.scene_range = np.array([[-50*12,-50*8,0],[50*12,50*8,50*8]])
        self.drop_range = np.array([[-50*self.PLUSH_SCALE,-50*self.PLUSH_SCALE,],
                                    [50*self.PLUSH_SCALE,50*self.PLUSH_SCALE,]]) #/ 2.
        self.back_clutter_range = np.array([[-50*12,50*8,],[50*12,50*12,]])
        self.total_range = np.array([[-50*12,-50*12,0],[50*12,50*12,50*8]])
        # Start Kit and force-load the physics scene before opening the stage.
        self.kit = OmniKitHelper(CUSTOM_CONFIG)
        self.kit.set_physics_dt(physics_dt=self.PHYSX_DT)
        physx_interface = omni.physx.get_physx_interface()
        physx_interface.force_load_physics_from_usd()
        physx_interface.reset_simulation()
        # Open the attic stage asynchronously, pumping Kit until it is ready.
        async def load_stage(path):
            await omni.usd.get_context().open_stage_async(path)
        setup_task = asyncio.ensure_future(load_stage(SCENE_PATH))
        while not setup_task.done():
            self.kit.update()
        self.kit.setup_renderer()
        self.kit.update()
        self.stage = omni.usd.get_context().get_stage()
        self.front_group = self.stage.GetPrimAtPath(self.front_path)
        self.back_group = self.stage.GetPrimAtPath(self.back_path)
        from syntheticdata import SyntheticDataHelper
        self.sd_helper = SyntheticDataHelper()
        # force RayTracedLighting mode for better performance while simulating physics
        self.kit.set_setting("/rtx/rendermode", "RayTracedLighting")
        # wait until all materials are loaded
        print("waiting for things to load...")
        # if self.kit.is_loading():
        #     time.sleep(10)
        while self.kit.is_loading():
            time.sleep(0.1)
        # set up cameras
        self._setup_cameras()
        _viewport_api = omni.kit.viewport.get_viewport_interface()
        viewport = _viewport_api.get_instance_list()[0]
        self._viewport = _viewport_api.get_viewport_window(viewport)
        # touch the sensors to kick in anti-aliasing
        for _ in range(20):
            _ = self.sd_helper.get_groundtruth(
                [ "rgb","depth","instanceSegmentation","semanticSegmentation",], self._viewport)
        # set up objects
        self._import_plush_animal(PLUSH_ANIMAL_PATH)
        self._setup_robots()
        # # start off Omniverse
        self.kit.play()
        # store original sim and vis points for reset
        self.sim_og_pts, self.vis_og_pts = self._get_plush_points()
        # # stop Omniverse
        # self.kit.pause()
        # reset the scene
        self.frame = 0
        self.reset()
def step(self):
self.kit.update(self.PHYSX_DT)
self.frame += 1
return self.frame
def sample_action(self, grasp_point=None):
if grasp_point is None:
gt = self.sd_helper.get_groundtruth(
[ "rgb","depth","instanceSegmentation","semanticSegmentation",], self._viewport)
pts = get_partial_point_cloud(self._viewport, project_factor=100.)
semseg = gt['semanticSegmentation']
kernel = np.ones((2,2), np.uint8)
semseg = cv2.erode(semseg, kernel, iterations=1)
plush_pts = np.where(semseg == 1)
if len(plush_pts[0]) == 0:
return None
idx = random.randint(0,len(plush_pts[0])-1)
grasp_pixel = (plush_pts[0][idx], plush_pts[1][idx])
grasp_point = tuple(pts[grasp_pixel[0], grasp_pixel[1],:])
else:
grasp_pixel = None
target_point = self._sample_displacement_vector(grasp_point)
if target_point is None:
return None
return grasp_point, target_point, grasp_pixel
    def reset(self):
        # Stop simulation, restore the plush to its original geometry, drop it
        # from a random orientation, re-randomize the scene, and (optionally)
        # simulate until everything comes to rest. Leaves self.frame == 0.
        self.kit.stop()
        from pxr import Gf
        self.frame = 0
        print("Reseting plush geometry...")
        self._reset_plush_geometry(self.sim_og_pts, self.vis_og_pts)
        print("Finished reseting plush geometry...")
        # randonly drop the plush into the scene
        print("Reseting plush translation...")
        self.plush_translateOp.Set(Gf.Vec3f((0.,0.,250.)))
        print("Reseting plush rotation...")
        def randrot():
            # Uniform angle in [0, 360) degrees.
            return random.random() * 360.
        rotx,roty,rotz = randrot(), randrot(), randrot()
        self.plush_rotationOp.Set(rpy2quat(rotx,roty,rotz))
        print("Finished reseting plush pose...")
        print("Reseting scene...")
        self._randomize_scene()
        print("Finished reseting scene...")
        self.kit.play()
        # wait until stable
        if self.RESET_STATIC:
            print("Waiting to reach stable...")
            # Let the plush fall for DROP_MIN steps before testing for rest.
            for _ in range(self.DROP_MIN):
                self.step()
            for ff in range(self.FALL_MAX*6):
                self.step()
                if self.check_scene_static():
                    print(f"Initial configuration becomes static after {ff} steps")
                    break
        print("Reset Finished")
        self.frame = 0
def reset_to(self, state):
self.kit.stop()
loc = state['loc']
rot = state['rot']
sim = state['sim']
vis = state['vis']
self._reset_plush_geometry(sim, vis)
self.plush_translateOp.Set(loc)
self.plush_rotationOp.Set(rot)
self.kit.play()
def check_scene_static(self):
_,_,_,v = self._get_object_velocity_stats()
return v < self.REST_THRESHOLD
def get_scene_metadata(self):
from pxr import PhysxSchema
sbAPI = PhysxSchema.PhysxDeformableAPI(self.plush)
faces = sbAPI.GetSimulationIndicesAttr().Get()
return {'plush_path': self.PLUSH_ANIMAL_PATH,
'sim_faces':np.array(faces, int).tolist(),
'sim_pts':np.array(self.sim_og_pts, np.float16).tolist(),
'vis_pts':np.array(self.vis_og_pts, np.float16).tolist(),
'scene_range': self.scene_range.tolist(),
'back_clutter_range': self.back_clutter_range.tolist(),
'cam_info': self._get_camera_info()}
# background state is different per reset
def get_scene_background_state(self):
collider = {}
for p in find_immediate_children(self.front_group):
name = str(p.GetPath()).split("/")[-1]
e,f = find_collider(p)
collider[f"{name}_box"] = e
collider[f"{name}_tran"] = f
for p in find_immediate_children(self.back_group):
name = str(p.GetPath()).split("/")[-1]
e,f = find_collider(p)
collider[f"{name}_box"] = e
collider[f"{name}_tran"] = f
return collider
def get_scene_state_plush(self,raw=False,convert_to=None):
sim,vis = self._get_plush_points()
loc,rot,scale = self._get_plush_loc(),self._get_plush_rot(),self._get_plush_scale()
if not raw:
loc,rot,scale = tuple(loc),eval(str(rot)),tuple(scale)
state = {'sim':sim, 'vis':vis,
'loc':loc, 'rot':rot, 'scale':scale}
if convert_to is not None:
for k,v in state.items():
state[k] = np.array(v, convert_to)
return state
def get_observations(self,
sensors=["rgb","depth",
# "instanceSegmentation",
"semanticSegmentation",],
partial_pointcloud=False):
frame = self.sd_helper.get_groundtruth(sensors, self._viewport)
gt = {}
gt['rgb_img'] = frame['rgb'][:,:,:-1]
gt['seg_img'] = frame['semanticSegmentation']
gt['dep_img'] = frame['depth'].squeeze()
if partial_pointcloud:
gt['pxyz'] = get_partial_point_cloud(self._viewport, project_factor=100.)
return gt
################################################################
#
# Below are "private" functions ;)
#
################################################################
def _import_plush_animal(self, usda_path):
from omni.physx.scripts import physicsUtils
mesh_name = usda_path.split('/')[-1].split('.')[0]
from pxr import PhysxSchema,UsdGeom,UsdShade,Semantics
###################
# import object
abspath = carb.tokens.get_tokens_interface().resolve(usda_path)
physics_root = "/Root"
assert self.stage.DefinePrim(physics_root+f"/{mesh_name}").GetReferences().AddReference(abspath)
self.mesh_path = f"{physics_root}/{mesh_name}/{mesh_name}_obj/mesh"
self.plush= self.stage.GetPrimAtPath(self.mesh_path)
###################
# add deformable property
schema_parameters = {
"self_collision": True,
"vertex_velocity_damping": 0.005,
"sleep_damping": 10,
"sleep_threshold": 5,
"settling_threshold": 11,
"solver_position_iteration_count": 60,
"collisionRestOffset": 0.1,
"collisionContactOffset": 0.5,
"voxel_resolution": 45,
}
skin_mesh = UsdGeom.Mesh.Get(self.stage, self.mesh_path)
skin_mesh.AddTranslateOp().Set(Gf.Vec3f(0.0, 0.0, 300.0))
skin_mesh.AddOrientOp().Set(Gf.Quatf(0.707, 0.707, 0, 0))
skin_points = skin_mesh.GetPointsAttr().Get()
skin_indices = physicsUtils.triangulateMesh(skin_mesh)
# Create tet meshes for simulation and collision based on the skin mesh
simulation_resolution = schema_parameters["voxel_resolution"]
skin_mesh_scale = Gf.Vec3f(1.0, 1.0, 1.0)
collision_points, collision_indices = physicsUtils.create_conforming_tetrahedral_mesh(skin_points, skin_indices)
simulation_points, simulation_indices = physicsUtils.create_voxel_tetrahedral_mesh(collision_points, collision_indices, skin_mesh_scale, simulation_resolution)
# Apply PhysxDeformableBodyAPI and PhysxCollisionAPI to skin mesh and set parameter and tet meshes
deformable_body_api = PhysxSchema.PhysxDeformableBodyAPI.Apply(skin_mesh.GetPrim())
deformable_body_api.CreateSolverPositionIterationCountAttr().Set(schema_parameters['solver_position_iteration_count'])
deformable_body_api.CreateSelfCollisionAttr().Set(schema_parameters['self_collision'])
deformable_body_api.CreateCollisionIndicesAttr().Set(collision_indices)
deformable_body_api.CreateCollisionRestPointsAttr().Set(collision_points)
deformable_body_api.CreateSimulationIndicesAttr().Set(simulation_indices)
deformable_body_api.CreateSimulationRestPointsAttr().Set(simulation_points)
deformable_body_api.CreateVertexVelocityDampingAttr().Set(schema_parameters['vertex_velocity_damping'])
deformable_body_api.CreateSleepDampingAttr().Set(schema_parameters['sleep_damping'])
deformable_body_api.CreateSleepThresholdAttr().Set(schema_parameters['sleep_threshold'])
deformable_body_api.CreateSettlingThresholdAttr().Set(schema_parameters['settling_threshold'])
PhysxSchema.PhysxCollisionAPI.Apply(skin_mesh.GetPrim())
###################
# add deformable material
def add_physics_material_to_prim(stage, prim, materialPath):
bindingAPI = UsdShade.MaterialBindingAPI.Apply(prim)
materialPrim = UsdShade.Material(stage.GetPrimAtPath(materialPath))
bindingAPI.Bind(materialPrim, UsdShade.Tokens.weakerThanDescendants, "physics")
add_physics_material_to_prim(self.stage, self.plush, self.plush_animal_mat)
###################
# add collision group
physicsUtils.add_collision_to_collision_group(self.stage, self.mesh_path, self.real_object_group)
###################
# add semantic info
sem = Semantics.SemanticsAPI.Apply(self.stage.GetPrimAtPath(self.mesh_path), "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set("plush")
###################
# standarize transform
physicsUtils.setup_transform_as_scale_orient_translate(self.plush)
xform = UsdGeom.Xformable(self.plush)
ops = xform.GetOrderedXformOps()
self.plush_translateOp = ops[0]
self.plush_rotationOp = ops[1]
self.plush_scaleOp = ops[2]
scale_factor = self.PLUSH_SCALE
self.plush_scaleOp.Set((scale_factor,scale_factor,scale_factor))
def _get_object_velocity_stats(self):
from pxr import PhysxSchema
sbAPI = PhysxSchema.PhysxDeformableAPI(self.plush)
velocity = np.array(sbAPI.GetSimulationVelocitiesAttr().Get())
vnorm = np.linalg.norm(velocity, axis=1)
return np.percentile(vnorm, [0,50,90,99])
def _setup_robots(self):
actor = self.stage.GetPrimAtPath(self.magic_gripper)
fingerL = self.stage.GetPrimAtPath(self.fingerL)
fingerR = self.stage.GetPrimAtPath(self.fingerR)
self.gripper = magic_eef(actor,
self.stage,
eef_default_loc=(0.,0.,600.),
default_speed=self.ROBOT_SPEED,
fingerL=fingerL,
fingerR=fingerR)
def _setup_cameras(self):
from pxr import UsdGeom
stage = omni.usd.get_context().get_stage()
# Need to set this before setting viewport window size
carb.settings.acquire_settings_interface().set_int("/app/renderer/resolution/width", -1)
carb.settings.acquire_settings_interface().set_int("/app/renderer/resolution/height", -1)
viewport_window = omni.kit.viewport.get_default_viewport_window()
viewport_window.set_active_camera("/Root/cam_light/Camera")
viewport_window.set_texture_resolution(RESOLUTION,RESOLUTION)
viewport_window.set_window_size(RESOLUTION, RESOLUTION)
    def _get_plush_loc(self):
        # Current value of the plush translate xform op.
        return self.plush_translateOp.Get()
    def _get_plush_rot(self):
        # Current value of the plush orientation xform op.
        return self.plush_rotationOp.Get()
    def _get_plush_scale(self):
        # Current value of the plush scale xform op.
        return self.plush_scaleOp.Get()
def _get_plush_points(self):
from pxr import PhysxSchema
sbAPI = PhysxSchema.PhysxDeformableBodyAPI(self.plush)
sim = sbAPI.GetSimulationPointsAttr().Get()
mesh = UsdGeom.Mesh(self.plush)
vis = mesh.GetPointsAttr().Get()
return sim, vis
def _get_camera_info(self):
cam_info = {}
camera_pose, camera_intr = get_camera_params(self._viewport)
cam_name = get_camera_name(self._viewport)
cam_info[cam_name] = [camera_pose.tolist(), camera_intr.tolist()]
return cam_info
    def _randomize_collection(self, collection_prim, scene_range, drop_range=None, rand_rot=True, padding=True):
        """Randomly re-place the children of `collection_prim` inside `scene_range`
        without 2D overlaps, then apply the translations (and optional random yaw)
        to their xform ops.

        Args:
            collection_prim: parent prim whose immediate children are shuffled.
            scene_range: 2x2 array [[min_x, min_y], [max_x, max_y]] of allowed area.
            drop_range: optional region pre-marked as occupied on the canvas.
            rand_rot: also randomize each object's yaw.
            padding: inflate each footprint to a scaled square before overlap tests.
        """
        extents,objs = [],[]
        for p in find_immediate_children(collection_prim):
            objs.append(str(p.GetPath()))
            extent, transform = find_collider(p)
            extents.append(transform_verts(extent, transform))
        # drop the z column: placement is planned purely in 2D
        objects = [standardize_bbox(bbox) for bbox in np.array(extents)[:,:,:-1]]
        canvas = get_canvas(scene_range)
        if drop_range is not None:
            fill_canvas(canvas, scene_range, drop_range)
        translations = []
        for b,n in zip(objects,objs):
            # up to 3 rejection-sampling attempts per object
            for _ in range(3):
                t = sample_bbox_translation(b, scene_range)
                if padding:
                    tb = scale(pad_to_square(b + t))
                else:
                    tb = b + t
                if not overlaps_with_current(canvas, scene_range, tb):
                    fill_canvas(canvas, scene_range, tb)
                    translations.append((n,t))
                    break
            # no free spot found: park the object far off-scene (y = -2000)
            if len(translations) == 0 or translations[-1][0] != n:
                translations.append((n,np.array([0,-2000])))
        def randrot():
            return random.random() * 360.
        from pxr import UsdGeom
        from omni.physx.scripts import physicsUtils
        for n,t in translations:
            xform = UsdGeom.Xformable(self.stage.GetPrimAtPath(n))
            # normalize the op order to scale/orient/translate before editing
            physicsUtils.setup_transform_as_scale_orient_translate(xform)
            ops = xform.GetOrderedXformOps()
            translateOp = ops[0]
            # translations are 2D; append 0 for the unchanged third axis
            translateOp.Set(tuple(np.array(tuple(translateOp.Get())) + np.append(t, 0)))
            if rand_rot:
                orientOp = ops[1]
                orientOp.Set(rpy2quat(0,0,randrot()))
def _randomize_lighting(self):
domelight = self.stage.GetPrimAtPath("/Root/cam_light/Lights/DomeLight")
light = self.stage.GetPrimAtPath("/Root/cam_light/Lights/DistantLight")
light1 = self.stage.GetPrimAtPath("/Root/cam_light/Lights/DistantLight_01")
temp = np.random.rand(1)[0] * 5000 + 2500
domelight.GetAttribute('colorTemperature').Set(temp)
light.GetAttribute('colorTemperature').Set(temp)
light1.GetAttribute('colorTemperature').Set(temp)
int_range = 10000
int_min = 2500
for l in [domelight, light, light1]:
intensity = np.random.rand(1)[0] * int_range + int_min
l.GetAttribute('intensity').Set(intensity)
def _randomize_scene(self):
if self.RAND_LAYOUT:
# randomize front scene
self._randomize_collection(self.front_group, self.scene_range[:,:-1], self.drop_range)
# randomize back scene
self._randomize_collection(self.back_group, self.back_clutter_range,rand_rot=False, padding=False)
if self.RAND_LIGHTS:
# randomize lights
self._randomize_lighting()
def _get_2d_layout_occupancy_map(self):
extents = []
for p in find_immediate_children(self.front_group):
extent, transform = find_collider(p)
extents.append(transform_verts(extent, transform))
for p in find_immediate_children(self.back_group):
extent, transform = find_collider(p)
extents.append(transform_verts(extent, transform))
objects = [standardize_bbox(bbox) for bbox in np.array(extents)[:,:,:-1]]
#canvas = get_canvas(self.scene_range[:,:-1])
canvas = get_canvas(self.total_range[:,:-1])
for b in objects:
fill_canvas(canvas, self.total_range[:,:-1], b)
return canvas
    def _sample_displacement_vector(self, grasp_point):
        """Sample a collision-free target point to drag the grasped point to.

        Draws candidate directions and a clipped-normal move length, then keeps
        a candidate whose straight-line path stays inside the workspace and off
        occupied cells. Returns a 3D target point, or None after 10 failed rounds.
        """
        sampled_for = 0
        # move-length distribution parameters (workspace units)
        mean_len = 160
        std_len = 80
        max_len = 240
        min_len = 80
        canvas = self._get_2d_layout_occupancy_map()
        while(True):
            sampled_for = sampled_for + 1
            move_len = np.clip(np.random.normal(loc=mean_len,scale=std_len), min_len, max_len)
            # 100 candidate unit directions (z-up hemisphere)
            move_dir = sample_direction_zup(100).squeeze()
            #move_dir[1,:] = np.abs(move_dir[1,:])
            move_vec = move_dir * move_len
            target_pts = grasp_point + move_vec.T
            in_world = np.logical_and(
                target_pts > self.total_range[0],
                target_pts < self.total_range[1]).all(axis=1)
            occupancies = []
            try:
                # assure that no obstacle is in path for length times 1.3
                for i in range(int(max_len*1.3)):
                    temp = grasp_point + (target_pts - grasp_point) / max_len * i
                    # NOTE(review): these two lines clip target_pts, not temp, so temp is
                    # overwritten and every step tests (nearly) the same point — looks like
                    # a bug; the intent was presumably np.clip(temp[:,0], ...). Confirm.
                    temp[:,0] = np.clip(target_pts[:,0], self.total_range[0,0], self.total_range[1,0])
                    temp[:,1] = np.clip(target_pts[:,1], self.total_range[0,1], self.total_range[1,1])
                    occupancies.append(get_occupancy_value(
                        canvas, self.total_range[:,:-1], temp[:,:-1]))
                path_no_collision = (np.array(occupancies) == 0).all(axis=0)
                viable = np.logical_and(in_world, path_no_collision)
                in_idx = np.nonzero(viable)[0]
            except:
                # out-of-grid lookups raise IndexError; treat the round as failed
                continue
            if len(in_idx) > 0:
                target_point = target_pts[np.random.choice(in_idx)]
                return target_point
            else:
                if sampled_for > 10:
                    break
        return None
    def _reset_plush_geometry(self, sim, vis):
        """Restore the plush to a saved geometric state and zero its velocities.

        Args:
            sim: simulation (tet) points to restore.
            vis: visual render-mesh points to restore.
        """
        from pxr import PhysxSchema, Gf, Vt
        # reset simulation points
        sbAPI = PhysxSchema.PhysxDeformableBodyAPI(self.plush)
        sbAPI.GetSimulationPointsAttr().Set(sim)
        # reset simulation points velocity
        sbAPI = PhysxSchema.PhysxDeformableAPI(self.plush)
        velocity = np.array(sbAPI.GetSimulationVelocitiesAttr().Get())
        zero_velocity = np.zeros_like(velocity)
        velocity_vec = Vt.Vec3fArray([Gf.Vec3f(tuple(m)) for m in zero_velocity])
        sbAPI.GetSimulationVelocitiesAttr().Set(velocity_vec)
        # reset visual points
        # NOTE(review): UsdGeom is not imported locally here (unlike sibling
        # methods); presumably imported at module scope — verify.
        mesh = UsdGeom.Mesh(self.plush)
        mesh.GetPointsAttr().Set(vis)
NVlabs/ACID/PlushSim/scripts/utils.py | import os
import math
import omni
import numpy as np
from PIL import Image
from pxr import UsdGeom, Usd, UsdPhysics, Gf
import matplotlib.pyplot as plt
################################################################
# State Saving Utils
# (Geometry)
################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
    """Apply a 4x4 camera-to-world pose to an (N, 3) array of camera-frame points."""
    rotation = camera_pose[0:3, 0:3]
    translation = camera_pose[0:3, 3:]
    return (rotation @ cam_pts.T + translation).T
def project_depth_world_space(depth_image, camera_intr, camera_pose, project_factor=1.):
    """Back-project a depth image into an (H, W, 3) grid of world-space points."""
    cam_pts = project_depth_cam_space(
        depth_image, camera_intr, keep_dim=False, project_factor=project_factor)
    world_flat = transform_points_cam_to_world(cam_pts, camera_pose)
    rows, cols = depth_image.shape
    return world_flat.reshape([rows, cols, 3])
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
    """Back-project a depth image into camera-space 3D points.

    Args:
        depth_img: (H, W) depth map.
        camera_intrinsics: 3x3 matrix; only the focal lengths are used and the
            principal point is taken to be the image center.
        keep_dim: if True return (H, W, 3), otherwise (H*W, 3).
        project_factor: uniform scale applied to the result.
    """
    im_h, im_w = depth_img.shape[0], depth_img.shape[1]
    pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w),
                               np.linspace(0, im_h - 1, im_h))
    # x uses a negated depth scale, matching the renderer's camera convention
    x = (pix_x - im_w / 2.) * (-depth_img / camera_intrinsics[0, 0])
    y = (pix_y - im_h / 2.) * (depth_img / camera_intrinsics[1, 1])
    z = depth_img.copy()
    pts = np.stack([x.ravel(), y.ravel(), z.ravel()], axis=1) * project_factor
    if keep_dim:
        pts = pts.reshape([im_h, im_w, 3])
    return pts
def get_camera_params(viewport):
    """Return (4x4 camera pose, 3x3 intrinsics) for the viewport's active camera.

    The focal length in pixels is derived from the camera's physical focal
    length and horizontal aperture; the principal point is the image center.
    """
    stage = omni.usd.get_context().get_stage()
    prim = stage.GetPrimAtPath(viewport.get_active_camera())
    prim_tf = np.array(UsdGeom.Camera(prim).GetLocalTransformation())
    focal_length = prim.GetAttribute("focalLength").Get()
    horiz_aperture = prim.GetAttribute("horizontalAperture").Get()
    # pinhole horizontal FOV from the physical camera parameters
    fov = 2 * math.atan(horiz_aperture / (2 * focal_length))
    image_w, image_h = viewport.get_texture_resolution()
    camera_focal_length = (float(image_w) / 2) / np.tan(fov/ 2)
    # NOTE(review): image_h is used for the x principal point and image_w for y —
    # harmless for square viewports; verify for non-square resolutions.
    cam_intr = np.array(
        [[camera_focal_length, 0, float(image_h) / 2],
         [0, camera_focal_length, float(image_w) / 2],
         [0, 0, 1]])
    # transposed pose — presumably converting USD's row-vector convention; confirm
    return prim_tf.T, cam_intr
def get_partial_point_cloud(viewport, in_world_space=True, project_factor=1.):
    """Capture the viewport's linear depth sensor and back-project it into a
    point cloud (world- or camera-space)."""
    from omni.syntheticdata import sensors
    raw = sensors.get_depth_linear(viewport)
    height, width = raw.shape[:2]
    # depth arrives negated; flip the sign while reinterpreting the buffer
    depth = -np.frombuffer(raw, np.float32).reshape(height, width, -1)
    pose, intrinsics = get_camera_params(viewport)
    if in_world_space:
        return project_depth_world_space(depth.squeeze(), intrinsics, pose,
                                         project_factor=project_factor)
    return project_depth_cam_space(depth.squeeze(), intrinsics,
                                   project_factor=project_factor)
def export_visual_mesh(prim, export_path, loc=None, rot=None, binarize=True):
    """Export the visual (render) mesh of a UsdGeom.Mesh prim.

    Args:
        prim: a UsdGeom.Mesh prim.
        export_path: destination; extension is replaced by .npy when binarize.
        loc, rot: unused here — presumably reserved for posed export; verify callers.
        binarize: True -> vertices only as float16 .npy; False -> textured OBJ.
    """
    assert prim.IsA(UsdGeom.Mesh), "prim needs to be a UsdGeom.Mesh"
    mesh = UsdGeom.Mesh(prim)
    points = mesh.GetPointsAttr().Get()
    if binarize:
        path = os.path.splitext(export_path)[0]+'.npy'
        np.save(path, np.array(points, np.float16))
    else:
        print(export_path)
        # OBJ indices are 1-based
        faces = np.array(mesh.GetFaceVertexIndicesAttr().Get()).reshape(-1,3) + 1
        uv = mesh.GetPrimvar("st").Get()
        with open(export_path, "w") as fp:
            # hard-coded material header for the teddy asset
            fp.write("mtllib teddy.mtl\nusemtl Material.004\n")
            for x,y,z in points:
                fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
            # NOTE(review): '{u:=.4f}' uses '=' as fill/align — looks like a typo
            # for '{u:.4f}'; confirm intended OBJ output.
            for u,v in uv:
                fp.write(f"vt {u:=.4f} {v:.4f}\n")
            for i, (x,y,z) in enumerate(faces):
                fp.write(f"f {x}/{i*3+1} {y}/{i*3+2} {z}/{i*3+3}\n")
def get_sim_points(prim, loc=None, rot=None):
    """Return the deformable-body simulation points, optionally rotated then translated.

    Args:
        prim: prim carrying PhysxSchema.PhysxDeformableBodyAPI.
        loc: optional translation applied after rotation.
        rot: optional quaternion in (w, x, y, z) order; parsed via eval(str(rot))
            because it arrives as a Gf quaternion whose repr is a tuple-like literal.
    """
    from pxr import PhysxSchema
    sbAPI = PhysxSchema.PhysxDeformableBodyAPI(prim)
    points = sbAPI.GetSimulationPointsAttr().Get()
    if rot is not None:
        points = np.array(points)
        # NOTE(review): eval(str(...)) on the quaternion is fragile; prefer
        # explicit component access if the Gf API allows it.
        w,x,y,z = eval(str(rot))
        from scipy.spatial.transform import Rotation
        # scipy expects (x, y, z, w) ordering
        rot = Rotation.from_quat(np.array([x,y,z,w]))
        points = rot.apply(points)
    if loc is not None:
        loc = np.array(tuple(loc))
        points = points + loc
    return points
def get_sim_faces(prim):
    """Return the simulation index buffer of a deformable prim."""
    from pxr import PhysxSchema
    deformable = PhysxSchema.PhysxDeformableAPI(prim)
    return deformable.GetSimulationIndicesAttr().Get()
def export_simulation_voxels(prim, export_path, binarize=True, export_faces=False):
    """Export a deformable prim's simulation points (and optionally its indices).

    Args:
        prim: deformable-body prim.
        export_path: destination; extension replaced by .npy/.npz when binarize.
        binarize: True -> numpy dump (float16 points); False -> OBJ-style text.
        export_faces: also export the simulation index buffer.
    """
    points = get_sim_points(prim)
    if export_faces:
        faces = get_sim_faces(prim)
    if binarize:
        path = os.path.splitext(export_path)[0]+'.npy'
        if export_faces:
            # np.savez appends .npz to the path itself
            np.savez(path, points=np.array(points, np.float16), faces=np.array(faces, int))
        else:
            np.save(path, np.array(points, np.float16))
    else:
        with open(export_path, 'w') as fp:
            for p in points:
                fp.write(f"v {p[0]:.3f} {p[1]:.3f} {p[2]:.3f}\n")
            if export_faces:
                # indices come as flat quadruples; 1-based for OBJ-like output
                faces = np.array(faces, int).reshape([-1,4]) + 1
                for f in faces:
                    fp.write(f"f {f[0]} {f[1]} {f[2]} {f[3]}\n")
def visualize_sensors(gt, save_path):
    """Render a 1x3 summary figure (RGB / depth / semantic seg) and save it.

    Args:
        gt: dict with keys 'rgb', 'depth', 'semanticSegmentation'.
        save_path: output image path.
    """
    from omni.syntheticdata import visualize
    # GROUNDTRUTH VISUALIZATION
    # Setup a figure
    fig, axes = plt.subplots(1, 3, figsize=(20, 6))
    axes = axes.flat
    for ax in axes:
        ax.axis("off")
    # RGB
    axes[0].set_title("RGB")
    # NOTE(review): RGB is drawn on axes[0] and axes[1]; axes[1] is then
    # overdrawn by the depth image while the seg overlay on axes[2] has no RGB
    # underneath — possibly the loop was meant to cover axes[0] and axes[2].
    for ax in axes[:-1]:
        ax.imshow(gt["rgb"])
    # DEPTH
    axes[1].set_title("Depth")
    depth_data = np.clip(gt["depth"], 0, 255)
    axes[1].imshow(visualize.colorize_depth(depth_data.squeeze()))
    # SEMSEG
    axes[2].set_title("Semantic Segmentation")
    semantic_seg = gt["semanticSegmentation"]
    semantic_rgb = visualize.colorize_segmentation(semantic_seg)
    axes[2].imshow(semantic_rgb, alpha=0.7)
    # Save figure
    fig.savefig(save_path)
    plt.close(fig)
def save_frame(frame_name, frame_data, save_dir,
               save_rgb=True, save_seg=True, save_depth=True, save_partial_pointcloud=False):
    """Write one captured frame (RGB / binary seg mask / 16-bit depth) to disk.

    Args:
        frame_name: stem used in the output filenames.
        frame_data: dict with 'rgb_img', 'seg_img', 'dep_img' entries.
        save_dir: destination directory.
        save_partial_pointcloud: accepted for API symmetry but unused here.
    """
    if save_rgb:
        rgb = frame_data['rgb_img']
        Image.fromarray(rgb).save(f"{save_dir}/rgb_{frame_name}.jpg")
    if save_seg:
        seg= frame_data['seg_img']
        # mask -> 0/255 three-channel image
        sem = np.tile(seg[:,:,np.newaxis], (1,1,3)).astype(np.uint8) * 255
        Image.fromarray(sem).save(f"{save_dir}/seg_{frame_name}.jpg")
    if save_depth:
        # depth scaled by 1000 (presumably m -> mm; confirm units) into 16-bit PNG
        depth_img = Image.fromarray((frame_data['dep_img'].squeeze() * 1000).astype(np.uint16), mode='I;16').convert(mode='I')
        depth_img.save(f"{save_dir}/depth_{frame_name}.png")
def save_state(state_name, state_data, save_dir):
    """Serialize one simulation state (loc, rot, sim pts, vis pts) to an .npz file."""
    loc, rot, sim, vis = state_data
    arrays = {
        'loc': np.array(tuple(loc)),
        # rot's repr is a tuple-like literal, so eval(str(...)) recovers the components
        'rot': np.array(eval(str(rot))),
        'sim': np.array(sim),
        'vis': np.array(vis),
    }
    np.savez(f"{save_dir}/state_{state_name}.npz", **arrays)
################################################################
# Interaction Utils
################################################################
def sample_pick_point(partial_point_cloud, segmentation):
    """Validate that the point-cloud image and segmentation mask have equal size.

    NOTE(review): as written this function only asserts and implicitly returns
    None — the actual pick-point sampling appears to be missing or truncated;
    confirm against the original source.
    """
    im_h = segmentation.shape[0]
    im_w = segmentation.shape[1]
    # point cloud "image" height and width
    pc_h = partial_point_cloud.shape[0]
    pc_w = partial_point_cloud.shape[1]
    assert im_h == pc_h and im_w == pc_w, "partial_point_cloud dimension should match with that of segmentation mask"
def sample_spherical(npoints, ndim=3):
    """Sample `npoints` directions uniformly on the unit (ndim-1)-sphere.

    Returns an (ndim, npoints) array of unit column vectors.
    """
    gaussian = np.random.randn(ndim, npoints)
    return gaussian / np.linalg.norm(gaussian, axis=0)
def sample_direction(npoints):
    """Sample unit directions in a y-up frame.

    Azimuth is drawn from a zero-mean normal scaled by 2*pi; elevation is a
    clipped normal so that theta stays in [pi/6, pi/2] (y = cos(theta) >= 0).
    Returns a (3, npoints) array of [x, y, z] columns.
    """
    phi = np.random.randn(npoints) * 2 * np.pi
    theta = np.clip(
        np.random.normal(loc=np.pi / 4., scale=np.pi / 12., size=npoints),
        np.pi / 6., np.pi / 2.)
    sin_theta = np.sin(theta)
    return np.vstack([np.cos(phi) * sin_theta,
                      np.cos(theta),
                      np.sin(phi) * sin_theta])
def sample_direction_zup(npoints):
    """Sample unit directions in a z-up frame.

    Azimuth is drawn from a zero-mean normal scaled by 2*pi; elevation is a
    clipped normal so that theta stays in [pi/6, pi/2] (z = cos(theta) >= 0).
    Returns a (3, npoints) array of [x, y, z] columns.
    """
    phi = np.random.randn(npoints) * 2 * np.pi
    theta = np.clip(
        np.random.normal(loc=np.pi / 4., scale=np.pi / 12., size=npoints),
        np.pi / 6., np.pi / 2.)
    sin_theta = np.sin(theta)
    return np.vstack([np.cos(phi) * sin_theta,
                      np.sin(phi) * sin_theta,
                      np.cos(theta)])
def interpolate(start_loc, end_loc, speed):
    """Linearly interpolate between two points in steps of roughly `speed`.

    Args:
        start_loc, end_loc: array-likes of the same dimension.
        speed: desired distance covered per step (> 0).

    Returns:
        (chunks + 1, D) array whose first row is start_loc and last row is
        end_loc, with chunks = floor(dist / speed) clamped to at least 1.

    Fix: the original computed chunks = dist // speed without a lower bound,
    so any move shorter than `speed` divided by zero and produced NaNs; such
    moves now simply return [start_loc, end_loc].
    """
    start_loc = np.array(start_loc)
    end_loc = np.array(end_loc)
    dist = np.linalg.norm(end_loc - start_loc)
    chunks = max(dist // speed, 1)
    return start_loc + np.outer(np.arange(chunks + 1, dtype=float),
                                (end_loc - start_loc) / chunks)
class magic_eef(object):
    """A "magic" (kinematic, physics-free) end effector.

    The gripper is moved by directly authoring its translate/orient xform ops,
    and grasps deformables by creating a soft-body/rigid attachment rather than
    by simulating contact.
    """
    def __init__(self, end_effector, stage, eef_default_loc=None, default_speed=1,
                 fingerL=None, fingerR=None):
        """
        Args:
            end_effector: gripper body prim; must be an Xformable whose first
                xform op is translate and whose second is orient.
            stage: the USD stage the prims live on.
            eef_default_loc: home translation of the gripper.
            default_speed: step length used by plan_trajectory.
            fingerL / fingerR: optional finger prims; only their first
                (translate) xform op is driven.
        """
        self.end_effector = end_effector
        self.eef_default_loc = eef_default_loc
        self.default_speed = default_speed
        self.stage = stage
        xform = UsdGeom.Xformable(end_effector)
        self.ops = xform.GetOrderedXformOps()
        assert self.ops[0].GetOpType() == UsdGeom.XformOp.TypeTranslate,\
            "Code is based on UsdGeom.Xformable with first op as translation"
        assert self.ops[1].GetOpType() == UsdGeom.XformOp.TypeOrient,\
            "Code is based on UsdGeom.Xformable with second op as orientation"
        self.attachmentPath = None
        self.set_translation(eef_default_loc)
        self.fingerL=fingerL
        if fingerL is not None:
            xform = UsdGeom.Xformable(fingerL)
            self.fingerL_ops = xform.GetOrderedXformOps()[0]
            self.fingerL_ops.Set((-5,0,20))
        self.fingerR=fingerR
        if fingerR is not None:
            xform = UsdGeom.Xformable(fingerR)
            self.fingerR_ops = xform.GetOrderedXformOps()[0]
            # Bug fix: the original set fingerL_ops here (copy-paste error),
            # leaving the right finger unpositioned at init.
            self.fingerR_ops.Set((5,0,20))
    def get_translation(self):
        """Current gripper translation."""
        return self.ops[0].Get()
    def set_translation(self, loc):
        """Teleport the gripper to `loc` (no physics involved)."""
        self.ops[0].Set(loc)
    def reset_translation(self):
        """Return the gripper to its home location."""
        self.set_translation(self.eef_default_loc)
    def get_orientation(self):
        """Current gripper orientation quaternion."""
        return self.ops[1].Get()
    def set_orientation(self, rot):
        """Set the gripper orientation quaternion."""
        self.ops[1].Set(rot)
    def grasp(self, target_object):
        """Attach `target_object` (a soft body) to the gripper and close the fingers."""
        # enable collision
        self.end_effector.GetAttribute("physics:collisionEnabled").Set(True)
        # create magic grasp
        self.attachmentPath = target_object.GetPath().AppendChild("rigidAttachment_0")
        omni.kit.commands.execute(
            "AddSoftBodyRigidAttachmentCommand",
            target_attachment_path=self.attachmentPath,
            softbody_path=target_object.GetPath(),
            rigidbody_path=self.end_effector.GetPath(),
        )
        attachmentPrim = self.stage.GetPrimAtPath(self.attachmentPath)
        assert attachmentPrim
        assert attachmentPrim.GetAttribute("physxEnableHaloParticleFiltering").Set(True)
        assert attachmentPrim.GetAttribute("physxEnableVolumeParticleAttachments").Set(True)
        assert attachmentPrim.GetAttribute("physxEnableSurfaceTetraAttachments").Set(True)
        omni.physx.get_physx_interface().release_physics_objects()
        # close both fingers around the object
        self.fingerL_ops.Set((-5,0,20))
        self.fingerR_ops.Set((5,0,20))
    def ungrasp(self):
        """Delete the attachment created by grasp() and open the fingers."""
        assert self.attachmentPath is not None, "nothing is grasped! (there is no attachment registered)"
        # release magic grasp
        omni.kit.commands.execute(
            "DeletePrimsCommand",
            paths=[self.attachmentPath]
        )
        self.end_effector.GetAttribute("physics:collisionEnabled").Set(False)
        omni.physx.get_physx_interface().release_physics_objects()
        self.attachmentPath = None
        # open the fingers wide
        self.fingerL_ops.Set((-80,0,20))
        self.fingerR_ops.Set((80,0,20))
        #self.reset_translation()
    def plan_trajectory(self, start_loc, end_loc, speed=None):
        """Return waypoints from start to end with per-step length ~speed."""
        return interpolate(start_loc, end_loc, self.default_speed if speed is None else speed)
################################
# Random utils
################################
def get_camera_name(viewport):
    """Return the prim name of the viewport's active camera."""
    stage = omni.usd.get_context().get_stage()
    camera_prim = stage.GetPrimAtPath(viewport.get_active_camera())
    return camera_prim.GetName()
def rpy2quat(roll, pitch, yaw):
    """Convert roll/pitch/yaw angles (radians) into a Gf.Quatf(w, x, y, z)."""
    half_r = roll * 0.5
    half_p = pitch * 0.5
    half_y = yaw * 0.5
    cr, sr = math.cos(half_r), math.sin(half_r)
    cp, sp = math.cos(half_p), math.sin(half_p)
    cy, sy = math.cos(half_y), math.sin(half_y)
    qw = cr * cp * cy + sr * sp * sy
    qx = sr * cp * cy - cr * sp * sy
    qy = cr * sp * cy + sr * cp * sy
    qz = cr * cp * sy - sr * sp * cy
    return Gf.Quatf(qw, qx, qy, qz)
################################
# Scene randomization utils
################################
def is_collider(prim):
    """Return the prim's "physics:collisionEnabled" attribute value.

    Returns False (rather than raising) for prims that do not expose the
    attribute.

    Fix: narrow the bare `except:` to `except Exception` so that
    KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    try:
        return prim.GetAttribute("physics:collisionEnabled").Get()
    except Exception:
        return False
def find_collider(prim):
    """Find the first collision-enabled prim in `prim`'s subtree.

    Returns:
        (extent, transform): the collider's axis-aligned extent (2x3 array,
        defaulting to a 100-unit cube when the attribute is missing) and its
        4x4 world transform. Both are None-derived arrays if no collider exists.
    """
    #from pxr import UsdPhysics
    primRange = iter(Usd.PrimRange(prim))
    extent, transform = None, None
    for p in primRange:
        #if p.HasAPI(UsdPhysics.CollisionAPI):
        if is_collider(p):
            extent = p.GetAttribute("extent").Get()
            if extent is None:
                # this means that the object is a cube
                extent = np.array([[-50,-50,-50],[50,50,50]])
            transform = omni.usd.get_world_transform_matrix(p, Usd.TimeCode.Default())
            # stop descending below the first collider found
            primRange.PruneChildren()
            break
    return np.array(extent), np.array(transform)
def find_immediate_children(prim):
    """Return the direct (depth-1) children of `prim` in traversal order."""
    parent_path = prim.GetPath()
    return [child for child in Usd.PrimRange(prim)
            if child.GetPath().GetParentPath() == parent_path]
def extent_to_cube(extent):
    """Expand an axis-aligned extent [[min_xyz], [max_xyz]] into a cube mesh.

    Returns:
        (verts, faces): 8 corner vertices and 6 quad faces with 1-based
        (OBJ-style) indices.
    """
    (min_x, min_y, min_z), (max_x, max_y, max_z) = extent[0], extent[1]
    corners = np.array([
        (max_x, max_y, max_z),
        (max_x, max_y, min_z),
        (max_x, min_y, max_z),
        (max_x, min_y, min_z),
        (min_x, max_y, max_z),
        (min_x, max_y, min_z),
        (min_x, min_y, max_z),
        (min_x, min_y, min_z),
    ])
    quads = np.array([
        (1, 5, 7, 3),
        (4, 3, 7, 8),
        (8, 7, 5, 6),
        (6, 2, 4, 8),
        (2, 1, 3, 4),
        (6, 5, 1, 2),
    ])
    return corners, quads
def transform_verts(verts, transform):
    """Apply a 4x4 row-vector transform to (N, 3) verts; returns (N, 3)."""
    ones = np.ones((verts.shape[0], 1))
    homogeneous = np.concatenate([verts, ones], axis=-1)
    return (homogeneous @ transform)[:, :-1]
def export_quad_obj(verts, faces, export_path):
    """Write a quad mesh to a Wavefront OBJ file (faces are 1-based quads)."""
    lines = [f"v {p[0]:.3f} {p[1]:.3f} {p[2]:.3f}\n" for p in verts]
    lines += [f"f {f[0]} {f[1]} {f[2]} {f[3]}\n" for f in faces]
    with open(export_path, 'w') as fp:
        fp.writelines(lines)
def standardize_bbox(bbox):
    """Collapse an (N, D) point set into [[per-axis min], [per-axis max]]."""
    lower = bbox.min(axis=0)
    upper = bbox.max(axis=0)
    return np.array([lower, upper])
def get_bbox_translation_range(bbox, scene_range):
    """Range of 2D translations that keep `bbox` fully inside `scene_range`."""
    half_size = (bbox[1] - bbox[0]) / 2
    # shrink the scene by half the bbox size so the bbox center stays inside
    center_range = scene_range + np.array([half_size, -half_size])
    return center_range - np.mean(bbox, axis=0)
def sample_bbox_translation(bbox, scene_range):
    """Sample a uniform 2D translation keeping `bbox` inside `scene_range`."""
    lo, hi = get_bbox_translation_range(bbox, scene_range)
    return lo + np.random.rand(2) * (hi - lo)
def get_canvas(scene_range):
    """Allocate a zeroed occupancy grid 10% larger than the scene extent."""
    grid_size = ((scene_range[1] - scene_range[0]) * 1.1).astype(int)
    return np.zeros(grid_size)
def fill_canvas(canvas, scene_range, bbox, val=1):
    """Paint the grid cells covered by `bbox` (scene coords) with `val`.

    Boxes whose lower corner falls below the grid or whose upper corner falls
    beyond it are silently skipped (no wrap-around writes).
    """
    center_offset = np.array(canvas.shape) / 2
    cells = (bbox - np.mean(scene_range, axis=0) + center_offset).astype(int)
    (row0, col0), (row1, col1) = cells
    if row0 < 0 or col0 < 0:
        return
    n_rows, n_cols = canvas.shape
    if row1 >= n_rows or col1 >= n_cols:
        return
    canvas[row0:row1, col0:col1] = val
def get_occupancy_value(canvas, scene_range, pts):
    """Look up the occupancy cell value for each 2D point (scene coords)."""
    center_offset = np.array(canvas.shape) / 2
    cells = (pts - np.mean(scene_range, axis=0) + center_offset).astype(int)
    return canvas[cells[:, 0], cells[:, 1]]
def overlaps_with_current(canvas, scene_range, bbox, val=0):
    """True if any cell under `bbox` differs from `val` (i.e. is occupied)."""
    center_offset = np.array(canvas.shape) / 2
    cells = (bbox - np.mean(scene_range, axis=0) + center_offset).astype(int)
    region = canvas[cells[0, 0]:cells[1, 0], cells[0, 1]:cells[1, 1]]
    return (region != val).any()
def pad_to_square(bbox):
    """Grow a 2D bbox into a square, keeping its center fixed."""
    half = (bbox[1] - bbox[0]) / 2.
    center = np.mean(bbox, axis=0)
    side = max(half[0], half[1])
    return np.stack([center - side, center + side])
def scale(bbox, factor=1.1):
    """Scale a 2D bbox about its center by `factor`."""
    half = (bbox[1] - bbox[0]) / 2. * factor
    center = np.mean(bbox, axis=0)
    return np.stack([center - half, center + half])
| 16,913 | Python | 36.923767 | 126 | 0.601845 |
NVlabs/ACID/PlushSim/scripts/writer.py | #!/usr/bin/env python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
"""Helper class for writing groundtruth data offline.
"""
import atexit
import colorsys
import queue
import omni
import os
import threading
import numpy as np
from PIL import Image, ImageDraw
class DataWriter:
    """Threaded groundtruth writer: worker threads drain a queue of groundtruth
    dicts and write RGB / depth / segmentation / bbox / camera / pose outputs
    into per-viewport, per-sensor folders under `data_dir`.
    """
    def __init__(self, data_dir, num_worker_threads, max_queue_size=500, sensor_settings=None):
        from omni.isaac.synthetic_utils import visualization as vis
        self.vis = vis
        # flush pending work when the interpreter exits
        atexit.register(self.stop_threads)
        self.data_dir = data_dir
        # Threading for multiple scenes
        self.num_worker_threads = num_worker_threads
        # Initialize queue with a specified size
        self.q = queue.Queue(max_queue_size)
        self.threads = []
        self._viewport = omni.kit.viewport.get_viewport_interface()
        self.create_output_folders(sensor_settings)
    def start_threads(self):
        """Start worker threads."""
        for _ in range(self.num_worker_threads):
            t = threading.Thread(target=self.worker, daemon=True)
            t.start()
            self.threads.append(t)
    def stop_threads(self):
        """Waits for all tasks to be completed before stopping worker threads."""
        print(f"Finish writing data...")
        # Block until all tasks are done
        self.q.join()
        # Stop workers
        # one None sentinel per worker makes each exit its loop
        for _ in range(self.num_worker_threads):
            self.q.put(None)
        for t in self.threads:
            t.join()
        print(f"Done.")
    def worker(self):
        """Processes task from queue. Each tasks contains groundtruth data and metadata which is used to transform the output and write it to disk."""
        while True:
            groundtruth = self.q.get()
            if groundtruth is None:
                break
            filename = groundtruth["METADATA"]["image_id"]
            viewport_name = groundtruth["METADATA"]["viewport_name"]
            for gt_type, data in groundtruth["DATA"].items():
                if gt_type == "RGB":
                    self.save_image(viewport_name, gt_type, data, filename)
                elif gt_type == "DEPTH":
                    if groundtruth["METADATA"]["DEPTH"]["NPY"]:
                        self.depth_folder = self.data_dir + "/" + str(viewport_name) + "/depth/"
                        np.save(self.depth_folder + filename + ".npy", data)
                    if groundtruth["METADATA"]["DEPTH"]["COLORIZE"]:
                        self.save_image(viewport_name, gt_type, data, filename)
                elif gt_type == "INSTANCE":
                    self.save_segmentation(
                        viewport_name,
                        gt_type,
                        data,
                        filename,
                        groundtruth["METADATA"]["INSTANCE"]["WIDTH"],
                        groundtruth["METADATA"]["INSTANCE"]["HEIGHT"],
                        groundtruth["METADATA"]["INSTANCE"]["COLORIZE"],
                        groundtruth["METADATA"]["INSTANCE"]["NPY"],
                    )
                elif gt_type == "SEMANTIC":
                    self.save_segmentation(
                        viewport_name,
                        gt_type,
                        data,
                        filename,
                        groundtruth["METADATA"]["SEMANTIC"]["WIDTH"],
                        groundtruth["METADATA"]["SEMANTIC"]["HEIGHT"],
                        groundtruth["METADATA"]["SEMANTIC"]["COLORIZE"],
                        groundtruth["METADATA"]["SEMANTIC"]["NPY"],
                    )
                elif gt_type in ["BBOX2DTIGHT", "BBOX2DLOOSE"]:
                    self.save_bbox(
                        viewport_name,
                        gt_type,
                        data,
                        filename,
                        groundtruth["METADATA"][gt_type]["COLORIZE"],
                        groundtruth["DATA"]["RGB"],
                        groundtruth["METADATA"][gt_type]["NPY"],
                    )
                elif gt_type == "CAMERA":
                    self.camera_folder = self.data_dir + "/" + str(viewport_name) + "/camera/"
                    np.save(self.camera_folder + filename + ".npy", data)
                elif gt_type == "POSES":
                    self.poses_folder = self.data_dir + "/" + str(viewport_name) + "/poses/"
                    np.save(self.poses_folder + filename + ".npy", data)
                else:
                    raise NotImplementedError
            self.q.task_done()
    def save_segmentation(
        self, viewport_name, data_type, data, filename, width=1280, height=720, display_rgb=True, save_npy=True
    ):
        """Write INSTANCE/SEMANTIC segmentation as .npy and/or a colorized PNG."""
        self.instance_folder = self.data_dir + "/" + str(viewport_name) + "/instance/"
        self.semantic_folder = self.data_dir + "/" + str(viewport_name) + "/semantic/"
        # Save ground truth data locally as npy
        if data_type == "INSTANCE" and save_npy:
            np.save(self.instance_folder + filename + ".npy", data)
        if data_type == "SEMANTIC" and save_npy:
            np.save(self.semantic_folder + filename + ".npy", data)
        if display_rgb:
            image_data = np.frombuffer(data, dtype=np.uint8).reshape(*data.shape, -1)
            # cap the palette for semantic classes; instances get a full palette
            num_colors = 50 if data_type == "SEMANTIC" else None
            color_image = self.vis.colorize_segmentation(image_data, width, height, 3, num_colors)
            # color_image = visualize.colorize_instance(image_data)
            color_image_rgb = Image.fromarray(color_image, "RGB")
            # NOTE(review): the literal "(unknown)" in these paths looks like a
            # corrupted template (expected something like f"...{filename}.png");
            # verify against the original source.
            if data_type == "INSTANCE":
                color_image_rgb.save(f"{self.instance_folder}/(unknown).png")
            if data_type == "SEMANTIC":
                color_image_rgb.save(f"{self.semantic_folder}/(unknown).png")
    def save_image(self, viewport_name, img_type, image_data, filename):
        """Write an RGB frame as PNG, or a depth map as a normalized 8-bit PNG."""
        self.rgb_folder = self.data_dir + "/" + str(viewport_name) + "/rgb/"
        self.depth_folder = self.data_dir + "/" + str(viewport_name) + "/depth/"
        if img_type == "RGB":
            # Save ground truth data locally as png
            rgb_img = Image.fromarray(image_data, "RGBA")
            rgb_img.save(f"{self.rgb_folder}/(unknown).png")
        elif img_type == "DEPTH":
            # Convert linear depth to inverse depth for better visualization
            image_data = image_data * 100
            image_data = np.reciprocal(image_data)
            # Save ground truth data locally as png
            image_data[image_data == 0.0] = 1e-5
            image_data = np.clip(image_data, 0, 255)
            # normalize to [0, 1] before converting to 8-bit
            image_data -= np.min(image_data)
            if np.max(image_data) > 0:
                image_data /= np.max(image_data)
            depth_img = Image.fromarray((image_data * 255.0).astype(np.uint8))
            depth_img.save(f"{self.depth_folder}/(unknown).png")
    def save_bbox(self, viewport_name, data_type, data, filename, display_rgb=True, rgb_data=None, save_npy=True):
        """Write 2D bbox groundtruth as .npy and/or an RGB overlay PNG."""
        self.bbox_2d_tight_folder = self.data_dir + "/" + str(viewport_name) + "/bbox_2d_tight/"
        self.bbox_2d_loose_folder = self.data_dir + "/" + str(viewport_name) + "/bbox_2d_loose/"
        # Save ground truth data locally as npy
        if data_type == "BBOX2DTIGHT" and save_npy:
            np.save(self.bbox_2d_tight_folder + filename + ".npy", data)
        if data_type == "BBOX2DLOOSE" and save_npy:
            np.save(self.bbox_2d_loose_folder + filename + ".npy", data)
        if display_rgb and rgb_data is not None:
            color_image = self.vis.colorize_bboxes(data, rgb_data)
            color_image_rgb = Image.fromarray(color_image, "RGBA")
            if data_type == "BBOX2DTIGHT":
                color_image_rgb.save(f"{self.bbox_2d_tight_folder}/(unknown).png")
            if data_type == "BBOX2DLOOSE":
                color_image_rgb.save(f"{self.bbox_2d_loose_folder}/(unknown).png")
    def create_output_folders(self, sensor_settings=None):
        """Checks if the sensor output folder corresponding to each viewport is created. If not, it creates them."""
        if not os.path.exists(self.data_dir):
            os.mkdir(self.data_dir)
        if sensor_settings is None:
            sensor_settings = dict()
            viewports = self._viewport.get_instance_list()
            viewport_names = [self._viewport.get_viewport_window_name(vp) for vp in viewports]
            # default: every sensor enabled for every viewport
            sensor_settings_viewport = {
                "rgb": {"enabled": True},
                "depth": {"enabled": True, "colorize": True, "npy": True},
                "instance": {"enabled": True, "colorize": True, "npy": True},
                "semantic": {"enabled": True, "colorize": True, "npy": True},
                "bbox_2d_tight": {"enabled": True, "colorize": True, "npy": True},
                "bbox_2d_loose": {"enabled": True, "colorize": True, "npy": True},
                "camera": {"enabled": True, "npy": True},
                "poses": {"enabled": True, "npy": True},
            }
            for name in viewport_names:
                # NOTE(review): `copy` is not imported in this module as shown —
                # verify `import copy` exists at the top of the file.
                sensor_settings[name] = copy.deepcopy(sensor_settings_viewport)
        for viewport_name in sensor_settings:
            viewport_folder = self.data_dir + "/" + str(viewport_name)
            if not os.path.exists(viewport_folder):
                os.mkdir(viewport_folder)
            for sensor_name in sensor_settings[viewport_name]:
                if sensor_settings[viewport_name][sensor_name]["enabled"]:
                    sensor_folder = self.data_dir + "/" + str(viewport_name) + "/" + str(sensor_name)
                    if not os.path.exists(sensor_folder):
                        os.mkdir(sensor_folder)
| 10,072 | Python | 47.196172 | 150 | 0.552621 |
NVlabs/ACID/ACID/environment.yaml | name: acid_train
channels:
- conda-forge
- pytorch
- defaults
dependencies:
- cython=0.29.2
- imageio=2.4.1
- numpy=1.15.4
- numpy-base=1.15.4
- matplotlib=3.0.3
- matplotlib-base=3.0.3
- pandas=0.23.4
- pillow=5.3.0
- pyembree=0.1.4
- pytest=4.0.2
- python=3.7.10
- pytorch=1.4.0
- pyyaml=3.13
- scikit-image=0.14.1
- scipy=1.5.2
- tensorboardx=1.4
- torchvision=0.2.1
- tqdm=4.28.1
- trimesh=2.37.7
- pip
- pip:
- scikit-learn==0.24.2
- h5py==2.9.0
- plyfile==0.7
- polyscope==1.2.0
| 551 | YAML | 15.727272 | 26 | 0.575318 |
NVlabs/ACID/ACID/setup.py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
import numpy
# Get the numpy include directory.
numpy_include_dir = numpy.get_include()
# Extensions
# mcubes (marching cubes algorithm)
mcubes_module = Extension(
    'src.utils.libmcubes.mcubes',
    sources=[
        'src/utils/libmcubes/mcubes.pyx',
        'src/utils/libmcubes/pywrapper.cpp',
        'src/utils/libmcubes/marchingcubes.cpp'
    ],
    language='c++',
    extra_compile_args=['-std=c++11'],
    include_dirs=[numpy_include_dir]
)
# mise (efficient mesh extraction)
mise_module = Extension(
    'src.utils.libmise.mise',
    sources=[
        'src/utils/libmise/mise.pyx'
    ],
)
# simplify (efficient mesh simplification)
simplify_mesh_module = Extension(
    'src.utils.libsimplify.simplify_mesh',
    sources=[
        'src/utils/libsimplify/simplify_mesh.pyx'
    ],
    include_dirs=[numpy_include_dir]
)
# Gather all extension modules
ext_modules = [
    mcubes_module,
    mise_module,
    simplify_mesh_module,
]
# cythonize compiles the .pyx sources; torch's BuildExtension drives the C++ build
setup(
    ext_modules=cythonize(ext_modules),
    cmdclass={
        'build_ext': BuildExtension
    }
)
| 1,311 | Python | 21.237288 | 81 | 0.691076 |
NVlabs/ACID/ACID/plush_train.py | import torch
import torch.optim as optim
from tensorboardX import SummaryWriter
import matplotlib; matplotlib.use('Agg')
import numpy as np
import os
import argparse
import time, datetime
from src import config, data
from src.checkpoints import CheckpointIO
from collections import defaultdict
import shutil
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from src.utils import common_util
import matplotlib.pyplot as plt
from PIL import Image
# Arguments
parser = argparse.ArgumentParser(
description='Train a Plush Env dynamics model.'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--exit-after', type=int, default=-1,
help='Checkpoint and exit after specified number of seconds'
'with exit code 2.')
parser.add_argument('--debug', action='store_true', help='debugging')
parser.add_argument('--eval_only', action='store_true', help='run eval only')
args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
# Set t0
t0 = time.time()
# Shorthands
out_dir = cfg['training']['out_dir']
if args.debug:
cfg['training']['batch_size'] = 2
cfg['training']['vis_n_outputs'] = 1
cfg['training']['print_every'] = 1
cfg['training']['backup_every'] = 1
cfg['training']['validate_every'] = 1
cfg['training']['visualize_every'] = 1
cfg['training']['checkpoint_every'] = 1
cfg['training']['visualize_total'] = 1
batch_size = cfg['training']['batch_size']
backup_every = cfg['training']['backup_every']
vis_n_outputs = cfg['generation']['vis_n_outputs']
exit_after = args.exit_after
model_selection_metric = cfg['training']['model_selection_metric']
if cfg['training']['model_selection_mode'] == 'maximize':
model_selection_sign = 1
elif cfg['training']['model_selection_mode'] == 'minimize':
model_selection_sign = -1
else:
raise ValueError('model_selection_mode must be '
'either maximize or minimize.')
# Output directory
if not os.path.exists(out_dir):
os.makedirs(out_dir)
shutil.copyfile(args.config, os.path.join(out_dir, 'config.yaml'))
# Dataset
train_loader = data.core.get_plush_loader(cfg, cfg['model']['type'], split='train')
val_loader = data.core.get_plush_loader(cfg, cfg['model']['type'], split='test')
# Model
model = config.get_model(cfg, device=device)
# Generator
generator = config.get_generator(model, cfg, device=device)
# Intialize training
optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-4)
trainer = config.get_trainer(model, optimizer, cfg, device=device)
checkpoint_io = CheckpointIO(out_dir, model=model, optimizer=optimizer)
try:
load_dict = checkpoint_io.load('model_best.pt')
except FileExistsError:
load_dict = dict()
epoch_it = load_dict.get('epoch_it', 0)
it = load_dict.get('it', 0)
metric_val_best = load_dict.get(
'loss_val_best', -model_selection_sign * np.inf)
if metric_val_best == np.inf or metric_val_best == -np.inf:
metric_val_best = -model_selection_sign * np.inf
print('Current best validation metric (%s): %.8f'
% (model_selection_metric, metric_val_best))
logger = SummaryWriter(os.path.join(out_dir, 'logs'))
# Shorthands
print_every = cfg['training']['print_every']
checkpoint_every = cfg['training']['checkpoint_every']
validate_every = cfg['training']['validate_every']
visualize_every = cfg['training']['visualize_every']
# Print model
nparameters = sum(p.numel() for p in model.parameters())
print('Total number of parameters: %d' % nparameters)
print('output path: ', cfg['training']['out_dir'])
# For visualizations
data_vis_list = []
if cfg['model']['type'] == 'geom':
vis_dataset = data.core.get_geom_dataset(cfg, split='vis')
elif cfg['model']['type'] == 'combined':
vis_dataset = data.core.get_combined_dataset(cfg, split='vis')
# Build a data dictionary for visualization
np.random.seed(0)
data_idxes = np.random.randint(len(vis_dataset), size=cfg['training']['visualize_total'])
for i, id in enumerate(data_idxes):
data_vis = data.core.collate_pair_fn([vis_dataset[id]])
data_vis_list.append({'it': i, 'data': data_vis})
if args.eval_only:
eval_dict, figs = trainer.evaluate(val_loader)
metric_val = eval_dict[model_selection_metric]
for k, v in eval_dict.items():
print(f"metric {k}: {v}")
print('Validation metric (%s): %.4f'
% (model_selection_metric, metric_val))
for k,v in figs.items():
fig_path = os.path.join(out_dir, 'vis', f"{k}_eval_best.png")
v.savefig(fig_path)
for data_vis in data_vis_list:
out = generator.generate_mesh(data_vis['data'])
# Get statistics
try:
mesh, stats_dict = out
except TypeError:
mesh, stats_dict = out, {}
mesh.export(os.path.join(out_dir, 'vis', f"best_{data_vis['it']}.off"))
out2 = generator.generate_pointcloud(data_vis['data'])
for i,pcloud in enumerate(out2):
ipath = os.path.join(out_dir, 'vis', f"best_{data_vis['it']}_{i}.obj")
common_util.write_pointcoud_as_obj(ipath, pcloud)
pcloud_dict = [{"title":'source'if i == 0 else 'target',
"pts": p[:,:3],
"col": None if p.shape[1] == 3 else p[:,3:]
} for i,p in enumerate(out2)]
fig = common_util.side_by_side_point_clouds(pcloud_dict)
width, height = fig.get_size_inches() * fig.get_dpi()
canvas = FigureCanvas(fig)
canvas.draw()
img_path = os.path.join(out_dir, 'vis', f"best_{data_vis['it']}.png")
Image.fromarray(
np.frombuffer(
canvas.tostring_rgb(),
dtype='uint8').reshape(int(height), int(width), 3)).save(
img_path
)
plt.close(fig)
quit()
while True:
epoch_it += 1
for batch in train_loader:
it += 1
losses = trainer.train_step(batch, it)
for k,v in losses.items():
logger.add_scalar(f'train/{k}_loss', v, it)
# Print output
if (it % print_every) == 0:
t = datetime.datetime.now()
print_str = f"[Epoch {epoch_it:04d}] it={it:04d}, time: {time.time()-t0:.3f}, "
print_str += f"{t.hour:02d}:{t.minute:02d}, "
for k,v in losses.items():
print_str += f"{k}:{v:.4f}, "
print(print_str)
# Save checkpoint
if (checkpoint_every > 0 and (it % checkpoint_every) == 0):
print('Saving checkpoint')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Backup if necessary
if (backup_every > 0 and (it % backup_every) == 0):
print('Backup checkpoint')
checkpoint_io.save('model_%d.pt' % it, epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Run validation
if validate_every > 0 and (it % validate_every) == 0:
print('Running Validation')
eval_dict, figs = trainer.evaluate(val_loader)
for k,v in figs.items():
fig_path = os.path.join(out_dir, 'vis', f"{k}_{it}.png")
v.savefig(fig_path)
logger.add_figure(k, v, it)
metric_val = eval_dict[model_selection_metric]
print('Validation metric (%s): %.4f'
% (model_selection_metric, metric_val))
for k, v in eval_dict.items():
print(f"metric {k}: {v}")
logger.add_scalar('val/%s' % k, v, it)
if model_selection_sign * (metric_val - metric_val_best) > 0:
metric_val_best = metric_val
print('New best model (loss %.4f)' % metric_val_best)
checkpoint_io.save('model_best.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Visualize output
if visualize_every > 0 and (it % visualize_every) == 0:
print('Visualizing')
renders = []
for data_vis in data_vis_list:
out = generator.generate_mesh(data_vis['data'])
# Get statistics
try:
mesh, stats_dict = out
except TypeError:
mesh, stats_dict = out, {}
mesh.export(os.path.join(out_dir, 'vis', '{}_{}.off'.format(it, data_vis['it'])))
out2 = generator.generate_pointcloud(data_vis['data'])
for i,pcloud in enumerate(out2):
ipath = os.path.join(out_dir, 'vis', f"{it}_{data_vis['it']}_{i}.obj")
common_util.write_pointcoud_as_obj(ipath, pcloud)
name_dict = ['source', 'target', 'source_rollout', 'target_rollout']
pcloud_dict = [{"title":name_dict[i],
"pts": p[:,:3],
"col": None if p.shape[1] == 3 else p[:,3:]
} for i,p in enumerate(out2)]
fig = common_util.side_by_side_point_clouds(pcloud_dict)
width, height = fig.get_size_inches() * fig.get_dpi()
canvas = FigureCanvas(fig)
canvas.draw()
img_path = os.path.join(out_dir, 'vis', f"{it}_{data_vis['it']}.png")
Image.fromarray(
np.frombuffer(
canvas.tostring_rgb(),
dtype='uint8').reshape(int(height), int(width), 3)).save(
img_path
)
plt.close(fig)
# Exit if necessary
if exit_after > 0 and (time.time() - t0) >= exit_after:
print('Time limit reached. Exiting.')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
exit(3)
| 10,307 | Python | 39.108949 | 116 | 0.573979 |
NVlabs/ACID/ACID/README.md | [![NVIDIA Source Code License](https://img.shields.io/badge/license-NSCL-blue.svg)](https://github.com/NVlabs/ACID/blob/master/LICENSE)
![Python 3.7](https://img.shields.io/badge/python-3.7-green.svg)
# ACID model
<div style="text-align: center">
<img src="../_media/model_figure.png" width="600"/>
</div>
## Prerequisites
We use anaconda to manage necessary packages. You can create an anaconda environment called `acid_train` using
```bash
conda env create -f environment.yaml
conda activate acid_train
pip install torch-scatter==2.0.4 -f https://pytorch-geometric.com/whl/torch-1.4.0+cu101.html
```
Next, we need to compile the extension modules used for mesh utilities, which come from [Convolutional Occupancy Network](https://github.com/autonomousvision/convolutional_occupancy_networks).
You can do this via
```
python setup.py build_ext --inplace
```
## Get Raw Manipulation Data
You can obtain our manipulation trajectories, pre-generated with [PlushSim](../PlushSim/), from this [Google Drive](https://drive.google.com/drive/folders/1wOIk58e3wCfgOeYFBC1caYP2KAoFijbW?usp=sharing) directory. The manipulation trajectories are broken down into 10GB chunks. We recommend using [`gdown`](https://github.com/wkentaro/gdown) for downloading.
After downloading, please run the following commands to decompress the data:
```
cat data_plush.zip.part-* > data_plush.zip
unzip data_plush.zip
```
You should have the following folder structure:
```
ACID/
data_plush/
metadata/
split1/
...
split2/
...
split3/
...
split1/
...
split2/
...
split3/
...
```
### Generating Training Data
To generate input-output pairs for ACID training, you need to run the following scripts to generate the data:
```
cd preprocess
python gen_data_flow_plush.py
python gen_data_flow_splits.py
python gen_data_contrastive_pairs_flow.py
```
This should create `train_data` directory inside this folder, with the following structure:
```
ACID/
train_data/
flow/
split1/
split2/
split3/
train.pkl
test.pkl
pair/
split1/
split2/
split3/
```
If you wish to generate the data at another location, you can pass in different flags. Check out each preprocess script for details.
## Training
Finally, to train the ACID model from scratch, run:
```
python plush_train.py configs/plush_dyn_geodesics.yaml
```
For available training options, please take a look at `configs/default.yaml` and `configs/plush_dyn_geodesics.yaml`.
### Pretrained Weights
You can download pretrained weights on [Google Drive](https://drive.google.com/file/d/15ClJpMx8LlgPHXp1EeCP3Z4kD5h5bDKl/view?usp=sharing), please save `model_best.pt` to `result/geodesics/`.
## License
Please check the [LICENSE](../LICENSE) file. ACID may be used non-commercially, meaning for research or evaluation purposes only. For business inquiries, please contact researchinquiries@nvidia.com.
If you find our code or paper useful, please consider citing
```bibtex
@article{shen2022acid,
title={ACID: Action-Conditional Implicit Visual Dynamics for Deformable Object Manipulation},
author={Shen, Bokui and Jiang, Zhenyu and Choy, Christopher and J. Guibas, Leonidas and Savarese, Silvio and Anandkumar, Anima and Zhu, Yuke},
journal={Robotics: Science and Systems (RSS)},
year={2022}
}
``` | 3,489 | Markdown | 33.9 | 353 | 0.70364 |
NVlabs/ACID/ACID/src/training.py | import numpy as np
from collections import defaultdict
from tqdm import tqdm
class BaseTrainer(object):
    ''' Abstract base class for trainers.

    Subclasses must implement `train_step` and `eval_step`; `evaluate`
    aggregates the per-batch metrics produced by `eval_step` over a
    whole dataloader.
    '''

    def evaluate(self, val_loader):
        ''' Runs `eval_step` on every batch and averages each metric.

        Args:
            val_loader (dataloader): pytorch dataloader

        Returns:
            dict: mean value per metric key
        '''
        collected = defaultdict(list)
        for batch in tqdm(val_loader):
            for key, value in self.eval_step(batch).items():
                collected[key].append(value)
        return {key: np.mean(values) for key, values in collected.items()}

    def train_step(self, *args, **kwargs):
        ''' Performs a single training step. Must be overridden. '''
        raise NotImplementedError

    def eval_step(self, *args, **kwargs):
        ''' Performs a single evaluation step. Must be overridden. '''
        raise NotImplementedError

    def visualize(self, *args, **kwargs):
        ''' Produces visualizations. Must be overridden. '''
        raise NotImplementedError
| 988 | Python | 23.724999 | 65 | 0.571862 |
NVlabs/ACID/ACID/src/common.py | # import multiprocessing
import torch
import numpy as np
import math
import numpy as np
def compute_iou(occ1, occ2):
    ''' Computes the Intersection over Union (IoU) between two sets of
    occupancy values, thresholded at 0.5.

    Args:
        occ1 (tensor): first set of occupancy values
        occ2 (tensor): second set of occupancy values

    Returns:
        numpy array (or scalar for 1-d input): per-batch IoU
    '''
    occ1, occ2 = np.asarray(occ1), np.asarray(occ2)
    # Flatten everything except the batch dimension (1-d input is kept as-is).
    if occ1.ndim >= 2:
        occ1 = occ1.reshape(occ1.shape[0], -1)
    if occ2.ndim >= 2:
        occ2 = occ2.reshape(occ2.shape[0], -1)
    # Binarize at 0.5.
    occ1 = (occ1 >= 0.5)
    occ2 = (occ2 >= 0.5)
    # IoU = |intersection| / |union| along the last axis.
    union = (occ1 | occ2).astype(np.float32).sum(axis=-1)
    intersection = (occ1 & occ2).astype(np.float32).sum(axis=-1)
    return intersection / union
def chamfer_distance(points1, points2, give_id=False):
    ''' Returns the chamfer distance for the sets of points.

    Thin wrapper around the naive O(T^2) implementation.

    Args:
        points1 (numpy array): first point set
        points2 (numpy array): second point set
        give_id (bool): whether to return the IDs of nearest points
            (accepted for API compatibility but ignored by this
            implementation)
    '''
    return chamfer_distance_naive(points1, points2)
def chamfer_distance_naive(points1, points2):
    ''' Naive O(T^2) Chamfer distance between two batched point sets.

    Args:
        points1 (tensor): first point set, shape (B, T, 3)
        points2 (tensor): second point set, shape (B, T, 3)

    Returns:
        tensor: (B,) symmetric Chamfer distance per batch element
    '''
    assert(points1.size() == points2.size())
    batch_size, num_points, _ = points1.size()
    # All pairwise squared distances via broadcasting: (B, T, T).
    diff = points1.view(batch_size, num_points, 1, 3) \
        - points2.view(batch_size, 1, num_points, 3)
    sq_dists = diff.pow(2).sum(-1)
    # Nearest-neighbor distance in each direction, averaged over points.
    forward = sq_dists.min(dim=1)[0].mean(dim=1)
    backward = sq_dists.min(dim=2)[0].mean(dim=1)
    return forward + backward
def make_3d_grid(bb_min, bb_max, shape):
    ''' Builds a dense 3D grid of points spanning a bounding box.

    Args:
        bb_min (tuple): bounding box minimum
        bb_max (tuple): bounding box maximum
        shape (tuple): resolution per axis

    Returns:
        tensor: (prod(shape), 3) points; x varies slowest, z fastest
    '''
    num_points = shape[0] * shape[1] * shape[2]
    axes = [torch.linspace(bb_min[d], bb_max[d], shape[d]) for d in range(3)]
    xs = axes[0].view(-1, 1, 1).expand(*shape).contiguous().view(num_points)
    ys = axes[1].view(1, -1, 1).expand(*shape).contiguous().view(num_points)
    zs = axes[2].view(1, 1, -1).expand(*shape).contiguous().view(num_points)
    return torch.stack([xs, ys, zs], dim=1)
def transform_points(points, transform):
    ''' Applies a camera transformation to a batch of points.

    Args:
        points (tensor): (B, N, 3) points
        transform (tensor): (B, 3, 4) affine matrices [R|t] or
            (B, 3, 3) linear matrices (e.g. intrinsics K)
    '''
    assert(points.size(2) == 3)
    assert(transform.size(1) == 3)
    assert(points.size(0) == transform.size(0))
    if transform.size(2) == 4:
        # Affine case: rotate then translate.
        rot = transform[:, :, :3]
        trans = transform[:, :, 3:]
        points_out = points @ rot.transpose(1, 2) + trans.transpose(1, 2)
    elif transform.size(2) == 3:
        # Pure linear case.
        points_out = points @ transform.transpose(1, 2)
    return points_out
def b_inv(b_mat):
    ''' Performs batch matrix inversion.

    The previous implementation relied on ``torch.gesv``, which was
    deprecated in PyTorch 1.1 and subsequently removed, so this function
    raised AttributeError on the pinned torch version. ``torch.inverse``
    supports batched inputs directly and computes the same result.

    Arguments:
        b_mat (tensor): (..., N, N) batch of matrices to invert

    Returns:
        tensor: batch of inverses, same shape as ``b_mat``
    '''
    return torch.inverse(b_mat)
def project_to_camera(points, transform):
    ''' Projects 3D points onto the camera image plane.

    Args:
        points (tensor): (B, N, 3) points
        transform (tensor): camera transformation matrices
    '''
    cam_pts = transform_points(points, transform)
    # Perspective divide: (x, y) / z.
    return cam_pts[..., :2] / cam_pts[..., 2:]
def fix_Rt_camera(Rt, loc, scale):
    ''' Folds a scale and location offset into an Rt camera matrix.

    Args:
        Rt (tensor): (B, 3, 4) camera matrix [R|t]
        loc (tensor): (B, 3) location offset
        scale (tensor): (B,) scale factor
    '''
    batch_size = Rt.size(0)
    rot = Rt[:, :, :3]
    trans = Rt[:, :, 3:]
    # Scale folds into the rotation; the location shifts the translation.
    scale = scale.view(batch_size, 1, 1)
    fixed = torch.cat([rot * scale, trans + rot @ loc.unsqueeze(2)], dim=2)
    assert(fixed.size() == (batch_size, 3, 4))
    return fixed
def normalize_coordinate(p, padding=0.1, plane='xz'):
    ''' Projects 3D points onto an axis-aligned plane and rescales the
    result into [0, 1) for unit-cube experiments.

    Args:
        p (tensor): (B, N, 3) points
        padding (float): conventional ONet padding for the unit cube,
            so [-0.5, 0.5] -> [-0.55, 0.55]
        plane (str): plane feature type, one of ['xz', 'xy', 'yz']
    '''
    plane_axes = {'xz': [0, 2], 'xy': [0, 1]}.get(plane, [1, 2])
    projected = p[:, :, plane_axes]
    out = projected / (1 + padding + 10e-6)  # into (-0.5, 0.5)
    out = out + 0.5  # into (0, 1)
    # Clamp numerical outliers back into [0, 1).
    if out.max() >= 1:
        out[out >= 1] = 1 - 10e-6
    if out.min() < 0:
        out[out < 0] = 0.0
    return out
def normalize_3d_coordinate(p, padding=0.1):
    ''' Rescales 3D points into [0, 1) for unit-cube experiments
    (the 3D-grid counterpart of `normalize_coordinate`).

    Args:
        p (tensor): points
        padding (float): conventional ONet padding for the unit cube,
            so [-0.5, 0.5] -> [-0.55, 0.55]
    '''
    scaled = p / (1 + padding + 10e-4)  # into (-0.5, 0.5)
    scaled = scaled + 0.5  # into (0, 1)
    # Push numerical outliers back inside the valid range.
    if scaled.max() >= 1:
        scaled[scaled >= 1] = 1 - 10e-4
    if scaled.min() < 0:
        scaled[scaled < 0] = 0.0
    return scaled
def normalize_coord(p, vol_range, plane='xz'):
    ''' Normalizes coordinates to [0, 1] w.r.t. a volume boundary for
    sliding-window experiments. NOTE: rescales `p` in place.

    Args:
        p (tensor): (N, 3) points
        vol_range (numpy array): volume boundary [lower, upper]
        plane (str): 'xz' | 'xy' | 'yz' for canonical planes; anything
            else (e.g. 'grid') returns all three axes
    '''
    lower, upper = vol_range[0], vol_range[1]
    # Per-axis rescale into [0, 1], in place (matches original behavior).
    for axis in range(3):
        p[:, axis] = (p[:, axis] - lower[axis]) / (upper[axis] - lower[axis])
    if plane == 'xz':
        return p[:, [0, 2]]
    elif plane == 'xy':
        return p[:, [0, 1]]
    elif plane == 'yz':
        return p[:, [1, 2]]
    return p
def coordinate2index(x, reso, coord_type='2d'):
    ''' Converts normalized coordinates to flat (row-major) grid indices.

    Args:
        x (tensor): coordinates in [0, 1], shape (B, N, 2) or (B, N, 3)
        reso (int): grid resolution
        coord_type (str): '2d' for plane features, '3d' for grid features

    Returns:
        tensor: (B, 1, N) long indices
    '''
    cells = (x * reso).long()
    if coord_type == '2d':  # plane
        flat = cells[:, :, 0] + reso * cells[:, :, 1]
    elif coord_type == '3d':  # grid
        flat = cells[:, :, 0] + reso * (cells[:, :, 1] + reso * cells[:, :, 2])
    flat = flat[:, None, :]
    return flat
def coord2index(p, vol_range, reso=None, plane='xz'):
    ''' Normalize coordinate to [0, 1] for sliding-window experiments,
    discretize to `reso` cells, and flatten to a single row-major index.
    Corresponds to our 3D model

    Args:
        p (tensor): points
        vol_range (numpy array): volume boundary
        reso (int): defined resolution
        plane (str): feature type, ['xz', 'xy', 'yz'] - canonical planes; ['grid'] - grid volume

    Returns:
        array/tensor: (1, N) flat indices; out-of-range indices are
        clamped to reso**2 (planes) or reso**3 (grid)
    '''
    # normalize to [0, 1] (note: normalize_coord rescales `p` in place)
    x = normalize_coord(p, vol_range, plane=plane)
    # Discretize; handles both numpy arrays and torch tensors.
    if isinstance(x, np.ndarray):
        x = np.floor(x * reso).astype(int)
    else: #* pytorch tensor
        x = (x * reso).long()
    # Flatten 2-column (plane) or 3-column (grid) coordinates.
    if x.shape[1] == 2:
        index = x[:, 0] + reso * x[:, 1]
        index[index > reso**2] = reso**2
    elif x.shape[1] == 3:
        index = x[:, 0] + reso * (x[:, 1] + reso * x[:, 2])
        index[index > reso**3] = reso**3
    return index[None]
def update_reso(reso, depth):
    ''' Rounds the resolution up to the next multiple of 2**(depth - 1)
    so that a U-Net with `depth` layers can process it.

    Fixes a bug in the original check, which applied bitwise `~` to a
    bool (`~True == -2`, `~False == -1` — both truthy), making the
    branch unconditional; the result was only correct by accident
    because the search loop bailed out at i == 0. Using `%` states the
    intent directly.

    Args:
        reso (int): requested resolution
        depth (int): number of U-Net layers

    Returns:
        int: smallest multiple of 2**(depth - 1) that is >= reso
    '''
    base = 2 ** (int(depth) - 1)
    remainder = reso % base
    if remainder == 0:
        return reso
    return reso + (base - remainder)
def decide_total_volume_range(query_vol_metric, recep_field, unit_size, unet_depth):
    ''' Computes the input/query volume boundaries and the input
    resolution such that the U-Net receptive field is respected.

    Args:
        query_vol_metric (numpy array): query volume size
        recep_field (int): receptive field of the U-Net
        unit_size (float): voxel size
        unet_depth (int): number of U-Net layers
    '''
    raw_reso = query_vol_metric / unit_size + recep_field - 1
    reso = update_reso(int(raw_reso), unet_depth)  # make divisible for the UNet
    input_vol_metric = reso * unit_size
    center = np.array([0.0, 0.0, 0.0]).astype(np.float32)
    input_vol = [center - input_vol_metric / 2, center + input_vol_metric / 2]
    query_vol = [center - query_vol_metric / 2, center + query_vol_metric / 2]
    # Guard against a degenerate, excessively large resolution.
    if reso > 10000:
        reso = 1
    return input_vol, query_vol, reso
def add_key(base, new, base_name, new_name, device=None):
    ''' Bundles extra info with an input under named keys.

    Args:
        base (tensor): inputs
        new (dict): new info for the inputs (returned untouched if not a dict)
        base_name (str): key for the input
        new_name (str): key for the new info
        device (device): pytorch device to move the values of `new` to

    Returns:
        dict {base_name: base, new_name: new} when `new` is a dict,
        otherwise `base` unchanged.
    '''
    if not isinstance(new, dict):
        return base
    if device is not None:
        # Move every entry of the extra info onto the target device.
        for key in new:
            new[key] = new[key].to(device)
    return {base_name: base, new_name: new}
class map2local(object):
    ''' Maps global coordinates into local (per-voxel) coordinates in
    [0, 1) and applies a positional encoding.

    Args:
        s (float): voxel size
        pos_encoding (str): positional encoding method, linear|sin_cos
    '''
    def __init__(self, s, pos_encoding='linear'):
        super().__init__()
        self.s = s
        self.pe = positional_encoding(basis_function=pos_encoding)

    def __call__(self, p):
        # remainder keeps the result positive, unlike fmod which keeps
        # the sign of the input.
        local = torch.remainder(p, self.s) / self.s
        return self.pe(local)
class positional_encoding(object):
    ''' Positional encoding (as presented in NeRF).

    Args:
        basis_function (str): 'sin_cos' applies the NeRF frequency
            encoding; any other value passes the input through unchanged.
    '''
    def __init__(self, basis_function='sin_cos'):
        super().__init__()
        self.func = basis_function
        num_freqs = 10
        # Frequencies pi * 2^0 .. pi * 2^(L-1).
        self.freq_bands = (2. ** np.linspace(0, num_freqs - 1, num_freqs)) * math.pi

    def __call__(self, p):
        if self.func == 'sin_cos':
            p = 2.0 * p - 1.0  # rescale to the range [-1, 1]
            features = []
            for freq in self.freq_bands:
                features.append(torch.sin(freq * p))
                features.append(torch.cos(freq * p))
            p = torch.cat(features, dim=2)
        return p
| 11,186 | Python | 29.399456 | 109 | 0.562846 |
NVlabs/ACID/ACID/src/config.py | import yaml
from torchvision import transforms
from src import data
from src import conv_onet
method_dict = {
'conv_onet': conv_onet
}
# General config
def load_config(path, default_path=None):
    ''' Loads a config file, recursively resolving `inherit_from`.

    Args:
        path (str): path to config file
        default_path (str): fallback config to inherit from when the
            file does not specify `inherit_from`

    Returns:
        dict: merged configuration
    '''
    # Load configuration from the file itself. An explicit Loader is
    # required: bare yaml.load() has been deprecated (and is unsafe on
    # untrusted input) since PyYAML 5.1.
    with open(path, 'r') as f:
        cfg_special = yaml.load(f, Loader=yaml.Loader)
    # Check if we should inherit from another config.
    inherit_from = cfg_special.get('inherit_from')
    # If yes, load that config first as the base; otherwise fall back to
    # default_path, or an empty dict.
    if inherit_from is not None:
        cfg = load_config(inherit_from, default_path)
    elif default_path is not None:
        with open(default_path, 'r') as f:
            cfg = yaml.load(f, Loader=yaml.Loader)
    else:
        cfg = dict()
    # Overlay this file's own settings on top of the inherited ones.
    update_recursive(cfg, cfg_special)
    return cfg
def update_recursive(dict1, dict2):
    ''' Recursively merges `dict2` into `dict1` (in place).

    Args:
        dict1 (dict): dictionary to be updated
        dict2 (dict): dictionary whose entries are merged in
    '''
    for key, value in dict2.items():
        # Make sure a slot exists before descending into nested dicts.
        dict1.setdefault(key, dict())
        if isinstance(value, dict):
            update_recursive(dict1[key], value)
        else:
            dict1[key] = value
# Models
def get_model(cfg, device=None, dataset=None):
    ''' Returns the model instance for the configured method.

    Args:
        cfg (dict): config dictionary
        device (device): pytorch device
        dataset (dataset): dataset
    '''
    method_module = method_dict[cfg['method']]
    return method_module.config.get_model(cfg, device=device, dataset=dataset)
# Trainer
def get_trainer(model, optimizer, cfg, device):
    ''' Returns a trainer instance for the configured method.

    Args:
        model (nn.Module): the model which is used
        optimizer (optimizer): pytorch optimizer
        cfg (dict): config dictionary
        device (device): pytorch device
    '''
    method_module = method_dict[cfg['method']]
    return method_module.config.get_trainer(model, optimizer, cfg, device)
# Generator for final mesh extraction
def get_generator(model, cfg, device):
    ''' Returns a mesh generator instance for the configured method.

    Args:
        model (nn.Module): the model which is used
        cfg (dict): config dictionary
        device (device): pytorch device
    '''
    method_module = method_dict[cfg['method']]
    return method_module.config.get_generator(model, cfg, device)
| 2,573 | Python | 23.990291 | 76 | 0.624563 |
NVlabs/ACID/ACID/src/checkpoints.py | import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
    ''' CheckpointIO class.

    It handles saving and loading checkpoints. Any module passed as a
    keyword argument (e.g. model=..., optimizer=...) is tracked and
    (de)serialized via its state_dict; extra scalars passed to `save`
    are stored alongside and returned by `load`.

    Args:
        checkpoint_dir (str): path where checkpoints are saved
    '''
    def __init__(self, checkpoint_dir='./chkpts', **kwargs):
        self.module_dict = kwargs
        self.checkpoint_dir = checkpoint_dir
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)

    def register_modules(self, **kwargs):
        ''' Registers modules in current module dictionary.
        '''
        self.module_dict.update(kwargs)

    def save(self, filename, **kwargs):
        ''' Saves the current module dictionary.

        Args:
            filename (str): name of output file; relative names are
                resolved against the checkpoint directory
        '''
        if not os.path.isabs(filename):
            filename = os.path.join(self.checkpoint_dir, filename)
        # Extra scalars (epoch, iteration, best metric, ...) go into the
        # same dict as the module state dicts.
        outdict = kwargs
        for k, v in self.module_dict.items():
            outdict[k] = v.state_dict()
        torch.save(outdict, filename)

    def load(self, filename):
        '''Loads a module dictionary from local file or url.

        Args:
            filename (str): name of saved module dictionary
        '''
        if is_url(filename):
            return self.load_url(filename)
        else:
            return self.load_file(filename)

    def load_file(self, filename):
        '''Loads a module dictionary from file.

        Args:
            filename (str): name of saved module dictionary

        Raises:
            FileExistsError: when the file does not exist.
                NOTE(review): FileNotFoundError would be the conventional
                exception, but callers (e.g. plush_train.py) catch
                FileExistsError, so this must stay for compatibility.
        '''
        if not os.path.isabs(filename):
            filename = os.path.join(self.checkpoint_dir, filename)
        if os.path.exists(filename):
            print(filename)
            print('=> Loading checkpoint from local file...')
            state_dict = torch.load(filename)
            scalars = self.parse_state_dict(state_dict)
            return scalars
        else:
            raise FileExistsError

    def load_url(self, url):
        '''Load a module dictionary from url.

        Args:
            url (str): url to saved model
        '''
        print(url)
        print('=> Loading checkpoint from url...')
        state_dict = model_zoo.load_url(url, progress=True)
        scalars = self.parse_state_dict(state_dict)
        return scalars

    def parse_state_dict(self, state_dict):
        '''Parse state_dict of model and return scalars.

        Args:
            state_dict (dict): State dict of model
        '''
        # Restore every registered module's weights; missing entries only
        # produce a warning so partially-matching checkpoints still load.
        for k, v in self.module_dict.items():
            if k in state_dict:
                v.load_state_dict(state_dict[k])
            else:
                print('Warning: Could not find %s in checkpoint!' % k)
        # Everything that is not a registered module is a scalar for the caller.
        scalars = {k: v for k, v in state_dict.items()
                   if k not in self.module_dict}
        return scalars
def is_url(url):
    ''' Returns True when `url` uses the http or https scheme. '''
    parsed = urllib.parse.urlparse(url)
    return parsed.scheme in ('http', 'https')
NVlabs/ACID/ACID/src/layers.py | import torch
import torch.nn as nn
# Resnet Blocks
class ResnetBlockFC(nn.Module):
    ''' Fully connected ResNet block:
    out = shortcut(x) + fc_1(relu(fc_0(relu(x)))).

    Args:
        size_in (int): input dimension
        size_out (int): output dimension (defaults to size_in)
        size_h (int): hidden dimension (defaults to min(size_in, size_out))
    '''
    def __init__(self, size_in, size_out=None, size_h=None):
        super().__init__()
        # Fill in defaults for the optional dimensions.
        size_out = size_in if size_out is None else size_out
        size_h = min(size_in, size_out) if size_h is None else size_h
        self.size_in = size_in
        self.size_h = size_h
        self.size_out = size_out
        # Two-layer MLP for the residual branch.
        self.fc_0 = nn.Linear(size_in, size_h)
        self.fc_1 = nn.Linear(size_h, size_out)
        self.actvn = nn.ReLU()
        # Linear projection shortcut only when dimensions differ.
        if size_in == size_out:
            self.shortcut = None
        else:
            self.shortcut = nn.Linear(size_in, size_out, bias=False)
        # Zero-init the last layer so the block starts as (near) identity.
        nn.init.zeros_(self.fc_1.weight)

    def forward(self, x):
        hidden = self.fc_0(self.actvn(x))
        residual = self.fc_1(self.actvn(hidden))
        skip = x if self.shortcut is None else self.shortcut(x)
        return skip + residual
NVlabs/ACID/ACID/src/conv_onet/training.py | import os
import numpy as np
import torch
from torch.nn import functional as F
from src.common import compute_iou
from src.utils import common_util, plushsim_util
from src.training import BaseTrainer
from sklearn.metrics import roc_curve
from scipy import interp
import matplotlib.pyplot as plt
from collections import defaultdict
from tqdm import tqdm
from src.utils.plushsim_util import find_nn_cpu, find_emd_cpu
class PlushTrainer(BaseTrainer):
''' Trainer object for the Occupancy Network.
Args:
model (nn.Module): Occupancy Network model
optimizer (optimizer): pytorch optimizer object
device (device): pytorch device
input_type (str): input type
vis_dir (str): visualization directory
threshold (float): threshold value
eval_sample (bool): whether to evaluate samples
'''
def __init__(self, model, optimizer, cfg, device=None, vis_dir=None, ):
self.model = model
self.optimizer = optimizer
self.device = device
self.vis_dir = vis_dir
self.threshold = cfg['test']['threshold']
self.pos_weight = torch.FloatTensor([cfg['training']['pos_weight']]).to(device)
if 'corr_dim' in cfg['model']['decoder_kwargs'] and cfg['model']['decoder_kwargs']['corr_dim'] > 0:
self.contrastive_threshold = cfg['loss']['contrastive_threshold']
self.use_geodesics = cfg['loss']['use_geodesics']
self.loss_type = cfg['loss']['type']
self.contrastive_coeff_neg = cfg['loss'].get('contrastive_coeff_neg', 1.)
self.contrastive_neg_thres = cfg['loss'].get('contrastive_neg_thres', 1.)
self.contrastive_coeff_pos = cfg['loss'].get('contrastive_coeff_pos', 1.)
self.contrastive_pos_thres= cfg['loss'].get('contrastive_pos_thres', 0.1)
self.scale_with_geodesics = cfg['loss'].get('scale_with_geodesics', False)
if vis_dir is not None and not os.path.exists(vis_dir):
os.makedirs(vis_dir)
self.max_thres = 0.2
self.discretization = 1000
self.base_fpr = np.linspace(0,1,101)
self.base_thres = np.linspace(0,self.max_thres,self.discretization)
def train_step(self, data, it):
''' Performs a training step.
Args:
data (dict): data dictionary
'''
self.model.train()
self.optimizer.zero_grad()
losses = self.compute_loss(data, it)
loss = 0
for v in losses.values():
loss += v
loss.backward()
self.optimizer.step()
return {k:v.item() for k,v in losses.items()}
    def evaluate(self, val_loader):
        ''' Performs a full evaluation pass.

        Args:
            val_loader (dataloader): pytorch dataloader

        Returns:
            tuple: (eval_dict, figs) — eval_dict maps metric name to a
            scalar; figs maps figure name to a matplotlib figure.
        '''
        # eval_list: scalar metrics to average; agg_list: raw per-batch
        # arrays (tpr curves, FMR hit rates, pair distances) to aggregate.
        eval_list = defaultdict(list)
        agg_list = defaultdict(list)
        for data in tqdm(val_loader):
            eval_step_dict, agg_step_dict = self.eval_step(data)
            for k, v in eval_step_dict.items():
                eval_list[k].append(v)
            for k, v in agg_step_dict.items():
                agg_list[k].append(v)
        eval_dict = {k: np.mean(v) for k, v in eval_list.items()}
        # - shape completion ROC
        figs = {}
        if 'tpr' in agg_list:
            figs['OCC_ROC'] = self._get_shape_completion_ROC(agg_list['tpr'])
        if 'fmr_hits' in agg_list:
            fmr = np.array(agg_list['fmr_hits'])
            # Column indices of distance thresholds 0.01/0.02/0.05/0.10 m
            # on the discretized [0, max_thres] axis.
            idx01 = int(0.01 * (self.discretization-1) / self.max_thres)
            idx02 = int(0.02 * (self.discretization-1) / self.max_thres)
            idx05 = int(0.05 * (self.discretization-1) / self.max_thres)
            idx10 = int(0.10 * (self.discretization-1) / self.max_thres)
            # Feature Match Recall at an inlier ratio of 5%.
            eval_dict['FMR.01m_5%'] = np.mean(fmr[:,idx01] > 0.05)
            eval_dict['FMR.02m_5%'] = np.mean(fmr[:,idx02] > 0.05)
            eval_dict['FMR.05m_5%'] = np.mean(fmr[:,idx05] > 0.05)
            eval_dict['FMR.10m_5%'] = np.mean(fmr[:,idx10] > 0.05)
            fmr_std = fmr.std(axis=0)
            eval_dict['FMR.01m_5%_std'] = fmr_std[idx01]
            eval_dict['FMR.02m_5%_std'] = fmr_std[idx02]
            eval_dict['FMR.05m_5%_std'] = fmr_std[idx05]
            eval_dict['FMR.10m_5%_std'] = fmr_std[idx10]
            # FMR curves sweeping one threshold with the other fixed.
            for tau2 in np.linspace(0.01,0.2,5):
                figs[f'FMR_tau1_wrt_tau2={tau2:.3f}']= self._get_FMR_curve_tau1(fmr, tau2=tau2)
            figs['FMR_tau1']= self._get_FMR_curve_tau1(fmr)
            for tau1 in np.linspace(0.01,0.1,5):
                figs[f'FMR_tau2_wrt_tau1={tau1:.3f}']= self._get_FMR_curve_tau2(fmr, tau1=tau1)
        #ax.scatter(fpr, tpr, s=100, alpha=0.5, color="blue")
        # Correspondence pair-distance statistics and histogram.
        if 'pair_dist' in agg_list:
            all_dists = np.concatenate(agg_list['pair_dist'])
            eval_dict['pair_dist'] = all_dists.mean()
            eval_dict['pair_dist_std'] = all_dists.std()
            figs['dist_hist'] = self._get_pair_distance_histogram(all_dists)
        return eval_dict, figs
def _get_pair_distance_histogram(self, all_dists):
fig, ax = plt.subplots(figsize=(10,7))
counts, bins, patches = ax.hist(all_dists, density=True, bins=40) # density=False would make counts
ax.set_ylabel('Density')
ax.set_xlabel('Pair Distance')
return fig
def _get_shape_completion_ROC(self, tpr):
tprs = np.array(tpr)
mean_tprs = tprs.mean(axis=0)
std = tprs.std(axis=0)
tprs_upper = np.minimum(mean_tprs + std, 1)
tprs_lower = np.maximum(mean_tprs - std, 0)
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(self.base_fpr, mean_tprs, 'b')
ax.fill_between(self.base_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.3)
ax.plot([0, 1], [0, 1],'r--')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
ax.set_ylabel('True Positive Rate')
ax.set_xlabel('False Positive Rate')
return fig
def _get_FMR_curve_tau2(self, fmrs, tau1=0.1):
idx05 = int(tau1 * (self.discretization-1) / self.max_thres)
# fix tau 1
means = []
tau1_min = 0.001
tau1_max = 0.25
tau1_ticks = np.linspace(tau1_min, tau1_max, 1000)
for t in tau1_ticks:
means.append(np.mean(fmrs[:,idx05] > t, axis=0))
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(tau1_ticks, means, 'b')
ax.set_xlim([tau1_min, tau1_max])
ax.set_ylim([0.0, 1.0])
ax.set_ylabel('Feature Match Recall')
ax.set_xlabel('Inlier Ratio threshold')
return fig
def _get_FMR_curve_tau1(self, fmrs, tau2=0.05):
# tau2 = 0.05 is the inlier ratio
# fix tau 2
mean_fmrs = np.mean(fmrs > tau2, axis=0)
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(self.base_thres, mean_fmrs, 'b')
ax.set_xlim([0.0, self.max_thres])
ax.set_ylim([0.0, 1.0])
ax.set_ylabel('Feature Match Recall')
ax.set_xlabel('Inlier Distance Threshold')
return fig
    def eval_step(self, data):
        ''' Performs an evaluation step.
        Args:
            data (dict): data dictionary
        Returns:
            (dict, dict): scalar metrics, and per-step arrays to be
            aggregated across the evaluation set (ROC TPR, FMR hits,
            pair distances).
        '''
        self.model.eval()
        device = self.device
        # Move every tensor in the batch onto the evaluation device.
        for k,v in data.items():
            data[k] = v.to(device)
        eval_dict = {}
        agg = {}
        idx = data['idx'].item()
        # Compute iou
        with torch.no_grad():
            outputs = self.model(data)
        gt_occ = data['sampled_occ']
        B,_,N = gt_occ.shape
        # Each batch element packs a pair of frames, hence the B*2 flattening.
        gt_occ = gt_occ.reshape([B*2, N])
        occ_iou_np = (gt_occ >= 0.5).cpu().numpy()
        occ_iou_hat_np = (outputs['occ'].probs >= self.threshold).cpu().numpy()
        iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
        eval_dict['iou'] = iou
        eval_dict[f'iou_{self.threshold}'] = iou
        # Also report IoU at a fixed 0.5 cutoff and at the midpoint between
        # the configured threshold and 0.5, for threshold sensitivity.
        occ_iou_hat_np_2 = (outputs['occ'].probs >= 0.5).cpu().numpy()
        iou = compute_iou(occ_iou_np, occ_iou_hat_np_2).mean()
        eval_dict['iou_0.5'] = iou
        intermediate = (self.threshold + 0.5) / 2
        occ_iou_hat_np_3 = (outputs['occ'].probs >= intermediate).cpu().numpy()
        iou = compute_iou(occ_iou_np, occ_iou_hat_np_3).mean()
        eval_dict[f'iou_{intermediate}'] = iou
        if 'flow' in outputs:
            gt_flow = data['sampled_flow']
            gt_flow = gt_flow.reshape([B*2, N, 3])
            # Per-axis rescaling applied before the MSE — presumably converts
            # normalized coordinates back to scene units; TODO confirm against
            # the dataset normalization.
            constant = torch.from_numpy(np.array((12.,12.,4.)) / 10. / (1.1,1.1,1.1)).float().cuda()
            loss_flow = F.mse_loss(
                outputs['flow'] * constant,
                gt_flow * constant,
                reduction='none')
            eval_dict['flow_all_field'] = loss_flow.sum(-1).mean().item()
            loss_flow_np = loss_flow.sum(-1).cpu().numpy()
            # Restrict the flow error to ground-truth occupied points.
            loss_flow_pos = loss_flow_np[occ_iou_np]
            # if empty scene, no flow of the object will be present
            if len(loss_flow_pos) > 0:
                eval_dict['flow'] = loss_flow_pos.mean()
        gt_pts = data['sampled_pts'].reshape([B*2, N, 3]).cpu().numpy()
        if 'flow' in outputs:
            flow_vis_mean = []
            for i in range(B*2):
                # Map normalized points back to world coordinates, then render
                # to find the camera-visible subset of occupied points.
                gt_occ_pts = gt_pts[i][occ_iou_np[i]] * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
                vis_idx = plushsim_util.render_points(gt_occ_pts,
                                plushsim_util.CAM_EXTR,
                                plushsim_util.CAM_INTR,
                                return_index=True)
                vis_pts = gt_occ_pts[vis_idx]
                flow_vis_mean.append(loss_flow_np[i][occ_iou_np[i]][vis_idx].mean())
            # Flow error restricted to points visible from the camera.
            eval_dict['flow_only_vis'] = np.mean(flow_vis_mean)
        if idx % 10000 == 9999:
            # do expensive evaluations
            # occupancy ROC curve
            fpr, tpr, _ = roc_curve(occ_iou_np.flatten(),
                        outputs['occ'].probs.cpu().numpy().flatten())
            base_fpr = np.linspace(0, 1, 101)
            # Resample TPR onto a fixed FPR grid so curves can be averaged.
            tpr = interp(base_fpr, fpr, tpr)
            tpr[0] = 0.0
            agg['tpr'] = tpr
            f1 = []
            for i in range(B*2):
                # Subsample both clouds before F1 to keep the cost bounded.
                gt_occ_pts = common_util.subsample_points(gt_pts[i][occ_iou_np[i]], return_index=False)
                pred_pts = common_util.subsample_points(gt_pts[i][occ_iou_hat_np[i]], return_index=False)
                f1.append(common_util.f1_score(pred_pts, gt_occ_pts))
            f1 = np.array(f1)
            f1score, precision, recall = f1.mean(axis=0)
            eval_dict['f1'] = f1score
            eval_dict['precision'] = precision
            eval_dict['recall'] = recall
        if 'corr' in outputs:
            # data prep corr
            corr_f = outputs['corr']
            num_pairs = corr_f.shape[1]
            # Ground-truth matching is the identity permutation.
            gt_match = np.arange(num_pairs)
            src_f = corr_f[0].cpu().numpy()
            tgt_f = corr_f[1].cpu().numpy()
            # data prep pts
            pts = data['sampled_pts'].cpu().numpy().squeeze()
            src_pts = pts[0][:num_pairs] * (12,12,4) / (1.1,1.1,1.1)
            tgt_pts = pts[1][:num_pairs] * (12,12,4) / (1.1,1.1,1.1)
            # normalize points to maximum length of 1.
            tgt_pts = tgt_pts / np.ptp(tgt_pts, axis=0).max()
            # Nearest neighbours in feature space (earth-mover assignment).
            _, nn_inds_st = find_emd_cpu(src_f, tgt_f)
            # doing Feature-match recall.
            eval_dict['match_exact'] = np.mean(gt_match == nn_inds_st)
            dist_st = np.linalg.norm(tgt_pts - tgt_pts[nn_inds_st], axis=1)
            eval_dict['match_0.05'] = np.mean(dist_st < 0.05)
            eval_dict['match_0.1'] = np.mean(dist_st < 0.1)
            # Hit rate at every distance threshold of the FMR discretization.
            hits = np.array([np.mean(dist_st < f) for f in self.base_thres])
            agg['fmr_hits'] = hits
            agg['pair_dist'] = dist_st
        return eval_dict, agg
    def compute_loss(self, data, it):
        ''' Computes the loss.
        Args:
            data (dict): data dictionary
            it (int): training iteration (currently unused here)
        Returns:
            dict: named loss terms (occupancy BCE, flow MSE, contrastive
            positive/negative terms), to be summed by the caller.
        '''
        device = self.device
        # Move every tensor in the batch onto the training device.
        for k,v in data.items():
            data[k] = v.to(device)
        outputs = self.model(data)
        loss = {}
        # NOTE: eval_dict is filled below but never returned — kept for parity
        # with eval_step; only `loss` leaves this function.
        eval_dict = {}
        # Occupancy Loss
        if 'occ' in outputs:
            # gt points
            gt_occ = data['sampled_occ']
            B,_,N = gt_occ.shape
            # Each batch element packs a pair of frames, hence B*2.
            gt_occ = gt_occ.reshape([B*2, N])
            occ_iou_np = (gt_occ >= 0.5).cpu().numpy()
            # pred
            logits = outputs['occ'].logits
            loss_i = F.binary_cross_entropy_with_logits(
                logits, gt_occ, reduction='none', pos_weight=self.pos_weight)
            loss['occ'] = loss_i.mean()
            # eval infos
            occ_iou_hat_np = (outputs['occ'].probs >= self.threshold).cpu().numpy()
            iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
            eval_dict['iou'] = iou
        if 'flow' in outputs :
            gt_occ = data['sampled_occ']
            B,_,N = gt_occ.shape
            gt_occ = gt_occ.reshape([B*2, N])
            mask = (gt_occ > 0.5).bool()
            gt_flow = data['sampled_flow']
            gt_flow = gt_flow.reshape([B*2, N, 3])
            # Split flow targets/predictions into free-space (0) and
            # occupied (1) points; free-space flow is down-weighted 100x.
            flow_gt_0 = gt_flow[~mask]
            flow_gt_1 = gt_flow[mask]
            flow_pred = outputs['flow']
            flow_pred_0 = flow_pred[~mask]
            flow_pred_1 = flow_pred[mask]
            loss['flow'] = F.mse_loss(flow_pred_1, flow_gt_1) + 0.01 * F.mse_loss(flow_pred_0, flow_gt_0)
        if 'corr' in outputs:
            # Geodesic distances between candidate pairs decide which pairs
            # count as positives for the contrastive objective.
            dist_vec = data['geo_dists']
            corr_f = outputs['corr']
            src_f = corr_f[0]
            src_pos = src_f[dist_vec <= self.contrastive_threshold]
            num_positive = (dist_vec <= self.contrastive_threshold).sum()
            tgt_f = corr_f[1]
            tgt_pos = tgt_f[dist_vec <= self.contrastive_threshold]
            if self.loss_type == "contrastive":
                if num_positive > 0:
                    src_neg = src_f[dist_vec > self.contrastive_threshold]
                    tgt_neg = tgt_f[dist_vec > self.contrastive_threshold]
                    # Positive loss
                    # Hinge on the feature distance of positive pairs; the
                    # 1e-4 keeps sqrt differentiable at zero.
                    pos_loss = F.relu(((src_pos - tgt_pos).pow(2).sum(1) + 1e-4).sqrt()
                                    - self.contrastive_pos_thres).pow(2)
                    pos_loss_mean = pos_loss.mean()
                    loss['contrastive_pos'] = self.contrastive_coeff_pos * pos_loss_mean
                    # Negative loss
                    # Margin grows with (log-scaled) geodesic distance, capped at 2.
                    neg_dist = (dist_vec[dist_vec > self.contrastive_threshold]
                                    / self.contrastive_threshold).log() + 1.
                    neg_dist = torch.clamp(neg_dist, max=2)
                    neg_loss = F.relu(neg_dist -
                        ((src_neg - tgt_neg).pow(2).sum(1) + 1e-4).sqrt()).pow(2)
                    if self.scale_with_geodesics:
                        neg_loss = neg_loss / neg_dist
                    neg_loss_mean = neg_loss.mean()
                    loss['contrastive_neg'] = self.contrastive_coeff_neg * neg_loss_mean
        return loss
| 15,474 | Python | 42.105849 | 109 | 0.511439 |
NVlabs/ACID/ACID/src/conv_onet/config.py | import os
from src.encoder import encoder_dict
from src.conv_onet import models, training
from src.conv_onet import generation
from src import data
def get_model(cfg,device=None, dataset=None, **kwargs):
    ''' Returns the model specified by the config.

    Args:
        cfg (dict): imported yaml config
        device (device): pytorch device
        dataset (dataset): dataset

    Raises:
        ValueError: if ``cfg['model']['type']`` is not a known model type.
    '''
    model_type = cfg['model']['type']
    if model_type == 'geom':
        return get_geom_model(cfg,device,dataset)
    elif model_type == 'combined':
        return get_combined_model(cfg,device,dataset)
    # Previously an unrecognized type fell through and silently returned
    # None, deferring the failure to some later attribute access.
    raise ValueError(f"unknown model type: {model_type!r}")
def get_combined_model(cfg, device=None, dataset=None, **kwargs):
    ''' Return the combined (perception + action) Occupancy Network model.
    Args:
        cfg (dict): imported yaml config
        device (device): pytorch device
        dataset (dataset): dataset
    '''
    dim = cfg['data']['dim']
    act_dim = cfg['data']['act_dim']
    obj_c_dim = cfg['model']['obj_c_dim']
    decoder_kwargs = cfg['model']['decoder_kwargs']
    obj_encoder_kwargs = cfg['model']['obj_encoder_kwargs']
    padding = cfg['data']['padding']
    decoder = 'combined_decoder'
    encoder = 'geom_encoder'
    # BUG FIX: the original condition was `'env_c_dim' in cfg['model'] and
    # 'env_c_dim' != 0` — a string-vs-int comparison that is always True.
    # Check the configured *value* so env_c_dim: 0 disables the env encoder.
    if cfg['model'].get('env_c_dim', 0) != 0:
        env_c_dim = cfg['model']['env_c_dim']
        env_encoder_kwargs = cfg['model']['env_encoder_kwargs']
        env_encoder = encoder_dict[encoder](
            dim=dim, c_dim=env_c_dim, padding=padding,
            **env_encoder_kwargs
        )
    else:
        env_c_dim = 0
        env_encoder=None
    decoder = models.decoder_dict[decoder](
        dim=dim,
        c_per_dim=obj_c_dim+env_c_dim,
        c_act_dim=obj_c_dim+env_c_dim,
        padding=padding,
        **decoder_kwargs
    )
    # Separate encoders for the perception (geometry) and action streams.
    obj_per_encoder = encoder_dict[encoder](
        dim=dim, c_dim=obj_c_dim, padding=padding,
        **obj_encoder_kwargs
    )
    obj_act_encoder = encoder_dict[encoder](
        dim=act_dim, c_dim=obj_c_dim, padding=padding,
        **obj_encoder_kwargs
    )
    model = models.ConvImpDyn(
        obj_per_encoder, obj_act_encoder, env_encoder, decoder, device=device
    )
    return model
def get_geom_model(cfg, device=None, dataset=None, **kwargs):
    ''' Return the geometry-only Occupancy Network model.
    Args:
        cfg (dict): imported yaml config
        device (device): pytorch device
        dataset (dataset): dataset
    '''
    dim = cfg['data']['dim']
    obj_c_dim = cfg['model']['obj_c_dim']
    decoder_kwargs = cfg['model']['decoder_kwargs']
    obj_encoder_kwargs = cfg['model']['obj_encoder_kwargs']
    padding = cfg['data']['padding']
    decoder = 'geom_decoder'
    encoder = 'geom_encoder'
    # BUG FIX: the original condition was `'env_c_dim' in cfg['model'] and
    # 'env_c_dim' != 0` — a string-vs-int comparison that is always True.
    # Check the configured *value* so env_c_dim: 0 disables the env encoder.
    if cfg['model'].get('env_c_dim', 0) != 0:
        env_c_dim = cfg['model']['env_c_dim']
        env_encoder_kwargs = cfg['model']['env_encoder_kwargs']
        env_encoder = encoder_dict[encoder](
            dim=dim, c_dim=env_c_dim, padding=padding,
            **env_encoder_kwargs
        )
    else:
        env_c_dim = 0
        env_encoder=None
    decoder = models.decoder_dict[decoder](
        dim=dim, c_dim=obj_c_dim+env_c_dim, padding=padding,
        **decoder_kwargs
    )
    obj_encoder = encoder_dict[encoder](
        dim=dim, c_dim=obj_c_dim, padding=padding,
        **obj_encoder_kwargs
    )
    model = models.ConvOccGeom(
        obj_encoder, env_encoder, decoder, device=device
    )
    return model
def get_trainer(model, optimizer, cfg, device, **kwargs):
    ''' Returns the trainer object.
    Args:
        model (nn.Module): the Occupancy Network model
        optimizer (optimizer): pytorch optimizer object
        cfg (dict): imported yaml config
        device (device): pytorch device
    '''
    # Visualizations are written under <out_dir>/vis.
    vis_dir = os.path.join(cfg['training']['out_dir'], 'vis')
    return training.PlushTrainer(
        model, optimizer, cfg,
        device=device,
        vis_dir=vis_dir)
def get_generator(model, cfg, device, **kwargs):
    ''' Returns the generator object.
    Args:
        model (nn.Module): Occupancy Network model
        cfg (dict): imported yaml config
        device (device): pytorch device
    '''
    gen_cfg = cfg['generation']
    return generation.Generator3D(
        model,
        device=device,
        threshold=cfg['test']['threshold'],
        resolution0=gen_cfg['resolution_0'],
        upsampling_steps=gen_cfg['upsampling_steps'],
        sample=gen_cfg['use_sampling'],
        refinement_step=gen_cfg['refinement_step'],
        simplify_nfaces=gen_cfg['simplify_nfaces'],
        padding=cfg['data']['padding'],
        vol_info=None,
        vol_bound=None,
    )
| 4,514 | Python | 29.1 | 77 | 0.597475 |
NVlabs/ACID/ACID/src/conv_onet/__init__.py | from src.conv_onet import (
config, generation, training, models
)
__all__ = [
config, generation, training, models
]
| 127 | Python | 14.999998 | 40 | 0.661417 |
NVlabs/ACID/ACID/src/conv_onet/generation.py | import torch
import torch.optim as optim
from torch import autograd
import numpy as np
from tqdm import trange, tqdm
import trimesh
from src.utils import libmcubes, common_util
from src.common import make_3d_grid, normalize_coord, add_key, coord2index
from src.utils.libmise import MISE
import time
import math
counter = 0
class Generator3D(object):
    ''' Generator class for Occupancy Networks.
    It provides functions to generate the final mesh as well refining options.
    Args:
        model (nn.Module): trained Occupancy Network model
        points_batch_size (int): batch size for points evaluation
        threshold (float): threshold value
        refinement_step (int): number of refinement steps
        device (device): pytorch device
        resolution0 (int): start resolution for MISE
        upsampling steps (int): number of upsampling steps
        with_normals (bool): whether normals should be estimated
        padding (float): how much padding should be used for MISE
        sample (bool): whether z should be sampled
        input_type (str): type of input
        vol_info (dict): volume infomation
        vol_bound (dict): volume boundary
        simplify_nfaces (int): number of faces the mesh should be simplified to
    '''
    def __init__(self, model, points_batch_size=100000,
                 threshold=0.5, refinement_step=0, device=None,
                 resolution0=16, upsampling_steps=3,
                 with_normals=False, padding=0.1, sample=False,
                 input_type = None,
                 vol_info = None,
                 vol_bound = None,
                 simplify_nfaces=None):
        self.model = model.to(device)
        self.points_batch_size = points_batch_size
        self.refinement_step = refinement_step
        self.threshold = threshold
        self.device = device
        self.resolution0 = resolution0
        self.upsampling_steps = upsampling_steps
        self.with_normals = with_normals
        self.input_type = input_type
        self.padding = padding
        self.sample = sample
        self.simplify_nfaces = simplify_nfaces
        # for pointcloud_crop
        self.vol_bound = vol_bound
        if vol_info is not None:
            self.input_vol, _, _ = vol_info
    def generate_mesh(self, data, return_stats=True):
        ''' Generates the output mesh.
        Args:
            data (tensor): data tensor
            return_stats (bool): whether stats should be returned
        '''
        self.model.eval()
        device = self.device
        for k,v in data.items():
            data[k] = v.to(device)
        stats_dict = {}
        t0 = time.time()
        # obtain features for all crops
        with torch.no_grad():
            c = self.model.encode_inputs(data)
        # Keep only the first batch element of every feature plane; the
        # encoder may return a single dict or a tuple of dicts.
        if type(c) is tuple:
            for cs in c:
                for k,v in cs.items():
                    cs[k] = v[0].unsqueeze(0)
        else:
            for k,v in c.items():
                c[k] = v[0].unsqueeze(0)
        stats_dict['time (encode inputs)'] = time.time() - t0
        mesh = self.generate_from_latent(c, stats_dict=stats_dict)
        if return_stats:
            return mesh, stats_dict
        else:
            return mesh
    def generate_from_latent(self, c=None, stats_dict={}, **kwargs):
        ''' Generates mesh from latent.
        Works for shapes normalized to a unit cube
        Args:
            c (tensor): latent conditioned code c
            stats_dict (dict): stats dictionary
        '''
        # Convert the probability threshold into logit space, since
        # eval_points returns raw logits.
        threshold = np.log(self.threshold) - np.log(1. - self.threshold)
        t0 = time.time()
        # Compute bounding box size
        box_size = 1 + self.padding
        # Shortcut
        if self.upsampling_steps == 0:
            # Single dense evaluation at the base resolution.
            nx = self.resolution0
            pointsf = box_size * make_3d_grid(
                (-0.5,)*3, (0.5,)*3, (nx,)*3
            )
            values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
            value_grid = values.reshape(nx, nx, nx)
        else:
            # Multi-resolution iso-surface extraction: repeatedly query the
            # points MISE proposes until the octree is fully refined.
            mesh_extractor = MISE(
                self.resolution0, self.upsampling_steps, threshold)
            points = mesh_extractor.query()
            while points.shape[0] != 0:
                # Query points
                pointsf = points / mesh_extractor.resolution
                # Normalize to bounding box
                pointsf = box_size * (pointsf - 0.5)
                pointsf = torch.FloatTensor(pointsf).to(self.device)
                # Evaluate model and update
                values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
                values = values.astype(np.float64)
                mesh_extractor.update(points, values)
                points = mesh_extractor.query()
            value_grid = mesh_extractor.to_dense()
        # Extract mesh
        stats_dict['time (eval points)'] = time.time() - t0
        mesh = self.extract_mesh(value_grid, c, stats_dict=stats_dict)
        return mesh
    def eval_points(self, p, c=None, vol_bound=None, **kwargs):
        ''' Evaluates the occupancy values for the points.
        Args:
            p (tensor): points
            c (tensor): encoded feature volumes
        Returns occupancy *logits*, evaluated in chunks of
        ``points_batch_size`` to bound memory use.
        '''
        p_split = torch.split(p, self.points_batch_size)
        occ_hats = []
        for pi in p_split:
            pi = pi.unsqueeze(0).to(self.device)
            with torch.no_grad():
                occ_hat = self.model.eval_points(pi, c, **kwargs)['occ'].logits
            occ_hats.append(occ_hat.squeeze(0).detach().cpu())
        occ_hat = torch.cat(occ_hats, dim=0)
        return occ_hat
    def extract_mesh(self, occ_hat, c=None, stats_dict=dict()):
        ''' Extracts the mesh from the predicted occupancy grid.
        Args:
            occ_hat (tensor): value grid of occupancies
            c (tensor): encoded feature volumes
            stats_dict (dict): stats dictionary
        '''
        # Some short hands
        n_x, n_y, n_z = occ_hat.shape
        box_size = 1 + self.padding
        threshold = np.log(self.threshold) - np.log(1. - self.threshold)
        # Make sure that mesh is watertight
        t0 = time.time()
        # Pad with a very negative value so marching cubes closes the surface
        # at the volume boundary.
        occ_hat_padded = np.pad(
            occ_hat, 1, 'constant', constant_values=-1e6)
        vertices, triangles = libmcubes.marching_cubes(
            occ_hat_padded, threshold)
        stats_dict['time (marching cubes)'] = time.time() - t0
        # Strange behaviour in libmcubes: vertices are shifted by 0.5
        vertices -= 0.5
        # # Undo padding
        vertices -= 1
        if self.vol_bound is not None:
            # Scale the mesh back to its original metric
            bb_min = self.vol_bound['query_vol'][:, 0].min(axis=0)
            bb_max = self.vol_bound['query_vol'][:, 1].max(axis=0)
            mc_unit = max(bb_max - bb_min) / (self.vol_bound['axis_n_crop'].max() * self.resolution0*2**self.upsampling_steps)
            vertices = vertices * mc_unit + bb_min
        else:
            # Normalize to bounding box
            vertices /= np.array([n_x-1, n_y-1, n_z-1])
            vertices = box_size * (vertices - 0.5)
        # Create mesh
        # NOTE(review): the z-axis is divided by 3 here — presumably undoes a
        # dataset-specific vertical scaling; confirm against the data pipeline.
        mesh = trimesh.Trimesh(vertices / (1., 1., 3), triangles,
                               vertex_normals=None,
                               process=False)
        # Directly return if mesh is empty
        if vertices.shape[0] == 0:
            return mesh
        # TODO: normals are lost here
        if self.simplify_nfaces is not None:
            t0 = time.time()
            from src.utils.libsimplify import simplify_mesh
            mesh = simplify_mesh(mesh, self.simplify_nfaces, 5.)
            stats_dict['time (simplify)'] = time.time() - t0
        # Refine mesh
        if self.refinement_step > 0:
            t0 = time.time()
            self.refine_mesh(mesh, occ_hat, c)
            stats_dict['time (refine)'] = time.time() - t0
        return mesh
    def generate_pointcloud(self, data, threshold=0.75, use_gt_occ=False):
        ''' Generates colored point clouds for a pair of frames.
        Args:
            data (dict): batch dictionary
            threshold (float): occupancy probability cutoff
            use_gt_occ (bool): use ground-truth occupancy instead of the
                prediction when selecting points
        '''
        self.model.eval()
        device = self.device
        # NOTE(review): the two lines above are duplicated below; harmless.
        self.model.eval()
        device = self.device
        for k,v in data.items():
            data[k] = v.to(device)
        stats_dict = {}
        t0 = time.time()
        # obtain features for all crops
        with torch.no_grad():
            c = self.model.encode_inputs(data)
        pts = data['sampled_pts']
        B,_,N,C = pts.shape
        pts = pts.reshape([B*2,N,C])
        # NOTE(review): dim=-1 splits along the coordinate axis (size 3), so
        # with the default points_batch_size this is a single chunk; splitting
        # along dim=1 (points) was likely intended — confirm before changing.
        p_split = torch.split(pts, self.points_batch_size, dim=-1)
        occ_hats = []
        features = []
        flows = []
        for pi in p_split:
            with torch.no_grad():
                outputs = self.model.eval_points(pi, c)
            occ_hats.append((outputs['occ'].probs > threshold).detach().cpu())
            if 'corr' in outputs:
                features.append(outputs['corr'].detach().cpu())
            if 'flow' in outputs:
                flows.append(outputs['flow'].detach().cpu())
        pts = pts.cpu().numpy()
        occ_hat = torch.cat(occ_hats, dim=1).numpy()
        if use_gt_occ:
            occ_hat = data['sampled_occ'].reshape([B*2, N]).cpu().numpy()
        # Keep only occupied points of each frame, spatially subsampled.
        pos_pts0 = pts[0][occ_hat[0] == 1.].reshape((-1,3))
        pos_idx0 = common_util.subsample_points(pos_pts0, resolution=0.013)
        pos_pts0 = pos_pts0[pos_idx0]
        pos_pts1 = pts[1][occ_hat[1] == 1.].reshape((-1,3))
        pos_idx1 = common_util.subsample_points(pos_pts1, resolution=0.013)
        pos_pts1 = pos_pts1[pos_idx1]
        pos_pts = np.concatenate([pos_pts0, pos_pts1], axis=0) / (1.,1.,3.)
        if len(features) != 0:
            # Color points by a t-SNE embedding of their correspondence
            # features so matching regions get similar colors.
            feature = torch.cat(features, dim=1).numpy()
            f_dim = feature.shape[-1]
            pos_f0 = feature[0][occ_hat[0] == 1.].reshape((-1,f_dim))
            pos_f1 = feature[1][occ_hat[1] == 1.].reshape((-1,f_dim))
            pos_f0 = pos_f0[pos_idx0]
            pos_f1 = pos_f1[pos_idx1]
            pos_f = np.concatenate([pos_f0, pos_f1], axis=0)
            if pos_f.shape[0] < 100:
                pcloud_both = pos_pts
            else:
                tsne_result = common_util.embed_tsne(pos_f)
                colors = common_util.get_color_map(tsne_result)
                pcloud_both = np.concatenate([pos_pts, colors], axis=1)
        else:
            pcloud_both = pos_pts
        pcloud0 = pcloud_both[:pos_pts0.shape[0]]
        pcloud1 = pcloud_both[pos_pts0.shape[0]:]
        if len(flows) != 0:
            # Advect each cloud by the predicted flow to show the unrolled
            # (next-step) configuration.
            flow = torch.cat(flows, dim=1).numpy() / 10.
            pos_f0 = flow[0][occ_hat[0] == 1.].reshape((-1,3))
            pos_f1 = flow[1][occ_hat[1] == 1.].reshape((-1,3))
            pos_f0 = pos_f0[pos_idx0]
            pos_f1 = pos_f1[pos_idx1]
            pcloud_unroll_0 = pcloud0.copy()
            pcloud_unroll_0[:,:3] += pos_f0 / (1.,1.,3.)
            pcloud_unroll_1 = pcloud1.copy()
            pcloud_unroll_1[:,:3] += pos_f1 / (1.,1.,3.)
            return pcloud0, pcloud1,pcloud_unroll_0,pcloud_unroll_1
        return pcloud0, pcloud1
    def refine_mesh(self, mesh, occ_hat, c=None):
        ''' Refines the predicted mesh.
        Args:
            mesh (trimesh object): predicted mesh
            occ_hat (tensor): predicted occupancy grid
            c (tensor): latent conditioned code c
        '''
        self.model.eval()
        # Some shorthands
        n_x, n_y, n_z = occ_hat.shape
        assert(n_x == n_y == n_z)
        # threshold = np.log(self.threshold) - np.log(1. - self.threshold)
        threshold = self.threshold
        # Vertex parameter
        v0 = torch.FloatTensor(mesh.vertices).to(self.device)
        v = torch.nn.Parameter(v0.clone())
        # Faces of mesh
        faces = torch.LongTensor(mesh.faces).to(self.device)
        # Start optimization
        optimizer = optim.RMSprop([v], lr=1e-4)
        for it_r in trange(self.refinement_step):
            optimizer.zero_grad()
            # Loss
            # Sample a random point on each face via Dirichlet barycentric
            # weights, then pull it toward the iso-surface while aligning
            # face normals with the occupancy gradient.
            face_vertex = v[faces]
            eps = np.random.dirichlet((0.5, 0.5, 0.5), size=faces.shape[0])
            eps = torch.FloatTensor(eps).to(self.device)
            face_point = (face_vertex * eps[:, :, None]).sum(dim=1)
            face_v1 = face_vertex[:, 1, :] - face_vertex[:, 0, :]
            face_v2 = face_vertex[:, 2, :] - face_vertex[:, 1, :]
            face_normal = torch.cross(face_v1, face_v2)
            face_normal = face_normal / \
                (face_normal.norm(dim=1, keepdim=True) + 1e-10)
            face_value = torch.sigmoid(
                self.model.eval_points(face_point.unsqueeze(0), c)['occ'].logits
            )
            normal_target = -autograd.grad(
                [face_value.sum()], [face_point], create_graph=True)[0]
            normal_target = \
                normal_target / \
                (normal_target.norm(dim=1, keepdim=True) + 1e-10)
            loss_target = (face_value - threshold).pow(2).mean()
            loss_normal = \
                (face_normal - normal_target).pow(2).sum(dim=1).mean()
            loss = loss_target + 0.01 * loss_normal
            # Update
            loss.backward()
            optimizer.step()
        mesh.vertices = v.data.cpu().numpy()
        return mesh
    def generate_occ_grid(self, c=None, stats_dict={}, **kwargs):
        ''' Generates mesh from latent.
        Works for shapes normalized to a unit cube
        Args:
            c (tensor): latent conditioned code c
            stats_dict (dict): stats dictionary
        Returns the dense occupancy value grid (no mesh extraction) —
        otherwise identical to the grid-building half of
        ``generate_from_latent``.
        '''
        threshold = np.log(self.threshold) - np.log(1. - self.threshold)
        t0 = time.time()
        # Compute bounding box size
        box_size = 1 + self.padding
        # Shortcut
        if self.upsampling_steps == 0:
            nx = self.resolution0
            pointsf = box_size * make_3d_grid(
                (-0.5,)*3, (0.5,)*3, (nx,)*3
            )
            values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
            value_grid = values.reshape(nx, nx, nx)
        else:
            mesh_extractor = MISE(
                self.resolution0, self.upsampling_steps, threshold)
            points = mesh_extractor.query()
            while points.shape[0] != 0:
                # Query points
                pointsf = points / mesh_extractor.resolution
                # Normalize to bounding box
                pointsf = box_size * (pointsf - 0.5)
                pointsf = torch.FloatTensor(pointsf).to(self.device)
                # Evaluate model and update
                values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
                values = values.astype(np.float64)
                mesh_extractor.update(points, values)
                points = mesh_extractor.query()
            value_grid = mesh_extractor.to_dense()
        return value_grid
| 14,928 | Python | 36.044665 | 126 | 0.536509 |
NVlabs/ACID/ACID/src/conv_onet/models/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.layers import ResnetBlockFC
from src.common import normalize_coordinate, normalize_3d_coordinate, map2local
class GeomDecoder(nn.Module):
    ''' Decoder.
    Instead of conditioning on global features, on plane/volume local features.
    Args:
        dim (int): input dimension
        c_dim (int): dimension of latent conditioned code c
        corr_dim (int): dimension of the correspondence feature head
            (0 disables it)
        corr_head (bool): if True, correspondence features come from a
            dedicated linear head; if False, the pre-activation trunk
            features are returned directly
        hidden_size (int): hidden size of Decoder network
        n_blocks (int): number of blocks ResNetBlockFC layers
        leaky (bool): whether to use leaky ReLUs
        sample_mode (str): sampling feature strategy, bilinear|nearest
        padding (float): conventional padding paramter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
    '''
    def __init__(self, dim=3, c_dim=128,
                 corr_dim=0, corr_head=True,
                 hidden_size=256, n_blocks=5, leaky=False,
                 sample_mode='bilinear', padding=0.1):
        super().__init__()
        self.c_dim = c_dim
        self.n_blocks = n_blocks
        self.corr_dim = corr_dim
        self.corr_head = corr_head
        # One conditioning projection per ResNet block.
        self.fc_c_occ = nn.ModuleList([
            nn.Linear(c_dim, hidden_size) for i in range(n_blocks)
        ])
        self.fc_p = nn.Linear(dim, hidden_size)
        self.blocks_occ = nn.ModuleList([
            ResnetBlockFC(hidden_size) for i in range(n_blocks)
        ])
        self.fc_occ = nn.Linear(hidden_size, 1)
        if self.corr_dim != 0 and corr_head:
            self.fc_out_corr = nn.Linear(hidden_size, corr_dim)
        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
        self.sample_mode = sample_mode
        self.padding = padding
    def sample_plane_feature(self, p, c, plane='xz'):
        # Bilinearly (or nearest-) sample the feature plane at the projected
        # 2D coordinates of the query points.
        xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
        xy = xy[:, :, None].float()
        vgrid = 2.0 * xy - 1.0 # normalize to (-1, 1)
        c = F.grid_sample(c, vgrid, padding_mode='border', align_corners=True, mode=self.sample_mode).squeeze(-1)
        return c
    def forward(self, p, c_plane, **kwargs):
        # Sum the features sampled from the three canonical planes.
        c = 0
        c += self.sample_plane_feature(p, c_plane['xz'], plane='xz')
        c += self.sample_plane_feature(p, c_plane['xy'], plane='xy')
        c += self.sample_plane_feature(p, c_plane['yz'], plane='yz')
        c = c.transpose(1, 2)
        p = p.float()
        x = self.fc_p(p)
        net = x
        for i in range(self.n_blocks):
            net = net + self.fc_c_occ[i](c)
            net = self.blocks_occ[i](net)
        results = {}
        # Without a dedicated head, expose the raw (pre-activation) trunk
        # features as the correspondence embedding.
        if self.corr_dim != 0 and not self.corr_head:
            results['corr'] = net
        net = self.actvn(net)
        results['occ'] = self.fc_occ(net).squeeze(-1)
        if self.corr_dim != 0 and self.corr_head:
            results['corr'] = self.fc_out_corr(net)
        return results
class CombinedDecoder(nn.Module):
    ''' Decoder.
    Instead of conditioning on global features, on plane/volume local features.
    Runs two parallel trunks: a perception trunk (occupancy + correspondence)
    and an action trunk (flow), optionally fusing perception features into
    the action trunk at every block.
    Args:
        dim (int): input dimension
        c_per_dim (int): dimension of the perception latent code
        c_act_dim (int): dimension of the action latent code
        corr_dim (int): dimension of the correspondence head (0 disables it)
        corr_head (bool): whether to use a dedicated correspondence head
        hidden_size (int): hidden size of Decoder network
        n_blocks (int): number of blocks ResNetBlockFC layers
        leaky (bool): whether to use leaky ReLUs
        sample_mode (str): sampling feature strategy, bilinear|nearest
        padding (float): conventional padding paramter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
        fuse (bool): fuse perception features into the action trunk
        detach (bool): detach perception features before fusing
        anneal_gradient (bool): currently unused (see commented-out code)
    '''
    def __init__(self, dim=3, c_per_dim=128, c_act_dim=128,
                 corr_dim=0, corr_head=True,
                 hidden_size=256, n_blocks=5, leaky=False,
                 sample_mode='bilinear', padding=0.1, fuse=True, detach=False, anneal_gradient=True):
        super().__init__()
        self.c_per_dim = c_per_dim
        self.c_act_dim = c_act_dim
        self.n_blocks = n_blocks
        self.corr_dim = corr_dim
        self.corr_head = corr_head
        self.fuse = fuse
        self.detach = detach
        self.anneal_gradient = anneal_gradient
        self.fc_c_per = nn.ModuleList([
            nn.Linear(c_per_dim, hidden_size) for i in range(n_blocks)
        ])
        self.fc_c_act = nn.ModuleList([
            nn.Linear(c_act_dim, hidden_size) for i in range(n_blocks)
        ])
        if self.fuse:
            # Per-block merge of action features with perception features.
            self.fc_c_merge = nn.ModuleList([
                nn.Linear(hidden_size*2, hidden_size) for i in range(n_blocks)
            ])
        self.fc_p_per = nn.Linear(dim, hidden_size)
        self.fc_p_act = nn.Linear(dim, hidden_size)
        self.blocks_per = nn.ModuleList([
            ResnetBlockFC(hidden_size) for i in range(n_blocks)
        ])
        self.blocks_act = nn.ModuleList([
            ResnetBlockFC(hidden_size) for i in range(n_blocks)
        ])
        self.fc_occ = nn.Linear(hidden_size, 1)
        self.fc_flow= nn.Linear(hidden_size, 3)
        if self.corr_dim != 0 and corr_head:
            self.fc_out_corr = nn.Linear(hidden_size, corr_dim)
            if self.fuse:
                self.fc_act_corr_merge = nn.Linear(hidden_size+corr_dim, hidden_size)
        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)
        self.sample_mode = sample_mode
        self.padding = padding
    def sample_plane_feature(self, p, c, plane='xz'):
        # Bilinearly (or nearest-) sample the feature plane at the projected
        # 2D coordinates of the query points.
        xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
        xy = xy[:, :, None].float()
        vgrid = 2.0 * xy - 1.0 # normalize to (-1, 1)
        c = F.grid_sample(c, vgrid, padding_mode='border', align_corners=True, mode=self.sample_mode).squeeze(-1)
        return c
    def decode_perception(self, p, c_per_plane):
        # Sum the perception features sampled from the three planes.
        c_per = 0
        c_per += self.sample_plane_feature(p, c_per_plane['xz'], plane='xz')
        c_per += self.sample_plane_feature(p, c_per_plane['xy'], plane='xy')
        c_per += self.sample_plane_feature(p, c_per_plane['yz'], plane='yz')
        c_per = c_per.transpose(1, 2)
        p = p.float()
        net_per = self.fc_p_per(p)
        # Collect per-block features so the action trunk can fuse them later.
        features = []
        for i in range(self.n_blocks):
            net_per = net_per + self.fc_c_per[i](c_per)
            net_per = self.blocks_per[i](net_per)
            if self.detach:
                features.append(net_per.detach())
            else:
                features.append(net_per)
        net_per = self.actvn(net_per)
        results = {}
        results['occ'] = self.fc_occ(net_per).squeeze(-1)
        if self.corr_dim != 0 and self.corr_head:
            corr = self.fc_out_corr(net_per)
            # The correspondence embedding is appended as an extra fusion input.
            features.append(corr)
            results['corr'] = corr
        # if self.anneal_gradient:
        #     for i,p in enumerate(features):
        #         features[i] = p * 0.1 + p.detach() * 0.9
        return results, features
    def decode_action(self, p, c_act_plane, per_features):
        # Sum the action features sampled from the three planes.
        c_act = 0
        c_act += self.sample_plane_feature(p, c_act_plane['xz'], plane='xz')
        c_act += self.sample_plane_feature(p, c_act_plane['xy'], plane='xy')
        c_act += self.sample_plane_feature(p, c_act_plane['yz'], plane='yz')
        c_act = c_act.transpose(1, 2)
        p = p.float()
        net_act = self.fc_p_act(p)
        for i in range(self.n_blocks):
            net_act = net_act + self.fc_c_act[i](c_act)
            if self.fuse:
                # Concatenate the matching perception block's features before
                # each action ResNet block.
                net_act = self.blocks_act[i](
                    self.fc_c_merge[i](
                        torch.cat( ( net_act, per_features[i]), dim=-1)))
                        # (net_per.detach()*0.9+net_per * 0.1)), dim=-1)))
            else:
                net_act = self.blocks_act[i](net_act)
        net_act = self.actvn(net_act)
        if self.corr_dim != 0 and self.corr_head:
            if self.fuse:
                # Fold the (detached) correspondence embedding into the flow head.
                net_act = self.fc_act_corr_merge(
                    torch.cat((net_act, per_features[-1].detach()), dim=-1))
        return {'flow':self.fc_flow(net_act)}
    def forward(self, p, c_per_plane, c_act_plane):
        results, per_features = self.decode_perception(p, c_per_plane)
        results['flow'] = self.decode_action(p, c_act_plane, per_features)['flow']
        return results
| 8,333 | Python | 35.876106 | 114 | 0.554062 |
NVlabs/ACID/ACID/src/conv_onet/models/__init__.py | import torch
import numpy as np
import torch.nn as nn
from torch import distributions as dist
from src.conv_onet.models import decoder
from src.utils import plushsim_util
# Decoder dictionary
# Maps the decoder names used in YAML configs to their implementing classes
# (looked up by key in src/conv_onet/config.py via models.decoder_dict).
decoder_dict = {
    'geom_decoder': decoder.GeomDecoder,
    'combined_decoder': decoder.CombinedDecoder,
}
class ConvImpDyn(nn.Module):
def __init__(self, obj_per_encoder, obj_act_encoder, env_encoder, decoder, device=None, env_scale_factor=2.):
super().__init__()
self.decoder = decoder.to(device)
self.obj_per_encoder = obj_per_encoder.to(device)
self.obj_act_encoder = obj_act_encoder.to(device)
if env_encoder is None:
self.env_encoder = env_encoder
else:
self.env_encoder = env_encoder.to(device)
self.env_upsample = torch.nn.UpsamplingBilinear2d(scale_factor=env_scale_factor)
self._device = device
    def forward(self, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.
        Args:
            inputs (dict): batch dictionary fed to the encoders
            sample (bool): unused here; kept for interface compatibility
        '''
        #############
        # Encode perception and action feature planes, then decode both.
        c_per, c_act = self.encode_inputs(inputs)
        return self.decode(inputs, c_per, c_act, **kwargs)
    def forward_perception(self, inputs, filter=True,):
        ''' Runs only the perception half: encodes the scene, decodes
        occupancy (and correspondence) at the sampled points, and optionally
        filters everything down to the predicted-occupied subset.
        Args:
            inputs (dict): batch dictionary
            filter (bool): keep only points predicted occupied
        Returns:
            (c_per, c_env, points, state dict, per-block features)
        '''
        c_per, c_env = self.encode_perception(inputs, merge_env_feature=False)
        # Upsample environment planes and concatenate them onto the object
        # perception planes, channel-wise.
        for k in c_per.keys():
            env_f = self.env_upsample(c_env[k])
            c_env[k] = env_f
            c_per[k] = torch.cat([c_per[k], env_f], dim=1)
        # get curr observation state and features
        p = inputs['sampled_pts']
        # Flatten the paired-frame layout (B, 2, N, C) -> (B*2, N, C).
        if len(p.shape) > 3:
            B,_,N,C = p.shape
            curr_p = p.reshape([B*2,N,C])
        else:
            curr_p = p
        curr_state, per_features = self.decoder.decode_perception(curr_p, c_per)
        # Binarize occupancy at probability 0.5.
        occ_pred = dist.Bernoulli(logits=curr_state['occ']).probs >= 0.5
        curr_state['occ'] = occ_pred
        if filter:
            # Keep only the predicted-occupied points (and their features).
            curr_p = curr_p[occ_pred]
            if 'corr' in curr_state:
                curr_state['corr'] = curr_state['corr'][occ_pred]
            for i,p in enumerate(per_features):
                per_features[i] = p[occ_pred]
        return c_per, c_env, curr_p, curr_state, per_features
    def rollout(self, pts, per_features, c_env, actions):
        ''' Sequentially applies action sequences to the current point set by
        predicting flow per action and advecting the points.
        Args:
            pts (tensor): filtered occupied points
            per_features (list): perception features for those points
            c_env (dict): environment feature planes
            actions (tensor): grasp/target action sequences
        Returns:
            list of (points-per-sequence, action-encoding) tuples, one per
            action step.
        '''
        actions = actions.squeeze()
        num_sequence = actions.shape[0]
        num_actions = actions.shape[-2]
        all_traj = []
        # Wall-clock accounting for the two expensive stages.
        total_time_act_render = 0
        total_time_act_decode = 0
        import time
        # from functools import partial
        # render_pts_func = partial(plushsim_util.render_points, return_index=True)
        # Every sequence starts from the same current point set.
        curr_pts = [pts for _ in range(num_sequence)]
        for j in range(num_actions):
            act_traj = []
            # World-coordinate copies of the current points.
            # NOTE(review): points_world is computed but not used below —
            # presumably a leftover of the commented-out rendering path.
            points_world = [p.cpu().numpy().squeeze()
                                * (1200, 1200, 400)
                                / (1.1,1.1,1.1)
                                + (0, 0, 180) for p in curr_pts]
            for i in range(num_sequence):
                # g = grasp location, t = target location for this step.
                g,t = actions[i,0,j], actions[i,1,j]
                start_time = time.time()
                c_act, act_partial = self.get_action_encoding(curr_pts[i], g, t, c_env)
                total_time_act_render += time.time() - start_time
                act_traj.append(act_partial)
                start_time = time.time()
                flow = self.decoder.decode_action(curr_pts[i], c_act, per_features)['flow']
                # Advect the points by the predicted (down-scaled) flow.
                curr_pts[i] = curr_pts[i] + flow / 10.
                total_time_act_decode += time.time() - start_time
            all_traj.append((curr_pts.copy(), act_traj))
        print("total time render: ",total_time_act_render)
        print("total time decode: ",total_time_act_decode)
        return all_traj
def rollout_async(self, pts, per_features, c_env, actions):
actions = actions.squeeze()
num_sequence = actions.shape[0]
num_actions = actions.shape[-2]
all_traj = []
total_time_act_render = 0
total_time_act_decode = 0
total_async_time_act_render = 0
import time
from functools import partial
render_pts_func = partial(plushsim_util.render_points, return_index=True)
curr_pts = [pts for _ in range(num_sequence)]
for j in range(num_actions):
start_time = time.time()
points_world = [p.cpu().numpy().squeeze()
* (1200, 1200, 400)
/ (1.1,1.1,1.1)
+ (0, 0, 180) for p in curr_pts]
from multiprocessing import Pool
with Pool(16) as p:
vis_idxes = p.map(render_pts_func, points_world)
xyzs, acts = [],[]
for i in range(num_sequence):
g,t = actions[i,0,j], actions[i,1,j]
# c_act, act_partial = self.get_action_encoding(
# curr_pts[i], g, t, c_env, vis_idx=vis_idxes[i])
obj_xyz, obj_act = self.get_action_encoding_new(
curr_pts[i], g, t, c_env, vis_idx=vis_idxes[i])
xyzs.append(obj_xyz)
acts.append(obj_act)
total_time_act_render += time.time() - start_time
n = 20
start_time = time.time()
xyz_chunks = [xyzs[i:i+n] for i in range(0, num_sequence, n)]
act_chunks = [acts[i:i+n] for i in range(0, num_sequence, n)]
c_acts = []
for xyz, act in zip(xyz_chunks, act_chunks):
obj_xyz = torch.as_tensor(np.stack(xyz).astype(np.float32)).to(self._device)
obj_act = torch.as_tensor(np.stack(act).astype(np.float32)).to(self._device)
c_act_new = self.obj_act_encoder((obj_xyz, obj_act))
for chunk_i in range(len(xyz)):
c_act = {}
for k in c_act_new.keys():
c_act[k] = torch.cat([c_act_new[k][chunk_i].unsqueeze(0), c_env[k]], dim=1)
c_acts.append(c_act)
total_time_act_decode += time.time() - start_time
from src.utils import common_util
from PIL import Image
for k,v in c_acts[0].items():
v_np = v.squeeze().permute(1,2,0).cpu().numpy()
feature_plane = v_np.reshape([-1, v_np.shape[-1]])
tsne_result = common_util.embed_tsne(feature_plane)
colors = common_util.get_color_map(tsne_result)
colors = colors.reshape((128,128,-1)).astype(np.float32)
colors = (colors * 255 / np.max(colors)).astype('uint8')
img = Image.fromarray(colors)
img.save(f"act_{k}.png")
import pdb; pdb.set_trace()
for i in range(num_sequence):
flow = self.decoder.decode_action(curr_pts[i], c_acts[i], per_features)['flow']
curr_pts[i] = curr_pts[i] + flow / 10.
all_traj.append(([p.cpu().numpy().squeeze() for p in curr_pts], xyzs))
return all_traj
def get_action_encoding_new(self, pts, grasp_loc, target_loc, c_env, vis_idx=None):
# pts: B*2, N, 3
import time
start_time = time.time()
B,N,_ = pts.shape
pts = pts.cpu().numpy()
xyzs, acts = [], []
# get visable points by rendering pts
occ_pts = pts[0]
occ_pts_t = occ_pts * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
if vis_idx is None:
vis_idx = plushsim_util.render_points(occ_pts_t,
plushsim_util.CAM_EXTR,
plushsim_util.CAM_INTR,
return_index=True)
obj_xyz = occ_pts[vis_idx]
#print("time split 1: ", time.time() - start_time)
start_time = time.time()
# subsample pts
indices = np.random.randint(obj_xyz.shape[0], size=5000)
obj_xyz = obj_xyz[indices]
# make action feature
tiled_grasp_loc = np.tile(grasp_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
tiled_target_loc = np.tile(target_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
obj_act = np.concatenate([tiled_target_loc, obj_xyz - tiled_grasp_loc], axis=-1)
return obj_xyz, obj_act
    def get_action_encoding(self, pts, grasp_loc, target_loc, c_env, vis_idx=None):
        """Render the visible partial cloud for one action and encode it.

        Args:
            pts: (B*2, N, 3) torch tensor of normalized points; only pts[0] is used.
            grasp_loc: torch tensor, grasp position.
            target_loc: torch tensor, target position.
            c_env: dict of env feature planes, concatenated onto each action plane.
            vis_idx: optional precomputed visibility indices; when None the
                points are rendered here.

        Returns:
            (c_act, obj_xyz): action feature planes (with env appended) and the
            (1, 5000, 3) tensor of sampled visible points.
        """
        # pts: B*2, N, 3
        import time
        start_time = time.time()
        B,N,_ = pts.shape
        pts = pts.cpu().numpy()
        xyzs, acts = [], []
        # get visable points by rendering pts
        occ_pts = pts[0]
        # to simulator world coordinates for rendering
        occ_pts_t = occ_pts * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
        if vis_idx is None:
            vis_idx = plushsim_util.render_points(occ_pts_t,
                            plushsim_util.CAM_EXTR,
                            plushsim_util.CAM_INTR,
                            return_index=True)
        obj_xyz = occ_pts[vis_idx]
        #print("time split 1: ", time.time() - start_time)
        start_time = time.time()
        # subsample pts (5000 points, random with replacement)
        indices = np.random.randint(obj_xyz.shape[0], size=5000)
        obj_xyz = obj_xyz[indices]
        # make action feature: [target location, offset from grasp point]
        tiled_grasp_loc = np.tile(grasp_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
        tiled_target_loc = np.tile(target_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
        obj_act = np.concatenate([tiled_target_loc, obj_xyz - tiled_grasp_loc], axis=-1)
        xyzs.append(obj_xyz)
        acts.append(obj_act)
        # stack into a batch of one and move to the model's device
        obj_xyz = torch.as_tensor(np.stack(xyzs).astype(np.float32)).to(self._device)
        obj_act = torch.as_tensor(np.stack(acts).astype(np.float32)).to(self._device)
        #print("time split 2: ", time.time() - start_time)
        start_time = time.time()
        c_act_new = self.obj_act_encoder((obj_xyz, obj_act))
        #print("time split 3: ", time.time() - start_time)
        start_time = time.time()
        # append the shared env plane to every action feature plane
        for k in c_act_new.keys():
            c_act_new[k] = torch.cat([c_act_new[k], c_env[k]], dim=1)
        #print("time split 4: ", time.time() - start_time)
        start_time = time.time()
        return c_act_new, obj_xyz
    def encode_perception(self, inputs, merge_env_feature=True):
        """Encode the object (and optionally environment) point clouds.

        Args:
            inputs (dict): 'obj_obs' is an (..., N, >=6) cloud split into xyz
                and rgb; 'env_obs' likewise when an env encoder is present.
            merge_env_feature: when True, upsampled env planes are concatenated
                into the perception planes; when False they are returned
                separately.

        Returns:
            c_per, or (c_per, env_features) when merge_env_feature=False and an
            env encoder exists.
            NOTE(review): with env_encoder=None and merge_env_feature=False
            only c_per is returned — callers unpacking two values would fail;
            confirm env_encoder is always set on this model.
        """
        obj_pcloud = inputs['obj_obs']
        if len(obj_pcloud.shape) > 3:
            # (B, 2, N, C) pairs flattened into the batch dimension
            B,_,N,C = obj_pcloud.shape
            obj_pcloud = obj_pcloud.reshape([B*2,N,C])
        obj_xyz, obj_rgb = obj_pcloud[...,:3],obj_pcloud[...,3:6]
        c_per = self.obj_per_encoder((obj_xyz, obj_rgb))
        if self.env_encoder is not None:
            env_pcloud = inputs['env_obs'].cuda()
            if len(env_pcloud.shape) > 3:
                B,_,N,C = env_pcloud.shape
                env_pcloud = env_pcloud.reshape([B*2,N,C])
            env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
            env_features = self.env_encoder((env_xyz, env_rgb))
            if merge_env_feature:
                for k in c_per.keys():
                    # upsample env plane to the perception plane resolution
                    env_f = self.env_upsample(env_features[k])
                    c_per[k] = torch.cat([c_per[k], env_f], dim=1)
            else:
                return c_per, env_features
        return c_per
    def encode_inputs(self, inputs):
        ''' Encodes the input.

        Splits 'obj_obs' into xyz / rgb / action channels and produces both
        perception and action feature planes; env features (when available)
        are upsampled and appended to both.

        Args:
            input (tensor): the input; dict with 'obj_obs' (B, 2, N, >=7) and
                optionally 'env_obs'.

        Returns:
            (c_per, c_act): dicts of perception and action feature planes.
        '''
        obj_pcloud = inputs['obj_obs']
        # flatten the (B, 2, ...) observation pairs into the batch dimension
        B,_,N,C = obj_pcloud.shape
        obj_pcloud = obj_pcloud.reshape([B*2,N,C])
        obj_xyz, obj_rgb, obj_act = obj_pcloud[...,:3],obj_pcloud[...,3:6],obj_pcloud[...,6:]
        c_per = self.obj_per_encoder((obj_xyz, obj_rgb))
        c_act = self.obj_act_encoder((obj_xyz, obj_act))
        if self.env_encoder is not None:
            env_pcloud = inputs['env_obs'].cuda()
            B,_,N,C = env_pcloud.shape
            env_pcloud = env_pcloud.reshape([B*2,N,C])
            env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
            env_features = self.env_encoder((env_xyz, env_rgb))
            for k in c_per.keys():
                # shared upsampled env plane goes into both feature sets
                env_f = self.env_upsample(env_features[k])
                c_per[k] = torch.cat([c_per[k], env_f], dim=1)
                c_act[k] = torch.cat([c_act[k], env_f], dim=1)
        return c_per, c_act
def eval_points(self, pts, c):
outputs = self.decoder(pts, *c)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
return outputs
    def decode(self, inputs, c1, c2, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.

        Args:
            p (tensor): points; read from inputs['sampled_pts'], (B, 2, N, C).
            c (tensor): latent conditioned code c; here split into c1
                (perception planes) and c2 (action planes).

        Returns:
            dict with 'occ' wrapped in a Bernoulli and, when present, 'corr'
            re-indexed by inputs['pair_indices'] (unless kwargs contains
            'skip_indexing').
        '''
        p = inputs['sampled_pts']
        # flatten the (B, 2, ...) pairs into the batch dimension
        B,_,N,C = p.shape
        p = p.reshape([B*2,N,C])
        outputs = self.decoder(p, c1, c2)
        if 'occ' in outputs:
            outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
        if 'corr' in outputs:
            # regroup correspondence features back into (B, 2, N, C) pairs
            _,N,C = outputs['corr'].shape
            corr_f = outputs['corr'].reshape([B,2,N,C])
            if 'skip_indexing' not in kwargs:
                # -> (2, B*N, C), then select the training pairs
                corr_f = torch.transpose(corr_f, 0, 1)
                corr_f = torch.flatten(corr_f, 1, 2)
                inds = inputs['pair_indices']
                corr_f = corr_f[:,inds]
            outputs['corr'] = corr_f
        return outputs
def to(self, device):
''' Puts the model to the device.
Args:
device (device): pytorch device
'''
model = super().to(device)
model._device = device
return model
class ConvOccGeom(nn.Module):
    ''' Occupancy Network class.

    Convolutional occupancy model over object (and optional environment)
    point clouds; the encoder produces 2D feature planes that the decoder
    queries at sampled points.

    Args:
        obj_encoder (nn.Module): encoder for the object point cloud
        env_encoder (nn.Module): optional encoder for the environment cloud
        decoder (nn.Module): decoder network
        device (device): torch device
        env_scale_factor (float): upsampling factor applied to env feature
            planes before concatenation with object planes
    '''
    def __init__(self, obj_encoder, env_encoder, decoder, device=None, env_scale_factor=2.):
        super().__init__()
        self.decoder = decoder.to(device)
        self.obj_encoder = obj_encoder.to(device)
        if env_encoder is None:
            # model runs without environment observations
            self.env_encoder = env_encoder
        else:
            self.env_encoder = env_encoder.to(device)
        # brings env planes up to the object plane resolution
        self.env_upsample = torch.nn.UpsamplingBilinear2d(scale_factor=env_scale_factor)
        self._device = device
    def forward(self, inputs, sample=True, **kwargs):
        ''' Performs a forward pass through the network.

        Args:
            p (tensor): sampled points (inside inputs['sampled_pts'])
            inputs (tensor): conditioning input
            sample (bool): whether to sample for z (unused here)
        '''
        #############
        c = self.encode_inputs(inputs)
        return self.decode(inputs, c, **kwargs)
    def encode_inputs(self, inputs):
        ''' Encodes the input.

        Args:
            input (tensor): the input; dict with 'obj_obs' (B, 2, N, >=6) and
                optionally 'env_obs'

        Returns:
            dict of feature planes (object planes with env planes appended
            when an env encoder exists).
        '''
        obj_pcloud = inputs['obj_obs']
        # flatten the (B, 2, ...) observation pairs into the batch dimension
        B,_,N,C = obj_pcloud.shape
        obj_pcloud = obj_pcloud.reshape([B*2,N,C])
        obj_xyz, obj_rgb = obj_pcloud[...,:3],obj_pcloud[...,3:]
        obj_features = self.obj_encoder((obj_xyz, obj_rgb))
        if self.env_encoder is None:
            return obj_features
        env_pcloud = inputs['env_obs'].cuda()
        B,_,N,C = env_pcloud.shape
        env_pcloud = env_pcloud.reshape([B*2,N,C])
        env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
        env_features = self.env_encoder((env_xyz, env_rgb))
        joint_features = {}
        for k in obj_features.keys():
            # upsample env plane and concatenate channel-wise
            env_f = self.env_upsample(env_features[k])
            joint_features[k] = torch.cat([obj_features[k], env_f], dim=1)
        return joint_features
    def eval_points(self, pts, c):
        # Decode arbitrary query points under codes c; occupancy logits are
        # wrapped in a Bernoulli distribution.
        outputs = self.decoder(pts, c)
        if 'occ' in outputs:
            outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
        return outputs
    def decode(self, inputs, c, **kwargs):
        ''' Returns occupancy probabilities for the sampled points.

        Args:
            p (tensor): points; read from inputs['sampled_pts'], (B, 2, N, C)
            c (tensor): latent conditioned code c
        '''
        p = inputs['sampled_pts']
        B,_,N,C = p.shape
        p = p.reshape([B*2,N,C])
        outputs = self.decoder(p, c, **kwargs)
        if 'occ' in outputs:
            outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
        if 'corr' in outputs:
            # regroup correspondence features into pairs and select the
            # training pairs via inputs['pair_indices']
            _,N,C = outputs['corr'].shape
            corr_f = outputs['corr'].reshape([B,2,N,C])
            corr_f = torch.transpose(corr_f, 0, 1)
            corr_f = torch.flatten(corr_f, 1, 2)
            inds = inputs['pair_indices']
            corr_f = corr_f[:,inds]
            outputs['corr'] = corr_f
        return outputs
    def to(self, device):
        ''' Puts the model to the device.

        Args:
            device (device): pytorch device
        '''
        model = super().to(device)
        model._device = device
        return model
| 17,056 | Python | 39.80622 | 113 | 0.525797 |
NVlabs/ACID/ACID/src/encoder/__init__.py | from src.encoder import (
pointnet
)
# Registry mapping config-file encoder names to encoder classes.
encoder_dict = {
    'geom_encoder': pointnet.GeomEncoder,
}
| 104 | Python | 10.666665 | 41 | 0.663462 |
NVlabs/ACID/ACID/src/encoder/pointnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.layers import ResnetBlockFC
from torch_scatter import scatter_mean, scatter_max
from src.common import coordinate2index, normalize_coordinate
from src.encoder.unet import UNet
class GeomEncoder(nn.Module):
    ''' PointNet-based encoder network with ResNet blocks for each point.
        Number of input points are fixed.

        Points (plus per-point features) are encoded with shared MLP blocks
        interleaved with local plane pooling, then scattered onto three
        axis-aligned feature planes ('xz', 'xy', 'yz') that are each refined
        by a 2D U-Net.

    Args:
        c_dim (int): dimension of latent code c
        dim (int): input points dimension
        f_dim (int): per-point feature dimension appended to the coordinates
        hidden_dim (int): hidden dimension of the network
        scatter_type (str): feature aggregation when doing local pooling
        unet_kwargs (str): U-Net parameters
        plane_resolution (int): defined resolution for plane feature
        padding (float): conventional padding paramter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
        n_blocks (int): number of blocks ResNetBlockFC layers
    '''
    def __init__(self, c_dim=128, dim=3, f_dim=9, hidden_dim=128, scatter_type='max',
                 unet_kwargs=None, plane_resolution=None, padding=0.1, n_blocks=5):
        super().__init__()
        self.c_dim = c_dim
        # first layer consumes coordinates concatenated with point features
        self.fc_pos = nn.Linear(dim+f_dim, 2*hidden_dim)
        self.blocks = nn.ModuleList([
            ResnetBlockFC(2*hidden_dim, hidden_dim) for i in range(n_blocks)
        ])
        self.fc_c = nn.Linear(hidden_dim, c_dim)
        self.actvn = nn.ReLU()
        self.hidden_dim = hidden_dim
        self.unet = UNet(c_dim, in_channels=c_dim, **unet_kwargs)
        self.reso_plane = plane_resolution
        self.padding = padding
        if scatter_type == 'max':
            self.scatter = scatter_max
        elif scatter_type == 'mean':
            self.scatter = scatter_mean
        else:
            raise ValueError('incorrect scatter type')
    def generate_plane_features(self, p, c, plane='xz'):
        # acquire indices of features in plane
        xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
        index = coordinate2index(xy, self.reso_plane)
        # scatter plane features from points
        fea_plane = c.new_zeros(p.size(0), self.c_dim, self.reso_plane**2)
        c = c.permute(0, 2, 1) # B x 512 x T
        fea_plane = scatter_mean(c, index, out=fea_plane) # B x 512 x reso^2
        fea_plane = fea_plane.reshape(p.size(0), self.c_dim, self.reso_plane, self.reso_plane) # sparce matrix (B x 512 x reso x reso)
        # process the plane features with UNet
        fea_plane = self.unet(fea_plane)
        return fea_plane
    def pool_local(self, xy, index, c):
        # Pool features of points that land in the same plane cell, then
        # gather the pooled value back to every point (summed over planes).
        bs, fea_dim = c.size(0), c.size(2)
        keys = xy.keys()
        c_out = 0
        for key in keys:
            # scatter plane features from points
            fea = self.scatter(c.permute(0, 2, 1), index[key], dim_size=self.reso_plane**2)
            if self.scatter == scatter_max:
                # scatter_max returns (values, argmax); keep values only
                fea = fea[0]
            # gather feature back to points
            fea = fea.gather(dim=2, index=index[key].expand(-1, fea_dim, -1))
            c_out += fea
        return c_out.permute(0, 2, 1)
    def forward(self, p):
        # p may be (points, per-point features); features are required by
        # fc_pos (dim + f_dim inputs).
        if type(p) is tuple:
            p, pf = p
        else:
            pf = None
        # acquire the index for each point on each of the three planes
        coord = {}
        index = {}
        coord['xz'] = normalize_coordinate(p.clone(), plane='xz', padding=self.padding)
        index['xz'] = coordinate2index(coord['xz'], self.reso_plane)
        coord['xy'] = normalize_coordinate(p.clone(), plane='xy', padding=self.padding)
        index['xy'] = coordinate2index(coord['xy'], self.reso_plane)
        coord['yz'] = normalize_coordinate(p.clone(), plane='yz', padding=self.padding)
        index['yz'] = coordinate2index(coord['yz'], self.reso_plane)
        net = self.fc_pos(torch.cat([p, pf],dim=-1))
        net = self.blocks[0](net)
        for block in self.blocks[1:]:
            # interleave pointwise blocks with local plane pooling
            pooled = self.pool_local(coord, index, net)
            net = torch.cat([net, pooled], dim=2)
            net = block(net)
        c = self.fc_c(net)
        fea = {}
        fea['xz'] = self.generate_plane_features(p, c, plane='xz')
        fea['xy'] = self.generate_plane_features(p, c, plane='xy')
        fea['yz'] = self.generate_plane_features(p, c, plane='yz')
        return fea
| 4,654 | Python | 37.791666 | 134 | 0.592609 |
NVlabs/ACID/ACID/src/encoder/unet.py | '''
Codes are from:
https://github.com/jaxony/unet-pytorch/blob/master/model.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import numpy as np
def conv3x3(in_channels, out_channels, stride=1,
            padding=1, bias=True, groups=1):
    """3x3 2D convolution; default padding of 1 preserves spatial size."""
    return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride,
                     padding=padding, bias=bias, groups=groups)
def upconv2x2(in_channels, out_channels, mode='transpose'):
    """2x spatial upsampling: transposed conv, or bilinear upsample + 1x1 conv."""
    if mode != 'transpose':
        # out_channels is always going to be the same as in_channels;
        # the trailing 1x1 conv handles the channel change.
        return nn.Sequential(
            nn.Upsample(mode='bilinear', scale_factor=2),
            conv1x1(in_channels, out_channels))
    return nn.ConvTranspose2d(in_channels, out_channels, kernel_size=2, stride=2)
def conv1x1(in_channels, out_channels, groups=1):
    """Pointwise (1x1) convolution with stride 1."""
    return nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1,
                     groups=groups)
class DownConv(nn.Module):
    """
    Encoder stage of the U-Net: (conv3x3 + ReLU) x 2, then optional 2x2 max-pool.

    forward() returns (output, pre_pool_activation); the pre-pool tensor feeds
    the skip connection of the matching UpConv.
    """
    def __init__(self, in_channels, out_channels, pooling=True):
        super(DownConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.pooling = pooling
        self.conv1 = conv3x3(self.in_channels, self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)
        if self.pooling:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        h = F.relu(self.conv2(F.relu(self.conv1(x))))
        skip = h
        if self.pooling:
            h = self.pool(h)
        return h, skip
class UpConv(nn.Module):
    """
    Decoder stage of the U-Net: 2x upsample, merge with the encoder skip
    tensor ('concat' or 'add'), then (conv3x3 + ReLU) x 2.
    """
    def __init__(self, in_channels, out_channels,
                 merge_mode='concat', up_mode='transpose'):
        super(UpConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.merge_mode = merge_mode
        self.up_mode = up_mode
        self.upconv = upconv2x2(self.in_channels, self.out_channels,
                                mode=self.up_mode)
        # 'concat' doubles the channels fed into the first conv
        first_in = 2 * self.out_channels if self.merge_mode == 'concat' \
            else self.out_channels
        self.conv1 = conv3x3(first_in, self.out_channels)
        self.conv2 = conv3x3(self.out_channels, self.out_channels)

    def forward(self, from_down, from_up):
        """ Forward pass
        Arguments:
            from_down: tensor from the encoder pathway
            from_up: upconv'd tensor from the decoder pathway
        """
        up = self.upconv(from_up)
        if self.merge_mode == 'concat':
            merged = torch.cat((up, from_down), 1)
        else:
            merged = up + from_down
        return F.relu(self.conv2(F.relu(self.conv1(merged))))
class UNet(nn.Module):
    """ `UNet` class is based on https://arxiv.org/abs/1505.04597
    The U-Net is a convolutional encoder-decoder neural network.
    Contextual spatial information (from the decoding,
    expansive pathway) about an input tensor is merged with
    information representing the localization of details
    (from the encoding, compressive pathway).
    Modifications to the original paper:
    (1) padding is used in 3x3 convolutions to prevent loss
        of border pixels
    (2) merging outputs does not require cropping due to (1)
    (3) residual connections can be used by specifying
        UNet(merge_mode='add')
    (4) if non-parametric upsampling is used in the decoder
        pathway (specified by upmode='upsample'), then an
        additional 1x1 2d convolution occurs after upsampling
        to reduce channel dimensionality by a factor of 2.
        This channel halving happens with the convolution in
        the tranpose convolution (specified by upmode='transpose')
    """

    def __init__(self, num_classes, in_channels=3, depth=5,
                 start_filts=64, up_mode='transpose',
                 merge_mode='concat', **kwargs):
        """
        Arguments:
            num_classes: int, number of output channels of the final 1x1 conv.
            in_channels: int, number of channels in the input tensor.
                Default is 3 for RGB images.
            depth: int, number of MaxPools in the U-Net.
            start_filts: int, number of convolutional filters for the
                first conv.
            up_mode: string, type of upconvolution. Choices: 'transpose'
                for transpose convolution or 'upsample' for nearest neighbour
                upsampling.
            merge_mode: 'concat' or 'add', how skip tensors are merged.
        """
        super(UNet, self).__init__()

        if up_mode in ('transpose', 'upsample'):
            self.up_mode = up_mode
        else:
            raise ValueError("\"{}\" is not a valid mode for "
                             "upsampling. Only \"transpose\" and "
                             "\"upsample\" are allowed.".format(up_mode))

        if merge_mode in ('concat', 'add'):
            self.merge_mode = merge_mode
        else:
            # FIX: previously formatted up_mode into this message (and was
            # missing a space after "for"), reporting the wrong value.
            raise ValueError("\"{}\" is not a valid mode for "
                             "merging up and down paths. "
                             "Only \"concat\" and "
                             "\"add\" are allowed.".format(merge_mode))

        # NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
        if self.up_mode == 'upsample' and self.merge_mode == 'add':
            raise ValueError("up_mode \"upsample\" is incompatible "
                             "with merge_mode \"add\" at the moment "
                             "because it doesn't make sense to use "
                             "nearest neighbour to reduce "
                             "depth channels (by half).")

        self.num_classes = num_classes
        self.in_channels = in_channels
        self.start_filts = start_filts
        self.depth = depth

        self.down_convs = []
        self.up_convs = []

        # create the encoder pathway and add to a list;
        # channels double at every level, pooling at all but the deepest
        for i in range(depth):
            ins = self.in_channels if i == 0 else outs
            outs = self.start_filts*(2**i)
            pooling = True if i < depth-1 else False
            down_conv = DownConv(ins, outs, pooling=pooling)
            self.down_convs.append(down_conv)

        # create the decoder pathway and add to a list
        # - careful! decoding only requires depth-1 blocks
        for i in range(depth-1):
            ins = outs
            outs = ins // 2
            up_conv = UpConv(ins, outs, up_mode=up_mode,
                             merge_mode=merge_mode)
            self.up_convs.append(up_conv)

        # add the list of modules to current module
        self.down_convs = nn.ModuleList(self.down_convs)
        self.up_convs = nn.ModuleList(self.up_convs)

        self.conv_final = conv1x1(outs, self.num_classes)

        self.reset_params()

    @staticmethod
    def weight_init(m):
        # NOTE(review): only plain nn.Conv2d is re-initialized here;
        # nn.ConvTranspose2d layers keep their default init — confirm intended.
        if isinstance(m, nn.Conv2d):
            init.xavier_normal_(m.weight)
            init.constant_(m.bias, 0)

    def reset_params(self):
        for i, m in enumerate(self.modules()):
            self.weight_init(m)

    def forward(self, x):
        encoder_outs = []
        # encoder pathway, save outputs for merging
        for i, module in enumerate(self.down_convs):
            x, before_pool = module(x)
            encoder_outs.append(before_pool)
        for i, module in enumerate(self.up_convs):
            # skip connection: matching encoder level, deepest excluded
            before_pool = encoder_outs[-(i+2)]
            x = module(before_pool, x)
        # No softmax is used. This means you need to use
        # nn.CrossEntropyLoss in your training script,
        # as this module includes a softmax already.
        x = self.conv_final(x)
        return x
if __name__ == "__main__":
    """
    testing
    """
    # Smoke test: feed a single-NaN image through a 5-level UNet and report
    # the fraction of output pixels reached by the NaN (i.e. the receptive
    # field coverage).
    model = UNet(1, depth=5, merge_mode='concat', in_channels=1, start_filts=32)
    print(model)
    print(sum(p.numel() for p in model.parameters()))
    reso = 176
    x = np.zeros((1, 1, reso, reso))
    x[:,:,int(reso/2-1), int(reso/2-1)] = np.nan
    x = torch.FloatTensor(x)
    out = model(x)
    print('%f'%(torch.sum(torch.isnan(out)).detach().cpu().numpy()/(reso*reso)))
    # loss = torch.sum(out)
    # loss.backward()
| 8,696 | Python | 32.57915 | 80 | 0.575092 |
NVlabs/ACID/ACID/src/utils/common_util.py | import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
def get_color_map(x):
    """Map scalar values (expected in [0, 1]) to RGB via matplotlib's Spectral colormap."""
    rgba = plt.cm.Spectral(x)
    return rgba[:, :3]
def embed_tsne(data):
    """
    N x D np.array data

    Project to 1-D with t-SNE (fixed random_state=0 for repeatability) and
    min-max normalize the result to [0, 1].
    """
    projector = TSNE(n_components=1, verbose=0, perplexity=40, n_iter=300,
                     random_state=0)
    embedded = np.squeeze(projector.fit_transform(data))
    lo = np.min(embedded)
    hi = np.max(embedded)
    return (embedded - lo) / (hi - lo)
########################################################################
# Viewpoint transform
########################################################################
# Maps each camera name to the (signed) world-axis order seen from that
# view; consumed by world_coord_view_augmentation via get_axis_pt.
view_to_order = {
    'cam0': ('X', 'Y', 'Z'),
    'cam1': ('-Z', 'Y', 'X'),
    'cam2': ('Z', 'Y', '-X'),
    'cam3': ('-X', 'Y', '-Z'),
}
def get_axis_pt(val, x, y, z):
    """Select one of the per-axis values by axis token.

    Args:
        val: axis label — 'X', 'Y' or 'Z', optionally prefixed with '-'.
        x, y, z: per-axis values (scalars or arrays).

    Returns:
        The chosen value, negated when val carries a '-' prefix.

    Raises:
        ValueError: if val names no known axis (previously returned None
            silently, which surfaced as a confusing downstream error).
    """
    multiplier = -1 if '-' in val else 1
    if "X" in val:
        return x * multiplier
    if "Y" in val:
        return y * multiplier
    if "Z" in val:
        return z * multiplier
    raise ValueError("unknown axis token: %r" % (val,))
def world_coord_view_augmentation(view, pts):
    """Permute/flip world-frame axes of pts to match the given camera view's axis order."""
    axis_order = view_to_order[view]
    flat = pts.reshape([-1, 3])
    x, y, z = np.moveaxis(flat, 1, 0)
    return np.array([get_axis_pt(axis, x, y, z) for axis in axis_order]).T
########################################################################
# partial observation projection / transform / rendering utilities
########################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
    """Map (N, 3) camera-frame points to world frame via a 4x4 (or 3x4) pose matrix."""
    rotation = camera_pose[0:3, 0:3]
    translation = camera_pose[0:3, 3]
    return cam_pts @ rotation.T + translation
def transform_points_world_to_cam(world_points, cam_extr):
    """Inverse of transform_points_cam_to_world: map world-frame points into camera frame."""
    rot_inv = np.linalg.inv(cam_extr[0:3, 0:3])
    translation = cam_extr[0:3, 3]
    return (world_points - translation) @ rot_inv.T
def render_points_slowest(world_points, cam_extr, cam_intr):
    # Reference implementation of hidden-point removal: project every point
    # into the image, bucket projections into 3x3-pixel cells, and keep one
    # point per cell. Kept for comparison; render_points is the fast version.
    cam_points = transform_points_world_to_cam(world_points, cam_extr)
    cam_pts_x = cam_points[:,0]
    cam_pts_y = cam_points[:,1]
    cam_pts_z = cam_points[:,2]
    # pinhole projection; x is negated and the principal-point offsets are
    # taken from cam_intr[1,2]/[0,2] — presumably matches the dataset's
    # camera convention; confirm against the capture pipeline.
    cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
    cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
    cam_pts_x = np.rint(cam_pts_x).astype(int)
    cam_pts_y = np.rint(cam_pts_y).astype(int)
    # carry the original point index in the last column for the final lookup
    points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
    sorted_pts = sorted(points, key=lambda x: (x[0], x[1]))
    grouped_pts = [[*j] for i, j in itertools.groupby(
                        sorted_pts,
                        key=lambda x: (x[0] // 3, x[1] // 3))]
    # NOTE(review): per cell this keeps the point with the LARGEST cam-space z
    # despite the name min_depth — presumably z decreases away from the
    # camera in this convention; verify.
    min_depth = np.array([sorted(p, key=lambda x: -x[2])[0] for p in grouped_pts])
    min_idx = min_depth[:,-1]
    min_depth = min_depth[:,:-1]
    return world_points[min_idx.astype(int)]
def render_points_slow(world_points, cam_extr, cam_intr):
    # Faster hidden-point removal than render_points_slowest: vectorized
    # projection, then a sort + split by (row, col) bucket instead of
    # itertools.groupby. Buckets are 2x2 pixels (coords halved and rounded).
    cam_points = transform_points_world_to_cam(world_points, cam_extr)
    cam_pts_x = cam_points[:,0]
    cam_pts_y = cam_points[:,1]
    cam_pts_z = cam_points[:,2]
    # same projection convention as render_points_slowest
    cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
    cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
    # columns: bucket_row, bucket_col, z, original index
    points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
    points[:,:2] = np.rint(points[:,:2] / 2)
    # stable two-key sort by (row, col) so equal buckets are contiguous
    points = points[points[:,1].argsort()]
    points = points[points[:,0].argsort(kind='mergesort')]
    grouped_pts = np.split(points[:,2:], np.unique(points[:, :2], axis=0, return_index=True)[1][1:])
    # NOTE(review): keeps the largest-z point per bucket (variable named
    # min_depth); same convention caveat as render_points_slowest.
    min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
    min_idx = min_depth[:,-1].astype(int)
    return world_points[min_idx]
def render_points(world_points, cam_extr, cam_intr, return_index=False):
    # Fastest hidden-point removal variant: identical projection to
    # render_points_slow, but each 2x2-pixel bucket is keyed by a single
    # integer hash instead of a unique (row, col) pair.
    cam_points = transform_points_world_to_cam(world_points, cam_extr)
    cam_pts_x = cam_points[:,0]
    cam_pts_y = cam_points[:,1]
    cam_pts_z = cam_points[:,2]
    cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
    cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
    # hash = row * 1000 + col; assumes col/2 < 1000 so rows cannot collide —
    # TODO confirm against the rendered image width.
    idx = np.rint(cam_pts_y / 2) * 1000 + np.rint(cam_pts_x / 2)
    val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
    order = idx.argsort()
    idx = idx[order]
    val = val[order]
    grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
    # keep the largest-z point per bucket (same convention as the slow variants)
    min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
    min_idx = min_depth[:,-1].astype(int)
    if return_index:
        return min_idx
    return world_points[min_idx]
def project_depth_world_space(depth_image, camera_intr, camera_pose, keep_dim=False, project_factor=1.):
    """Back-project a depth image and transform the resulting cloud into world frame.

    Returns (H, W, 3) when keep_dim=True, else (H*W, 3).
    """
    cam_pts = project_depth_cam_space(depth_image, camera_intr, keep_dim=False,
                                      project_factor=project_factor)
    world_pts = transform_points_cam_to_world(cam_pts, camera_pose)
    if keep_dim:
        h, w = depth_image.shape
        world_pts = world_pts.reshape([h, w, 3])
    return world_pts
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
    """Back-project a depth image into a camera-frame point cloud.

    Rays use the image center as principal point and the focal lengths from
    camera_intrinsics; x is negated to match the dataset's camera convention.
    Returns (H, W, 3) if keep_dim else (H*W, 3), scaled by project_factor.
    """
    # Get depth image size
    im_h, im_w = depth_img.shape[0], depth_img.shape[1]
    # Project depth into 3D point cloud in camera coordinates
    pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w),
                               np.linspace(0, im_h - 1, im_h))
    x = (pix_x - im_w / 2.) * (-depth_img / camera_intrinsics[0, 0])
    y = (pix_y - im_h / 2.) * (depth_img / camera_intrinsics[1, 1])
    z = depth_img.copy()
    cam_pts = np.stack([x.ravel(), y.ravel(), z.ravel()], axis=1) * project_factor
    if keep_dim:
        cam_pts = cam_pts.reshape([im_h, im_w, 3])
    return cam_pts
def get_trunc_ab(mean, std, a, b):
    """Convert absolute bounds [a, b] to scipy truncnorm's standardized (a, b)."""
    lower = (a - mean) / std
    upper = (b - mean) / std
    return lower, upper
def get_trunc_ab_range(mean_min, mean_max, std, a, b):
    """Standardized truncnorm bounds when the mean itself varies over [mean_min, mean_max]."""
    lower = (a - mean_min) / std
    upper = (b - mean_max) / std
    return lower, upper
def transform_points(pointcloud, from_range, to_range):
    """Linearly remap xyz coordinates from one axis-aligned box to another.

    Accepts (N, 3) xyz or (N, 6) xyz+rgb (rgb is passed through unchanged);
    a 1-D input is treated as a single point.
    """
    pts = pointcloud.reshape([1, -1]) if len(pointcloud.shape) == 1 else pointcloud
    has_rgb = pts.shape[1] == 6
    xyz = pts[:, :3] if has_rgb else pts
    src_center = np.mean(from_range, axis=0)
    src_size = np.ptp(from_range, axis=0)
    dst_center = np.mean(to_range, axis=0)
    dst_size = np.ptp(to_range, axis=0)
    mapped = (xyz - src_center) / src_size * dst_size + dst_center
    if has_rgb:
        return np.concatenate([mapped, pts[:, 3:]], axis=-1)
    return mapped
def extent_to_cube(extent):
    """Build an axis-aligned box mesh from ((min_x, min_y, min_z), (max_x, max_y, max_z)).

    Returns (verts, faces): 8 corner vertices and 6 quad faces with
    1-based indices (OBJ convention).
    """
    (min_x, min_y, min_z), (max_x, max_y, max_z) = extent[0], extent[1]
    # corners enumerated max-first on every axis (x outermost, z innermost)
    verts = np.array([(x, y, z)
                      for x in (max_x, min_x)
                      for y in (max_y, min_y)
                      for z in (max_z, min_z)])
    faces = np.array([
        (1,5,7,3),
        (4,3,7,8),
        (8,7,5,6),
        (6,2,4,8),
        (2,1,3,4),
        (6,5,1,2),])
    return verts, faces
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
    '''Make axes of 3D plot have equal scale so that spheres appear as spheres,
    cubes as cubes, etc.. This is one possible solution to Matplotlib's
    ax.set_aspect('equal') and ax.axis('equal') not working for 3D.

    Input
      ax: a matplotlib axis, e.g., as output from plt.gca().
    '''
    limits = [ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()]
    centers = [np.mean(lim) for lim in limits]
    spans = [abs(lim[1] - lim[0]) for lim in limits]
    # The plot bounding box is a sphere in the sense of the infinity
    # norm, hence half the max range acts as the plot radius.
    radius = 0.5 * max(spans)
    ax.set_xlim3d([centers[0] - radius, centers[0] + radius])
    ax.set_ylim3d([centers[1] - radius, centers[1] + radius])
    ax.set_zlim3d([centers[2] - radius, centers[2] + radius])
def set_background_blank(ax):
    """Strip grid, ticks, and pane fills from a 3D axis for clean renders."""
    ax.grid(False)
    ax.set_axis_off()
    # Hide axes ticks
    for tick_setter in (ax.set_xticks, ax.set_yticks, ax.set_zticks):
        tick_setter([])
    # Remove pane fill, then make pane edges fully transparent
    invisible = (1.0, 1.0, 1.0, 0.0)
    for axis in (ax.xaxis, ax.yaxis, ax.zaxis):
        axis.pane.fill = False
        axis.pane.set_edgecolor(invisible)
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
    """Render several point clouds as a grid of 3D scatter subplots.

    Args:
        point_clouds: list of dicts with 'pts' (N,3) and 'title'; optional
            'col' (per-point colors), 'flow' (N,3 vectors drawn as quivers),
            and 'action' (x, y drawn as a star marker at z=0).
        angle: (elev, azim) view angles applied to every subplot.

    Returns:
        The assembled matplotlib Figure.
    """
    fig = plt.figure()
    # near-square grid layout
    W = int(len(point_clouds) ** 0.5)
    H = math.ceil(len(point_clouds) / W)
    for i, pcloud in enumerate(point_clouds):
        action = None
        flow = None
        pts = pcloud['pts']
        title = pcloud['title']
        col = pcloud.get('col', None)
        flow = pcloud.get('flow', None)
        action = pcloud.get('action', None)
        ax = fig.add_subplot(W, H, i+1,projection='3d')
        ax.set_title(title)
        if flow is not None:
            # only draw arrows for points with non-zero flow
            flow_norm = np.linalg.norm(flow, axis=1)
            viz_idx = flow_norm > 0.0
            flow = flow[viz_idx]
            ax.quiver(
                pts[:,0][viz_idx],
                pts[:,1][viz_idx],
                pts[:,2][viz_idx],
                flow[:,0], flow[:,1], flow[:,2],
                color = 'red', linewidth=3, alpha=0.2
            )
        if col is None:
            col = 'blue'
        ax.scatter(pts[:,0],
                   pts[:,1],
                   pts[:,2], color=col,s=0.5)
        ax.view_init(*angle)
        if action is not None:
            # mark the action location on the ground plane (z = 0)
            ax.scatter(action[0], action[1], 0.,
                       edgecolors='tomato', color='turquoise', marker='*',s=80)
        set_axes_equal(ax)
        set_background_blank(ax)
    fig.tight_layout()
    return fig
def write_pointcoud_as_obj(path, xyzrgb, faces=None):
    """Dump a point cloud as a Wavefront .obj file.

    Args:
        path: output file path.
        xyzrgb: (N, 3) xyz or (N, 6) xyz+rgb array; every column is written
            with 3 decimal places.
        faces: optional iterable of index tuples written as 'f' lines.
    """
    with open(path, 'w') as out:
        for row in xyzrgb:
            out.write("v " + " ".join(f"{v:.3f}" for v in row) + "\n")
        if faces is not None:
            for face in faces:
                out.write("f " + " ".join(str(i) for i in face) + "\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
    """Voxel-grid downsample: keep one point per occupied cell of size `resolution`.

    Accepts (N, 3) xyz or (N, 6) xyz+rgb (cells computed from xyz only).
    With return_index=True (default) the kept row indices are returned
    instead of the points themselves. Empty input yields an empty result.
    """
    xyz = points[:, :3] if points.shape[1] == 6 else points
    if points.shape[0] == 0:
        return np.arange(0) if return_index else points
    quantized = xyz // resolution * resolution
    keep = np.unique(quantized, axis=0, return_index=True)[1]
    return keep if return_index else points[keep]
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
    """Symmetric chamfer distance between two point sets.

    (The `direction` argument is accepted for API compatibility but not used;
    the distance is always bidirectional.)
    """
    nn_on_x = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree',
                               metric=metric).fit(x)
    nn_on_y = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree',
                               metric=metric).fit(y)
    dists_y_to_x = nn_on_x.kneighbors(y)[0]
    dists_x_to_y = nn_on_y.kneighbors(x)[0]
    return np.mean(dists_y_to_x) + np.mean(dists_x_to_y)
def f1_score(x, y, metric='l2', th=0.01):
    # x is pred
    # y is gt
    # F-score at distance threshold th:
    #   precision = fraction of predicted points within th of some GT point
    #   recall    = fraction of GT points within th of some predicted point
    # Returns (fscore, precision, recall); all zeros when the prediction is
    # empty. NOTE(review): an empty GT set y would divide by zero — verify
    # callers never pass one.
    if x.shape[0] == 0:
        return 0,0,0
    x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
    d2 = x_nn.kneighbors(y)[0]
    y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
    d1 = y_nn.kneighbors(x)[0]
    recall = float(sum(d < th for d in d2)) / float(len(d2))
    precision = float(sum(d < th for d in d1)) / float(len(d1))
    if recall+precision > 0:
        fscore = 2 * recall * precision / (recall + precision)
    else:
        fscore = 0
    return fscore, precision, recall
NVlabs/ACID/ACID/src/utils/io.py | import os
from plyfile import PlyElement, PlyData
import numpy as np
def export_pointcloud(vertices, out_file, as_text=True):
    """Write an (N, 3) float array of points to a PLY file."""
    assert vertices.shape[1] == 3
    # Re-view the contiguous float32 buffer as a structured x/y/z array,
    # the layout plyfile expects for a 'vertex' element.
    structured = np.ascontiguousarray(vertices.astype(np.float32)).view(
        dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')]).flatten()
    element = PlyElement.describe(structured, 'vertex')
    PlyData([element], text=as_text).write(out_file)
def load_pointcloud(in_file):
    """Read a PLY file and return its vertices as an (N, 3) array."""
    vertex = PlyData.read(in_file)['vertex']
    return np.stack([vertex['x'], vertex['y'], vertex['z']], axis=1)
def read_off(file):
    """
    Reads vertices and faces from an off file.

    :param file: path to file to read
    :type file: str
    :return: vertices and faces as lists of tuples
    :rtype: [(float)], [(int)]
    """
    assert os.path.exists(file), 'file %s not found' % file
    with open(file, 'r') as fp:
        lines = [line.strip() for line in fp.readlines()]
        # Fix for ModelNet bug where 'OFF' and the vertex/face counts are all
        # crammed into the first line instead of the first two lines.
        if len(lines[0]) > 3:
            assert lines[0][:3] in ('OFF', 'off'), 'invalid OFF file %s' % file
            parts = lines[0][3:].split(' ')
            start_index = 1
        else:
            # Regular, spec-conforming case: header line then counts line.
            assert lines[0] in ('OFF', 'off'), 'invalid OFF file %s' % file
            parts = lines[1].split(' ')
            start_index = 2
        assert len(parts) == 3
        num_vertices = int(parts[0])
        assert num_vertices > 0
        num_faces = int(parts[1])
        assert num_faces > 0

        vertices = []
        for i in range(num_vertices):
            vertex = lines[start_index + i].split(' ')
            vertex = [float(point.strip()) for point in vertex if point != '']
            assert len(vertex) == 3
            vertices.append(vertex)

        faces = []
        for i in range(num_faces):
            raw = lines[start_index + num_vertices + i].split(' ')
            # Empty tokens are already dropped here, so no per-token
            # emptiness assert is needed.
            face = [int(index.strip()) for index in raw if index != '']
            # First value is the vertex count of the face.
            assert face[0] == len(face) - 1, \
                'face should have %d vertices but has %d (%s)' \
                % (face[0], len(face) - 1, file)
            assert face[0] == 3, \
                'only triangular meshes supported (%s)' % file
            # Bug fix: only validate the actual vertex indices (face[1:]).
            # The original also range-checked the leading count value 3,
            # which wrongly rejected meshes with exactly 3 vertices.
            for index in face[1:]:
                assert 0 <= index < num_vertices, \
                    'vertex %d (of %d vertices) does not exist (%s)' \
                    % (index, num_vertices, file)
            faces.append(face)
        # Note: an unreachable `assert False` that followed this return in the
        # original has been removed as dead code.
        return vertices, faces
| 3,415 | Python | 29.230088 | 78 | 0.513616 |
NVlabs/ACID/ACID/src/utils/visualize.py | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import src.common as common
def visualize_data(data, data_type, out_file):
    r''' Visualizes the data with regard to its type.

    Args:
        data (tensor): batch of data
        data_type (string): data type (img, voxels or pointcloud)
        out_file (string): output file
    '''
    if data_type is None or data_type == 'idx':
        return
    if data_type == 'voxels':
        visualize_voxels(data, out_file=out_file)
    elif data_type == 'pointcloud':
        visualize_pointcloud(data, out_file=out_file)
    else:
        raise ValueError('Invalid data_type "%s"' % data_type)
def visualize_voxels(voxels, out_file=None, show=False):
    r''' Visualizes voxel data.

    Args:
        voxels (tensor): voxel data
        out_file (string): output file
        show (bool): whether the plot should be shown
    '''
    # Use numpy
    voxels = np.asarray(voxels)
    # Create plot
    fig = plt.figure()
    # `fig.gca(projection=...)` was deprecated in Matplotlib 3.4 and removed
    # in 3.6; add_subplot is the supported way to create a 3D axes.
    ax = fig.add_subplot(projection='3d')
    # Put the depth axis first so the view matches the axis relabeling below.
    voxels = voxels.transpose(2, 0, 1)
    ax.voxels(voxels, edgecolor='k')
    ax.set_xlabel('Z')
    ax.set_ylabel('X')
    ax.set_zlabel('Y')
    ax.view_init(elev=30, azim=45)
    if out_file is not None:
        plt.savefig(out_file)
    if show:
        plt.show()
    plt.close(fig)
def visualize_pointcloud(points, normals=None,
                         out_file=None, show=False):
    r''' Visualizes point cloud data.

    Args:
        points (tensor): point data
        normals (tensor): normal data (if existing)
        out_file (string): output file
        show (bool): whether the plot should be shown
    '''
    # Use numpy
    points = np.asarray(points)
    # Create plot
    fig = plt.figure()
    # `fig.gca(projection=...)` was deprecated in Matplotlib 3.4 and removed
    # in 3.6; add_subplot is the supported way to create a 3D axes.
    ax = fig.add_subplot(projection='3d')
    # Axes are plotted as (z, x, y) and relabeled accordingly below.
    ax.scatter(points[:, 2], points[:, 0], points[:, 1])
    if normals is not None:
        ax.quiver(
            points[:, 2], points[:, 0], points[:, 1],
            normals[:, 2], normals[:, 0], normals[:, 1],
            length=0.1, color='k'
        )
    ax.set_xlabel('Z')
    ax.set_ylabel('X')
    ax.set_zlabel('Y')
    ax.set_xlim(-0.5, 0.5)
    ax.set_ylim(-0.5, 0.5)
    ax.set_zlim(-0.5, 0.5)
    ax.view_init(elev=30, azim=45)
    if out_file is not None:
        plt.savefig(out_file)
    if show:
        plt.show()
    plt.close(fig)
| 2,378 | Python | 26.66279 | 65 | 0.585786 |
NVlabs/ACID/ACID/src/utils/mentalsim_util.py | import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
########################################################################
# Viewpoint transform
########################################################################
# Maps each camera id to an axis spec triple consumed by get_axis_pt /
# world_coord_view_augmentation: output axis i takes the input axis named in
# slot i, negated when prefixed with '-'.
view_to_order = {
    'cam0': ('X', 'Y', 'Z'),
    'cam1': ('-Z', 'Y', 'X'),
    'cam2': ('Z', 'Y', '-X'),
    'cam3': ('-X', 'Y', '-Z'),
}
def get_axis_pt(val, x, y, z):
    """Select x, y or z according to an axis spec like 'X' or '-Z'.

    Raises ValueError for a spec naming none of X/Y/Z (the original
    silently fell through and returned None).
    """
    multiplier = -1 if '-' in val else 1
    if "X" in val:
        return x * multiplier
    if "Y" in val:
        return y * multiplier
    if "Z" in val:
        return z * multiplier
    raise ValueError('invalid axis spec: %r' % (val,))
def world_coord_view_augmentation(view, pts):
    """Permute/flip point axes from camera frame `view` into the world order
    defined by `view_to_order`."""
    axis_specs = view_to_order[view]
    x, y, z = np.moveaxis(pts.reshape([-1, 3]), 1, 0)
    columns = [get_axis_pt(spec, x, y, z) for spec in axis_specs]
    return np.array(columns).T
########################################################################
# partial observation projection / transform / rendering utilities
########################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
    """Apply a 4x4 camera-to-world pose to an (N, 3) point array."""
    rotation = camera_pose[0:3, 0:3]
    translation = camera_pose[0:3, 3]
    # p_world = R p_cam + t, vectorized over rows via broadcasting.
    return cam_pts @ rotation.T + translation
def transform_points_world_to_cam(world_points, cam_extr):
    """Map (N, 3) world points into the camera frame of a 4x4 extrinsic pose."""
    rotation_inv = np.linalg.inv(cam_extr[0:3, 0:3])
    shifted = world_points - cam_extr[0:3, 3]
    # p_cam = R^-1 (p_world - t), vectorized over rows.
    return shifted @ rotation_inv.T
def render_points_slowest(world_points, cam_extr, cam_intr):
    # Reference (slow) visibility filter: projects the points into the image,
    # buckets them into 3x3-pixel cells and keeps, per cell, the point with the
    # largest depth value, returning those world-space points.
    cam_points = transform_points_world_to_cam(world_points, cam_extr)
    cam_pts_x = cam_points[:,0]
    cam_pts_y = cam_points[:,1]
    cam_pts_z = cam_points[:,2]
    # Perspective projection (note the x sign flip and the swapped
    # principal-point indices, matching this file's capture convention).
    cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
    cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
    cam_pts_x = np.rint(cam_pts_x).astype(int)
    cam_pts_y = np.rint(cam_pts_y).astype(int)
    # Rows: (pixel_y, pixel_x, depth, original point index).
    points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
    sorted_pts = sorted(points, key=lambda x: (x[0], x[1]))
    grouped_pts = [[*j] for i, j in itertools.groupby(
        sorted_pts,
        key=lambda x: (x[0] // 3, x[1] // 3))]
    # Max-z point per cell; depths are negative in this pipeline (see the
    # depth loading, which negates), so max-z is the point nearest the camera
    # -- NOTE(review): confirm the sign convention for other inputs.
    min_depth = np.array([sorted(p, key=lambda x: -x[2])[0] for p in grouped_pts])
    min_idx = min_depth[:,-1]
    min_depth = min_depth[:,:-1]
    return world_points[min_idx.astype(int)]
def render_points_slow(world_points, cam_extr, cam_intr):
    # Faster variant of render_points_slowest: same projection, but the cell
    # grouping uses a lexicographic sort plus np.split instead of groupby.
    cam_points = transform_points_world_to_cam(world_points, cam_extr)
    cam_pts_x = cam_points[:,0]
    cam_pts_y = cam_points[:,1]
    cam_pts_z = cam_points[:,2]
    cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
    cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
    # Rows: (pixel_y, pixel_x, depth, original point index).
    points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
    # Quantize pixel coordinates into 2px cells.
    points[:,:2] = np.rint(points[:,:2] / 2)
    # Stable two-key sort: first by x, then by y (mergesort keeps x order).
    points = points[points[:,1].argsort()]
    points = points[points[:,0].argsort(kind='mergesort')]
    # Split into runs of identical (y, x) cells; keep the max-depth point of
    # each run (nearest to the camera given this pipeline's negative depths).
    grouped_pts = np.split(points[:,2:], np.unique(points[:, :2], axis=0, return_index=True)[1][1:])
    min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
    min_idx = min_depth[:,-1].astype(int)
    return world_points[min_idx]
def render_points(world_points, cam_extr, cam_intr):
    # Fastest visibility filter: encodes each 2px cell as a single scalar key
    # (row * 1000 + col) so one argsort plus np.split does the bucketing.
    cam_points = transform_points_world_to_cam(world_points, cam_extr)
    cam_pts_x = cam_points[:,0]
    cam_pts_y = cam_points[:,1]
    cam_pts_z = cam_points[:,2]
    cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
    cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
    # NOTE(review): assumes fewer than 1000 cells per row, else keys collide.
    idx = np.rint(cam_pts_y / 2) * 1000 + np.rint(cam_pts_x / 2)
    # Rows: (depth, original point index).
    val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
    order = idx.argsort()
    idx = idx[order]
    val = val[order]
    grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
    # Keep the max-depth point per cell (nearest to the camera given this
    # pipeline's negative depths).
    min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
    min_idx = min_depth[:,-1].astype(int)
    return world_points[min_idx]
def project_depth_world_space(depth_image, camera_intr, camera_pose, keep_dim=False, project_factor=1.):
    """Back-project a depth image into world-frame 3D points.

    When keep_dim is True the result is reshaped to (H, W, 3).
    """
    cam_pts = project_depth_cam_space(depth_image, camera_intr, keep_dim=False, project_factor=project_factor)
    world_pts = transform_points_cam_to_world(cam_pts, camera_pose)
    if keep_dim:
        world_pts = world_pts.reshape([*depth_image.shape, 3])
    return world_pts
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
    """Back-project a depth image into camera-frame 3D points.

    Rays are centered on the image midpoint and scaled by the focal lengths
    in `camera_intrinsics`; `project_factor` rescales the whole cloud.
    Returns (H, W, 3) when keep_dim is True, else (H*W, 3).
    """
    im_h, im_w = depth_img.shape[0], depth_img.shape[1]
    pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w),
                               np.linspace(0, im_h - 1, im_h))
    cam_x = (pix_x - im_w / 2.) * (-depth_img / camera_intrinsics[0, 0])
    cam_y = (pix_y - im_h / 2.) * (depth_img / camera_intrinsics[1, 1])
    cam_z = depth_img.copy()
    cam_pts = np.stack(
        [cam_x.ravel(), cam_y.ravel(), cam_z.ravel()], axis=1) * project_factor
    if keep_dim:
        cam_pts = cam_pts.reshape([im_h, im_w, 3])
    return cam_pts
def get_trunc_ab(mean, std, a, b):
    """Convert absolute truncation bounds (a, b) into the standardized
    (a', b') form expected by scipy.stats.truncnorm."""
    lower = (a - mean) / std
    upper = (b - mean) / std
    return lower, upper
########################################################################
# partial observation getter for full experiment
########################################################################
# Fixed camera extrinsics for the full-scene captures; used as the camera pose
# in project_depth_world_space, i.e. applied camera-to-world.
CAM_EXTR = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.6427898318479135, -0.766043895201295, -565.0],
                    [0.0, 0.766047091387779, 0.6427871499290135, 550.0], [0.0, 0.0, 0.0, 1.0]])
# Pinhole intrinsics: fx = fy ~= 687.19, principal point (360, 360) --
# presumably 720x720 renders; TODO confirm against the renderer config.
CAM_INTR = np.array([[687.1868314210544, 0.0, 360.0], [0.0, 687.1868314210544, 360.0], [0.0, 0.0, 1.0]])
# Axis-aligned workspace bounds: [[x_min, y_min, z_min], [x_max, y_max, z_max]].
SCENE_RANGE = np.array([[-600, -400, 0], [600, 400, 400]])
def get_scene_partial_pointcloud(model_category, model_name, split_id, int_id, frame_id, data_root):
    """Load one rendered frame and return (object_points, environment_points),
    each an (N, 6) xyz+rgb array in world space. The segmentation image
    separates object pixels (nonzero) from environment pixels (zero)."""
    template = f"{data_root}/{split_id}/{model_category}/{model_name}/img/{{}}_{int_id:04d}_{frame_id:06d}.{{}}"
    # Depth png stores millimeter-ish integers; negate and scale to meters-ish.
    depth_vals = -np.array(
        Image.open(template.format('depth', 'png')).convert(mode='I')).astype(float) / 1000.
    rgb_img = np.array(
        Image.open(template.format('rgb', 'jpg')).convert(mode="RGB")).astype(float) / 255
    seg_img = np.array(Image.open(template.format('seg', 'jpg')).convert('L')).squeeze()
    partial_points = project_depth_world_space(
        depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
    partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
    obj_pts = partial_points_rgb[np.where(seg_img != 0)]
    env_pts = partial_points_rgb[np.where(seg_img == 0)]
    return obj_pts, env_pts
########################################################################
# Get geometric state (full experiment)
########################################################################
def get_object_full_points(model_category, model_name, split_id, int_id, frame_id, data_root):
    """Load a saved geometry frame and return (sim_pts, vis_pts) in world space.

    The npz stores object-frame points plus a (loc, rot, scale) pose; the
    quaternion is stored as (w, x, y, z).
    """
    path = f"{data_root}/{split_id}/{model_category}/{model_name}/geom/{int_id:04d}_{frame_id:06d}.npz"
    geom_data = np.load(path)
    loc = geom_data['loc']
    # Removed a stray debug print of the quaternion that was here.
    w, x, y, z = geom_data['rot']  # stored wxyz; scipy expects xyzw
    rot = Rotation.from_quat(np.array([x, y, z, w]))
    scale = geom_data['scale']
    sim_pts = (rot.apply(geom_data['sim'] * scale)) + loc
    vis_pts = (rot.apply(geom_data['vis'] * scale)) + loc
    return sim_pts, vis_pts
########################################################################
# partial observation getter for teddy toy example
########################################################################
def get_teddy_partial_pointcloud(int_group, int_id, frame_id, data_root, cam_id='cam0'):
    """Load one teddy frame from camera `cam_id` and return (xyz, rgb) for the
    non-environment pixels, with xyz rotated into world axis order and rescaled
    by /10 * 1.1 (the normalization used throughout this file)."""
    #depth_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_depth.png")[0]
    depth_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_depth.png"
    depth_img = np.array(Image.open(depth_img).convert(mode='I'))
    # Negate and scale the integer depth values.
    depth_vals = -np.array(depth_img).astype(float) / 1000.
    #rgb_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_rgb.png")[0]
    rgb_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_rgb.png"
    rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
    #seg_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_seg.png")[0]
    seg_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_seg.png"
    seg_img = np.array(Image.open(seg_img))
    # Nonzero segmentation labels mark the object (non-environment) pixels.
    non_env = np.where(seg_img != 0)
    # Per-camera calibration lives in <repo_root>/metadata/camera.json,
    # located relative to this source file.
    ospdir= os.path.dirname
    root_dir = ospdir(ospdir(ospdir(os.path.realpath(__file__))))
    camera_json = os.path.join(root_dir, "metadata", "camera.json")
    with open(camera_json, 'r') as fp:
        cam_info = json.load(fp)
    # Each entry is an [extrinsics, intrinsics] pair; convert to arrays.
    for k in cam_info.keys():
        cam_extr, cam_intr = cam_info[k]
        cam_info[k] = np.array(cam_extr), np.array(cam_intr)
    cam_extr, cam_intr = cam_info[cam_id]
    partial_points = project_depth_world_space(depth_vals, cam_intr, cam_extr, keep_dim=True)
    partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
    xyzrgb = partial_points_rgb[non_env]
    xyz = xyzrgb[:,:3]
    # Rotate into canonical world axis order for this camera.
    xyz = world_coord_view_augmentation(cam_id, xyz)
    rgb = xyzrgb[:,3:]
    return xyz/ 10. * 1.1, rgb
########################################################################
# Get meta info (teddy toy example)
########################################################################
def get_teddy_loc(int_group, int_id, frame_id, data_root):
    """Look up the teddy's location at `frame_id` from the interaction's info json."""
    info_path = f"{data_root}/{int_group}/info/{int_id:06d}.json"
    with open(info_path, 'r') as fp:
        info = json.load(fp)
    frame_to_loc = dict(zip(info['frames'], info['teddy_loc']))
    return np.array(frame_to_loc[frame_id])
def get_teddy_rot(int_group, int_id, frame_id, data_root):
    """Look up the teddy's rotation at `frame_id`, converting the stored
    (w, x, y, z) quaternion to scipy's (x, y, z, w) order."""
    info_path = f"{data_root}/{int_group}/info/{int_id:06d}.json"
    with open(info_path, 'r') as fp:
        info = json.load(fp)
    frame_to_rot = dict(zip(info['frames'], info['teddy_rot']))
    w, x, y, z = np.array(frame_to_rot[frame_id])
    return np.array([x, y, z, w])
def get_action_info(int_group, int_id, data_root):
    """Return (grasp_loc, target_loc) for one interaction, read from its info json."""
    info_path = f"{data_root}/{int_group}/info/{int_id:06d}.json"
    with open(info_path, 'r') as fp:
        info = json.load(fp)
    return np.array(info['grasp']), np.array(info['target'])
def get_release_frame(int_group, int_id, data_root):
    """Return the frame index at which the gripper releases the object,
    from the interaction's info json."""
    info_path = f"{data_root}/{int_group}/info/{int_id:06d}.json"
    with open(info_path, 'r') as fp:
        return json.load(fp)['release_frame']
def get_end_frame(int_group, int_id, data_root):
    """Return the final (static) frame index of an interaction,
    from the interaction's info json."""
    info_path = f"{data_root}/{int_group}/info/{int_id:06d}.json"
    with open(info_path, 'r') as fp:
        return json.load(fp)['end_frame']
########################################################################
# Get geometric state (teddy toy example)
########################################################################
def get_teddy_full_points(int_group, int_id, frame_id, data_root):
    """Load the simulated teddy points for a frame and map them into world
    space, rescaled by /10 * 1.1 (the normalization used throughout this file)."""
    sim_path = f"{data_root}/{int_group}/geom/{int_id:06d}_{frame_id:03d}_sim.npy"
    pts = np.load(sim_path)
    loc = get_teddy_loc(int_group, int_id, frame_id, data_root)
    rot = Rotation.from_quat(get_teddy_rot(int_group, int_id, frame_id, data_root))
    return (rot.apply(pts) + loc) / 10. * 1.1
def get_teddy_vis_points(int_group, int_id, frame_id, data_root):
    """Load the visible teddy points for a frame and map them into world
    space, rescaled by /10 * 1.1 (the normalization used throughout this file)."""
    vis_path = f"{data_root}/{int_group}/geom/{int_id:06d}_{frame_id:03d}_vis.npy"
    pts = np.load(vis_path)
    loc = get_teddy_loc(int_group, int_id, frame_id, data_root)
    rot = Rotation.from_quat(get_teddy_rot(int_group, int_id, frame_id, data_root))
    return (rot.apply(pts) + loc) / 10. * 1.1
########################################################################
# Get point-based supervision data for implicit functions (teddy toy example)
########################################################################
def sample_occupancies(int_group, int_id, frame_id, data_root, sample_scheme='uniform'):
    """Sample query points and occupancy labels against the teddy's sim points.

    Returns (pts, occ, pt_class): query points, boolean occupancy (nearest sim
    point closer than 0.01), and for each occupied point the index of that
    nearest sim point.
    """
    if sample_scheme not in ['uniform', 'gaussian']:
        raise ValueError('Unsupported sampling scheme for occupancy')
    num_pts = 100000
    if sample_scheme == 'uniform':
        # Uniform samples over the [-0.55, 0.55]^3 cube.
        pts = np.random.rand(num_pts, 3)
        pts = 1.1 * (pts - 0.5)
    else:
        # Truncated Gaussian centered on the teddy's normalized location.
        x,y,z= get_teddy_loc(int_group, int_id, frame_id, data_root) / 10. * 1.1
        std = 0.18
        a, b = -0.55, 0.55
        xs = scipy.stats.truncnorm.rvs(*get_trunc_ab(x, std, a, b), loc=x, scale=std, size=num_pts)
        ys = scipy.stats.truncnorm.rvs(*get_trunc_ab(y, std, a, b), loc=y, scale=std, size=num_pts)
        zs = scipy.stats.truncnorm.rvs(*get_trunc_ab(z, std, a, b), loc=z, scale=std, size=num_pts)
        pts = np.array([xs,ys,zs]).T
    teddy_sim_points = get_teddy_full_points(int_group, int_id, frame_id, data_root)
    # 1-NN distance from each query point to the teddy's simulated points.
    x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric='l2').fit(teddy_sim_points)
    dist, ind = x_nn.kneighbors(pts)#[0].squeeze()
    dist = dist.squeeze()
    ind = ind.squeeze()
    occ = dist < 0.01
    pt_class = ind[occ != 0]
    return pts, occ, pt_class
def sample_occupancies_with_flow(int_group, int_id, release_frame, end_frame, data_root, sample_scheme='uniform'):
    """Sample occupancies at frame 0 plus, for the occupied points, their
    displacement (flow) to the release frame and to the end frame."""
    pts, occ, ind = sample_occupancies(int_group, int_id, 0, data_root, sample_scheme)
    start_pts = get_teddy_full_points(int_group, int_id, 0, data_root)
    flow_release = get_teddy_full_points(int_group, int_id, release_frame, data_root) - start_pts
    flow_end = get_teddy_full_points(int_group, int_id, end_frame, data_root) - start_pts
    return pts, occ, ind, flow_release[ind], flow_end[ind]
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
    '''Make axes of 3D plot have equal scale so that spheres appear as spheres,
    cubes as cubes, etc.  Works around Matplotlib's set_aspect('equal') and
    axis('equal') not working for 3D axes.

    Input
      ax: a matplotlib 3D axis, e.g., as output from plt.gca().
    '''
    limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
    centers = limits.mean(axis=1)
    # Half the largest extent serves as a shared "plot radius" for all axes
    # (the bounding box is a sphere in the infinity norm).
    radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
    ax.set_xlim3d([centers[0] - radius, centers[0] + radius])
    ax.set_ylim3d([centers[1] - radius, centers[1] + radius])
    ax.set_zlim3d([centers[2] - radius, centers[2] + radius])
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
    """Plot several point clouds as 3D subplots in one figure and return it.

    Each entry of `point_clouds` is a dict with keys 'pts' and 'title' and
    optional 'col' (color), 'flow' (per-point vectors drawn as a quiver) and
    'action' (a 2D location drawn as a star marker).
    """
    fig = plt.figure()
    # Near-square subplot grid.
    W = int(len(point_clouds) ** 0.5)
    H = math.ceil(len(point_clouds) / W)
    for i, pcloud in enumerate(point_clouds):
        action = None
        flow = None
        pts = pcloud['pts']
        title = pcloud['title']
        col = pcloud.get('col', None)
        flow = pcloud.get('flow', None)
        action = pcloud.get('action', None)
        ax = fig.add_subplot(W, H, i+1,projection='3d')
        ax.set_title(title)
        if flow is not None:
            # Only draw arrows for points that actually move.
            flow_norm = np.linalg.norm(flow, axis=1)
            viz_idx = flow_norm > 0.0
            flow = flow[viz_idx]
            # Axes are plotted in (x, z, y) order throughout this figure.
            ax.quiver(
                pts[:,0][viz_idx],
                pts[:,2][viz_idx],
                pts[:,1][viz_idx],
                flow[:,0], flow[:,2], flow[:,1],
                color = 'red', linewidth=3, alpha=0.2
            )
        if col is None:
            col = 'blue'
        ax.scatter(pts[:,0],
                   pts[:,2],
                   pts[:,1], color=col,s=0.5)
        set_axes_equal(ax)
        ax.view_init(*angle)
        if action is not None:
            ax.scatter(action[0], action[1], 0.,
                edgecolors='tomato', color='turquoise', marker='*',s=80)
    return fig
def write_pointcoud_as_obj(xyzrgb, path):
    """Write an (N, 3) xyz or (N, 6) xyz+rgb point array to `path` as OBJ
    vertex lines."""
    has_color = xyzrgb.shape[1] == 6
    with open(path, 'w') as out:
        for row in xyzrgb:
            if has_color:
                x, y, z, r, g, b = row
                out.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
            else:
                x, y, z = row
                out.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
    """Grid-subsample a point cloud: keep one point per voxel of size `resolution`.

    Brought in line with the newer variant of this helper elsewhere in the
    codebase: (N, 6) xyz+rgb inputs are deduplicated on xyz only (the original
    quantized color channels too), and empty input no longer crashes np.unique.
    """
    xyz = points[:, :3] if points.shape[1] == 6 else points
    if points.shape[0] == 0:
        return np.arange(0) if return_index else points
    idx = np.unique(xyz // resolution * resolution, axis=0, return_index=True)[1]
    return idx if return_index else points[idx]
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
    """Symmetric Chamfer distance between point sets x and y.

    Note: `direction` is accepted for API compatibility but both directions
    are always computed and summed.
    """
    tree_x = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
    tree_y = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
    y_to_x = tree_x.kneighbors(y)[0]
    x_to_y = tree_y.kneighbors(x)[0]
    return np.mean(y_to_x) + np.mean(x_to_y)
def f1_score(x, y, metric='l2', th=0.01):
    """F-score between a predicted point set `x` and ground-truth set `y`.

    A point counts as a hit when its nearest neighbor in the other set is
    closer than `th`. Returns (fscore, precision, recall).
    """
    # Guard empty inputs: this copy had no guard at all (the sibling copy of
    # this function guards empty x), and kneighbors / the divisions below
    # fail on empty sets.
    if x.shape[0] == 0 or y.shape[0] == 0:
        return 0, 0, 0
    x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
    d2 = x_nn.kneighbors(y)[0]  # gt -> nearest-pred distances
    y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
    d1 = y_nn.kneighbors(x)[0]  # pred -> nearest-gt distances
    recall = float(np.mean(d2 < th))
    precision = float(np.mean(d1 < th))
    if recall + precision > 0:
        fscore = 2 * recall * precision / (recall + precision)
    else:
        fscore = 0
    return fscore, precision, recall
NVlabs/ACID/ACID/src/utils/plushsim_util.py | import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from .common_util import *
########################################################################
# Some file getters
########################################################################
def get_model_dir(data_root, split_id, model_category, model_name):
    """Directory holding one model's data within a split."""
    return "/".join(str(part) for part in
                    (data_root, split_id, model_category, model_name))
def get_interaction_info_file(data_root, split_id, model_category, model_name, reset_id):
    """Path of the npz holding all interaction metadata for one reset."""
    base = get_model_dir(data_root, split_id, model_category, model_name)
    return "{}/info/interaction_info_{:04d}.npz".format(base, reset_id)
def get_geom_file(data_root, split_id, model_category, model_name, reset_id, frame_id):
    """Path of the geometry npz for one (reset, frame) pair."""
    base = get_model_dir(data_root, split_id, model_category, model_name)
    return "{}/geom/{:04d}_{:06d}.npz".format(base, reset_id, frame_id)
def get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id):
    """Image path template with two `{}` slots left open: modality and extension."""
    base = get_model_dir(data_root, split_id, model_category, model_name)
    return "{}/img/{{}}_{:04d}_{:06d}.{{}}".format(base, reset_id, frame_id)
def get_rgb(data_root, split_id, model_category, model_name, reset_id, frame_id):
    """Path of the RGB jpg for one rendered frame."""
    template = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
    return template.format('rgb', 'jpg')
def get_depth(data_root, split_id, model_category, model_name, reset_id, frame_id):
    """Path of the depth png for one rendered frame."""
    template = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
    return template.format('depth', 'png')
def get_seg(data_root, split_id, model_category, model_name, reset_id, frame_id):
    """Path of the segmentation jpg for one rendered frame."""
    template = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
    return template.format('seg', 'jpg')
def get_flow_data_file(flow_root,split_id, model_id, reset_id, int_id):
    """Path of the flow npz for one (reset, interaction) pair."""
    return "{}/{}/{}/{:03d}_{:03d}.npz".format(flow_root, split_id, model_id, reset_id, int_id)
def get_flow_pair_data_file(pair_root,split_id, model_id, reset_id, int_id):
    """Path of the flow pair npz for one (reset, interaction) pair."""
    return "{}/{}/{}/pair_{:03d}_{:03d}.npz".format(pair_root, split_id, model_id, reset_id, int_id)
def get_geom_data_file(geom_root,split_id, model_id, reset_id, frame_id):
    """Path of the processed geometry npz for one (reset, frame) pair."""
    return "{}/{}/{}/{:03d}_{:06d}.npz".format(geom_root, split_id, model_id, reset_id, frame_id)
def get_pair_data_file(pair_root,split_id, model_id, reset_id, frame_id):
    """Path of the processed pair npz for one (reset, frame) pair."""
    return "{}/{}/{}/pair_{:03d}_{:06d}.npz".format(pair_root, split_id, model_id, reset_id, frame_id)
# Getters for plan data
def get_plan_geom_file(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
    """Path of a planning-rollout geometry npz; sequence_id may be 'gt'."""
    seq_str = sequence_id if sequence_id == 'gt' else f"{sequence_id:04d}"
    model_dir = get_model_dir(data_root, split_id, model_category, model_name)
    return f"{model_dir}/geom/{scenario_id:04d}_{seq_str}_{frame_id}.npz"
def get_plan_interaction_info_file(data_root, split_id, model_category, model_name, scenario_id, sequence_id):
    """Path of a planning-rollout interaction-info npz; sequence_id may be 'gt'."""
    seq_str = sequence_id if sequence_id == 'gt' else f"{sequence_id:04d}"
    model_dir = get_model_dir(data_root, split_id, model_category, model_name)
    return f"{model_dir}/info/interaction_info_{scenario_id:04d}_{seq_str}.npz"
def get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
    """Planning-rollout image path template with two `{}` slots left open:
    modality and extension. sequence_id may be 'gt'."""
    seq_str = sequence_id if sequence_id == 'gt' else f"{sequence_id:04d}"
    model_dir = get_model_dir(data_root, split_id, model_category, model_name)
    return f"{model_dir}/img/{{}}_{scenario_id:04d}_{seq_str}_{frame_id}.{{}}"
def get_plan_rgb(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
    """Path of the RGB jpg for one planning-rollout frame."""
    template = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
    return template.format('rgb', 'jpg')
def get_plan_depth(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
    """Path of the depth png for one planning-rollout frame."""
    template = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
    return template.format('depth', 'png')
def get_plan_seg(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
    """Path of the segmentation jpg for one planning-rollout frame."""
    template = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
    return template.format('seg', 'jpg')
def get_plan_perf_file(data_root, split_id, model_category, model_name, scenario_id):
    """Path of the performance npz for one planning scenario."""
    base = get_model_dir(data_root, split_id, model_category, model_name)
    return "{}/info/perf_{:04d}.npz".format(base, scenario_id)
########################################################################
# partial observation getter for full experiment
########################################################################
# Fixed camera extrinsics for the full-scene captures; used as the camera pose
# in project_depth_world_space, i.e. applied camera-to-world.
CAM_EXTR = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.6427898318479135, -0.766043895201295, -565.0],
                    [0.0, 0.766047091387779, 0.6427871499290135, 550.0], [0.0, 0.0, 0.0, 1.0]])
# Pinhole intrinsics: fx = fy ~= 687.19, principal point (360, 360) --
# presumably 720x720 renders; TODO confirm against the renderer config.
CAM_INTR = np.array([[687.1868314210544, 0.0, 360.0], [0.0, 687.1868314210544, 360.0], [0.0, 0.0, 1.0]])
# Axis-aligned workspace bounds: [[x_min, y_min, z_min], [x_max, y_max, z_max]].
SCENE_RANGE = np.array([[-600, -600, -20], [600, 600, 380]])
def get_plan_scene_partial_pointcloud(
    model_category, model_name, split_id, scenario_id, sequence_id, frame_id, data_root):
    """Load one planning-rollout frame; return (object_pts, env_pts), each an
    (N, 6) xyz+rgb array in world space, split via the segmentation image
    (nonzero labels = object, zero = environment)."""
    depth_path = get_plan_depth(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
    depth_vals = -np.array(Image.open(depth_path).convert(mode='I')).astype(float) / 1000.
    rgb_path = get_plan_rgb(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
    rgb_img = np.array(Image.open(rgb_path).convert(mode="RGB")).astype(float) / 255
    seg_path = get_plan_seg(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
    seg_img = np.array(Image.open(seg_path).convert('L')).squeeze()
    partial_points = project_depth_world_space(
        depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
    partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
    obj_pts = partial_points_rgb[np.where(seg_img != 0)]
    env_pts = partial_points_rgb[np.where(seg_img == 0)]
    return obj_pts, env_pts
def get_scene_partial_pointcloud(model_category, model_name, split_id, reset_id, frame_id, data_root):
    """Load one captured frame; return (object_pts, env_pts), each an (N, 6)
    xyz+rgb array in world space, split via the segmentation image
    (nonzero labels = object, zero = environment)."""
    depth_path = get_depth(data_root, split_id, model_category, model_name, reset_id, frame_id)
    depth_vals = -np.array(Image.open(depth_path).convert(mode='I')).astype(float) / 1000.
    rgb_path = get_rgb(data_root, split_id, model_category, model_name, reset_id, frame_id)
    rgb_img = np.array(Image.open(rgb_path).convert(mode="RGB")).astype(float) / 255
    seg_path = get_seg(data_root, split_id, model_category, model_name, reset_id, frame_id)
    seg_img = np.array(Image.open(seg_path).convert('L')).squeeze()
    partial_points = project_depth_world_space(
        depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
    partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
    obj_pts = partial_points_rgb[np.where(seg_img != 0)]
    env_pts = partial_points_rgb[np.where(seg_img == 0)]
    return obj_pts, env_pts
def render_points(world_points, cam_extr=None, cam_intr=None, return_index=False, filter_in_cam=True):
    """Visibility-filter a world-space point cloud by z-buffering it onto the
    camera image plane; returns the visible points (or their indices when
    return_index is True). Defaults to this module's CAM_EXTR / CAM_INTR."""
    if cam_extr is None:
        cam_extr = CAM_EXTR
    if cam_intr is None:
        cam_intr = CAM_INTR
    cam_points = transform_points_world_to_cam(world_points, cam_extr) / 100.
    cam_pts_x = cam_points[:,0]
    cam_pts_y = cam_points[:,1]
    cam_pts_z = cam_points[:,2]
    # Perspective projection (x sign flip and swapped principal-point indices
    # match this module's capture convention).
    cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
    cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
    # Encode each 6px cell as one scalar key (row * 1000 + col) so a single
    # argsort plus np.split performs the per-cell bucketing.
    idx = np.rint(cam_pts_y / 6) * 1000 + np.rint(cam_pts_x / 6)
    val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
    order = idx.argsort()
    idx = idx[order]
    val = val[order]
    grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
    # Per cell, keep the max-z point -- NOTE(review): nearest to the camera
    # assuming this pipeline's negative depth convention; confirm.
    min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
    min_idx = min_depth[:,-1].astype(int)
    if filter_in_cam:
        # Drop points that project to negative pixel coordinates.
        in_cam = np.where(np.logical_and(cam_pts_x > 0, cam_pts_y > 0))[0]
        min_idx = np.intersect1d(in_cam, min_idx, assume_unique=True)
    if return_index:
        return min_idx
    return world_points[min_idx]
########################################################################
# Get geometric state (full experiment)
########################################################################
def extract_full_points(path):
    """Load a geometry npz and return (sim_pts, vis_pts, loc, rot, scale),
    with both point sets transformed into world frame."""
    data = np.load(path)
    loc = data['loc']
    scale = data['scale']
    w, x, y, z = data['rot']  # stored wxyz; scipy expects xyzw
    rot = Rotation.from_quat(np.array([x, y, z, w]))
    sim_pts = rot.apply(data['sim'] * scale) + loc
    vis_pts = rot.apply(data['vis'] * scale) + loc
    return sim_pts, vis_pts, loc, rot, scale
def get_object_full_points(model_category, model_name, split_id, reset_id, frame_id, data_root):
    """Resolve the geometry file for one (reset, frame) and load its full point sets."""
    geom_path = get_geom_file(data_root, split_id, model_category, model_name, reset_id, frame_id)
    return extract_full_points(geom_path)
def get_action_info(model_category, model_name, split_id, reset_id, interaction_id, data_root):
    """Return (grasp_loc, target_loc, start_frame, release_frame, static_frame)
    for one interaction of one reset, read from the interaction-info npz."""
    info_path = get_interaction_info_file(data_root, split_id, model_category, model_name, reset_id)
    info = np.load(info_path)
    grasp_loc = np.array(info['grasp_points'][interaction_id])
    target_loc = np.array(info['target_points'][interaction_id])
    return (grasp_loc, target_loc,
            info['start_frames'][interaction_id],
            info['release_frames'][interaction_id],
            info['static_frames'][interaction_id])
########################################################################
# Get point-based supervision data for implicit functions (teddy toy example)
########################################################################
def sample_occupancies(full_pts, center,
                       sample_scheme='gaussian',
                       num_pts = 100000, bound=0.55,
                       std=0.1):
    """Draw query points near a point cloud and label them by occupancy.

    Sampling schemes:
      * ``'uniform'``  — uniform over the cube 1.1 * [-0.5, 0.5]^3.
      * ``'object'``   — Gaussian offsets around randomly chosen cloud points
                         (offset spread derived from the cloud's bounding box).
      * ``'gaussian'`` — truncated Gaussian centered on ``center``.

    A sample is "occupied" when its nearest cloud point lies within 0.01.

    Returns:
        (pts, occ, pt_class): sampled points, boolean occupancy mask, and for
        the occupied samples the index of their nearest point in ``full_pts``.
    """
    if sample_scheme not in ['uniform', 'gaussian', 'object']:
        raise ValueError('Unsupported sampling scheme for occupancy')

    if sample_scheme == 'uniform':
        pts = 1.1 * (np.random.rand(num_pts, 3) - 0.5)
    elif sample_scheme == 'object':
        # Anchor each sample on a random cloud point, then perturb it.
        displace = full_pts[np.random.randint(full_pts.shape[0], size=num_pts)]
        lo = full_pts.min(axis=0)
        hi = full_pts.max(axis=0)
        a, b = -bound, bound
        # Draw x, y, z offsets in that order (keeps the RNG stream identical).
        offsets = [
            scipy.stats.truncnorm.rvs(
                *get_trunc_ab_range(lo[axis], hi[axis], std, a, b),
                loc=0, scale=std, size=num_pts)
            for axis in range(3)
        ]
        pts = np.array(offsets).T + displace
    else:
        a, b = -bound, bound
        coords = [
            scipy.stats.truncnorm.rvs(
                *get_trunc_ab(c, std, a, b), loc=c, scale=std, size=num_pts)
            for c in center
        ]
        pts = np.array(coords).T

    nn = NearestNeighbors(n_neighbors=1, leaf_size=1,
                          algorithm='kd_tree', metric='l2').fit(full_pts)
    dist, ind = nn.kneighbors(pts)
    dist = dist.squeeze()
    ind = ind.squeeze()
    occ = dist < 0.01
    # Label each occupied sample with its nearest surface point's index.
    pt_class = ind[occ]
    return pts, occ, pt_class
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
    '''Force a 3D axes to use the same scale on x, y and z.

    Matplotlib's ``ax.set_aspect('equal')`` and ``ax.axis('equal')`` do not
    work for 3D plots, so this rescales all three axis limits to a common
    half-range (centered on each axis' midpoint) so that spheres appear as
    spheres, cubes as cubes, etc.

    Input
      ax: a matplotlib axis, e.g., as output from plt.gca().
    '''
    limits = [ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()]
    centers = [np.mean(lim) for lim in limits]
    spans = [abs(lim[1] - lim[0]) for lim in limits]

    # The plot bounding box is a sphere in the infinity norm, so half the
    # largest span is the plot "radius".
    radius = 0.5 * max(spans)

    ax.set_xlim3d([centers[0] - radius, centers[0] + radius])
    ax.set_ylim3d([centers[1] - radius, centers[1] + radius])
    ax.set_zlim3d([centers[2] - radius, centers[2] + radius])
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
    """Render several point clouds as 3D scatter subplots in one figure.

    Each entry of ``point_clouds`` is a dict with required keys ``pts`` and
    ``title`` and optional keys ``col`` (scatter color), ``flow`` (per-point
    displacement vectors, drawn as a quiver over the moving points) and
    ``action`` (a 2D location drawn as a star marker at z=0).

    Note: the y and z columns are swapped when plotting.  Returns the figure.
    """
    fig = plt.figure()
    n_rows = int(len(point_clouds) ** 0.5)
    n_cols = math.ceil(len(point_clouds) / n_rows)
    for idx, pcloud in enumerate(point_clouds):
        pts = pcloud['pts']
        col = pcloud.get('col', None)
        flow = pcloud.get('flow', None)
        action = pcloud.get('action', None)
        ax = fig.add_subplot(n_rows, n_cols, idx + 1, projection='3d')
        ax.set_title(pcloud['title'])
        if flow is not None:
            # Only draw arrows for points that actually move.
            moving = np.linalg.norm(flow, axis=1) > 0.0
            flow = flow[moving]
            ax.quiver(
                pts[:, 0][moving],
                pts[:, 2][moving],
                pts[:, 1][moving],
                flow[:, 0], flow[:, 2], flow[:, 1],
                color = 'red', linewidth=3, alpha=0.2
            )
        if col is None:
            col = 'blue'
        ax.scatter(pts[:, 0], pts[:, 2], pts[:, 1], color=col, s=0.5)
        set_axes_equal(ax)
        ax.view_init(*angle)
        if action is not None:
            ax.scatter(action[0], action[1], 0.,
                       edgecolors='tomato', color='turquoise', marker='*', s=80)
    return fig
def write_pointcoud_as_obj(xyzrgb, path):
    """Dump a point cloud to a Wavefront .obj file as vertex records.

    Args:
        xyzrgb: (N, 6) array of x, y, z, r, g, b rows, or an (N, 3) array of
            bare coordinates.
        path: output file path (overwritten if it exists).
    """
    has_color = xyzrgb.shape[1] == 6
    with open(path, 'w') as fp:
        for row in xyzrgb:
            if has_color:
                x, y, z, r, g, b = row
                fp.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
            else:
                x, y, z = row
                fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
    """Voxel-grid subsampling: keep one representative point per voxel.

    Points are snapped to a grid of cell size ``resolution``; ``np.unique``
    then keeps the first point that landed in each occupied cell.

    Returns the indices of the kept points when ``return_index`` is True,
    otherwise the kept points themselves.
    """
    quantized = points // resolution * resolution
    keep = np.unique(quantized, axis=0, return_index=True)[1]
    return keep if return_index else points[keep]
def miou(x, y, th=0.01):
    """Volumetric IoU of two point clouds on a voxel grid of cell size ``th``.

    Each cloud is voxelized (one cell per occupied voxel) and the
    intersection-over-union of the two occupied-cell sets is returned.
    """
    x_cells = {tuple(cell) for cell in
               subsample_points(x, resolution=th, return_index=False) // th}
    y_cells = {tuple(cell) for cell in
               subsample_points(y, resolution=th, return_index=False) // th}
    return len(x_cells & y_cells) / len(x_cells | y_cells)
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
    """Chamfer distance between point sets ``x`` and ``y``.

    Args:
        x: (N, D) array of points.
        y: (M, D) array of points.
        metric: distance metric passed to sklearn's ``NearestNeighbors``.
        direction: ``'x_to_y'`` (mean distance from each x to its nearest y),
            ``'y_to_x'`` (the reverse), or ``'bi'`` (sum of both).  The
            default ``'bi'`` reproduces the previous behavior exactly; the
            parameter used to be accepted but silently ignored.

    Returns:
        The chamfer distance as a float (symmetric only for ``'bi'``).

    Raises:
        ValueError: if ``direction`` is not one of the three options.
    """
    if direction not in ('x_to_y', 'y_to_x', 'bi'):
        raise ValueError("direction must be 'x_to_y', 'y_to_x' or 'bi'")
    dist = 0.0
    if direction in ('y_to_x', 'bi'):
        # Distance from every point of y to its nearest neighbor in x.
        x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
        min_y_to_x = x_nn.kneighbors(y)[0]
        dist += np.mean(min_y_to_x)
    if direction in ('x_to_y', 'bi'):
        # Distance from every point of x to its nearest neighbor in y.
        y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
        min_x_to_y = y_nn.kneighbors(x)[0]
        dist += np.mean(min_x_to_y)
    return dist
def f1_score(x, y, metric='l2', th=0.01):
    """F-score between predicted cloud ``x`` and ground-truth cloud ``y``.

    A ground-truth point counts toward recall when some predicted point lies
    within ``th`` of it; a predicted point counts toward precision when some
    ground-truth point lies within ``th`` of it.

    Returns:
        (fscore, precision, recall)
    """
    # x is pred, y is gt.
    pred_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
    gt_to_pred = pred_nn.kneighbors(y)[0]
    gt_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
    pred_to_gt = gt_nn.kneighbors(x)[0]
    recall = float(sum(d < th for d in gt_to_pred)) / float(len(gt_to_pred))
    precision = float(sum(d < th for d in pred_to_gt)) / float(len(pred_to_gt))
    if recall + precision > 0:
        fscore = 2 * recall * precision / (recall + precision)
    else:
        fscore = 0
    return fscore, precision, recall
from scipy.spatial import cKDTree
def find_nn_cpu(feat0, feat1, return_distance=False):
    """Nearest neighbor in ``feat1`` for every row of ``feat0`` via a k-d tree.

    Args:
        feat0: (N, D) query features.
        feat1: (M, D) reference features.
        return_distance: if True, also return the nearest-neighbor distances.

    Returns:
        ``nn_inds`` (N,), or ``(nn_inds, dists)`` when ``return_distance``.
    """
    feat1tree = cKDTree(feat1)
    # `n_jobs` was renamed `workers` in SciPy 1.6 and removed in SciPy 1.9;
    # `workers=-1` keeps the "use all cores" behavior.
    dists, nn_inds = feat1tree.query(feat0, k=1, workers=-1)
    if return_distance:
        return nn_inds, dists
    else:
        return nn_inds
def find_emd_cpu(feat0, feat1, return_distance=False):
    """One-to-one matching between two equal-sized feature sets (EMD-style).

    Solves the linear assignment problem on the pairwise distance matrix.

    Args:
        feat0, feat1: (N, D) feature arrays.
        return_distance: if True, additionally return the matched pairwise
            distances.  (This flag used to be accepted but ignored.)

    Returns:
        ``(feat0_inds, feat1_inds)``, plus the matched distances as a third
        element when ``return_distance`` is True.
    """
    from scipy.spatial.distance import cdist
    from scipy.optimize import linear_sum_assignment
    d = cdist(feat0, feat1)
    feat0_inds, feat1_inds = linear_sum_assignment(d)
    if return_distance:
        return feat0_inds, feat1_inds, d[feat0_inds, feat1_inds]
    return feat0_inds, feat1_inds
def find_nn_cpu_symmetry_consistent(feat0, feat1, pts0, pts1, n_neighbor=10, local_radis=0.05, return_distance=False):
    """k-nearest neighbors in ``feat1`` for each row of ``feat0``.

    NOTE(review): despite the name, ``pts0``, ``pts1`` and ``local_radis``
    are currently unused placeholders — no symmetry-consistency filtering is
    applied; the raw k-NN result is returned.  They are kept for signature
    stability.

    Returns:
        ``nn_inds`` of shape (N, n_neighbor), or ``(nn_inds, dists)`` when
        ``return_distance`` is True.
    """
    feat1tree = cKDTree(feat1)
    # `n_jobs` was renamed `workers` in SciPy 1.6 and removed in SciPy 1.9.
    dists, nn_inds = feat1tree.query(feat0, k=n_neighbor, workers=-1)
    if return_distance:
        return nn_inds, dists
    else:
        return nn_inds
#################################
# ranking utilities
def overlap(list1, list2, depth):
    """Tie-aware overlap of two rankings at ``depth``.

    ``agreement`` already handles ties, so overlap is recovered by scaling
    it back up by the effective depth.  The depth is capped by both list
    lengths: overlap can never exceed the number of ranks in the shorter
    list, and without the cap the results are conspicuously wrong for
    unequal-length lists (rbo_ext falls outside [rbo_min, rbo_min +
    rbo_res]).  The paper gives no guidance here, but this should also be
    what the ``rbo*()`` functions below use internally.

    >>> overlap("abcd", "abcd", 3)
    3.0
    >>> overlap("abcd", "abcd", 5)
    4.0
    >>> overlap(["a", {"b", "c"}, "d"], ["a", {"b", "c"}, "d"], 2)
    2.0
    >>> overlap(["a", {"b", "c"}, "d"], ["a", {"b", "c"}, "d"], 3)
    3.0
    """
    effective_depth = min(depth, len(list1), len(list2))
    return agreement(list1, list2, depth) * effective_depth
def rbo_ext(list1, list2, p=0.9):
    """RBO point estimate based on extrapolating observed overlap.

    Implements equation (32) of the RBO paper, except that the paper's
    ``overlap(..., d) / d`` is replaced by ``agreement(..., d)`` (equation
    (28)) so that ties are handled properly — otherwise values > 1 can be
    produced.

    NOTE: The doctests weren't verified against manual computations but seem
    plausible.

    >>> _round(rbo_ext("abcdefg", "abcdefg", .9))
    1.0
    >>> _round(rbo_ext("abcdefg", "bacdefg", .9))
    0.9
    """
    shorter, longer = sorted((list1, list2), key=len)
    s, l = len(shorter), len(longer)
    x_l = overlap(list1, list2, l)
    x_s = overlap(list1, list2, s)
    # Observed (tie-corrected) agreement up to the longer depth ...
    sum1 = sum(p ** d * agreement(list1, list2, d) for d in range(1, l + 1))
    # ... plus the extrapolated contribution beyond the shorter list.
    sum2 = sum(p ** d * x_s * (d - s) / s / d for d in range(s + 1, l + 1))
    term1 = (1 - p) / p * (sum1 + sum2)
    term2 = p ** l * ((x_l - x_s) / l + x_s / s)
    return term1 + term2
def set_at_depth(lst, depth):
    """Flatten the first ``depth`` ranks of a ranking into a set.

    A rank may be a single item or a set of tied items; tied items are all
    included.
    """
    flattened = set()
    for rank in lst[:depth]:
        if isinstance(rank, set):
            flattened |= rank
        else:
            flattened.add(rank)
    return flattened
def raw_overlap(list1, list2, depth):
    """Overlap as defined in the article (no tie correction).

    Returns:
        (intersection size, size of set1, size of set2), where each set is
        the flattened prefix of the corresponding ranking at ``depth``.
    """
    set1 = set_at_depth(list1, depth)
    set2 = set_at_depth(list2, depth)
    return len(set1 & set2), len(set1), len(set2)
def agreement(list1, list2, depth):
    """Proportion of shared values between two sorted lists at given depth.

    Computed as 2*|A ∩ B| / (|A| + |B|) over the flattened prefixes, which
    stays well-defined when ties make the two prefixes unequal in size.

    >>> _round(agreement("abcde", "abdcf", 1))
    1.0
    >>> _round(agreement("abcde", "abdcf", 3))
    0.667
    >>> _round(agreement("abcde", "abdcf", 4))
    1.0
    >>> _round(agreement("abcde", "abdcf", 5))
    0.8
    >>> _round(agreement([{1, 2}, 3], [1, {2, 3}], 1))
    0.667
    >>> _round(agreement([{1, 2}, 3], [1, {2, 3}], 2))
    1.0
    """
    shared, size1, size2 = raw_overlap(list1, list2, depth)
    return 2 * shared / (size1 + size2)
| 20,693 | Python | 43.407725 | 124 | 0.605905 |
NVlabs/ACID/ACID/src/utils/libmise/__init__.py | from .mise import MISE
# __all__ must contain attribute *names* (strings); listing the class object
# itself makes `from src.utils.libmise import *` fail with
# "TypeError: attribute name must be string".
__all__ = [
    "MISE"
]
| 47 | Python | 6.999999 | 22 | 0.531915 |
NVlabs/ACID/ACID/src/utils/libmise/test.py | import numpy as np
from mise import MISE
import time
# Smoke test for the MISE extractor: repeatedly query the active points,
# evaluate a toy implicit function on them, feed the values back, and stop
# when no points remain (hard-capped at 8 iterations).
t0 = time.time()

extractor = MISE(1, 2, 0.)
p = extractor.query()
i = 0

while p.shape[0] != 0:
    print(i)
    print(p)
    # Toy occupancy field: +1 where the coordinate sum exceeds 2, else -1.
    v = 2 * (p.sum(axis=-1) > 2).astype(np.float64) - 1
    extractor.update(p, v)
    p = extractor.query()
    i += 1
    if (i >= 8):  # safety cap so a misbehaving extractor cannot loop forever
        break

# Dump the final dense grid produced by the refinement.
print(extractor.to_dense())
# p, v = extractor.get_points()
# print(p)
# print(v)
print('Total time: %f' % (time.time() - t0))
| 456 | Python | 16.576922 | 55 | 0.570175 |
NVlabs/ACID/ACID/src/utils/libsimplify/__init__.py | from .simplify_mesh import (
mesh_simplify
)
import trimesh
def simplify_mesh(mesh, f_target=10000, agressiveness=7.):
    """Decimate a trimesh to roughly ``f_target`` faces.

    Runs the fast-quadric mesh simplification on the mesh's vertex/face
    arrays and wraps the result in a new ``trimesh.Trimesh``;
    ``process=False`` keeps the simplified arrays exactly as returned.

    Args:
        mesh: input ``trimesh.Trimesh``.
        f_target: target number of faces.
        agressiveness: threshold growth rate of the underlying simplifier.
    """
    new_vertices, new_faces = mesh_simplify(
        mesh.vertices, mesh.faces, f_target, agressiveness)
    return trimesh.Trimesh(new_vertices, new_faces, process=False)
| 355 | Python | 21.249999 | 77 | 0.723944 |
NVlabs/ACID/ACID/src/utils/libsimplify/test.py | from simplify_mesh import mesh_simplify
import numpy as np
# Smoke test: push random vertices and random (possibly degenerate) faces
# through the compiled simplifier with a target of 50 faces.
v = np.random.rand(100, 3)
f = np.random.choice(range(100), (50, 3))
mesh_simplify(v, f, 50)
NVlabs/ACID/ACID/src/utils/libsimplify/Simplify.h | /////////////////////////////////////////////
//
// Mesh Simplification Tutorial
//
// (C) by Sven Forstmann in 2014
//
// License : MIT
// http://opensource.org/licenses/MIT
//
//https://github.com/sp4cerat/Fast-Quadric-Mesh-Simplification
//
// 5/2016: Chris Rorden created minimal version for OSX/Linux/Windows compile
//#include <iostream>
//#include <stddef.h>
//#include <functional>
//#include <sys/stat.h>
//#include <stdbool.h>
#include <string.h>
//#include <ctype.h>
//#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <map>
#include <vector>
#include <string>
#include <math.h>
#include <float.h> //FLT_EPSILON, DBL_EPSILON
// Index-loop shorthands used throughout this file (declare their own int
// counter named i/j/k).
// NOTE(review): `loopi` is defined twice on consecutive lines; the second
// definition is byte-identical, so the redefinition is harmless.
#define loopi(start_l,end_l) for ( int i=start_l;i<end_l;++i )
#define loopi(start_l,end_l) for ( int i=start_l;i<end_l;++i )
#define loopj(start_l,end_l) for ( int j=start_l;j<end_l;++j )
#define loopk(start_l,end_l) for ( int k=start_l;k<end_l;++k )
// Plain 3D point/vector aggregate (no operations); interchangeable with
// vec3f via vec3f's converting constructor and assignment operator.
struct vector3
{
	double x, y, z;
};
// Lightweight 3D vector of doubles used throughout the simplifier.
// NOTE(review): several members have unusual semantics (flagged below);
// they are kept as-is because the rest of the file depends on them.
struct vec3f
{
	double x, y, z;

	inline vec3f( void ) {}

	//inline vec3f operator =( vector3 a )
	// { vec3f b ; b.x = a.x; b.y = a.y; b.z = a.z; return b;}

	// Implicit conversion from the plain `vector3` aggregate.
	inline vec3f( vector3 a )
	{ x = a.x; y = a.y; z = a.z; }

	inline vec3f( const double X, const double Y, const double Z )
	{ x = X; y = Y; z = Z; }

	inline vec3f operator + ( const vec3f& a ) const
	{ return vec3f( x + a.x, y + a.y, z + a.z ); }

	// NOTE(review): operator+= does NOT modify *this (it is const); it
	// returns the sum exactly like operator+.
	inline vec3f operator += ( const vec3f& a ) const
	{ return vec3f( x + a.x, y + a.y, z + a.z ); }

	inline vec3f operator * ( const double a ) const
	{ return vec3f( x * a, y * a, z * a ); }

	inline vec3f operator * ( const vec3f a ) const
	{ return vec3f( x * a.x, y * a.y, z * a.z ); }

	inline vec3f v3 () const
	{ return vec3f( x , y, z ); }

	inline vec3f operator = ( const vector3 a )
	{ x=a.x;y=a.y;z=a.z;return *this; }

	inline vec3f operator = ( const vec3f a )
	{ x=a.x;y=a.y;z=a.z;return *this; }

	// Component-wise division.
	inline vec3f operator / ( const vec3f a ) const
	{ return vec3f( x / a.x, y / a.y, z / a.z ); }

	inline vec3f operator - ( const vec3f& a ) const
	{ return vec3f( x - a.x, y - a.y, z - a.z ); }

	inline vec3f operator / ( const double a ) const
	{ return vec3f( x / a, y / a, z / a ); }

	inline double dot( const vec3f& a ) const
	{ return a.x*x + a.y*y + a.z*z; }

	// NOTE(review): cross() stores a x b into *this (mutating) and returns it.
	inline vec3f cross( const vec3f& a , const vec3f& b )
	{
		x = a.y * b.z - a.z * b.y;
		y = a.z * b.x - a.x * b.z;
		z = a.x * b.y - a.y * b.x;
		return *this;
	}

	// Unsigned angle between *this and v; the cosine is clamped to [-1, 1]
	// to avoid acos domain errors from rounding.
	inline double angle( const vec3f& v )
	{
		vec3f a = v , b = *this;
		double dot = v.x*x + v.y*y + v.z*z;
		double len = a.length() * b.length();
		if(len==0)len=0.00001f;
		double input = dot / len;
		if (input<-1) input=-1;
		if (input>1) input=1;
		return (double) acos ( input );
	}

	// Signed angle between *this and v; the sign is taken from the side of
	// the plane spanned by *this and w.  No clamping here (unlike angle()).
	inline double angle2( const vec3f& v , const vec3f& w )
	{
		vec3f a = v , b= *this;
		double dot = a.x*b.x + a.y*b.y + a.z*b.z;
		double len = a.length() * b.length();
		if(len==0)len=1;

		vec3f plane; plane.cross( b,w );

		if ( plane.x * a.x + plane.y * a.y + plane.z * a.z > 0 )
			return (double) -acos ( dot / len );

		return (double) acos ( dot / len );
	}

	// In-place rotation about the x axis by angle a (radians).
	inline vec3f rot_x( double a )
	{
		double yy = cos ( a ) * y + sin ( a ) * z;
		double zz = cos ( a ) * z - sin ( a ) * y;
		y = yy; z = zz;
		return *this;
	}
	// In-place rotation about the y axis by angle a (radians).
	inline vec3f rot_y( double a )
	{
		double xx = cos ( -a ) * x + sin ( -a ) * z;
		double zz = cos ( -a ) * z - sin ( -a ) * x;
		x = xx; z = zz;
		return *this;
	}
	// Clamp every component to [min, max].
	inline void clamp( double min, double max )
	{
		if (x<min) x=min;
		if (y<min) y=min;
		if (z<min) z=min;
		if (x>max) x=max;
		if (y>max) y=max;
		if (z>max) z=max;
	}
	// In-place rotation about the z axis by angle a (radians).
	inline vec3f rot_z( double a )
	{
		double yy = cos ( a ) * y + sin ( a ) * x;
		double xx = cos ( a ) * x - sin ( a ) * y;
		y = yy; x = xx;
		return *this;
	}
	// In-place negation of all components.
	inline vec3f invert()
	{
		x=-x;y=-y;z=-z;return *this;
	}
	// Fractional part of each component (truncation toward zero).
	inline vec3f frac()
	{
		return vec3f(
			x-double(int(x)),
			y-double(int(y)),
			z-double(int(z))
			);
	}

	// Truncated integer part of each component.
	inline vec3f integer()
	{
		return vec3f(
			double(int(x)),
			double(int(y)),
			double(int(z))
			);
	}

	inline double length() const
	{
		return (double)sqrt(x*x + y*y + z*z);
	}

	// NOTE(review): desired_length is ignored — the vector is always scaled
	// to unit length, and the zero-vector guard is commented out, so a zero
	// vector divides by zero.
	inline vec3f normalize( double desired_length = 1 )
	{
		double square = sqrt(x*x + y*y + z*z);
		/*
		if (square <= 0.00001f )
		{
			x=1;y=0;z=0;
			return *this;
		}*/
		//double len = desired_length / square;
		x/=square;y/=square;z/=square;

		return *this;
	}
	// Declared but not defined in this header.
	static vec3f normalize( vec3f a );

	static void random_init();
	static double random_double();
	static vec3f random();

	static int random_number;

	// Deterministic hash-like pseudo-random value in [0, 1] derived from a.
	double random_double_01(double a){
		double rnf=a*14.434252+a*364.2343+a*4213.45352+a*2341.43255+a*254341.43535+a*223454341.3523534245+23453.423412;
		int rni=((int)rnf)%100000;
		return double(rni)/(100000.0f-1.0f);
	}

	// Replace each component by its pseudo-random image (in place).
	vec3f random01_fxyz(){
		x=(double)random_double_01(x);
		y=(double)random_double_01(y);
		z=(double)random_double_01(z);
		return *this;
	}
};
// Barycentric coordinates (u, v, w) of point p with respect to triangle
// (a, b, c), solved via the standard 2x2 system on the edge vectors.
// No degeneracy guard: a zero-area triangle gives denom == 0 and
// non-finite coordinates.
vec3f barycentric(const vec3f &p, const vec3f &a, const vec3f &b, const vec3f &c){
	vec3f e0 = b-a;
	vec3f e1 = c-a;
	vec3f e2 = p-a;
	double d00 = e0.dot(e0);
	double d01 = e0.dot(e1);
	double d11 = e1.dot(e1);
	double d20 = e2.dot(e0);
	double d21 = e2.dot(e1);
	double denom = d00*d11-d01*d01;
	double v = (d11 * d20 - d01 * d21) / denom;
	double w = (d00 * d21 - d01 * d20) / denom;
	double u = 1.0 - v - w;
	return vec3f(u,v,w);
}
// Interpolate the three per-corner attributes at point p using its
// barycentric weights within triangle (a, b, c).
vec3f interpolate(const vec3f &p, const vec3f &a, const vec3f &b, const vec3f &c, const vec3f attrs[3])
{
	vec3f weights = barycentric(p,a,b,c);
	vec3f result = vec3f(0,0,0);
	result = result + attrs[0] * weights.x;
	result = result + attrs[1] * weights.y;
	result = result + attrs[2] * weights.z;
	return result;
}
// Minimum of two doubles.  Delegates to fmin so that NaN operands follow
// IEEE semantics (the non-NaN argument is returned).
double min(double a, double b) {
	return fmin(a, b);
}
// Symmetric 4x4 error quadric, stored as its 10 upper-triangular
// coefficients in m[0..9] (row-major: m11..m14, m22..m24, m33, m34, m44).
class SymetricMatrix {

	public:

	// Constructor: fill all coefficients with c.
	SymetricMatrix(double c=0) { loopi(0,10) m[i] = c; }

	// Construct from explicit upper-triangular coefficients.
	SymetricMatrix(	double m11, double m12, double m13, double m14,
				double m22, double m23, double m24,
						double m33, double m34,
								double m44) {
			 m[0] = m11;  m[1] = m12;  m[2] = m13;  m[3] = m14;
				      m[4] = m22;  m[5] = m23;  m[6] = m24;
						   m[7] = m33;  m[8] = m34;
								m[9] = m44;
	}

	// Make plane: the fundamental error quadric p*p^T for the plane
	// a*x + b*y + c*z + d = 0.
	SymetricMatrix(double a,double b,double c,double d)
	{
		m[0] = a*a;  m[1] = a*b;  m[2] = a*c;  m[3] = a*d;
			     m[4] = b*b;  m[5] = b*c;  m[6] = b*d;
					  m[7 ] =c*c; m[8 ] = c*d;
						      m[9 ] = d*d;
	}

	double operator[](int c) const { return m[c]; }

	// Determinant of the 3x3 submatrix whose entries are selected by the
	// nine indices into m[] (row by row).
	double det(	int a11, int a12, int a13,
			int a21, int a22, int a23,
			int a31, int a32, int a33)
	{
		double det =  m[a11]*m[a22]*m[a33] + m[a13]*m[a21]*m[a32] + m[a12]*m[a23]*m[a31]
					- m[a13]*m[a22]*m[a31] - m[a11]*m[a23]*m[a32]- m[a12]*m[a21]*m[a33];
		return det;
	}

	// Coefficient-wise sum (quadrics accumulate additively).
	const SymetricMatrix operator+(const SymetricMatrix& n) const
	{
		return SymetricMatrix( m[0]+n[0],   m[1]+n[1],   m[2]+n[2],   m[3]+n[3],
						    m[4]+n[4],   m[5]+n[5],   m[6]+n[6],
								 m[ 7]+n[ 7], m[ 8]+n[8 ],
									      m[ 9]+n[9 ]);
	}

	SymetricMatrix& operator+=(const SymetricMatrix& n)
	{
		 m[0]+=n[0];   m[1]+=n[1];   m[2]+=n[2];   m[3]+=n[3];
		 m[4]+=n[4];   m[5]+=n[5];   m[6]+=n[6];   m[7]+=n[7];
		 m[8]+=n[8];   m[9]+=n[9];
		return *this;
	}

	double m[10];
};
///////////////////////////////////////////
namespace Simplify
{
	// Global Variables & Structures

	// Per-triangle attribute flags (bitmask stored in Triangle::attr).
	enum Attributes {
		NONE,
		NORMAL = 2,
		TEXCOORD = 4,
		COLOR = 8
	};
	// v: vertex indices; err[0..2]: collapse error of each edge, err[3]: the
	// minimum of the three; n: face normal; uvs: per-corner texcoords (only
	// meaningful when the TEXCOORD attribute bit is set).
	struct Triangle { int v[3];double err[4];int deleted,dirty,attr;vec3f n;vec3f uvs[3];int material; };
	// q: accumulated error quadric; tstart/tcount: this vertex's span in
	// `refs`; border: 1 if the vertex lies on an open boundary.
	struct Vertex { vec3f p;int tstart,tcount;SymetricMatrix q;int border;};
	// Back-reference from a vertex to (triangle id, corner index).
	struct Ref { int tid,tvertex; };
	// Global mesh state shared by all functions in this namespace.
	std::vector<Triangle> triangles;
	std::vector<Vertex> vertices;
	std::vector<Ref> refs;
	// Material bookkeeping carried through OBJ load/save.
	std::string mtllib;
	std::vector<std::string> materials;

	// Helper functions
	double vertex_error(SymetricMatrix q, double x, double y, double z);
	double calculate_error(int id_v1, int id_v2, vec3f &p_result);
	bool flipped(vec3f p,int i0,int i1,Vertex &v0,Vertex &v1,std::vector<int> &deleted);
	void update_uvs(int i0,const Vertex &v,const vec3f &p,std::vector<int> &deleted);
	void update_triangles(int i0,Vertex &v,std::vector<int> &deleted,int &deleted_triangles);
	void update_mesh(int iteration);
	void compact_mesh();
//
// Main simplification function
//
// target_count : target nr. of triangles
// agressiveness : sharpness to increase the threshold.
// 5..8 are good numbers
// more iterations yield higher quality
//
	// Iteratively collapse low-error edges until at most `target_count`
	// triangles remain (or 100 iterations pass).  The collapse threshold
	// grows with the iteration number, scaled by `agressiveness`.
	void simplify_mesh(int target_count, double agressiveness=7, bool verbose=false)
	{
		// init
		loopi(0,triangles.size())
		{
			triangles[i].deleted=0;
		}

		// main iteration loop
		int deleted_triangles=0;
		std::vector<int> deleted0,deleted1;
		int triangle_count=triangles.size();
		//int iteration = 0;
		//loop(iteration,0,100)
		for (int iteration = 0; iteration < 100; iteration ++)
		{
			if(triangle_count-deleted_triangles<=target_count)break;

			// update mesh once in a while
			if(iteration%5==0)
			{
				update_mesh(iteration);
			}

			// clear dirty flag
			loopi(0,triangles.size()) triangles[i].dirty=0;

			//
			// All triangles with edges below the threshold will be removed
			//
			// The following numbers works well for most models.
			// If it does not, try to adjust the 3 parameters
			//
			double threshold = 0.000000001*pow(double(iteration+3),agressiveness);

			// target number of triangles reached ? Then break
			if ((verbose) && (iteration%5==0)) {
				printf("iteration %d - triangles %d threshold %g\n",iteration,triangle_count-deleted_triangles, threshold);
			}

			// remove vertices & mark deleted triangles
			loopi(0,triangles.size())
			{
				Triangle &t=triangles[i];
				if(t.err[3]>threshold) continue;
				if(t.deleted) continue;
				if(t.dirty) continue;

				loopj(0,3)if(t.err[j]<threshold)
				{
					// Candidate edge (i0, i1) of triangle t.
					int i0=t.v[ j ]; Vertex &v0 = vertices[i0];
					int i1=t.v[(j+1)%3]; Vertex &v1 = vertices[i1];
					// Border check: never collapse a border vertex into an
					// interior one (or vice versa).
					if(v0.border != v1.border) continue;

					// Compute vertex to collapse to
					vec3f p;
					calculate_error(i0,i1,p);
					// deletedN[k] marks triangles around vN that the
					// collapse would remove.
					deleted0.resize(v0.tcount); // normals temporarily
					deleted1.resize(v1.tcount); // normals temporarily
					// don't remove if flipped
					if( flipped(p,i0,i1,v0,v1,deleted0) ) continue;
					if( flipped(p,i1,i0,v1,v0,deleted1) ) continue;

					if ( (t.attr & TEXCOORD) == TEXCOORD )
					{
						// Re-project uvs around both endpoints (the first
						// argument of update_uvs is unused).
						update_uvs(i0,v0,p,deleted0);
						update_uvs(i0,v1,p,deleted1);
					}

					// not flipped, so remove edge: merge v1 into v0 at p and
					// accumulate the quadrics.
					v0.p=p;
					v0.q=v1.q+v0.q;
					int tstart=refs.size();

					update_triangles(i0,v0,deleted0,deleted_triangles);
					update_triangles(i0,v1,deleted1,deleted_triangles);

					int tcount=refs.size()-tstart;

					if(tcount<=v0.tcount)
					{
						// save ram: reuse v0's old span in refs
						if(tcount)memcpy(&refs[v0.tstart],&refs[tstart],tcount*sizeof(Ref));
					}
					else
						// append
						v0.tstart=tstart;

					v0.tcount=tcount;
					break;
				}
				// done?
				if(triangle_count-deleted_triangles<=target_count)break;
			}
		}
		// clean up mesh
		compact_mesh();
	} //simplify_mesh()
	// Lossless variant: collapse only edges whose error is below DBL_EPSILON
	// (i.e. geometrically redundant ones), repeating until a full pass
	// removes nothing (capped at 9999 iterations).
	void simplify_mesh_lossless(bool verbose=false)
	{
		// init
		loopi(0,triangles.size()) triangles[i].deleted=0;

		// main iteration loop
		int deleted_triangles=0;
		std::vector<int> deleted0,deleted1;
		int triangle_count=triangles.size();
		//int iteration = 0;
		//loop(iteration,0,100)
		for (int iteration = 0; iteration < 9999; iteration ++)
		{
			// update mesh constantly
			update_mesh(iteration);

			// clear dirty flag
			loopi(0,triangles.size()) triangles[i].dirty=0;

			//
			// All triangles with edges below the threshold will be removed
			//
			// The following numbers works well for most models.
			// If it does not, try to adjust the 3 parameters
			//
			double threshold = DBL_EPSILON; //1.0E-3 EPS;
			if (verbose) {
				printf("lossless iteration %d\n", iteration);
			}

			// remove vertices & mark deleted triangles
			loopi(0,triangles.size())
			{
				Triangle &t=triangles[i];
				if(t.err[3]>threshold) continue;
				if(t.deleted) continue;
				if(t.dirty) continue;

				loopj(0,3)if(t.err[j]<threshold)
				{
					// Candidate edge (i0, i1); same collapse procedure as
					// simplify_mesh() above.
					int i0=t.v[ j ]; Vertex &v0 = vertices[i0];
					int i1=t.v[(j+1)%3]; Vertex &v1 = vertices[i1];

					// Border check
					if(v0.border != v1.border) continue;

					// Compute vertex to collapse to
					vec3f p;
					calculate_error(i0,i1,p);

					deleted0.resize(v0.tcount); // normals temporarily
					deleted1.resize(v1.tcount); // normals temporarily

					// don't remove if flipped
					if( flipped(p,i0,i1,v0,v1,deleted0) ) continue;
					if( flipped(p,i1,i0,v1,v0,deleted1) ) continue;

					if ( (t.attr & TEXCOORD) == TEXCOORD )
					{
						update_uvs(i0,v0,p,deleted0);
						update_uvs(i0,v1,p,deleted1);
					}

					// not flipped, so remove edge
					v0.p=p;
					v0.q=v1.q+v0.q;
					int tstart=refs.size();

					update_triangles(i0,v0,deleted0,deleted_triangles);
					update_triangles(i0,v1,deleted1,deleted_triangles);

					int tcount=refs.size()-tstart;

					if(tcount<=v0.tcount)
					{
						// save ram
						if(tcount)memcpy(&refs[v0.tstart],&refs[tstart],tcount*sizeof(Ref));
					}
					else
						// append
						v0.tstart=tstart;

					v0.tcount=tcount;
					break;
				}
			}
			// Converged: a full pass deleted nothing.
			if(deleted_triangles<=0)break;
			deleted_triangles=0;
		} //for each iteration
		// clean up mesh
		compact_mesh();
	} //simplify_mesh_lossless()
// Check if a triangle flips when this edge is removed
	// Check if a triangle flips when this edge is removed: returns true if
	// moving v0 to the collapse position `p` would make any surrounding
	// triangle degenerate or flip its normal.  Also records, in `deleted`,
	// which triangles around v0 the collapse removes outright.
	bool flipped(vec3f p,int i0,int i1,Vertex &v0,Vertex &v1,std::vector<int> &deleted)
	{
		loopk(0,v0.tcount)
		{
			Triangle &t=triangles[refs[v0.tstart+k].tid];
			if(t.deleted)continue;

			int s=refs[v0.tstart+k].tvertex;
			int id1=t.v[(s+1)%3];
			int id2=t.v[(s+2)%3];

			// Triangles containing the collapsing edge disappear entirely.
			if(id1==i1 || id2==i1) // delete ?
			{
				deleted[k]=1;
				continue;
			}
			vec3f d1 = vertices[id1].p-p; d1.normalize();
			vec3f d2 = vertices[id2].p-p; d2.normalize();
			// Nearly collinear edges -> degenerate triangle.
			if(fabs(d1.dot(d2))>0.999) return true;
			vec3f n;
			n.cross(d1,d2);
			n.normalize();
			deleted[k]=0;
			// New normal deviates too far from the old one -> flip.
			if(n.dot(t.n)<0.2) return true;
		}
		return false;
	}
// update_uvs
void update_uvs(int i0,const Vertex &v,const vec3f &p,std::vector<int> &deleted)
{
loopk(0,v.tcount)
{
Ref &r=refs[v.tstart+k];
Triangle &t=triangles[r.tid];
if(t.deleted)continue;
if(deleted[k])continue;
vec3f p1=vertices[t.v[0]].p;
vec3f p2=vertices[t.v[1]].p;
vec3f p3=vertices[t.v[2]].p;
t.uvs[r.tvertex] = interpolate(p,p1,p2,p3,t.uvs);
}
}
// Update triangle connections and edge error after a edge is collapsed
	// Update triangle connections and edge error after a edge is collapsed:
	// triangles marked in `deleted` are flagged as removed; the rest are
	// re-pointed at the surviving vertex `i0`, their edge errors recomputed,
	// and their refs appended (the caller compacts them afterwards).
	void update_triangles(int i0,Vertex &v,std::vector<int> &deleted,int &deleted_triangles)
	{
		vec3f p;
		loopk(0,v.tcount)
		{
			Ref &r=refs[v.tstart+k];
			Triangle &t=triangles[r.tid];
			if(t.deleted)continue;
			if(deleted[k])
			{
				t.deleted=1;
				deleted_triangles++;
				continue;
			}
			// Survivor: retarget this corner at the merged vertex.
			t.v[r.tvertex]=i0;
			t.dirty=1;
			t.err[0]=calculate_error(t.v[0],t.v[1],p);
			t.err[1]=calculate_error(t.v[1],t.v[2],p);
			t.err[2]=calculate_error(t.v[2],t.v[0],p);
			t.err[3]=min(t.err[0],min(t.err[1],t.err[2]));
			refs.push_back(r);
		}
	}
// compact triangles, compute edge error and build reference list
	// Compact the triangle list, (re)build the vertex->triangle reference
	// list, and on the first iteration initialize the error quadrics and
	// flag boundary vertices.
	void update_mesh(int iteration)
	{
		if(iteration>0) // compact triangles
		{
			int dst=0;
			loopi(0,triangles.size())
			if(!triangles[i].deleted)
			{
				triangles[dst++]=triangles[i];
			}
			triangles.resize(dst);
		}
		//
		// Init Quadrics by Plane & Edge Errors
		//
		// required at the beginning ( iteration == 0 )
		// recomputing during the simplification is not required,
		// but mostly improves the result for closed meshes
		//
		if( iteration == 0 )
		{
			loopi(0,vertices.size())
			vertices[i].q=SymetricMatrix(0.0);

			// Accumulate each face's plane quadric onto its three vertices.
			loopi(0,triangles.size())
			{
				Triangle &t=triangles[i];
				vec3f n,p[3];
				loopj(0,3) p[j]=vertices[t.v[j]].p;
				n.cross(p[1]-p[0],p[2]-p[0]);
				n.normalize();
				t.n=n;
				loopj(0,3) vertices[t.v[j]].q =
					vertices[t.v[j]].q+SymetricMatrix(n.x,n.y,n.z,-n.dot(p[0]));
			}
			loopi(0,triangles.size())
			{
				// Calc Edge Error
				Triangle &t=triangles[i];vec3f p;
				loopj(0,3) t.err[j]=calculate_error(t.v[j],t.v[(j+1)%3],p);
				t.err[3]=min(t.err[0],min(t.err[1],t.err[2]));
			}
		}

		// Init Reference ID list: first count triangles per vertex ...
		loopi(0,vertices.size())
		{
			vertices[i].tstart=0;
			vertices[i].tcount=0;
		}
		loopi(0,triangles.size())
		{
			Triangle &t=triangles[i];
			loopj(0,3) vertices[t.v[j]].tcount++;
		}
		// ... then assign each vertex a contiguous span via a prefix sum.
		int tstart=0;
		loopi(0,vertices.size())
		{
			Vertex &v=vertices[i];
			v.tstart=tstart;
			tstart+=v.tcount;
			v.tcount=0;
		}

		// Write References
		refs.resize(triangles.size()*3);
		loopi(0,triangles.size())
		{
			Triangle &t=triangles[i];
			loopj(0,3)
			{
				Vertex &v=vertices[t.v[j]];
				refs[v.tstart+v.tcount].tid=i;
				refs[v.tstart+v.tcount].tvertex=j;
				v.tcount++;
			}
		}

		// Identify boundary : vertices[].border=0,1
		// A neighboring vertex seen exactly once across the fan of triangles
		// around v lies on an open boundary edge.
		if( iteration == 0 )
		{
			std::vector<int> vcount,vids;

			loopi(0,vertices.size())
				vertices[i].border=0;

			loopi(0,vertices.size())
			{
				Vertex &v=vertices[i];
				vcount.clear();
				vids.clear();
				loopj(0,v.tcount)
				{
					int k=refs[v.tstart+j].tid;
					Triangle &t=triangles[k];
					// NOTE: loopk's counter shadows the triangle id `k`
					// above; t.v[k] below uses the corner index 0..2.
					loopk(0,3)
					{
						int ofs=0,id=t.v[k];
						while(ofs<vcount.size())
						{
							if(vids[ofs]==id)break;
							ofs++;
						}
						if(ofs==vcount.size())
						{
							vcount.push_back(1);
							vids.push_back(id);
						}
						else
							vcount[ofs]++;
					}
				}
				loopj(0,vcount.size()) if(vcount[j]==1)
					vertices[vids[j]].border=1;
			}
		}
	}
// Finally compact mesh before exiting
	// Finally compact mesh before exiting: drop deleted triangles, drop
	// vertices no surviving triangle references, and remap the triangles'
	// vertex indices.  Vertex::tstart is temporarily reused as the
	// old-index -> new-index remap table.
	void compact_mesh()
	{
		int dst=0;
		// tcount doubles as a "referenced" flag here.
		loopi(0,vertices.size())
		{
			vertices[i].tcount=0;
		}
		loopi(0,triangles.size())
		if(!triangles[i].deleted)
		{
			Triangle &t=triangles[i];
			triangles[dst++]=t;
			loopj(0,3)vertices[t.v[j]].tcount=1;
		}
		triangles.resize(dst);
		dst=0;
		// Compact referenced vertices forward; record each one's new index
		// in tstart (safe because dst <= i, so positions are only read
		// before being overwritten).
		loopi(0,vertices.size())
		if(vertices[i].tcount)
		{
			vertices[i].tstart=dst;
			vertices[dst].p=vertices[i].p;
			dst++;
		}
		// Remap triangle vertex indices through the tstart table.
		loopi(0,triangles.size())
		{
			Triangle &t=triangles[i];
			loopj(0,3)t.v[j]=vertices[t.v[j]].tstart;
		}
		vertices.resize(dst);
	}
// Error between vertex and Quadric
	// Error between vertex and Quadric: evaluates v^T Q v for the
	// homogeneous vertex v = (x, y, z, 1), with Q given by its 10
	// upper-triangular coefficients.
	double vertex_error(SymetricMatrix q, double x, double y, double z)
	{
 		return   q[0]*x*x + 2*q[1]*x*y + 2*q[2]*x*z + 2*q[3]*x + q[4]*y*y
 		     + 2*q[5]*y*z + 2*q[6]*y + q[7]*z*z + 2*q[8]*z + q[9];
	}
// Error for one edge
	// Error for one edge: returns the quadric error of collapsing edge
	// (id_v1, id_v2) and writes the optimal collapse position to p_result.
	double calculate_error(int id_v1, int id_v2, vec3f &p_result)
	{
		// compute interpolated vertex: the combined quadric of both
		// endpoints.
		SymetricMatrix q = vertices[id_v1].q + vertices[id_v2].q;
		bool   border = vertices[id_v1].border & vertices[id_v2].border;
		double error=0;
		double det = q.det(0, 1, 2, 1, 4, 5, 2, 5, 7);
		if ( det != 0 && !border )
		{
			// q_delta is invertible: solve for the position minimizing the
			// quadric error (Cramer's rule on the 3x3 system).
			p_result.x = -1/det*(q.det(1, 2, 3, 4, 5, 6, 5, 7 , 8));	// vx = A41/det(q_delta)
			p_result.y =  1/det*(q.det(0, 2, 3, 1, 5, 6, 2, 7 , 8));	// vy = A42/det(q_delta)
			p_result.z = -1/det*(q.det(0, 1, 3, 1, 4, 6, 2, 5,  8));	// vz = A43/det(q_delta)
			error = vertex_error(q, p_result.x, p_result.y, p_result.z);
		}
		else
		{
			// det = 0 -> try to find best result among the two endpoints
			// and their midpoint.
			vec3f p1=vertices[id_v1].p;
			vec3f p2=vertices[id_v2].p;
			vec3f p3=(p1+p2)/2;
			double error1 = vertex_error(q, p1.x,p1.y,p1.z);
			double error2 = vertex_error(q, p2.x,p2.y,p2.z);
			double error3 = vertex_error(q, p3.x,p3.y,p3.z);
			error = min(error1, min(error2, error3));
			if (error1 == error) p_result=p1;
			if (error2 == error) p_result=p2;
			if (error3 == error) p_result=p3;
		}
		return error;
	}
// Trim leading and trailing whitespace in place.  Returns a pointer into
// the original buffer (past the leading whitespace) and writes a new null
// terminator after the last non-space character.
char *trimwhitespace(char *str)
{
	// Advance past leading whitespace.
	char *start = str;
	while(isspace((unsigned char)*start)) ++start;

	if(*start == 0)  // string was entirely whitespace
		return start;

	// Walk back over trailing whitespace and re-terminate.
	char *last = start + strlen(start) - 1;
	while(last > start && isspace((unsigned char)*last)) --last;
	last[1] = 0;

	return start;
}
//Option : Load OBJ
void load_obj(const char* filename, bool process_uv=false){
vertices.clear();
triangles.clear();
//printf ( "Loading Objects %s ... \n",filename);
FILE* fn;
if(filename==NULL) return ;
if((char)filename[0]==0) return ;
if ((fn = fopen(filename, "rb")) == NULL)
{
printf ( "File %s not found!\n" ,filename );
return;
}
char line[1000];
memset ( line,0,1000 );
int vertex_cnt = 0;
int material = -1;
std::map<std::string, int> material_map;
std::vector<vec3f> uvs;
std::vector<std::vector<int> > uvMap;
while(fgets( line, 1000, fn ) != NULL)
{
Vertex v;
vec3f uv;
if (strncmp(line, "mtllib", 6) == 0)
{
mtllib = trimwhitespace(&line[7]);
}
if (strncmp(line, "usemtl", 6) == 0)
{
std::string usemtl = trimwhitespace(&line[7]);
if (material_map.find(usemtl) == material_map.end())
{
material_map[usemtl] = materials.size();
materials.push_back(usemtl);
}
material = material_map[usemtl];
}
if ( line[0] == 'v' && line[1] == 't' )
{
if ( line[2] == ' ' )
if(sscanf(line,"vt %lf %lf",
&uv.x,&uv.y)==2)
{
uv.z = 0;
uvs.push_back(uv);
} else
if(sscanf(line,"vt %lf %lf %lf",
&uv.x,&uv.y,&uv.z)==3)
{
uvs.push_back(uv);
}
}
else if ( line[0] == 'v' )
{
if ( line[1] == ' ' )
if(sscanf(line,"v %lf %lf %lf",
&v.p.x, &v.p.y, &v.p.z)==3)
{
vertices.push_back(v);
}
}
int integers[9];
if ( line[0] == 'f' )
{
Triangle t;
bool tri_ok = false;
bool has_uv = false;
if(sscanf(line,"f %d %d %d",
&integers[0],&integers[1],&integers[2])==3)
{
tri_ok = true;
}else
if(sscanf(line,"f %d// %d// %d//",
&integers[0],&integers[1],&integers[2])==3)
{
tri_ok = true;
}else
if(sscanf(line,"f %d//%d %d//%d %d//%d",
&integers[0],&integers[3],
&integers[1],&integers[4],
&integers[2],&integers[5])==6)
{
tri_ok = true;
}else
if(sscanf(line,"f %d/%d/%d %d/%d/%d %d/%d/%d",
&integers[0],&integers[6],&integers[3],
&integers[1],&integers[7],&integers[4],
&integers[2],&integers[8],&integers[5])==9)
{
tri_ok = true;
has_uv = true;
}
else
{
printf("unrecognized sequence\n");
printf("%s\n",line);
while(1);
}
if ( tri_ok )
{
t.v[0] = integers[0]-1-vertex_cnt;
t.v[1] = integers[1]-1-vertex_cnt;
t.v[2] = integers[2]-1-vertex_cnt;
t.attr = 0;
if ( process_uv && has_uv )
{
std::vector<int> indices;
indices.push_back(integers[6]-1-vertex_cnt);
indices.push_back(integers[7]-1-vertex_cnt);
indices.push_back(integers[8]-1-vertex_cnt);
uvMap.push_back(indices);
t.attr |= TEXCOORD;
}
t.material = material;
//geo.triangles.push_back ( tri );
triangles.push_back(t);
//state_before = state;
//state ='f';
}
}
}
if ( process_uv && uvs.size() )
{
loopi(0,triangles.size())
{
loopj(0,3)
triangles[i].uvs[j] = uvs[uvMap[i][j]];
}
}
fclose(fn);
//printf("load_obj: vertices = %lu, triangles = %lu, uvs = %lu\n", vertices.size(), triangles.size(), uvs.size() );
} // load_obj()
// Optional : Store as OBJ
	// Serialize the (simplified) mesh back to a Wavefront OBJ file:
	// vertices, optional per-corner texcoords, usemtl switches and faces.
	// Deleted triangles are skipped.
	// NOTE(review): terminates the whole process with exit(0) when the file
	// cannot be opened, and indexes `materials` with Triangle::material,
	// which is -1 for meshes loaded without any usemtl directive — confirm
	// callers never hit that path with an empty materials list.
	void write_obj(const char* filename)
	{
		FILE *file=fopen(filename, "w");
		int cur_material = -1;
		// Only triangles[0] is inspected: assumes uv presence is uniform
		// across all triangles (as produced by load_obj).
		bool has_uv = (triangles.size() && (triangles[0].attr & TEXCOORD) == TEXCOORD);

		if (!file)
		{
			printf("write_obj: can't write data file \"%s\".\n", filename);
			exit(0);
		}
		if (!mtllib.empty())
		{
			fprintf(file, "mtllib %s\n", mtllib.c_str());
		}
		loopi(0,vertices.size())
		{
			//fprintf(file, "v %lf %lf %lf\n", vertices[i].p.x,vertices[i].p.y,vertices[i].p.z);
			fprintf(file, "v %g %g %g\n", vertices[i].p.x,vertices[i].p.y,vertices[i].p.z); //more compact: remove trailing zeros
		}
		if (has_uv)
		{
			loopi(0,triangles.size()) if(!triangles[i].deleted)
			{
				fprintf(file, "vt %g %g\n", triangles[i].uvs[0].x, triangles[i].uvs[0].y);
				fprintf(file, "vt %g %g\n", triangles[i].uvs[1].x, triangles[i].uvs[1].y);
				fprintf(file, "vt %g %g\n", triangles[i].uvs[2].x, triangles[i].uvs[2].y);
			}
		}
		// Running texcoord index: three consecutive vt records per triangle.
		int uv = 1;
		loopi(0,triangles.size()) if(!triangles[i].deleted)
		{
			// Emit a usemtl switch whenever the material changes.
			if (triangles[i].material != cur_material)
			{
				cur_material = triangles[i].material;
				fprintf(file, "usemtl %s\n", materials[triangles[i].material].c_str());
			}
			if (has_uv)
			{
				fprintf(file, "f %d/%d %d/%d %d/%d\n", triangles[i].v[0]+1, uv, triangles[i].v[1]+1, uv+1, triangles[i].v[2]+1, uv+2);
				uv += 3;
			}
			else
			{
				fprintf(file, "f %d %d %d\n", triangles[i].v[0]+1, triangles[i].v[1]+1, triangles[i].v[2]+1);
			}
			//fprintf(file, "f %d// %d// %d//\n", triangles[i].v[0]+1, triangles[i].v[1]+1, triangles[i].v[2]+1); //more compact: remove trailing zeros
		}
		fclose(file);
	}
};
///////////////////////////////////////////
| 25,295 | C | 23.58309 | 142 | 0.567108 |
NVlabs/ACID/ACID/src/utils/libmcubes/pyarray_symbol.h |
#define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API
| 51 | C | 16.333328 | 49 | 0.803922 |
NVlabs/ACID/ACID/src/utils/libmcubes/README.rst | ========
PyMCubes
========
PyMCubes is an implementation of the marching cubes algorithm to extract
isosurfaces from volumetric data. The volumetric data can be given as a
three-dimensional NumPy array or as a Python function ``f(x, y, z)``. The first
option is much faster, but it requires more memory and becomes infeasible for
very large volumes.
PyMCubes also provides a function to export the results of the marching cubes as
COLLADA ``(.dae)`` files. This requires the
`PyCollada <https://github.com/pycollada/pycollada>`_ library.
Installation
============
Just as any standard Python package, clone or download the project
and run::
$ cd path/to/PyMCubes
$ python setup.py build
$ python setup.py install
If you do not have write permission on the directory of Python packages,
install with the ``--user`` option::
$ python setup.py install --user
Example
=======
The following example creates a data volume with spherical isosurfaces and
extracts one of them (i.e., a sphere) with PyMCubes. The result is exported as
``sphere.dae``::
>>> import numpy as np
>>> import mcubes
# Create a data volume (30 x 30 x 30)
>>> X, Y, Z = np.mgrid[:30, :30, :30]
>>> u = (X-15)**2 + (Y-15)**2 + (Z-15)**2 - 8**2
# Extract the 0-isosurface
>>> vertices, triangles = mcubes.marching_cubes(u, 0)
# Export the result to sphere.dae
>>> mcubes.export_mesh(vertices, triangles, "sphere.dae", "MySphere")
The second example is very similar to the first one, but it uses a function
to represent the volume instead of a NumPy array::
>>> import numpy as np
>>> import mcubes
# Create the volume
>>> f = lambda x, y, z: x**2 + y**2 + z**2
# Extract the 16-isosurface
>>> vertices, triangles = mcubes.marching_cubes_func((-10,-10,-10), (10,10,10),
... 100, 100, 100, f, 16)
# Export the result to sphere2.dae
>>> mcubes.export_mesh(vertices, triangles, "sphere2.dae", "MySphere")
| 1,939 | reStructuredText | 28.846153 | 81 | 0.682826 |
NVlabs/ACID/ACID/src/utils/libmcubes/marchingcubes.h |
#ifndef _MARCHING_CUBES_H
#define _MARCHING_CUBES_H
#include <stddef.h>
#include <vector>
namespace mc
{
extern int edge_table[256];
extern int triangle_table[256][16];
// Helpers implemented in marchingcubes.cpp; not part of the public API.
namespace private_
{
// Returns the interpolated coordinate at which the scalar field crosses
// `isovalue` between samples f1 (at coordinate x1) and f2 (at coordinate x2).
double mc_isovalue_interpolation(double isovalue, double f1, double f2,
    double x1, double x2);
// Appends one vertex to `vertices` at (x1, y1, z1), with the coordinate along
// `axis` (0=x, 1=y, 2=z) interpolated towards c2 -- judging from the call
// sites below, c2 is the opposite edge endpoint's coordinate on that axis;
// confirm against marchingcubes.cpp.
void mc_add_vertex(double x1, double y1, double z1, double c2,
    int axis, double f1, double f2, double isovalue, std::vector<double>* vertices);
}
// Runs marching cubes over the axis-aligned box [lower, upper], extracting
// the isosurface f(x, y, z) == isovalue.
//
// numx/numy/numz : number of evaluations of `f` along each axis.
// f              : scalar field, callable as f(x, y, z) -> double.
// vertices       : output; flat x,y,z triples (3 doubles per vertex).
// polygons       : output; vertex indices, 3 consecutive entries per triangle.
//
// Sample points are offset by half a cell from `lower` (cell centers).
// Vertices on edges shared between neighbouring cells are emitted once and
// reused via the `shared_indices` cache (two x-slabs are kept).
template<typename coord_type, typename vector3, typename formula>
void marching_cubes(const vector3& lower, const vector3& upper,
    int numx, int numy, int numz, formula f, double isovalue,
    std::vector<double>& vertices, std::vector<size_t>& polygons)
{
    using namespace private_;

    // numx, numy and numz are the numbers of evaluations in each direction,
    // so there is one less cell than samples along each axis.
    --numx; --numy; --numz;

    coord_type dx = (upper[0] - lower[0])/static_cast<coord_type>(numx);
    coord_type dy = (upper[1] - lower[1])/static_cast<coord_type>(numy);
    coord_type dz = (upper[2] - lower[2])/static_cast<coord_type>(numz);

    // RAII index cache (the original used a raw new[]/delete[] pair, which
    // leaked if a push_back below threw).
    std::vector<size_t> shared_indices(2*numy*numz*3);
    const int z3 = numz*3;
    const int yz3 = numy*z3;

    for(int i=0; i<numx; ++i)
    {
        coord_type x = lower[0] + dx*i + dx/2;
        coord_type x_dx = lower[0] + dx*(i+1) + dx/2;
        const int i_mod_2 = i % 2;
        const int i_mod_2_inv = (i_mod_2 ? 0 : 1);
        for(int j=0; j<numy; ++j)
        {
            coord_type y = lower[1] + dy*j + dy/2;
            coord_type y_dy = lower[1] + dy*(j+1) + dy/2;
            for(int k=0; k<numz; ++k)
            {
                coord_type z = lower[2] + dz*k + dz/2;
                coord_type z_dz = lower[2] + dz*(k+1) + dz/2;

                // Field values at the eight cube corners.
                double v[8];
                v[0] = f(x,y,z); v[1] = f(x_dx,y,z);
                v[2] = f(x_dx,y_dy,z); v[3] = f(x, y_dy, z);
                v[4] = f(x,y,z_dz); v[5] = f(x_dx,y,z_dz);
                v[6] = f(x_dx,y_dy,z_dz); v[7] = f(x, y_dy, z_dz);

                // Bit m is set when corner m is inside (v[m] <= isovalue).
                unsigned int cubeindex = 0;
                for(int m=0; m<8; ++m)
                    if(v[m] <= isovalue)
                        cubeindex |= 1<<m;

                // Generate vertices AVOIDING DUPLICATES.
                int edges = edge_table[cubeindex];
                std::vector<size_t> indices(12, -1);

                if(edges & 0x040)
                {
                    indices[6] = vertices.size() / 3;
                    shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 0] = indices[6];
                    mc_add_vertex(x_dx, y_dy, z_dz, x, 0, v[6], v[7], isovalue, &vertices);
                }
                if(edges & 0x020)
                {
                    indices[5] = vertices.size() / 3;
                    shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 1] = indices[5];
                    mc_add_vertex(x_dx, y, z_dz, y_dy, 1, v[5], v[6], isovalue, &vertices);
                }
                if(edges & 0x400)
                {
                    indices[10] = vertices.size() / 3;
                    shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 2] = indices[10];
                    // BUGFIX: edge 10 runs from corner 2 (x_dx, y_dy, z) to
                    // corner 6 (x_dx, y_dy, z_dz).  The original passed
                    // `y + dx` as the y coordinate, which only equals y_dy
                    // when dx == dy (cubic cells) and misplaces the vertex on
                    // non-uniform grids.
                    mc_add_vertex(x_dx, y_dy, z, z_dz, 2, v[2], v[6], isovalue, &vertices);
                }
                if(edges & 0x001)
                {
                    if(j == 0 || k == 0)
                    {
                        indices[0] = vertices.size() / 3;
                        mc_add_vertex(x, y, z, x_dx, 0, v[0], v[1], isovalue, &vertices);
                    }
                    else
                        indices[0] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + (k-1)*3 + 0];
                }
                if(edges & 0x002)
                {
                    if(k == 0)
                    {
                        indices[1] = vertices.size() / 3;
                        mc_add_vertex(x_dx, y, z, y_dy, 1, v[1], v[2], isovalue, &vertices);
                    }
                    else
                        indices[1] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 1];
                }
                if(edges & 0x004)
                {
                    if(k == 0)
                    {
                        indices[2] = vertices.size() / 3;
                        mc_add_vertex(x_dx, y_dy, z, x, 0, v[2], v[3], isovalue, &vertices);
                    }
                    else
                        indices[2] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 0];
                }
                if(edges & 0x008)
                {
                    if(i == 0 || k == 0)
                    {
                        indices[3] = vertices.size() / 3;
                        mc_add_vertex(x, y_dy, z, y, 1, v[3], v[0], isovalue, &vertices);
                    }
                    else
                        indices[3] = shared_indices[i_mod_2_inv*yz3 + j*z3 + (k-1)*3 + 1];
                }
                if(edges & 0x010)
                {
                    if(j == 0)
                    {
                        indices[4] = vertices.size() / 3;
                        mc_add_vertex(x, y, z_dz, x_dx, 0, v[4], v[5], isovalue, &vertices);
                    }
                    else
                        indices[4] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 0];
                }
                if(edges & 0x080)
                {
                    if(i == 0)
                    {
                        indices[7] = vertices.size() / 3;
                        mc_add_vertex(x, y_dy, z_dz, y, 1, v[7], v[4], isovalue, &vertices);
                    }
                    else
                        indices[7] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 1];
                }
                if(edges & 0x100)
                {
                    if(i == 0 || j == 0)
                    {
                        indices[8] = vertices.size() / 3;
                        mc_add_vertex(x, y, z, z_dz, 2, v[0], v[4], isovalue, &vertices);
                    }
                    else
                        indices[8] = shared_indices[i_mod_2_inv*yz3 + (j-1)*z3 + k*3 + 2];
                }
                if(edges & 0x200)
                {
                    if(j == 0)
                    {
                        indices[9] = vertices.size() / 3;
                        mc_add_vertex(x_dx, y, z, z_dz, 2, v[1], v[5], isovalue, &vertices);
                    }
                    else
                        indices[9] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 2];
                }
                if(edges & 0x800)
                {
                    if(i == 0)
                    {
                        indices[11] = vertices.size() / 3;
                        mc_add_vertex(x, y_dy, z, z_dz, 2, v[3], v[7], isovalue, &vertices);
                    }
                    else
                        indices[11] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 2];
                }

                // Emit the triangle fan for this cube configuration.
                int tri;
                int* triangle_table_ptr = triangle_table[cubeindex];
                for(int m=0; tri = triangle_table_ptr[m], tri != -1; ++m)
                    polygons.push_back(indices[tri]);
            }
        }
    }
}
// Variant of marching_cubes that samples `f` on the grid points themselves
// (no half-cell offset from `lower`).  See marching_cubes for the meaning of
// the parameters and outputs.
template<typename coord_type, typename vector3, typename formula>
void marching_cubes2(const vector3& lower, const vector3& upper,
    int numx, int numy, int numz, formula f, double isovalue,
    std::vector<double>& vertices, std::vector<size_t>& polygons)
{
    using namespace private_;

    // numx, numy and numz are the numbers of evaluations in each direction,
    // so there is one less cell than samples along each axis.
    --numx; --numy; --numz;

    coord_type dx = (upper[0] - lower[0])/static_cast<coord_type>(numx);
    coord_type dy = (upper[1] - lower[1])/static_cast<coord_type>(numy);
    coord_type dz = (upper[2] - lower[2])/static_cast<coord_type>(numz);

    // RAII index cache (the original used a raw new[]/delete[] pair, which
    // leaked if a push_back below threw).
    std::vector<size_t> shared_indices(2*numy*numz*3);
    const int z3 = numz*3;
    const int yz3 = numy*z3;

    for(int i=0; i<numx; ++i)
    {
        coord_type x = lower[0] + dx*i;
        coord_type x_dx = lower[0] + dx*(i+1);
        const int i_mod_2 = i % 2;
        const int i_mod_2_inv = (i_mod_2 ? 0 : 1);
        for(int j=0; j<numy; ++j)
        {
            coord_type y = lower[1] + dy*j;
            coord_type y_dy = lower[1] + dy*(j+1);
            for(int k=0; k<numz; ++k)
            {
                coord_type z = lower[2] + dz*k;
                coord_type z_dz = lower[2] + dz*(k+1);

                // Field values at the eight cube corners.
                double v[8];
                v[0] = f(x,y,z); v[1] = f(x_dx,y,z);
                v[2] = f(x_dx,y_dy,z); v[3] = f(x, y_dy, z);
                v[4] = f(x,y,z_dz); v[5] = f(x_dx,y,z_dz);
                v[6] = f(x_dx,y_dy,z_dz); v[7] = f(x, y_dy, z_dz);

                // Bit m is set when corner m is inside (v[m] <= isovalue).
                unsigned int cubeindex = 0;
                for(int m=0; m<8; ++m)
                    if(v[m] <= isovalue)
                        cubeindex |= 1<<m;

                // Generate vertices AVOIDING DUPLICATES.
                int edges = edge_table[cubeindex];
                std::vector<size_t> indices(12, -1);

                if(edges & 0x040)
                {
                    indices[6] = vertices.size() / 3;
                    shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 0] = indices[6];
                    mc_add_vertex(x_dx, y_dy, z_dz, x, 0, v[6], v[7], isovalue, &vertices);
                }
                if(edges & 0x020)
                {
                    indices[5] = vertices.size() / 3;
                    shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 1] = indices[5];
                    mc_add_vertex(x_dx, y, z_dz, y_dy, 1, v[5], v[6], isovalue, &vertices);
                }
                if(edges & 0x400)
                {
                    indices[10] = vertices.size() / 3;
                    shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 2] = indices[10];
                    // BUGFIX: edge 10 runs from corner 2 (x_dx, y_dy, z) to
                    // corner 6 (x_dx, y_dy, z_dz).  The original passed
                    // `y + dx` as the y coordinate, which only equals y_dy
                    // when dx == dy (cubic cells) and misplaces the vertex on
                    // non-uniform grids.
                    mc_add_vertex(x_dx, y_dy, z, z_dz, 2, v[2], v[6], isovalue, &vertices);
                }
                if(edges & 0x001)
                {
                    if(j == 0 || k == 0)
                    {
                        indices[0] = vertices.size() / 3;
                        mc_add_vertex(x, y, z, x_dx, 0, v[0], v[1], isovalue, &vertices);
                    }
                    else
                        indices[0] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + (k-1)*3 + 0];
                }
                if(edges & 0x002)
                {
                    if(k == 0)
                    {
                        indices[1] = vertices.size() / 3;
                        mc_add_vertex(x_dx, y, z, y_dy, 1, v[1], v[2], isovalue, &vertices);
                    }
                    else
                        indices[1] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 1];
                }
                if(edges & 0x004)
                {
                    if(k == 0)
                    {
                        indices[2] = vertices.size() / 3;
                        mc_add_vertex(x_dx, y_dy, z, x, 0, v[2], v[3], isovalue, &vertices);
                    }
                    else
                        indices[2] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 0];
                }
                if(edges & 0x008)
                {
                    if(i == 0 || k == 0)
                    {
                        indices[3] = vertices.size() / 3;
                        mc_add_vertex(x, y_dy, z, y, 1, v[3], v[0], isovalue, &vertices);
                    }
                    else
                        indices[3] = shared_indices[i_mod_2_inv*yz3 + j*z3 + (k-1)*3 + 1];
                }
                if(edges & 0x010)
                {
                    if(j == 0)
                    {
                        indices[4] = vertices.size() / 3;
                        mc_add_vertex(x, y, z_dz, x_dx, 0, v[4], v[5], isovalue, &vertices);
                    }
                    else
                        indices[4] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 0];
                }
                if(edges & 0x080)
                {
                    if(i == 0)
                    {
                        indices[7] = vertices.size() / 3;
                        mc_add_vertex(x, y_dy, z_dz, y, 1, v[7], v[4], isovalue, &vertices);
                    }
                    else
                        indices[7] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 1];
                }
                if(edges & 0x100)
                {
                    if(i == 0 || j == 0)
                    {
                        indices[8] = vertices.size() / 3;
                        mc_add_vertex(x, y, z, z_dz, 2, v[0], v[4], isovalue, &vertices);
                    }
                    else
                        indices[8] = shared_indices[i_mod_2_inv*yz3 + (j-1)*z3 + k*3 + 2];
                }
                if(edges & 0x200)
                {
                    if(j == 0)
                    {
                        indices[9] = vertices.size() / 3;
                        mc_add_vertex(x_dx, y, z, z_dz, 2, v[1], v[5], isovalue, &vertices);
                    }
                    else
                        indices[9] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 2];
                }
                if(edges & 0x800)
                {
                    if(i == 0)
                    {
                        indices[11] = vertices.size() / 3;
                        mc_add_vertex(x, y_dy, z, z_dz, 2, v[3], v[7], isovalue, &vertices);
                    }
                    else
                        indices[11] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 2];
                }

                // Emit the triangle fan for this cube configuration.
                int tri;
                int* triangle_table_ptr = triangle_table[cubeindex];
                for(int m=0; tri = triangle_table_ptr[m], tri != -1; ++m)
                    polygons.push_back(indices[tri]);
            }
        }
    }
}
// Variant of marching_cubes that samples `f` at points shifted by minus half
// a cell from `lower`.  See marching_cubes for the meaning of the parameters
// and outputs.
template<typename coord_type, typename vector3, typename formula>
void marching_cubes3(const vector3& lower, const vector3& upper,
    int numx, int numy, int numz, formula f, double isovalue,
    std::vector<double>& vertices, std::vector<size_t>& polygons)
{
    using namespace private_;

    // numx, numy and numz are the numbers of evaluations in each direction,
    // so there is one less cell than samples along each axis.
    --numx; --numy; --numz;

    coord_type dx = (upper[0] - lower[0])/static_cast<coord_type>(numx);
    coord_type dy = (upper[1] - lower[1])/static_cast<coord_type>(numy);
    coord_type dz = (upper[2] - lower[2])/static_cast<coord_type>(numz);

    // RAII index cache (the original used a raw new[]/delete[] pair, which
    // leaked if a push_back below threw).
    std::vector<size_t> shared_indices(2*numy*numz*3);
    const int z3 = numz*3;
    const int yz3 = numy*z3;

    for(int i=0; i<numx; ++i)
    {
        coord_type x = lower[0] + dx*i - dx/2;
        coord_type x_dx = lower[0] + dx*(i+1) - dx/2;
        const int i_mod_2 = i % 2;
        const int i_mod_2_inv = (i_mod_2 ? 0 : 1);
        for(int j=0; j<numy; ++j)
        {
            coord_type y = lower[1] + dy*j - dy/2;
            coord_type y_dy = lower[1] + dy*(j+1) - dy/2;
            for(int k=0; k<numz; ++k)
            {
                coord_type z = lower[2] + dz*k - dz/2;
                coord_type z_dz = lower[2] + dz*(k+1) - dz/2;

                // Field values at the eight cube corners.
                double v[8];
                v[0] = f(x,y,z); v[1] = f(x_dx,y,z);
                v[2] = f(x_dx,y_dy,z); v[3] = f(x, y_dy, z);
                v[4] = f(x,y,z_dz); v[5] = f(x_dx,y,z_dz);
                v[6] = f(x_dx,y_dy,z_dz); v[7] = f(x, y_dy, z_dz);

                // Bit m is set when corner m is inside (v[m] <= isovalue).
                unsigned int cubeindex = 0;
                for(int m=0; m<8; ++m)
                    if(v[m] <= isovalue)
                        cubeindex |= 1<<m;

                // Generate vertices AVOIDING DUPLICATES.
                int edges = edge_table[cubeindex];
                std::vector<size_t> indices(12, -1);

                if(edges & 0x040)
                {
                    indices[6] = vertices.size() / 3;
                    shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 0] = indices[6];
                    mc_add_vertex(x_dx, y_dy, z_dz, x, 0, v[6], v[7], isovalue, &vertices);
                }
                if(edges & 0x020)
                {
                    indices[5] = vertices.size() / 3;
                    shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 1] = indices[5];
                    mc_add_vertex(x_dx, y, z_dz, y_dy, 1, v[5], v[6], isovalue, &vertices);
                }
                if(edges & 0x400)
                {
                    indices[10] = vertices.size() / 3;
                    shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 2] = indices[10];
                    // BUGFIX: edge 10 runs from corner 2 (x_dx, y_dy, z) to
                    // corner 6 (x_dx, y_dy, z_dz).  The original passed
                    // `y + dx` as the y coordinate, which only equals y_dy
                    // when dx == dy (cubic cells) and misplaces the vertex on
                    // non-uniform grids.
                    mc_add_vertex(x_dx, y_dy, z, z_dz, 2, v[2], v[6], isovalue, &vertices);
                }
                if(edges & 0x001)
                {
                    if(j == 0 || k == 0)
                    {
                        indices[0] = vertices.size() / 3;
                        mc_add_vertex(x, y, z, x_dx, 0, v[0], v[1], isovalue, &vertices);
                    }
                    else
                        indices[0] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + (k-1)*3 + 0];
                }
                if(edges & 0x002)
                {
                    if(k == 0)
                    {
                        indices[1] = vertices.size() / 3;
                        mc_add_vertex(x_dx, y, z, y_dy, 1, v[1], v[2], isovalue, &vertices);
                    }
                    else
                        indices[1] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 1];
                }
                if(edges & 0x004)
                {
                    if(k == 0)
                    {
                        indices[2] = vertices.size() / 3;
                        mc_add_vertex(x_dx, y_dy, z, x, 0, v[2], v[3], isovalue, &vertices);
                    }
                    else
                        indices[2] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 0];
                }
                if(edges & 0x008)
                {
                    if(i == 0 || k == 0)
                    {
                        indices[3] = vertices.size() / 3;
                        mc_add_vertex(x, y_dy, z, y, 1, v[3], v[0], isovalue, &vertices);
                    }
                    else
                        indices[3] = shared_indices[i_mod_2_inv*yz3 + j*z3 + (k-1)*3 + 1];
                }
                if(edges & 0x010)
                {
                    if(j == 0)
                    {
                        indices[4] = vertices.size() / 3;
                        mc_add_vertex(x, y, z_dz, x_dx, 0, v[4], v[5], isovalue, &vertices);
                    }
                    else
                        indices[4] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 0];
                }
                if(edges & 0x080)
                {
                    if(i == 0)
                    {
                        indices[7] = vertices.size() / 3;
                        mc_add_vertex(x, y_dy, z_dz, y, 1, v[7], v[4], isovalue, &vertices);
                    }
                    else
                        indices[7] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 1];
                }
                if(edges & 0x100)
                {
                    if(i == 0 || j == 0)
                    {
                        indices[8] = vertices.size() / 3;
                        mc_add_vertex(x, y, z, z_dz, 2, v[0], v[4], isovalue, &vertices);
                    }
                    else
                        indices[8] = shared_indices[i_mod_2_inv*yz3 + (j-1)*z3 + k*3 + 2];
                }
                if(edges & 0x200)
                {
                    if(j == 0)
                    {
                        indices[9] = vertices.size() / 3;
                        mc_add_vertex(x_dx, y, z, z_dz, 2, v[1], v[5], isovalue, &vertices);
                    }
                    else
                        indices[9] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 2];
                }
                if(edges & 0x800)
                {
                    if(i == 0)
                    {
                        indices[11] = vertices.size() / 3;
                        mc_add_vertex(x, y_dy, z, z_dz, 2, v[3], v[7], isovalue, &vertices);
                    }
                    else
                        indices[11] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 2];
                }

                // Emit the triangle fan for this cube configuration.
                int tri;
                int* triangle_table_ptr = triangle_table[cubeindex];
                for(int m=0; tri = triangle_table_ptr[m], tri != -1; ++m)
                    polygons.push_back(indices[tri]);
            }
        }
    }
}
}
#endif // _MARCHING_CUBES_H
| 20,843 | C | 37.457565 | 92 | 0.372931 |
NVlabs/ACID/ACID/src/utils/libmcubes/pyarraymodule.h |
#ifndef _EXTMODULE_H
#define _EXTMODULE_H
#include <Python.h>
#include <stdexcept>
// #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API
#define NO_IMPORT_ARRAY
#include "numpy/arrayobject.h"
#include <complex>
// Maps a C++ scalar type to its NumPy type-number constant at compile time,
// e.g. numpy_typemap<float>::type == NPY_FLOAT.  Only the explicitly
// registered specializations below are usable; any other T is a compile
// error (the primary template is declared but never defined).
template<class T>
struct numpy_typemap;

// Registers one C++ type <-> NumPy dtype pair as a full specialization.
#define define_numpy_type(ctype, dtype) \
    template<> \
    struct numpy_typemap<ctype> \
    {static const int type = dtype;};

define_numpy_type(bool, NPY_BOOL);
define_numpy_type(char, NPY_BYTE);
define_numpy_type(short, NPY_SHORT);
define_numpy_type(int, NPY_INT);
define_numpy_type(long, NPY_LONG);
define_numpy_type(long long, NPY_LONGLONG);
define_numpy_type(unsigned char, NPY_UBYTE);
define_numpy_type(unsigned short, NPY_USHORT);
define_numpy_type(unsigned int, NPY_UINT);
define_numpy_type(unsigned long, NPY_ULONG);
define_numpy_type(unsigned long long, NPY_ULONGLONG);
define_numpy_type(float, NPY_FLOAT);
define_numpy_type(double, NPY_DOUBLE);
define_numpy_type(long double, NPY_LONGDOUBLE);
define_numpy_type(std::complex<float>, NPY_CFLOAT);
define_numpy_type(std::complex<double>, NPY_CDOUBLE);
define_numpy_type(std::complex<long double>, NPY_CLONGDOUBLE);
// Reads the element of `aobj` at the multi-index `indaux` and converts it to
// T, dispatching on the array's runtime dtype.  Throws std::runtime_error for
// any dtype without a case below (note: the complex dtypes registered above
// have no case here, so complex arrays are rejected).
template<typename T>
T PyArray_SafeGet(const PyArrayObject* aobj, const npy_intp* indaux)
{
    // HORROR.  PyArray_GetPtr is not const-correct, so constness is cast
    // away; this function only reads through the resulting pointer.
    npy_intp* ind = const_cast<npy_intp*>(indaux);
    void* ptr = PyArray_GetPtr(const_cast<PyArrayObject*>(aobj), ind);
    switch(PyArray_TYPE(aobj))
    {
    case NPY_BOOL:
        return static_cast<T>(*reinterpret_cast<bool*>(ptr));
    case NPY_BYTE:
        return static_cast<T>(*reinterpret_cast<char*>(ptr));
    case NPY_SHORT:
        return static_cast<T>(*reinterpret_cast<short*>(ptr));
    case NPY_INT:
        return static_cast<T>(*reinterpret_cast<int*>(ptr));
    case NPY_LONG:
        return static_cast<T>(*reinterpret_cast<long*>(ptr));
    case NPY_LONGLONG:
        return static_cast<T>(*reinterpret_cast<long long*>(ptr));
    case NPY_UBYTE:
        return static_cast<T>(*reinterpret_cast<unsigned char*>(ptr));
    case NPY_USHORT:
        return static_cast<T>(*reinterpret_cast<unsigned short*>(ptr));
    case NPY_UINT:
        return static_cast<T>(*reinterpret_cast<unsigned int*>(ptr));
    case NPY_ULONG:
        return static_cast<T>(*reinterpret_cast<unsigned long*>(ptr));
    case NPY_ULONGLONG:
        return static_cast<T>(*reinterpret_cast<unsigned long long*>(ptr));
    case NPY_FLOAT:
        return static_cast<T>(*reinterpret_cast<float*>(ptr));
    case NPY_DOUBLE:
        return static_cast<T>(*reinterpret_cast<double*>(ptr));
    case NPY_LONGDOUBLE:
        return static_cast<T>(*reinterpret_cast<long double*>(ptr));
    default:
        throw std::runtime_error("data type not supported");
    }
}
// Stores `value` into the element of `aobj` at the multi-index `indaux`,
// converting it to the array's runtime dtype.  Throws std::runtime_error for
// any dtype without a case below (complex dtypes are not handled).
// Returns the value that was passed in; the original declared a T return
// type but never returned anything, which is undefined behaviour when the
// caller uses the result.
template<typename T>
T PyArray_SafeSet(PyArrayObject* aobj, const npy_intp* indaux, const T& value)
{
    // HORROR.  PyArray_GetPtr takes a non-const index pointer, so constness
    // is cast away; the index itself is not modified.
    npy_intp* ind = const_cast<npy_intp*>(indaux);
    void* ptr = PyArray_GetPtr(aobj, ind);
    switch(PyArray_TYPE(aobj))
    {
    case NPY_BOOL:
        *reinterpret_cast<bool*>(ptr) = static_cast<bool>(value);
        break;
    case NPY_BYTE:
        *reinterpret_cast<char*>(ptr) = static_cast<char>(value);
        break;
    case NPY_SHORT:
        *reinterpret_cast<short*>(ptr) = static_cast<short>(value);
        break;
    case NPY_INT:
        *reinterpret_cast<int*>(ptr) = static_cast<int>(value);
        break;
    case NPY_LONG:
        *reinterpret_cast<long*>(ptr) = static_cast<long>(value);
        break;
    case NPY_LONGLONG:
        *reinterpret_cast<long long*>(ptr) = static_cast<long long>(value);
        break;
    case NPY_UBYTE:
        *reinterpret_cast<unsigned char*>(ptr) = static_cast<unsigned char>(value);
        break;
    case NPY_USHORT:
        *reinterpret_cast<unsigned short*>(ptr) = static_cast<unsigned short>(value);
        break;
    case NPY_UINT:
        *reinterpret_cast<unsigned int*>(ptr) = static_cast<unsigned int>(value);
        break;
    case NPY_ULONG:
        *reinterpret_cast<unsigned long*>(ptr) = static_cast<unsigned long>(value);
        break;
    case NPY_ULONGLONG:
        *reinterpret_cast<unsigned long long*>(ptr) = static_cast<unsigned long long>(value);
        break;
    case NPY_FLOAT:
        *reinterpret_cast<float*>(ptr) = static_cast<float>(value);
        break;
    case NPY_DOUBLE:
        *reinterpret_cast<double*>(ptr) = static_cast<double>(value);
        break;
    case NPY_LONGDOUBLE:
        *reinterpret_cast<long double*>(ptr) = static_cast<long double>(value);
        break;
    default:
        throw std::runtime_error("data type not supported");
    }
    // BUGFIX: flowing off the end of a non-void function is UB; echo the
    // stored value back to the caller.
    return value;
}
#endif
| 4,645 | C | 32.666666 | 93 | 0.655328 |
NVlabs/ACID/ACID/src/utils/libmcubes/__init__.py | from src.utils.libmcubes.mcubes import (
marching_cubes, marching_cubes_func
)
from src.utils.libmcubes.exporter import (
export_mesh, export_obj, export_off
)
__all__ = [
marching_cubes, marching_cubes_func,
export_mesh, export_obj, export_off
]
| 265 | Python | 19.461537 | 42 | 0.70566 |
NVlabs/ACID/ACID/src/utils/libmcubes/exporter.py |
import numpy as np
def export_obj(vertices, triangles, filename):
    """
    Exports a mesh in the (.obj) format.
    """
    # Build all records first, then write them out in a single pass.
    records = []
    for vertex in vertices:
        records.append("v {} {} {}\n".format(*vertex))
    for face in triangles:
        # OBJ face indices are 1-based.
        records.append("f {} {} {}\n".format(*(face + 1)))
    with open(filename, 'w') as fh:
        fh.writelines(records)
def export_off(vertices, triangles, filename):
    """
    Exports a mesh in the (.off) format.
    """
    # OFF layout: magic line, counts line (edges unused, written as 0),
    # then one line per vertex and one per (triangular) face.
    header = ['OFF\n', '{} {} 0\n'.format(len(vertices), len(triangles))]
    body = ["{} {} {}\n".format(*vertex) for vertex in vertices]
    body += ["3 {} {} {}\n".format(*face) for face in triangles]
    with open(filename, 'w') as fh:
        fh.writelines(header + body)
def export_mesh(vertices, triangles, filename, mesh_name="mcubes_mesh"):
    """
    Exports a mesh in the COLLADA (.dae) format.

    Needs PyCollada (https://github.com/pycollada/pycollada).
    """
    import collada

    dae = collada.Collada()

    # Geometry: a single float source holding the vertex coordinates.
    coords = collada.source.FloatSource("verts-array", vertices, ('X','Y','Z'))
    geometry = collada.geometry.Geometry(dae, "geometry0", mesh_name, [coords])

    inputs = collada.source.InputList()
    inputs.addInput(0, 'VERTEX', "#verts-array")
    tri_set = geometry.createTriangleSet(np.copy(triangles), inputs, "")
    geometry.primitives.append(tri_set)
    dae.geometries.append(geometry)

    # Scene graph: one node wrapping the geometry, in one scene.
    geometry_node = collada.scene.GeometryNode(geometry, [])
    scene_node = collada.scene.Node(mesh_name, children=[geometry_node])
    scene = collada.scene.Scene("mcubes_scene", [scene_node])
    dae.scenes.append(scene)
    dae.scene = scene

    dae.write(filename)
| 1,697 | Python | 25.53125 | 81 | 0.570418 |
NVlabs/ACID/ACID/src/utils/libmcubes/marchingcubes.cpp |
#include "marchingcubes.h"
namespace mc
{
// Standard marching-cubes edge table: entry c is a 12-bit mask of the cube
// edges intersected by the isosurface for corner configuration c (bit e set
// means edge e holds an interpolated vertex).  Indexed by the 8-bit
// inside/outside corner mask built in the marching_cubes* templates.
int edge_table[256] =
{
    0x000, 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c, 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
    0x190, 0x099, 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c, 0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
    0x230, 0x339, 0x033, 0x13a, 0x636, 0x73f, 0x435, 0x53c, 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
    0x3a0, 0x2a9, 0x1a3, 0x0aa, 0x7a6, 0x6af, 0x5a5, 0x4ac, 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
    0x460, 0x569, 0x663, 0x76a, 0x066, 0x16f, 0x265, 0x36c, 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
    0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0x0ff, 0x3f5, 0x2fc, 0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
    0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x055, 0x15c, 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
    0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0x0cc, 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
    0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc, 0x0cc, 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
    0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c, 0x15c, 0x055, 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
    0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc, 0x2fc, 0x3f5, 0x0ff, 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
    0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c, 0x36c, 0x265, 0x16f, 0x066, 0x76a, 0x663, 0x569, 0x460,
    0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac, 0x4ac, 0x5a5, 0x6af, 0x7a6, 0x0aa, 0x1a3, 0x2a9, 0x3a0,
    0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c, 0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x033, 0x339, 0x230,
    0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c, 0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x099, 0x190,
    0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c, 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x000
};
int triangle_table[256][16] =
{
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}
};
namespace private_
{
// Coordinate along one axis at which the linearly-interpolated scalar field
// between two edge endpoints crosses `isovalue`. The endpoints have
// coordinates c_a, c_b and field samples f_a, f_b; equal samples degenerate
// to the midpoint, so the division can never be by zero.
double mc_isovalue_interpolation(double isovalue, double f_a, double f_b,
    double c_a, double c_b)
{
    if(f_b == f_a)
        return (c_b + c_a) / 2;
    return (c_b - c_a) * (isovalue - f_a) / (f_b - f_a) + c_a;
}
// Appends one interpolated vertex (x, y, z) to `vertices`. The vertex lies on
// the cube edge that starts at (x1, y1, z1) and runs along `axis` (0 = x,
// 1 = y, 2 = z) towards coordinate c2; f1 and f2 are the field samples at the
// two edge endpoints. Any other `axis` value appends nothing.
void mc_add_vertex(double x1, double y1, double z1, double c2,
    int axis, double f1, double f2, double isovalue, std::vector<double>* vertices)
{
    if(axis < 0 || axis > 2)
        return;
    double vertex[3] = {x1, y1, z1};
    vertex[axis] = mc_isovalue_interpolation(isovalue, f1, f2, vertex[axis], c2);
    vertices->push_back(vertex[0]);
    vertices->push_back(vertex[1]);
    vertices->push_back(vertex[2]);
}
}
}
| 18,889 | C++ | 56.069486 | 116 | 0.339827 |
NVlabs/ACID/ACID/src/utils/libmcubes/pywrapper.cpp |
#include "pywrapper.h"
#include "marchingcubes.h"
#include <stdexcept>
struct PythonToCFunc
{
PyObject* func;
PythonToCFunc(PyObject* func) {this->func = func;}
double operator()(double x, double y, double z)
{
PyObject* res = PyObject_CallFunction(func, "(d,d,d)", x, y, z); // py::extract<double>(func(x,y,z));
if(res == NULL)
return 0.0;
double result = PyFloat_AsDouble(res);
Py_DECREF(res);
return result;
}
};
// Runs marching cubes over a Python scalar-field callable `f(x, y, z)`,
// sampled on a numx*numy*numz grid spanning [lower, upper] (two length-3
// Python sequences). Returns a new reference to a (vertices, polygons) tuple
// of flat 1-D ndarrays; failures surface as std::runtime_error for the
// caller's exception translation.
PyObject* marching_cubes_func(PyObject* lower, PyObject* upper,
    int numx, int numy, int numz, PyObject* f, double isovalue)
{
    std::vector<double> vertices;
    std::vector<size_t> polygons;
    // Copy the lower and upper coordinates to a C array.
    double lower_[3];
    double upper_[3];
    for(int i=0; i<3; ++i)
    {
        PyObject* l = PySequence_GetItem(lower, i);
        if(l == NULL)
            throw std::runtime_error("error");
        PyObject* u = PySequence_GetItem(upper, i);
        if(u == NULL)
        {
            // Drop the already-acquired reference before bailing out.
            Py_DECREF(l);
            throw std::runtime_error("error");
        }
        lower_[i] = PyFloat_AsDouble(l);
        upper_[i] = PyFloat_AsDouble(u);
        Py_DECREF(l);
        Py_DECREF(u);
        // PyFloat_AsDouble signals failure by returning -1.0 with the Python
        // error indicator set; a genuine -1.0 value passes through unharmed.
        if(lower_[i]==-1.0 || upper_[i]==-1.0)
        {
            if(PyErr_Occurred())
                throw std::runtime_error("error");
        }
    }
    // Marching cubes.
    mc::marching_cubes<double>(lower_, upper_, numx, numy, numz, PythonToCFunc(f), isovalue, vertices, polygons);
    // Copy the result to two Python ndarrays.
    // NOTE(review): PyArray_DOUBLE / PyArray_ULONG are pre-NumPy-1.7 type
    // names (NPY_DOUBLE / NPY_ULONG today) — confirm the pinned NumPy still
    // exposes them.
    npy_intp size_vertices = vertices.size();
    npy_intp size_polygons = polygons.size();
    PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
    PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
    std::vector<double>::const_iterator it = vertices.begin();
    for(int i=0; it!=vertices.end(); ++i, ++it)
        *reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
    std::vector<size_t>::const_iterator it2 = polygons.begin();
    for(int i=0; it2!=polygons.end(); ++i, ++it2)
        *reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
    // Py_BuildValue("O") increments refcounts, so release ours afterwards;
    // the returned tuple then solely owns both arrays.
    PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
    Py_XDECREF(verticesarr);
    Py_XDECREF(polygonsarr);
    return res;
}
// Adapter exposing a 3-D NumPy array as a callable scalar field f(x, y, z),
// the shape the mc::marching_cubes* templates expect. Non-owning: the caller
// keeps the array alive for the adapter's lifetime.
struct PyArrayToCFunc
{
    PyArrayObject* arr;
    PyArrayToCFunc(PyArrayObject* array) : arr(array) {}
    // Reads arr[x, y, z] as a double via the project's PyArray_SafeGet helper.
    double operator()(int x, int y, int z)
    {
        npy_intp idx[3] = {x, y, z};
        return PyArray_SafeGet<double>(arr, idx);
    }
};
// Extracts the isosurface `arr == isovalue` from a 3-D ndarray with the
// standard marching-cubes kernel. Returns a new reference to a
// (vertices, polygons) tuple of flat 1-D ndarrays: vertices holds x,y,z
// triples, polygons holds triangle vertex indices.
PyObject* marching_cubes(PyArrayObject* arr, double isovalue)
{
    if(PyArray_NDIM(arr) != 3)
        throw std::runtime_error("Only three-dimensional arrays are supported.");
    // Prepare data.
    npy_intp* shape = PyArray_DIMS(arr);
    // The sampling grid spans the whole array, one grid node per voxel.
    double lower[3] = {0,0,0};
    double upper[3] = {shape[0]-1, shape[1]-1, shape[2]-1};
    long numx = upper[0] - lower[0] + 1;
    long numy = upper[1] - lower[1] + 1;
    long numz = upper[2] - lower[2] + 1;
    std::vector<double> vertices;
    std::vector<size_t> polygons;
    // Marching cubes.
    mc::marching_cubes<double>(lower, upper, numx, numy, numz, PyArrayToCFunc(arr), isovalue,
                            vertices, polygons);
    // Copy the result to two Python ndarrays.
    // NOTE(review): PyArray_DOUBLE / PyArray_ULONG are pre-NumPy-1.7 type
    // names (NPY_DOUBLE / NPY_ULONG today) — confirm the pinned NumPy still
    // exposes them.
    npy_intp size_vertices = vertices.size();
    npy_intp size_polygons = polygons.size();
    PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
    PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
    std::vector<double>::const_iterator it = vertices.begin();
    for(int i=0; it!=vertices.end(); ++i, ++it)
        *reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
    std::vector<size_t>::const_iterator it2 = polygons.begin();
    for(int i=0; it2!=polygons.end(); ++i, ++it2)
        *reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
    // Py_BuildValue("O") increments refcounts; release ours so the tuple is
    // the sole owner of both arrays.
    PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
    Py_XDECREF(verticesarr);
    Py_XDECREF(polygonsarr);
    return res;
}
// Identical to marching_cubes() except that it invokes the alternative
// mc::marching_cubes2 kernel. Returns a new reference to a
// (vertices, polygons) tuple of flat 1-D ndarrays.
PyObject* marching_cubes2(PyArrayObject* arr, double isovalue)
{
    if(PyArray_NDIM(arr) != 3)
        throw std::runtime_error("Only three-dimensional arrays are supported.");
    // Prepare data.
    npy_intp* shape = PyArray_DIMS(arr);
    // The sampling grid spans the whole array, one grid node per voxel.
    double lower[3] = {0,0,0};
    double upper[3] = {shape[0]-1, shape[1]-1, shape[2]-1};
    long numx = upper[0] - lower[0] + 1;
    long numy = upper[1] - lower[1] + 1;
    long numz = upper[2] - lower[2] + 1;
    std::vector<double> vertices;
    std::vector<size_t> polygons;
    // Marching cubes.
    mc::marching_cubes2<double>(lower, upper, numx, numy, numz, PyArrayToCFunc(arr), isovalue,
                            vertices, polygons);
    // Copy the result to two Python ndarrays.
    // NOTE(review): PyArray_DOUBLE / PyArray_ULONG are pre-NumPy-1.7 type
    // names (NPY_DOUBLE / NPY_ULONG today) — confirm the pinned NumPy still
    // exposes them.
    npy_intp size_vertices = vertices.size();
    npy_intp size_polygons = polygons.size();
    PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
    PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
    std::vector<double>::const_iterator it = vertices.begin();
    for(int i=0; it!=vertices.end(); ++i, ++it)
        *reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
    std::vector<size_t>::const_iterator it2 = polygons.begin();
    for(int i=0; it2!=polygons.end(); ++i, ++it2)
        *reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
    // Py_BuildValue("O") increments refcounts; release ours so the tuple is
    // the sole owner of both arrays.
    PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
    Py_XDECREF(verticesarr);
    Py_XDECREF(polygonsarr);
    return res;
}
// Identical to marching_cubes() except that it invokes the alternative
// mc::marching_cubes3 kernel. Returns a new reference to a
// (vertices, polygons) tuple of flat 1-D ndarrays.
PyObject* marching_cubes3(PyArrayObject* arr, double isovalue)
{
    if(PyArray_NDIM(arr) != 3)
        throw std::runtime_error("Only three-dimensional arrays are supported.");
    // Prepare data.
    npy_intp* shape = PyArray_DIMS(arr);
    // The sampling grid spans the whole array, one grid node per voxel.
    double lower[3] = {0,0,0};
    double upper[3] = {shape[0]-1, shape[1]-1, shape[2]-1};
    long numx = upper[0] - lower[0] + 1;
    long numy = upper[1] - lower[1] + 1;
    long numz = upper[2] - lower[2] + 1;
    std::vector<double> vertices;
    std::vector<size_t> polygons;
    // Marching cubes.
    mc::marching_cubes3<double>(lower, upper, numx, numy, numz, PyArrayToCFunc(arr), isovalue,
                            vertices, polygons);
    // Copy the result to two Python ndarrays.
    // NOTE(review): PyArray_DOUBLE / PyArray_ULONG are pre-NumPy-1.7 type
    // names (NPY_DOUBLE / NPY_ULONG today) — confirm the pinned NumPy still
    // exposes them.
    npy_intp size_vertices = vertices.size();
    npy_intp size_polygons = polygons.size();
    PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
    PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
    std::vector<double>::const_iterator it = vertices.begin();
    for(int i=0; it!=vertices.end(); ++i, ++it)
        *reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
    std::vector<size_t>::const_iterator it2 = polygons.begin();
    for(int i=0; it2!=polygons.end(); ++i, ++it2)
        *reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
    // Py_BuildValue("O") increments refcounts; release ours so the tuple is
    // the sole owner of both arrays.
    PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
    Py_XDECREF(verticesarr);
    Py_XDECREF(polygonsarr);
    return res;
} | 7,565 | C++ | 35.907317 | 120 | 0.624455 |
NVlabs/ACID/ACID/src/utils/libmcubes/pywrapper.h |
#ifndef _PYWRAPPER_H
#define _PYWRAPPER_H
#include <Python.h>
#include "pyarraymodule.h"
#include <vector>
// Extract an isosurface from a 3-D ndarray; each returns a new reference to
// a (vertices, polygons) tuple of flat 1-D ndarrays. The three variants call
// different mc:: marching-cubes kernels (see pywrapper.cpp).
PyObject* marching_cubes(PyArrayObject* arr, double isovalue);
PyObject* marching_cubes2(PyArrayObject* arr, double isovalue);
PyObject* marching_cubes3(PyArrayObject* arr, double isovalue);
// Same, but samples a Python callable f(x, y, z) on a numx*numy*numz grid
// spanning [lower, upper] instead of reading a precomputed array.
PyObject* marching_cubes_func(PyObject* lower, PyObject* upper,
    int numx, int numy, int numz, PyObject* f, double isovalue);
#endif // _PYWRAPPER_H
| 455 | C | 25.823528 | 64 | 0.758242 |
NVlabs/ACID/ACID/src/data/__init__.py |
from src.data.core import (
PlushEnvGeom, collate_remove_none, worker_init_fn, get_plush_loader
)
from src.data.transforms import (
PointcloudNoise, SubsamplePointcloud,
SubsamplePoints,
)
__all__ = [
# Core
PlushEnvGeom,
get_plush_loader,
collate_remove_none,
worker_init_fn,
PointcloudNoise,
SubsamplePointcloud,
SubsamplePoints,
]
| 379 | Python | 18.999999 | 71 | 0.693931 |
NVlabs/ACID/ACID/src/data/core.py | import os
import yaml
import pickle
import torch
import logging
import numpy as np
from torch.utils import data
from torch.utils.data.dataloader import default_collate
from src.utils import plushsim_util, common_util
# Axis-aligned bounds of the simulated scene; copied so local use cannot
# mutate the shared constant in plushsim_util.
scene_range = plushsim_util.SCENE_RANGE.copy()
# Normalized target range the scene coordinates are mapped into:
# a cube slightly larger than [-0.55, 0.55]^3.
to_range = np.array([[-1.1,-1.1,-1.1],[1.1,1.1,1.1]]) * 0.5
logger = logging.getLogger(__name__)
def collate_remove_none(batch):
    ''' Collates a batch after discarding invalid (``None``) samples.

    Each remaining data field is stacked into a tensor whose outer
    dimension is the batch size.

    Args:
        batch (list): samples to collate; ``None`` entries are dropped.
    '''
    valid_samples = [sample for sample in batch if sample is not None]
    return data.dataloader.default_collate(valid_samples)
def worker_init_fn(worker_id):
    ''' Worker init function to ensure true randomness.

    Re-seeds numpy in each DataLoader worker from ``os.urandom`` so workers
    do not share identical RNG state, and restricts the worker to a single
    thread to avoid CPU oversubscription.

    Args:
        worker_id (int): id of the dataloader worker process.
    '''
    def set_num_threads(nt):
        # Limit intra-op threading libraries inside the worker process.
        try:
            import mkl; mkl.set_num_threads(nt)
        except Exception:
            pass
        torch.set_num_threads(nt)
        os.environ['IPC_ENABLE']='1'
        for o in ['OPENBLAS_NUM_THREADS','NUMEXPR_NUM_THREADS','OMP_NUM_THREADS','MKL_NUM_THREADS']:
            os.environ[o] = str(nt)
    # BUG FIX: the thread-setup statements previously sat at function scope
    # and referenced the helper-local `nt`, raising NameError on every worker
    # start; they now live inside the helper, which is actually invoked.
    set_num_threads(1)
    random_data = os.urandom(4)
    base_seed = int.from_bytes(random_data, byteorder="big")
    np.random.seed(base_seed + worker_id)
def collate_pair_fn(batch):
    ''' Collates paired samples with variable-length pair annotations.

    ``geo_dists`` entries are concatenated across the batch, ``num_pairs``
    is converted into flat ``pair_indices`` into the batch-stacked point
    set, and every other field goes through the default collater.
    '''
    points_per_sample = batch[0]['sampled_pts'].shape[1]
    out = {}
    for field in batch[0]:
        if field == 'geo_dists':
            out[field] = torch.as_tensor(
                np.concatenate([sample[field] for sample in batch]))
        elif field == 'num_pairs':
            # Offset each sample's pair indices by its position in the batch.
            offsets = [
                np.arange(sample['num_pairs']) + i * points_per_sample
                for i, sample in enumerate(batch)
            ]
            out["pair_indices"] = torch.as_tensor(np.concatenate(offsets))
        else:
            out[field] = default_collate([sample[field] for sample in batch])
    return out
class PlushEnvBoth(data.Dataset):
    '''Paired-frame dataset providing both occupancy and flow supervision.

    Each item loads a simulation frame plus a pre-computed partner frame of
    the same model, and returns partial point-cloud observations, sampled
    points with occupancy/flow labels, and per-pair distances (``geo_dists``)
    between matched points of the two frames.
    '''
    def __init__(self, flow_root, pair_root, num_points,
        split="train", transform={}, pos_ratio=2):
        # Attributes
        # NOTE(review): mutable default `transform={}` is shared across
        # instances; it is only read here, but passing it explicitly is safer.
        self.flow_root = flow_root
        self.num_points = num_points
        self.split = split
        # Outside training, use all points (-1 disables subsampling below).
        if split != "train":
            self.num_points = -1
        self.pair_root = pair_root
        self.transform = transform
        self.pos_ratio = pos_ratio
        # The split index files list (split_id, model_id, reset_id, int_id).
        if split == 'train':
            with open(os.path.join(flow_root, 'train.pkl'), 'rb') as fp:
                self.models = pickle.load(fp)
        else:
            with open(os.path.join(flow_root, 'test.pkl'), 'rb') as fp:
                self.models = pickle.load(fp)
    def __len__(self):
        ''' Returns the length of the dataset.
        '''
        return len(self.models)
    def __getitem__(self, idx):
        ''' Returns an item of the dataset.
        Args:
            idx (int): ID of data point
        '''
        data = {}
        split_id, model_id, reset_id, int_id = self.models[idx]
        # load frame and get partial observation
        points_dict = np.load(
            plushsim_util.get_flow_data_file(
                self.flow_root,split_id, model_id, reset_id, int_id))
        obj_pcloud, env_pcloud = self._prepare_partial_obs(points_dict)
        # load pair frame info
        pair_info = np.load(
            plushsim_util.get_flow_pair_data_file(
                self.pair_root,split_id, model_id, reset_id, int_id))
        pair_reset_id, pair_int_id = self._get_pair_id(pair_info)
        # load pair frame and get partial observation
        points_dict2 = np.load(
            plushsim_util.get_flow_data_file(
                self.flow_root,split_id, model_id, pair_reset_id, pair_int_id))
        obj_pcloud2, env_pcloud2 = self._prepare_partial_obs(points_dict2)
        if self.split == 'train':
            # if training, load random points
            # implicit network sampled points
            pts, occs, sampled_pts, sampled_occ, sampled_flow, sampled_inds = self._prepare_points(
                points_dict)
            # get which occupied points are sampled (index is in the occupied subset)
            occed = occs != 0
            num_occed = occed.sum()
            total_to_occs = np.zeros(pts.shape[0], dtype=np.uint32)
            total_to_occs[occed] = np.arange(num_occed)
            sampled_occs_ids = total_to_occs[sampled_inds[sampled_occ == 1.]]
            # basically sampled_positive ids is used to index the pairs in pair info npz
            # reorganize sampled_pts
            sampled_pts = np.concatenate([sampled_pts[sampled_occ == 1.], sampled_pts[sampled_occ == 0.]])
            sampled_occ = np.concatenate([sampled_occ[sampled_occ == 1.], sampled_occ[sampled_occ == 0.]])
            # NOTE(review): `sampled_occ` was overwritten on the previous
            # line, so this mask indexes the already-reordered occupancies —
            # it looks like `sampled_flow` should be reordered with the
            # original mask (as `sampled_pts` was). Verify.
            sampled_flow = np.concatenate([sampled_flow[sampled_occ == 1.], sampled_flow[sampled_occ == 0.]])
            geo_dists, tgtids = self._prepare_pair_data(pair_info, sampled_occs_ids)
            _,_, sampled_pts2, sampled_occ2, sampled_flow2, _ = self._prepare_points(points_dict2, chosen=tgtids)
        else:
            # if not training, load matched points
            sampled_pts, sampled_pts2, \
            sampled_occ, sampled_occ2, \
            sampled_flow, sampled_flow2, geo_dists = self._prepare_matched_unique(points_dict, points_dict2)
        data = {
            "obj_obs":np.stack([obj_pcloud,obj_pcloud2]),
            "env_obs":np.stack([env_pcloud,env_pcloud2]),
            "sampled_pts":np.stack([sampled_pts,sampled_pts2]),
            "sampled_occ":np.stack([sampled_occ,sampled_occ2]),
            "sampled_flow":np.stack([sampled_flow,sampled_flow2]),
            "geo_dists":geo_dists.astype(np.float32),
            "num_pairs":len(geo_dists),
            "idx":idx,
            "start_frame":int(points_dict['start_frame']),
            "end_frame":int(points_dict['end_frame']),
        }
        return data
    def _get_pts_related_info(self, points_dict):
        '''Unpacks points, bit-packed occupancies, ids and scaled flow.'''
        pts = points_dict['pts'].astype(np.float32)
        occs = np.unpackbits(points_dict['occ'])
        inds = points_dict['ind']
        # Flow is stored only for occupied points; scatter it into a dense
        # array (zeros elsewhere) and scale by 10 (stored downscaled).
        flow = np.zeros((len(pts), 3), dtype=np.float32)
        flow[occs != 0] = points_dict['flow'].astype(np.float32) * 10.
        return pts, occs, inds, flow
    def _prepare_matched_unique(self, points_dict, points_dict2):
        '''Builds evaluation samples from points matched by shared ids.

        Occupied points present in both frames (matched via their `ind`
        labels) come first — subsampled on a 0.03 grid — followed by random
        filler points so both frames have full-size point sets.
        '''
        pts1,occs1,inds1,flow1 = self._get_pts_related_info(points_dict)
        pts2,occs2,inds2,flow2 = self._get_pts_related_info(points_dict2)
        cls1, id1 = np.unique(inds1, return_index=True)
        cls2, id2 = np.unique(inds2, return_index=True)
        int_cls, int_id1, int_id2 = np.intersect1d(cls1, cls2,
            assume_unique=True, return_indices=True)
        geo_dists = np.zeros_like(int_cls)
        unique_pts_1 = pts1[occs1==1][id1[int_id1]]
        unique_flow_1 = flow1[occs1==1][id1[int_id1]]
        unique_occ_1 = np.ones(geo_dists.shape[0], dtype=occs1.dtype)
        # Spatially subsample the matched points; the same `sub_inds` is
        # reused for frame 2 to keep the pairing aligned.
        sub_inds = common_util.subsample_points(unique_pts_1, resolution=0.03, return_index=True)
        unique_pts_1 = unique_pts_1[sub_inds]
        unique_flow_1 = unique_flow_1[sub_inds]
        unique_occ_1 = unique_occ_1[sub_inds]
        sample_others1 = np.random.randint(pts1.shape[0], size=pts1.shape[0] - unique_pts_1.shape[0])
        pts_others1 = pts1[sample_others1]
        occ_others1 = occs1[sample_others1]
        flow_others1 = flow1[sample_others1]
        sampled_pts1 = np.concatenate([unique_pts_1, pts_others1])
        sampled_occ1 = np.concatenate([unique_occ_1, occ_others1])
        sampled_flow1 = np.concatenate([unique_flow_1, flow_others1])
        unique_pts_2 = pts2[occs2==1][id2[int_id2]]
        unique_flow_2 = flow2[occs2==1][id2[int_id2]]
        unique_occ_2 = np.ones(geo_dists.shape[0], dtype=occs2.dtype)
        unique_pts_2 = unique_pts_2[sub_inds]
        unique_flow_2 = unique_flow_2[sub_inds]
        unique_occ_2 = unique_occ_2[sub_inds]
        sample_others2 = np.random.randint(pts2.shape[0], size=pts2.shape[0] - unique_pts_2.shape[0])
        pts_others2 = pts2[sample_others2]
        occ_others2 = occs2[sample_others2]
        flow_others2 = flow2[sample_others2]
        sampled_pts2 = np.concatenate([unique_pts_2, pts_others2])
        sampled_occ2 = np.concatenate([unique_occ_2, occ_others2])
        sampled_flow2 = np.concatenate([unique_flow_2, flow_others2])
        geo_dists = geo_dists[sub_inds]
        return sampled_pts1, sampled_pts2,\
            sampled_occ1, sampled_occ2, \
            sampled_flow1, sampled_flow2, geo_dists
    def _prepare_partial_obs(self, info_dict):
        '''Builds the object and environment partial point-cloud inputs.

        The object cloud is augmented with the (normalized) target location
        and the offset from the grasp location, then optionally transformed.
        '''
        # obj partial observation
        obj_pcloud = info_dict['obj_pcloud_obs'].astype(np.float32)
        grasp_loc = common_util.transform_points(info_dict['grasp_loc'], scene_range, to_range)
        target_loc = common_util.transform_points(info_dict['target_loc'], scene_range, to_range)
        tiled_grasp_loc = np.tile(grasp_loc, (len(obj_pcloud), 1)).astype(np.float32)
        tiled_target_loc = np.tile(target_loc, (len(obj_pcloud), 1)).astype(np.float32)
        obj_pcloud= np.concatenate([obj_pcloud, tiled_target_loc, obj_pcloud[:,:3] - tiled_grasp_loc], axis=-1)
        if 'obj_pcloud' in self.transform:
            obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
        # scene partial observation
        env_pcloud = info_dict['env_pcloud'].astype(np.float32)
        # Tiny jitter so duplicate points do not collapse.
        env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
        if 'env_pcloud' in self.transform:
            env_pcloud = self.transform['env_pcloud'](env_pcloud)
        return obj_pcloud, env_pcloud
    # chosen is the set of positive points that's preselected
    def _prepare_points(self, points_dict, chosen=None):
        '''Samples implicit-network training points (and their occ/flow).

        With `chosen`, those occupied points come first (kept as positives)
        and the remainder is drawn uniformly at random.
        '''
        pts,occs,inds,flow = self._get_pts_related_info(points_dict)
        if chosen is None:
            if self.num_points == -1:
                sampled_pts = pts
                sampled_occ = occs
                sampled_flow = flow
                sampled_inds = np.arange(len(pts))
            else:
                sampled_inds = np.random.randint(pts.shape[0], size=self.num_points)
                sampled_pts = pts[sampled_inds]
                sampled_occ = occs[sampled_inds]
                sampled_flow = flow[sampled_inds]
        else:
            # `chosen` indexes into the occupied subset only.
            pts_chosen = pts[occs!= 0][chosen]
            occ_chosen = np.ones(chosen.shape[0], dtype=occs.dtype)
            flow_chosen = flow[occs!= 0][chosen]
            if self.num_points == -1:
                sample_others = np.random.randint(pts.shape[0], size=pts.shape[0] - chosen.shape[0])
            else:
                sample_others = np.random.randint(pts.shape[0], size=self.num_points - chosen.shape[0])
            pts_others = pts[sample_others]
            occ_others = occs[sample_others]
            flow_others = flow[sample_others]
            sampled_inds = np.concatenate([chosen, sample_others])
            sampled_pts = np.concatenate([pts_chosen, pts_others])
            sampled_occ = np.concatenate([occ_chosen, occ_others])
            sampled_flow= np.concatenate([flow_chosen, flow_others])
        return pts, occs, sampled_pts, sampled_occ.astype(np.float32), sampled_flow, sampled_inds
    def _get_pair_id(self, pair_info):
        '''Parses "<reset>_<frame>" out of the pair's target file name.'''
        pair_filename = os.path.splitext(str(pair_info["target_file"]))[0]
        pair_reset_id, pair_frame_id = (int(f) for f in pair_filename.split('_'))
        return pair_reset_id, pair_frame_id
    def _prepare_pair_data(self, pair_info, sampled_occs_ids):
        '''Draws one partner point (and its distance) per sampled positive.'''
        # load pair info
        dists_sampled = pair_info['dists'][sampled_occs_ids]
        tgtid_sampled = pair_info['inds'][sampled_occs_ids]
        # draw samples,
        # for half of the points, we draw from their three closests,
        # for the other half, we draw from the further points
        H,W = dists_sampled.shape
        draw_pair_ids = np.random.randint(3, size=H)
        draw_pair_ids[H // self.pos_ratio:] = np.random.randint(3, high=W, size=H - H // self.pos_ratio)
        tgtids = tgtid_sampled[np.arange(H), draw_pair_ids]
        geo_dists = dists_sampled[np.arange(H), draw_pair_ids]
        # contrastive_mask = geo_dists > self.contrastive_threshold
        return geo_dists, tgtids
    def get_model_dict(self, idx):
        '''Returns the (split, model, reset, interaction) tuple for `idx`.'''
        return self.models[idx]
class PlushEnvGeom(data.Dataset):
def __init__(self, geom_root, pair_root, num_points,
split="train", transform={}, pos_ratio=2):
# Attributes
self.geom_root = geom_root
self.num_points = num_points
self.split = split
if split != "train":
self.num_points = -1
self.pair_root = pair_root
self.transform = transform
self.pos_ratio = pos_ratio
if split == 'train':
with open(os.path.join(geom_root, 'train.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
else:
with open(os.path.join(geom_root, 'test.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.models)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
split_id, model_id, reset_id, frame_id = self.models[idx]
# load frame and get partial observation
points_dict = np.load(
plushsim_util.get_geom_data_file(
self.geom_root,split_id, model_id, reset_id, frame_id))
obj_pcloud, env_pcloud = self._prepare_partial_obs(points_dict)
# load pair frame info
pair_info = np.load(
plushsim_util.get_pair_data_file(
self.pair_root,split_id, model_id, reset_id, frame_id))
pair_reset_id, pair_frame_id = self._get_pair_id(pair_info)
# load pair frame and get partial observation
points_dict2 = np.load(
plushsim_util.get_geom_data_file(
self.geom_root,split_id, model_id, pair_reset_id, pair_frame_id))
obj_pcloud2, env_pcloud2 = self._prepare_partial_obs(points_dict2)
if self.split == 'train':
# if training, load random points
# implicit network sampled points
pts, occs, sampled_pts, sampled_occ, sampled_inds = self._prepare_points(points_dict)
# get which occupied points are sampled (index is in the occupied subset)
occed = occs != 0
num_occed = occed.sum()
total_to_occs = np.zeros(pts.shape[0], dtype=np.uint32)
total_to_occs[occed] = np.arange(num_occed)
sampled_occs_ids = total_to_occs[sampled_inds[sampled_occ == 1.]]
# basically sampled_positive ids is used to index the pairs in pair info npz
# reorganize sampled_pts
sampled_pts = np.concatenate([sampled_pts[sampled_occ == 1.], sampled_pts[sampled_occ == 0.]])
sampled_occ = np.concatenate([sampled_occ[sampled_occ == 1.], sampled_occ[sampled_occ == 0.]])
geo_dists, tgtids = self._prepare_pair_data(pair_info, sampled_occs_ids)
_,_, sampled_pts2, sampled_occ2, _ = self._prepare_points(points_dict2, chosen=tgtids)
else:
# if not training, load matched points
sampled_pts, sampled_pts2, sampled_occ, sampled_occ2, geo_dists = self._prepare_matched_unique(points_dict, points_dict2)
data = {
"obj_obs":np.stack([obj_pcloud,obj_pcloud2]),
"env_obs":np.stack([env_pcloud,env_pcloud2]),
"sampled_pts":np.stack([sampled_pts,sampled_pts2]),
"sampled_occ":np.stack([sampled_occ,sampled_occ2]),
"geo_dists":geo_dists.astype(np.float32),
"num_pairs":len(geo_dists),
"idx":idx,
}
return data
def _prepare_matched_unique(self, points_dict, points_dict2):
pts1 = points_dict['pts'].astype(np.float32)
occs1 = np.unpackbits(points_dict['occ'])
inds1 = points_dict['ind']
pts2 = points_dict2['pts'].astype(np.float32)
occs2 = np.unpackbits(points_dict2['occ'])
inds2 = points_dict2['ind']
cls1, id1 = np.unique(inds1, return_index=True)
cls2, id2 = np.unique(inds2, return_index=True)
int_cls, int_id1, int_id2 = np.intersect1d(cls1, cls2, assume_unique=True, return_indices=True)
geo_dists = np.zeros_like(int_cls)
unique_pts_1 = pts1[occs1==1][id1[int_id1]]
unique_pts_2 = pts2[occs2==1][id2[int_id2]]
unique_occ_1 = np.ones(geo_dists.shape[0], dtype=occs1.dtype)
unique_occ_2 = np.ones(geo_dists.shape[0], dtype=occs2.dtype)
sample_others1 = np.random.randint(pts1.shape[0], size=pts1.shape[0] - unique_pts_1.shape[0])
sample_others2 = np.random.randint(pts2.shape[0], size=pts2.shape[0] - unique_pts_2.shape[0])
pts_others1 = pts1[sample_others1]
occ_others1 = occs1[sample_others1]
pts_others2 = pts2[sample_others2]
occ_others2 = occs2[sample_others2]
sampled_pts1 = np.concatenate([unique_pts_1, pts_others1])
sampled_occ1 = np.concatenate([unique_occ_1, occ_others1])
sampled_pts2 = np.concatenate([unique_pts_2, pts_others2])
sampled_occ2 = np.concatenate([unique_occ_2, occ_others2])
return sampled_pts1, sampled_pts2, sampled_occ1, sampled_occ2, geo_dists
def _prepare_partial_obs(self, info_dict):
# obj partial observation
obj_pcloud = info_dict['obj_pcloud'].astype(np.float32)
obj_pcloud += 1e-4 * np.random.randn(*obj_pcloud.shape)
if 'obj_pcloud' in self.transform:
obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
# scene partial observation
env_pcloud = info_dict['env_pcloud'].astype(np.float32)
env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
if 'env_pcloud' in self.transform:
env_pcloud = self.transform['env_pcloud'](env_pcloud)
return obj_pcloud, env_pcloud
# chosen is the set of positive points that's preselected
def _prepare_points(self, points_dict, chosen=None):
pts = points_dict['pts'].astype(np.float32)
occs = points_dict['occ']
occs = np.unpackbits(occs)#[:points.shape[0]]
if chosen is None:
if self.num_points == -1:
sampled_pts = pts
sampled_occ = occs
sampled_inds = np.arange(len(pts))
else:
sampled_inds = np.random.randint(pts.shape[0], size=self.num_points)
sampled_pts = pts[sampled_inds]
sampled_occ = occs[sampled_inds]
else:
pts_chosen = pts[occs!= 0][chosen]
occ_chosen = np.ones(chosen.shape[0], dtype=occs.dtype)
if self.num_points == -1:
sample_others = np.random.randint(pts.shape[0], size=pts.shape[0] - chosen.shape[0])
else:
sample_others = np.random.randint(pts.shape[0], size=self.num_points - chosen.shape[0])
pts_others = pts[sample_others]
occ_others = occs[sample_others]
sampled_inds = np.concatenate([chosen, sample_others])
sampled_pts = np.concatenate([pts_chosen, pts_others])
sampled_occ = np.concatenate([occ_chosen, occ_others])
return pts, occs, sampled_pts, sampled_occ.astype(np.float32), sampled_inds
def _get_pair_id(self, pair_info):
pair_filename = os.path.splitext(str(pair_info["target_file"]))[0]
pair_reset_id, pair_frame_id = (int(f) for f in pair_filename.split('_'))
return pair_reset_id, pair_frame_id
def _prepare_pair_data(self, pair_info, sampled_occs_ids):
# load pair info
dists_sampled = pair_info['dists'][sampled_occs_ids]
tgtid_sampled = pair_info['inds'][sampled_occs_ids]
# draw samples,
# for half of the points, we draw from their three closests,
# for the other half, we draw from the further points
H,W = dists_sampled.shape
draw_pair_ids = np.random.randint(3, size=H)
draw_pair_ids[H // self.pos_ratio:] = np.random.randint(3, high=W, size=H - H // self.pos_ratio)
tgtids = tgtid_sampled[np.arange(H), draw_pair_ids]
geo_dists = dists_sampled[np.arange(H), draw_pair_ids]
# contrastive_mask = geo_dists > self.contrastive_threshold
return geo_dists, tgtids
    def get_model_dict(self, idx):
        # Return the raw metadata entry for model number `idx` from self.models.
        return self.models[idx]
def build_transform_geom(cfg):
    """Create the observation transforms used by the geometry datasets.

    Returns a dict with 'obj_pcloud' and 'env_pcloud' entries, each a
    torchvision Compose of point subsampling followed by additive noise.
    """
    from . import transforms as tsf
    from torchvision import transforms
    data_cfg = cfg['data']

    def _pipeline(n_points):
        # subsample to a fixed point budget, then jitter with Gaussian noise
        return transforms.Compose([
            tsf.SubsamplePointcloud(n_points),
            tsf.PointcloudNoise(data_cfg['pointcloud_noise']),
        ])

    return {
        'obj_pcloud': _pipeline(data_cfg['pointcloud_n_obj']),
        'env_pcloud': _pipeline(data_cfg['pointcloud_n_env']),
    }
def get_geom_dataset(cfg, split='train', transform='build'):
    """Instantiate PlushEnvGeom from the config.

    transform='build' constructs the default transforms via
    build_transform_geom; any other value is passed through unchanged.
    """
    if transform == 'build':
        transform = build_transform_geom(cfg)
    data_cfg = cfg['data']
    return PlushEnvGeom(
        data_cfg['geom_path'],
        data_cfg['pair_path'],
        data_cfg['points_subsample'],
        split=split,
        transform=transform,
        pos_ratio=data_cfg.get('pos_ratio', 2),
    )
def get_combined_dataset(cfg, split='train', transform='build'):
    """Instantiate PlushEnvBoth (flow + pair supervision) from the config.

    transform='build' constructs the default transforms via
    build_transform_geom; any other value is passed through unchanged.
    """
    if transform == 'build':
        transform = build_transform_geom(cfg)
    data_cfg = cfg['data']
    return PlushEnvBoth(
        data_cfg['flow_path'],
        data_cfg['pair_path'],
        data_cfg['points_subsample'],
        split=split,
        transform=transform,
        pos_ratio=data_cfg.get('pos_ratio', 2),
    )
def get_plush_loader(cfg, mode, split='train', transform='build', test_shuffle=False, num_workers=None):
    """Build a DataLoader over the plush dataset.

    Args:
        cfg (dict): experiment configuration.
        mode (str): 'geom' or 'combined' -- selects the dataset class.
        split (str): dataset split name.
        transform: 'build' to construct default transforms, or a transform dict.
        test_shuffle (bool): shuffle flag for non-train splits.
        num_workers (int, optional): overrides cfg['training']['n_workers_val']
            for non-train splits.

    Returns:
        torch.utils.data.DataLoader

    Raises:
        ValueError: if `mode` is not a supported dataset mode (previously this
            fell through to an UnboundLocalError on `dataset`).
    """
    if mode == 'geom':
        dataset = get_geom_dataset(cfg, split, transform)
    elif mode == 'combined':
        dataset = get_combined_dataset(cfg, split, transform)
    else:
        raise ValueError(f"unknown plush dataset mode: {mode!r}")
    if split == 'train':
        return torch.utils.data.DataLoader(
            dataset, batch_size=cfg['training']['batch_size'],
            num_workers=cfg['training']['n_workers'],
            shuffle=True,
            collate_fn=collate_pair_fn,
            worker_init_fn=worker_init_fn)
    # evaluation splits use batch_size 1 and an optional worker override
    n_workers = cfg['training']['n_workers_val'] if num_workers is None else num_workers
    return torch.utils.data.DataLoader(
        dataset, batch_size=1,
        num_workers=n_workers,
        shuffle=test_shuffle,
        collate_fn=collate_pair_fn)
def get_plan_loader(cfg, transform='build', category="teddy", num_workers=None):
    """Build a sequential (unshuffled) DataLoader over planning scenarios.

    Args:
        cfg (dict): experiment configuration.
        transform: 'build' to construct the default transforms, or a prebuilt
            transform dict. (Previously this argument was ignored and the
            defaults were always rebuilt.)
        category (str): object-category filter for scenario files.
        num_workers (int, optional): overrides cfg['training']['n_workers_val'].

    Returns:
        torch.utils.data.DataLoader with batch_size 1, in file order.
    """
    if transform == 'build':
        transform = build_transform_geom(cfg)
    dataset = PlushEnvPlan(cfg['data']['plan_path'], category=category, transform=transform)
    n_workers = cfg['training']['n_workers_val'] if num_workers is None else num_workers
    return torch.utils.data.DataLoader(
        dataset, batch_size=1,
        num_workers=n_workers,
        shuffle=False,)
class PlushEnvPlan(data.Dataset):
    """Planning-scenario dataset: paired start/end observations, occupancy
    queries, and actions loaded from per-scenario .npz archives.

    Scenario files are discovered recursively under `plan_root` and filtered
    to paths containing `category`.
    """
    def __init__(self, plan_root, category="teddy", transform={}):
        """
        Args:
            plan_root (str): directory searched recursively for .npz files.
            category (str): substring filter applied to scenario paths.
            transform (dict): optional 'obj_pcloud' / 'env_pcloud' callables
                applied to the partial observations.
        """
        # Attributes
        self.plan_root = plan_root
        self.transform = transform
        self.category = category
        import glob
        self.scenarios = glob.glob(f'{plan_root}/**/*.npz', recursive=True)
        # NOTE(review): the last matching file is dropped ([:-1]) -- looks
        # deliberate (held-out scenario?); confirm before changing.
        self.scenarios = [x for x in self.scenarios if category in x][:-1]
        self.scenarios.sort()
    def __len__(self):
        ''' Returns the length of the dataset.
        '''
        return len(self.scenarios)
    def __getitem__(self, idx):
        ''' Returns an item of the dataset.

        Args:
            idx (int): ID of data point
        '''
        # load frame and get partial observations for both endpoints
        infos = np.load(self.scenarios[idx])
        obj_pcloud_start, env_pcloud_start = self._prepare_partial_obs(infos, "start")
        obj_pcloud_end, env_pcloud_end = self._prepare_partial_obs(infos, "end")
        action = infos['actions'].astype(np.float32)
        pts_start, occ_start, ind_start = self._get_pts_related_info(infos, 'start')
        pts_end, occ_end, ind_end = self._get_pts_related_info(infos, 'end')
        return {
            "obj_obs_start":obj_pcloud_start,
            "env_obs_start":env_pcloud_start,
            "obj_obs_end":obj_pcloud_end,
            "env_obs_end":env_pcloud_end,
            'gt_pts_start': infos['sim_pts_start'].astype(np.float32),
            'gt_pts_end': infos['sim_pts_end'].astype(np.float32),
            'sampled_pts_start': pts_start,
            'sampled_occ_start': occ_start,
            'sampled_ind_start': ind_start,
            'sampled_pts_end': pts_end,
            'sampled_occ_end': occ_end,
            'sampled_ind_end': ind_end,
            "actions": action,
            "sequence_ids":infos['sequence_ids'],
            "fname":self.scenarios[idx],
            "idx":idx,
        }
    def _prepare_partial_obs(self, info_dict, key):
        """Load object / environment partial point clouds for `key`
        ('start' or 'end') and apply the optional transforms."""
        # obj partial observation
        obj_pcloud = info_dict[f'obj_pcloud_{key}'].astype(np.float32)
        if 'obj_pcloud' in self.transform:
            obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
        # scene partial observation; tiny jitter added unconditionally
        # (presumably to break up duplicate points -- confirm)
        env_pcloud = info_dict[f'env_pcloud_{key}'].astype(np.float32)
        env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
        if 'env_pcloud' in self.transform:
            env_pcloud = self.transform['env_pcloud'](env_pcloud)
        return obj_pcloud, env_pcloud
    def _get_pts_related_info(self, points_dict, key):
        """Decode occupancy query points, labels, and correspondence indices.

        np.packbits pads the stored mask to whole bytes, so the decoded bits
        are trimmed back to one label per query point.
        """
        pts = points_dict[f'pts_{key}'].astype(np.float32)
        occs = np.unpackbits(points_dict[f'occ_{key}'])[:pts.shape[0]].astype(np.float32)
        inds = points_dict[f'ind_{key}'].astype(np.int32)
        return pts, occs, inds
NVlabs/ACID/ACID/src/data/transforms.py | import numpy as np
# Transforms
class PointcloudNoise(object):
    ''' Additive Gaussian noise for point-cloud data.

    Adds zero-mean Gaussian noise (float32) with the configured standard
    deviation to the point array stored under the `None` key.

    Args:
        stddev (int): standard deviation
    '''
    def __init__(self, stddev):
        self.stddev = stddev
    def __call__(self, data):
        ''' Apply the transformation.

        Args:
            data (dictionary): data dictionary
        '''
        result = data.copy()
        cloud = data[None]
        jitter = (self.stddev * np.random.randn(*cloud.shape)).astype(np.float32)
        result[None] = cloud + jitter
        return result
class SubsamplePointcloud(object):
    ''' Random point-cloud subsampling.

    Draws N row indices uniformly (with replacement) and gathers them.

    Args:
        N (int): number of points to be subsampled
    '''
    def __init__(self, N):
        self.N = N
    def __call__(self, data):
        ''' Apply the transformation.

        Args:
            data (dict): point array of shape (M, ...)
        '''
        pick = np.random.randint(data.shape[0], size=self.N)
        return data[pick]
class SubsamplePoints(object):
    ''' Random subsampling of occupancy training points.

    With an int N, draws N indices uniformly (with replacement) and gathers
    points, occupancies, correspondence indices, and both flow fields.
    With a pair (Nt_out, Nt_in), draws Nt_out samples from the unoccupied
    points and Nt_in from the occupied ones, concatenates them (outside
    first), and also records the occupied volume fraction under 'volume'.

    Args:
        N (int): number of points to be subsampled
    '''
    def __init__(self, N):
        self.N = N
    def __call__(self, data):
        ''' Apply the transformation.

        Args:
            data (dictionary): data dictionary
        '''
        points = data[None]
        occ = data['occ']
        ind = data['ind']
        flow1 = data['flow1']
        flow2 = data['flow2']
        out = data.copy()
        if isinstance(self.N, int):
            # uniform sampling over all points
            pick = np.random.randint(points.shape[0], size=self.N)
            out.update({
                None: points[pick, :],
                'occ': occ[pick],
                'ind': ind[pick],
                'flow1': flow1[pick],
                'flow2': flow2[pick],
            })
            return out
        # stratified sampling: fixed budgets outside / inside the surface
        n_out, n_in = self.N
        inside = (occ >= 0.5)
        pick_out = np.random.randint((~inside).sum(), size=n_out)
        pick_in = np.random.randint(inside.sum(), size=n_in)

        def gather(arr):
            # outside samples first, then inside samples
            return np.concatenate([arr[~inside][pick_out], arr[inside][pick_in]], axis=0)

        volume = (inside.sum() / len(inside)).astype(np.float32)
        out.update({
            None: gather(points),
            'occ': np.concatenate([np.zeros(n_out, dtype=np.float32),
                                   np.ones(n_in, dtype=np.float32)], axis=0),
            'volume': volume,
            'ind': gather(ind),
            'flow1': gather(flow1),
            'flow2': gather(flow2),
        })
        return out
| 3,578 | Python | 25.708955 | 67 | 0.507546 |
NVlabs/ACID/ACID/configs/default.yaml | method: conv_onet
data:
train_split: train
val_split: val
test_split: test
dim: 3
act_dim: 6
padding: 0.1
type: geom
model:
decoder: simple
encoder: resnet18
decoder_kwargs: {}
encoder_kwargs: {}
multi_gpu: false
c_dim: 512
training:
out_dir: out/default
batch_size: 64
pos_weight: 5
print_every: 200
visualize_every: 1000
visualize_total: 15
checkpoint_every: 1000
validate_every: 2000
backup_every: 100000
eval_sample: false
model_selection_metric: loss
model_selection_mode: minimize
n_workers: 4
n_workers_val: 4
test:
threshold: 0.5
eval_mesh: true
eval_pointcloud: true
remove_wall: false
model_file: model_best.pt
generation:
batch_size: 100000
refinement_step: 0
vis_n_outputs: 30
generate_mesh: true
generate_pointcloud: true
generation_dir: generation
use_sampling: false
resolution_0: 32
upsampling_steps: 3
simplify_nfaces: null
copy_groundtruth: false
copy_input: true
latent_number: 4
latent_H: 8
latent_W: 8
latent_ny: 2
latent_nx: 2
latent_repeat: true
sliding_window: False # added for crop generation | 1,121 | YAML | 18.68421 | 51 | 0.702944 |
NVlabs/ACID/ACID/configs/plush_dyn_geodesics.yaml | method: conv_onet
data:
flow_path: train_data/flow
pair_path: train_data/pair
pointcloud_n_obj: 5000
pointcloud_n_env: 1000
pointcloud_noise: 0.005
points_subsample: 3000
model:
type: combined
obj_encoder_kwargs:
f_dim: 3
hidden_dim: 64
plane_resolution: 128
unet_kwargs:
depth: 4
merge_mode: concat
start_filts: 64
env_encoder_kwargs:
f_dim: 3
hidden_dim: 16
plane_resolution: 64
unet_kwargs:
depth: 2
merge_mode: concat
start_filts: 16
decoder_kwargs:
corr_dim: 32
sample_mode: bilinear # bilinear / nearest
hidden_size: 32
obj_c_dim: 64
env_c_dim: 16
loss:
type: contrastive
contrastive_threshold: 1
use_geodesics: true
scale_with_geodesics: False
training:
out_dir: result/dyn/geodesics
batch_size: 4
model_selection_metric: flow
model_selection_mode: minimize
print_every: 1
visualize_every: 4000
validate_every: 4000
checkpoint_every: 4000
backup_every: 4000
n_workers: 16
n_workers_val: 4
test:
threshold: 0.95
eval_mesh: true
eval_pointcloud: false
model_file: model_best.pt
generation:
refine: false
n_x: 128
n_z: 1
| 1,175 | YAML | 18.932203 | 46 | 0.67234 |
NVlabs/ACID/ACID/preprocess/gen_data_flow_plush.py | import numpy as np
import os
import time, datetime
import sys
import os.path as osp
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
import json
from src.utils import plushsim_util
from src.utils import common_util
import glob
import tqdm
from multiprocessing import Pool
import argparse
# Command-line interface: where to read raw plush-sim data from and where to
# write the generated flow training data.
parser = argparse.ArgumentParser("Training Flow Data Generation")
data_plush_default = osp.join(ACID_dir, "data_plush")
flow_default = osp.join(ACID_dir, "train_data", "flow")
parser.add_argument("--data_root", type=str, default=data_plush_default)
parser.add_argument("--save_root", type=str, default=flow_default)
args = parser.parse_args()
data_root = args.data_root
save_root = args.save_root
# Source coordinate range of the simulated scene, and the normalized target
# range (a cube spanning [-0.55, 0.55] per axis) points are mapped into.
scene_range = plushsim_util.SCENE_RANGE.copy()
to_range = np.array([[-1.1,-1.1,-1.1],[1.1,1.1,1.1]]) * 0.5
# Per-category std-dev used when sampling occupancy query points around the
# object center (see export_train_data).
class_to_std = {
    'teddy':0.12,
    'elephant':0.15,
    'octopus':0.12,
    'rabbit':0.08,
    'dog':0.08,
    'snake':0.04,
}
def export_train_data(data_id):
    """Export one training sample (observations, occupancy, flow) to an .npz.

    Args:
        data_id (tuple): (split_id, model_category, model_name, reset_id,
            interaction_id) identifying one good interaction.
    """
    # try:
    # load action info
    split_id, model_category, model_name, reset_id, interaction_id = data_id
    grasp_loc, target_loc, f1, _, f2 = plushsim_util.get_action_info(model_category, model_name, split_id, reset_id, interaction_id, data_root)
    # get observations: partial point clouds of the object and environment,
    # normalized into `to_range` and voxel-subsampled (finer grid for object)
    obj_pts1, env_pts1 = plushsim_util.get_scene_partial_pointcloud(
            model_category, model_name, split_id, reset_id, f1, data_root)
    obj_pts1=common_util.subsample_points(
        common_util.transform_points(obj_pts1, scene_range, to_range), resolution=0.005, return_index=False)
    env_pts1=common_util.subsample_points(
        common_util.transform_points(env_pts1, scene_range, to_range), resolution=0.020, return_index=False)
    # calculate flow: displacement of the full object points between the
    # start frame f1 and the end frame f2
    sim_pts1, _, loc,_,_= plushsim_util.get_object_full_points(
            model_category, model_name, split_id, reset_id, f1, data_root)
    sim_pts2, _,_,_,_= plushsim_util.get_object_full_points(
            model_category, model_name, split_id, reset_id, f2, data_root)
    sim_pts1=common_util.transform_points(sim_pts1, scene_range, to_range)
    sim_pts2=common_util.transform_points(sim_pts2, scene_range, to_range)
    sim_pts_flow = sim_pts2 - sim_pts1
    # sample occupancy query points around the (normalized) object center
    center =common_util.transform_points(loc, scene_range, to_range)[0]
    pts, occ, pt_class = plushsim_util.sample_occupancies(sim_pts1, center,
                std=class_to_std[model_category],sample_scheme='object')
    # get implicit flows: each query point inherits the flow of the object
    # point indexed by pt_class
    flow = sim_pts_flow[pt_class]
    # save compactly (float16 / packed bits) to keep files small
    kwargs = {'sim_pts':sim_pts1.astype(np.float16),
            'obj_pcloud_obs':obj_pts1.astype(np.float16),
            'env_pcloud':env_pts1.astype(np.float16),
            'pts':pts.astype(np.float16),
            'occ':np.packbits(occ),
            'ind':pt_class.astype(np.uint16),
            'flow':flow.astype(np.float16),
            'start_frame':f1,
            'end_frame':f2,
            'grasp_loc':grasp_loc,
            'target_loc': target_loc}
    model_dir = os.path.join(save_root, f"{split_id}", f"{model_name}")
    save_path = os.path.join(model_dir, f"{reset_id:03d}_{interaction_id:03d}.npz")
    np.savez_compressed(save_path, **kwargs)
def get_all_data_points_flow(data_root):
    """Collect ids of every interaction marked good, creating output dirs.

    Scans for per-model good_interactions.json files and returns a list of
    (split_id, model_category, model_name, reset_id, interaction_id) tuples.
    """
    interaction_files = glob.glob(f"{data_root}/*/*/*/info/good_interactions.json")
    collected = []
    for path in tqdm.tqdm(interaction_files):
        split_id, model_category, model_name = path.split('/')[-5:-2]
        # make sure the matching output directory exists up front
        os.makedirs(os.path.join(save_root, f"{split_id}", f"{model_name}"), exist_ok=True)
        model_dir = plushsim_util.get_model_dir(data_root, split_id, model_category, model_name)
        with open(path, 'r') as fp:
            good_ones = json.load(fp)
        for reset_key, interaction_ids in good_ones.items():
            for int_id in interaction_ids:
                collected.append((split_id, model_category, model_name, int(reset_key), int_id))
    return collected
# Export every good interaction in parallel and report total wall time.
good_ints = get_all_data_points_flow(data_root)#[:100]
start_time = time.time()
with Pool(40) as p:
    # imap_unordered: results are discarded, we only care about side effects
    for _ in tqdm.tqdm(p.imap_unordered(export_train_data, good_ints), total=len(good_ints)):
        pass
end_time = time.time()
from datetime import timedelta
time_str = str(timedelta(seconds=end_time - start_time))
print(f'Total processing takes: {time_str}') | 4,353 | Python | 39.691588 | 143 | 0.64668 |
NVlabs/ACID/ACID/preprocess/gen_data_contrastive_pairs_flow.py | import os
import sys
import glob
import tqdm
import random
import argparse
import numpy as np
import os.path as osp
import time
from multiprocessing import Pool
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
# Command-line interface: locations of the raw data, per-model geodesic
# distance matrices (meta), input flow files, and the pair output directory.
parser = argparse.ArgumentParser("Training Contrastive Pair Data Generation")
data_plush_default = osp.join(ACID_dir, "data_plush")
meta_default = osp.join(ACID_dir, "data_plush", "metadata")
flow_default = osp.join(ACID_dir, "train_data", "flow")
pair_default = osp.join(ACID_dir, "train_data", "pair")
parser.add_argument("--data_root", type=str, default=data_plush_default)
parser.add_argument("--meta_root", type=str, default=meta_default)
parser.add_argument("--flow_root", type=str, default=flow_default)
parser.add_argument("--save_root", type=str, default=pair_default)
args = parser.parse_args()
data_root = args.data_root
flow_root = args.flow_root
save_root = args.save_root
meta_root = args.meta_root
os.makedirs(save_root, exist_ok=True)
def using_complex(a):
    """Mask duplicate values within each row of `a` with the sentinel 256.

    Each row gets a distinct imaginary offset so np.unique treats equal
    values in different rows as different; only the first (flat-order)
    occurrence of each per-row value is kept, repeats become 256.
    """
    row_offsets = 1j * np.linspace(0, a.shape[1], a.shape[0], endpoint=False)
    tagged = a + row_offsets[:, np.newaxis]
    _, first_idx = np.unique(tagged, return_index=True)
    masked = np.full_like(a, 256)
    np.put(masked, first_idx, a.flat[first_idx])
    return masked
def process(pair, num_samples=320, keep=80):
    """Sample target candidates for one (source, target) flow-file pair and
    keep the `keep` closest per source point by geodesic distance.

    Relies on module globals set by export_pair_data: `dist_matrix` (pairwise
    distances, stored so they fit uint8 after sorting) and `save_dir`.

    Args:
        pair (tuple): (split_id, model_name, source_filename, target_filename).
        num_samples (int): candidate targets drawn per source point.
        keep (int): number of closest candidates retained.
    """
    split_id, model_name, f,p = pair
    src_file = np.load(f"{flow_root}/{split_id}/{model_name}/{f}")
    tgt_file = np.load(f"{flow_root}/{split_id}/{model_name}/{p}")
    src_inds = src_file['ind']
    tgt_inds = tgt_file['ind']
    src_inds = np.tile(src_inds, (num_samples,1)).T
    # random candidate positions in the target file, one row per source point
    tgt_samples = np.random.randint(0, high=len(tgt_inds) - 1, size=(len(src_inds), num_samples))
    tgt_samples_inds = tgt_inds[tgt_samples]
    dists = dist_matrix[src_inds.reshape(-1), tgt_samples_inds.reshape(-1)].reshape(*src_inds.shape)
    # mask duplicate candidates per row (set to 256) so they sort last
    dists_unique = using_complex(dists)
    idx = np.argsort(dists_unique, axis=-1)
    dists_sorted = np.take_along_axis(dists, idx, axis=-1).astype(np.uint8)[:,:keep]
    tgt_samples_sorted = np.take_along_axis(tgt_samples, idx, axis=-1)[:,:keep]
    # store the indices with the smallest dtype that can represent them
    if tgt_samples_sorted.max() <= np.iinfo(np.uint16).max:
        tgt_samples_sorted = tgt_samples_sorted.astype(np.uint16)
    else:
        tgt_samples_sorted = tgt_samples_sorted.astype(np.uint32)
    results = {"target_file":p, "dists":dists_sorted, "inds":tgt_samples_sorted}
    np.savez_compressed(os.path.join(save_dir, f"pair_{f}"), **results)
def export_pair_data(data_id):
    """Generate contrastive pair files for every flow file of one model.

    Sets the module globals `dist_matrix` and `save_dir` consumed by
    `process` (NOTE(review): workers see them via the pool -- this assumes a
    fork-based multiprocessing start method).

    Args:
        data_id (tuple): (split_id, model_name).
    """
    split_id, model_name = data_id
    all_files = all_geoms[data_id]
    print(split_id, model_name)
    global dist_matrix
    dist_matrix = np.load(f'{meta_root}/{split_id}/{model_name}_dist.npz')['arr_0']
    global save_dir
    save_dir = os.path.join(save_root, split_id, model_name)
    os.makedirs(save_dir, exist_ok=True)
    # pair each source file with one randomly chosen target file of the model
    pairs = [ (split_id, model_name, f,random.choice(all_files)) for f in all_files ]
    start_time = time.time()
    with Pool(10) as p:
        for _ in tqdm.tqdm(p.imap_unordered(process, pairs), total=len(all_files)):
            pass
    end_time = time.time()
    from datetime import timedelta
    time_str = str(timedelta(seconds=end_time - start_time))
    print(f'Total processing takes: {time_str}')
if __name__ == '__main__':
    from collections import defaultdict
    # group flow files as (split_id, model_name) -> [file_name, ...]
    global all_geoms
    all_geoms = defaultdict(lambda: [])
    for g in glob.glob(f"{flow_root}/*/*/*"):
        split_id, model_name, file_name = g.split('/')[-3:]
        all_geoms[(split_id, model_name)].append(file_name)
    # process models sequentially; each call parallelizes over its files
    for k in all_geoms.keys():
        export_pair_data(k)
| 3,584 | Python | 35.212121 | 100 | 0.66183 |
NVlabs/ACID/ACID/preprocess/gen_data_flow_splits.py | import os
import sys
import os.path as osp
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
import glob
import argparse
# Command-line interface: location of the flow files to split, and an option
# to use the full set for both train and test.
flow_default = osp.join(ACID_dir, "train_data", "flow")
parser = argparse.ArgumentParser("Making training / testing splits...")
parser.add_argument("--flow_root", type=str, default=flow_default)
parser.add_argument("--no_split", action="store_true", default=False)
args = parser.parse_args()
flow_root = args.flow_root
all_npz = glob.glob(f"{flow_root}/*/*/*.npz")
print(f"In total {len(all_npz)} data points...")
def filename_to_id(fname):
    """Split '<...>/<split>/<model>/<reset>_<frame>.npz' into its id parts."""
    parts = fname.split("/")
    split_id, model_name = parts[-3], parts[-2]
    stem = os.path.splitext(parts[-1])[0]
    reset_id, frame_id = (int(piece) for piece in stem.split('_'))
    return split_id, model_name, reset_id, frame_id
from collections import defaultdict
# nested index: (split_id, model_name) -> reset_id -> [frame_id, ...]
total_files = defaultdict(lambda : defaultdict(lambda : []))
for fname in all_npz:
    split_id, model_name, reset_id, frame_id = filename_to_id(fname)
    total_files[(split_id, model_name)][reset_id].append(frame_id)
# freeze the defaultdicts into plain dicts (so they pickle cleanly)
total_files = dict(total_files)
for k,v in total_files.items():
    total_files[k] = dict(v)
import pickle
if args.no_split:
    # no held-out set: train and test share everything
    train = total_files
    test = total_files
else:
    # hold out all "teddy" models for testing; everything else trains
    train = {}
    test = {}
    for k,v in total_files.items():
        split_id, model_name = k
        if "teddy" in model_name:
            test[k] = v
        else:
            train[k] = v
# flatten to (split_id, model_name, reset_id, frame_id) tuples
train_total = []
for k,v in train.items():
    for x, u in v.items():
        for y in u:
            train_total.append((*k, x, y))
print(f"training data points: {len(train_total)}")
test_total = []
for k,v in test.items():
    for x, u in v.items():
        for y in u:
            test_total.append((*k, x, y))
print(f"testing data points: {len(test_total)}")
with open(f"{flow_root}/train.pkl", "wb") as fp:
    pickle.dump(train_total, fp)
with open(f"{flow_root}/test.pkl", "wb") as fp:
pickle.dump(test_total, fp) | 1,972 | Python | 28.447761 | 76 | 0.625761 |
erasromani/isaac-sim-python/simulate_grasp.py | import os
import argparse
from grasp.grasp_sim import GraspSimulator
from omni.isaac.motion_planning import _motion_planning
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.synthetic_utils import OmniKitHelper
def main(args):
    """Launch Isaac Sim, spawn objects into the scene, execute one planar
    grasp, print the evaluation, and optionally save a rollout video.

    Args:
        args (argparse.Namespace): parsed command-line options (paths, grasp
            position/angle, object count, video output, viewport size).
    """
    kit = OmniKitHelper(
        {"renderer": "RayTracedLighting", "experience": f"{os.environ['EXP_PATH']}/isaac-sim-python.json", "width": args.width, "height": args.height}
    )
    _mp = _motion_planning.acquire_motion_planning_interface()
    _dc = _dynamic_control.acquire_dynamic_control_interface()
    if args.video: record = True
    else: record = False
    sim = GraspSimulator(kit, _dc, _mp, record=record)
    # add object path (local file or nucleus server asset)
    if args.location == 'local': from_server = False
    else: from_server = True
    for path in args.path:
        sim.add_object_path(path, from_server=from_server)
    # start simulation
    sim.play()
    # drop args.num objects at the spawn point and let them settle
    for _ in range(args.num):
        sim.add_object(position=(40, 0, 10))
    sim.wait_for_drop()
    sim.wait_for_loading()
    evaluation = sim.execute_grasp(args.position, args.angle)
    # banner-style result printout
    output_string = f"Grasp evaluation: {evaluation}"
    print('\n' + ''.join(['#'] * len(output_string)))
    print(output_string)
    print(''.join(['#'] * len(output_string)) + '\n')
    # Stop physics simulation
    sim.stop()
    if record: sim.save_video(args.video)
if __name__ == '__main__':
    # CLI: asset paths plus grasp pose are required; everything else defaults.
    parser = argparse.ArgumentParser(description='Simulate Panda arm planar grasp execution in NVIDIA Omniverse Isaac Sim')
    required = parser.add_argument_group('required arguments')
    required.add_argument('-P', '--path', type=str, nargs='+', metavar='', required=True, help='path to usd file or content folder')
    required.add_argument('-p', '--position', type=float, nargs=3, metavar='', required=True, help='grasp position, X Y Z')
    required.add_argument('-a', '--angle', type=float, metavar='', required=True, help='grasp angle in degrees')
    parser.add_argument('-l', '--location', type=str, metavar='', required=False, help='location of usd path, choices={local, nucleus_server}', choices=['local', 'nucleus_server'], default='local')
    parser.add_argument('-n', '--num', type=int, metavar='', required=False, help='number of objects to spawn in the scene', default=1)
    parser.add_argument('-v', '--video', type=str, metavar='', required=False, help='output filename of grasp simulation video')
    parser.add_argument('-W', '--width', type=int, metavar='', required=False, help='width of the viewport and generated images', default=1024)
    parser.add_argument('-H', '--height', type=int, metavar='', required=False, help='height of the viewport and generated images', default=800)
    args = parser.parse_args()
    print(args.path)
main(args) | 2,835 | Python | 39.514285 | 197 | 0.662434 |
erasromani/isaac-sim-python/README.md | # isaac-sim-python: Python wrapper for NVIDIA Omniverse Isaac-Sim
## Overview
This repository contains a collection of python wrappers for NVIDIA Omniverse Isaac-Sim simulations. `grasp` package simulates a planar grasp execution of a Panda arm in a scene with various rigid objects place in a bin.
## Installation
This repository requires installation of NVIDIA Omniverse Isaac-Sim. A comprehensive setup tutorial is provided in the official [NVIDIA Omniverse Isaac-Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/setup.html) documentation. Following installation of Isaac-Sim, a conda environment must also be created that contains all the required packages for the python wrappers. Another comprehensive conda environment setup tutorial is provided in this [link](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/python_samples.html).
`ffmpeg-python` must be installed within the `isaac-sim` conda environment and can be acquired via a typical pip install:
```
conda activate isaac-sim
pip install ffmpeg-python
```
Lastly, clone the repository into the `python-samples` sub-directory within the `isaac-sim` directory.
```
git clone https://github.com/erasromani/isaac-sim-python.git
```
## Quickstart
Navigate to the `python-samples` sub-directory within the `isaac-sim` directory, source environment variables, activate conda environment, and run `simulate_grasp.py`.
```
source setenv.sh
conda activate isaac-sim
cd isaac-sim-python
python simulate_grasp.py -P Isaac/Props/Flip_Stack/large_corner_bracket_physics.usd Isaac/Props/Flip_Stack/screw_95_physics.usd Isaac/Props/Flip_Stack/t_connector_physics.usd -l nucleus_server -p 40 0 5 -a 45 -n 5 -v sim.mp4
```
The code above will simulate grasp execution of a Panda arm in a scene with a bin and 5 objects randomly selected from the collection of usd files given. The specified grasp pose is a planar grasp with grasp position `(40, 0, 5)` and angle `45` degrees. A video of the simulation will be generated and saved as `sim.mp4`.
## Additional Resources
- https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html
- https://docs.omniverse.nvidia.com/py/isaacsim/index.html
| 2,201 | Markdown | 56.947367 | 550 | 0.79055 |
erasromani/isaac-sim-python/grasp/grasp_sim.py | import os
import numpy as np
import tempfile
import omni.kit
from omni.isaac.synthetic_utils import SyntheticDataHelper
from grasp.utils.isaac_utils import RigidBody
from grasp.grasping_scenarios.grasp_object import GraspObject
from grasp.utils.visualize import screenshot, img2vid
# Default viewport camera pose applied by GraspSimulator.set_camera_pose().
default_camera_pose = {
    'position': (142, -127, 56),  # camera position given by (x, y, z)
    'target': (-180, 234, -27)  # look-at target given by (x, y, z)
}
class GraspSimulator(GraspObject):
    """ Defines a grasping simulation scenario.

    Scenarios define planar grasp execution in a scene of a Panda arm and
    various rigid objects. Adds optional screenshot-based video recording on
    top of GraspObject.
    """
    def __init__(self, kit, dc, mp, dt=1/60.0, record=False, record_interval=10):
        """
        Initializes grasp simulator.

        Args:
            kit (omni.isaac.synthetic_utils.scripts.omnikit.OmniKitHelper): helper class for launching OmniKit from a python environment
            dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
            mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            dt (float): simulation time step in seconds
            record (bool): flag for capturing screenshots throughout simulation for video recording
            record_interval (int): frame interval for capturing screenshots
        """
        super().__init__(kit, dc, mp)
        self.frame = 0
        self.dt = dt
        self.record = record
        self.record_interval = record_interval
        # screenshots are staged in a temp dir until save_video() is called
        self.tmp_dir = tempfile.mkdtemp()
        self.sd_helper = SyntheticDataHelper()
        # create initial scene
        self.create_franka()
        # set camera pose
        self.set_camera_pose(default_camera_pose['position'], default_camera_pose['target'])
    def execute_grasp(self, position, angle):
        """
        Executes a planar grasp with a Panda arm.

        Args:
            position (list or numpy.ndarray): grasp position array of length 3 given by [x, y, z]
            angle (float): grasp angle in degrees

        Returns:
            evaluation (enum.EnumMeta): GRASP_eval value, one of
                {GRASP_eval.FAILURE, GRASP_eval.SUCCESS}
        """
        self.set_target_angle(angle)
        self.set_target_position(position)
        self.perform_tasks()
        # start simulation
        # NOTE(review): unlike wait_for_drop(), this records the playing state
        # but never calls self.play() when stopped -- confirm callers always
        # start playback first.
        if self._kit.editor.is_playing(): previously_playing = True
        else: previously_playing = False
        if self.pick_and_place is not None:
            while True:
                # step until the pick-and-place state machine reports a result
                self.step(0)
                self.update()
                if self.pick_and_place.evaluation is not None:
                    break
            evaluation = self.pick_and_place.evaluation
        self.stop_tasks()
        self.step(0)
        self.update()
        # Stop physics simulation
        if not previously_playing: self.stop()
        # NOTE(review): `evaluation` is unbound if self.pick_and_place is None
        # -- confirm registration always precedes this call.
        return evaluation
    def wait_for_drop(self, max_steps=2000):
        """
        Waits for all objects to drop (come to rest).

        Args:
            max_steps (int): maximum number of timesteps before aborting wait
        """
        # start simulation
        if self._kit.editor.is_playing(): previously_playing = True
        else: previously_playing = False
        if not previously_playing: self.play()
        step = 0
        # NOTE(review): loop keeps running past max_steps while assets are
        # still loading -- verify that is intended.
        while step < max_steps or self._kit.is_loading():
            self.step(step)
            self.update()
            # done once every tracked object is at rest
            objects_speed = np.array([o.get_speed() for o in self.objects])
            if np.all(objects_speed == 0): break
            step +=1
        # Stop physics simulation
        if not previously_playing: self.stop()
    def wait_for_loading(self):
        """
        Waits for all scene visuals to load.
        """
        while self.is_loading():
            self.update()
    def play(self):
        """
        Starts simulation, registering the scene on first start.
        """
        self._kit.play()
        if not hasattr(self, 'world') or not hasattr(self, 'franka_solid') or not hasattr(self, 'bin_solid') or not hasattr(self, 'pick_and_place'):
            self.register_scene()
    def stop(self):
        """
        Stops simulation.
        """
        self._kit.stop()
    def update(self):
        """
        Simulate one time step.
        """
        # capture a screenshot every record_interval frames when recording
        if self.record and self.sd_helper is not None and self.frame % self.record_interval == 0:
            screenshot(self.sd_helper, suffix=self.frame, directory=self.tmp_dir)
        self._kit.update(self.dt)
        self.frame += 1
    def is_loading(self):
        """
        Determine if all scene visuals are loaded.

        Returns:
            (bool): flag for whether or not all scene visuals are loaded
        """
        return self._kit.is_loading()
    def set_camera_pose(self, position, target):
        """
        Set camera pose.

        Args:
            position (list or numpy.ndarray): camera position array of length 3 given by [x, y, z]
            target (list or numpy.ndarray): target position array of length 3 given by [x, y, z]
        """
        self._editor.set_camera_position("/OmniverseKit_Persp", *position, True)
        self._editor.set_camera_target("/OmniverseKit_Persp", *target, True)
    def save_video(self, path):
        """
        Save video recording of screenshots taken throughout the simulation.

        Args:
            path (str): output video filename
        """
        # one frame per record_interval sim steps -> effective framerate
        framerate = int(round(1.0 / (self.record_interval * self.dt)))
        img2vid(os.path.join(self.tmp_dir, '*.png'), path, framerate=framerate)
| 5,666 | Python | 32.532544 | 148 | 0.59107 |
erasromani/isaac-sim-python/grasp/grasping_scenarios/scenario.py | # Credits: The majority of this code is taken from build code associated with nvidia/isaac-sim:2020.2.2_ea with minor modifications.
import gc
import carb
import omni.usd
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from grasp.utils.isaac_utils import set_up_z_axis
class Scenario:
    """
    Defines a block stacking scenario.

    Scenarios define the life cycle within kit and handle init, startup,
    shutdown etc. Subclasses override the no-op hooks (task, step, ...).
    """
    def __init__(self, editor, dc, mp):
        """
        Initialize scenario.

        Args:
            editor (omni.kit.editor._editor.IEditor): editor object from isaac-sim simulation
            dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
            mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
        """
        self._editor = editor  # Reference to the Kit editor
        self._stage = omni.usd.get_context().get_stage()  # Reference to the current USD stage
        self._dc = dc  # Reference to the dynamic control plugin
        self._mp = mp  # Reference to the motion planning plugin
        self._domains = []  # Contains instances of environment
        self._obstacles = []  # Contains references to any obstacles in the scenario
        self._executor = None  # Contains the thread pool used to run tasks
        self._created = False  # Is the robot created or not
        self._running = False  # Is the task running or not
    def __del__(self):
        """
        Cleanup scenario objects when deleted, force garbage collection.
        """
        # NOTE(review): sets `robot_created`, but the flag read by
        # is_created() is `_created` -- confirm which attribute is intended.
        self.robot_created = False
        self._domains = []
        self._obstacles = []
        self._executor = None
        gc.collect()
    def reset_blocks(self, *args):
        """
        Function called when block poses are reset. No-op by default.
        """
        pass
    def stop_tasks(self, *args):
        """
        Stop tasks in the scenario if any.
        """
        self._running = False
        pass
    def step(self, step):
        """
        Step the scenario, can be used to update things in the scenario per frame.
        """
        pass
    def create_franka(self, *args):
        """
        Create franka USD objects, resolving asset paths on the nucleus server.
        """
        result, nucleus_server = find_nucleus_server()
        if result is False:
            carb.log_error("Could not find nucleus server with /Isaac folder")
            return
        self.asset_path = nucleus_server + "/Isaac"
        # USD paths loaded by scenarios
        self.franka_table_usd = self.asset_path + "/Samples/Leonardo/Stage/franka_block_stacking.usd"
        self.franka_ghost_usd = self.asset_path + "/Samples/Leonardo/Robots/franka_ghost.usd"
        self.background_usd = self.asset_path + "/Environments/Grid/gridroom_curved.usd"
        self.rubiks_cube_usd = self.asset_path + "/Props/Rubiks_Cube/rubiks_cube.usd"
        self.red_cube_usd = self.asset_path + "/Props/Blocks/red_block.usd"
        self.yellow_cube_usd = self.asset_path + "/Props/Blocks/yellow_block.usd"
        self.green_cube_usd = self.asset_path + "/Props/Blocks/green_block.usd"
        self.blue_cube_usd = self.asset_path + "/Props/Blocks/blue_block.usd"
        self._created = True
        self._stage = omni.usd.get_context().get_stage()
        # make +Z the stage up axis before any robot is added
        set_up_z_axis(self._stage)
        self.stop_tasks()
        pass
    def register_assets(self, *args):
        """
        Connect franka controller to usd assets. No-op by default.
        """
        pass
    def task(self, domain):
        """
        Task to be performed for a given robot. No-op by default.
        """
        pass
    def perform_tasks(self, *args):
        """
        Perform all tasks in scenario if multiple robots are present.
        """
        self._running = True
        pass
    def is_created(self):
        """
        Return if the franka was already created.
        """
        return self._created
| 3,963 | Python | 32.880342 | 132 | 0.609134 |
erasromani/isaac-sim-python/grasp/grasping_scenarios/grasp_object.py | # Credits: Starter code taken from build code associated with nvidia/isaac-sim:2020.2.2_ea.
import os
import random
import numpy as np
import glob
import omni
import carb
from enum import Enum
from collections import deque
from pxr import Gf, UsdGeom
from copy import copy
from omni.physx.scripts.physicsUtils import add_ground_plane
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.utils._isaac_utils import math as math_utils
from omni.isaac.samples.scripts.utils.world import World
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from omni.physx import _physx
from grasp.utils.isaac_utils import create_prim_from_usd, RigidBody, set_translate, set_rotate, setup_physics
from grasp.grasping_scenarios.franka import Franka, default_config
from grasp.grasping_scenarios.scenario import Scenario
statedic = {0: "orig", 1: "axis_x", 2: "axis_y", 3: "axis_z"}
class SM_events(Enum):
    """
    State machine events.
    """

    START = 0
    WAYPOINT_REACHED = 1
    GOAL_REACHED = 2
    ATTACHED = 3
    DETACHED = 4
    TIMEOUT = 5
    STOP = 6
    NONE = 7  # no event occurred, just clocks
class SM_states(Enum):
    """
    State machine states.
    """

    STANDBY = 0  # Default state, does nothing unless enters with event START
    PICKING = 1
    ATTACH = 2
    HOLDING = 3
    GRASPING = 4
    LIFTING = 5
class GRASP_eval(Enum):
    """
    Grasp execution evaluation.
    """

    FAILURE = 0
    SUCCESS = 1
class PickAndPlaceStateMachine(object):
    """
    Self-contained state machine class for Robot Behavior. Each machine state may react to different events,
    and the handlers are defined as in-class functions.
    """

    def __init__(self, stage, robot, ee_prim, default_position):
        """
        Initialize state machine.

        Args:
            stage (pxr.Usd.Stage): usd stage
            robot (grasp.grasping_scenarios.franka.Franka): robot controller object
            ee_prim (pxr.Usd.Prim): Panda arm end effector prim
            default_position (omni.isaac.dynamic_control._dynamic_control.Transform): default position of Panda arm
        """
        self.robot = robot
        self.dc = robot.dc
        self.end_effector = ee_prim
        self.end_effector_handle = None
        self._stage = stage

        self.start_time = 0.0
        self.start = False
        self._time = 0.0
        self.default_timeout = 10
        self.default_position = copy(default_position)
        self.target_position = default_position
        self.target_point = default_position.p
        self.target_angle = 0  # grasp angle in degrees
        self.reset = False
        self.evaluation = None
        self.waypoints = deque()
        self.thresh = {}
        # Threshold to clear waypoints/goal
        # (any waypoint that is not final will be cleared with the least precision)
        self.precision_thresh = [
            [0.0005, 0.0025, 0.0025, 0.0025],
            [0.0005, 0.005, 0.005, 0.005],
            [0.05, 0.2, 0.2, 0.2],
            [0.08, 0.4, 0.4, 0.4],
            [0.18, 0.6, 0.6, 0.6],
        ]
        self.add_object = None

        # Event management variables
        # Used to verify if the goal was reached due to robot moving or it had never left previous target
        self._is_moving = False
        self._attached = False  # Used to flag the Attached/Detached events on a change of state from the end effector
        self._detached = False

        self.is_closed = False
        self.pick_count = 0

        # Define the state machine handling functions
        self.sm = {}
        # Make empty state machine for all events and states
        for s in SM_states:
            self.sm[s] = {}
            for e in SM_events:
                self.sm[s][e] = self._empty
            self.thresh[s] = 0

        # Fill in the functions to handle each event for each status
        self.sm[SM_states.STANDBY][SM_events.START] = self._standby_start
        self.sm[SM_states.STANDBY][SM_events.GOAL_REACHED] = self._standby_goal_reached
        self.thresh[SM_states.STANDBY] = 3

        self.sm[SM_states.PICKING][SM_events.GOAL_REACHED] = self._picking_goal_reached
        self.thresh[SM_states.PICKING] = 1

        self.sm[SM_states.GRASPING][SM_events.ATTACHED] = self._grasping_attached

        self.sm[SM_states.LIFTING][SM_events.GOAL_REACHED] = self._lifting_goal_reached

        # DETACHED and TIMEOUT are handled identically regardless of state
        for s in SM_states:
            self.sm[s][SM_events.DETACHED] = self._all_detached
            self.sm[s][SM_events.TIMEOUT] = self._all_timeout

        self.current_state = SM_states.STANDBY
        self.previous_state = -1
        self._physxIFace = _physx.acquire_physx_interface()

    # Auxiliary functions

    def _empty(self, *args):
        """
        Empty function to use on states that do not react to some specific event.
        """
        pass

    def change_state(self, new_state, print_state=True):
        """
        Function called every time an event handling changes current state.
        """
        self.current_state = new_state
        self.start_time = self._time
        if print_state: carb.log_warn(str(new_state))

    def goalReached(self):
        """
        Checks if the robot has reached a certain waypoint in the trajectory.
        Compares each frame component (orig, axis_y, axis_z) against a precision
        threshold; the strict per-state threshold is used only for the final waypoint.
        """
        if self._is_moving:
            state = self.robot.end_effector.status.current_frame
            target = self.robot.end_effector.status.current_target
            error = 0
            # index 1 (axis_x) is deliberately skipped
            for i in [0, 2, 3]:
                k = statedic[i]
                state_v = state[k]
                target_v = target[k]
                error = np.linalg.norm(state_v - target_v)
                # General Threshold is the least strict
                thresh = self.precision_thresh[-1][i]
                if len(self.waypoints) == 0:
                    thresh = self.precision_thresh[self.thresh[self.current_state]][i]
                if error > thresh:
                    return False
            self._is_moving = False
            return True
        return False

    def get_current_state_tr(self):
        """
        Gets current End Effector Transform, converted from Motion position and Rotation matrix.
        Returned transform position is in centimeters (frame orig is scaled by 100).
        """
        # Gets end effector frame
        state = self.robot.end_effector.status.current_frame
        orig = state["orig"] * 100.0

        mat = Gf.Matrix3f(
            *state["axis_x"].astype(float), *state["axis_y"].astype(float), *state["axis_z"].astype(float)
        )
        q = mat.ExtractRotation().GetQuaternion()
        (q_x, q_y, q_z) = q.GetImaginary()
        q = [q_x, q_y, q_z, q.GetReal()]
        tr = _dynamic_control.Transform()
        tr.p = list(orig)
        tr.r = q
        return tr

    def lerp_to_pose(self, pose, n_waypoints=1):
        """
        Adds spherical linear interpolated waypoints from last pose in the waypoint list to the provided pose;
        if the waypoint list is empty, use current pose.
        """
        if len(self.waypoints) == 0:
            start = self.get_current_state_tr()
            start.p = math_utils.mul(start.p, 0.01)
        else:
            start = self.waypoints[-1]

        if n_waypoints > 1:
            for i in range(n_waypoints):
                self.waypoints.append(math_utils.slerp(start, pose, (i + 1.0) / n_waypoints))
        else:
            self.waypoints.append(pose)

    def move_to_zero(self):
        """
        Command the arm back to its default configuration (empty target frame).
        """
        self._is_moving = False
        self.robot.end_effector.go_local(
            orig=[], axis_x=[], axis_y=[], axis_z=[], use_default_config=True, wait_for_target=False, wait_time=5.0
        )

    def move_to_target(self):
        """
        Move arm towards target with RMP controller.
        """
        xform_attr = self.target_position
        self._is_moving = True

        orig = np.array([xform_attr.p.x, xform_attr.p.y, xform_attr.p.z])
        axis_y = np.array(math_utils.get_basis_vector_y(xform_attr.r))
        axis_z = np.array(math_utils.get_basis_vector_z(xform_attr.r))
        self.robot.end_effector.go_local(
            orig=orig,
            axis_x=[],
            axis_y=axis_y,
            axis_z=axis_z,
            use_default_config=True,
            wait_for_target=False,
            wait_time=5.0,
        )

    def get_target_orientation(self):
        """
        Gets target gripper orientation given target angle and a planar grasp.

        Returns:
            list: quaternion as [x, y, z, w]
        """
        angle = self.target_angle * np.pi / 180
        mat = Gf.Matrix3f(
            -np.cos(angle), -np.sin(angle), 0, -np.sin(angle), np.cos(angle), 0, 0, 0, -1
        )
        q = mat.ExtractRotation().GetQuaternion()
        (q_x, q_y, q_z) = q.GetImaginary()
        q = [q_x, q_y, q_z, q.GetReal()]
        return q

    def get_target_to_point(self, offset_position=[]):
        """
        Get target Panda arm pose from target position and angle.
        Final position is converted to meters (scaled by 0.01).
        """
        offset = _dynamic_control.Transform()
        if offset_position:

            offset.p.x = offset_position[0]
            offset.p.y = offset_position[1]
            offset.p.z = offset_position[2]

        target_pose = _dynamic_control.Transform()
        target_pose.p = self.target_point
        target_pose.r = self.get_target_orientation()
        target_pose = math_utils.mul(target_pose, offset)
        target_pose.p = math_utils.mul(target_pose.p, 0.01)
        return target_pose

    def set_target_to_point(self, offset_position=[], n_waypoints=1, clear_waypoints=True):
        """
        Clears waypoints list, and sets a new waypoint list towards the a given point in space.
        """
        target_position = self.get_target_to_point(offset_position=offset_position)
        # linear interpolate to target pose
        if clear_waypoints:
            self.waypoints.clear()
        self.lerp_to_pose(target_position, n_waypoints=n_waypoints)
        # Get first waypoint target
        self.target_position = self.waypoints.popleft()

    def step(self, timestamp, start=False, reset=False):
        """
        Steps the State machine, handling which event to call.
        Event priority: reset > detached > goal reached > start > attached > timeout > none.
        """
        if self.current_state != self.previous_state:
            self.previous_state = self.current_state
        if not self.start:
            self.start = start

        if self.current_state in [SM_states.GRASPING, SM_states.LIFTING]:
            # object grasped
            if not self.robot.end_effector.gripper.is_closed(1e-1) and not self.robot.end_effector.gripper.is_moving(1e-2):
                self._attached = True
                # self.is_closed = False
            # object not grasped
            elif self.robot.end_effector.gripper.is_closed(1e-1):
                self._detached = True
                self.is_closed = True

        # Process events
        if reset:
            # reset to default pose, clear waypoints, and re-initialize event handlers
            self.current_state = SM_states.STANDBY
            self.previous_state = -1
            self.robot.end_effector.gripper.open()
            self.evaluation = None
            self.start = False
            self._time = 0
            self.start_time = self._time
            self.pick_count = 0
            self.waypoints.clear()
            self._detached = False
            self._attached = False
            self.target_position = self.default_position
            self.move_to_target()
        elif self._detached:
            self._detached = False
            self.sm[self.current_state][SM_events.DETACHED]()
        elif self.goalReached():
            if len(self.waypoints) == 0:
                self.sm[self.current_state][SM_events.GOAL_REACHED]()
            else:
                self.target_position = self.waypoints.popleft()
                self.move_to_target()
                # self.start_time = self._time
        elif self.current_state == SM_states.STANDBY and self.start:
            self.sm[self.current_state][SM_events.START]()
        elif self._attached:
            self._attached = False
            self.sm[self.current_state][SM_events.ATTACHED]()
        elif self._time - self.start_time > self.default_timeout:
            self.sm[self.current_state][SM_events.TIMEOUT]()
        else:
            self.sm[self.current_state][SM_events.NONE]()
        # simulation is assumed to step at 60 Hz
        self._time += 1.0 / 60.0

    # Event handling functions. Each state has its own event handler function depending on which event happened

    def _standby_start(self, *args):
        """
        Handles the start event when in standby mode.
        Proceeds to move towards target grasp pose.
        """
        # Tell motion planner controller to ignore current object as an obstacle
        self.pick_count = 0
        self.evaluation = None
        self.lerp_to_pose(self.default_position, 1)
        self.lerp_to_pose(self.default_position, 60)
        self.robot.end_effector.gripper.open()
        # set target above the current bin with offset of 10 cm
        self.set_target_to_point(offset_position=[0.0, 0.0, -10.0], n_waypoints=90, clear_waypoints=False)
        # pause before lowering to target object
        self.lerp_to_pose(self.waypoints[-1], 180)
        self.set_target_to_point(n_waypoints=90, clear_waypoints=False)
        # start arm movement
        self.move_to_target()
        # Move to next state
        self.change_state(SM_states.PICKING)

    # NOTE: As is, this method is never executed
    def _standby_goal_reached(self, *args):
        """
        Reset grasp execution.
        """
        self.move_to_zero()
        self.start = True

    def _picking_goal_reached(self, *args):
        """
        Grasp pose reached, close gripper.
        """
        self.robot.end_effector.gripper.close()
        self.is_closed = True
        # Move to next state
        self.move_to_target()
        self.robot.end_effector.gripper.width_history.clear()
        self.change_state(SM_states.GRASPING)

    def _grasping_attached(self, *args):
        """
        Object grasped, lift arm.
        """
        self.waypoints.clear()
        offset = _dynamic_control.Transform()
        offset.p.z = -10
        target_pose = math_utils.mul(self.get_current_state_tr(), offset)
        target_pose.p = math_utils.mul(target_pose.p, 0.01)
        self.lerp_to_pose(target_pose, n_waypoints=60)
        self.lerp_to_pose(target_pose, n_waypoints=120)
        # Move to next state
        self.move_to_target()
        self.robot.end_effector.gripper.width_history.clear()
        self.change_state(SM_states.LIFTING)

    def _lifting_goal_reached(self, *args):
        """
        Finished executing grasp successfully, resets for next grasp execution.
        """
        self.is_closed = False
        self.robot.end_effector.gripper.open()
        self._all_detached()
        self.pick_count += 1
        self.evaluation = GRASP_eval.SUCCESS
        carb.log_warn(str(GRASP_eval.SUCCESS))

    def _all_timeout(self, *args):
        """
        Timeout reached and reset.
        """
        self.change_state(SM_states.STANDBY, print_state=False)
        self.robot.end_effector.gripper.open()
        self.start = False
        self.waypoints.clear()
        self.target_position = self.default_position
        self.lerp_to_pose(self.default_position, 1)
        self.lerp_to_pose(self.default_position, 10)
        self.lerp_to_pose(self.default_position, 60)
        self.move_to_target()
        self.evaluation = GRASP_eval.FAILURE
        carb.log_warn(str(GRASP_eval.FAILURE))

    def _all_detached(self, *args):
        """
        Object detached and reset.
        """
        self.change_state(SM_states.STANDBY, print_state=False)
        self.start = False
        self.waypoints.clear()
        self.lerp_to_pose(self.target_position, 60)
        self.lerp_to_pose(self.default_position, 10)
        self.lerp_to_pose(self.default_position, 60)
        self.move_to_target()
        self.evaluation = GRASP_eval.FAILURE
        carb.log_warn(str(GRASP_eval.FAILURE))
class GraspObject(Scenario):
    """ Defines an obstacle avoidance scenario

    Scenarios define the life cycle within kit and handle init, startup, shutdown etc.
    """

    def __init__(self, kit, dc, mp):
        """
        Initialize scenario.

        Args:
            kit (omni.isaac.synthetic_utils.scripts.omnikit.OmniKitHelper): helper class for launching OmniKit from a python environment
            dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
            mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
        """
        super().__init__(kit.editor, dc, mp)
        self._kit = kit
        self._paused = True
        self._start = False
        self._reset = False
        self._time = 0
        self._start_time = 0
        self.current_state = SM_states.STANDBY
        self.timeout_max = 8.0
        self.pick_and_place = None
        self._pending_stop = False
        self._gripper_open = False

        self.current_obj = 0
        self.max_objs = 100
        self.num_objs = 3

        self.add_objects_timeout = -1
        self.franka_solid = None

        result, nucleus_server = find_nucleus_server()
        if result is False:
            carb.log_error("Could not find nucleus server with /Isaac folder")
        else:
            self.nucleus_server = nucleus_server

    def __del__(self):
        """
        Cleanup scenario objects when deleted, force garbage collection.
        """
        if self.franka_solid:
            self.franka_solid.end_effector.gripper = None
        super().__del__()

    def add_object_path(self, object_path, from_server=False):
        """
        Add object usd path(s) to the scenario's object pool.

        Args:
            object_path (str): a single usd file path, or a local directory which is
                searched recursively for ``*.usd`` files
            from_server (bool): if True, resolve object_path against the nucleus server
        """
        if from_server and hasattr(self, 'nucleus_server'):
            object_path = os.path.join(self.nucleus_server, object_path)
        if not from_server and os.path.isdir(object_path):
            # Fix: previously the glob result was bound to `objects_usd`, leaving
            # `object_usd` undefined and raising NameError for directory inputs.
            object_usd = glob.glob(os.path.join(object_path, '**/*.usd'), recursive=True)
        else:
            object_usd = [object_path]
        if hasattr(self, 'objects_usd'):
            self.objects_usd.extend(object_usd)
        else:
            self.objects_usd = object_usd

    def create_franka(self, *args):
        """
        Create franka USD objects and bin USD objects, set up the RMP target prim,
        ground plane and physics scene.
        """
        super().create_franka()
        if self.asset_path is None:
            return

        # Load robot environment and set its transform
        self.env_path = "/scene"
        robot_usd = self.asset_path + "/Robots/Franka/franka.usd"
        robot_path = "/scene/robot"
        create_prim_from_usd(self._stage, robot_path, robot_usd, Gf.Vec3d(0, 0, 0))

        bin_usd = self.asset_path + "/Props/KLT_Bin/large_KLT.usd"
        bin_path = "/scene/bin"
        create_prim_from_usd(self._stage, bin_path, bin_usd, Gf.Vec3d(40, 0, 4))

        # Set robot end effector Target
        target_path = "/scene/target"
        if self._stage.GetPrimAtPath(target_path):
            return

        GoalPrim = self._stage.DefinePrim(target_path, "Xform")
        self.default_position = _dynamic_control.Transform()
        self.default_position.p = [0.4, 0.0, 0.3]
        self.default_position.r = [0.0, 1.0, 0.0, 0.0]  # TODO: Check values for stability
        p = self.default_position.p
        r = self.default_position.r
        set_translate(GoalPrim, Gf.Vec3d(p.x * 100, p.y * 100, p.z * 100))
        set_rotate(GoalPrim, Gf.Matrix3d(Gf.Quatd(r.w, r.x, r.y, r.z)))

        # Setup physics simulation
        add_ground_plane(self._stage, "/groundPlane", "Z", 1000.0, Gf.Vec3f(0.0), Gf.Vec3f(1.0))
        setup_physics(self._stage)

    def rand_position(self, bound, margin=0, z_range=None):
        """
        Obtain random position contained within a specified bound.

        Args:
            bound: pair of (min, max) corner points
            margin (float): fraction by which to shrink the sampled region
            z_range (tuple): optional explicit (min, max) range for z
        """
        x_range = (bound[0][0] * (1 - margin), bound[1][0] * (1 - margin))
        y_range = (bound[0][1] * (1 - margin), bound[1][1] * (1 - margin))
        if z_range is None:
            z_range = (bound[0][2] * (1 - margin), bound[1][2] * (1 - margin))
        x = np.random.uniform(*x_range)
        y = np.random.uniform(*y_range)
        z = np.random.uniform(*z_range)
        return Gf.Vec3d(x, y, z)

    # combine add_object and add_and_register_object
    def add_object(self, *args, register=True, position=None):
        """
        Add object to scene; optionally register it as a rigid body.
        """
        prim = self.create_new_objects(position=position)
        if not register:
            return prim
        self._kit.update()
        if not hasattr(self, 'objects'):
            self.objects = []
        self.objects.append(RigidBody(prim, self._dc))

    def create_new_objects(self, *args, position=None):
        """
        Randomly select and create prim of object in scene.
        Returns None when no object usd paths have been added.
        """
        if not hasattr(self, 'objects_usd'):
            return
        prim_usd_path = self.objects_usd[random.randint(0, len(self.objects_usd) - 1)]
        prim_env_path = "/scene/objects/object_{}".format(self.current_obj)
        if position is None:
            position = self.rand_position(self.bin_solid.get_bound(), margin=0.2, z_range=(10, 10))
        prim = create_prim_from_usd(self._stage, prim_env_path, prim_usd_path, position)
        if hasattr(self, 'current_obj'): self.current_obj += 1
        else: self.current_obj = 0
        return prim

    def register_objects(self, *args):
        """
        Register all objects found under /scene/objects as rigid bodies.
        """
        self.objects = []
        objects_path = '/scene/objects'
        objects_prim = self._stage.GetPrimAtPath(objects_path)
        if objects_prim.IsValid():
            for object_prim in objects_prim.GetChildren():
                self.objects.append(RigidBody(object_prim, self._dc))

    # TODO: Delete method
    def add_and_register_object(self, *args):
        """
        Create a new object and register it as a rigid body (superseded by add_object).
        """
        prim = self.create_new_objects()
        self._kit.update()
        if not hasattr(self, 'objects'):
            self.objects = []
        self.objects.append(RigidBody(prim, self._dc))

    def register_scene(self, *args):
        """
        Register world, panda arm, bin, and objects.
        """
        self.world = World(self._dc, self._mp)
        self.register_assets(args)
        self.register_objects(args)

    def register_assets(self, *args):
        """
        Connect franka controller to usd assets.
        """
        # register robot with RMP
        robot_path = "/scene/robot"
        self.franka_solid = Franka(
            self._stage, self._stage.GetPrimAtPath(robot_path), self._dc, self._mp, self.world, default_config
        )

        # register bin
        bin_path = "/scene/bin"
        bin_prim = self._stage.GetPrimAtPath(bin_path)
        self.bin_solid = RigidBody(bin_prim, self._dc)

        # register stage machine
        self.pick_and_place = PickAndPlaceStateMachine(
            self._stage,
            self.franka_solid,
            self._stage.GetPrimAtPath("/scene/robot/panda_hand"),
            self.default_position,
        )

    def perform_tasks(self, *args):
        """
        Perform all tasks in scenario if multiple robots are present.
        """
        self._start = True
        self._paused = False
        return False

    def step(self, step):
        """
        Step the scenario, can be used to update things in the scenario per frame.
        Drives the state machine while running, or mirrors the target prim
        transform to the RMP controller while paused.
        """
        if self._editor.is_playing():
            if self._pending_stop:
                self.stop_tasks()
                return
            # Updates current references and locations for the robot.
            self.world.update()
            self.franka_solid.update()

            target = self._stage.GetPrimAtPath("/scene/target")
            xform_attr = target.GetAttribute("xformOp:transform")
            if self._reset:
                self._paused = False
            if not self._paused:
                self._time += 1.0 / 60.0
                self.pick_and_place.step(self._time, self._start, self._reset)
                if self._reset:
                    self._paused = True
                    self._time = 0
                    self._start_time = 0
                    p = self.default_position.p
                    r = self.default_position.r
                    set_translate(target, Gf.Vec3d(p.x * 100, p.y * 100, p.z * 100))
                    set_rotate(target, Gf.Matrix3d(Gf.Quatd(r.w, r.x, r.y, r.z)))
                else:
                    state = self.franka_solid.end_effector.status.current_target
                    state_1 = self.pick_and_place.target_position
                    tr = state["orig"] * 100.0
                    set_translate(target, Gf.Vec3d(tr[0], tr[1], tr[2]))
                    set_rotate(target, Gf.Matrix3d(Gf.Quatd(state_1.r.w, state_1.r.x, state_1.r.y, state_1.r.z)))
                self._start = False
                self._reset = False
                if self.add_objects_timeout > 0:
                    self.add_objects_timeout -= 1
                    if self.add_objects_timeout == 0:
                        self.create_new_objects()
            else:
                # paused: drive the end effector from the (possibly user-edited) target prim
                translate_attr = xform_attr.Get().GetRow3(3)
                rotate_x = xform_attr.Get().GetRow3(0)
                rotate_y = xform_attr.Get().GetRow3(1)
                rotate_z = xform_attr.Get().GetRow3(2)
                orig = np.array(translate_attr) / 100.0
                axis_x = np.array(rotate_x)
                axis_y = np.array(rotate_y)
                axis_z = np.array(rotate_z)
                self.franka_solid.end_effector.go_local(
                    orig=orig,
                    axis_x=axis_x,  # TODO: consider setting this to [] for stability reasons
                    axis_y=axis_y,
                    axis_z=axis_z,
                    use_default_config=True,
                    wait_for_target=False,
                    wait_time=5.0,
                )

    def stop_tasks(self, *args):
        """
        Stop tasks in the scenario if any.
        """
        if self.pick_and_place is not None:
            if self._editor.is_playing():
                self._reset = True
                self._pending_stop = False
            else:
                self._pending_stop = True

    def pause_tasks(self, *args):
        """
        Pause tasks in the scenario.

        Returns:
            bool: the new paused flag.
        """
        self._paused = not self._paused
        return self._paused

    # TODO: use gripper.width == 0 as a proxy for _gripper_open == False
    def actuate_gripper(self):
        """
        Actuate Panda gripper: toggle between open and closed.
        """
        if self._gripper_open:
            self.franka_solid.end_effector.gripper.close()
            self._gripper_open = False
        else:
            self.franka_solid.end_effector.gripper.open()
            self._gripper_open = True

    def set_target_angle(self, angle):
        """
        Set grasp angle in degrees.
        """
        if self.pick_and_place is not None:
            self.pick_and_place.target_angle = angle

    def set_target_position(self, position):
        """
        Set grasp position.
        """
        if self.pick_and_place is not None:
            self.pick_and_place.target_point = position
| 27,230 | Python | 35.502681 | 137 | 0.573265 |
erasromani/isaac-sim-python/grasp/grasping_scenarios/franka.py | # Credits: The majority of this code is taken from build code associated with nvidia/isaac-sim:2020.2.2_ea with minor modifications.
import time
import os
import numpy as np
import carb.tokens
import omni.kit.settings
from pxr import Usd, UsdGeom, Gf
from collections import deque
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.motion_planning import _motion_planning
from omni.isaac.samples.scripts.utils import math_utils
# default joint configuration
default_config = (0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75)
# Alternative default config for motion planning
alternate_config = [
(1.5356, -1.3813, -1.5151, -2.0015, -1.3937, 1.5887, 1.4597),
(-1.5356, -1.3813, 1.5151, -2.0015, 1.3937, 1.5887, 0.4314),
]
class Gripper:
    """
    Gripper for franka.
    """

    def __init__(self, dc, ar):
        """
        Initialize gripper.

        Args:
            dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
            ar (int): articulation identifier
        """
        self.dc = dc
        self.ar = ar
        self.finger_j1 = self.dc.find_articulation_dof(self.ar, "panda_finger_joint1")
        self.finger_j2 = self.dc.find_articulation_dof(self.ar, "panda_finger_joint2")
        self.width = 0  # commanded total opening width, applied on update()
        self.width_history = deque(maxlen=50)

    def open(self, wait=False):
        """
        Open gripper. Moves through an intermediate width first when nearly closed.
        """
        if self.width < 0.045:
            self.move(0.045, wait=True)
        self.move(0.09, wait=wait)

    def close(self, wait=False, force=0):
        """
        Close gripper. Note: the `force` argument is currently unused by the body.
        """
        self.move(0, wait=wait)

    def move(self, width=0.03, speed=0.2, wait=False):
        """
        Modify commanded width. Note: `speed` and `wait` are currently unused by the body.
        """
        self.width = width
        # if wait:
        #     time.sleep(0.5)

    def update(self):
        """
        Actuate gripper: push the commanded width to both finger joint targets
        and record the measured width.
        """
        # width is split evenly between the two fingers; * 100 converts to the stage's cm units
        self.dc.set_dof_position_target(self.finger_j1, self.width * 0.5 * 100)
        self.dc.set_dof_position_target(self.finger_j2, self.width * 0.5 * 100)
        self.width_history.append(self.get_width())

    def get_width(self):
        """
        Get current width (sum of both finger positions).
        """
        return sum(self.get_position())

    def get_position(self):
        """
        Get left and right finger local position.
        """
        return self.dc.get_dof_position(self.finger_j1), self.dc.get_dof_position(self.finger_j2)

    def get_velocity(self, from_articulation=True):
        """
        Get left and right finger local velocity, either from the articulation DOFs
        or from the rigid bodies' linear velocity norms.
        """
        if from_articulation:
            return (self.dc.get_dof_velocity(self.finger_j1), self.dc.get_dof_velocity(self.finger_j2))
        else:
            leftfinger_handle = self.dc.get_rigid_body(self.dc.get_articulation_path(self.ar) + '/panda_leftfinger')
            rightfinger_handle = self.dc.get_rigid_body(self.dc.get_articulation_path(self.ar) + '/panda_rightfinger')
            leftfinger_velocity = np.linalg.norm(np.array(self.dc.get_rigid_body_local_linear_velocity(leftfinger_handle)))
            rightfinger_velocity = np.linalg.norm(np.array(self.dc.get_rigid_body_local_linear_velocity(rightfinger_handle)))
            return (leftfinger_velocity, rightfinger_velocity)

    def is_moving(self, tol=1e-2):
        """
        Determine if gripper fingers are moving: true until the width history is
        full AND its standard deviation falls within tol.
        """
        if len(self.width_history) < self.width_history.maxlen or np.array(self.width_history).std() > tol:
            return True
        else:
            return False

    def get_state(self):
        """
        Get gripper state (the last two articulation DOF states).
        """
        dof_states = self.dc.get_articulation_dof_states(self.ar, _dynamic_control.STATE_ALL)
        return dof_states[-2], dof_states[-1]

    def is_closed(self, tol=1e-2):
        """
        Determine if gripper is closed (measured width below tol).
        """
        if self.get_width() < tol:
            return True
        else:
            return False
class Status:
    """
    Class that contains status for end effector
    """

    def __init__(self, mp, rmp_handle):
        """
        Initialize status object.

        Args:
            mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            rmp_handle (int): RMP handle identifier
        """
        self.mp = mp
        self.rmp_handle = rmp_handle
        self.orig = np.array([0, 0, 0])
        self.axis_x = np.array([1, 0, 0])
        self.axis_y = np.array([0, 1, 0])
        self.axis_z = np.array([0, 0, 1])
        self.current_frame = {"orig": self.orig, "axis_x": self.axis_x, "axis_y": self.axis_y, "axis_z": self.axis_z}
        self.target_frame = {"orig": self.orig, "axis_x": self.axis_x, "axis_y": self.axis_y, "axis_z": self.axis_z}
        # Fix: initialize current_target so readers (e.g. goalReached via
        # status.current_target) do not hit AttributeError before the first update().
        self.current_target = dict(self.current_frame)
        self.frame = self.current_frame

    def update(self):
        """
        Update end effector state: refresh the current frame and current RMP target
        from the motion planning interface.
        """
        state = self.mp.getRMPState(self.rmp_handle)
        target = self.mp.getRMPTarget(self.rmp_handle)

        self.orig = np.array([state[0].x, state[0].y, state[0].z])
        self.axis_x = np.array([state[1].x, state[1].y, state[1].z])
        self.axis_y = np.array([state[2].x, state[2].y, state[2].z])
        self.axis_z = np.array([state[3].x, state[3].y, state[3].z])

        self.current_frame = {"orig": self.orig, "axis_x": self.axis_x, "axis_y": self.axis_y, "axis_z": self.axis_z}
        self.frame = self.current_frame
        self.current_target = {
            "orig": np.array([target[0].x, target[0].y, target[0].z]),
            "axis_x": np.array([target[1].x, target[1].y, target[1].z]),
            "axis_y": np.array([target[2].x, target[2].y, target[2].z]),
            "axis_z": np.array([target[3].x, target[3].y, target[3].z]),
        }
class EndEffector:
    """
    End effector object that controls movement.
    """

    def __init__(self, dc, mp, ar, rmp_handle):
        """
        Initialize end effector.

        Args:
            dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
            mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            ar (int): articulation identifier
            rmp_handle (int): RMP handle identifier
        """
        self.dc = dc
        self.ar = ar
        self.mp = mp
        self.rmp_handle = rmp_handle
        self.gripper = Gripper(dc, ar)
        self.status = Status(mp, rmp_handle)
        self.UpRot = Gf.Rotation(Gf.Vec3d(0, 0, 1), 90)

    def freeze(self):
        """
        Hold the end effector at its current pose (re-target to the current frame).
        """
        self.go_local(
            orig=self.status.orig, axis_x=self.status.axis_x, axis_z=self.status.axis_z, wait_for_target=False
        )

    def go_local(
        self,
        target=None,
        orig=[],
        axis_x=[],
        axis_y=[],
        axis_z=[],
        required_orig_err=0.01,
        required_axis_x_err=0.01,
        required_axis_y_err=0.01,
        required_axis_z_err=0.01,
        orig_thresh=None,
        axis_x_thresh=None,
        axis_y_thresh=None,
        axis_z_thresh=None,
        approach_direction=[],
        approach_standoff=0.1,
        approach_standoff_std_dev=0.001,
        use_level_surface_orientation=False,
        use_target_weight_override=True,
        use_default_config=False,
        wait_for_target=True,
        wait_time=None,
    ):
        """
        Command the RMP controller towards a partial pose target.

        Frame components (orig / axis_x / axis_y / axis_z) may each be empty,
        in which case they are left unconstrained. A `target` dict, when given,
        overrides the individual component arguments. When `wait_for_target`
        and `wait_time` are set, blocks until the position error drops below
        `required_orig_err` or `wait_time` seconds elapse.
        """
        self.target_weight_override_value = 10000.0
        self.target_weight_override_std_dev = 0.03

        # per-component error thresholds can be overridden individually
        if orig_thresh:
            required_orig_err = orig_thresh
        if axis_x_thresh:
            required_axis_x_err = axis_x_thresh
        if axis_y_thresh:
            required_axis_y_err = axis_y_thresh
        if axis_z_thresh:
            required_axis_z_err = axis_z_thresh

        if target:
            orig = target["orig"]
            if "axis_x" in target and target["axis_x"] is not None:
                axis_x = target["axis_x"]
            if "axis_y" in target and target["axis_y"] is not None:
                axis_y = target["axis_y"]
            if "axis_z" in target and target["axis_z"] is not None:
                axis_z = target["axis_z"]

        orig = np.array(orig)
        axis_x = np.array(axis_x)
        axis_y = np.array(axis_y)
        axis_z = np.array(axis_z)

        approach = _motion_planning.Approach((0, 0, 1), 0, 0)

        if len(approach_direction) != 0:
            approach = _motion_planning.Approach(approach_direction, approach_standoff, approach_standoff_std_dev)

        # Build the partial pose command from whichever components were provided
        pose_command = _motion_planning.PartialPoseCommand()
        if len(orig) > 0:
            pose_command.set(_motion_planning.Command(orig, approach), int(_motion_planning.FrameElement.ORIG))
        if len(axis_x) > 0:
            pose_command.set(_motion_planning.Command(axis_x), int(_motion_planning.FrameElement.AXIS_X))
        if len(axis_y) > 0:
            pose_command.set(_motion_planning.Command(axis_y), int(_motion_planning.FrameElement.AXIS_Y))
        if len(axis_z) > 0:
            pose_command.set(_motion_planning.Command(axis_z), int(_motion_planning.FrameElement.AXIS_Z))

        self.mp.goLocal(self.rmp_handle, pose_command)

        if wait_for_target and wait_time:
            error = 1
            future_time = time.time() + wait_time

            while error > required_orig_err and time.time() < future_time:
                # time.sleep(0.1)
                error = self.mp.getError(self.rmp_handle)

    def look_at(self, gripper_pos, target):
        """
        Orient the end effector at gripper_pos to face the target point.
        """
        # Y up works for look at but sometimes flips, go_local might be a safer bet with a locked y_axis
        orientation = math_utils.lookAt(gripper_pos, target, (0, 1, 0))
        mat = Gf.Matrix3d(orientation).GetTranspose()

        self.go_local(
            orig=[gripper_pos[0], gripper_pos[1], gripper_pos[2]],
            axis_x=[mat.GetColumn(0)[0], mat.GetColumn(0)[1], mat.GetColumn(0)[2]],
            axis_z=[mat.GetColumn(2)[0], mat.GetColumn(2)[1], mat.GetColumn(2)[2]],
        )
class Franka:
    """
    Franka objects that contains implementation details for robot control.
    """

    def __init__(self, stage, prim, dc, mp, world=None, group_path="", default_config=None, is_ghost=False):
        """
        Initialize Franka controller.

        Args:
            stage (pxr.Usd.Stage): usd stage
            prim (pxr.Usd.Prim): robot prim
            dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
            mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            world (omni.isaac.samples.scripts.utils.world.World): simulation world handler
            default_config (tuple or list): default configuration for robot revolute joint drivers
            is_ghost (bool): flag for turning off collision and modifying visuals for robot arm
        """
        self.dc = dc
        self.mp = mp
        self.prim = prim
        self.stage = stage
        # get handle to the articulation for this franka
        self.ar = self.dc.get_articulation(prim.GetPath().pathString)
        self.is_ghost = is_ghost

        self.base = self.dc.get_articulation_root_body(self.ar)
        body_count = self.dc.get_articulation_body_count(self.ar)
        # gravity is disabled on every link; RMP compensates for it
        for bodyIdx in range(body_count):
            body = self.dc.get_articulation_body(self.ar, bodyIdx)
            self.dc.set_rigid_body_disable_gravity(body, True)

        exec_folder = os.path.abspath(
            carb.tokens.get_tokens_interface().resolve(
                f"{os.environ['ISAAC_PATH']}/exts/omni.isaac.motion_planning/resources/lula/lula_franka"
            )
        )

        self.rmp_handle = self.mp.registerRmp(
            exec_folder + "/urdf/lula_franka_gen.urdf",
            exec_folder + "/config/robot_descriptor.yaml",
            exec_folder + "/config/franka_rmpflow_common.yaml",
            prim.GetPath().pathString,
            "right_gripper",
            True,
        )
        print("franka rmp handle", self.rmp_handle)
        if world is not None:
            self.world = world
            self.world.rmp_handle = self.rmp_handle
            self.world.register_parent(self.base, self.prim, "panda_link0")

        settings = omni.kit.settings.get_settings_interface()
        self.mp.setFrequency(self.rmp_handle, settings.get("/physics/timeStepsPerSecond"), True)

        self.end_effector = EndEffector(self.dc, self.mp, self.ar, self.rmp_handle)
        if default_config:
            self.mp.setDefaultConfig(self.rmp_handle, default_config)
        self.target_visibility = True
        if self.is_ghost:
            self.target_visibility = False

        self.imageable = UsdGeom.Imageable(self.prim)

    def __del__(self):
        """
        Unregister RMP.
        """
        self.mp.unregisterRmp(self.rmp_handle)
        print(" Delete Franka")

    def set_pose(self, pos, rot):
        """
        Set robot target pose in the local frame.
        """
        # Fix: the motion planning interface is stored as self.mp; the previous
        # self._mp reference raised AttributeError on every call.
        self.mp.setTargetLocal(self.rmp_handle, pos, rot)

    def set_speed(self, speed_level):
        """
        Set robot speed. Currently a no-op.
        """
        pass

    def update(self):
        """
        Update robot state: gripper targets, end effector status, and target
        prim visibility (toggled only when it differs from the desired flag).
        """
        self.end_effector.gripper.update()
        self.end_effector.status.update()
        if self.imageable:
            if self.target_visibility is not self.imageable.ComputeVisibility(Usd.TimeCode.Default()):
                if self.target_visibility:
                    self.imageable.MakeVisible()
                else:
                    self.imageable.MakeInvisible()

    def send_config(self, config):
        """
        Set robot default configuration (ignored for ghost robots).
        """
        if self.is_ghost is False:
            self.mp.setDefaultConfig(self.rmp_handle, config)
| 13,794 | Python | 34.371795 | 132 | 0.582935 |
erasromani/isaac-sim-python/grasp/utils/isaac_utils.py | # Credits: All code except class RigidBody and Camera is taken from build code associated with nvidia/isaac-sim:2020.2.2_ea.
import numpy as np
import omni.kit
from pxr import Usd, UsdGeom, Gf, PhysicsSchema, PhysxSchema
def create_prim_from_usd(stage, prim_env_path, prim_usd_path, location):
    """
    Reference an external USD file under a new Xform prim and position it.

    Args:
        stage (pxr.Usd.Stage): stage to author the prim on
        prim_env_path (str): stage path at which the Xform anchor is created
        prim_usd_path (str): path/URL of the USD file to reference
        location: translation applied to the new prim

    Returns:
        pxr.Usd.Prim: the prim fetched back from the stage at the new path
    """
    # An empty Xform acts as the anchor; the USD payload is attached by reference.
    anchor = stage.DefinePrim(prim_env_path, "Xform")
    anchor.GetReferences().AddReference(prim_usd_path)
    # Place the referenced asset at the requested location.
    set_translate(anchor, location)
    return stage.GetPrimAtPath(anchor.GetPath().pathString)
def set_up_z_axis(stage):
    """
    Mark the stage's up axis as +Z.

    Args:
        stage (pxr.Usd.Stage): stage whose root layer is edited in place
    """
    layer = stage.GetRootLayer()
    # The root layer may be read-only; make it writable before authoring.
    layer.SetPermissionToEdit(True)
    with Usd.EditContext(stage, layer):
        UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
def set_translate(prim, new_loc):
    """
    Set the position of a prim, reusing an existing transform op when possible.

    Args:
        prim (pxr.Usd.Prim): prim whose translation is authored
        new_loc: new translation value (e.g. Gf.Vec3d or a 3-tuple)
    """
    properties = prim.GetPropertyNames()
    if "xformOp:translate" in properties:
        translate_attr = prim.GetAttribute("xformOp:translate")
        translate_attr.Set(new_loc)
    elif "xformOp:translation" in properties:
        # Bug fix: this branch previously fetched "xformOp:translate", which
        # is not present on prims that expose "xformOp:translation".
        translation_attr = prim.GetAttribute("xformOp:translation")
        translation_attr.Set(new_loc)
    elif "xformOp:transform" in properties:
        transform_attr = prim.GetAttribute("xformOp:transform")
        matrix = prim.GetAttribute("xformOp:transform").Get()
        # Keep rotation/scale intact, replacing only the translation part.
        matrix.SetTranslateOnly(new_loc)
        transform_attr.Set(matrix)
    else:
        # No usable xform op yet: author a fresh 4x4 transform carrying only
        # the translation.
        xform = UsdGeom.Xformable(prim)
        xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
        xform_op.Set(Gf.Matrix4d().SetTranslate(new_loc))
def set_rotate(prim, rot_mat):
    """
    Set the orientation of a prim, reusing an existing transform op when possible.

    Args:
        prim (pxr.Usd.Prim): prim whose rotation is authored
        rot_mat: rotation matrix (e.g. Gf.Matrix3d)
    """
    available = prim.GetPropertyNames()
    if "xformOp:rotate" in available:
        prim.GetAttribute("xformOp:rotate").Set(rot_mat)
    elif "xformOp:transform" in available:
        transform_attr = prim.GetAttribute("xformOp:transform")
        current = prim.GetAttribute("xformOp:transform").Get()
        # Keep translation/scale intact, replacing only the rotation part.
        current.SetRotateOnly(rot_mat.ExtractRotation())
        transform_attr.Set(current)
    else:
        # No usable xform op yet: author a fresh 4x4 transform from the rotation.
        xformable = UsdGeom.Xformable(prim)
        op = xformable.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
        op.Set(Gf.Matrix4d().SetRotate(rot_mat))
def create_background(stage, background_stage):
    """
    Reference a background environment USD into the stage (idempotent).

    Args:
        stage (pxr.Usd.Stage): stage to author the background on
        background_stage (str): path/URL of the background USD to reference
    """
    path = "/background"
    if stage.GetPrimAtPath(path):
        # Background already present; nothing to do.
        return
    background = stage.DefinePrim(path, "Xform")
    background.GetReferences().AddReference(background_stage)
    # Drop the stage 104 cm so the floor sits below the table wheels, and
    # shift along -Y to bring the lighting closer.
    set_translate(background, Gf.Vec3d(0, -400, -104))
def setup_physics(stage):
    """
    Configure default physics-scene parameters for the stage.

    Creates /physics/scene with gravity scaled to the stage's length unit and
    enables CCD, stabilization, the MBP broadphase and the TGS solver; GPU
    dynamics is left disabled.

    Args:
        stage (pxr.Usd.Stage): stage to author the physics scene on
    """
    # Gravity must be expressed in stage units, so scale 9.81 m/s^2 accordingly.
    meters_per_unit = UsdGeom.GetStageMetersPerUnit(stage)
    gravity_magnitude = 9.81 / meters_per_unit
    scene = PhysicsSchema.PhysicsScene.Define(stage, "/physics/scene")
    scene.CreateGravityAttr().Set(Gf.Vec3f(0.0, 0.0, -gravity_magnitude))
    # Layer the PhysX-specific settings on top of the generic physics scene.
    PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/physics/scene"))
    physx_scene = PhysxSchema.PhysxSceneAPI.Get(stage, "/physics/scene")
    physx_scene.CreatePhysxSceneEnableCCDAttr(True)
    physx_scene.CreatePhysxSceneEnableStabilizationAttr(True)
    physx_scene.CreatePhysxSceneEnableGPUDynamicsAttr(False)
    physx_scene.CreatePhysxSceneBroadphaseTypeAttr("MBP")
    physx_scene.CreatePhysxSceneSolverTypeAttr("TGS")
class Camera:
    """
    Camera object that contains state information for a camera in the scene.

    NOTE(review): this class is redefined later in this module; at import
    time the later definition shadows this one.
    """

    def __init__(self, camera_path, translation, rotation):
        """
        Initializes the Camera object.

        Args:
            camera_path (str): path of camera in stage hierarchy
            translation (list or tuple): camera position
            rotation (list or tuple): camera orientation described by euler angles in degrees

        NOTE(review): relies on `self._kit` being provided externally — it is
        never assigned in this class; confirm against the caller.
        """
        self.prim = self._kit.create_prim(
            camera_path,
            "Camera",
            translation=translation,
            # Bug fix: was `rotation=rotatation` (typo), which raised NameError.
            rotation=rotation,
        )
        self.name = self.prim.GetPrimPath().name
        # NOTE(review): stores the function object itself, not its return
        # value; presumably a call `()` is missing — confirm against the
        # omni.kit.viewport API.
        self.vpi = omni.kit.viewport.get_viewport_interface

    def set_translate(self, position):
        """
        Set camera position.

        Args:
            position (tuple): camera position specified by (X, Y, Z)
        """
        if not isinstance(position, tuple): position = tuple(position)
        translate_attr = self.prim.GetAttribute("xformOp:translate")
        translate_attr.Set(position)

    def set_rotate(self, rotation):
        """
        Set camera orientation.

        Args:
            rotation (tuple): camera orientation specified by three euler angles in degrees
        """
        if not isinstance(rotation, tuple): rotation = tuple(rotation)
        rotate_attr = self.prim.GetAttribute("xformOp:rotateZYX")
        rotate_attr.Set(rotation)

    def activate(self):
        """
        Make this camera the active viewport camera.
        """
        self.vpi.get_viewport_window().set_active_camera(str(self.prim.GetPath()))

    def __repr__(self):
        return self.name
class Camera:
    """
    Holds state for a scene camera and provides pose and viewport helpers.
    """

    def __init__(self, camera_path, translation, rotation):
        """
        Initializes the Camera object.

        Args:
            camera_path (str): path of camera in stage hierarchy
            translation (list or tuple): camera position
            rotation (list or tuple): camera orientation described by euler angles in degrees
        """
        self.prim = self._kit.create_prim(
            camera_path,
            "Camera",
            translation=translation,
            rotation=rotation,
        )
        self.name = self.prim.GetPrimPath().name
        self.vpi = omni.kit.viewport.get_viewport_interface

    def set_translate(self, position):
        """
        Set camera position.

        Args:
            position (tuple): camera position specified by (X, Y, Z)
        """
        # Normalize list-like inputs to a tuple before authoring.
        pos = position if isinstance(position, tuple) else tuple(position)
        self.prim.GetAttribute("xformOp:translate").Set(pos)

    def set_rotate(self, rotation):
        """
        Set camera orientation.

        Args:
            rotation (tuple): camera orientation specified by three euler angles in degrees
        """
        # Normalize list-like inputs to a tuple before authoring.
        rot = rotation if isinstance(rotation, tuple) else tuple(rotation)
        self.prim.GetAttribute("xformOp:rotateZYX").Set(rot)

    def activate(self):
        """
        Make this camera the active viewport camera.
        """
        viewport = self.vpi.get_viewport_window()
        viewport.set_active_camera(str(self.prim.GetPath()))

    def __repr__(self):
        return self.name
class RigidBody:
    """
    Wraps a rigid-body prim and exposes pose/velocity queries through the
    dynamic-control interface.
    """

    def __init__(self, prim, dc):
        """
        Initializes the RigidBody object.

        Args:
            prim (pxr.Usd.Prim): rigid body prim
            dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
        """
        self.prim = prim
        self._dc = dc
        self.name = prim.GetPrimPath().name
        self.handle = self.get_rigid_body_handle()

    def get_rigid_body_handle(self):
        """
        Find the rigid-body handle among the prim's children.

        Returns:
            The handle of the last child that resolves to a rigid body, or
            None when no child does.
        """
        # Bug fix: the original left `bin_path` unbound (NameError) when no
        # child resolved to a rigid body, and re-queried the same handle a
        # second time. Track the last non-zero handle directly instead.
        handle = 0
        for child in self.prim.GetChildren():
            candidate = self._dc.get_rigid_body(child.GetPath().pathString)
            if candidate != 0:
                handle = candidate
        if handle != 0:
            return handle
        return None

    def get_linear_velocity(self):
        """
        Get linear velocity of the rigid body as a numpy array.
        """
        return np.array(self._dc.get_rigid_body_linear_velocity(self.handle))

    def get_angular_velocity(self):
        """
        Get angular velocity of the rigid body as a numpy array.
        """
        return np.array(self._dc.get_rigid_body_angular_velocity(self.handle))

    def get_speed(self):
        """
        Get the speed of the rigid body (l2 norm of its linear velocity).
        """
        return np.linalg.norm(self.get_linear_velocity())

    def get_pose(self):
        """
        Get the pose of the rigid body (position and orientation).
        """
        return self._dc.get_rigid_body_pose(self.handle)

    def get_position(self):
        """
        Get the position of the rigid body as a numpy array.
        """
        return np.array(self.get_pose().p)

    def get_orientation(self):
        """
        Get the orientation of the rigid body as a numpy array.
        """
        return np.array(self.get_pose().r)

    def get_bound(self):
        """
        Get the axis-aligned world-space bounds of the rigid body.

        Returns:
            list: [min_corner, max_corner] as numpy arrays
        """
        bound = UsdGeom.Mesh(self.prim).ComputeWorldBound(0.0, "default").GetBox()
        return [np.array(bound.GetMin()), np.array(bound.GetMax())]

    def __repr__(self):
        return self.name
| 9,822 | Python | 32.640411 | 124 | 0.633069 |
erasromani/isaac-sim-python/grasp/utils/visualize.py | import os
import ffmpeg
import matplotlib.pyplot as plt
def screenshot(sd_helper, suffix="", prefix="image", directory="images/"):
    """
    Save an RGB screenshot of the current time step of a running NVIDIA
    Omniverse Isaac-Sim simulation.

    Args:
        sd_helper (omni.isaac.synthetic_utils.SyntheticDataHelper): helper class for visualizing OmniKit simulation
        suffix (str or int): suffix for the output image filename
        prefix (str): prefix for the output image filename
        directory (str): output directory for the image
    """
    groundtruth = sd_helper.get_groundtruth(
        [
            "rgb",
        ]
    )
    # Keep only the RGB channels, dropping alpha.
    frame = groundtruth["rgb"][..., :3]
    plt.imshow(frame)
    if suffix == "":
        suffix = 0
    # Integer suffixes are zero-padded so files sort in frame order.
    if isinstance(suffix, int):
        filename = os.path.join(directory, f'{prefix}_{suffix:05}.png')
    else:
        filename = os.path.join(directory, f'{prefix}_{suffix}.png')
    plt.axis('off')
    plt.savefig(filename)
def img2vid(input_pattern, output_fn, pattern_type='glob', framerate=25):
    """
    Encode a collection of images into a video via ffmpeg.

    Args:
        input_pattern (str): input pattern for the image collection path
        output_fn (str): video output filename
        pattern_type (str): pattern type for input pattern
        framerate (int): video framerate
    """
    stream = ffmpeg.input(input_pattern, pattern_type=pattern_type, framerate=framerate)
    stream = stream.output(output_fn)
    # Overwrite any existing output and suppress ffmpeg's console chatter.
    stream.run(overwrite_output=True, quiet=True)
| 1,647 | Python | 30.692307 | 115 | 0.649059 |
pantelis-classes/omniverse-ai/README.md | # Learning in Simulated Worlds in Omniverse.
Please go to the wiki tab.
![image](https://user-images.githubusercontent.com/589439/143660504-bbcdb786-ea5f-4f74-9496-489032fa2e03.png)
https://github.com/pantelis-classes/omniverse-ai/wiki
<hr />
# Wiki Navigation
* [Home][home]
* [Isaac-Sim-SDK-Omniverse-Installation][Omniverse]
* [Synthetic-Data-Generation][SDG]
* [NVIDIA Transfer Learning Toolkit (TLT) Installation][TLT]
* [NVIDIA TAO][TAO]
* [detectnet_v2 Installation][detectnet_v2]
* [Jupyter Notebook][Jupyter-Notebook]
[home]: https://github.com/pantelis-classes/omniverse-ai/wiki
[Omniverse]: https://github.com/pantelis-classes/omniverse-ai/wiki/Isaac-Sim-SDK-Omniverse-Installation
[SDG]: https://github.com/pantelis-classes/omniverse-ai/wiki/Synthetic-Data-Generation-(Python-API)
[TLT]: https://github.com/pantelis-classes/omniverse-ai/wiki/NVIDIA-Transfer-Learning-Toolkit-(TLT)-Installation
[NTLTSD]: https://github.com/pantelis-classes/omniverse-ai/wiki/Using-NVIDIA-TLT-with-Synthetic-Data
[TAO]: https://github.com/pantelis-classes/omniverse-ai/wiki/TAO-(NVIDIA-Train,-Adapt,-and-Optimize)
[detectnet_v2]: https://github.com/pantelis-classes/omniverse-ai/wiki/detectnet_v2-Installation
[Jupyter-Notebook]: https://github.com/pantelis-classes/omniverse-ai/wiki/Jupyter-Notebook
<hr />
<a href="https://docs.google.com/document/d/1WAzdqlWE0RUns41-0P951mnsqMR7I2XV/edit?usp=sharing&ouid=112712585131518554614&rtpof=true&sd=true"> ![image](https://user-images.githubusercontent.com/589439/161171527-4e748031-ff4d-46ed-b1ac-b521cd8ffd3c.png)</a>
## Reports
<a href="https://docs.google.com/document/d/1jVXxrNgtOosZw_vAORzomSnmy45G3qK_mmk2B4oJtPg/edit?usp=sharing">Domain Randomization Paper</a><br>
This report provides an in-depth understanding of how Domain Randomization helps perception machine-learning tasks such as object detection and/or segmentation.
<a href="https://docs.google.com/document/d/1WAzdqlWE0RUns41-0P951mnsqMR7I2XV/edit?usp=sharing&ouid=112712585131518554614&rtpof=true&sd=true">Final Report</a><br>
This final report contains an in-depth explanation of the hardware/software used, the methods used to collect the data, an explanation of the data collected, trained, and pruned, and the overall conclusions made from the trained and pruned datasets.
<a href="https://docs.google.com/document/d/1WAzdqlWE0RUns41-0P951mnsqMR7I2XV/edit?usp=sharing&ouid=112712585131518554614&rtpof=true&sd=true">![image](https://user-images.githubusercontent.com/589439/161171433-d2359618-b3dc-4839-b509-c938ce401f73.png)</a>
## Authors
<a href="https://github.com/dfsanchez999">Diego Sanchez</a> | <a href="https://harp.njit.edu/~jga26/">Jibran Absarulislam</a> | <a href="https://github.com/markkcruz">Mark Cruz</a> | <a href="https://github.com/sppatel2112">Sapan Patel</a>
## Supervisor
<a href="https://pantelis.github.io/">Dr. Pantelis Monogioudis</a>
## Credits
<a href="https://developer.nvidia.com/nvidia-omniverse-platform">NVIDIA Omniverse</a><br>
A platform that enables universal interoperability across different applications and 3D ecosystem vendors providing real-time scene updates.
| 3,133 | Markdown | 57.037036 | 256 | 0.785828 |
pantelis-classes/omniverse-ai/Images/images.md | # A markdown file containing all the images in the wiki. (Saved in github's cloud)
![image](https://user-images.githubusercontent.com/589439/143155216-aad83dd6-0bc7-4c85-8c45-4696659a0ff2.png)
![image](https://user-images.githubusercontent.com/589439/143155405-5ab0c92a-10ea-4af2-bcc3-10215808025c.png)
![image](https://user-images.githubusercontent.com/589439/143155607-66dd62b1-9096-4960-af80-05c7d0560616.png)
![image](https://user-images.githubusercontent.com/589439/143155666-96fc6a9d-ca5c-4e10-bb4b-0b75c6afd331.png)
![image](https://user-images.githubusercontent.com/589439/143155774-8a7f0020-70e7-48a4-ad38-9abfda935f1b.png)
![image](https://user-images.githubusercontent.com/589439/143155905-39760d3e-ef68-4a03-8af8-8f1ea0fa7801.png)
![image](https://user-images.githubusercontent.com/589439/143155958-7fe5ce25-d447-4a07-b79b-9785ac456b9a.png)
![image](https://user-images.githubusercontent.com/589439/143155991-41cd11df-3ff9-4ca5-b112-7e63785740db.png)
![image](https://user-images.githubusercontent.com/589439/143156000-5b8dea90-b63b-4c05-90e5-da8f484070e2.png)
![image](https://user-images.githubusercontent.com/589439/143156018-cc426e4a-2785-4050-b643-dc2bee6251aa.png)
![image](https://user-images.githubusercontent.com/589439/143156108-4e4c2f1e-138b-451e-87a7-3cff9da960cb.png)
![image](https://user-images.githubusercontent.com/589439/143156160-4aef319c-3756-4ff6-b429-032d2e45513f.png)
![image](https://user-images.githubusercontent.com/589439/143156180-226269c5-ba2c-4f29-ad2e-378eaf8ee523.png)
![image](https://user-images.githubusercontent.com/589439/143156205-4d4b8afb-c334-4a22-af62-33f7174c716d.png)
![image](https://user-images.githubusercontent.com/589439/143156303-93a31da8-2dc3-49d5-b80a-0246b877dd34.png)
![image](https://user-images.githubusercontent.com/589439/143156381-07c34f94-b2f5-42ac-a61e-10fb5f27a8c9.png)
![image](https://user-images.githubusercontent.com/589439/143157449-7a86072c-0dc4-4e49-a1b3-a62c3f88187c.png)
![image](https://user-images.githubusercontent.com/589439/143157471-7b9bfc36-d505-4b77-8938-d9387e8280b1.png)
![image](https://user-images.githubusercontent.com/589439/143157512-a862401e-38f8-4334-90eb-3f597c583a48.png)
![image](https://user-images.githubusercontent.com/589439/143157553-744bfd7e-5b14-4905-bc84-cf01a245d9ff.png)
![image](https://user-images.githubusercontent.com/589439/143158851-a4f7a00b-4f25-40e0-ae2e-2fba3edef08e.png)
![image](https://user-images.githubusercontent.com/589439/143158880-17506781-abc2-4188-aca3-4546dcb475f9.png)
![image](https://user-images.githubusercontent.com/589439/143158912-97fb24ad-8b49-432e-a3d7-4badb0977714.png)
![image](https://user-images.githubusercontent.com/589439/143158967-afad1831-822f-4440-9a4b-9248c909007d.png)
![image](https://user-images.githubusercontent.com/589439/143160948-90380e23-e8cc-42b3-8933-4d88c5c9bc90.png)
![image](https://user-images.githubusercontent.com/589439/143655465-4efa6088-9bcd-4df4-92f3-d641975ece93.png)
![image](https://user-images.githubusercontent.com/589439/143655576-0ff7992c-0339-48c5-94f1-2be90b2877e5.png)
![image](https://user-images.githubusercontent.com/589439/143655623-cc957acf-a6f3-4e23-ad84-2f63762db770.png)
![image](https://user-images.githubusercontent.com/589439/143655734-92f93f94-723b-4a03-aee3-9004ebdfa931.png)
![image](https://user-images.githubusercontent.com/589439/143655803-423dddd8-398e-49e0-839f-d96a5e655441.png)
![image](https://user-images.githubusercontent.com/589439/143656306-85f1aefd-a6a8-4f07-a2e9-b7153ff175ce.png)
![image](https://user-images.githubusercontent.com/589439/143663079-a9503fd4-75f1-4bb0-bfd8-ada3bd9fa2ec.png)
![image](https://user-images.githubusercontent.com/589439/143663183-0bdb6ee0-84be-4788-bdc7-0ab23e9e5d41.png)
![image](https://user-images.githubusercontent.com/589439/143663255-907bff87-ae02-4c4d-8400-ef6a914c3aae.png)
![image](https://user-images.githubusercontent.com/589439/143663347-4ec70e43-da4d-4b97-bd26-b336586bc9d7.png)
![image](https://user-images.githubusercontent.com/589439/143663405-5323b62f-74a8-409f-80a8-c2c6ad961497.png)
![image](https://user-images.githubusercontent.com/589439/143664760-9d9bc86e-9e4a-4bf0-882a-3fa2db1d416b.png)
![image](https://user-images.githubusercontent.com/589439/143664935-4e1d2e45-b4da-4f83-922c-2e7581a65f98.png)
![image](https://user-images.githubusercontent.com/589439/143665245-9dc7ac44-78cd-45ac-992c-f3e23d1a044e.png)
![image](https://user-images.githubusercontent.com/589439/143665289-9f80a74d-f3f4-45b9-a92a-e213a6c37056.png)
![image](https://user-images.githubusercontent.com/589439/143666284-5ff41514-5c89-4cc7-afa0-b17ed9003b61.png)
![image](https://user-images.githubusercontent.com/589439/143666323-eb172e58-d0cb-4228-af31-f9f7daf43d19.png)
![image](https://user-images.githubusercontent.com/589439/143666365-9cbab570-213f-403b-bdc9-d891025fabac.png)
![image](https://user-images.githubusercontent.com/589439/143666538-47885861-2340-4fca-9507-8a1a66d82fe9.png)
![image](https://user-images.githubusercontent.com/589439/143666560-4a7dd70c-abde-4af8-a1c7-16eab5d99bf3.png)
![image](https://user-images.githubusercontent.com/589439/143666727-f7a06dbc-aba6-410f-8bd5-0aa24ecf38d3.png)
![image](https://user-images.githubusercontent.com/589439/143666820-b12aafdd-f1e1-4c46-889c-34af1c9ca929.png)
![image](https://user-images.githubusercontent.com/589439/143666829-813f9715-3a2d-49f1-9124-5a690681accc.png)
![image](https://user-images.githubusercontent.com/589439/143666852-90d659de-01a0-4685-bf36-42868e1c77d9.png)
![image](https://user-images.githubusercontent.com/589439/143666866-5896317b-1255-4e67-abe7-5f3ff03be288.png)
![image](https://user-images.githubusercontent.com/589439/143666874-a453b635-63e6-44e0-94c1-7127e1c7f729.png)
![image](https://user-images.githubusercontent.com/589439/143723668-73111ae8-0ac5-4729-b89b-481d29b25d16.png)
![image](https://user-images.githubusercontent.com/589439/143723824-968874c9-5f8e-44cc-a535-d0d336a72b78.png)
![image](https://user-images.githubusercontent.com/589439/143723906-baf552bc-e9d1-435b-8d43-553f6f0a6707.png)
![image](https://user-images.githubusercontent.com/589439/143723930-c9c8658f-339d-4693-894a-daf70dea28ae.png)
![image](https://user-images.githubusercontent.com/589439/143724110-61196b6e-7d6e-4fc5-86a4-c7234cd4d379.png)
![image](https://user-images.githubusercontent.com/589439/143724128-692a9f83-0365-4f0f-9068-e8e6af9cac15.png)
![image](https://user-images.githubusercontent.com/589439/143724159-ae6c0578-14e4-463b-8287-ef4147ff0f34.png)
![image](https://user-images.githubusercontent.com/589439/143724167-70721d41-12f2-4322-b611-3f07df92d344.png)
![image](https://user-images.githubusercontent.com/589439/143724450-3e95cb75-0dba-45da-9abf-7de026d3b8f3.png)
![image](https://user-images.githubusercontent.com/589439/143724459-afaf363f-dd92-494b-9707-5400f409d05a.png)
![image](https://user-images.githubusercontent.com/589439/143724476-77609fc2-e5a7-4773-94d9-799f2b78be6f.png)
![image](https://user-images.githubusercontent.com/589439/143724492-3036d310-3569-4820-9087-daca2bf9869f.png)
![image](https://user-images.githubusercontent.com/589439/143724167-70721d41-12f2-4322-b611-3f07df92d344.png)
![image](https://user-images.githubusercontent.com/589439/143727424-3d4930ef-647a-42cc-838a-ea7284dbda2a.png)
![image](https://user-images.githubusercontent.com/589439/143727464-598963b9-73c6-4b65-a617-5eecf454f4e9.png)
![image](https://user-images.githubusercontent.com/589439/143727479-6828fc05-4672-4c60-8a21-f1fe6e97d0ea.png)
![image](https://user-images.githubusercontent.com/589439/143727509-2313c55c-bc6d-4451-91f1-f4424fac580a.png)
![image](https://user-images.githubusercontent.com/589439/143727576-96f5f991-0493-4417-94e8-e12ab8bfd769.png)
![image](https://user-images.githubusercontent.com/589439/143727645-23bc99ea-105b-455b-b03f-1737892d3b9a.png)
![image](https://user-images.githubusercontent.com/589439/143729065-f90e8ed0-07ac-4c77-8e4b-8d4c7fcffdef.png)
![image](https://user-images.githubusercontent.com/589439/143729154-00111bfe-534d-4403-bcab-92e3adf032ee.png)
![image](https://user-images.githubusercontent.com/589439/143729162-ad6f82c6-643e-4ec0-a082-75842c237053.png)
![image](https://user-images.githubusercontent.com/589439/143729232-16e479b2-527e-4b0f-94b0-e43bd08cfba8.png)
![image](https://user-images.githubusercontent.com/589439/143729360-d16987b0-25ab-42db-a08d-66bae5576443.png)
![image](https://user-images.githubusercontent.com/589439/143729413-dffdd2dc-d0cb-40aa-8b0f-fd567b2a527c.png)
![image](https://user-images.githubusercontent.com/589439/143729441-e43fde75-76ed-489d-acef-56fea5ddf539.png)
![image](https://user-images.githubusercontent.com/589439/143729376-8e7db409-6651-4a55-90a7-8750d77d5838.png)
![image](https://user-images.githubusercontent.com/589439/143729521-c7b0fc38-baf0-4701-9032-dba324497f5e.png)
![image](https://user-images.githubusercontent.com/589439/143730115-59cf1d93-b27b-4902-a39b-522551733281.png)
![image](https://user-images.githubusercontent.com/589439/143730111-33db8027-2c8e-41e8-98a6-9e5e45984fc5.png)
![image](https://user-images.githubusercontent.com/589439/143797035-060165d6-d462-4160-b9f0-a2b31bdd3d72.png)
![image](https://user-images.githubusercontent.com/589439/143808844-e4244060-5842-41e2-868d-7a75c57a3c21.png)
![image](https://user-images.githubusercontent.com/589439/143809035-2ae69802-7929-47a6-a445-12b571cacd14.png)
![image](https://user-images.githubusercontent.com/589439/143809423-cea91ff5-916f-4c03-b7c3-e4eb625756a4.png)
![image](https://user-images.githubusercontent.com/589439/143809877-6e766d73-ff1c-405f-bd6f-600a58736b25.png)
![image](https://user-images.githubusercontent.com/589439/143809929-1e119a3b-0239-4144-bece-a1d9aa7d51bf.png)
![image](https://user-images.githubusercontent.com/589439/143809965-7997fd22-e172-4360-af13-8c0d65b83f4e.png)
![image](https://user-images.githubusercontent.com/589439/143809992-3a41471a-dd02-4a3e-acea-96b7a7c3a674.png)
![image](https://user-images.githubusercontent.com/589439/143810068-5f175928-4e4d-4820-8b14-067a31b35cd6.png)
![image](https://user-images.githubusercontent.com/589439/143810077-6bfb77d3-4643-4129-a8c4-0b4fbf196b43.png)
![image](https://user-images.githubusercontent.com/589439/143810093-f8508bb1-5728-4010-b87b-21f4aed74e73.png)
![image](https://user-images.githubusercontent.com/589439/143810115-c88787cb-3cae-433a-93c8-712a25db0c78.png)
![image](https://user-images.githubusercontent.com/589439/143810872-231209ca-eb71-4bd2-930d-3527fbaaace0.png)
![image](https://user-images.githubusercontent.com/589439/143810896-a9875ab8-b9ab-4ced-ad49-c47ea321a052.png)
![image](https://user-images.githubusercontent.com/589439/143810915-c9428405-1f00-462d-8a80-2d1467c95e7b.png)
![image](https://user-images.githubusercontent.com/589439/143810942-972f34b4-b7a4-4532-9e8d-6f6bcc01ac9f.png)
![image](https://user-images.githubusercontent.com/589439/143810970-69367200-b71e-481f-b813-3d447e154bb3.png)
![image](https://user-images.githubusercontent.com/589439/143811032-4adc40ef-fa0e-4596-88b5-2a24610cdaf3.png)
![image](https://user-images.githubusercontent.com/589439/143811081-edaa58f5-d3e6-40c6-9dab-f19e547d090e.png)
![image](https://user-images.githubusercontent.com/589439/143811255-0b946589-2679-4747-b514-3b91ac2259cd.png)
![image](https://user-images.githubusercontent.com/589439/143811275-488e15be-15bd-4341-8392-834cd68bbcad.png)
| 11,427 | Markdown | 48.90393 | 109 | 0.808874 |
pantelis-classes/omniverse-ai/Wikipages/Editing Synthetic Data Generation (Python API).md | # Synthetic Data in Omniverse from Isaac Sim
Omniverse comes with synthetic data generation samples in Python. These can be found in (home/.local/share/ov/pkg/isaac_sim-2021.2.0/python_samples)
## Offline Dataset Generation
This example will demonstrate how to generate synthetic dataset offline which can be used for training deep neural networks using default values.
From the package root folder (home/.local/share/ov/pkg/isaac_sim-2021.2.0/) run this command to generate synthetic data:
./python.sh standalone_examples/replicator/offline_generation.py
These are the arguments we can use:
1. --scenario: Specify the USD stage to load from omniverse server for dataset generation.
1. --num_frames: Number of frames to record.
1. --max_queue_size: Maximum size of queue to store and process synthetic data. If value of this field is less than or equal to zero, the queue size is infinite.
1. --data_dir: Location where data will be output. Default is ./output
1. --writer_mode: Specify output format - npy or kitti. Default is npy.
When KittiWriter is used with the --writer_mode kitti argument, two more arguments become available.
6. --classes: Which classes to write labels for. Defaults to all classes.
7. --train_size: Number of frames for training set. Defaults to 8.
With arguments, the above command looks like:
./python.sh standalone_examples/replicator/offline_generation.py --scenario omniverse://<server-name>/Isaac/Samples/Synthetic_Data/Stage/warehouse_with_sensors.usd --num_frames 10 --max_queue_size 500
All output data is stored within (home/.local/share/ov/pkg/isaac_sim-2021.2.0/output)
## Offline Training with TLT
To leverage TLT, we need to have a dataset in the Kitti format. NVIDIA Transfer Learning Toolkit (TLT) is a Python-based AI toolkit for taking purpose-built pretrained AI models and customizing them with your own data.
### Offline Kitti Dataset Generation
for this we add the argument --writer_mode kitti and specify the classes like in this example (not specifying an argument makes it use the default):
./python.sh standalone_examples/replicator/offline_generation.py --writer_mode kitti --classes ceiling floor --num_frames 500 --train_size 100
![image](https://user-images.githubusercontent.com/589439/143666365-9cbab570-213f-403b-bdc9-d891025fabac.png)
![image](https://user-images.githubusercontent.com/589439/143666538-47885861-2340-4fca-9507-8a1a66d82fe9.png)
![image](https://user-images.githubusercontent.com/589439/143666560-4a7dd70c-abde-4af8-a1c7-16eab5d99bf3.png)
![omniverse data gen](https://user-images.githubusercontent.com/589439/143667012-183800ff-f197-44a7-9677-d19940a06179.gif)
The python scripts can be extensively modified to generate more customized datasets (code deep dive to come).
- The output of the synthetic data generation can be found in: `~/.local/share/ov/pkg/isaac_sim-2021.2.0/output`
![image](https://user-images.githubusercontent.com/589439/143666727-f7a06dbc-aba6-410f-8bd5-0aa24ecf38d3.png)
- The dataset is divided into two folders; A Training and Test Dataset. The training dataset contains **images** and **labels** of the warehouse.
![image](https://user-images.githubusercontent.com/589439/143666820-b12aafdd-f1e1-4c46-889c-34af1c9ca929.png)
![image](https://user-images.githubusercontent.com/589439/143666829-813f9715-3a2d-49f1-9124-5a690681accc.png)
![image](https://user-images.githubusercontent.com/589439/143666852-90d659de-01a0-4685-bf36-42868e1c77d9.png)
![image](https://user-images.githubusercontent.com/589439/143666866-5896317b-1255-4e67-abe7-5f3ff03be288.png)
- The test dataset contains only **images**.
![image](https://user-images.githubusercontent.com/589439/143666874-a453b635-63e6-44e0-94c1-7127e1c7f729.png)
![omniversepicgen](https://user-images.githubusercontent.com/589439/143667064-d0136cd5-9b3e-4b5d-987f-c013ff08d401.gif)
| 3,918 | Markdown | 49.243589 | 218 | 0.782797 |
pantelis-classes/omniverse-ai/Wikipages/Isaac Sim SDK Omniverse Installation.md | ## Prerequisites
Ubuntu 18.04 LTS required
Nvidia drivers 470 or higher
### Installing Nvidia Drivers on Ubuntu 18.04 LTS
sudo apt-add-repository -r ppa:graphics-drivers/ppa
![image](https://user-images.githubusercontent.com/589439/143662835-6d5624b2-b78d-4ff2-acc3-efadc64d58a2.png)
sudo apt update
![image](https://user-images.githubusercontent.com/589439/143662852-f99e89cc-1c28-4039-8c25-95c470de171f.png)
sudo apt remove nvidia*
![image](https://user-images.githubusercontent.com/589439/143662863-5dbc78c5-c175-495e-bd36-5b214557774c.png)
![image](https://user-images.githubusercontent.com/589439/143662877-cd6abe58-973f-4da1-ac1c-9fe5d28a5853.png)
sudo apt autoremove
![image](https://user-images.githubusercontent.com/589439/143662895-53e3155b-e8bf-498b-9bb3-4cbe39e1354a.png)
![image](https://user-images.githubusercontent.com/589439/143662915-70024577-3531-46da-8f6e-ea2d8d230e8a.png)
sudo ubuntu-drivers autoinstall
![image](https://user-images.githubusercontent.com/589439/143662959-6b21b9f4-5462-4b9d-9a29-083fad49eafe.png)
sudo apt install nvidia-driver-470
![image](https://user-images.githubusercontent.com/589439/143662965-5e05ee0d-a48f-4161-a086-ab03bf6854bf.png)
- Restart your PC.
- Run nvidia-smi to make sure you are on the latest nvidia drivers for Isaac.
nvidia-smi
![image](https://user-images.githubusercontent.com/589439/143663079-a9503fd4-75f1-4bb0-bfd8-ada3bd9fa2ec.png)
## Omniverse and Isaac Sim installation (executable)
### 1. Create nvidia developer account. This is required to access some of the downloads as well as obtaining API keys for Nvidia NGC
- Go to this <a href="https://developer.nvidia.com/developer-program">link</a> and create an account.
![image](https://user-images.githubusercontent.com/589439/143655734-92f93f94-723b-4a03-aee3-9004ebdfa931.png)
- Fill out your NVIDIA profile.
![image](https://user-images.githubusercontent.com/589439/143655803-423dddd8-398e-49e0-839f-d96a5e655441.png)
### 2. Go to this <a href="https://www.nvidia.com/en-us/omniverse/">omniverse link</a> and download Omniverse and install.
![image](https://user-images.githubusercontent.com/589439/143158851-a4f7a00b-4f25-40e0-ae2e-2fba3edef08e.png)
- Fill out the form.
![image](https://user-images.githubusercontent.com/589439/143158880-17506781-abc2-4188-aca3-4546dcb475f9.png)
- Click the download link for Linux.
![image](https://user-images.githubusercontent.com/589439/143158912-97fb24ad-8b49-432e-a3d7-4badb0977714.png)
- Download and save the AppImage file to your ~/Downloads folder.
![image](https://user-images.githubusercontent.com/589439/143158967-afad1831-822f-4440-9a4b-9248c909007d.png)
- Run these commands to execute the AppImage.
cd ~/Downloads
ls
chmod +x omniverse-launcher-linux.AppImage
./omniverse-launcher-linux.AppImage
![image](https://user-images.githubusercontent.com/589439/143656306-85f1aefd-a6a8-4f07-a2e9-b7153ff175ce.png)
### 3. Login to Omniverse to install Isaac Sim 2021.
- Login with your NVIDIA credentials.
![image](https://user-images.githubusercontent.com/589439/143160948-90380e23-e8cc-42b3-8933-4d88c5c9bc90.png)
- Accept the terms of agreement.
![image](https://user-images.githubusercontent.com/589439/143161008-59913f3c-cfde-4c9f-93d4-609dc0346469.png)
- Click continue. (default paths)
![image](https://user-images.githubusercontent.com/589439/143161046-21afc550-6bf7-450c-b023-3296de59d7b4.png)
- Install cache.
![image](https://user-images.githubusercontent.com/589439/143161192-9936a489-e81d-4ccc-a2e0-caf120ce92c4.png)
### 4. Installing Isaac through Omniverse.
- Click the Exchange tab in Omniverse.
![image](https://user-images.githubusercontent.com/589439/143165080-9daa5e96-99c0-4e60-9a40-ff4f77944311.png)
- Search for Isaac and Click Isaac Sim.
![image](https://user-images.githubusercontent.com/589439/143165387-659a75bf-ba62-49e4-9bab-320b0da9eeb1.png)
- Click install.
![image](https://user-images.githubusercontent.com/589439/143165778-75f9cbea-b93b-4c0a-9661-269ec0e643f5.png)
### 5. Go to the nucleus tab and create a nucleus local server to run the Omniverse Isaac Sim Samples.
- Create your local nucleus account by clicking the Nucleus tab in Omniverse.
- Click Add Local Nucleus Service.
![image](https://user-images.githubusercontent.com/589439/143163402-c38ef3e5-64a8-437f-8a4c-7f978b37e40b.png)
- Click Next. (Default Path)
![image](https://user-images.githubusercontent.com/589439/143163446-5fa6c2bc-6437-4239-bcd7-5be8f9159de7.png)
- Create Administrator Account.
- Go to this <a href="https://developer.nvidia.com/nvidia-isaac-sim-assets-20211">link</a> and download the Isaac Sim Assets.
![image](https://user-images.githubusercontent.com/589439/143163494-95fba91c-12b3-4228-ae21-39ce639d66b4.png)
- Unzip the file by going to your Downloads folder, right-clicking isaac-sim-assets-2021.1.1.zip, and choosing "extract here".
![image](https://user-images.githubusercontent.com/589439/143657912-d33c71f8-1965-4ca2-b06c-3d0790ffd1e4.png)
- Log into the Nucleus Service with the credentials you created.
![image](https://user-images.githubusercontent.com/589439/143163725-d7b1a5ae-2391-4da0-9a70-f58ce063eb38.png)
- Create an Isaac Folder. (Right click localhost)
![image](https://user-images.githubusercontent.com/589439/143164075-7cfacb0b-a2e2-4e29-a63f-85316f585a5e.png)
![image](https://user-images.githubusercontent.com/589439/143164125-851ba73c-0cc8-4555-b5d8-769d54625d8d.png)
![image](https://user-images.githubusercontent.com/589439/143657335-7499d95b-d4e0-44bd-88f9-87f4d73a9de9.png)
- Drag and drop the files in the isaac-sim-assets-2021.1.1 folder into the Isaac folder in Omniverse. (NOT THE .ZIP; THE FILES IN THE FOLDER THAT WAS CREATED WHEN YOU EXTRACTED IT).
![image](https://user-images.githubusercontent.com/589439/143666284-5ff41514-5c89-4cc7-afa0-b17ed9003b61.png)
- Click upload.
![image](https://user-images.githubusercontent.com/589439/143657451-f9792fd1-e085-4850-a5b5-1ccbe9d4d4e5.png)
![image](https://user-images.githubusercontent.com/589439/143666323-eb172e58-d0cb-4228-af31-f9f7daf43d19.png)
### 6. Now launch Isaac Sim from the Library Omniverse tab within Omniverse.
- Click Launch in the Library Tab of Omniverse.
![image](https://user-images.githubusercontent.com/589439/143657605-6b09b104-698d-4eba-b5f7-e027eee033eb.png)
- Click Start with the default settings with "Isaac Sim" selected.
![image](https://user-images.githubusercontent.com/589439/143657653-c3d31131-1da7-4919-b7dd-8a9555c4aba6.png)
- Once Isaac Sim has finished loading, login to localhost with the browser window that opened.
![image](https://user-images.githubusercontent.com/589439/143658289-5d6ed582-e15f-4ca7-b3dd-b7cd1d37a2fb.png)
![image](https://user-images.githubusercontent.com/589439/143658399-7538b399-a050-4468-842f-32cfe782bf80.png)
From here we can launch the Isaac Sim application. Currently there is no way to generate KITTI-formatted synthetic output data (which is required for NVIDIA's transfer learning) from the domain randomizer within the application itself.
For this we need to use Omniverse's built in python environment.
## Python API Installation
1. Using the Linux command line interface (terminal), go to the packages root folder (home/.local/share/ov/pkg/isaac_sim-2021.2.0/).
cd ~/.local/share/ov/pkg/isaac_sim-2021.2.0/
ls
![image](https://user-images.githubusercontent.com/589439/143659975-91da9c57-e9c0-4c41-a208-c02010656a83.png)
2. Run the following command to get all the required dependencies:
./python.sh -m pip install -r requirements.txt
![image](https://user-images.githubusercontent.com/589439/143660049-8e2288b8-14c4-4503-a4d4-56fb45574849.png)
| 7,828 | Markdown | 38.741117 | 234 | 0.774527 |
pantelis-classes/omniverse-ai/Wikipages/TAO (NVIDIA Train, Adapt, and Optimize).md | All instructions stem from this <a href="https://docs.nvidia.com/tao/tao-toolkit/text/tao_toolkit_quick_start_guide.html">Nvidia Doc</a>.
# Requirements
### Hardware Requirements (Recommended)
32 GB system RAM
32 GB of GPU RAM
8 core CPU
1 NVIDIA GPU
100 GB of SSD space
### Hardware Requirements (REQUIRED)
- TAO Toolkit is supported on **A100**, **V100** and **RTX 30x0 GPUs**.
# Login to the NGC docker registry.
Login to the NGC docker registry:
Use the command
docker login nvcr.io
and enter the following credentials:
a. Username: "$oauthtoken"
b. Password: "YOUR_NGC_API_KEY"
- Where YOUR_NGC_API_KEY corresponds to the key you generated from step 3.
![image](https://user-images.githubusercontent.com/589439/143663405-5323b62f-74a8-409f-80a8-c2c6ad961497.png)
# Installing TAO Toolkit
- TAO Toolkit is a Python pip package that is hosted on the NVIDIA PyIndex. The package uses the Docker REST API under the hood to interact with the NGC Docker registry to pull and instantiate the underlying docker containers. You must have an NGC account and an API key associated with your account. See the Installation Prerequisites section for details on creating an NGC account and obtaining an API key.
## 1. Create a new virtualenv using virtualenvwrapper
- Click this <a href="https://python-guide-cn.readthedocs.io/en/latest/dev/virtualenvs.html"> link</a> to understand how virtual enviroments in python work.
- Make sure you have virtualenv installed by checking its version. (Instructions are in this <a href="https://github.com/pantelis-classes/omniverse-ai/wiki/NVIDIA-Transfer-Learning-Toolkit-(TLT)-Installation#1-create-new-python-virtual-environment">page</a> of the wiki.)
virtualenv --version
![image](https://user-images.githubusercontent.com/589439/143723668-73111ae8-0ac5-4729-b89b-481d29b25d16.png)
## 2. Define the environment variable called VIRTUALENVWRAPPER_PYTHON.
- Run this command to see where your python is located.
which python3
![image](https://user-images.githubusercontent.com/589439/143723824-968874c9-5f8e-44cc-a535-d0d336a72b78.png)
- Define the environment variable of your Python location.
export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3
![image](https://user-images.githubusercontent.com/589439/143723906-baf552bc-e9d1-435b-8d43-553f6f0a6707.png)
- Run this command to make sure the enviroment variable was created. (There should be red output with the variable name.)
env | grep 'VIRTUALENVWRAPPER_PYTHON'
![image](https://user-images.githubusercontent.com/589439/143723930-c9c8658f-339d-4693-894a-daf70dea28ae.png)
- Run this command.
source `which virtualenvwrapper.sh`
- Run this command to create a virtualenv named "TAO".
mkvirtualenv TAO -p $VIRTUALENVWRAPPER_PYTHON
![image](https://user-images.githubusercontent.com/589439/143724459-afaf363f-dd92-494b-9707-5400f409d05a.png)
- You should now see a (TAO) prepending your username in the CLI.
![image](https://user-images.githubusercontent.com/589439/143724476-77609fc2-e5a7-4773-94d9-799f2b78be6f.png)
## Instructions on how to activate/deactivate the virtualenv.
- When you are done with your session, you may deactivate your virtualenv using the deactivate command:
deactivate
![image](https://user-images.githubusercontent.com/589439/143724159-ae6c0578-14e4-463b-8287-ef4147ff0f34.png)
- You may re-activate this virtualenv using the workon command.
workon TAO
![image](https://user-images.githubusercontent.com/589439/143724492-3036d310-3569-4820-9087-daca2bf9869f.png)
## 3. Download Jupyter Notebook.
- TAO Toolkit provides sample notebooks to walk through and prescribe the TAO workflow. These samples are hosted on NGC as a resource and can be downloaded from NGC by executing the command mentioned below.
- Run these commands to set up your notebook.
workon TAO
![image](https://user-images.githubusercontent.com/589439/143725152-cbbd609d-6d94-452c-8a48-a2bcf66dc4ab.png)
- Copy the command below and keep pressing enter until you are in ~/cv_samples_v1.2.0.
wget --content-disposition https://api.ngc.nvidia.com/v2/resources/nvidia/tao/cv_samples/versions/v1.2.0/zip -O cv_samples_v1.2.0.zip
unzip -u cv_samples_v1.2.0.zip -d ./cv_samples_v1.2.0 && rm -rf cv_samples_v1.2.0.zip && cd ./cv_samples_v1.2.0
![image](https://user-images.githubusercontent.com/589439/143725176-02cc805c-4a98-4afe-9d49-ff17b48e171c.png)
![image](https://user-images.githubusercontent.com/589439/143725173-3c7d7cf0-c3b7-487a-9ed9-818aa5615e84.png)
![image](https://user-images.githubusercontent.com/589439/143725183-3d1caa61-125e-43fe-be67-683429c272ab.png)
## 4. Start Jupyter Notebook
- Once the notebook samples are downloaded, you may start the notebook using the below commands:
jupyter notebook --ip 0.0.0.0 --port 8888 --allow-root
![image](https://user-images.githubusercontent.com/589439/143725216-d67fe159-5f1f-47b1-8dbe-5c14a4e6a7aa.png)
- Open an internet browser on localhost and navigate to the following URL:
http://0.0.0.0:8888
![image](https://user-images.githubusercontent.com/589439/143725228-4696d70e-ec0b-485c-985b-3bffb83be6ac.png)
- Navigate to ./detectnet_v2/detectnet_v2.ipynb
![image](https://user-images.githubusercontent.com/589439/143725266-806cf049-c46f-4e22-9940-ac4e9d952117.png)
![image](https://user-images.githubusercontent.com/589439/143725290-0778740c-3b39-45b4-8d83-a254f545844c.png)
![image](https://user-images.githubusercontent.com/589439/143725306-14110acd-9a61-460a-be5d-df45a55c5b65.png)
| 5,654 | Markdown | 40.580882 | 408 | 0.759816 |
pantelis-classes/omniverse-ai/Wikipages/_Sidebar.md | # Isaac Sim in Omniverse
* [Home][home]
* [Isaac-Sim-SDK-Omniverse-Installation][Omniverse]
* [Synthetic-Data-Generation][SDG]
* [NVIDIA Transfer Learning Toolkit (TLT) Installation][TLT]
* [NVIDIA TAO][TAO]
* [detectnet_v2 Installation][detectnet_v2]
* [Jupyter Notebook][Jupyter-Notebook]
[home]: https://github.com/pantelis-classes/omniverse-ai/wiki
[Omniverse]: https://github.com/pantelis-classes/omniverse-ai/wiki/Isaac-Sim-SDK-Omniverse-Installation
[SDG]: https://github.com/pantelis-classes/omniverse-ai/wiki/Synthetic-Data-Generation-(Python-API)
[TLT]: https://github.com/pantelis-classes/omniverse-ai/wiki/NVIDIA-Transfer-Learning-Toolkit-(TLT)-Installation
[NTLTSD]: https://github.com/pantelis-classes/omniverse-ai/wiki/Using-NVIDIA-TLT-with-Synthetic-Data
[TAO]: https://github.com/pantelis-classes/omniverse-ai/wiki/TAO-(NVIDIA-Train,-Adapt,-and-Optimize)
[detectnet_v2]: https://github.com/pantelis-classes/omniverse-ai/wiki/detectnet_v2-Installation
[Jupyter-Notebook]: https://github.com/pantelis-classes/omniverse-ai/wiki/Jupyter-Notebook
| 1,061 | Markdown | 57.999997 | 112 | 0.782281 |
pantelis-classes/omniverse-ai/Wikipages/home.md | # Learning in Simulated Worlds in Omniverse.
## Wiki Navigation
* [Home][home]
* [Isaac-Sim-SDK-Omniverse-Installation][Omniverse]
* [Synthetic-Data-Generation][SDG]
* [NVIDIA Transfer Learning Toolkit (TLT) Installation][TLT]
* [NVIDIA TAO][TAO]
* [detectnet_v2 Installation][detectnet_v2]
* [Jupyter Notebook][Jupyter-Notebook]
[home]: https://github.com/pantelis-classes/omniverse-ai/wiki
[Omniverse]: https://github.com/pantelis-classes/omniverse-ai/wiki/Isaac-Sim-SDK-Omniverse-Installation
[SDG]: https://github.com/pantelis-classes/omniverse-ai/wiki/Synthetic-Data-Generation-(Python-API)
[TLT]: https://github.com/pantelis-classes/omniverse-ai/wiki/NVIDIA-Transfer-Learning-Toolkit-(TLT)-Installation
[NTLTSD]: https://github.com/pantelis-classes/omniverse-ai/wiki/Using-NVIDIA-TLT-with-Synthetic-Data
[TAO]: https://github.com/pantelis-classes/omniverse-ai/wiki/TAO-(NVIDIA-Train,-Adapt,-and-Optimize)
[detectnet_v2]: https://github.com/pantelis-classes/omniverse-ai/wiki/detectnet_v2-Installation
[Jupyter-Notebook]: https://github.com/pantelis-classes/omniverse-ai/wiki/Jupyter-Notebook
<hr />
## Reports
<a href="https://docs.google.com/document/d/1jVXxrNgtOosZw_vAORzomSnmy45G3qK_mmk2B4oJtPg/edit?usp=sharing">Domain Randomization Paper</a><br>
This report provides an in-depth understanding of how Domain Randomization helps perception machine learning tasks such as object detection and/or segmentation.
<a href="https://docs.google.com/document/d/1WAzdqlWE0RUns41-0P951mnsqMR7I2XV/edit?usp=sharing&ouid=112712585131518554614&rtpof=true&sd=true">Final Report</a><br>
This final report contains an in-depth explanation of the hardware/software used, the methods used to collect the data, an explanation of the data collected, trained, and pruned, and the overall conclusions made from the trained and pruned datasets.
pantelis-classes/omniverse-ai/Wikipages/NVIDIA Transfer Learning Toolkit (TLT) Installation.md | # Installing the Pre-requisites
## 1. Install docker-ce:
### * Set up repository:
Update apt package index and install packages.
sudo apt-get update
![image](https://user-images.githubusercontent.com/589439/143660967-37eb6626-62c0-4afa-af3a-c43a3c172e85.png)
sudo apt-get install \
ca-certificates \
curl \
gnupg \
lsb-release
- The following image has these dependencies already installed.
![image](https://user-images.githubusercontent.com/589439/143660985-4ae4366b-8d28-4514-b1df-bd7fe03e581d.png)
Add Docker's official GPG key:
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
![image](https://user-images.githubusercontent.com/589439/143661077-2d0ce142-be2f-4ab6-ad99-a685fa709182.png)
### * Install Docker Engine:
Update the apt package index, and install the latest version of Docker Engine.
sudo apt-get update
![image](https://user-images.githubusercontent.com/589439/143661094-a2b86161-c37f-42fd-9110-34523343f65a.png)
sudo apt-get install docker-ce docker-ce-cli containerd.io
![image](https://user-images.githubusercontent.com/589439/143661447-8fa25b3b-1c79-470d-b962-88c21bd56f63.png)
Verify that Docker Engine is installed correctly by running the hello-world image.
sudo docker run hello-world
![image](https://user-images.githubusercontent.com/589439/143661433-d67e18ac-c098-4665-b7ba-127e397b0df6.png)
### * Manage Docker as a non-root user:
Create the docker group.
sudo groupadd docker
![image](https://user-images.githubusercontent.com/589439/143661491-c43c3f94-90d7-47d4-8bd4-dee974f67838.png)
Add your user to the docker group.
sudo usermod -aG docker $USER
![image](https://user-images.githubusercontent.com/589439/143661478-cff5282c-e864-4821-a084-7f1f8360b4bc.png)
Log out and log back in so that your group membership is re-evaluated.
![image](https://user-images.githubusercontent.com/589439/143661541-098c52b5-0c54-46c9-9d14-fd0250f27a1e.png)
Verify that you can run docker commands without sudo.
docker run hello-world
![image](https://user-images.githubusercontent.com/589439/143661708-6baceb75-a047-4f75-8b51-9496e6908d15.png)
- If you get the WARNING error in the above image, run these two commands. Otherwise Skip to #2.
sudo chown "$USER":"$USER" /home/"$USER"/.docker -R
sudo chmod g+rwx "/home/$USER/.docker" -R
- Run docker run hello-world to double check it works now.
docker run hello-world
![image](https://user-images.githubusercontent.com/589439/143661749-52f2103f-19c5-47bb-85b3-0b5069957b87.png)
## 2. Install NVIDIA Container Toolkit:
Setup the stable repository and the GPG key:
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
&& curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \
&& curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
![image](https://user-images.githubusercontent.com/589439/143662010-9b31cc9d-bbbe-4aa7-af69-ade75e18ccc6.png)
Install the nvidia-docker2 package (and dependencies) after updating the package listing:
sudo apt-get update
sudo apt-get install -y nvidia-docker2
![image](https://user-images.githubusercontent.com/589439/143662034-8e020c83-780b-40d0-a17b-ad0cdfd4210f.png)
Restart the Docker daemon to complete the installation after setting the default runtime:
sudo systemctl restart docker
![image](https://user-images.githubusercontent.com/589439/143662068-dfcad334-8466-4c9a-9cd0-e08a23f31b66.png)
At this point, a working setup can be tested by running a base CUDA container:
sudo docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi
- This should result in a console output shown below:
![image](https://user-images.githubusercontent.com/589439/143663183-0bdb6ee0-84be-4788-bdc7-0ab23e9e5d41.png)
## 3. Get an NVIDIA NGC account and API key:
- Go to <a href="https://ngc.nvidia.com/signin">NGC</a> and click the Transfer Learning Toolkit container in the Catalog tab. This message is displayed: “Sign in to access the PULL feature of this repository”.
![image](https://user-images.githubusercontent.com/589439/143662546-8e8053f4-9aa9-40bb-bb8c-432d652db64b.png)
- Enter your Email address and click Next, or click Create an Account.
- Choose your organization when prompted for Organization/Team.
- Click Sign In.
- Once redirected to this <a href="https://catalog.ngc.nvidia.com/">page</a> with your account made, click the top right corner to click your profile and click "Setup"
![image](https://user-images.githubusercontent.com/589439/143662652-a6595488-44e6-494e-8e11-17056209a3fd.png)
- Click Get API Key.
![image](https://user-images.githubusercontent.com/589439/143662747-cda7d160-6f1f-41dc-815f-65bf13ba7bc7.png)
- Click Generate API Key.
![image](https://user-images.githubusercontent.com/589439/143662782-9bebeb67-26ec-4980-9624-1a91f0d1a6cc.png)
- Your API key and username will be shown under the DOCKER tm section. Copy the text with your username and API password and save it in a file somewhere.
![image](https://user-images.githubusercontent.com/589439/143663255-907bff87-ae02-4c4d-8400-ef6a914c3aae.png)
![image](https://user-images.githubusercontent.com/589439/143663347-4ec70e43-da4d-4b97-bd26-b336586bc9d7.png)
## 4. Login to the NGC docker registry:
Use the command
docker login nvcr.io
and enter the following credentials:
a. Username: "$oauthtoken"
b. Password: "YOUR_NGC_API_KEY"
- Where YOUR_NGC_API_KEY corresponds to the key you generated from step 3.
![image](https://user-images.githubusercontent.com/589439/143663405-5323b62f-74a8-409f-80a8-c2c6ad961497.png)
# Installing TLT
The Transfer Learning Toolkit (TLT) is a Python pip package that is hosted on the NVIDIA PyIndex. The package uses the Docker REST API under the hood to interact with the NGC Docker registry to pull and instantiate the underlying docker containers.
## 1. Create new Python virtual environment.
### Python virtualenv setup using virtualenvwrapper
Install via pip:
pip3 install virtualenv
![image](https://user-images.githubusercontent.com/589439/143667101-35f5e890-f96d-4a24-8f85-4db1ff95ab8f.png)
pip3 install virtualenvwrapper
![image](https://user-images.githubusercontent.com/589439/143667117-cef7ead6-5ca1-4f93-b759-4caa9c8dca76.png)
| 6,554 | Markdown | 38.727272 | 247 | 0.754043 |
pantelis-classes/omniverse-ai/Wikipages/_Footer.md | ## Authors
### <a href="https://github.com/dfsanchez999">Diego Sanchez</a> | <a href="https://harp.njit.edu/~jga26/">Jibran Absarulislam</a> | <a href="https://github.com/markkcruz">Mark Cruz</a> | <a href="https://github.com/sppatel2112">Sapan Patel</a>
## Supervisor
### <a href="https://pantelis.github.io/">Dr. Pantelis Monogioudis</a>
## Credits
### <a href="https://developer.nvidia.com/nvidia-omniverse-platform">NVIDIA Omniverse</a> | 446 | Markdown | 39.63636 | 244 | 0.686099 |
pantelis-classes/omniverse-ai/Wikipages/detectnet_v2 Installation.md | # Installing running detectnet_v2 in a jupyter notebook
## Setup File Structures.
- Run these commands to create the correct file structure.
cd ~
mkdir tao
mv cv_samples_v1.2.0/ tao
cd tao/cv_samples_v1.2.0/
rm -r detectnet_v2
![image](https://user-images.githubusercontent.com/589439/143797815-904b6033-f5db-43ac-a736-d653d4d19cfe.png)
![image](https://user-images.githubusercontent.com/589439/143797903-cd33e342-e45d-44ca-a8ac-6efb6d2cd18f.png)
- Download the detectnet_v2.zip from this <a href="https://github.com/pantelis-classes/omniverse-ai/raw/main/detectnet_v2.zip">link</a>.
![image](https://user-images.githubusercontent.com/589439/143727479-6828fc05-4672-4c60-8a21-f1fe6e97d0ea.png)
- Run this command to move the .zip from your downloads folder to your detectnet_v2 folder.
mv ~/Downloads/detectnet_v2.zip ~/tao/cv_samples_v1.2.0/
![image](https://user-images.githubusercontent.com/589439/143798005-a702ed00-5971-4ece-b60a-d05e14fa09b9.png)
- Run this command to unzip the folder.
unzip ~/tao/cv_samples_v1.2.0/detectnet_v2.zip -d detectnet_v2
![image](https://user-images.githubusercontent.com/589439/143798404-ae066e4a-d573-4144-a1ec-b5410db9efb7.png)
![image](https://user-images.githubusercontent.com/589439/143798434-9d14756d-2bdb-4f68-88cb-0e5610562034.png)
- Run this command to copy your dataset to the TAO folder. (You generated this dataset in this <a href="https://github.com/pantelis-classes/omniverse-ai/wiki/Synthetic-Data-Generation-(Python-API)#offline-training-with-tlt">wiki page</a>.)
cp -r ~/.local/share/ov/pkg/isaac_sim-2021.2.0/output/testing/ ~/tao/cv_samples_v1.2.0/detectnet_v2/workspace/tao-experiment/data/
cp -r ~/.local/share/ov/pkg/isaac_sim-2021.2.0/output/training/ ~/tao/cv_samples_v1.2.0/detectnet_v2/workspace/tao-experiment/data/
![image](https://user-images.githubusercontent.com/589439/143798514-be064b8e-18e9-4f21-97b2-ef72820190a8.png)
![image](https://user-images.githubusercontent.com/589439/143798539-d7555c9c-87c3-4037-819a-ee32aca9fa44.png)
- Navigate to Home -> cv_samples_v1.2.0 -> detectnet_v2
- Open the detectnet_v2.ipynb file.
![image](https://user-images.githubusercontent.com/589439/143729232-16e479b2-527e-4b0f-94b0-e43bd08cfba8.png)
- Scroll down to section "0. Set up env variables and map drives" (Ctrl + F)
![image](https://user-images.githubusercontent.com/589439/143729413-dffdd2dc-d0cb-40aa-8b0f-fd567b2a527c.png)
- Replace "diego" with your username. (TIP: whoami in BASH)
![image](https://user-images.githubusercontent.com/589439/143729441-e43fde75-76ed-489d-acef-56fea5ddf539.png)
![image](https://user-images.githubusercontent.com/589439/143729521-c7b0fc38-baf0-4701-9032-dba324497f5e.png) | 2,798 | Markdown | 43.428571 | 240 | 0.74732 |
pantelis-classes/omniverse-ai/Wikipages/Jupyter Notebook.md | # Object Detection using TAO DetectNet_v2
- Transfer learning is the process of transferring learned features from one application to another. It is a commonly used training technique where you use a model trained on one task and re-train to use it on a different task.
- Train Adapt Optimize (TAO) Toolkit is a simple and easy-to-use Python based AI toolkit for taking purpose-built AI models and customizing them with users' own data.
## How to use the notebook.
- Please refer to the actual jupyter notebook to have more in-depth explanations of the code.
- Each cell will run some lines of code. Start from the top of the notebook and run each cell by clicking the play button or using **shift + enter**.
![image](https://user-images.githubusercontent.com/589439/143809035-2ae69802-7929-47a6-a445-12b571cacd14.png)
- Some of the cells may take a long time to complete. Please do not skip cells and wait for the output to finish.
## 0. Set up env variables and map drives
![image](https://user-images.githubusercontent.com/589439/143808844-e4244060-5842-41e2-868d-7a75c57a3c21.png)
![image](https://user-images.githubusercontent.com/589439/143809423-cea91ff5-916f-4c03-b7c3-e4eb625756a4.png)
- We set up the env variables by linking paths, setting number of GPUs, and choosing an encoding style.
## 1. Install the TAO launcher
- This step should have been already completed in the previous wiki pages. Please refer to this <a href="https://github.com/pantelis-classes/omniverse-ai/wiki/TAO-(NVIDIA-Train,-Adapt,-and-Optimize)#login-to-the-ngc-docker-registry">link</a>.
![image](https://user-images.githubusercontent.com/589439/143809877-6e766d73-ff1c-405f-bd6f-600a58736b25.png)
## 2. Prepare dataset and pre-trained model
![image](https://user-images.githubusercontent.com/589439/143809929-1e119a3b-0239-4144-bece-a1d9aa7d51bf.png)
![image](https://user-images.githubusercontent.com/589439/143809965-7997fd22-e172-4360-af13-8c0d65b83f4e.png)
![image](https://user-images.githubusercontent.com/589439/143809992-3a41471a-dd02-4a3e-acea-96b7a7c3a674.png)
![image](https://user-images.githubusercontent.com/589439/143810068-5f175928-4e4d-4820-8b14-067a31b35cd6.png)
![image](https://user-images.githubusercontent.com/589439/143810077-6bfb77d3-4643-4129-a8c4-0b4fbf196b43.png)
![image](https://user-images.githubusercontent.com/589439/143810093-f8508bb1-5728-4010-b87b-21f4aed74e73.png)
![image](https://user-images.githubusercontent.com/589439/143810115-c88787cb-3cae-433a-93c8-712a25db0c78.png)
## 3. Provide training specification
![image](https://user-images.githubusercontent.com/589439/143810872-231209ca-eb71-4bd2-930d-3527fbaaace0.png)
## 4. Run TAO training
![image](https://user-images.githubusercontent.com/589439/143810896-a9875ab8-b9ab-4ced-ad49-c47ea321a052.png)
## 5. Evaluate the trained model
![image](https://user-images.githubusercontent.com/589439/143811275-488e15be-15bd-4341-8392-834cd68bbcad.png)
## 6. Prune the trained model
![image](https://user-images.githubusercontent.com/589439/143810915-c9428405-1f00-462d-8a80-2d1467c95e7b.png)
## 7. Retrain the pruned model
![image](https://user-images.githubusercontent.com/589439/143810942-972f34b4-b7a4-4532-9e8d-6f6bcc01ac9f.png)
![image](https://user-images.githubusercontent.com/589439/143810970-69367200-b71e-481f-b813-3d447e154bb3.png)
## 8. Evaluate the retrained model
![image](https://user-images.githubusercontent.com/589439/143811255-0b946589-2679-4747-b514-3b91ac2259cd.png)
## 9. Visualize inferences
![image](https://user-images.githubusercontent.com/589439/143811032-4adc40ef-fa0e-4596-88b5-2a24610cdaf3.png)
![image](https://user-images.githubusercontent.com/589439/143811081-edaa58f5-d3e6-40c6-9dab-f19e547d090e.png) | 3,749 | Markdown | 45.874999 | 243 | 0.787143 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/CODE_OF_CONDUCT.md | ## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.
| 309 | Markdown | 60.999988 | 105 | 0.789644 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/CONTRIBUTING.md | # Contributing Guidelines
Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
documentation, we greatly value feedback and contributions from our community.
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
information to effectively respond to your bug report or contribution.
## Reporting Bugs/Feature Requests
We welcome you to use the GitHub issue tracker to report bugs or suggest features.
When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
* A reproducible test case or series of steps
* The version of our code being used
* Any modifications you've made relevant to the bug
* Anything unusual about your environment or deployment
## Contributing via Pull Requests
Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
1. You are working against the latest source on the *main* branch.
2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
To send us a pull request, please:
1. Fork the repository.
2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
3. Ensure local tests pass.
4. Commit to your fork using clear commit messages.
5. Send us a pull request, answering any default questions in the pull request interface.
6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
## Finding contributions to work on
Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.
## Security issue notifications
If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
## Licensing
See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
| 3,160 | Markdown | 51.683332 | 275 | 0.792405 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/README.md | # NVIDIA Omniverse Nucleus on Amazon EC2
NVIDIA Omniverse is a scalable, multi-GPU, real-time platform for building and operating metaverse applications, based on Pixar's Universal Scene Description (USD) and NVIDIA RTX technology. USD is a powerful, extensible 3D framework and ecosystem that enables 3D designers and developers to connect and collaborate between industry-leading 3D content creation, rendering, and simulation applications. Omniverse helps individual creators to connect and enhance their 3D artistic process, and enterprises to build and simulate large scale virtual worlds for industrial applications.
With Omniverse, everyone involved in the lifecycle of 3D data has access to high-quality visualizations, authoring, and review tools. Teams do not need additional overhead to manage complex 3D data pipelines. Instead, they can focus on their unique contributions to bring value to the market. Non-technical stakeholders do not need to subject themselves to applications with steep learning curves, nor do results need to be compromised for the sake of iteration reviews.
To support distributed Omniverse users, Nucleus should be deployed in a secure environment. With on-demand compute, storage, and networking resources, AWS infrastructure is well suited to all spatial computing workloads, including Omniverse Nucleus. This repository provides the steps and infrastructure for an Omniverse Enterprise Nucleus Server deployment on Amazon EC2.
## Contents
* [Prerequisites](#prerequisites)
* [Deployment](#deployment)
* [Architecture](#architecture)
* [Troubleshooting](#troubleshooting)
* [Getting Help](#getting-help)
* [Changelog](#changelog)
* [Security](#security)
* [License](#license)
* [References](#references)
## Prerequisites
- AWS CLI - https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
- AWS CDK - https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html#getting_started_install
- Docker - https://www.docker.com/products/docker-desktop/
- Python 3.9 or greater - https://www.python.org
- Access to NVIDIA Enterprise Omniverse Nucleus packages - https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/enterprise/installation/quick_start_tips.html
- A Route53 Public Hosted Zone - https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html
**To learn more, reference the official documentation from NVIDIA:** https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/enterprise/cloud_aws_ec2.html
## Architecture
![architecture](/diagrams/architecture.png)
## Deployment
### 1. Download Nucleus Deployment Artifacts from NVIDIA
Place them in `./src/tools/nucleusServer/stack`
For example: `./src/tools/nucleusServer/stack/nucleus-stack-2022.1.0+tag-2022.1.0.gitlab.3983146.613004ac.tar.gz`
Consult NVIDIA documentation to find the appropriate packages.
> Note This deployment has a templated copy of `nucleus-stack.env` located at `./src/tools/nucleusServer/templates/nucleus-stack.env` this may need to be updated if NVIDIA makes changes to the `nucleus-stack.env` file packaged with their archive.
>
> The same applies to NVIDIA's reverse proxy `nginx.conf` located at `./src/tools/reverseProxy/templates/nginx.conf`
### 2. configure .env file
create ./.env
Set the following variables
```
export APP_STACK_NAME=omni-app
export AWS_DEFAULT_REGION=us-west-2
# STACK INPUTS
export OMNIVERSE_ARTIFACTS_BUCKETNAME=example-bucket-name
export ROOT_DOMAIN=example-domain.com
export NUCLEUS_SERVER_PREFIX=nucleus
export NUCLEUS_BUILD=nucleus-stack-2022.1.0+tag-2022.1.0.gitlab.3983146.613004ac # from Step 1
export ALLOWED_CIDR_RANGE_01=cidr-range-with-public-access
export DEV_MODE=true
```
> NOTE: This deployment assumes you have a public hosted zone in Route53 for the ROOT_DOMAIN, this deployment will add a CNAME record to that hosted zone
### 3. Run the deployment
The following script will run cdk deploy. The calling process must be authenticated with sufficient permissions to deploy AWS resources.
```
chmod +x ./deploy.sh
./deploy.sh
```
> NOTE: deployment requires a running docker session for building Python Lambda functions
> NOTE: It can take a few minutes for the instances to get up and running. After the deployment script finishes, review your EC2 instances and check that they are in a running state.
### 4. Test the connection
Test a connection to `<NUCLEUS_SERVER_PREFIX>.<ROOT_DOMAIN>` from within the ALLOWED_CIDR_RANGE set in the `.env` file. Do so by browsing to `https://<NUCLEUS_SERVER_PREFIX>.<ROOT_DOMAIN>` in your web browser.
The default admin username for the Nucleus server is 'omniverse'. You can find the password in a Secrets Manager resource via the AWS Secrets Manager Console. Alternatively, from the Omniverse WebUI, you can create a new username and password.
## Troubleshooting
### Unable to connect to the Nucleus Server
If you are not able to connect to the Nucleus server, review the status of the Nginx service, and the Nucleus docker stack. To do so, connect to your instances from the EC2 Console via Session Manager - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/session-manager.html.
- On the Nginx Server, run `sudo journalctl -u nginx.service`; if this produces no output, the Nginx service is not running.
- On the Nucleus server, run `sudo docker ps`, you should see a list of Nucleus containers up.
If there are issues with either of these, it is likely there was an issue with the Lambda and/or SSM run commands that configure the instances. Browse to the Lambda Console (https://us-west-2.console.aws.amazon.com/lambda/home?region=us-west-2#/functions) and search for the respective Lambda Functions:
- STACK_NAME-ReverseProxyConfig-CustomResource
- STACK_NAME-NucleusServerConfig-CustomResource
Review the function CloudWatch Logs.
### No service log entries, or unable to restart nitro-enclave service
If there are issues with either of these, it is likely there was an issue with the Lambda and/or SSM run commands that configure the instances. Browse to the Lambda Console and search for the `STACK_NAME-ReverseProxyConfig-CustomResource` Lambda Function, then review the CloudWatch Logs.
At times the Reverse Proxy custom resource Lambda function does not trigger on an initial stack deployment. If the reverse proxy instance is in a running state, but there are no invocations/logs, terminate the instance and give the auto scaling group a few minutes to create another one, and then try again. Afterwards, check the CloudWatch Logs for the Lambda function: `ReverseProxyAutoScalingLifecycleLambdaFunction`
### Additional Nginx Commands
View Nginx Service Logs:
`sudo journalctl -u nginx.service`
Viewing Nginx Logs
`sudo cat /var/log/nginx/error.log`
`sudo cat /var/log/nginx/access.log`
Restart Nginx
`systemctl restart nginx.service`
### Additional Nucleus server notes
Review NVIDIA's Documentation - https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/enterprise/installation/quick_start_tips.html
default base stack and config location: `/opt/ove/`
default omniverse data dir: `/var/lib/omni/nucleus-data`
Interacting with the Nucleus Server docker compose stack:
`sudo docker-compose --env-file ./nucleus-stack.env -f ./nucleus-stack-ssl.yml pull`
`sudo docker-compose --env-file ./nucleus-stack.env -f ./nucleus-stack-ssl.yml up -d`
`sudo docker-compose --env-file ./nucleus-stack.env -f ./nucleus-stack-ssl.yml down`
`sudo docker-compose --env-file ./nucleus-stack.env -f ./nucleus-stack-ssl.yml ps`
Generate new secrets
`sudo rm -fr secrets && sudo ./generate-sample-insecure-secrets.sh`
## Getting Help
If you have questions as you explore this sample project, post them to the Issues section of this repository. To report bugs, request new features, or contribute to this open source project, see [CONTRIBUTING.md](./CONTRIBUTING.md).
## Changelog
To view the history and recent changes to this repository, see [CHANGELOG.md](./CHANGELOG.md)
## Security
See [CONTRIBUTING](./CONTRIBUTING.md) for more information.
## License
This sample code is licensed under the MIT-0 License. See the [LICENSE](./LICENSE) file.
## References
### NVIDIA Omniverse
[Learn more about the NVIDIA Omniverse Platform](https://www.nvidia.com/en-us/omniverse/)
### Omniverse Nucleus
[Learn more about the NVIDIA Omniverse Nucleus](https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/overview.html)
| 8,456 | Markdown | 53.211538 | 581 | 0.786542 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/setup.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
from setuptools import setup

# Read the project README so it can be published as the package's long
# description. (Bug fix: it was previously read into `long_description`
# but never passed to setup(), so the value was silently discarded.)
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="Nucleus Server Tools",
    version="1.0",
    long_description=long_description,
    long_description_content_type="text/markdown",
    py_modules=[
        'nst'
    ],
    install_requires=[
        "boto3",
        "python-dotenv",
        "Click"
    ],
    # Expose the `nst` console command, backed by nst_cli:main.
    entry_points='''
        [console_scripts]
        nst=nst_cli:main
    '''
)
| 576 | Python | 21.192307 | 73 | 0.609375 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/nst_cli.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
"""
helper tools for omniverse nucleus deployment configuration
"""
# std lib modules
import os
import logging
from pathlib import Path
# 3rd party modules
import click
import nst.logger as logger
pass_config = click.make_pass_decorator(object, ensure=True)
@click.group()
@pass_config
def main(config):
    """Root of the ``nst`` CLI; subcommands register themselves via @main.command()."""
    pass
@main.command()
@pass_config
@click.option("--my_opt_arg")
def hello_world(config, my_opt_arg):
    """Smoke-test command: echoes the optional argument back to the log."""
    message = f"Hello World: {my_opt_arg=}"
    logger.info(message)
@main.command()
@pass_config
@click.option("--server-ip", required=True)
@click.option("--reverse-proxy-domain", required=True)
@click.option("--instance-name", required=True)
@click.option("--master-password", required=True)
@click.option("--service-password", required=True)
@click.option("--data-root", required=True)
def generate_nucleus_stack_env(
    config,
    server_ip,
    reverse_proxy_domain,
    instance_name,
    master_password,
    service_password,
    data_root,
):
    """Render the nucleus-stack.env template with deployment-specific values.

    Reads templates/nucleus-stack.env (relative to this tool's location) and
    writes a filled-in copy to the current working directory. EULA acceptance
    and the security-review flag are hard-coded to "1".
    """
    # NOTE(security): do not echo the actual passwords into the log.
    logger.info(
        f"generate_nucleus_stack_env:{server_ip=},{reverse_proxy_domain=},{instance_name=},"
        "master_password=<redacted>,service_password=<redacted>,"
        f"{data_root=}"
    )

    tools_path = "/".join(list(Path(__file__).parts[:-1]))
    cur_dir_path = "."

    template_name = "nucleus-stack.env"
    template_path = f"{tools_path}/templates/{template_name}"
    output_path = f"{cur_dir_path}/{template_name}"

    if not Path(template_path).is_file():
        # Bug fix: the message previously lacked the f-prefix, so the literal
        # text "{template_path}" was raised instead of the actual path.
        raise Exception(f"File not found: {template_path}")

    with open(template_path, "r") as file:
        data = file.read()

    data = data.format(
        SERVER_IP_OR_HOST=server_ip,
        REVERSE_PROXY_DOMAIN=reverse_proxy_domain,
        INSTANCE_NAME=instance_name,
        MASTER_PASSWORD=master_password,
        SERVICE_PASSWORD=service_password,
        DATA_ROOT=data_root,
        ACCEPT_EULA="1",
        SECURITY_REVIEWED="1",
    )

    with open(f"{output_path}", "w") as file:
        file.write(data)

    logger.info(output_path)
| 2,391 | Python | 25.876404 | 143 | 0.677123 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/README.md | # Tools for configuring the Nucleus Server
The contents of this directory are zipped and then deployed to the Nucleus server | 121 | Markdown | 39.666653 | 81 | 0.826446 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/nst/__init__.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
| 210 | Python | 41.199992 | 73 | 0.766667 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/nst/logger.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import logging

# Root logger configured from the environment. Note the helper functions
# below write via print() rather than this logger (behavior kept as-is).
LOG_LEVEL = os.getenv('LOG_LEVEL', 'DEBUG')
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)


def info(*args):
    """Print an informational message to stdout."""
    print(*args)


def debug(*args):
    """Print a debug message to stdout."""
    print(*args)


def warning(*args):
    """Print a warning message to stdout."""
    print(*args)


def error(*args):
    """Print an error message to stdout."""
    print(*args)
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/rpt_cli.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
"""
helper tools for reverse proxy nginx configuration
"""
# std lib modules
import os
import logging
from pathlib import Path
# 3rd party modules
import click
import rpt.logger as logger
pass_config = click.make_pass_decorator(object, ensure=True)
@click.group()
@pass_config
def main(config):
    """Root of the ``rpt`` CLI; subcommands register themselves via @main.command()."""
    pass
@main.command()
@pass_config
def hello_world(config):
    """Smoke-test command: logs a fixed greeting."""
    # Plain literal; the original's f-prefix had no placeholders.
    logger.info('Hello World')
@main.command()
@pass_config
@click.option("--cert-arn", required=True)
def generate_acm_yaml(config, cert_arn):
    """Render templates/acm.yaml with the given ACM certificate ARN.

    Writes the filled-in file to ./acm.yaml in the current working directory.
    """
    logger.info(f'generate_acm_yaml: {cert_arn=}')

    tools_path = '/'.join(list(Path(__file__).parts[:-1]))
    cur_dir_path = '.'

    template_path = f'{tools_path}/templates/acm.yaml'
    output_path = f'{cur_dir_path}/acm.yaml'

    # Robustness: fail fast with a clear error when the template is missing.
    # Previously this only logged True/False and then crashed on open() with
    # a less helpful traceback. Matches generate_nucleus_stack_env's behavior.
    if not Path(template_path).is_file():
        raise Exception(f"File not found: {template_path}")

    with open(template_path, 'r') as file:
        data = file.read()

    data = data.format(cert_arn=cert_arn)

    with open(f'{output_path}', 'w') as file:
        file.write(data)

    logger.info(output_path)
@main.command()
@pass_config
@click.option("--domain", required=True)
@click.option("--server-address", required=True)
def generate_nginx_config(config, domain, server_address):
    """Rewrite /etc/nginx/nginx.conf from the bundled template.

    Substitutes the public domain and the upstream Nucleus server address
    into templates/nginx.conf and overwrites the system nginx config.
    """
    logger.info(f'generate_nginx_config: {domain=}')

    nginx_template_path = os.path.join(
        os.getcwd(), 'templates', 'nginx.conf')
    if Path(nginx_template_path).is_file():
        logger.info(f"NGINX template found at: {nginx_template_path}")
    else:
        raise Exception(
            f"ERROR: No NGINX template found at: {nginx_template_path}")

    output_path = '/etc/nginx/nginx.conf'
    if Path(output_path).is_file():
        logger.info(f"NGINX default configuration found at: {output_path}")
    else:
        raise Exception(
            f"ERROR: No NGINX default configuration found at: {output_path}. Verify NGINX installation.")

    # Read, substitute, and write back in one pass.
    with open(nginx_template_path, 'r') as src:
        rendered = src.read().format(PUBLIC_DOMAIN=domain,
                                     NUCLEUS_SERVER_DOMAIN=server_address)

    with open(output_path, 'w') as dst:
        dst.write(rendered)

    logger.info(output_path)
| 2,373 | Python | 24.526881 | 105 | 0.659503 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/setup.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
from setuptools import setup

# Read the README so it can be supplied as the package's long description.
# (Bug fix: it was previously read into `long_description` but never passed
# to setup(), so the value was silently discarded.)
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="Reverse Proxy Tools",
    version="1.0",
    long_description=long_description,
    long_description_content_type="text/markdown",
    py_modules=["rpt"],
    install_requires=["boto3", "python-dotenv", "Click"],
    # Expose the `rpt` console command, backed by rpt_cli:main.
    entry_points="""
        [console_scripts]
        rpt=rpt_cli:main
    """,
)
| 532 | Python | 25.649999 | 73 | 0.657895 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/README.md | # Tools for configuring Nginx Reverse Proxy
The contents of this directory are zipped and then deployed to the reverse proxy server | 132 | Markdown | 43.333319 | 87 | 0.825758 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/rpt/__init__.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
| 210 | Python | 41.199992 | 73 | 0.766667 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/rpt/logger.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import logging

# Root logger configured from the environment. Note the helper functions
# below write via print() rather than this logger (behavior kept as-is).
LOG_LEVEL = os.getenv('LOG_LEVEL', 'DEBUG')
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)


def info(*args):
    """Print an informational message to stdout."""
    print(*args)


def debug(*args):
    """Print a debug message to stdout."""
    print(*args)


def warning(*args):
    """Print a warning message to stdout."""
    print(*args)


def error(*args):
    """Print an error message to stdout."""
    print(*args)
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/templates/acm.yaml | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
---
# ACM for Nitro Enclaves config.
#
# This is an example of setting up ACM, with Nitro Enclaves and nginx.
# You can take this file and then:
# - copy it to /etc/nitro_enclaves/acm.yaml;
# - fill in your ACM certificate ARN in the `certificate_arn` field below;
# - make sure /etc/nginx/nginx.conf is set up to:
# - use the pkcs11 SSL engine, and;
# - include the stanza file configured below (under `NginxStanza`)
# somewhere in the nginx.conf `server` section;
# - start the nitro-enclaves-acm service.
#
# Enclave general configuration
enclave:
# Number of vCPUs to be assigned to the enclave
cpu_count: 2
# Memory (in MiB) to be assigned to the enclave
memory_mib: 256
tokens:
# A label for this PKCS#11 token
- label: nginx-acm-token
# Configure a managed token, sourced from an ACM certificate.
source:
Acm:
# The certificate ARN
# Note: this certificate must have been associated with the
# IAM role assigned to the instance on which ACM for
# Nitro Enclaves is run.
certificate_arn: "{cert_arn}"
target:
NginxStanza:
# Path to the nginx stanza to be written by the ACM service whenever
# the certificate configuration changes (e.g. after a certificate renewal).
# This file must be included from the main nginx config `server` section,
# as it will contain the TLS nginx configuration directives.
path: /etc/pki/nginx/nginx-acm.conf
# Stanza file owner (i.e. the user nginx is configured to run as).
user: nginx
| 1,689 | YAML | 39.238094 | 83 | 0.68206 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/customResources/reverseProxyConfig/index.py | import os
import logging
import json
from crhelper import CfnResource
import aws_utils.ssm as ssm
import aws_utils.ec2 as ec2
import config.reverseProxy as config
LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
helper = CfnResource(
json_logging=False, log_level="DEBUG", boto_level="CRITICAL"
)
@helper.create
def create(event, context):
    """CloudFormation Create handler: push proxy config to the ASG instances."""
    logger.info("Create Event: %s", json.dumps(event, indent=2))
    props = event["ResourceProperties"]
    response = update_config(
        props["STACK_NAME"],
        props["ARTIFACTS_BUCKET_NAME"],
        props["FULL_DOMAIN"],
        props["RP_AUTOSCALING_GROUP_NAME"],
    )
    logger.info("Run Command Results: %s", json.dumps(response, indent=2))
@helper.update
def update(event, context):
    """CloudFormation Update handler: re-apply proxy config to the ASG instances."""
    logger.info("Update Event: %s", json.dumps(event, indent=2))
    props = event["ResourceProperties"]
    response = update_config(
        props["STACK_NAME"],
        props["ARTIFACTS_BUCKET_NAME"],
        props["FULL_DOMAIN"],
        props["RP_AUTOSCALING_GROUP_NAME"],
    )
    logger.info("Run Command Results: %s", json.dumps(response, indent=2))
def update_config(
    stack_name,
    artifacts_bucket_name,
    full_domain,
    rp_autoscaling_group_name
):
    """Generate the reverse-proxy nginx config and apply it via SSM.

    Looks up the Nucleus EC2 instance by its Name tag, renders the proxy
    configuration commands, and runs them on every instance in the reverse
    proxy auto scaling group.

    Returns a list of SSM run-command responses, or None when the auto
    scaling group has no instances yet.
    """
    # get nucleus main instance id
    try:
        nucleus_instances = ec2.get_instances_by_tag(
            "Name", f"{stack_name}/NucleusServer")
    except Exception as e:
        raise Exception(
            f"Failed to get nucleus instances by name. {e}")

    # Robustness fix: raise a clear error instead of an IndexError below
    # when no instance carries the expected Name tag yet.
    if not nucleus_instances:
        raise Exception(
            f"No Nucleus instances found with Name tag {stack_name}/NucleusServer")

    logger.info(f"Nucleus Instances: {nucleus_instances}")

    # get nucleus main hostname
    nucleus_hostname = ec2.get_instance_private_dns_name(nucleus_instances[0])
    logger.info(f"Nucleus Hostname: {nucleus_hostname}")

    # generate config for reverse proxy servers
    try:
        commands = config.get_config(
            artifacts_bucket_name, nucleus_hostname, full_domain)
        logger.debug(commands)
    except Exception as e:
        raise Exception(f"Failed to get Reverse Proxy config. {e}")

    # get reverse proxy instance ids
    rp_instances = ec2.get_autoscaling_instance(rp_autoscaling_group_name)
    if rp_instances is None:
        return None
    logger.info(rp_instances)

    # run the config commands on every reverse proxy instance
    response = []
    for instance_id in rp_instances:
        result = ssm.run_commands(
            instance_id, commands, document="AWS-RunShellScript"
        )
        response.append(result)

    return response
@helper.delete
def delete(event, context):
    """CloudFormation Delete handler: nothing to tear down; just log the event."""
    logger.info("Delete Event: %s", json.dumps(event, indent=2))
def handler(event, context):
    """Lambda entry point; delegates to the crhelper custom-resource dispatcher."""
    helper(event, context)
| 2,776 | Python | 26.495049 | 78 | 0.667147 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/customResources/nucleusServerConfig/index.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import logging
import json
from crhelper import CfnResource
import aws_utils.ssm as ssm
import aws_utils.sm as sm
import config.nucleus as config
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
helper = CfnResource(json_logging=False, log_level="DEBUG",
boto_level="CRITICAL")
@helper.create
def create(event, context):
    """CloudFormation Create handler: configure the Nucleus server over SSM."""
    logger.info("Create Event: %s", json.dumps(event, indent=2))

    props = event["ResourceProperties"]
    instance_id = props["instanceId"]
    proxy_domain = props["reverseProxyDomain"]
    bucket = props["artifactsBucket"]
    build = props["nucleusBuild"]
    main_secret_arn = props["ovMainLoginSecretArn"]
    service_secret_arn = props["ovServiceLoginSecretArn"]

    response = update_nucleus_config(
        instance_id,
        bucket,
        proxy_domain,
        build,
        main_secret_arn,
        service_secret_arn,
    )
    logger.info("Run Command Results: %s", json.dumps(response, indent=2))
@helper.update
def update(event, context):
    """CloudFormation Update handler: re-apply the Nucleus configuration."""
    logger.info("Update Event: %s", json.dumps(event, indent=2))

    props = event["ResourceProperties"]
    instance_id = props["instanceId"]
    proxy_domain = props["reverseProxyDomain"]
    bucket = props["artifactsBucket"]
    build = props["nucleusBuild"]
    main_secret_arn = props["ovMainLoginSecretArn"]
    service_secret_arn = props["ovServiceLoginSecretArn"]

    response = update_nucleus_config(
        instance_id,
        bucket,
        proxy_domain,
        build,
        main_secret_arn,
        service_secret_arn,
    )
    logger.info("Run Command Results: %s", json.dumps(response, indent=2))
def update_nucleus_config(
    instanceId,
    artifactsBucket,
    reverseProxyDomain,
    nucleusBuild,
    ovMainLoginSecretArn,
    ovServiceLoginSecretArn,
):
    """Build the Nucleus configuration commands and run them on the instance.

    Resolves the admin/service passwords from Secrets Manager, renders the
    configuration command list, and executes it via SSM Run Command.

    Returns the SSM run-command response.
    """
    ovMainLoginSecret = sm.get_secret(ovMainLoginSecretArn)
    ovServiceLoginSecret = sm.get_secret(ovServiceLoginSecretArn)

    ovMainLoginPassword = ovMainLoginSecret["password"]
    ovServiceLoginPassword = ovServiceLoginSecret["password"]

    # generate config commands for the Nucleus server
    try:
        commands = config.get_config(
            artifactsBucket, reverseProxyDomain, nucleusBuild,
            ovMainLoginPassword, ovServiceLoginPassword)
        # NOTE(security): the command list is built from the plaintext
        # passwords; keep it at DEBUG so it stays out of normal log levels.
        logger.debug(commands)
    except Exception as e:
        raise Exception("Failed to get Reverse Proxy config. {}".format(e))

    # Security fix: a print() loop here previously echoed every command --
    # built from the plaintext admin/service passwords -- to CloudWatch logs
    # regardless of log level. That loop has been removed.

    response = ssm.run_commands(
        instanceId, commands, document="AWS-RunShellScript")
    return response
@helper.delete
def delete(event, context):
    """CloudFormation Delete handler: nothing to tear down; just log the event."""
    logger.info("Delete Event: %s", json.dumps(event, indent=2))
def handler(event, context):
    """Lambda entry point; delegates to the crhelper custom-resource dispatcher."""
    helper(event, context)
| 3,303 | Python | 30.169811 | 107 | 0.718438 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/asgLifeCycleHooks/reverseProxy/index.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import boto3
import os
import json
import logging
import traceback
from botocore.exceptions import ClientError
import aws_utils.ssm as ssm
import aws_utils.r53 as r53
import aws_utils.ec2 as ec2
import config.reverseProxy as config
logger = logging.getLogger()
logger.setLevel(logging.INFO)
autoscaling = boto3.client("autoscaling")
ARTIFACTS_BUCKET = os.environ["ARTIFACTS_BUCKET"]
NUCLEUS_ROOT_DOMAIN = os.environ["NUCLEUS_ROOT_DOMAIN"]
NUCLEUS_DOMAIN_PREFIX = os.environ["NUCLEUS_DOMAIN_PREFIX"]
NUCLEUS_SERVER_ADDRESS = os.environ["NUCLEUS_SERVER_ADDRESS"]
def send_lifecycle_action(event, result):
    """Report CONTINUE/ABANDON back to the Auto Scaling lifecycle hook.

    Raises Exception (wrapping the ClientError) when the completion call fails.
    """
    detail = event["detail"]
    try:
        response = autoscaling.complete_lifecycle_action(
            LifecycleHookName=detail["LifecycleHookName"],
            AutoScalingGroupName=detail["AutoScalingGroupName"],
            LifecycleActionToken=detail["LifecycleActionToken"],
            LifecycleActionResult=result,
            InstanceId=detail["EC2InstanceId"],
        )
        logger.info(response)
    except ClientError as e:
        message = f"Error completing lifecycle action: {e}"
        logger.error(message)
        raise Exception(message)
def update_nginix_config(
    instanceId, artifactsBucket, nucleusServerAddress, domain
):
    """Render the reverse-proxy nginx config and apply it over SSM.

    (The name keeps the historical 'nginix' spelling; callers depend on it.)
    Returns the SSM run-command response.
    """
    # generate config for reverse proxy servers
    try:
        commands = config.get_config(
            artifactsBucket, nucleusServerAddress, domain)
        logger.debug(commands)
    except Exception as e:
        raise Exception("Failed to get Reverse Proxy config. {}".format(e))

    return ssm.run_commands(
        instanceId, commands, document="AWS-RunShellScript"
    )
def handler(event, context):
    """EventBridge entry point for reverse-proxy ASG lifecycle transitions.

    On instance launch, pushes the nginx configuration to the new instance and
    completes the lifecycle hook; on termination, simply completes the hook.
    Any failure is logged and the hook is completed with ABANDON.
    """
    logger.info("Event: %s", json.dumps(event, indent=2))

    detail = event["detail"]
    instance_id = detail["EC2InstanceId"]
    transition = detail["LifecycleTransition"]

    def complete_or_abandon(work):
        # Run `work`; on success signal CONTINUE, otherwise log and ABANDON.
        try:
            work()
            send_lifecycle_action(event, "CONTINUE")
        except Exception as e:
            logger.warning(traceback.format_exc())
            logger.error("Error running command: {}".format(e))
            send_lifecycle_action(event, "ABANDON")

    if transition == "autoscaling:EC2_INSTANCE_LAUNCHING":
        complete_or_abandon(lambda: update_nginix_config(
            instance_id,
            ARTIFACTS_BUCKET,
            NUCLEUS_SERVER_ADDRESS,
            f"{NUCLEUS_DOMAIN_PREFIX}.{NUCLEUS_ROOT_DOMAIN}",
        ))
    elif transition == "autoscaling:EC2_INSTANCE_TERMINATING":
        complete_or_abandon(lambda: None)

    logger.info("Execution Complete")
    return
| 3,116 | Python | 28.40566 | 75 | 0.662067 |