diff --git a/README.md b/README.md
index ed7162c..25b2853 100644
--- a/README.md
+++ b/README.md
@@ -56,12 +56,12 @@ This package is powered by [NVIDIA Isaac Transport for ROS (NITROS)](https://dev
## Performance
-| Sample Graph | Input Size | AGX Orin | Orin NX | x86_64 w/ RTX 4060 Ti |
-|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| [DNN Stereo Disparity Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_ess_benchmark/scripts/isaac_ros_ess_node.py)<br>Full | 576p | [96.5 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-agx_orin.json)<br>13 ms @ 30Hz | [41.2 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-orin_nx.json)<br>27 ms @ 30Hz | [224 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-nuc_4060ti.json)<br>5.5 ms @ 30Hz |
-| [DNN Stereo Disparity Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_ess_benchmark/scripts/isaac_ros_light_ess_node.py)<br>Light | 288p | [276 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_node-agx_orin.json)<br>5.9 ms @ 30Hz | [134 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_node-orin_nx.json)<br>10 ms @ 30Hz | [350 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_node-nuc_4060ti.json)<br>2.4 ms @ 30Hz |
-| [DNN Stereo Disparity Graph](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_ess_benchmark/scripts/isaac_ros_ess_graph.py)<br>Full | 576p | [89.4 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-agx_orin.json)<br>5.4 ms @ 30Hz | [36.8 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-orin_nx.json)<br>36 ms @ 30Hz | [215 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-nuc_4060ti.json)<br>3.7 ms @ 30Hz |
-| [DNN Stereo Disparity Graph](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_ess_benchmark/scripts/isaac_ros_light_ess_graph.py)<br>Light | 288p | [247 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_graph-agx_orin.json)<br>5.9 ms @ 30Hz | [122 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_graph-orin_nx.json)<br>8.5 ms @ 30Hz | [350 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_graph-nuc_4060ti.json)<br>6.1 ms @ 30Hz |
+| Sample Graph | Input Size | AGX Orin | Orin NX | x86_64 w/ RTX 4090 |
+|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| [DNN Stereo Disparity Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_ess_benchmark/scripts/isaac_ros_ess_node.py)<br>Full | 576p | [103 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-agx_orin.json)<br>12 ms @ 30Hz | [42.1 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-orin_nx.json)<br>26 ms @ 30Hz | [350 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_node-x86-4090.json)<br>2.3 ms @ 30Hz |
+| [DNN Stereo Disparity Node](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_ess_benchmark/scripts/isaac_ros_light_ess_node.py)<br>Light | 288p | [306 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_node-agx_orin.json)<br>5.6 ms @ 30Hz | [143 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_node-orin_nx.json)<br>9.4 ms @ 30Hz | [350 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_node-x86-4090.json)<br>1.6 ms @ 30Hz |
+| [DNN Stereo Disparity Graph](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_ess_benchmark/scripts/isaac_ros_ess_graph.py)<br>Full | 576p | [33.5 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-agx_orin.json)<br>25 ms @ 30Hz | [35.2 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-orin_nx.json)<br>34 ms @ 30Hz | [350 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_ess_graph-x86-4090.json)<br>5.6 ms @ 30Hz |
+| [DNN Stereo Disparity Graph](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/benchmarks/isaac_ros_ess_benchmark/scripts/isaac_ros_light_ess_graph.py)<br>Light | 288p | [179 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_graph-agx_orin.json)<br>14 ms @ 30Hz | [126 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_graph-orin_nx.json)<br>15 ms @ 30Hz | [350 fps](https://github.com/NVIDIA-ISAAC-ROS/isaac_ros_benchmark/blob/main/results/isaac_ros_light_ess_graph-x86-4090.json)<br>4.4 ms @ 30Hz |
---
@@ -81,4 +81,4 @@ Please visit the [Isaac ROS Documentation](https://nvidia-isaac-ros.github.io/re
## Latest
-Update 2024-09-26: Update for ZED compatibility
+Update 2024-09-26: Updated for ESS 4.1 trained on additional samples
diff --git a/gxf_isaac_ess/CMakeLists.txt b/gxf_isaac_ess/CMakeLists.txt
index 0eb5ca4..0c5521f 100644
--- a/gxf_isaac_ess/CMakeLists.txt
+++ b/gxf_isaac_ess/CMakeLists.txt
@@ -66,4 +66,10 @@ set_target_properties(${PROJECT_NAME} PROPERTIES
# Install the binary file
install(TARGETS ${PROJECT_NAME} DESTINATION share/${PROJECT_NAME}/gxf/lib)
+
+# Embed versioning information into installed files
+ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
+include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
+generate_version_info(${PROJECT_NAME})
+
ament_auto_package(INSTALL_TO_SHARE)
diff --git a/gxf_isaac_ess/gxf/extensions/ess/components/ess_inference.cpp b/gxf_isaac_ess/gxf/extensions/ess/components/ess_inference.cpp
index 7caadc0..228a7c3 100644
--- a/gxf_isaac_ess/gxf/extensions/ess/components/ess_inference.cpp
+++ b/gxf_isaac_ess/gxf/extensions/ess/components/ess_inference.cpp
@@ -53,7 +53,7 @@ gxf_result_t PassthroughComponents(gxf::Entity& output, gxf::Entity& input,
}
*(output_component.value()) = *(maybe_component.value());
} else {
- GXF_LOG_INFO("component %s not found.", name);
+ GXF_LOG_DEBUG("component %s not found.", name);
}
return GXF_SUCCESS;
diff --git a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/Errors.h b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/Errors.h
index b52094a..295a715 100644
--- a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/Errors.h
+++ b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/Errors.h
@@ -1,19 +1,16 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
+/*
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
+ * property and proprietary rights in and to this material, related
+ * documentation and any modifications thereto. Any use, reproduction,
+ * disclosure or distribution of this material and related documentation
+ * without an express license agreement from NVIDIA CORPORATION or
+ * its affiliates is strictly prohibited.
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ */
#pragma once
diff --git a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/IInferenceBackend.h b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/IInferenceBackend.h
index b49fde0..80afc3a 100644
--- a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/IInferenceBackend.h
+++ b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/IInferenceBackend.h
@@ -1,19 +1,16 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
+/*
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
+ * property and proprietary rights in and to this material, related
+ * documentation and any modifications thereto. Any use, reproduction,
+ * disclosure or distribution of this material and related documentation
+ * without an express license agreement from NVIDIA CORPORATION or
+ * its affiliates is strictly prohibited.
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ */
#pragma once
diff --git a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/Inferencer.h b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/Inferencer.h
index 901fb78..1c64c26 100644
--- a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/Inferencer.h
+++ b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/Inferencer.h
@@ -1,19 +1,16 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
+/*
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
+ * property and proprietary rights in and to this material, related
+ * documentation and any modifications thereto. Any use, reproduction,
+ * disclosure or distribution of this material and related documentation
+ * without an express license agreement from NVIDIA CORPORATION or
+ * its affiliates is strictly prohibited.
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ */
#pragma once
diff --git a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTInferencer.cpp b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTInferencer.cpp
index 61a0a1b..920e73a 100644
--- a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTInferencer.cpp
+++ b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTInferencer.cpp
@@ -49,9 +49,8 @@ size_t getDataSize(const std::vector& shape, ChannelType dataType) {
std::error_code TensorRTInferencer::getLayerInfo(LayerInfo& layer, std::string layerName) {
layer.name = layerName;
- layer.index = m_inferenceEngine->getBindingIndex(layerName.c_str());
- auto dim = m_inferenceEngine->getBindingDimensions(layer.index);
- nvinfer1::TensorFormat tensorFormat = m_inferenceEngine->getBindingFormat(layer.index);
+ auto dim = m_inferenceEngine->getTensorShape(layer.name.c_str());
+ nvinfer1::TensorFormat tensorFormat = m_inferenceEngine->getTensorFormat(layer.name.c_str());
std::error_code err;
err = getCVCoreChannelLayoutFromTensorRT(layer.layout, tensorFormat);
@@ -64,7 +63,7 @@ std::error_code TensorRTInferencer::getLayerInfo(LayerInfo& layer, std::string l
}
err = getCVCoreChannelTypeFromTensorRT(layer.dataType,
- m_inferenceEngine->getBindingDataType(layer.index));
+ m_inferenceEngine->getTensorDataType(layer.name.c_str()));
layer.layerSize = getDataSize(layer.shape, layer.dataType);
if (err != make_error_code(ErrorCode::SUCCESS)) {
return ErrorCode::INVALID_ARGUMENT;
@@ -174,16 +173,15 @@ std::error_code TensorRTInferencer::convertModelToEngine(int32_t dla_core,
}
builderConfig->addOptimizationProfile(optimization_profile);
- // Creates TensorRT Engine Plan
- std::unique_ptr<nvinfer1::ICudaEngine> engine(
- builder->buildEngineWithConfig(*network, *builderConfig));
- if (!engine) {
+ // Creates TensorRT Model stream
+ std::unique_ptr<nvinfer1::IHostMemory> model_stream(
+ builder->buildSerializedNetwork(*network, *builderConfig));
+ if (!model_stream) {
GXF_LOG_ERROR("Failed to build TensorRT engine from model %s.", model_file);
return InferencerErrorCode::INVALID_ARGUMENT;
}
- std::unique_ptr<nvinfer1::IHostMemory> model_stream(engine->serialize());
- if (!model_stream || model_stream->size() == 0 || model_stream->data() == nullptr) {
+ if (model_stream->size() == 0 || model_stream->data() == nullptr) {
GXF_LOG_ERROR("Fail to serialize TensorRT Engine.");
return InferencerErrorCode::INVALID_ARGUMENT;
}
@@ -284,13 +282,14 @@ TensorRTInferencer::TensorRTInferencer(const TensorRTInferenceParams& params)
}
m_hasImplicitBatch = m_inferenceEngine->hasImplicitBatchDimension();
- m_bindingsCount = m_inferenceEngine->getNbBindings();
+ m_ioTensorsCount = m_inferenceEngine->getNbIOTensors();
if (!m_hasImplicitBatch) {
- for (size_t i = 0; i < m_bindingsCount; i++) {
- if (m_inferenceEngine->bindingIsInput(i)) {
- nvinfer1::Dims dims_i(m_inferenceEngine->getBindingDimensions(i));
+ for (size_t i = 0; i < m_ioTensorsCount; i++) {
+ const char* name = m_inferenceEngine->getIOTensorName(i);
+ if (m_inferenceEngine->getTensorIOMode(name) == nvinfer1::TensorIOMode::kINPUT) {
+ nvinfer1::Dims dims_i(m_inferenceEngine->getTensorShape(name));
nvinfer1::Dims4 inputDims{1, dims_i.d[1], dims_i.d[2], dims_i.d[3]};
- m_inferenceContext->setBindingDimensions(i, inputDims);
+ m_inferenceContext->setInputShape(name, inputDims);
}
}
}
@@ -299,7 +298,6 @@ TensorRTInferencer::TensorRTInferencer(const TensorRTInferenceParams& params)
if (err != make_error_code(ErrorCode::SUCCESS)) {
throw err;
}
- m_buffers.resize(m_bindingsCount);
}
// Set input layer tensor
@@ -309,7 +307,8 @@ std::error_code TensorRTInferencer::setInput(const TensorBase& trtInputBuffer,
return ErrorCode::INVALID_ARGUMENT;
}
LayerInfo layer = m_modelInfo.inputLayers[inputLayerName];
- m_buffers[layer.index] = trtInputBuffer.getData();
+ m_inferenceContext->setTensorAddress(inputLayerName.c_str(),
+ trtInputBuffer.getData());
return ErrorCode::SUCCESS;
}
@@ -320,7 +319,8 @@ std::error_code TensorRTInferencer::setOutput(TensorBase& trtOutputBuffer,
return ErrorCode::INVALID_ARGUMENT;
}
LayerInfo layer = m_modelInfo.outputLayers[outputLayerName];
- m_buffers[layer.index] = trtOutputBuffer.getData();
+ m_inferenceContext->setTensorAddress(outputLayerName.c_str(),
+ trtOutputBuffer.getData());
return ErrorCode::SUCCESS;
}
@@ -334,18 +334,18 @@ ModelMetaData TensorRTInferencer::getModelMetaData() const {
std::error_code TensorRTInferencer::infer(size_t batchSize) {
bool err = true;
if (!m_hasImplicitBatch) {
- size_t bindingsCount = m_inferenceEngine->getNbBindings();
- for (size_t i = 0; i < bindingsCount; i++) {
- if (m_inferenceEngine->bindingIsInput(i)) {
- nvinfer1::Dims dims_i(m_inferenceEngine->getBindingDimensions(i));
- nvinfer1::Dims4 inputDims{static_cast<int32_t>(batchSize), dims_i.d[1],
- dims_i.d[2], dims_i.d[3]};
- m_inferenceContext->setBindingDimensions(i, inputDims);
+ size_t ioTensorsCount = m_inferenceEngine->getNbIOTensors();
+ for (size_t i = 0; i < ioTensorsCount; i++) {
+ const char* name = m_inferenceEngine->getIOTensorName(i);
+ if (m_inferenceEngine->getTensorIOMode(name) == nvinfer1::TensorIOMode::kINPUT) {
+ nvinfer1::Dims dims_i(m_inferenceEngine->getTensorShape(name));
+ nvinfer1::Dims4 inputDims{1, dims_i.d[1], dims_i.d[2], dims_i.d[3]};
+ m_inferenceContext->setInputShape(name, inputDims);
}
}
- err = m_inferenceContext->enqueueV2(&m_buffers[0], m_cudaStream, nullptr);
+ err = m_inferenceContext->enqueueV3(m_cudaStream);
} else {
- err = m_inferenceContext->enqueue(m_maxBatchSize, &m_buffers[0], m_cudaStream, nullptr);
+ return InferencerErrorCode::INVALID_ARGUMENT;
}
if (!err) {
return InferencerErrorCode::TENSORRT_INFERENCE_ERROR;
@@ -360,27 +360,14 @@ std::error_code TensorRTInferencer::setCudaStream(cudaStream_t cudaStream) {
}
std::error_code TensorRTInferencer::unregister(std::string layerName) {
- size_t index;
- if (m_modelInfo.outputLayers.find(layerName) != m_modelInfo.outputLayers.end()) {
- index = m_modelInfo.outputLayers[layerName].index;
- } else if (m_modelInfo.inputLayers.find(layerName) != m_modelInfo.inputLayers.end()) {
- index = m_modelInfo.inputLayers[layerName].index;
- } else {
- return ErrorCode::INVALID_ARGUMENT;
- }
- m_buffers[index] = nullptr;
return ErrorCode::SUCCESS;
}
std::error_code TensorRTInferencer::unregister() {
- for (size_t i = 0; i < m_buffers.size(); i++) {
- m_buffers[i] = nullptr;
- }
return ErrorCode::SUCCESS;
}
TensorRTInferencer::~TensorRTInferencer() {
- m_buffers.clear();
}
} // namespace inferencer
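
Note on the change above: the inferencer migrates from the deprecated binding-index TensorRT API (getBindingIndex, setBindingDimensions, the m_buffers vector passed to enqueueV2) to the name-based I/O-tensor API (getTensorShape, setInputShape, setTensorAddress, enqueueV3). As a minimal sketch of the same pattern, the snippet below uses the TensorRT Python bindings (>= 8.5); the engine path, buffer handling, and cuda-python usage are illustrative assumptions, not part of this package.

```python
# Sketch of the name-based TensorRT I/O API the C++ changes migrate to.
import numpy as np
import tensorrt as trt
from cuda import cudart  # cuda-python runtime bindings (assumed available)


def run_engine(engine_path: str):
    logger = trt.Logger(trt.Logger.WARNING)
    with open(engine_path, 'rb') as f:
        engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
    context = engine.create_execution_context()
    _, stream = cudart.cudaStreamCreate()

    buffers = []
    for i in range(engine.num_io_tensors):            # replaces getNbBindings()
        name = engine.get_tensor_name(i)               # names instead of binding indices
        shape = engine.get_tensor_shape(name)
        if engine.get_tensor_mode(name) == trt.TensorIOMode.INPUT:
            context.set_input_shape(name, shape)       # replaces setBindingDimensions()
        dtype = np.dtype(trt.nptype(engine.get_tensor_dtype(name)))
        _, ptr = cudart.cudaMalloc(trt.volume(shape) * dtype.itemsize)
        buffers.append(ptr)
        context.set_tensor_address(name, ptr)          # replaces the m_buffers vector
    context.execute_async_v3(stream)                   # replaces enqueueV2()
    cudart.cudaStreamSynchronize(stream)
    for ptr in buffers:
        cudart.cudaFree(ptr)
```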
diff --git a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTInferencer.h b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTInferencer.h
index 2122a6e..e339817 100644
--- a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTInferencer.h
+++ b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTInferencer.h
@@ -1,19 +1,16 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
+/*
+ * Copyright (c) 2021-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
+ * property and proprietary rights in and to this material, related
+ * documentation and any modifications thereto. Any use, reproduction,
+ * disclosure or distribution of this material and related documentation
+ * without an express license agreement from NVIDIA CORPORATION or
+ * its affiliates is strictly prohibited.
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ */
#pragma once
@@ -77,9 +74,8 @@ class TensorRTInferencer : public IInferenceBackendClient {
nvinfer1::ICudaEngine* m_inferenceEngine;
std::unique_ptr<nvinfer1::ICudaEngine> m_ownedInferenceEngine;
std::unique_ptr<nvinfer1::IExecutionContext> m_inferenceContext;
- size_t m_bindingsCount;
+ size_t m_ioTensorsCount;
ModelMetaData m_modelInfo;
- std::vector<void*> m_buffers;
bool m_hasImplicitBatch;
std::vector m_modelEngineStream;
size_t m_modelEngineStreamSize = 0;
diff --git a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTUtils.h b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTUtils.h
index 220d275..c764a88 100644
--- a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTUtils.h
+++ b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TensorRTUtils.h
@@ -1,19 +1,16 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
+/*
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
+ * property and proprietary rights in and to this material, related
+ * documentation and any modifications thereto. Any use, reproduction,
+ * disclosure or distribution of this material and related documentation
+ * without an express license agreement from NVIDIA CORPORATION or
+ * its affiliates is strictly prohibited.
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ */
#pragma once
#include "NvInferRuntime.h"
diff --git a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonGrpcInferencer.h b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonGrpcInferencer.h
index c0fc599..5e4c6ec 100644
--- a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonGrpcInferencer.h
+++ b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonGrpcInferencer.h
@@ -1,31 +1,28 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
+/*
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
+ * property and proprietary rights in and to this material, related
+ * documentation and any modifications thereto. Any use, reproduction,
+ * disclosure or distribution of this material and related documentation
+ * without an express license agreement from NVIDIA CORPORATION or
+ * its affiliates is strictly prohibited.
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ */
#pragma once
#ifdef ENABLE_TRITON
-#include <grpc_client.h>
-
#include
#include
#include
#include
+
#include "cv/inferencer/Errors.h"
#include "cv/inferencer/IInferenceBackend.h"
#include "cv/inferencer/Inferencer.h"
+#include "grpc_client.h"
namespace cvcore {
namespace inferencer {
diff --git a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritionUtils.cpp b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonUtils.cpp
similarity index 99%
rename from gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritionUtils.cpp
rename to gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonUtils.cpp
index 557bbc7..9c62f49 100644
--- a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritionUtils.cpp
+++ b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonUtils.cpp
@@ -18,6 +18,7 @@
#include
#include
+
#include "gems/dnn_inferencer/inferencer/TritonUtils.h"
namespace cvcore {
diff --git a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonUtils.h b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonUtils.h
index a0d64c9..3d03f13 100644
--- a/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonUtils.h
+++ b/gxf_isaac_ess/gxf/gems/dnn_inferencer/inferencer/TritonUtils.h
@@ -1,25 +1,24 @@
-// SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES
-// Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// SPDX-License-Identifier: Apache-2.0
+/*
+ * Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ *
+ * NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
+ * property and proprietary rights in and to this material, related
+ * documentation and any modifications thereto. Any use, reproduction,
+ * disclosure or distribution of this material and related documentation
+ * without an express license agreement from NVIDIA CORPORATION or
+ * its affiliates is strictly prohibited.
+ *
+ * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES
+ * SPDX-License-Identifier: LicenseRef-NvidiaProprietary
+ */
#pragma once
#ifdef ENABLE_TRITON
-#include <grpc_client.h>
#include
+
+#include "grpc_client.h"
+
#include "cv/core/Tensor.h"
#include "cv/inferencer/Errors.h"
diff --git a/gxf_isaac_ess/package.xml b/gxf_isaac_ess/package.xml
index 89d0f21..0ba2d2a 100644
--- a/gxf_isaac_ess/package.xml
+++ b/gxf_isaac_ess/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
gxf_isaac_ess
- 3.1.0
+ 3.2.0
Disparity GXF extension.
Isaac ROS Maintainers
diff --git a/gxf_isaac_video_buffer_utils/CMakeLists.txt b/gxf_isaac_video_buffer_utils/CMakeLists.txt
index 8965d19..07b76ee 100644
--- a/gxf_isaac_video_buffer_utils/CMakeLists.txt
+++ b/gxf_isaac_video_buffer_utils/CMakeLists.txt
@@ -62,4 +62,10 @@ target_compile_options(${PROJECT_NAME} PUBLIC -fPIC)
# Install the binary file
install(TARGETS ${PROJECT_NAME} DESTINATION share/${PROJECT_NAME}/gxf/lib)
+
+# Embed versioning information into installed files
+ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
+include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
+generate_version_info(${PROJECT_NAME})
+
ament_auto_package(INSTALL_TO_SHARE)
diff --git a/gxf_isaac_video_buffer_utils/package.xml b/gxf_isaac_video_buffer_utils/package.xml
index 352d228..75bc62e 100644
--- a/gxf_isaac_video_buffer_utils/package.xml
+++ b/gxf_isaac_video_buffer_utils/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
gxf_isaac_video_buffer_utils
- 3.1.0
+ 3.2.0
GXF extension containing miscellaneous utility components for video buffer.
Isaac ROS Maintainers
diff --git a/isaac_ros_ess/CMakeLists.txt b/isaac_ros_ess/CMakeLists.txt
index b1b6fdc..bb1b42b 100644
--- a/isaac_ros_ess/CMakeLists.txt
+++ b/isaac_ros_ess/CMakeLists.txt
@@ -55,6 +55,7 @@ if(BUILD_TESTING)
ament_add_gtest(ess_disparity_node_test test/ess_disparity_node_test.cpp)
target_link_libraries(ess_disparity_node_test ess_disparity_node)
target_include_directories(ess_disparity_node_test PUBLIC include/isaac_ros_ess/)
+ target_include_directories(ess_disparity_node_test PUBLIC /usr/src/googletest/googlemock/include/)
ament_target_dependencies(ess_disparity_node_test rclcpp)
ament_target_dependencies(ess_disparity_node_test isaac_ros_nitros)
@@ -68,4 +69,10 @@ if(BUILD_TESTING)
add_launch_test(test/isaac_ros_ess_test_1_16HD_model.py TIMEOUT "300")
endif()
+
+# Embed versioning information into installed files
+ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
+include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
+generate_version_info(${PROJECT_NAME})
+
ament_auto_package(INSTALL_TO_SHARE launch config)
diff --git a/isaac_ros_ess/config/ess_inference.yaml b/isaac_ros_ess/config/ess_inference.yaml
index e6b7442..0ecd211 100644
--- a/isaac_ros_ess/config/ess_inference.yaml
+++ b/isaac_ros_ess/config/ess_inference.yaml
@@ -210,7 +210,6 @@ components:
mask_input: confidence_input
masked_output: masked_output
allocator: allocator
- threshold: 0.35
fill_value_float: -1.0
stream_pool: cuda_stream
- type: nvidia::gxf::MemoryAvailableSchedulingTerm
diff --git a/isaac_ros_ess/isaac_ros_ess/engine_generator.py b/isaac_ros_ess/isaac_ros_ess/engine_generator.py
index 85a512b..c0b6898 100644
--- a/isaac_ros_ess/isaac_ros_ess/engine_generator.py
+++ b/isaac_ros_ess/isaac_ros_ess/engine_generator.py
@@ -23,9 +23,9 @@
class ESSEngineGenerator:
def __init__(self,
- etlt_model,
+ onnx_model,
arch=''):
- self.etlt_model = etlt_model
+ self.onnx_model = onnx_model
if not arch:
self.arch = platform.machine()
print('Architecture of the target platform is {}'.format(self.arch))
@@ -34,27 +34,27 @@ def __init__(self,
def generate(self):
supported_arch = ['x86_64', 'aarch64']
- model_file = os.path.abspath(self.etlt_model)
+ model_file = os.path.abspath(self.onnx_model)
if self.arch not in supported_arch:
print('Unsupported architecture: {}. Supported architectures are:'
'{}'.format(self.arch, supported_arch))
return
- elif os.path.exists(os.path.abspath(self.etlt_model)):
+ elif os.path.exists(os.path.abspath(self.onnx_model)):
plugin = (os.path.dirname(model_file) + '/plugins/' +
self.arch + '/ess_plugins.so')
- engine_file = model_file.replace('.etlt', '.engine')
+ engine_file = model_file.replace('.onnx', '.engine')
- response = subprocess.call('LD_PRELOAD=' + plugin +
- ' tao-converter -k ess -t fp16' +
- ' -e ' + engine_file +
- ' -o output_left,output_conf ' +
- self.etlt_model, shell=True)
+ response = subprocess.call('/usr/src/tensorrt/bin/trtexec' +
+ ' --onnx=' + self.onnx_model +
+ ' --saveEngine=' + engine_file +
+ ' --fp16' +
+ ' --staticPlugins=' + plugin, shell=True)
if response == 0:
print('Engine file for ESS model{} is generated!'
- .format(self.etlt_model))
+ .format(self.onnx_model))
else:
print('Failed to generate engine file for model {}'
- .format(self.etlt_model))
+ .format(self.onnx_model))
else:
- print('ESS etlt model is not found.')
+ print('ESS onnx model is not found.')
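
Usage sketch for the updated ONNX-based generator above; the model path is a placeholder, trtexec is assumed at /usr/src/tensorrt/bin/trtexec as in the script, and plugins/<arch>/ess_plugins.so is expected next to the model.

```python
# Hypothetical invocation of the generator defined in engine_generator.py.
from isaac_ros_ess.engine_generator import ESSEngineGenerator

gen = ESSEngineGenerator(onnx_model='/tmp/dnn_stereo_disparity/ess.onnx')
gen.generate()  # writes /tmp/dnn_stereo_disparity/ess.engine next to the ONNX file
```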
diff --git a/isaac_ros_ess/launch/isaac_ros_argus_ess.launch.py b/isaac_ros_ess/launch/isaac_ros_argus_ess.launch.py
index fb5eeeb..df43d90 100644
--- a/isaac_ros_ess/launch/isaac_ros_argus_ess.launch.py
+++ b/isaac_ros_ess/launch/isaac_ros_argus_ess.launch.py
@@ -30,7 +30,7 @@ def generate_launch_description():
description='The absolute path to the ESS engine plan.'),
DeclareLaunchArgument(
'threshold',
- default_value='0.35',
+ default_value='0.4',
description='Threshold value ranges between 0.0 and 1.0 '
'for filtering disparity with confidence.'),
DeclareLaunchArgument(
@@ -57,8 +57,6 @@ def generate_launch_description():
package='isaac_ros_argus_camera',
plugin='nvidia::isaac_ros::argus::ArgusStereoNode',
parameters=[{
- 'left_optical_frame_name': 'left/image_rect',
- 'right_optical_frame_name': 'right/image_rect',
'module_id': module_id
}],
)
diff --git a/isaac_ros_ess/launch/isaac_ros_argus_ess_wide_fov.launch.py b/isaac_ros_ess/launch/isaac_ros_argus_ess_wide_fov.launch.py
index 81874ff..9cb2376 100644
--- a/isaac_ros_ess/launch/isaac_ros_argus_ess_wide_fov.launch.py
+++ b/isaac_ros_ess/launch/isaac_ros_argus_ess_wide_fov.launch.py
@@ -35,7 +35,7 @@ def generate_launch_description():
description='The absolute path to the ESS engine plan.'),
DeclareLaunchArgument(
'threshold',
- default_value='0.35',
+ default_value='0.4',
description='Threshold value ranges between 0.0 and 1.0 '
'for filtering disparity with confidence.'),
DeclareLaunchArgument(
@@ -46,19 +46,27 @@ def generate_launch_description():
'wide_fov',
default_value='true',
description='Flag to enable wide fov in argus camera.'),
+ DeclareLaunchArgument(
+ 'output_width',
+ default_value='960',
+ description='ESS model output width.'),
+ DeclareLaunchArgument(
+ 'output_height',
+ default_value='576',
+ description='ESS model output height.'),
]
engine_file_path = LaunchConfiguration('engine_file_path')
threshold = LaunchConfiguration('threshold')
module_id = LaunchConfiguration('module_id')
wide_fov = LaunchConfiguration('wide_fov')
+ output_width = LaunchConfiguration('output_width')
+ output_height = LaunchConfiguration('output_height')
argus_stereo_node = ComposableNode(
name='argus_stereo',
package='isaac_ros_argus_camera',
plugin='nvidia::isaac_ros::argus::ArgusStereoNode',
parameters=[{
- 'left_optical_frame_name': 'left/image_rect',
- 'right_optical_frame_name': 'right/image_rect',
'module_id': module_id,
'wide_fov': wide_fov,
'type_negotiation_duration_s': 5,
@@ -149,8 +157,8 @@ def generate_launch_description():
plugin='nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode',
parameters=[{'engine_file_path': engine_file_path,
'threshold': threshold,
- 'input_layer_width': 960,
- 'input_layer_height': 576,
+ 'input_layer_width': output_width,
+ 'input_layer_height': output_height,
'type_negotiation_duration_s': 5}],
remappings=[
('left/image_rect', 'left/image_crop'),
@@ -165,8 +173,8 @@ def generate_launch_description():
package='isaac_ros_image_proc',
plugin='nvidia::isaac_ros::image_proc::ResizeNode',
parameters=[{
- 'output_width': 960,
- 'output_height': 576,
+ 'output_width': output_width,
+ 'output_height': output_height,
'keep_aspect_ratio': False,
'type_negotiation_duration_s': 5,
}],
diff --git a/isaac_ros_ess/launch/isaac_ros_ess.launch.py b/isaac_ros_ess/launch/isaac_ros_ess.launch.py
index 3c3a8b0..c9e03e5 100644
--- a/isaac_ros_ess/launch/isaac_ros_ess.launch.py
+++ b/isaac_ros_ess/launch/isaac_ros_ess.launch.py
@@ -30,12 +30,22 @@ def generate_launch_description():
description='The absolute path to the ESS engine plan.'),
DeclareLaunchArgument(
'threshold',
- default_value='0.35',
+ default_value='0.4',
description='Threshold value ranges between 0.0 and 1.0 '
'for filtering disparity with confidence.'),
+ DeclareLaunchArgument(
+ 'input_layer_width',
+ default_value='960',
+ description='Input layer width'),
+ DeclareLaunchArgument(
+ 'input_layer_height',
+ default_value='576',
+ description='Input layer height'),
]
engine_file_path = LaunchConfiguration('engine_file_path')
threshold = LaunchConfiguration('threshold')
+ input_layer_width = LaunchConfiguration('input_layer_width')
+ input_layer_height = LaunchConfiguration('input_layer_height')
disparity_node = ComposableNode(
name='disparity',
@@ -43,8 +53,8 @@ def generate_launch_description():
plugin='nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode',
parameters=[{'engine_file_path': engine_file_path,
'threshold': threshold,
- 'input_layer_width': 960,
- 'input_layer_height': 576}],
+ 'input_layer_width': input_layer_width,
+ 'input_layer_height': input_layer_height}],
)
container = ComposableNodeContainer(
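
With the new input_layer_width/input_layer_height launch arguments, isaac_ros_ess.launch.py can be reused for engines other than the default 960x576 one (e.g. a 288p light ESS engine). A sketch of including it with overridden arguments; the engine path and resolution values are placeholders.

```python
# Sketch: include isaac_ros_ess.launch.py with the new arguments overridden.
import os

from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource


def generate_launch_description():
    ess_launch_dir = os.path.join(
        get_package_share_directory('isaac_ros_ess'), 'launch')
    return LaunchDescription([
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(
                os.path.join(ess_launch_dir, 'isaac_ros_ess.launch.py')),
            launch_arguments={
                'engine_file_path': '/path/to/light_ess.engine',  # placeholder
                'threshold': '0.4',
                'input_layer_width': '480',   # light ESS input width (assumed)
                'input_layer_height': '288',  # light ESS input height (assumed)
            }.items()),
    ])
```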
diff --git a/isaac_ros_ess/launch/isaac_ros_ess_core.launch.py b/isaac_ros_ess/launch/isaac_ros_ess_core.launch.py
index 41a1f06..2649bec 100644
--- a/isaac_ros_ess/launch/isaac_ros_ess_core.launch.py
+++ b/isaac_ros_ess/launch/isaac_ros_ess_core.launch.py
@@ -100,7 +100,7 @@ def get_launch_actions(interface_specs: Dict[str, Any]) -> \
),
'threshold': DeclareLaunchArgument(
'threshold',
- default_value='0.35'
+ default_value='0.4'
),
'input_layer_width': DeclareLaunchArgument(
'input_layer_width',
diff --git a/isaac_ros_ess/launch/isaac_ros_ess_depth.launch.py b/isaac_ros_ess/launch/isaac_ros_ess_depth.launch.py
index 8a791fb..e6f06a7 100644
--- a/isaac_ros_ess/launch/isaac_ros_ess_depth.launch.py
+++ b/isaac_ros_ess/launch/isaac_ros_ess_depth.launch.py
@@ -48,9 +48,9 @@ def generate_launch_description():
]
left_image_topic = PythonExpression(["'/' + '", camera, "' + '/left/image_compressed'"])
- left_info_topic = PythonExpression(["'/' + '", camera, "' + '/left/camerainfo'"])
+ left_info_topic = PythonExpression(["'/' + '", camera, "' + '/left/camera_info'"])
right_image_topic = PythonExpression(["'/' + '", camera, "' + '/right/image_compressed'"])
- right_info_topic = PythonExpression(["'/' + '", camera, "' + '/right/camerainfo'"])
+ right_info_topic = PythonExpression(["'/' + '", camera, "' + '/right/camera_info'"])
left_raw_image_topic = PythonExpression(["'/' + '", camera, "' + '/left/image_raw'"])
right_raw_image_topic = PythonExpression(["'/' + '", camera, "' + '/right/image_raw'"])
diff --git a/isaac_ros_ess/launch/isaac_ros_ess_realsense.launch.py b/isaac_ros_ess/launch/isaac_ros_ess_realsense.launch.py
index 5166cb3..af30672 100644
--- a/isaac_ros_ess/launch/isaac_ros_ess_realsense.launch.py
+++ b/isaac_ros_ess/launch/isaac_ros_ess_realsense.launch.py
@@ -34,7 +34,7 @@ def generate_launch_description():
description='The absolute path to the ESS engine plan.'),
DeclareLaunchArgument(
'threshold',
- default_value='0.35',
+ default_value='0.4',
description='Threshold value ranges between 0.0 and 1.0 '
'for filtering disparity with confidence.'),
]
diff --git a/isaac_ros_ess/package.xml b/isaac_ros_ess/package.xml
index 8ad0b75..b78bff3 100644
--- a/isaac_ros_ess/package.xml
+++ b/isaac_ros_ess/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
isaac_ros_ess
- 3.1.0
+ 3.2.0
DNN Stereo Disparity Network for Isaac ROS
Isaac ROS Maintainers
diff --git a/isaac_ros_ess/scripts/generate_engine.py b/isaac_ros_ess/scripts/generate_engine.py
index 7dc4d59..9bfc2eb 100755
--- a/isaac_ros_ess/scripts/generate_engine.py
+++ b/isaac_ros_ess/scripts/generate_engine.py
@@ -26,8 +26,8 @@
def get_args():
parser = argparse.ArgumentParser(
- description='ESS model engine generator with tao-converter')
- parser.add_argument('--etlt_model', default='', help='ESS etlt model.')
+ description='ESS model engine generator with trtexec')
+ parser.add_argument('--onnx_model', default='', help='ESS onnx model.')
parser.add_argument('--arch',
default='x86_64',
help='Architecture of the target platform.'
@@ -37,7 +37,8 @@ def get_args():
def main():
args = get_args()
- gen = ESSEngineGenerator(etlt_model=args.etlt_model, arch=args.arch)
+ print('Generating ESS engine for model: {}'.format(args.onnx_model))
+ gen = ESSEngineGenerator(onnx_model=args.onnx_model, arch=args.arch)
gen.generate()
diff --git a/isaac_ros_ess/src/ess_disparity_node.cpp b/isaac_ros_ess/src/ess_disparity_node.cpp
index be38be3..ca0c823 100644
--- a/isaac_ros_ess/src/ess_disparity_node.cpp
+++ b/isaac_ros_ess/src/ess_disparity_node.cpp
@@ -160,7 +160,7 @@ ESSDisparityNode::ESSDisparityNode(const rclcpp::NodeOptions & options)
"input_layers_name", {"input_left", "input_right"})),
output_layers_name_(declare_parameter>(
"output_layers_name", {"output_left", "output_conf"})),
- threshold_(declare_parameter("threshold", 0.35)),
+ threshold_(declare_parameter("threshold", 0.4)),
throttler_skip_(declare_parameter("throttler_skip", 0))
{
RCLCPP_DEBUG(get_logger(), "[ESSDisparityNode] Initializing ESSDisparityNode.");
diff --git a/isaac_ros_ess/test/ess_disparity_node_test.cpp b/isaac_ros_ess/test/ess_disparity_node_test.cpp
index bc7c73f..cbd5c32 100644
--- a/isaac_ros_ess/test/ess_disparity_node_test.cpp
+++ b/isaac_ros_ess/test/ess_disparity_node_test.cpp
@@ -15,75 +15,58 @@
//
// SPDX-License-Identifier: Apache-2.0
-#include <gtest/gtest.h>
+#include <gmock/gmock.h>
#include "ess_disparity_node.hpp"
#include "rclcpp/rclcpp.hpp"
// Objective: to cover code lines where exceptions are thrown
// Approach: send Invalid Arguments for node parameters to trigger the exception
-class ESSDisparityNodeTestSuite : public ::testing::Test
-{
-protected:
- void SetUp() {rclcpp::init(0, nullptr);}
- void TearDown() {(void)rclcpp::shutdown();}
-};
-
-void test_empty_engine_path()
+TEST(ess_disparity_node_test, test_empty_engine_path)
{
+ rclcpp::init(0, nullptr);
rclcpp::NodeOptions options;
- options.arguments(
+ options.append_parameter_override("engine_file_path", "");
+ EXPECT_THROW(
{
- "--ros-args",
- "-p", "engine_file_path:=''",
- });
- try {
- nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode ess_disparity_node(options);
- } catch (const std::invalid_argument & e) {
- std::string err(e.what());
- if (err.find("Empty engine_file_path") != std::string::npos) {
- _exit(1);
+ try {
+ nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode ess_disparity_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Empty engine_file_path"));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
}
- }
- _exit(0);
+ }, std::invalid_argument);
+ rclcpp::shutdown();
}
-void test_image_type()
+TEST(ess_disparity_node_test, test_image_type)
{
+ rclcpp::init(0, nullptr);
rclcpp::NodeOptions options;
- options.arguments(
+ options.append_parameter_override("engine_file_path", "ess.engine");
+ options.append_parameter_override("image_type", "invalid");
+ EXPECT_THROW(
{
- "--ros-args",
- "-p", "engine_file_path:='isaac_ros_dev.engine'",
- "-p", "image_type:='GBR_U8'",
- });
- try {
- nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode ess_disparity_node(options);
- } catch (const std::invalid_argument & e) {
- std::string err(e.what());
- if (err.find("Only support image_type RGB_U8 and BGR_U8") != std::string::npos) {
- _exit(1);
+ try {
+ nvidia::isaac_ros::dnn_stereo_depth::ESSDisparityNode ess_disparity_node(options);
+ } catch (const std::invalid_argument & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("Only support image_type RGB_U8 and BGR_U8."));
+ throw;
+ } catch (const rclcpp::exceptions::InvalidParameterValueException & e) {
+ EXPECT_THAT(e.what(), testing::HasSubstr("No parameter value set"));
+ throw;
}
- }
- _exit(0);
-}
-
-
-TEST_F(ESSDisparityNodeTestSuite, test_empty_engine_path)
-{
- EXPECT_EXIT(test_empty_engine_path(), testing::ExitedWithCode(1), "");
-}
-
-TEST_F(ESSDisparityNodeTestSuite, test_image_type)
-{
- EXPECT_EXIT(test_image_type(), testing::ExitedWithCode(1), "");
+ }, std::invalid_argument);
+ rclcpp::shutdown();
}
int main(int argc, char ** argv)
{
testing::InitGoogleTest(&argc, argv);
- ::testing::GTEST_FLAG(death_test_style) = "threadsafe";
return RUN_ALL_TESTS();
}
diff --git a/isaac_ros_ess_models_install/CMakeLists.txt b/isaac_ros_ess_models_install/CMakeLists.txt
index 4f5b396..c3ba71b 100644
--- a/isaac_ros_ess_models_install/CMakeLists.txt
+++ b/isaac_ros_ess_models_install/CMakeLists.txt
@@ -24,4 +24,10 @@ ament_auto_find_build_dependencies()
install_isaac_ros_asset(install_ess_models)
install(PROGRAMS asset_scripts/install_ess_models.sh DESTINATION lib/${PROJECT_NAME})
+
+# Embed versioning information into installed files
+ament_index_get_resource(ISAAC_ROS_COMMON_CMAKE_PATH isaac_ros_common_cmake_path isaac_ros_common)
+include("${ISAAC_ROS_COMMON_CMAKE_PATH}/isaac_ros_common-version-info.cmake")
+generate_version_info(${PROJECT_NAME})
+
ament_auto_package(INSTALL_TO_SHARE)
diff --git a/isaac_ros_ess_models_install/asset_scripts/install_ess_models.sh b/isaac_ros_ess_models_install/asset_scripts/install_ess_models.sh
index 13a1829..6bcb6e1 100755
--- a/isaac_ros_ess_models_install/asset_scripts/install_ess_models.sh
+++ b/isaac_ros_ess_models_install/asset_scripts/install_ess_models.sh
@@ -14,45 +14,33 @@
set -e
ASSET_NAME="dnn_stereo_disparity"
-VERSION="4.0.0"
+VERSION="4.1.0_onnx"
EULA_URL="https://catalog.ngc.nvidia.com/orgs/nvidia/teams/isaac/models/dnn_stereo_disparity"
MODELS_DIR="${ISAAC_ROS_WS}/isaac_ros_assets/models/${ASSET_NAME}"
ASSET_DIR="${MODELS_DIR}/${ASSET_NAME}_v${VERSION}"
ASSET_INSTALL_PATHS="${ASSET_DIR}/ess.engine ${ASSET_DIR}/light_ess.engine"
-ARCHIVE_NAME="dnn_stereo_disparity_v4.0.0.tar.gz"
+ARCHIVE_NAME="dnn_stereo_disparity_v${VERSION}.tar.gz"
ESS_MODEL_URL="https://api.ngc.nvidia.com/v2/models/org/nvidia/team/isaac/dnn_stereo_disparity/${VERSION}/files?redirect=true&path=${ARCHIVE_NAME}"
source "isaac_ros_asset_eula.sh"
-# Skip if ESS engine files already exist
-ASSET_INSTALL_PATHS_LIST=($ASSET_INSTALL_PATHS)
-if [[ -f ${ASSET_INSTALL_PATHS_LIST[0]} ]] && [[ -f ${ASSET_INSTALL_PATHS_LIST[1]} ]]
-then
- echo "ESS model engine files already exist at \"${ASSET_DIR}\". Skipping download."
- exit 0
-fi
-
# Download and extract model archive
-echo "Downloading ESS etlt file."
+echo "Downloading ESS onnx file."
wget "${ESS_MODEL_URL}" -O "${MODELS_DIR}/${ARCHIVE_NAME}"
tar -xvf "${MODELS_DIR}/${ARCHIVE_NAME}" -C "${MODELS_DIR}"
# Create ESS engine
-echo "Converting ESS etlt file to engine file."
-export LD_PRELOAD="${ASSET_DIR}/plugins/$(uname -m)/ess_plugins.so"
-tao-converter \
- -k ess \
- -t fp16 \
- -e "${ASSET_DIR}/ess.engine" \
- -o output_left,output_conf \
- "${ASSET_DIR}/ess.etlt"
+echo "Converting ESS onnx file to engine file."
+/usr/src/tensorrt/bin/trtexec \
+ --onnx=${ASSET_DIR}/ess.onnx \
+ --saveEngine=${ASSET_DIR}/ess.engine \
+ --fp16 \
+ --staticPlugins=${ASSET_DIR}/plugins/$(uname -m)/ess_plugins.so
# Create ESS-light engine
-echo "Converting ESS light etlt file to engine file."
-tao-converter \
- -k ess \
- -t fp16 \
- -e "${ASSET_DIR}/light_ess.engine" \
- -o output_left,output_conf \
- "${ASSET_DIR}/light_ess.etlt"
-
+echo "Converting ESS light onnx file to engine file."
+/usr/src/tensorrt/bin/trtexec \
+ --onnx=${ASSET_DIR}/light_ess.onnx \
+ --saveEngine=${ASSET_DIR}/light_ess.engine \
+ --fp16 \
+ --staticPlugins=${ASSET_DIR}/plugins/$(uname -m)/ess_plugins.so
diff --git a/isaac_ros_ess_models_install/package.xml b/isaac_ros_ess_models_install/package.xml
index c6391fd..c617d68 100644
--- a/isaac_ros_ess_models_install/package.xml
+++ b/isaac_ros_ess_models_install/package.xml
@@ -21,7 +21,7 @@ SPDX-License-Identifier: Apache-2.0
isaac_ros_ess_models_install
- 3.1.0
+ 3.2.0
Scripts for installing ESS models
Isaac ROS Maintainers