PeopleNet with TensorRT
Greetings,
According to the documentation, DetectNet models are not supported by isaac_ros_tensor_rt:

"The Isaac ROS TensorRT package is not able to perform inference with DetectNet models at this time."
However, I was able to get PeopleNet working using the ONNX available on the NGC website and my own data.
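For context, the launch file below assumes a model layout roughly like this (these are my own paths, not something the package mandates, so adjust the hard-coded paths to wherever you keep the model):

    ${ISAAC_ROS_WS}/isaac_ros_assets/models/peoplenet/1/labels.txt              # class labels, read at launch
    /workspaces/isaac_ros-dev/assets/models/peoplenet/resnet34_peoplenet.onnx   # the ONNX from NGC
    /workspaces/isaac_ros-dev/assets/models/peoplenet/resnet34_peoplenet.plan   # engine built by TensorRT on first run

Here is the launch file: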
import os
from ament_index_python.packages import get_package_share_directory
import launch
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
INPUT_IMAGE_WIDTH = 960
INPUT_IMAGE_HEIGHT = 540
# Triton
# NETWORK_INPUT_WIDTH = 1200
# NETWORK_INPUT_HEIGHT = 632
# TensorRT
NETWORK_INPUT_WIDTH = 960
NETWORK_INPUT_HEIGHT = 544
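# NOTE: the ResNet-34 PeopleNet model expects a 960x544 input, so the 960x540
# camera image is padded by four rows in the resize node below rather than
# being stretched.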
def generate_launch_description():
    # PeopleNet model
    isaac_ros_ws_path = os.environ.get('ISAAC_ROS_WS', '')
    model_dir_path = os.path.join(isaac_ros_ws_path,
                                  'isaac_ros_assets/models')

    # Read labels from text file
    labels_file_path = f'{model_dir_path}/peoplenet/1/labels.txt'
    with open(labels_file_path, 'r') as fd:
        label_list = fd.read().strip().splitlines()

    config = '/workspaces/isaac_ros-dev/src/isaac_ros_object_detection/isaac_ros_detectnet/config/params.yaml'
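    # For reference, my labels.txt simply lists the three PeopleNet classes,
    # one per line (the decoder maps class indices to these names):
    #   person
    #   bag
    #   face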
    # Resize image
    resize_node = ComposableNode(
        name='resize_node',
        namespace='peoplenet',
        package='isaac_ros_image_proc',
        plugin='nvidia::isaac_ros::image_proc::ResizeNode',
        parameters=[{
            'input_width': INPUT_IMAGE_WIDTH,
            'input_height': INPUT_IMAGE_HEIGHT,
            'output_width': NETWORK_INPUT_WIDTH,
            'output_height': NETWORK_INPUT_HEIGHT,
            'keep_aspect_ratio': True,  # Will be ignored if padding is disabled
            'encoding_desired': 'rgb8',  # PeopleNet expects RGB8
            'disable_padding': False,  # Image will be padded to keep the aspect ratio
            'num_blocks': 40  # Should not be less than 40
        }],
        remappings=[
            ('image', '/image'),
            ('camera_info', '/camera_info')
        ]
    )
    # Image to tensor
    image_to_tensor_node = ComposableNode(
        name='image_to_tensor_node',
        namespace='peoplenet',
        package='isaac_ros_tensor_proc',
        plugin='nvidia::isaac_ros::dnn_inference::ImageToTensorNode',
        parameters=[{
            'scale': True,
            'tensor_name': 'image',
        }],
        remappings=[
            ('image', 'resize/image'),
            ('tensor', 'image_tensor'),
        ]
    )
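    # NOTE: with 'scale': True above, pixel values already arrive in [0, 1],
    # so the zero-mean / unit-stddev normalization below is effectively a
    # pass-through. That matches PeopleNet's training preprocessing
    # (1/255 scaling, no mean subtraction).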
    # Normalize the tensor
    normalize_node = ComposableNode(
        name='normalize_node',
        namespace='peoplenet',
        package='isaac_ros_tensor_proc',
        plugin='nvidia::isaac_ros::dnn_inference::ImageTensorNormalizeNode',
        parameters=[{
            'mean': [0.0, 0.0, 0.0],
            'stddev': [1.0, 1.0, 1.0],
            'input_tensor_name': 'image',
            'output_tensor_name': 'image',
        }],
        remappings=[
            ('tensor', 'image_tensor'),
        ]
    )
    # Interleaved to planar
    interleaved_to_planar_node = ComposableNode(
        name='interleaved_to_planar_node',
        namespace='peoplenet',
        package='isaac_ros_tensor_proc',
        plugin='nvidia::isaac_ros::dnn_inference::InterleavedToPlanarNode',
        parameters=[{
            'input_tensor_shape': [NETWORK_INPUT_HEIGHT, NETWORK_INPUT_WIDTH, 3],
            'num_blocks': 40
        }],
        remappings=[
            ('interleaved_tensor', 'normalized_tensor'),
        ]
    )

    # Reshape
    reshape_node = ComposableNode(
        name='reshape_node',
        namespace='peoplenet',
        package='isaac_ros_tensor_proc',
        plugin='nvidia::isaac_ros::dnn_inference::ReshapeNode',
        parameters=[{
            'output_tensor_name': 'input_tensor',
            'input_tensor_shape': [3, NETWORK_INPUT_HEIGHT, NETWORK_INPUT_WIDTH],
            'output_tensor_shape': [1, 3, NETWORK_INPUT_HEIGHT, NETWORK_INPUT_WIDTH],
            'num_blocks': 40
        }],
        remappings=[
            ('tensor', 'planar_tensor'),
            ('reshaped_tensor', 'tensor_pub')
        ]
    )

    # PeopleNet Inference
    # triton_node = ComposableNode(
    #     name='triton_node',
    #     namespace='peoplenet',
    #     package='isaac_ros_triton',
    #     plugin='nvidia::isaac_ros::dnn_inference::TritonNode',
    #     parameters=[{
    #         'model_name': 'peoplenet',
    #         'model_repository_paths': [model_dir_path],
    #         'input_tensor_names': ['input_tensor'],
    #         'input_binding_names': ['input_1'],
    #         'input_tensor_formats': ['nitros_tensor_list_nchw_rgb_f32'],
    #         'output_tensor_names': ['output_cov', 'output_bbox'],
    #         'output_binding_names': ['output_cov/Sigmoid', 'output_bbox/BiasAdd'],
    #         'output_tensor_formats': ['nitros_tensor_list_nhwc_rgb_f32'],
    #         'log_level': 0
    #     }])

    tensor_rt_node = ComposableNode(
        name='tensor_rt',
        package='isaac_ros_tensor_rt',
        namespace='peoplenet',
        plugin='nvidia::isaac_ros::dnn_inference::TensorRTNode',
        parameters=[{
            'model_file_path': '/workspaces/isaac_ros-dev/assets/models/peoplenet/resnet34_peoplenet.onnx',
            'engine_file_path': '/workspaces/isaac_ros-dev/assets/models/peoplenet/resnet34_peoplenet.plan',
            'output_binding_names': ['output_cov/Sigmoid:0', 'output_bbox/BiasAdd:0'],
            'output_tensor_names': ['output_cov', 'output_bbox'],
            'output_tensor_formats': ['nitros_tensor_list_nhwc_rgb_f32'],
            'input_tensor_names': ['input_tensor'],
            'input_binding_names': ['input_1:0'],
            'input_tensor_formats': ['nitros_tensor_list_nchw_rgb_f32'],
            'verbose': True,
            'force_engine_update': True
        }]
    )
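    # NOTE: the binding names of the ONNX export carry a ':0' suffix
    # ('input_1:0', 'output_cov/Sigmoid:0', ...), unlike the names used by
    # the Triton setup above. I checked them by opening the model in Netron;
    # if your export names them differently, update the bindings accordingly.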
    # PeopleNet Decoder
    detectnet_decoder_node = ComposableNode(
        name='detectnet_decoder_node',
        namespace='peoplenet',
        package='isaac_ros_detectnet',
        plugin='nvidia::isaac_ros::detectnet::DetectNetDecoderNode',
        parameters=[config,
                    {
                        'label_list': label_list
                    }]
    )

    container = ComposableNodeContainer(
        name='peoplenet_container',
        namespace='',
        package='rclcpp_components',
        executable='component_container_mt',
        composable_node_descriptions=[
            resize_node,
            image_to_tensor_node,
            normalize_node,
            interleaved_to_planar_node,
            reshape_node,
            # triton_node,
            tensor_rt_node,
            detectnet_decoder_node],
        output='screen'
    )

    return launch.LaunchDescription([container])
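To sanity-check the pipeline I watched the decoder output with a small subscriber along these lines (just a sketch; the detections topic name is what appeared under the peoplenet namespace on my setup, so check ros2 topic list and adjust it if yours differs):

import rclpy
from rclpy.node import Node
from vision_msgs.msg import Detection2DArray


class DetectionListener(Node):
    """Logs class id, score and bounding-box center for each detection."""

    def __init__(self):
        super().__init__('detection_listener')
        # Topic name as observed on my setup; adjust to whatever
        # 'ros2 topic list' shows for the DetectNet decoder output.
        self.create_subscription(
            Detection2DArray,
            '/peoplenet/detectnet/detections_output',
            self.callback,
            10)

    def callback(self, msg):
        for det in msg.detections:
            for result in det.results:
                # Field layout follows the Humble version of vision_msgs.
                self.get_logger().info(
                    f'{result.hypothesis.class_id}: '
                    f'{result.hypothesis.score:.2f} at '
                    f'({det.bbox.center.position.x:.0f}, '
                    f'{det.bbox.center.position.y:.0f})')


def main():
    rclpy.init()
    rclpy.spin(DetectionListener())
    rclpy.shutdown()


if __name__ == '__main__':
    main()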
Is there any specific reason this limitation is mentioned in the documentation? Are there problems to be expected for long-term deployment?