From cf2ae36752ccabc373b2caf61b0d212f8784661f Mon Sep 17 00:00:00 2001
From: TheNoobInventor
Date: Sat, 4 May 2024 17:58:09 +0100
Subject: [PATCH] Confirm aruco visualizer node tracks marker on robot as it
 moves

---
 lidarbot_aruco/README.md                   |  5 +++
 .../launch/trajectory_visualizer_launch.py | 34 +++++++++++++++++++
 .../aruco_trajectory_visualizer.py         | 25 ++++++--------
 lidarbot_aruco/setup.py                    |  4 +++
 .../scripts/trajectory_visualizer.py       | 18 ++++------
 5 files changed, 59 insertions(+), 27 deletions(-)
 create mode 100644 lidarbot_aruco/launch/trajectory_visualizer_launch.py

diff --git a/lidarbot_aruco/README.md b/lidarbot_aruco/README.md
index 282fc92..a0a2ea0 100644
--- a/lidarbot_aruco/README.md
+++ b/lidarbot_aruco/README.md
@@ -63,6 +63,11 @@ ros2 run usb_cam usb_cam_node_exe --ros-args --params-file ~/dev_ws/src/lidarbot
 ros2 run lidarbot_aruco aruco_trajectory_visualizer_node
 ```
 
+Alternatively, start both the camera driver and the visualizer node with the launch file:
+```bash
+ros2 launch lidarbot_aruco trajectory_visualizer_launch.py
+```
+
 TODO: add gif showing node in action
 
 Move this to main README
diff --git a/lidarbot_aruco/launch/trajectory_visualizer_launch.py b/lidarbot_aruco/launch/trajectory_visualizer_launch.py
new file mode 100644
index 0000000..a69730b
--- /dev/null
+++ b/lidarbot_aruco/launch/trajectory_visualizer_launch.py
@@ -0,0 +1,34 @@
+# Launch file to start the webcam usb driver and aruco trajectory visualizer node
+# to track the robot as it moves
+
+import os
+from ament_index_python.packages import get_package_share_directory
+
+from launch import LaunchDescription
+from launch_ros.actions import Node
+
+
+def generate_launch_description():
+
+    usb_cam_params = os.path.join(
+        get_package_share_directory("lidarbot_aruco"), "config", "params_1.yaml"
+    )
+
+    usb_cam_node = Node(
+        package="usb_cam",
+        executable="usb_cam_node_exe",
+        parameters=[usb_cam_params],
+    )
+
+    aruco_visualizer_node = Node(
+        package="lidarbot_aruco",
+        executable="aruco_trajectory_visualizer_node",
+        output="screen",
+    )
+
+    return LaunchDescription(
+        [
+            usb_cam_node,
+            aruco_visualizer_node,
+        ]
+    )
diff --git a/lidarbot_aruco/lidarbot_aruco/aruco_trajectory_visualizer.py b/lidarbot_aruco/lidarbot_aruco/aruco_trajectory_visualizer.py
index 02ef0cc..53137c0 100755
--- a/lidarbot_aruco/lidarbot_aruco/aruco_trajectory_visualizer.py
+++ b/lidarbot_aruco/lidarbot_aruco/aruco_trajectory_visualizer.py
@@ -8,7 +8,6 @@
 # Import Python libraries
 import cv2
 import numpy as np
-import argparse
 
 # Import ROS2 libraries
 import rclpy
@@ -17,14 +16,6 @@
 from cv_bridge import CvBridge  # Package to convert between ROS and OpenCV Images
 from sensor_msgs.msg import Image  # Image is the message type
 
-# Construct argument parser and parse the arguments
-ap = argparse.ArgumentParser()
-ap.add_argument(
-    "-t", "--type", type=str, default="DICT_4X4_50", help="type of ArUco tag to detect"
-)
-
-args = vars(ap.parse_args())
-
 # The different ArUco dictionaries built into the OpenCV library
 ARUCO_DICT = {
     "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
@@ -49,7 +40,7 @@ class ArucoNode(Node):
 
     def __init__(self):
-        super().__init__("aruco_node")
+        super().__init__("aruco_visualizer_node")
 
         # Declare parameters
         self.declare_parameter("aruco_dictionary_name", "DICT_4X4_50")
@@ -83,7 +74,9 @@ def __init__(self):
         # Check that we have a valid ArUco marker
         if ARUCO_DICT.get(aruco_dictionary_name, None) is None:
             self.get_logger().info(
-                "[INFO] ArUCo tag of '{}' is not supported".format(args["type"])
+                "[INFO] ArUCo tag of '{}' is not supported".format(
+                    aruco_dictionary_name
+                )
             )
 
         # Load the camera parameters from the saved file
@@ -98,7 +91,9 @@ def __init__(self):
         self.get_logger().info(
             "[INFO] detecting '{}' markers...".format(aruco_dictionary_name)
         )
-        self.arucoDict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[args["type"]])
+        self.arucoDict = cv2.aruco.getPredefinedDictionary(
+            ARUCO_DICT[aruco_dictionary_name]
+        )
         self.arucoParams = cv2.aruco.DetectorParameters()
 
         # Create the subscriber. This subscriber will receive an Image
@@ -184,15 +179,15 @@ def main(args=None):
     rclpy.init(args=args)
 
     # Create the node
-    aruco_node = ArucoNode()
+    aruco_visualizer_node = ArucoNode()
 
     # Spin the node so the callback function is called
-    rclpy.spin(aruco_node)
+    rclpy.spin(aruco_visualizer_node)
 
     # Destroy the node explicitly
     # (optional - otherwise it will be done automatically
     # when the garbage collector destroys the node object)
-    aruco_node.destroy_node()
+    aruco_visualizer_node.destroy_node()
 
     # Shutdown the ROS client library for Python
     rclpy.shutdown()
diff --git a/lidarbot_aruco/setup.py b/lidarbot_aruco/setup.py
index 714fa17..4110c48 100644
--- a/lidarbot_aruco/setup.py
+++ b/lidarbot_aruco/setup.py
@@ -11,6 +11,10 @@
     data_files=[
         ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
         ("share/" + package_name, ["package.xml"]),
+        (
+            os.path.join("share", package_name, "launch"),
+            glob(os.path.join("launch", "*launch.[pxy][yma]*")),
+        ),
         (
             os.path.join("share", package_name, "config"),
             glob(os.path.join("config", "*yaml")),
diff --git a/lidarbot_navigation/scripts/trajectory_visualizer.py b/lidarbot_navigation/scripts/trajectory_visualizer.py
index cc4e65d..48c1fd9 100755
--- a/lidarbot_navigation/scripts/trajectory_visualizer.py
+++ b/lidarbot_navigation/scripts/trajectory_visualizer.py
@@ -3,6 +3,8 @@
 # Node to visualize the robot trajectory in Rviz.
 # Adapted from https://github.com/RBinsonB/trajectory_visualization
 
+# WIP
+
 import rclpy
 from rclpy.node import Node
 
@@ -22,7 +24,6 @@ def __init__(self, name):
         self.max_poses_param = self.declare_parameter("max_poses", 1000).value
         self.threshold_param = self.declare_parameter("threshold", 0.001).value
         self.frame_id_param = self.declare_parameter("frame_id", "map").value
-        # self.frame_id_param = self.declare_parameter("frame_id", "odom").value
 
         self.trajectory_path_msg = Path()
         self.previous_pose_position = Point()
@@ -34,6 +35,8 @@ def __init__(self, name):
         self.odom_sub = self.create_subscription(
             Path,
             "/transformed_global_plan",
+            # Odometry,
+            # /odometry/filtered,
             self.odom_callback,
             10,
         )
@@ -43,7 +46,7 @@ def odom_callback(self, odom_msg):
 
         # Process message position and add it to path
         # self.publish_trajectory_path(odom_msg.pose.pose.position)
-        self.publish_trajectory_path(odom_msg.pose.position)
+        self.publish_trajectory_path(odom_msg.poses[0].pose.position)
 
     # Add pose and publish trajectory path message
     def publish_trajectory_path(self, position):
@@ -55,21 +58,12 @@ def publish_trajectory_path(self, position):
         # Add current pose to path
         self.trajectory_path_msg.header.stamp = self.get_clock().now().to_msg()
-        # self.trajectory_path_msg.header.stamp.sec = (
-        #     self.get_clock().now().to_msg().sec()
-        # )
-        #
-        # self.trajectory_path_msg.header.stamp.nanosec = (
-        #     self.get_clock().now().to_msg().nanosec()
-        # )
-
         self.trajectory_path_msg.header.frame_id = self.frame_id_param
 
         pose_stamped_msg = PoseStamped()
         pose_stamped_msg.header.stamp = self.get_clock().now().to_msg()
         pose_stamped_msg.pose.position.x = position.x
         pose_stamped_msg.pose.position.y = position.y
-        pose_stamped_msg.pose.orientation.z = position.orientation.z
-        pose_stamped_msg.pose.orientation.w = position.orientation.w
+        pose_stamped_msg.pose.orientation.w = 1.0
 
         # If max number of poses in path has not been reach, just add pose to message
         if len(self.trajectory_path_msg.poses) < self.max_poses_param:
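
Note: with argparse removed, the marker dictionary is now selected through the `aruco_dictionary_name` ROS 2 parameter declared in `ArucoNode` (default `DICT_4X4_50`), so it can be overridden per launch. Below is a minimal sketch, not part of this patch, of how the node entry in the new `trajectory_visualizer_launch.py` could pass a different dictionary; the value must be a key in the node's `ARUCO_DICT` table, and `DICT_5X5_100` is only an assumed example.

```python
# Sketch only (not in this patch): override the ArUco dictionary used by the
# visualizer node. Drop-in replacement for the aruco_visualizer_node definition
# in trajectory_visualizer_launch.py. "DICT_5X5_100" is an assumed key and must
# exist in the node's ARUCO_DICT table.
aruco_visualizer_node = Node(
    package="lidarbot_aruco",
    executable="aruco_trajectory_visualizer_node",
    output="screen",
    parameters=[{"aruco_dictionary_name": "DICT_5X5_100"}],
)
```

The same override should also work at run time, e.g. `ros2 run lidarbot_aruco aruco_trajectory_visualizer_node --ros-args -p aruco_dictionary_name:=DICT_5X5_100`.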