From 2609e4b51eda37a7acc1d007c6b24d49ac5f315a Mon Sep 17 00:00:00 2001 From: a-maumau Date: Mon, 16 Dec 2024 16:41:20 +0900 Subject: [PATCH 01/20] add camera_projection class and projection cache Signed-off-by: a-maumau --- .../CMakeLists.txt | 1 + .../README.md | 6 + .../config/fusion_common.param.yaml | 9 +- .../camera_projection.hpp | 88 ++++++++ .../fusion_node.hpp | 11 +- .../pointpainting_fusion/node.hpp | 3 - .../roi_cluster_fusion/node.hpp | 1 - .../roi_detected_object_fusion/node.hpp | 7 +- .../roi_pointcloud_fusion/node.hpp | 2 +- .../segmentation_pointcloud_fusion/node.hpp | 2 +- .../utils/utils.hpp | 4 - .../src/camera_projection.cpp | 196 ++++++++++++++++++ .../src/fusion_node.cpp | 46 +++- .../src/pointpainting_fusion/node.cpp | 58 +++--- .../src/roi_cluster_fusion/node.cpp | 31 ++- .../src/roi_detected_object_fusion/node.cpp | 38 ++-- .../src/roi_pointcloud_fusion/node.cpp | 72 ++++--- .../segmentation_pointcloud_fusion/node.cpp | 21 +- .../src/utils/utils.cpp | 14 -- 19 files changed, 448 insertions(+), 162 deletions(-) create mode 100644 perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp create mode 100644 perception/autoware_image_projection_based_fusion/src/camera_projection.cpp diff --git a/perception/autoware_image_projection_based_fusion/CMakeLists.txt b/perception/autoware_image_projection_based_fusion/CMakeLists.txt index 73d305d2ab2a8..7c8160d6fe1b7 100644 --- a/perception/autoware_image_projection_based_fusion/CMakeLists.txt +++ b/perception/autoware_image_projection_based_fusion/CMakeLists.txt @@ -16,6 +16,7 @@ endif() # Build non-CUDA dependent nodes ament_auto_add_library(${PROJECT_NAME} SHARED + src/camera_projection.cpp src/fusion_node.cpp src/debugger.cpp src/utils/geometry.cpp diff --git a/perception/autoware_image_projection_based_fusion/README.md b/perception/autoware_image_projection_based_fusion/README.md index c976697b0396d..dcf35e45bbd9d 100644 --- a/perception/autoware_image_projection_based_fusion/README.md +++ b/perception/autoware_image_projection_based_fusion/README.md @@ -66,6 +66,12 @@ ros2 launch autoware_image_projection_based_fusion pointpainting_fusion.launch.x The rclcpp::TimerBase timer could not break a for loop, therefore even if time is out when fusing a roi msg at the middle, the program will run until all msgs are fused. +### Approximate camera projection + +For algorithms like `pointpainting_fusion`, the computation required to project points onto an unrectified (raw) image can be substantial. To address this, an option is provided to reduce the computational load. Set the [`approximate_camera_projection parameter`](config/fusion_common.param.yaml) to `true` for each camera (ROIs). If the corresponding `point_project_to_unrectified_image` parameter is also set to true, the projections will be pre-cached. + +The cached projections are calculated using a grid, with the grid size specified by the `approximation_grid_width_size` and `approximation_grid_height_size` parameters in the [configuration file](config/fusion_common.param.yaml). The centers of the grid are used as the projected points. 
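+
+A minimal, self-contained sketch of this lookup is shown below (illustrative only, not the node's actual API): a rectified pixel is snapped to the nearest grid center, and the unrectified position precomputed for that cell is returned. The helper name `lookup_unrectified` and its parameter list are assumptions made for the example; the real logic lives in `CameraProjection::calcRawImageProjectedPointWithApproximation`.
+
+```cpp
+#include <cmath>
+#include <cstdint>
+#include <vector>
+
+struct PixelPos { float x; float y; };
+
+// Illustrative sketch of the approximation cache lookup: snap a rectified pixel
+// to its grid center and return the cached unrectified position for that cell.
+// Returns false when the pixel falls outside the image.
+bool lookup_unrectified(
+  const double rect_x, const double rect_y, const uint32_t image_w, const uint32_t image_h,
+  const float grid_w, const float grid_h, const uint32_t grid_x_num,
+  const std::vector<PixelPos> & cache, PixelPos & out)
+{
+  const float half_w = grid_w / 2.0f;
+  const float half_h = grid_h / 2.0f;
+  // round to the nearest grid center
+  const float qx = std::round((rect_x - half_w) / grid_w) * grid_w + half_w;
+  const float qy = std::round((rect_y - half_h) / grid_h) * grid_h + half_h;
+  if (qx < 0.0f || qx >= image_w || qy < 0.0f || qy >= image_h) {
+    return false;
+  }
+  // convert the grid center to a flat index into the precomputed cache
+  const int grid_x = static_cast<int>(std::floor(qx / grid_w));
+  const int grid_y = static_cast<int>(std::floor(qy / grid_h));
+  out = cache.at(grid_y * grid_x_num + grid_x);  // filled beforehand via unrectifyPoint()
+  return true;
+}
+```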
+ ### Detail description of each fusion's algorithm is in the following links | Fusion Name | Description | Detail | diff --git a/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml b/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml index 347cb57b484e9..553bb7e29220e 100644 --- a/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml +++ b/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml @@ -4,7 +4,8 @@ timeout_ms: 70.0 match_threshold_ms: 50.0 image_buffer_size: 15 - point_project_to_unrectified_image: false + # projection setting for each ROI whether unrectify image + point_project_to_unrectified_image: [false, false, false, false, false, false] debug_mode: false filter_scope_min_x: -100.0 filter_scope_min_y: -100.0 @@ -13,5 +14,11 @@ filter_scope_max_y: 100.0 filter_scope_max_z: 100.0 + # camera cache setting for each ROI + approximate_camera_projection: [true, true, true, true, true, true] + # grid size in pixels + approximation_grid_width_size: 1.0 + approximation_grid_height_size: 1.0 + # debug parameters publish_processing_time_detail: false diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp new file mode 100644 index 0000000000000..cc66dcd15ce62 --- /dev/null +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp @@ -0,0 +1,88 @@ +// Copyright 2024 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#ifndef AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__CAMERA_PROJECTION_HPP_ +#define AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__CAMERA_PROJECTION_HPP_ + +#define EIGEN_MPL2_ONLY + +#include + +#include + +#include + +#include + +#include + +#include +#include + +namespace autoware::image_projection_based_fusion +{ +struct PixelPos +{ + float x; + float y; +}; + +class CameraProjection +{ +public: + explicit CameraProjection( + const sensor_msgs::msg::CameraInfo & camera_info, + const float grid_width, const float grid_height, + const bool unrectify, + const bool use_approximation + ); + CameraProjection(): grid_w_size_(1.0), grid_h_size_(1.0), unrectify_(false) {}; + void initialize(); + std::function calcImageProjectedPoint; + sensor_msgs::msg::CameraInfo getCameraInfo(); + bool isOutsideHorizontalView(const float px, const float pz); + bool isOutsideVerticalView(const float py, const float pz); + +protected: + bool calcRectifiedImageProjectedPoint(const cv::Point3d & point3d, Eigen::Vector2d & projected_point); + bool calcRawImageProjectedPoint(const cv::Point3d & point3d, Eigen::Vector2d & projected_point); + bool calcRawImageProjectedPointWithApproximation(const cv::Point3d & point3d, Eigen::Vector2d & projected_point); + void initializeCache(); + + sensor_msgs::msg::CameraInfo camera_info_; + uint32_t image_h_, image_w_; + double tan_h_x_, tan_h_y_; + + uint32_t cache_size_; + float grid_w_size_; + float grid_h_size_; + float half_grid_w_size_; + float half_grid_h_size_; + float inv_grid_w_size_; + float inv_grid_h_size_; + uint32_t grid_x_num_; + uint32_t grid_y_num_; + float index_grid_out_h_; + float index_grid_out_w_; + + bool unrectify_; + bool use_approximation_; + + std::unique_ptr projection_cache_; + image_geometry::PinholeCameraModel camera_model_; +}; + +} // namespace autoware::image_projection_based_fusion + +#endif // AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__CAMERA_PROJECTION_HPP_ diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp index 04ecf89faa0d4..d522ef1287847 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp @@ -15,6 +15,7 @@ #ifndef AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__FUSION_NODE_HPP_ #define AUTOWARE__IMAGE_PROJECTION_BASED_FUSION__FUSION_NODE_HPP_ +#include #include #include #include @@ -56,6 +57,7 @@ using tier4_perception_msgs::msg::DetectedObjectsWithFeature; using tier4_perception_msgs::msg::DetectedObjectWithFeature; using PointCloud = pcl::PointCloud; using autoware_perception_msgs::msg::ObjectClassification; +using autoware::image_projection_based_fusion::CameraProjection; template class FusionNode : public rclcpp::Node @@ -86,7 +88,7 @@ class FusionNode : public rclcpp::Node virtual void fuseOnSingleImage( const TargetMsg3D & input_msg, const std::size_t image_id, const Msg2D & input_roi_msg, - const sensor_msgs::msg::CameraInfo & camera_info, TargetMsg3D & output_msg) = 0; + TargetMsg3D & output_msg) = 0; // set args if you need virtual void postprocess(TargetMsg3D & output_msg); @@ -100,11 +102,16 @@ class FusionNode : public rclcpp::Node tf2_ros::Buffer tf_buffer_; tf2_ros::TransformListener tf_listener_; - bool point_project_to_unrectified_image_{false}; + std::vector 
point_project_to_unrectified_image_{false}; // camera_info std::map camera_info_map_; std::vector::SharedPtr> camera_info_subs_; + // camera projection + std::vector camera_projectors_; + std::vector approx_camera_projection_; + float approx_grid_w_size_; + float approx_grid_h_size_; rclcpp::TimerBase::SharedPtr timer_; double timeout_ms_{}; diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp index 8c0e2ed78fc6c..cd6cd87976522 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/pointpainting_fusion/node.hpp @@ -53,15 +53,12 @@ class PointPaintingFusionNode void fuseOnSingleImage( const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const std::size_t image_id, const DetectedObjectsWithFeature & input_roi_msg, - const sensor_msgs::msg::CameraInfo & camera_info, sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override; void postprocess(sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) override; rclcpp::Publisher::SharedPtr obj_pub_ptr_; - std::vector tan_h_; // horizontal field of view - int omp_num_threads_{1}; float score_threshold_{0.0}; std::vector class_names_; diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp index be05c0a1c4bc6..903b153af0681 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_cluster_fusion/node.hpp @@ -38,7 +38,6 @@ class RoiClusterFusionNode void fuseOnSingleImage( const DetectedObjectsWithFeature & input_cluster_msg, const std::size_t image_id, const DetectedObjectsWithFeature & input_roi_msg, - const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg) override; std::string trust_object_iou_mode_{"iou"}; diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp index 7103537ec852d..f355fd2129b24 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp @@ -43,12 +43,11 @@ class RoiDetectedObjectFusionNode void fuseOnSingleImage( const DetectedObjects & input_object_msg, const std::size_t image_id, const DetectedObjectsWithFeature & input_roi_msg, - const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjects & output_object_msg) override; + DetectedObjects & output_object_msg) override; std::map generateDetectedObjectRoIs( - const DetectedObjects & input_object_msg, const double image_width, const double image_height, - const Eigen::Affine3d & 
object2camera_affine, - const image_geometry::PinholeCameraModel & pinhole_camera_model); + const DetectedObjects & input_object_msg, const std::size_t & image_id, + const Eigen::Affine3d & object2camera_affine); void fuseObjectsOnImage( const DetectedObjects & input_object_msg, diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp index 9baf754e224a7..0c454aebac42c 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp @@ -50,7 +50,7 @@ class RoiPointCloudFusionNode void fuseOnSingleImage( const PointCloud2 & input_pointcloud_msg, const std::size_t image_id, const DetectedObjectsWithFeature & input_roi_msg, - const sensor_msgs::msg::CameraInfo & camera_info, PointCloud2 & output_pointcloud_msg) override; + PointCloud2 & output_pointcloud_msg) override; bool out_of_scope(const DetectedObjectWithFeature & obj) override; }; diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp index 0bec3195bb402..7454cb7bcd173 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp @@ -61,7 +61,7 @@ class SegmentPointCloudFusionNode : public FusionNode getTransformStamped( const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id, const std::string & source_frame_id, const rclcpp::Time & time); diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp new file mode 100644 index 0000000000000..af17d85989d35 --- /dev/null +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -0,0 +1,196 @@ +// Copyright 2024 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "autoware/image_projection_based_fusion/camera_projection.hpp" + +#include +#include + +#ifdef ROS_DISTRO_GALACTIC +#include +#include +#else +#include +#include +#endif + +namespace autoware::image_projection_based_fusion +{ +CameraProjection::CameraProjection( + const sensor_msgs::msg::CameraInfo & camera_info, const float grid_width, const float grid_height, + const bool unrectify, const bool use_approximation=false +): camera_info_(camera_info), grid_w_size_(grid_width), grid_h_size_(grid_height), unrectify_(unrectify), use_approximation_(use_approximation){ + if (grid_w_size_ <= 0.0 || grid_h_size_ <= 0.0) { + throw std::runtime_error("Both grid_width and grid_height must be > 0.0"); + } + + image_w_ = camera_info.width; + image_h_ = camera_info.height; + + // prepare camera model + camera_model_.fromCameraInfo(camera_info); + + // cache settings + // for shifting to the grid center + half_grid_w_size_ = grid_w_size_ / 2.0; + half_grid_h_size_ = grid_h_size_ / 2.0; + inv_grid_w_size_ = 1/grid_w_size_; + inv_grid_h_size_ = 1/grid_h_size_; + grid_x_num_ = static_cast(std::ceil(image_w_ / grid_w_size_)); + grid_y_num_ = static_cast(std::ceil(image_h_ / grid_h_size_)); + cache_size_ = grid_x_num_*grid_y_num_; + + // for checking the views + // cx/fx + tan_h_x_ = camera_info.k.at(2)/camera_info.k.at(0); + // cy/fy + tan_h_y_ = camera_info.k.at(5)/camera_info.k.at(4); +} + +void CameraProjection::initialize(){ + if (unrectify_) { + if (use_approximation_) { + // create the cache with size of grid center + // store only xy position in float to reduce memory consumption + projection_cache_ = std::make_unique(cache_size_); + initializeCache(); + + calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) { + return this->calcRawImageProjectedPointWithApproximation(point3d, projected_point); + }; + } else { + calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) { + return this->calcRawImageProjectedPoint(point3d, projected_point); + }; + } + } else { + calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) { + return this->calcRectifiedImageProjectedPoint(point3d, projected_point); + }; + } +} + +void CameraProjection::initializeCache(){ + // sample grid centers till the camera height, width to precompute the projection + // + // grid_size + // / + // v + // |---| w + // 0-----------------> + // 0 | . | . | . | + // |___|___|___| + // | . | . | . 
| + // | ^ + // h | | + // v grid center + // + // each pixel will be rounded in this grid center + // edge pixels in the image will be assign to centers that is the outside of the image + + for (float y = half_grid_h_size_; y < image_h_; y += grid_h_size_) { + for (float x = half_grid_w_size_; x < image_w_; x += grid_w_size_) { + const float qx = std::round((x-half_grid_w_size_)*inv_grid_w_size_)*grid_w_size_+half_grid_w_size_; + const float qy = std::round((y-half_grid_h_size_)*inv_grid_h_size_)*grid_h_size_+half_grid_h_size_; + + const int grid_x = static_cast(std::floor(qx / grid_w_size_)); + const int grid_y = static_cast(std::floor(qy / grid_h_size_)); + const int index = (grid_y) * grid_x_num_ + grid_x; + + // precompute projected point + cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(qx, qy)); + projection_cache_[index] = PixelPos{static_cast(raw_image_point.x), static_cast(raw_image_point.y)}; + } + } +} + + +/** + * @brief Calculate a projection of 3D point to rectified image plane 2D point. + * @return Return a boolean indicating whether the projected point is on the image plane. + */ +bool CameraProjection::calcRectifiedImageProjectedPoint( + const cv::Point3d & point3d, Eigen::Vector2d & projected_point +){ + const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d); + + if (rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_){ + return false; + } else { + projected_point << rectified_image_point.x, rectified_image_point.y; + return true; + } +} + +/** + * @brief Calculate a projection of 3D point to raw image plane 2D point. + * @return Return a boolean indicating whether the projected point is on the image plane. + */ +bool CameraProjection::calcRawImageProjectedPoint( + const cv::Point3d & point3d, Eigen::Vector2d & projected_point +){ + const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d); + const cv::Point2d raw_image_point = camera_model_.unrectifyPoint(rectified_image_point); + + if (rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_){ + return false; + } else { + projected_point << raw_image_point.x, raw_image_point.y; + return true; + } +} + +/** + * @brief Calculate a projection of 3D point to raw image plane 2D point with approximation. + * @return Return a boolean indicating whether the projected point is on the image plane. 
+ */ +bool CameraProjection::calcRawImageProjectedPointWithApproximation( + const cv::Point3d & point3d, Eigen::Vector2d & projected_point +){ + const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d); + + // round to a near grid center + const float qx = std::round((rectified_image_point.x-half_grid_w_size_)*inv_grid_w_size_)*grid_w_size_+half_grid_w_size_; + if (qx < 0.0 || qx >= image_w_) { + return false; + } + const float qy = std::round((rectified_image_point.y-half_grid_h_size_)*inv_grid_h_size_)*grid_h_size_+half_grid_h_size_; + if (qy < 0.0 || qy >= image_h_) { + return false; + } + + const int grid_x = static_cast(std::floor(qx / grid_w_size_)); + const int grid_y = static_cast(std::floor(qy / grid_h_size_)); + const int index = grid_y * grid_x_num_ + grid_x; + + projected_point << projection_cache_[index].x, projection_cache_[index].y; + + return true; +} + +sensor_msgs::msg::CameraInfo CameraProjection::getCameraInfo(){ + return camera_info_; +} + +bool CameraProjection::isOutsideHorizontalView(const float px, const float pz){ + // assuming the points' origin is centered on the camera + return pz <= 0.0 || px > tan_h_x_ * pz || px < -tan_h_x_ * pz; +} + +bool CameraProjection::isOutsideVerticalView(const float py, const float pz){ + // assuming the points' origin is centered on the camera + return pz <= 0.0 || py > tan_h_y_ * pz || py < -tan_h_y_ * pz; +} + +} // namespace autoware::image_projection_based_fusion diff --git a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp index bd4d57e45c582..48fdfc74ae32b 100644 --- a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp @@ -18,6 +18,7 @@ #include #include +#include #include @@ -124,6 +125,29 @@ FusionNode::FusionNode( timer_ = rclcpp::create_timer( this, get_clock(), period_ns, std::bind(&FusionNode::timer_callback, this)); + // camera projection settings + camera_projectors_.resize(rois_number_); + point_project_to_unrectified_image_ = + declare_parameter>("point_project_to_unrectified_image"); + approx_camera_projection_ = declare_parameter>("approximate_camera_projection"); + if (rois_number_ != approx_camera_projection_.size()) { + const std::size_t current_size = approx_camera_projection_.size(); + RCLCPP_WARN( + this->get_logger(), + "The number of elements in approximate_camera_projection should be the same as in " + "rois_number. 
" + "It has %zu elements.", + current_size); + if (current_size < rois_number_) { + approx_camera_projection_.resize(rois_number_); + for (std::size_t i = current_size; i < rois_number_; i++) { + approx_camera_projection_.at(i) = true; + } + } + } + approx_grid_w_size_ = declare_parameter("approximation_grid_width_size"); + approx_grid_h_size_ = declare_parameter("approximation_grid_height_size"); + // debugger if (declare_parameter("debug_mode", false)) { std::size_t image_buffer_size = @@ -142,9 +166,6 @@ FusionNode::FusionNode( time_keeper_ = std::make_shared(time_keeper); } - point_project_to_unrectified_image_ = - declare_parameter("point_project_to_unrectified_image"); - // initialize debug tool { using autoware::universe_utils::DebugPublisher; @@ -168,7 +189,18 @@ void FusionNode::cameraInfoCallback( const sensor_msgs::msg::CameraInfo::ConstSharedPtr input_camera_info_msg, const std::size_t camera_id) { - camera_info_map_[camera_id] = *input_camera_info_msg; + // create the CameraProjection when the camera info arrives for the first time + // assuming the camera info does not change while the node is running + if ( + camera_info_map_.find(camera_id) == camera_info_map_.end() && + checkCameraInfo(*input_camera_info_msg)) { + camera_projectors_.at(camera_id) = CameraProjection( + *input_camera_info_msg, approx_grid_w_size_, approx_grid_h_size_, + point_project_to_unrectified_image_.at(camera_id), approx_camera_projection_.at(camera_id)); + camera_projectors_.at(camera_id).initialize(); + + camera_info_map_[camera_id] = *input_camera_info_msg; + } } template @@ -274,7 +306,7 @@ void FusionNode::subCallback( fuseOnSingleImage( *input_msg, roi_i, *((cached_roi_msgs_.at(roi_i))[matched_stamp]), - camera_info_map_.at(roi_i), *output_msg); + *output_msg); (cached_roi_msgs_.at(roi_i)).erase(matched_stamp); is_fused_.at(roi_i) = true; @@ -346,9 +378,7 @@ void FusionNode::roiCallback( debugger_->clear(); } - fuseOnSingleImage( - *(cached_msg_.second), roi_i, *input_roi_msg, camera_info_map_.at(roi_i), - *(cached_msg_.second)); + fuseOnSingleImage(*(cached_msg_.second), roi_i, *input_roi_msg, *(cached_msg_.second)); is_fused_.at(roi_i) = true; if (debug_publisher_) { diff --git a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp index 4d12de2685c95..d1c80ec052818 100644 --- a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp @@ -161,13 +161,6 @@ PointPaintingFusionNode::PointPaintingFusionNode(const rclcpp::NodeOptions & opt sub_ = this->create_subscription( "~/input/pointcloud", rclcpp::SensorDataQoS().keep_last(3), sub_callback); - tan_h_.resize(rois_number_); - for (std::size_t roi_i = 0; roi_i < rois_number_; ++roi_i) { - auto fx = camera_info_map_[roi_i].k.at(0); - auto x0 = camera_info_map_[roi_i].k.at(2); - tan_h_[roi_i] = x0 / fx; - } - detection_class_remapper_.setParameters( allow_remapping_by_area_matrix, min_area_matrix, max_area_matrix); @@ -260,9 +253,7 @@ void PointPaintingFusionNode::preprocess(sensor_msgs::msg::PointCloud2 & painted void PointPaintingFusionNode::fuseOnSingleImage( __attribute__((unused)) const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, - __attribute__((unused)) const std::size_t image_id, - const DetectedObjectsWithFeature & input_roi_msg, - const sensor_msgs::msg::CameraInfo & camera_info, + const std::size_t 
image_id, const DetectedObjectsWithFeature & input_roi_msg, sensor_msgs::msg::PointCloud2 & painted_pointcloud_msg) { if (painted_pointcloud_msg.data.empty() || painted_pointcloud_msg.fields.empty()) { @@ -276,8 +267,6 @@ void PointPaintingFusionNode::fuseOnSingleImage( return; } - if (!checkCameraInfo(camera_info)) return; - std::unique_ptr st_ptr; if (time_keeper_) st_ptr = std::make_unique(__func__, *time_keeper_); @@ -312,9 +301,6 @@ void PointPaintingFusionNode::fuseOnSingleImage( .offset; const auto class_offset = painted_pointcloud_msg.fields.at(4).offset; const auto p_step = painted_pointcloud_msg.point_step; - // projection matrix - image_geometry::PinholeCameraModel pinhole_camera_model; - pinhole_camera_model.fromCameraInfo(camera_info); Eigen::Vector3f point_lidar, point_camera; /** dc : don't care @@ -346,24 +332,28 @@ dc | dc dc dc dc ||zc| p_y = point_camera.y(); p_z = point_camera.z(); - if (p_z <= 0.0 || p_x > (tan_h_.at(image_id) * p_z) || p_x < (-tan_h_.at(image_id) * p_z)) { + if (camera_projectors_[image_id].isOutsideHorizontalView(p_x, p_z)) { continue; } + // project - Eigen::Vector2d projected_point = calcRawImageProjectedPoint( - pinhole_camera_model, cv::Point3d(p_x, p_y, p_z), point_project_to_unrectified_image_); - - // iterate 2d bbox - for (const auto & feature_object : objects) { - sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi; - // paint current point if it is inside bbox - int label2d = feature_object.object.classification.front().label; - if (!isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, 1.0)) { - // cppcheck-suppress invalidPointerCast - auto p_class = reinterpret_cast(&output[stride + class_offset]); - for (const auto & cls : isClassTable_) { - // add up the class values if the point belongs to multiple classes - *p_class = cls.second(label2d) ? (class_index_[cls.first] + *p_class) : *p_class; + Eigen::Vector2d projected_point; + if (camera_projectors_[image_id].calcImageProjectedPoint( + cv::Point3d(p_x, p_y, p_z), projected_point)) { + // iterate 2d bbox + for (const auto & feature_object : objects) { + sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi; + // paint current point if it is inside bbox + int label2d = feature_object.object.classification.front().label; + if ( + !isUnknown(label2d) && + isInsideBbox(projected_point.x(), projected_point.y(), roi, 1.0)) { + // cppcheck-suppress invalidPointerCast + auto p_class = reinterpret_cast(&output[stride + class_offset]); + for (const auto & cls : isClassTable_) { + // add up the class values if the point belongs to multiple classes + *p_class = cls.second(label2d) ? 
(class_index_[cls.first] + *p_class) : *p_class; + } } } #if 0 @@ -375,15 +365,15 @@ dc | dc dc dc dc ||zc| } } - for (const auto & feature_object : input_roi_msg.feature_objects) { - debug_image_rois.push_back(feature_object.feature.roi); - } - if (debugger_) { std::unique_ptr inner_st_ptr; if (time_keeper_) inner_st_ptr = std::make_unique("publish debug message", *time_keeper_); + for (const auto & feature_object : input_roi_msg.feature_objects) { + debug_image_rois.push_back(feature_object.feature.roi); + } + debugger_->image_rois_ = debug_image_rois; debugger_->obstacle_points_ = debug_image_points; debugger_->publishImage(image_id, input_roi_msg.header.stamp); diff --git a/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp index 7520647544684..32db5319bb487 100644 --- a/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/roi_cluster_fusion/node.cpp @@ -95,16 +95,12 @@ void RoiClusterFusionNode::postprocess(DetectedObjectsWithFeature & output_clust void RoiClusterFusionNode::fuseOnSingleImage( const DetectedObjectsWithFeature & input_cluster_msg, const std::size_t image_id, - const DetectedObjectsWithFeature & input_roi_msg, - const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg) + const DetectedObjectsWithFeature & input_roi_msg, DetectedObjectsWithFeature & output_cluster_msg) { - if (!checkCameraInfo(camera_info)) return; - std::unique_ptr st_ptr; if (time_keeper_) st_ptr = std::make_unique(__func__, *time_keeper_); - image_geometry::PinholeCameraModel pinhole_camera_model; - pinhole_camera_model.fromCameraInfo(camera_info); + const sensor_msgs::msg::CameraInfo & camera_info = camera_projectors_[image_id].getCameraInfo(); // get transform from cluster frame id to camera optical frame id geometry_msgs::msg::TransformStamped transform_stamped; @@ -152,18 +148,17 @@ void RoiClusterFusionNode::fuseOnSingleImage( continue; } - Eigen::Vector2d projected_point = calcRawImageProjectedPoint( - pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z), - point_project_to_unrectified_image_); - if ( - 0 <= static_cast(projected_point.x()) && - static_cast(projected_point.x()) <= static_cast(camera_info.width) - 1 && - 0 <= static_cast(projected_point.y()) && - static_cast(projected_point.y()) <= static_cast(camera_info.height) - 1) { - min_x = std::min(static_cast(projected_point.x()), min_x); - min_y = std::min(static_cast(projected_point.y()), min_y); - max_x = std::max(static_cast(projected_point.x()), max_x); - max_y = std::max(static_cast(projected_point.y()), max_y); + Eigen::Vector2d projected_point; + if (camera_projectors_[image_id].calcImageProjectedPoint( + cv::Point3d(*iter_x, *iter_y, *iter_z), projected_point)) { + const int px = static_cast(projected_point.x()); + const int py = static_cast(projected_point.y()); + + min_x = std::min(px, min_x); + min_y = std::min(py, min_y); + max_x = std::max(px, max_x); + max_y = std::max(py, max_y); + projected_points.push_back(projected_point); if (debugger_) debugger_->obstacle_points_.push_back(projected_point); } diff --git a/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp index 035bc259ab73c..38d16ad102332 100644 --- 
a/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp @@ -84,14 +84,11 @@ void RoiDetectedObjectFusionNode::preprocess(DetectedObjects & output_msg) void RoiDetectedObjectFusionNode::fuseOnSingleImage( const DetectedObjects & input_object_msg, const std::size_t image_id, const DetectedObjectsWithFeature & input_roi_msg, - const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjects & output_object_msg __attribute__((unused))) { std::unique_ptr st_ptr; if (time_keeper_) st_ptr = std::make_unique(__func__, *time_keeper_); - if (!checkCameraInfo(camera_info)) return; - Eigen::Affine3d object2camera_affine; { const auto transform_stamped_optional = getTransformStamped( @@ -103,12 +100,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage( object2camera_affine = transformToEigen(transform_stamped_optional.value().transform); } - image_geometry::PinholeCameraModel pinhole_camera_model; - pinhole_camera_model.fromCameraInfo(camera_info); - const auto object_roi_map = generateDetectedObjectRoIs( - input_object_msg, static_cast(camera_info.width), - static_cast(camera_info.height), object2camera_affine, pinhole_camera_model); + input_object_msg, image_id, object2camera_affine); fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map); if (debugger_) { @@ -122,9 +115,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage( std::map RoiDetectedObjectFusionNode::generateDetectedObjectRoIs( - const DetectedObjects & input_object_msg, const double image_width, const double image_height, - const Eigen::Affine3d & object2camera_affine, - const image_geometry::PinholeCameraModel & pinhole_camera_model) + const DetectedObjects & input_object_msg, const std::size_t & image_id, + const Eigen::Affine3d & object2camera_affine) { std::unique_ptr st_ptr; if (time_keeper_) st_ptr = std::make_unique(__func__, *time_keeper_); @@ -139,6 +131,10 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs( return object_roi_map; } const auto & passthrough_object_flags = passthrough_object_flags_map_.at(timestamp_nsec); + const sensor_msgs::msg::CameraInfo & camera_info = camera_projectors_[image_id].getCameraInfo(); + const double image_width = static_cast(camera_info.width); + const double image_height = static_cast(camera_info.height); + for (std::size_t obj_i = 0; obj_i < input_object_msg.objects.size(); ++obj_i) { std::vector vertices_camera_coord; const auto & object = input_object_msg.objects.at(obj_i); @@ -165,18 +161,18 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs( continue; } - Eigen::Vector2d proj_point = calcRawImageProjectedPoint( - pinhole_camera_model, cv::Point3d(point.x(), point.y(), point.z()), - point_project_to_unrectified_image_); + Eigen::Vector2d proj_point; + if (camera_projectors_[image_id].calcImageProjectedPoint( + cv::Point3d(point.x(), point.y(), point.z()), proj_point + )){ + const double px = proj_point.x(); + const double py = proj_point.y(); - min_x = std::min(proj_point.x(), min_x); - min_y = std::min(proj_point.y(), min_y); - max_x = std::max(proj_point.x(), max_x); - max_y = std::max(proj_point.y(), max_y); + min_x = std::min(px, min_x); + min_y = std::min(py, min_y); + max_x = std::max(px, max_x); + max_y = std::max(py, max_y); - if ( - proj_point.x() >= 0 && proj_point.x() < image_width && proj_point.y() >= 0 && - proj_point.y() < image_height) { point_on_image_cnt++; if (debugger_) { diff --git 
a/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp index 206a5f77a0235..fcd3833a5c938 100644 --- a/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp @@ -87,9 +87,8 @@ void RoiPointCloudFusionNode::postprocess( } void RoiPointCloudFusionNode::fuseOnSingleImage( const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, - __attribute__((unused)) const std::size_t image_id, + const std::size_t image_id, const DetectedObjectsWithFeature & input_roi_msg, - const sensor_msgs::msg::CameraInfo & camera_info, __attribute__((unused)) sensor_msgs::msg::PointCloud2 & output_pointcloud_msg) { std::unique_ptr st_ptr; @@ -98,7 +97,6 @@ void RoiPointCloudFusionNode::fuseOnSingleImage( if (input_pointcloud_msg.data.empty()) { return; } - if (!checkCameraInfo(camera_info)) return; std::vector output_objs; std::vector debug_image_rois; @@ -122,10 +120,6 @@ void RoiPointCloudFusionNode::fuseOnSingleImage( return; } - // transform pointcloud to camera optical frame id - image_geometry::PinholeCameraModel pinhole_camera_model; - pinhole_camera_model.fromCameraInfo(camera_info); - geometry_msgs::msg::TransformStamped transform_stamped; { const auto transform_stamped_optional = getTransformStamped( @@ -136,10 +130,10 @@ void RoiPointCloudFusionNode::fuseOnSingleImage( } transform_stamped = transform_stamped_optional.value(); } - int point_step = input_pointcloud_msg.point_step; - int x_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "x")].offset; - int y_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "y")].offset; - int z_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "z")].offset; + const int point_step = input_pointcloud_msg.point_step; + const int x_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "x")].offset; + const int y_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "y")].offset; + const int z_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "z")].offset; sensor_msgs::msg::PointCloud2 transformed_cloud; tf2::doTransform(input_pointcloud_msg, transformed_cloud, transform_stamped); @@ -164,33 +158,37 @@ void RoiPointCloudFusionNode::fuseOnSingleImage( if (transformed_z <= 0.0) { continue; } - Eigen::Vector2d projected_point = calcRawImageProjectedPoint( - pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z), - point_project_to_unrectified_image_); - for (std::size_t i = 0; i < output_objs.size(); ++i) { - auto & feature_obj = output_objs.at(i); - const auto & check_roi = feature_obj.feature.roi; - auto & cluster = clusters.at(i); - - if ( - clusters_data_size.at(i) >= - static_cast(max_cluster_size_) * static_cast(point_step)) { - continue; - } - if ( - check_roi.x_offset <= projected_point.x() && check_roi.y_offset <= projected_point.y() && - check_roi.x_offset + check_roi.width >= projected_point.x() && - check_roi.y_offset + check_roi.height >= projected_point.y()) { - std::memcpy( - &cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], point_step); - clusters_data_size.at(i) += point_step; + + Eigen::Vector2d projected_point; + if (camera_projectors_[image_id].calcImageProjectedPoint( + cv::Point3d(transformed_x, transformed_y, 
transformed_z), projected_point) + ) { + for (std::size_t i = 0; i < output_objs.size(); ++i) { + auto & feature_obj = output_objs.at(i); + const auto & check_roi = feature_obj.feature.roi; + auto & cluster = clusters.at(i); + + const double px = projected_point.x(); + const double py = projected_point.y(); + + if ( + clusters_data_size.at(i) >= + static_cast(max_cluster_size_) * static_cast(point_step)) { + continue; + } + if ( + check_roi.x_offset <= px && check_roi.y_offset <= py && + check_roi.x_offset + check_roi.width >= px && + check_roi.y_offset + check_roi.height >= py) { + std::memcpy( + &cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], + point_step); + clusters_data_size.at(i) += point_step; + } } - } - if (debugger_) { - // add all points inside image to debug - if ( - projected_point.x() > 0 && projected_point.x() < camera_info.width && - projected_point.y() > 0 && projected_point.y() < camera_info.height) { + + if (debugger_) { + // add all points inside image to debug debug_image_points.push_back(projected_point); } } diff --git a/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp index afeff213c0afe..4468c087d2250 100644 --- a/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp @@ -87,9 +87,8 @@ void SegmentPointCloudFusionNode::postprocess(PointCloud2 & pointcloud_msg) } void SegmentPointCloudFusionNode::fuseOnSingleImage( - const PointCloud2 & input_pointcloud_msg, __attribute__((unused)) const std::size_t image_id, - [[maybe_unused]] const Image & input_mask, __attribute__((unused)) const CameraInfo & camera_info, - __attribute__((unused)) PointCloud2 & output_cloud) + const PointCloud2 & input_pointcloud_msg, const std::size_t image_id, + [[maybe_unused]] const Image & input_mask, __attribute__((unused)) PointCloud2 & output_cloud) { std::unique_ptr st_ptr; if (time_keeper_) st_ptr = std::make_unique(__func__, *time_keeper_); @@ -97,10 +96,11 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage( if (input_pointcloud_msg.data.empty()) { return; } - if (!checkCameraInfo(camera_info)) return; if (input_mask.height == 0 || input_mask.width == 0) { return; } + + const sensor_msgs::msg::CameraInfo & camera_info = camera_projectors_[image_id].getCameraInfo(); std::vector mask_data(input_mask.data.begin(), input_mask.data.end()); cv::Mat mask = perception_utils::runLengthDecoder(mask_data, input_mask.height, input_mask.width); @@ -115,8 +115,6 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage( const int orig_height = camera_info.height; // resize mask to the same size as the camera image cv::resize(mask, mask, cv::Size(orig_width, orig_height), 0, 0, cv::INTER_NEAREST); - image_geometry::PinholeCameraModel pinhole_camera_model; - pinhole_camera_model.fromCameraInfo(camera_info); geometry_msgs::msg::TransformStamped transform_stamped; // transform pointcloud from frame id to camera optical frame id @@ -151,13 +149,10 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage( continue; } - Eigen::Vector2d projected_point = calcRawImageProjectedPoint( - pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z), - point_project_to_unrectified_image_); - - bool is_inside_image = projected_point.x() > 0 && projected_point.x() < camera_info.width && - 
projected_point.y() > 0 && projected_point.y() < camera_info.height; - if (!is_inside_image) { + Eigen::Vector2d projected_point; + if (!camera_projectors_[image_id].calcImageProjectedPoint( + cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point + )){ continue; } diff --git a/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp b/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp index 338a5605e5a79..4456d25b18167 100644 --- a/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp +++ b/perception/autoware_image_projection_based_fusion/src/utils/utils.cpp @@ -44,20 +44,6 @@ bool checkCameraInfo(const sensor_msgs::msg::CameraInfo & camera_info) return true; } -Eigen::Vector2d calcRawImageProjectedPoint( - const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d, - const bool & unrectify) -{ - const cv::Point2d rectified_image_point = pinhole_camera_model.project3dToPixel(point3d); - - if (!unrectify) { - return Eigen::Vector2d(rectified_image_point.x, rectified_image_point.y); - } - const cv::Point2d raw_image_point = pinhole_camera_model.unrectifyPoint(rectified_image_point); - - return Eigen::Vector2d(raw_image_point.x, raw_image_point.y); -} - std::optional getTransformStamped( const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id, const std::string & source_frame_id, const rclcpp::Time & time) From c0bb6b383c683c2294d6e084443d4031a028b5f2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 07:53:56 +0000 Subject: [PATCH 02/20] style(pre-commit): autofix --- .../camera_projection.hpp | 19 ++--- .../fusion_node.hpp | 2 +- .../roi_detected_object_fusion/node.hpp | 3 +- .../roi_pointcloud_fusion/node.hpp | 3 +- .../src/camera_projection.cpp | 83 ++++++++++++------- .../src/fusion_node.cpp | 3 +- .../src/pointpainting_fusion/node.cpp | 3 +- .../src/roi_detected_object_fusion/node.cpp | 7 +- .../src/roi_pointcloud_fusion/node.cpp | 15 ++-- .../segmentation_pointcloud_fusion/node.cpp | 3 +- 10 files changed, 78 insertions(+), 63 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp index cc66dcd15ce62..2bcef9b6fa56a 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp @@ -18,13 +18,11 @@ #define EIGEN_MPL2_ONLY #include - #include +#include #include -#include - #include #include @@ -42,12 +40,9 @@ class CameraProjection { public: explicit CameraProjection( - const sensor_msgs::msg::CameraInfo & camera_info, - const float grid_width, const float grid_height, - const bool unrectify, - const bool use_approximation - ); - CameraProjection(): grid_w_size_(1.0), grid_h_size_(1.0), unrectify_(false) {}; + const sensor_msgs::msg::CameraInfo & camera_info, const float grid_width, + const float grid_height, const bool unrectify, const bool use_approximation); + CameraProjection() : grid_w_size_(1.0), grid_h_size_(1.0), unrectify_(false) {}; void initialize(); std::function calcImageProjectedPoint; sensor_msgs::msg::CameraInfo getCameraInfo(); @@ -55,9 +50,11 @@ class CameraProjection bool 
isOutsideVerticalView(const float py, const float pz); protected: - bool calcRectifiedImageProjectedPoint(const cv::Point3d & point3d, Eigen::Vector2d & projected_point); + bool calcRectifiedImageProjectedPoint( + const cv::Point3d & point3d, Eigen::Vector2d & projected_point); bool calcRawImageProjectedPoint(const cv::Point3d & point3d, Eigen::Vector2d & projected_point); - bool calcRawImageProjectedPointWithApproximation(const cv::Point3d & point3d, Eigen::Vector2d & projected_point); + bool calcRawImageProjectedPointWithApproximation( + const cv::Point3d & point3d, Eigen::Vector2d & projected_point); void initializeCache(); sensor_msgs::msg::CameraInfo camera_info_; diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp index d522ef1287847..82db64f7d7d52 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp @@ -56,8 +56,8 @@ using sensor_msgs::msg::PointCloud2; using tier4_perception_msgs::msg::DetectedObjectsWithFeature; using tier4_perception_msgs::msg::DetectedObjectWithFeature; using PointCloud = pcl::PointCloud; -using autoware_perception_msgs::msg::ObjectClassification; using autoware::image_projection_based_fusion::CameraProjection; +using autoware_perception_msgs::msg::ObjectClassification; template class FusionNode : public rclcpp::Node diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp index f355fd2129b24..43ec134168ef9 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_detected_object_fusion/node.hpp @@ -42,8 +42,7 @@ class RoiDetectedObjectFusionNode void fuseOnSingleImage( const DetectedObjects & input_object_msg, const std::size_t image_id, - const DetectedObjectsWithFeature & input_roi_msg, - DetectedObjects & output_object_msg) override; + const DetectedObjectsWithFeature & input_roi_msg, DetectedObjects & output_object_msg) override; std::map generateDetectedObjectRoIs( const DetectedObjects & input_object_msg, const std::size_t & image_id, diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp index 0c454aebac42c..2f2c8222e196f 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp @@ -49,8 +49,7 @@ class RoiPointCloudFusionNode void fuseOnSingleImage( const PointCloud2 & input_pointcloud_msg, const std::size_t image_id, - const DetectedObjectsWithFeature & input_roi_msg, - PointCloud2 & output_pointcloud_msg) override; + const 
DetectedObjectsWithFeature & input_roi_msg, PointCloud2 & output_pointcloud_msg) override; bool out_of_scope(const DetectedObjectWithFeature & obj) override; }; diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index af17d85989d35..71318daa6ca80 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -29,8 +29,13 @@ namespace autoware::image_projection_based_fusion { CameraProjection::CameraProjection( const sensor_msgs::msg::CameraInfo & camera_info, const float grid_width, const float grid_height, - const bool unrectify, const bool use_approximation=false -): camera_info_(camera_info), grid_w_size_(grid_width), grid_h_size_(grid_height), unrectify_(unrectify), use_approximation_(use_approximation){ + const bool unrectify, const bool use_approximation = false) +: camera_info_(camera_info), + grid_w_size_(grid_width), + grid_h_size_(grid_height), + unrectify_(unrectify), + use_approximation_(use_approximation) +{ if (grid_w_size_ <= 0.0 || grid_h_size_ <= 0.0) { throw std::runtime_error("Both grid_width and grid_height must be > 0.0"); } @@ -45,20 +50,21 @@ CameraProjection::CameraProjection( // for shifting to the grid center half_grid_w_size_ = grid_w_size_ / 2.0; half_grid_h_size_ = grid_h_size_ / 2.0; - inv_grid_w_size_ = 1/grid_w_size_; - inv_grid_h_size_ = 1/grid_h_size_; + inv_grid_w_size_ = 1 / grid_w_size_; + inv_grid_h_size_ = 1 / grid_h_size_; grid_x_num_ = static_cast(std::ceil(image_w_ / grid_w_size_)); grid_y_num_ = static_cast(std::ceil(image_h_ / grid_h_size_)); - cache_size_ = grid_x_num_*grid_y_num_; + cache_size_ = grid_x_num_ * grid_y_num_; // for checking the views // cx/fx - tan_h_x_ = camera_info.k.at(2)/camera_info.k.at(0); + tan_h_x_ = camera_info.k.at(2) / camera_info.k.at(0); // cy/fy - tan_h_y_ = camera_info.k.at(5)/camera_info.k.at(4); + tan_h_y_ = camera_info.k.at(5) / camera_info.k.at(4); } -void CameraProjection::initialize(){ +void CameraProjection::initialize() +{ if (unrectify_) { if (use_approximation_) { // create the cache with size of grid center @@ -66,22 +72,26 @@ void CameraProjection::initialize(){ projection_cache_ = std::make_unique(cache_size_); initializeCache(); - calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) { + calcImageProjectedPoint = [this]( + const cv::Point3d & point3d, Eigen::Vector2d & projected_point) { return this->calcRawImageProjectedPointWithApproximation(point3d, projected_point); }; } else { - calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) { + calcImageProjectedPoint = [this]( + const cv::Point3d & point3d, Eigen::Vector2d & projected_point) { return this->calcRawImageProjectedPoint(point3d, projected_point); }; } } else { - calcImageProjectedPoint = [this](const cv::Point3d & point3d, Eigen::Vector2d & projected_point) { + calcImageProjectedPoint = [this]( + const cv::Point3d & point3d, Eigen::Vector2d & projected_point) { return this->calcRectifiedImageProjectedPoint(point3d, projected_point); }; } } -void CameraProjection::initializeCache(){ +void CameraProjection::initializeCache() +{ // sample grid centers till the camera height, width to precompute the projection // // grid_size @@ -101,31 +111,35 @@ void CameraProjection::initializeCache(){ for (float y = half_grid_h_size_; y < image_h_; y 
+= grid_h_size_) { for (float x = half_grid_w_size_; x < image_w_; x += grid_w_size_) { - const float qx = std::round((x-half_grid_w_size_)*inv_grid_w_size_)*grid_w_size_+half_grid_w_size_; - const float qy = std::round((y-half_grid_h_size_)*inv_grid_h_size_)*grid_h_size_+half_grid_h_size_; + const float qx = + std::round((x - half_grid_w_size_) * inv_grid_w_size_) * grid_w_size_ + half_grid_w_size_; + const float qy = + std::round((y - half_grid_h_size_) * inv_grid_h_size_) * grid_h_size_ + half_grid_h_size_; const int grid_x = static_cast(std::floor(qx / grid_w_size_)); const int grid_y = static_cast(std::floor(qy / grid_h_size_)); - const int index = (grid_y) * grid_x_num_ + grid_x; + const int index = (grid_y)*grid_x_num_ + grid_x; // precompute projected point cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(qx, qy)); - projection_cache_[index] = PixelPos{static_cast(raw_image_point.x), static_cast(raw_image_point.y)}; + projection_cache_[index] = + PixelPos{static_cast(raw_image_point.x), static_cast(raw_image_point.y)}; } } } - /** * @brief Calculate a projection of 3D point to rectified image plane 2D point. * @return Return a boolean indicating whether the projected point is on the image plane. */ bool CameraProjection::calcRectifiedImageProjectedPoint( - const cv::Point3d & point3d, Eigen::Vector2d & projected_point -){ + const cv::Point3d & point3d, Eigen::Vector2d & projected_point) +{ const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d); - if (rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_){ + if ( + rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || + rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_) { return false; } else { projected_point << rectified_image_point.x, rectified_image_point.y; @@ -138,12 +152,14 @@ bool CameraProjection::calcRectifiedImageProjectedPoint( * @return Return a boolean indicating whether the projected point is on the image plane. */ bool CameraProjection::calcRawImageProjectedPoint( - const cv::Point3d & point3d, Eigen::Vector2d & projected_point -){ + const cv::Point3d & point3d, Eigen::Vector2d & projected_point) +{ const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d); const cv::Point2d raw_image_point = camera_model_.unrectifyPoint(rectified_image_point); - if (rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_){ + if ( + rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || + rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_) { return false; } else { projected_point << raw_image_point.x, raw_image_point.y; @@ -156,16 +172,20 @@ bool CameraProjection::calcRawImageProjectedPoint( * @return Return a boolean indicating whether the projected point is on the image plane. 
*/ bool CameraProjection::calcRawImageProjectedPointWithApproximation( - const cv::Point3d & point3d, Eigen::Vector2d & projected_point -){ + const cv::Point3d & point3d, Eigen::Vector2d & projected_point) +{ const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d); // round to a near grid center - const float qx = std::round((rectified_image_point.x-half_grid_w_size_)*inv_grid_w_size_)*grid_w_size_+half_grid_w_size_; + const float qx = + std::round((rectified_image_point.x - half_grid_w_size_) * inv_grid_w_size_) * grid_w_size_ + + half_grid_w_size_; if (qx < 0.0 || qx >= image_w_) { return false; } - const float qy = std::round((rectified_image_point.y-half_grid_h_size_)*inv_grid_h_size_)*grid_h_size_+half_grid_h_size_; + const float qy = + std::round((rectified_image_point.y - half_grid_h_size_) * inv_grid_h_size_) * grid_h_size_ + + half_grid_h_size_; if (qy < 0.0 || qy >= image_h_) { return false; } @@ -179,16 +199,19 @@ bool CameraProjection::calcRawImageProjectedPointWithApproximation( return true; } -sensor_msgs::msg::CameraInfo CameraProjection::getCameraInfo(){ +sensor_msgs::msg::CameraInfo CameraProjection::getCameraInfo() +{ return camera_info_; } -bool CameraProjection::isOutsideHorizontalView(const float px, const float pz){ +bool CameraProjection::isOutsideHorizontalView(const float px, const float pz) +{ // assuming the points' origin is centered on the camera return pz <= 0.0 || px > tan_h_x_ * pz || px < -tan_h_x_ * pz; } -bool CameraProjection::isOutsideVerticalView(const float py, const float pz){ +bool CameraProjection::isOutsideVerticalView(const float py, const float pz) +{ // assuming the points' origin is centered on the camera return pz <= 0.0 || py > tan_h_y_ * pz || py < -tan_h_y_ * pz; } diff --git a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp index 48fdfc74ae32b..e3dc710bda1ba 100644 --- a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp @@ -305,8 +305,7 @@ void FusionNode::subCallback( } fuseOnSingleImage( - *input_msg, roi_i, *((cached_roi_msgs_.at(roi_i))[matched_stamp]), - *output_msg); + *input_msg, roi_i, *((cached_roi_msgs_.at(roi_i))[matched_stamp]), *output_msg); (cached_roi_msgs_.at(roi_i)).erase(matched_stamp); is_fused_.at(roi_i) = true; diff --git a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp index d1c80ec052818..2837bac458541 100644 --- a/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/pointpainting_fusion/node.cpp @@ -346,8 +346,7 @@ dc | dc dc dc dc ||zc| // paint current point if it is inside bbox int label2d = feature_object.object.classification.front().label; if ( - !isUnknown(label2d) && - isInsideBbox(projected_point.x(), projected_point.y(), roi, 1.0)) { + !isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, 1.0)) { // cppcheck-suppress invalidPointerCast auto p_class = reinterpret_cast(&output[stride + class_offset]); for (const auto & cls : isClassTable_) { diff --git a/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp index 
38d16ad102332..7be18d59fdbf1 100644 --- a/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp @@ -100,8 +100,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage( object2camera_affine = transformToEigen(transform_stamped_optional.value().transform); } - const auto object_roi_map = generateDetectedObjectRoIs( - input_object_msg, image_id, object2camera_affine); + const auto object_roi_map = + generateDetectedObjectRoIs(input_object_msg, image_id, object2camera_affine); fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map); if (debugger_) { @@ -163,8 +163,7 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs( Eigen::Vector2d proj_point; if (camera_projectors_[image_id].calcImageProjectedPoint( - cv::Point3d(point.x(), point.y(), point.z()), proj_point - )){ + cv::Point3d(point.x(), point.y(), point.z()), proj_point)) { const double px = proj_point.x(); const double py = proj_point.y(); diff --git a/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp index fcd3833a5c938..998072d87774e 100644 --- a/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp @@ -86,8 +86,7 @@ void RoiPointCloudFusionNode::postprocess( } } void RoiPointCloudFusionNode::fuseOnSingleImage( - const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, - const std::size_t image_id, + const sensor_msgs::msg::PointCloud2 & input_pointcloud_msg, const std::size_t image_id, const DetectedObjectsWithFeature & input_roi_msg, __attribute__((unused)) sensor_msgs::msg::PointCloud2 & output_pointcloud_msg) { @@ -131,9 +130,12 @@ void RoiPointCloudFusionNode::fuseOnSingleImage( transform_stamped = transform_stamped_optional.value(); } const int point_step = input_pointcloud_msg.point_step; - const int x_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "x")].offset; - const int y_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "y")].offset; - const int z_offset = input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "z")].offset; + const int x_offset = + input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "x")].offset; + const int y_offset = + input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "y")].offset; + const int z_offset = + input_pointcloud_msg.fields[pcl::getFieldIndex(input_pointcloud_msg, "z")].offset; sensor_msgs::msg::PointCloud2 transformed_cloud; tf2::doTransform(input_pointcloud_msg, transformed_cloud, transform_stamped); @@ -161,8 +163,7 @@ void RoiPointCloudFusionNode::fuseOnSingleImage( Eigen::Vector2d projected_point; if (camera_projectors_[image_id].calcImageProjectedPoint( - cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point) - ) { + cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point)) { for (std::size_t i = 0; i < output_objs.size(); ++i) { auto & feature_obj = output_objs.at(i); const auto & check_roi = feature_obj.feature.roi; diff --git a/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp b/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp index 
4468c087d2250..486ae291abc6a 100644 --- a/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp @@ -151,8 +151,7 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage( Eigen::Vector2d projected_point; if (!camera_projectors_[image_id].calcImageProjectedPoint( - cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point - )){ + cv::Point3d(transformed_x, transformed_y, transformed_z), projected_point)) { continue; } From b445034d906cce9e21fd3fed8748f73750f82d36 Mon Sep 17 00:00:00 2001 From: a-maumau Date: Thu, 19 Dec 2024 13:04:37 +0900 Subject: [PATCH 03/20] fix FOV filtering Signed-off-by: a-maumau --- .../camera_projection.hpp | 2 + .../src/camera_projection.cpp | 70 +++++++++++++++++-- 2 files changed, 65 insertions(+), 7 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp index 2bcef9b6fa56a..abc5c2c077334 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp @@ -48,6 +48,7 @@ class CameraProjection sensor_msgs::msg::CameraInfo getCameraInfo(); bool isOutsideHorizontalView(const float px, const float pz); bool isOutsideVerticalView(const float py, const float pz); + bool isOutsideFOV(const float px, const float py, const float pz); protected: bool calcRectifiedImageProjectedPoint( @@ -60,6 +61,7 @@ class CameraProjection sensor_msgs::msg::CameraInfo camera_info_; uint32_t image_h_, image_w_; double tan_h_x_, tan_h_y_; + double fov_left_, fov_right_, fov_top_, fov_bottom_; uint32_t cache_size_; float grid_w_size_; diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index 71318daa6ca80..5c8fee71a9f38 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -56,11 +56,46 @@ CameraProjection::CameraProjection( grid_y_num_ = static_cast(std::ceil(image_h_ / grid_h_size_)); cache_size_ = grid_x_num_ * grid_y_num_; - // for checking the views - // cx/fx - tan_h_x_ = camera_info.k.at(2) / camera_info.k.at(0); - // cy/fy - tan_h_y_ = camera_info.k.at(5) / camera_info.k.at(4); + // compute 3D rays for the image corners and pixels related to optical center + // to find the actual FOV size + // optical centers + const double ocx = static_cast(camera_info.k.at(2)); + const double ocy = static_cast(camera_info.k.at(5)); + // for checking pincushion shape case + const cv::Point3d ray_top_left = camera_model_.projectPixelTo3dRay(cv::Point(0, 0)); + const cv::Point3d ray_top_right = camera_model_.projectPixelTo3dRay(cv::Point(image_w_-1, 0)); + const cv::Point3d ray_bottom_left = camera_model_.projectPixelTo3dRay(cv::Point(0, image_h_-1)); + const cv::Point3d ray_bottom_right = + camera_model_.projectPixelTo3dRay(cv::Point(image_w_-1, image_h_-1)); + // for checking barrel shape case + const cv::Point3d ray_mid_top = camera_model_.projectPixelTo3dRay(cv::Point(ocx, 0)); + const cv::Point3d ray_mid_left = 
camera_model_.projectPixelTo3dRay(cv::Point(0, ocy)); + const cv::Point3d ray_mid_right = camera_model_.projectPixelTo3dRay(cv::Point(image_w_-1, ocy)); + const cv::Point3d ray_mid_bottom = camera_model_.projectPixelTo3dRay(cv::Point(ocx, image_h_-1)); + + cv::Point3d x_left = ray_top_left; + cv::Point3d x_right = ray_top_right; + cv::Point3d y_top = ray_top_left; + cv::Point3d y_bottom = ray_bottom_left; + + // check each side of the view + if (ray_bottom_left.x < x_left.x) x_left = ray_bottom_left; + if (ray_mid_left.x < x_left.x) x_left = ray_mid_left; + + if (x_right.x < ray_bottom_right.x) x_right = ray_bottom_right; + if (x_right.x < ray_mid_right.x) x_right = ray_mid_right; + + if (y_top.y < ray_top_right.y) y_top = ray_top_right; + if (y_top.y < ray_mid_top.y) y_top = ray_mid_top; + + if (ray_bottom_left.y < y_bottom.y) y_bottom = ray_bottom_left; + if (ray_mid_bottom.y < y_bottom.y) y_bottom = ray_mid_bottom; + + // set FOV at z = 1.0 + fov_left_ = x_left.x/x_left.z; + fov_right_ = x_right.x/x_right.z; + fov_top_ = y_top.y/y_top.z; + fov_bottom_ = y_bottom.y/y_bottom.z; } void CameraProjection::initialize() @@ -128,6 +163,7 @@ void CameraProjection::initializeCache() } } + /** * @brief Calculate a projection of 3D point to rectified image plane 2D point. * @return Return a boolean indicating whether the projected point is on the image plane. @@ -207,13 +243,33 @@ sensor_msgs::msg::CameraInfo CameraProjection::getCameraInfo() bool CameraProjection::isOutsideHorizontalView(const float px, const float pz) { // assuming the points' origin is centered on the camera - return pz <= 0.0 || px > tan_h_x_ * pz || px < -tan_h_x_ * pz; + if (pz <= 0.0) return true; + if (px < fov_left_ * pz) return true; + if (px > fov_right_ * pz) return true; + + // inside of the horizontal view + return false; } bool CameraProjection::isOutsideVerticalView(const float py, const float pz) { // assuming the points' origin is centered on the camera - return pz <= 0.0 || py > tan_h_y_ * pz || py < -tan_h_y_ * pz; + if (pz <= 0.0) return true; + // up in the camera coordinate is negative + if (py < fov_top_ * pz) return true; + if (py > fov_bottom_ * pz) return true; + + // inside of the vertical view + return false; +} + +bool CameraProjection::isOutsideFOV(const float px, const float py, const float pz) +{ + if (isOutsideHorizontalView(px, pz)) return true; + if (isOutsideVerticalView(py, pz)) return true; + + // inside of the FOV + return false; } } // namespace autoware::image_projection_based_fusion From b3356312b6f8ed81e9200ff1cdf4da32ec765482 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 04:07:44 +0000 Subject: [PATCH 04/20] style(pre-commit): autofix --- .../src/camera_projection.cpp | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index 5c8fee71a9f38..5cea3cf74559f 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -63,15 +63,16 @@ CameraProjection::CameraProjection( const double ocy = static_cast(camera_info.k.at(5)); // for checking pincushion shape case const cv::Point3d ray_top_left = camera_model_.projectPixelTo3dRay(cv::Point(0, 0)); - const cv::Point3d ray_top_right = 
camera_model_.projectPixelTo3dRay(cv::Point(image_w_-1, 0)); - const cv::Point3d ray_bottom_left = camera_model_.projectPixelTo3dRay(cv::Point(0, image_h_-1)); + const cv::Point3d ray_top_right = camera_model_.projectPixelTo3dRay(cv::Point(image_w_ - 1, 0)); + const cv::Point3d ray_bottom_left = camera_model_.projectPixelTo3dRay(cv::Point(0, image_h_ - 1)); const cv::Point3d ray_bottom_right = - camera_model_.projectPixelTo3dRay(cv::Point(image_w_-1, image_h_-1)); + camera_model_.projectPixelTo3dRay(cv::Point(image_w_ - 1, image_h_ - 1)); // for checking barrel shape case const cv::Point3d ray_mid_top = camera_model_.projectPixelTo3dRay(cv::Point(ocx, 0)); const cv::Point3d ray_mid_left = camera_model_.projectPixelTo3dRay(cv::Point(0, ocy)); - const cv::Point3d ray_mid_right = camera_model_.projectPixelTo3dRay(cv::Point(image_w_-1, ocy)); - const cv::Point3d ray_mid_bottom = camera_model_.projectPixelTo3dRay(cv::Point(ocx, image_h_-1)); + const cv::Point3d ray_mid_right = camera_model_.projectPixelTo3dRay(cv::Point(image_w_ - 1, ocy)); + const cv::Point3d ray_mid_bottom = + camera_model_.projectPixelTo3dRay(cv::Point(ocx, image_h_ - 1)); cv::Point3d x_left = ray_top_left; cv::Point3d x_right = ray_top_right; @@ -92,10 +93,10 @@ CameraProjection::CameraProjection( if (ray_mid_bottom.y < y_bottom.y) y_bottom = ray_mid_bottom; // set FOV at z = 1.0 - fov_left_ = x_left.x/x_left.z; - fov_right_ = x_right.x/x_right.z; - fov_top_ = y_top.y/y_top.z; - fov_bottom_ = y_bottom.y/y_bottom.z; + fov_left_ = x_left.x / x_left.z; + fov_right_ = x_right.x / x_right.z; + fov_top_ = y_top.y / y_top.z; + fov_bottom_ = y_bottom.y / y_bottom.z; } void CameraProjection::initialize() @@ -163,7 +164,6 @@ void CameraProjection::initializeCache() } } - /** * @brief Calculate a projection of 3D point to rectified image plane 2D point. * @return Return a boolean indicating whether the projected point is on the image plane. 
From f02e691b824d1f0de275eaf4255cb03c074ce88f Mon Sep 17 00:00:00 2001 From: a-maumau Date: Thu, 19 Dec 2024 13:14:00 +0900 Subject: [PATCH 05/20] remove unused includes Signed-off-by: a-maumau --- .../camera_projection.hpp | 2 -- .../src/camera_projection.cpp | 11 ----------- 2 files changed, 13 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp index abc5c2c077334..2c57f22578b8f 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp @@ -18,14 +18,12 @@ #define EIGEN_MPL2_ONLY #include -#include #include #include #include -#include #include namespace autoware::image_projection_based_fusion diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index 5cea3cf74559f..218384b696e07 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -14,17 +14,6 @@ #include "autoware/image_projection_based_fusion/camera_projection.hpp" -#include -#include - -#ifdef ROS_DISTRO_GALACTIC -#include -#include -#else -#include -#include -#endif - namespace autoware::image_projection_based_fusion { CameraProjection::CameraProjection( From acd69a8c0cc64463b856f688ccd76c54b64b67fb Mon Sep 17 00:00:00 2001 From: a-maumau Date: Thu, 19 Dec 2024 16:50:33 +0900 Subject: [PATCH 06/20] update schema Signed-off-by: a-maumau --- .../schema/fusion_common.schema.json | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json b/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json index 73ee1661adaea..81f3de110ec51 100644 --- a/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json +++ b/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json @@ -65,6 +65,21 @@ "type": "number", "description": "Maximum z position [m].", "default": 100.0 + }, + "approximate_camera_projection": { + "type": "array", + "description": "An array of options indicating whether to use approximated projection for each camera.", + "default": [true, true, true, true, true, true] + }, + "approximation_grid_width_size": { + "type": "number", + "description": "The size of the gird's width of approximate_camera_projection in pixel.", + "default": 1.0 + }, + "approximation_grid_height_size": { + "type": "number", + "description": "The size of the gird's height of approximate_camera_projection in pixel.", + "default": 1.0 } } } From c34003b6f56d14c150293ac09a483b5f396691af Mon Sep 17 00:00:00 2001 From: a-maumau Date: Fri, 20 Dec 2024 10:04:10 +0900 Subject: [PATCH 07/20] fix cpplint error Signed-off-by: a-maumau --- .../image_projection_based_fusion/camera_projection.hpp | 2 +- .../src/camera_projection.cpp | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp 
b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp index 2c57f22578b8f..61cebd42f6e74 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp @@ -40,7 +40,7 @@ class CameraProjection explicit CameraProjection( const sensor_msgs::msg::CameraInfo & camera_info, const float grid_width, const float grid_height, const bool unrectify, const bool use_approximation); - CameraProjection() : grid_w_size_(1.0), grid_h_size_(1.0), unrectify_(false) {}; + CameraProjection() : grid_w_size_(1.0), grid_h_size_(1.0), unrectify_(false) {} void initialize(); std::function calcImageProjectedPoint; sensor_msgs::msg::CameraInfo getCameraInfo(); diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index 218384b696e07..ba7a4ac2ccccd 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -14,6 +14,8 @@ #include "autoware/image_projection_based_fusion/camera_projection.hpp" +#include + namespace autoware::image_projection_based_fusion { CameraProjection::CameraProjection( From e8b5b2ffdcfcca886f51c9d58f26f623356a5932 Mon Sep 17 00:00:00 2001 From: a-maumau Date: Fri, 20 Dec 2024 12:48:02 +0900 Subject: [PATCH 08/20] apply suggestion: more simple and effcient computation Signed-off-by: a-maumau --- .../camera_projection.hpp | 4 +- .../src/camera_projection.cpp | 40 ++++++------------- 2 files changed, 15 insertions(+), 29 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp index 61cebd42f6e74..e6a0f1acfdbda 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp @@ -68,8 +68,8 @@ class CameraProjection float half_grid_h_size_; float inv_grid_w_size_; float inv_grid_h_size_; - uint32_t grid_x_num_; - uint32_t grid_y_num_; + int grid_x_num_; + int grid_y_num_; float index_grid_out_h_; float index_grid_out_w_; diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index ba7a4ac2ccccd..237fcf21ade11 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -43,8 +43,8 @@ CameraProjection::CameraProjection( half_grid_h_size_ = grid_h_size_ / 2.0; inv_grid_w_size_ = 1 / grid_w_size_; inv_grid_h_size_ = 1 / grid_h_size_; - grid_x_num_ = static_cast(std::ceil(image_w_ / grid_w_size_)); - grid_y_num_ = static_cast(std::ceil(image_h_ / grid_h_size_)); + grid_x_num_ = static_cast(std::ceil(image_w_ / grid_w_size_)); + grid_y_num_ = static_cast(std::ceil(image_h_ / grid_h_size_)); cache_size_ = grid_x_num_ * grid_y_num_; // compute 3D rays for the image corners and pixels related to optical center @@ 
-136,16 +136,12 @@ void CameraProjection::initializeCache() // each pixel will be rounded in this grid center // edge pixels in the image will be assign to centers that is the outside of the image - for (float y = half_grid_h_size_; y < image_h_; y += grid_h_size_) { - for (float x = half_grid_w_size_; x < image_w_; x += grid_w_size_) { - const float qx = - std::round((x - half_grid_w_size_) * inv_grid_w_size_) * grid_w_size_ + half_grid_w_size_; - const float qy = - std::round((y - half_grid_h_size_) * inv_grid_h_size_) * grid_h_size_ + half_grid_h_size_; - - const int grid_x = static_cast(std::floor(qx / grid_w_size_)); - const int grid_y = static_cast(std::floor(qy / grid_h_size_)); - const int index = (grid_y)*grid_x_num_ + grid_x; + for (int grid_y = 0; grid_y < grid_y_num_; grid_y++) { + for (int grid_x = 0; grid_x < grid_x_num_; grid_x++) { + // grid_x * grid_w_size_ + half_grid_w_size_ + const float qx = (grid_x + 0.5f) * grid_w_size_; + const float qy = (grid_y + 0.5f) * grid_h_size_; + const uint32_t index = grid_y*grid_x_num_ + grid_x; // precompute projected point cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(qx, qy)); @@ -204,23 +200,13 @@ bool CameraProjection::calcRawImageProjectedPointWithApproximation( const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d); // round to a near grid center - const float qx = - std::round((rectified_image_point.x - half_grid_w_size_) * inv_grid_w_size_) * grid_w_size_ + - half_grid_w_size_; - if (qx < 0.0 || qx >= image_w_) { - return false; - } - const float qy = - std::round((rectified_image_point.y - half_grid_h_size_) * inv_grid_h_size_) * grid_h_size_ + - half_grid_h_size_; - if (qy < 0.0 || qy >= image_h_) { - return false; - } + const int grid_x = static_cast(std::floor(rectified_image_point.x * inv_grid_w_size_)); + const int grid_y = static_cast(std::floor(rectified_image_point.y * inv_grid_h_size_)); - const int grid_x = static_cast(std::floor(qx / grid_w_size_)); - const int grid_y = static_cast(std::floor(qy / grid_h_size_)); - const int index = grid_y * grid_x_num_ + grid_x; + if (grid_x < 0.0 || grid_x>= grid_x_num_) return false; + if (grid_y < 0.0 || grid_y>= grid_y_num_) return false; + const uint32_t index = grid_y * grid_x_num_ + grid_x; projected_point << projection_cache_[index].x, projection_cache_[index].y; return true; From bc1e8ed63118455a65313e171af4360f32f2dc36 Mon Sep 17 00:00:00 2001 From: a-maumau Date: Fri, 20 Dec 2024 13:22:50 +0900 Subject: [PATCH 09/20] correct terminology Signed-off-by: a-maumau --- .../config/fusion_common.param.yaml | 4 +- .../camera_projection.hpp | 22 ++++----- .../fusion_node.hpp | 4 +- .../schema/fusion_common.schema.json | 8 ++-- .../src/camera_projection.cpp | 47 +++++++++---------- .../src/fusion_node.cpp | 6 +-- 6 files changed, 42 insertions(+), 49 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml b/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml index 553bb7e29220e..86db3bad4f8f8 100644 --- a/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml +++ b/perception/autoware_image_projection_based_fusion/config/fusion_common.param.yaml @@ -17,8 +17,8 @@ # camera cache setting for each ROI approximate_camera_projection: [true, true, true, true, true, true] # grid size in pixels - approximation_grid_width_size: 1.0 - approximation_grid_height_size: 1.0 + approximation_grid_cell_width: 1.0 + 
approximation_grid_cell_height: 1.0 # debug parameters publish_processing_time_detail: false diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp index e6a0f1acfdbda..64530f6058e42 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp @@ -38,9 +38,9 @@ class CameraProjection { public: explicit CameraProjection( - const sensor_msgs::msg::CameraInfo & camera_info, const float grid_width, - const float grid_height, const bool unrectify, const bool use_approximation); - CameraProjection() : grid_w_size_(1.0), grid_h_size_(1.0), unrectify_(false) {} + const sensor_msgs::msg::CameraInfo & camera_info, const float grid_cell_width, + const float grid_cell_height, const bool unrectify, const bool use_approximation); + CameraProjection() : cell_width_(1.0), cell_height_(1.0), unrectify_(false) {} void initialize(); std::function calcImageProjectedPoint; sensor_msgs::msg::CameraInfo getCameraInfo(); @@ -62,16 +62,12 @@ class CameraProjection double fov_left_, fov_right_, fov_top_, fov_bottom_; uint32_t cache_size_; - float grid_w_size_; - float grid_h_size_; - float half_grid_w_size_; - float half_grid_h_size_; - float inv_grid_w_size_; - float inv_grid_h_size_; - int grid_x_num_; - int grid_y_num_; - float index_grid_out_h_; - float index_grid_out_w_; + float cell_width_; + float cell_height_; + float inv_cell_width_; + float inv_cell_height_; + int grid_width_; + int grid_height_; bool unrectify_; bool use_approximation_; diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp index 82db64f7d7d52..844dece2f00ad 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp @@ -110,8 +110,8 @@ class FusionNode : public rclcpp::Node // camera projection std::vector camera_projectors_; std::vector approx_camera_projection_; - float approx_grid_w_size_; - float approx_grid_h_size_; + float approx_grid_cell_w_size_; + float approx_grid_cell_h_size_; rclcpp::TimerBase::SharedPtr timer_; double timeout_ms_{}; diff --git a/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json b/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json index 81f3de110ec51..9c8bc9c9bff88 100644 --- a/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json +++ b/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json @@ -71,14 +71,14 @@ "description": "An array of options indicating whether to use approximated projection for each camera.", "default": [true, true, true, true, true, true] }, - "approximation_grid_width_size": { + "approximation_grid_cell_width": { "type": "number", - "description": "The size of the gird's width of approximate_camera_projection in pixel.", + "description": "The width of grid cell used in approximated projection [pixel].", "default": 
1.0 }, - "approximation_grid_height_size": { + "approximation_grid_cell_height": { "type": "number", - "description": "The size of the gird's height of approximate_camera_projection in pixel.", + "description": "The height of grid cell used in approximated projection [pixel].", "default": 1.0 } } diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index 237fcf21ade11..afeca45e544d0 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -19,16 +19,16 @@ namespace autoware::image_projection_based_fusion { CameraProjection::CameraProjection( - const sensor_msgs::msg::CameraInfo & camera_info, const float grid_width, const float grid_height, - const bool unrectify, const bool use_approximation = false) + const sensor_msgs::msg::CameraInfo & camera_info, const float grid_cell_width, + const float grid_cell_height, const bool unrectify, const bool use_approximation = false) : camera_info_(camera_info), - grid_w_size_(grid_width), - grid_h_size_(grid_height), + cell_width_(grid_cell_width), + cell_height_(grid_cell_height), unrectify_(unrectify), use_approximation_(use_approximation) { - if (grid_w_size_ <= 0.0 || grid_h_size_ <= 0.0) { - throw std::runtime_error("Both grid_width and grid_height must be > 0.0"); + if (grid_cell_width <= 0.0 || grid_cell_height <= 0.0) { + throw std::runtime_error("Both grid_cell_width and grid_cell_height must be > 0.0"); } image_w_ = camera_info.width; @@ -38,14 +38,11 @@ CameraProjection::CameraProjection( camera_model_.fromCameraInfo(camera_info); // cache settings - // for shifting to the grid center - half_grid_w_size_ = grid_w_size_ / 2.0; - half_grid_h_size_ = grid_h_size_ / 2.0; - inv_grid_w_size_ = 1 / grid_w_size_; - inv_grid_h_size_ = 1 / grid_h_size_; - grid_x_num_ = static_cast(std::ceil(image_w_ / grid_w_size_)); - grid_y_num_ = static_cast(std::ceil(image_h_ / grid_h_size_)); - cache_size_ = grid_x_num_ * grid_y_num_; + inv_cell_width_ = 1 / cell_width_; + inv_cell_height_ = 1 / cell_height_; + grid_width_ = static_cast(std::ceil(image_w_ / cell_width_)); + grid_height_ = static_cast(std::ceil(image_h_ / cell_height_)); + cache_size_ = grid_width_ * grid_height_; // compute 3D rays for the image corners and pixels related to optical center // to find the actual FOV size @@ -136,12 +133,12 @@ void CameraProjection::initializeCache() // each pixel will be rounded in this grid center // edge pixels in the image will be assign to centers that is the outside of the image - for (int grid_y = 0; grid_y < grid_y_num_; grid_y++) { - for (int grid_x = 0; grid_x < grid_x_num_; grid_x++) { + for (int grid_y = 0; grid_y < grid_height_; grid_y++) { + for (int grid_x = 0; grid_x < grid_width_; grid_x++) { // grid_x * grid_w_size_ + half_grid_w_size_ - const float qx = (grid_x + 0.5f) * grid_w_size_; - const float qy = (grid_y + 0.5f) * grid_h_size_; - const uint32_t index = grid_y*grid_x_num_ + grid_x; + const float qx = (grid_x + 0.5f) * cell_width_; + const float qy = (grid_y + 0.5f) * cell_height_; + const uint32_t index = grid_y*grid_width_ + grid_x; // precompute projected point cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(qx, qy)); @@ -199,14 +196,14 @@ bool CameraProjection::calcRawImageProjectedPointWithApproximation( { const cv::Point2d rectified_image_point = 
camera_model_.project3dToPixel(point3d); - // round to a near grid center - const int grid_x = static_cast(std::floor(rectified_image_point.x * inv_grid_w_size_)); - const int grid_y = static_cast(std::floor(rectified_image_point.y * inv_grid_h_size_)); + // round to a near grid cell + const int grid_x = static_cast(std::floor(rectified_image_point.x * inv_cell_width_)); + const int grid_y = static_cast(std::floor(rectified_image_point.y * inv_cell_height_)); - if (grid_x < 0.0 || grid_x>= grid_x_num_) return false; - if (grid_y < 0.0 || grid_y>= grid_y_num_) return false; + if (grid_x < 0.0 || grid_x>= grid_width_) return false; + if (grid_y < 0.0 || grid_y>= grid_height_) return false; - const uint32_t index = grid_y * grid_x_num_ + grid_x; + const uint32_t index = grid_y * grid_width_ + grid_x; projected_point << projection_cache_[index].x, projection_cache_[index].y; return true; diff --git a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp index e3dc710bda1ba..7f249d4640e81 100644 --- a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp @@ -145,8 +145,8 @@ FusionNode::FusionNode( } } } - approx_grid_w_size_ = declare_parameter("approximation_grid_width_size"); - approx_grid_h_size_ = declare_parameter("approximation_grid_height_size"); + approx_grid_cell_w_size_ = declare_parameter("approximation_grid_cell_width"); + approx_grid_cell_h_size_ = declare_parameter("approximation_grid_cell_height"); // debugger if (declare_parameter("debug_mode", false)) { @@ -195,7 +195,7 @@ void FusionNode::cameraInfoCallback( camera_info_map_.find(camera_id) == camera_info_map_.end() && checkCameraInfo(*input_camera_info_msg)) { camera_projectors_.at(camera_id) = CameraProjection( - *input_camera_info_msg, approx_grid_w_size_, approx_grid_h_size_, + *input_camera_info_msg, approx_grid_cell_w_size_, approx_grid_cell_h_size_, point_project_to_unrectified_image_.at(camera_id), approx_camera_projection_.at(camera_id)); camera_projectors_.at(camera_id).initialize(); From d2056ca955b5428ac28c14beb131d68f21162bcd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 20 Dec 2024 04:25:46 +0000 Subject: [PATCH 10/20] style(pre-commit): autofix --- .../src/camera_projection.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index afeca45e544d0..5c6a59cdf525b 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -138,7 +138,7 @@ void CameraProjection::initializeCache() // grid_x * grid_w_size_ + half_grid_w_size_ const float qx = (grid_x + 0.5f) * cell_width_; const float qy = (grid_y + 0.5f) * cell_height_; - const uint32_t index = grid_y*grid_width_ + grid_x; + const uint32_t index = grid_y * grid_width_ + grid_x; // precompute projected point cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(qx, qy)); @@ -200,8 +200,8 @@ bool CameraProjection::calcRawImageProjectedPointWithApproximation( const int grid_x = static_cast(std::floor(rectified_image_point.x * inv_cell_width_)); const int grid_y = static_cast(std::floor(rectified_image_point.y * 
inv_cell_height_)); - if (grid_x < 0.0 || grid_x>= grid_width_) return false; - if (grid_y < 0.0 || grid_y>= grid_height_) return false; + if (grid_x < 0.0 || grid_x >= grid_width_) return false; + if (grid_y < 0.0 || grid_y >= grid_height_) return false; const uint32_t index = grid_y * grid_width_ + grid_x; projected_point << projection_cache_[index].x, projection_cache_[index].y; From 36dc896dfe987177e1a8e897ece113d9ff095716 Mon Sep 17 00:00:00 2001 From: a-maumau Date: Fri, 20 Dec 2024 13:41:16 +0900 Subject: [PATCH 11/20] fix comments Signed-off-by: a-maumau --- .../src/camera_projection.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index 5c6a59cdf525b..e58f836b1b81a 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -116,9 +116,9 @@ void CameraProjection::initialize() void CameraProjection::initializeCache() { - // sample grid centers till the camera height, width to precompute the projection + // sample grid cell centers till the camera height, width to precompute the projections // - // grid_size + // grid cell size // / // v // |---| w @@ -128,10 +128,11 @@ void CameraProjection::initializeCache() // | . | . | . | // | ^ // h | | - // v grid center + // v grid cell center // - // each pixel will be rounded in this grid center - // edge pixels in the image will be assign to centers that is the outside of the image + // each pixel will be rounded in these grid cell centers + // edge pixels in right and bottom side in the image will be assign to these centers + // that is the outside of the image for (int grid_y = 0; grid_y < grid_height_; grid_y++) { for (int grid_x = 0; grid_x < grid_width_; grid_x++) { From 82e97cefc0c08f2142e3101476b3039fdf1e2658 Mon Sep 17 00:00:00 2001 From: Masaki Baba Date: Fri, 20 Dec 2024 15:02:19 +0900 Subject: [PATCH 12/20] fix var name Signed-off-by: a-maumau Co-authored-by: Taekjin LEE --- .../src/camera_projection.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index e58f836b1b81a..418eff84964d3 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -198,14 +198,14 @@ bool CameraProjection::calcRawImageProjectedPointWithApproximation( const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d); // round to a near grid cell - const int grid_x = static_cast(std::floor(rectified_image_point.x * inv_cell_width_)); - const int grid_y = static_cast(std::floor(rectified_image_point.y * inv_cell_height_)); + const int grid_idx_x = static_cast(std::floor(rectified_image_point.x * inv_cell_width_)); + const int grid_idx_y = static_cast(std::floor(rectified_image_point.y * inv_cell_height_)); - if (grid_x < 0.0 || grid_x >= grid_width_) return false; - if (grid_y < 0.0 || grid_y >= grid_height_) return false; + if (grid_idx_x < 0.0 || grid_idx_x >= grid_width_) return false; + if (grid_idx_y < 0.0 || grid_idx_y >= grid_height_) return false; - const uint32_t index = grid_y * grid_width_ + grid_x; - projected_point << 
projection_cache_[index].x, projection_cache_[index].y; + const uint32_t grid_index = grid_idx_y * grid_width_ + grid_idx_x; + projected_point << projection_cache_[grid_index].x, projection_cache_[grid_index].y; return true; } From d28ff1626c184712b1f74543a91194205553c34d Mon Sep 17 00:00:00 2001 From: Masaki Baba Date: Fri, 20 Dec 2024 15:03:05 +0900 Subject: [PATCH 13/20] fix variable names Signed-off-by: a-maumau Co-authored-by: Taekjin LEE --- .../src/camera_projection.cpp | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index 418eff84964d3..e3506c7257464 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -134,16 +134,15 @@ void CameraProjection::initializeCache() // edge pixels in right and bottom side in the image will be assign to these centers // that is the outside of the image - for (int grid_y = 0; grid_y < grid_height_; grid_y++) { - for (int grid_x = 0; grid_x < grid_width_; grid_x++) { - // grid_x * grid_w_size_ + half_grid_w_size_ - const float qx = (grid_x + 0.5f) * cell_width_; - const float qy = (grid_y + 0.5f) * cell_height_; - const uint32_t index = grid_y * grid_width_ + grid_x; - + for (int idx_y = 0; idx_y < grid_height_; idx_y++) { + for (int idx_x = 0; idx_x < grid_width_; idx_x++) { + const uint32_t grid_index = idx_y * grid_width_ + idx_x; + const float qx = (idx_x + 0.5f) * cell_width_; + const float qy = (idx_y + 0.5f) * cell_height_; + // precompute projected point cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(qx, qy)); - projection_cache_[index] = + projection_cache_[grid_index] = PixelPos{static_cast(raw_image_point.x), static_cast(raw_image_point.y)}; } } From a094379365c0307c670225b681a5a002114286ea Mon Sep 17 00:00:00 2001 From: Masaki Baba Date: Fri, 20 Dec 2024 15:03:22 +0900 Subject: [PATCH 14/20] fix variable names Signed-off-by: a-maumau Co-authored-by: Taekjin LEE --- .../src/camera_projection.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index e3506c7257464..1aad7dc2eeda5 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -31,8 +31,8 @@ CameraProjection::CameraProjection( throw std::runtime_error("Both grid_cell_width and grid_cell_height must be > 0.0"); } - image_w_ = camera_info.width; - image_h_ = camera_info.height; + image_width_ = camera_info.width; + image_height_ = camera_info.height; // prepare camera model camera_model_.fromCameraInfo(camera_info); @@ -40,8 +40,8 @@ CameraProjection::CameraProjection( // cache settings inv_cell_width_ = 1 / cell_width_; inv_cell_height_ = 1 / cell_height_; - grid_width_ = static_cast(std::ceil(image_w_ / cell_width_)); - grid_height_ = static_cast(std::ceil(image_h_ / cell_height_)); + grid_width_ = static_cast(std::ceil(image_width_ / cell_width_)); + grid_height_ = static_cast(std::ceil(image_height_ / cell_height_)); cache_size_ = grid_width_ * grid_height_; // compute 3D rays for the image corners and pixels related to optical center From 
ad2a204432d7bc05f754f5d6c0cba9e0c2b809bd Mon Sep 17 00:00:00 2001 From: Masaki Baba Date: Fri, 20 Dec 2024 15:03:38 +0900 Subject: [PATCH 15/20] fix variable names Signed-off-by: a-maumau Co-authored-by: Taekjin LEE --- .../src/camera_projection.cpp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index 1aad7dc2eeda5..41f3ec275d454 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -51,16 +51,16 @@ CameraProjection::CameraProjection( const double ocy = static_cast(camera_info.k.at(5)); // for checking pincushion shape case const cv::Point3d ray_top_left = camera_model_.projectPixelTo3dRay(cv::Point(0, 0)); - const cv::Point3d ray_top_right = camera_model_.projectPixelTo3dRay(cv::Point(image_w_ - 1, 0)); - const cv::Point3d ray_bottom_left = camera_model_.projectPixelTo3dRay(cv::Point(0, image_h_ - 1)); + const cv::Point3d ray_top_right = camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, 0)); + const cv::Point3d ray_bottom_left = camera_model_.projectPixelTo3dRay(cv::Point(0, image_height_ - 1)); const cv::Point3d ray_bottom_right = - camera_model_.projectPixelTo3dRay(cv::Point(image_w_ - 1, image_h_ - 1)); + camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, image_height_ - 1)); // for checking barrel shape case const cv::Point3d ray_mid_top = camera_model_.projectPixelTo3dRay(cv::Point(ocx, 0)); const cv::Point3d ray_mid_left = camera_model_.projectPixelTo3dRay(cv::Point(0, ocy)); - const cv::Point3d ray_mid_right = camera_model_.projectPixelTo3dRay(cv::Point(image_w_ - 1, ocy)); + const cv::Point3d ray_mid_right = camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, ocy)); const cv::Point3d ray_mid_bottom = - camera_model_.projectPixelTo3dRay(cv::Point(ocx, image_h_ - 1)); + camera_model_.projectPixelTo3dRay(cv::Point(ocx, image_height_ - 1)); cv::Point3d x_left = ray_top_left; cv::Point3d x_right = ray_top_right; From edce9da75a276c9f47a7c204aa2033846c2bd639 Mon Sep 17 00:00:00 2001 From: Masaki Baba Date: Fri, 20 Dec 2024 15:03:58 +0900 Subject: [PATCH 16/20] fix variable names Signed-off-by: a-maumau Co-authored-by: Taekjin LEE --- .../image_projection_based_fusion/camera_projection.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp index 64530f6058e42..f9b3158b1672a 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/camera_projection.hpp @@ -57,7 +57,7 @@ class CameraProjection void initializeCache(); sensor_msgs::msg::CameraInfo camera_info_; - uint32_t image_h_, image_w_; + uint32_t image_height_, image_width_; double tan_h_x_, tan_h_y_; double fov_left_, fov_right_, fov_top_, fov_bottom_; From 542ed4815a11ea12bbc03ad95e05a7d9e53e7559 Mon Sep 17 00:00:00 2001 From: a-maumau Date: Fri, 20 Dec 2024 15:10:57 +0900 Subject: [PATCH 17/20] fix variable names Signed-off-by: a-maumau --- .../src/camera_projection.cpp | 25 
+++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp index 41f3ec275d454..c0a820609a0a7 100644 --- a/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp +++ b/perception/autoware_image_projection_based_fusion/src/camera_projection.cpp @@ -51,14 +51,17 @@ CameraProjection::CameraProjection( const double ocy = static_cast(camera_info.k.at(5)); // for checking pincushion shape case const cv::Point3d ray_top_left = camera_model_.projectPixelTo3dRay(cv::Point(0, 0)); - const cv::Point3d ray_top_right = camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, 0)); - const cv::Point3d ray_bottom_left = camera_model_.projectPixelTo3dRay(cv::Point(0, image_height_ - 1)); + const cv::Point3d ray_top_right = + camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, 0)); + const cv::Point3d ray_bottom_left = + camera_model_.projectPixelTo3dRay(cv::Point(0, image_height_ - 1)); const cv::Point3d ray_bottom_right = camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, image_height_ - 1)); // for checking barrel shape case const cv::Point3d ray_mid_top = camera_model_.projectPixelTo3dRay(cv::Point(ocx, 0)); const cv::Point3d ray_mid_left = camera_model_.projectPixelTo3dRay(cv::Point(0, ocy)); - const cv::Point3d ray_mid_right = camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, ocy)); + const cv::Point3d ray_mid_right = + camera_model_.projectPixelTo3dRay(cv::Point(image_width_ - 1, ocy)); const cv::Point3d ray_mid_bottom = camera_model_.projectPixelTo3dRay(cv::Point(ocx, image_height_ - 1)); @@ -137,11 +140,11 @@ void CameraProjection::initializeCache() for (int idx_y = 0; idx_y < grid_height_; idx_y++) { for (int idx_x = 0; idx_x < grid_width_; idx_x++) { const uint32_t grid_index = idx_y * grid_width_ + idx_x; - const float qx = (idx_x + 0.5f) * cell_width_; - const float qy = (idx_y + 0.5f) * cell_height_; - + const float px = (idx_x + 0.5f) * cell_width_; + const float py = (idx_y + 0.5f) * cell_height_; + // precompute projected point - cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(qx, qy)); + cv::Point2d raw_image_point = camera_model_.unrectifyPoint(cv::Point2d(px, py)); projection_cache_[grid_index] = PixelPos{static_cast(raw_image_point.x), static_cast(raw_image_point.y)}; } @@ -158,8 +161,8 @@ bool CameraProjection::calcRectifiedImageProjectedPoint( const cv::Point2d rectified_image_point = camera_model_.project3dToPixel(point3d); if ( - rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || - rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_) { + rectified_image_point.x < 0.0 || rectified_image_point.x >= image_width_ || + rectified_image_point.y < 0.0 || rectified_image_point.y >= image_height_) { return false; } else { projected_point << rectified_image_point.x, rectified_image_point.y; @@ -178,8 +181,8 @@ bool CameraProjection::calcRawImageProjectedPoint( const cv::Point2d raw_image_point = camera_model_.unrectifyPoint(rectified_image_point); if ( - rectified_image_point.x < 0.0 || rectified_image_point.x >= image_w_ || - rectified_image_point.y < 0.0 || rectified_image_point.y >= image_h_) { + rectified_image_point.x < 0.0 || rectified_image_point.x >= image_width_ || + rectified_image_point.y < 0.0 || rectified_image_point.y >= image_height_) { return false; } else { projected_point << 
raw_image_point.x, raw_image_point.y; From 94a0d78b218c37b507b59f6afce35943ea3ae3c2 Mon Sep 17 00:00:00 2001 From: Masaki Baba Date: Mon, 23 Dec 2024 15:44:20 +0900 Subject: [PATCH 18/20] fix initialization Co-authored-by: badai nguyen <94814556+badai-nguyen@users.noreply.github.com> Signed-off-by: a-maumau --- .../autoware/image_projection_based_fusion/fusion_node.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp index 844dece2f00ad..239d406650ce8 100644 --- a/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp +++ b/perception/autoware_image_projection_based_fusion/include/autoware/image_projection_based_fusion/fusion_node.hpp @@ -102,7 +102,7 @@ class FusionNode : public rclcpp::Node tf2_ros::Buffer tf_buffer_; tf2_ros::TransformListener tf_listener_; - std::vector point_project_to_unrectified_image_{false}; + std::vector point_project_to_unrectified_image_; // camera_info std::map camera_info_map_; From c3317acc2eb0bee8e1db66d6fe1b18a144218b0e Mon Sep 17 00:00:00 2001 From: Masaki Baba Date: Mon, 23 Dec 2024 16:01:31 +0900 Subject: [PATCH 19/20] add verification to point_project_to_unrectified_image when loading Co-authored-by: badai nguyen <94814556+badai-nguyen@users.noreply.github.com> Signed-off-by: a-maumau --- .../src/fusion_node.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp index 7f249d4640e81..ea0904215cb86 100644 --- a/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp +++ b/perception/autoware_image_projection_based_fusion/src/fusion_node.cpp @@ -129,6 +129,11 @@ FusionNode::FusionNode( camera_projectors_.resize(rois_number_); point_project_to_unrectified_image_ = declare_parameter>("point_project_to_unrectified_image"); + + if (rois_number_ > point_project_to_unrectified_image_.size()) { + throw std::runtime_error( + "The number of point_project_to_unrectified_image does not match the number of rois topics."); + } approx_camera_projection_ = declare_parameter>("approximate_camera_projection"); if (rois_number_ != approx_camera_projection_.size()) { const std::size_t current_size = approx_camera_projection_.size(); From 3b0bbd0837696de640b608c17343789ee8fe70e5 Mon Sep 17 00:00:00 2001 From: Taekjin LEE Date: Tue, 24 Dec 2024 08:46:54 +0900 Subject: [PATCH 20/20] chore: add option description to project points to unrectified image Signed-off-by: Taekjin LEE --- .../schema/fusion_common.schema.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json b/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json index 9c8bc9c9bff88..8077f3e2f3e30 100644 --- a/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json +++ b/perception/autoware_image_projection_based_fusion/schema/fusion_common.schema.json @@ -31,6 +31,11 @@ "default": 15, "minimum": 1 }, + "point_project_to_unrectified_image": { + "type": "array", + "description": "An array of options indicating whether to project point to unrectified image or not.", + "default": [false, false, false, false, false, false] + }, 
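The verification added in PATCH 19 is a plain length check of a per-ROI array parameter against the number of subscribed ROI topics, so that a mismatched configuration fails at node construction instead of during fusion. A minimal sketch, assuming a hypothetical standalone helper rather than the inline check used in the FusionNode constructor:

```cpp
// Hypothetical helper illustrating the per-ROI array-parameter check;
// the actual verification is written inline in the FusionNode constructor.
#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

void validate_per_roi_parameter(
  const std::vector<bool> & values, const std::size_t rois_number, const std::string & name)
{
  // Each ROI input needs its own entry; fewer entries than ROI topics is a
  // configuration error, so fail fast before any fusion callback runs.
  if (values.size() < rois_number) {
    throw std::runtime_error(name + " must have one entry per rois topic");
  }
}
```

The same constructor also compares the size of approximate_camera_projection against rois_number_, so both per-ROI arrays in fusion_common.param.yaml are expected to list one value per camera.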
"debug_mode": { "type": "boolean", "description": "Whether to debug or not.",