Commit a7247020 authored by xin.wang.waytous

ros-camera-bev-init

# This file currently only serves to mark the location of a catkin workspace for tool integration
build
devel
release
sdk
logs
.vscode
*.engine
*.bag
*.avi
/opt/ros/noetic/share/catkin/cmake/toplevel.cmake
\ No newline at end of file
cmake_minimum_required(VERSION 3.21.0)
project(perception_camera_bev)
set(CMAKE_CXX_STANDARD 17)
SET(CMAKE_BUILD_TYPE "Release")
#SET(CMAKE_BUILD_TYPE "Debug")
# src directory
set(src_root ${PROJECT_SOURCE_DIR}/src)
# opencv+cuda
find_package(OpenCV REQUIRED)
message("OpenCV_LIBRARIES_DIRS: \n\n" ${OpenCV_LIBRARIES_DIRS})
message("OpenCV_INCLUDE_DIRS: \n\n" ${OpenCV_INCLUDE_DIRS})
# enable_language(CUDA)
link_directories(${OpenCV_LIBRARIES_DIRS})
include_directories(${OpenCV_INCLUDE_DIRS})
find_package(CUDA REQUIRED)
link_directories(/usr/local/cuda/lib64)
include_directories(/usr/local/cuda/include)
# eigen3
find_package(Eigen3 REQUIRED)
link_directories(${EIGEN3_LIBRARIES_DIRS})
include_directories(${EIGEN3_INCLUDE_DIRS})
# yaml+glog+gflag
# include_directories(/usr/include/yaml-cpp)
find_package(yaml-cpp REQUIRED)
include_directories(/usr/include/gflags)
include_directories(/usr/include/glog)
# deepinfer
include_directories(${CMAKE_SOURCE_DIR}/../../deepinfer/include)
link_directories(${CMAKE_SOURCE_DIR}/../../deepinfer/build/src)
find_package(catkin REQUIRED COMPONENTS
cv_bridge
roscpp
roslib
rospy
sensor_msgs
geometry_msgs
std_msgs
waytous_perception_msgs
)
find_package(PCL REQUIRED)
include_directories(
${PCL_INCLUDE_DIRS}
${catkin_INCLUDE_DIRS}
)
catkin_package(
INCLUDE_DIRS include
LIBRARIES ${perception_camera_bev}
CATKIN_DEPENDS cv_bridge roscpp sensor_msgs std_msgs geometry_msgs message_runtime roslib
)
set(CMAKE_CUDA_ARCHITECTURES 35 50 72)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wfatal-errors -Ofast")
list(APPEND CUDA_NVCC_FLAGS "-D_FORCE_INLINES -Xcompiler -fPIC")
# SET(CMAKE_CXX_FLAGS_DEBUG "$ENV{CXXFLAGS} -O0 -Wall -g2 -ggdb")
# SET(CMAKE_CXX_FLAGS_RELEASE "$ENV{CXXFLAGS} -O3 -Wall")
add_executable(camera_bev_infer
${src_root}/main.cpp
${src_root}/sensor_manager.cpp
${src_root}/utils.cpp
${src_root}/camera_frame.cpp
${src_root}/vision_transform/ComFunction.cpp
${src_root}/vision_transform/ros_visualization.cpp
${src_root}/vision_transform/vision_3d.cpp
${src_root}/undistort/undistort_v2.cpp
)
target_include_directories(camera_bev_infer PUBLIC
${PROJECT_SOURCE_DIR}/include
${src_root}
)
target_link_libraries(camera_bev_infer PUBLIC
waytous_deepinfer
${CUDA_npp_LIBRARY}
${CUDA_nppc_LIBRARY}
${OpenCV_LIBS}
${PCL_LIBRARIES}
${catkin_LIBRARIES}
cudart
yaml-cpp
gflags
glog
)
rgb_sub_topic_name: /sensor/front/image/bgr
pointcloud_sub_topic_name: /sensor/lidar/livox/front_middle/points
detection_pub_topic_name: /perception/camera/obstacle
# static_gridmap_pub_topic_name: /perception/camera/bev_static_gridmap
creator_id: 0000
creator_name: camera_bev_node
sensor_frame_id: camera_01
# config_root_path: /home/nvidia/Projects/perception/eon_camera_bev
config_root_path: /home/ubuntu/projects/ros_camera_bev/src/camera_bev_infer
rgb_config_path: configs/weights/multi_yolov5m.yaml
device: 0
period_time: 5
vis_res: true
vis_frame_id: front_middle_livox
obstacle_height_threshold: 30 # cm
camera_intrinsic: [969.796542854634, -0.129283640950858, 992.981470492944,
0. , 970.128231641552, 596.651376445781,
0. , 0. , 1. ]
distortion_coefficients: [-0.359234564377185, 0.169945565409037, 0.000788823270375011, 0.000877958277585529, -0.0433517026779682]
camera2lidar_extrinsic: [-0.00223004, -0.999324, -0.0367048, 0.70583,
0.0536961, 0.0365323, -0.997889, -0.761989,
0.998555, -0.00419624, 0.0535783, 0.781821,
0, 0, 0, 1]
src_img_width: 1920
src_img_height: 1080
dst_img_width: 960
dst_img_height: 540
rgb_sub_topic_name: /sensor/camera/front_rgb/image
pointcloud_sub_topic_name: /vehicle_all_pointcloud
detection_pub_topic_name: /perception/camera/obstacle
# static_gridmap_pub_topic_name: /perception/camera/bev_static_gridmap
creator_id: 0000
creator_name: camera_bev_node
sensor_frame_id: camera_01
# config_root_path: /home/nvidia/Projects/perception/eon_camera_bev
config_root_path: /home/ubuntu/projects/ros_camera_bev/src/camera_bev_infer
rgb_config_path: configs/weights/multi_yolov5m.yaml
device: 0
period_time: 5
vis_res: true
vis_frame_id: Lidar
obstacle_height_threshold: 30 # cm
camera_intrinsic: [1942.60953875808, 0, 966.598714709598,
0, 1941.99570266726, 574.677777251193,
0, 0, 1]
distortion_coefficients: [-0.528765703554843, 0.252351515780946, 0.00337392775054378, 0.000123062634511131, -0.0366309311422936]
camera2lidar_extrinsic: [0.0068179, -0.9991158, 0.0414878, 0.027,
-0.2284129, -0.0419479, -0.9726602, 2.415,
0.9735405, -0.0028448, -0.2284969, -1.408,
0, 0, 0, 1]
src_img_width: 1920
src_img_height: 1080
dst_img_width: 960
dst_img_height: 540
rgb_sub_topic_name: /sensor/camera/front_rgb/image
pointcloud_sub_topic_name: /vehicle_all_pointcloud
detection_pub_topic_name: /perception/camera/obstacle
# static_gridmap_pub_topic_name: /perception/camera/bev_static_gridmap
creator_id: 0000
creator_name: camera_bev_node
sensor_frame_id: camera_01
# config_root_path: /home/nvidia/Projects/perception/eon_camera_bev
config_root_path: /home/ubuntu/projects/ros_camera_bev/src/camera_bev_infer
rgb_config_path: configs/weights/multi_yolov5m.yaml
device: 0
period_time: 5
vis_res: true
vis_frame_id: Lidar
obstacle_height_threshold: 30 # cm
camera_intrinsic: [969.963225323767, 0., 961.612472662341,
0., 969.950768718185, 559.945228058623,
0., 0., 1. ]
distortion_coefficients: [-0.348838135451152, 0.148638190210084, -0.000827836996476752, 0.000668923684285954, -0.0328034417302837]
camera2lidar_extrinsic: [0.00629082, -0.999524, -0.0301936, -0.127531,
0.000535903, 0.0301976, -0.999544, 0.13083,
0.99998, 0.00627177, 0.000725616, 0.349921,
0, 0, 0, 1 ]
src_img_width: 1920
src_img_height: 1080
dst_img_width: 960
dst_img_height: 540
rgb_sub_topic_name: /sensor/camera/rgb/data
pointcloud_sub_topic_name: /sensor/lidar/front_middle/points
detection_pub_topic_name: /perception/camera/obstacle
# static_gridmap_pub_topic_name: /perception/camera/bev_static_gridmap
creator_id: 0000
creator_name: camera_bev_node
sensor_frame_id: camera_01
# config_root_path: /home/nvidia/Projects/perception/eon_camera_bev
config_root_path: /home/ubuntu/projects/ros_camera_bev/src/camera_bev_infer
rgb_config_path: configs/weights/multi_yolov5m.yaml
device: 0
period_time: 5
vis_res: true
vis_frame_id: livox
obstacle_height_threshold: 30 # cm
camera_intrinsic: [1039.90153, 0, 637.825684,
0, 1039.90153, 367.552653,
0, 0, 1]
distortion_coefficients: [-2.3540384769439697, -57.614105224609375, -0.0002989917411468923,
-0.0006685509579256177, 427.8574523925781, -2.5440633296966553,
-55.27210998535156, 417.6490173339844]
camera2lidar_extrinsic: [0.00418332, -0.999987, -0.00283105, -0.213958,
0.0281522, 0.00294772, -0.999599, -0.936907,
0.999595, 0.00410195, 0.0281642, 0.834199,
0, 0, 0, 1]
src_img_width: 1280
src_img_height: 720
dst_img_width: 960
dst_img_height: 540
rgb_sub_topic_name: /sensor/camera/front_rgb/image
pointcloud_sub_topic_name: /vehicle_all_pointcloud
detection_pub_topic_name: /perception/camera/obstacle
# static_gridmap_pub_topic_name: /perception/camera/bev_static_gridmap
creator_id: 0000
creator_name: camera_bev_node
sensor_frame_id: camera_01
# config_root_path: /home/nvidia/Projects/perception/eon_camera_bev
config_root_path: /home/ubuntu/projects/ros_camera_bev/src/camera_bev_infer
rgb_config_path: configs/weights/multi_yolov5m.yaml
device: 0
period_time: 5
vis_res: true
vis_frame_id: livox
obstacle_height_threshold: 30 # cm
camera_intrinsic: [1346.61241105747, 0, 951.014999441565,
0, 1344.43795511862, 530.562496955385,
0, 0, 1]
distortion_coefficients: [-0.449196898930979, 0.253956763950171, -0.00139403693782871, -0.00111424348026907, -0.0870171627456232]
camera2lidar_extrinsic: [-0.0423904, -0.999009, 0.0135863, 0.0656235,
0.0313214, -0.0149206, -0.999398, -0.167858,
0.99861, -0.0419393, 0.0319228, -0.0162872,
0, 0, 0, 1]
src_img_width: 1920
src_img_height: 1080
dst_img_width: 960
dst_img_height: 540
rgb_sub_topic_name: /sensor/camera/front_rgb/image
pointcloud_sub_topic_name: /vehicle_all_pointcloud
detection_pub_topic_name: /perception/camera/obstacle
# static_gridmap_pub_topic_name: /perception/camera/bev_static_gridmap
creator_id: 0000
creator_name: camera_bev_node
sensor_frame_id: camera_01
# config_root_path: /home/nvidia/Projects/perception/eon_camera_bev
config_root_path: /home/ubuntu/projects/ros_camera_bev/src/camera_bev_infer
rgb_config_path: configs/weights/multi_yolov5m.yaml
device: 0
period_time: 5
vis_res: true
vis_frame_id: Lidar
obstacle_height_threshold: 30 # cm
camera_intrinsic: [1355.4376, 0, 976.03925,
0, 1358.05982, 561.13218,
0, 0, 1]
distortion_coefficients: [-0.405827, 0.123820, -0.003213, -0.003971, 0.0000000]
camera2lidar_extrinsic: [-0.0115715, -0.9996951, -0.0218133, 0.294,
-0.2273387, 0.0238737, -0.9735231, 3.563,
0.9737470, -0.0063061, -0.2275456, -0.807,
0, 0, 0, 1]
src_img_width: 1920
src_img_height: 1080
dst_img_width: 960
dst_img_height: 540
rgb_sub_topic_name: /sensor/front/image/bgr
pointcloud_sub_topic_name: /sensor/lidar/livox/front_middle/points
detection_pub_topic_name: /perception/camera/obstacle
# static_gridmap_pub_topic_name: /perception/camera/bev_static_gridmap
creator_id: 0000
creator_name: camera_bev_node
sensor_frame_id: camera_01
# config_root_path: /home/nvidia/Projects/perception/eon_camera_bev
config_root_path: /home/ubuntu/projects/ros_camera_bev/src/camera_bev_infer
rgb_config_path: configs/weights/multi_yolov5m.yaml
device: 0
period_time: 5
vis_res: true
vis_frame_id: front_middle_livox
obstacle_height_threshold: 30 # cm
camera_intrinsic: [969.796542854634, -0.129283640950858, 992.981470492944,
0. , 970.128231641552, 596.651376445781,
0. , 0. , 1. ]
distortion_coefficients: [-0.359234564377185, 0.169945565409037, 0.000788823270375011, 0.000877958277585529, -0.0433517026779682]
camera2lidar_extrinsic: [-0.00223004, -0.999324, -0.0367048, 0.70583,
0.0536961, 0.0365323, -0.997889, -0.761989,
0.998555, -0.00419624, 0.0535783, 0.781821,
0, 0, 0, 1]
src_img_width: 1920
src_img_height: 1080
dst_img_width: 960
dst_img_height: 540
name: CameraModel
inputNames: [cvImage]
outputNames: [tracked_instances, out_semantics, out_depths, undistortVisImage]
units:
-
name: CameraSrc
inputNames: [cvImage]
outputNames: [uint8Image]
-
name: UndistortV2
inputNames: [cameraParam, uint8Image]
outputNames: [undistortVisImage]
-
name: ResizeNorm
inputNames: [undistortVisImage]
outputNames: [resizeNormImages]
inputMean: [0, 0, 0]
inputStd: [1, 1, 1]
inferBatchSize: 1
inputWidth: 960
inputHeight: 544
fixAspectRatio: false
useBGR: false
-
name: TRTInference
inputNames: [resizeNormImages]
outputNames: [detections, seg_protos, depths, semantics]
runMode: 1 # 0-fp32 1-fp16 2-int8 (int8 not supported)
weightsPath: "configs/weights/isp_m_best.onnx"
engineFile: "configs/weights/isp_m_best_fp16.engine"
inferDynamic: false
inferBatchSize: 1
inputWidth: 960
inputHeight: 544
maxBatchSize: 1 # used when building the engine
-
name: MultiPostProcess
inputNames: [detections, seg_protos, depths, semantics, undistortVisImage]
outputNames: [out_instances, out_semantics, out_depths]
inputWidth: 960
inputHeight: 544
fixAspectRatio: false
# instance-seg
nmsThreshold: 0.45
scoreThreshold: 0.2 # used at inference time, can be modified
truncatedThreshold: 0.05
maxOutputNum: 1000
rawDetectionLength: 32130 # 25200
keepTopK: 100
segProtoDim: 32
instanceDownScale: 4
instanceClassNumber: 9
instanceClassNames: ["pedestrian", "two_wheel", "car", "truck", "construction_machine", "fence", "stone", "dust", "cone"]
# semantic-seg
semanticDownScale: 4
semanticClassNumber: 2
semanticClassNames: ["road", "sky"]
# depth
depthDownScale: 4
depthDistanceScale: 256
-
name: ByteTracker
inputNames: [out_instances]
outputNames: [tracked_instances]
frame_rate: 10
track_buffer: 30
track_thresh: 0.2
high_thresh: 0.36
match_thresh: 0.8
init_frames: 3
out_lost_threshold: 0
name: CameraModel
inputNames: [cvImage]
outputNames: [out_instances, out_semantics, out_depths, undistortVisImage]
units:
-
name: CameraSrc
inputNames: [cvImage]
outputNames: [uint8Image]
-
name: UndistortV2
inputNames: [cameraParam, uint8Image]
outputNames: [undistortVisImage]
-
name: ResizeNorm
inputNames: [undistortVisImage]
outputNames: [resizeNormImages]
inputMean: [0, 0, 0]
inputStd: [1, 1, 1]
inferBatchSize: 1
inputWidth: 960
inputHeight: 544
fixAspectRatio: false
useBGR: false
-
name: TRTInference
inputNames: [resizeNormImages]
outputNames: [detections, seg_protos, depths, semantics]
runMode: 1 # 0-fp32 1-fp16 2-int8 (int8 not supported)
weightsPath: "configs/weights/isp_s_best.onnx"
engineFile: "configs/weights/isp_s_best_fp16.engine"
inferDynamic: false
inferBatchSize: 1
inputWidth: 960
inputHeight: 544
maxBatchSize: 1 # used when building the engine
-
name: MultiPostProcess
inputNames: [detections, seg_protos, depths, semantics, undistortVisImage]
outputNames: [out_instances, out_semantics, out_depths]
inputWidth: 960
inputHeight: 544
fixAspectRatio: false
# instance-seg
nmsThreshold: 0.45
scoreThreshold: 0.2 # used at inference time, can be modified
truncatedThreshold: 0.05
maxOutputNum: 1000
rawDetectionLength: 32130 # 25200
keepTopK: 100
segProtoDim: 32
instanceDownScale: 4
instanceClassNumber: 9
instanceClassNames: ["pedestrian", "two_wheel", "car", "truck", "construction_machine", "fence", "stone", "dust", "cone"]
# semantic-seg
semanticDownScale: 4
semanticClassNumber: 2
semanticClassNames: ["road", "sky"]
# depth
depthDownScale: 4
depthDistanceScale: 256
<?xml version="1.0"?>
<package format="2">
<name>perception_camera_bev</name>
<version>0.0.0</version>
<description>The perception_camera_bev package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="root@todo.todo">root</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>TODO</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/Detect</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<build_depend>message_generation</build_depend>
<exec_depend>message_runtime</exec_depend>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>cv_bridge</build_depend>
<build_depend>roscpp</build_depend>
<build_depend>roslib</build_depend>
<build_depend>rospy</build_depend>
<build_depend>sensor_msgs</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>geometry_msgs</build_depend>
<build_depend>image_transport</build_depend>
<build_export_depend>cv_bridge</build_export_depend>
<build_export_depend>roscpp</build_export_depend>
<build_export_depend>roslib</build_export_depend>
<build_export_depend>rospy</build_export_depend>
<build_export_depend>sensor_msgs</build_export_depend>
<build_export_depend>std_msgs</build_export_depend>
<build_export_depend>geometry_msgs</build_export_depend>
<exec_depend>cv_bridge</exec_depend>
<exec_depend>roscpp</exec_depend>
<exec_depend>roslib</exec_depend>
<exec_depend>rospy</exec_depend>
<exec_depend>sensor_msgs</exec_depend>
<exec_depend>std_msgs</exec_depend>
<exec_depend>geometry_msgs</exec_depend>
<exec_depend>image_transport</exec_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-09-03 02:02:14
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-09-05 07:20:37
*/
#include "camera_frame.h"
namespace perception::camera{
/**
* @name: setDetections
* @msg: Take the instance segmentation results from the model and resize them to the projection size
* @param {ios::Detection2DsPtr} dets instance segmentation results output by the model
* @return {*}
*/
void cameraFrame::setDetections(waytous::deepinfer::ios::Detection2DsPtr dets){
for(auto& det: dets->detections){
object_Seg_info obj;
obj.labelIdx = det->class_label;
obj.name = det->class_name;
obj.fConfidence = det->confidence;
obj.rc = cv::Rect(det->x1, det->y1, det->x2 - det->x1, det->y2 - det->y1);
obj.color = name2color.find(det->class_name) == name2color.end() ? Perception_Default_Classification : name2color[det->class_name];
obj.bDetectType = 0; // 0 means instance segmentation result
// resize the mask to the bounding-box size
cv::resize(det->mask_ptr->decode(), obj.mask, cv::Size(obj.rc.width, obj.rc.height), 0, 0, cv::INTER_NEAREST);
objs_.push_back(obj);
}
};
/**
* @name: setSemantics
* @msg: Take the semantic segmentation results from the model and resize them to the projection size
* @param {ios::SemanticsPtr} sems semantic segmentation results output by the model
* @return {*}
*/
void cameraFrame::setSemantics(waytous::deepinfer::ios::SemanticsPtr sems){
for(auto& seg: sems->semantic_segs){
if(seg->class_name == "sky"){ // delete sky
continue;
}
object_Seg_info obj;
obj.labelIdx = seg->class_label;
obj.name = seg->class_name;
// std::cout<<img_width<<","<<img_height<<std::endl;
obj.rc = cv::Rect(0, 0, img_width, img_height);
obj.color = name2color.find(seg->class_name) == name2color.end() ? Perception_Default_Classification : name2color[seg->class_name];
obj.bDetectType = 1; // 1 means semantic segmentation result
// resize the mask to the image size
cv::resize(seg->mask_ptr->decode(), obj.mask, cv::Size(img_width, img_height), 0, 0, cv::INTER_NEAREST);
objs_.push_back(obj);
}
};
/**
* @name: setDepth
* @msg: Take the depth estimation result from the model and resize it to the projection size
* @param {ios::DepthPtr} depth depth estimation result output by the model
* @return {*}
*/
void cameraFrame::setDepth(waytous::deepinfer::ios::DepthPtr depth){
auto& dmat = depth->depth;
cv::Mat temp;
cv::resize(dmat, temp, cv::Size(img_width, img_height), 0, 0, cv::INTER_NEAREST);
// the model outputs values in [0, 1]; scale them by 256
temp = temp * 256;
temp.convertTo(depth_, CV_16U);
};
}
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-09-03 02:07:48
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-09-21 09:35:09
*/
#ifndef CAMERAFRAME_H
#define CAMERAFRAME_H
#include <memory>
#include <std_msgs/Header.h>
#include <libs/ios/detection.h>
#include <libs/ios/semantic.h>
#include <libs/ios/depth.h>
#include "vision_transform/vision_3d.h"
#include "utils.h"
namespace perception::camera{
class cameraFrame
{
public:
/**
* @name: setImageSrc
* @msg: Set the image data and update img_width and img_height
* @param {cv::Mat&} src source image
* @return {*}
*/
void setImageSrc(cv::Mat& src){
img_src_ = src;
img_width = img_src_.cols;
img_height = img_src_.rows;
}
/**
* @name: setDetections
* @msg: Take the instance segmentation results from the model and resize them to the projection size
* @param {ios::Detection2DsPtr} dets instance segmentation results output by the model
* @return {*}
*/
void setDetections(waytous::deepinfer::ios::Detection2DsPtr dets);
/**
* @name: setSemantics
* @msg: Take the semantic segmentation results from the model and resize them to the projection size
* @param {ios::SemanticsPtr} sems semantic segmentation results output by the model
* @return {*}
*/
void setSemantics(waytous::deepinfer::ios::SemanticsPtr sems);
/**
* @name: setDepth
* @msg: Take the depth estimation result from the model and resize it to the projection size
* @param {ios::DepthPtr} depth depth estimation result output by the model
* @return {*}
*/
void setDepth(waytous::deepinfer::ios::DepthPtr depth);
/**
* @name: time_valid
* @msg: Check whether the gap between now and the latest result exceeds the threshold; if it does, no output is produced
* @param
* @return {bool}
*/
inline bool time_valid()
{
int64_t tm_now= GetMillSecondTimeStamp();
return abs(tm_now - timestamp_sys_) < timevalid_interval_ ;
}
public:
int64_t timestamp_ = 0;
int64_t timestamp_sys_ = 0; // system time when the message was received
int64_t timevalid_interval_ = 200; // frame validity window, ms
std_msgs::Header sensor_header; // sensor header
std::string cameraName; // camera name
std::vector<object_Seg_info> objs_; // model outputs for the current frame: instance and semantic segmentation
cv::Mat depth_; // depth map of the current frame
cv::Mat img_src_; // undistorted image of the current frame, used for visualization
int img_width, img_height;
// std::shared_ptr<eon::proto::localization::LocalizationData> self_loc_ = nullptr; // localization obtained before model inference
};
typedef std::shared_ptr<cameraFrame> cameraFramePtr;
typedef std::shared_ptr<const cameraFrame> cameraFrameConstPtr;
}
#endif //CAMERAFRAME_H
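A minimal usage sketch of cameraFrame for reference; the helper makeFrame and its arguments are hypothetical placeholders, and the real wiring happens inside sensorManager::infer:

// Hypothetical helper, illustration only -- not part of the package.
#include "camera_frame.h"

perception::camera::cameraFramePtr makeFrame(
    cv::Mat& undistorted_bgr,
    waytous::deepinfer::ios::Detection2DsPtr dets,
    waytous::deepinfer::ios::SemanticsPtr sems,
    waytous::deepinfer::ios::DepthPtr dep)
{
    auto frame = std::make_shared<perception::camera::cameraFrame>();
    frame->timestamp_sys_ = perception::camera::GetMillSecondTimeStamp();
    frame->setImageSrc(undistorted_bgr);   // also sets img_width / img_height
    frame->setDetections(dets);            // instance masks, resized to each box
    frame->setSemantics(sems);             // semantic masks ("sky" is dropped), resized to the image
    frame->setDepth(dep);                  // [0, 1] depth scaled by 256 into CV_16U
    return frame;
}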
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-09-03 03:14:28
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-09-21 09:11:08
*/
#ifndef COMMON_MAP_H_
#define COMMON_MAP_H_
#include <string>
#include <map>
#include <vector>
#include <opencv2/opencv.hpp>
#include "waytous_perception_msgs/Object.h"
namespace perception::camera{
static const int Perception_Default_Classification = waytous_perception_msgs::Object::TYPE_UNKNOWN;
static const int Perception_Default_SubClassification = waytous_perception_msgs::Object::SUBTYPE_UNKNOWN;
static const cv::Scalar Perception_Default_Color = cv::Scalar(255, 255, 255);
static const int Perception_Default_Maxsize = 100; // m
static std::map<std::string, int> name2type = {
{"pedestrian", waytous_perception_msgs::Object::TYPE_PEDESTRIAN},
{"two_wheel", waytous_perception_msgs::Object::TYPE_BICYCLE},
{"car", waytous_perception_msgs::Object::TYPE_VEHICLE},
{"truck", waytous_perception_msgs::Object::TYPE_VEHICLE},
{"construction_machine", waytous_perception_msgs::Object::TYPE_MACHINERY},
{"vehicle", waytous_perception_msgs::Object::TYPE_VEHICLE},
{"fence", waytous_perception_msgs::Object::TYPE_UNKNOWN_STATIC},
{"stone", waytous_perception_msgs::Object::TYPE_UNKNOWN_STATIC},
{"dust", waytous_perception_msgs::Object::TYPE_UNKNOWN_MOVABLE},
{"cone", waytous_perception_msgs::Object::TYPE_UNKNOWN_STATIC},
{"road", waytous_perception_msgs::Object::TYPE_UNKNOWN_STATIC},
{"sky", waytous_perception_msgs::Object::TYPE_UNKNOWN_STATIC},
// others that may be used in the future
{"curb", waytous_perception_msgs::Object::TYPE_UNKNOWN_STATIC}, // curb
{"puddle", waytous_perception_msgs::Object::TYPE_UNKNOWN_STATIC}, // puddle
{"pit", waytous_perception_msgs::Object::TYPE_UNKNOWN_STATIC}, // pit
{"rut", waytous_perception_msgs::Object::TYPE_UNKNOWN_STATIC} // rut
};
// all subtypes may not be correct
static std::map<std::string, int> name2subtype = {
{"pedestrian", waytous_perception_msgs::Object::SUBTYPE_PEDESTRIAN},
{"two_wheel", waytous_perception_msgs::Object::SUBTYPE_MOTORCYCLIST},
{"car", waytous_perception_msgs::Object::SUBTYPE_CAR},
{"truck", waytous_perception_msgs::Object::SUBTYPE_TRUCK},
{"construction_machine", waytous_perception_msgs::Object::SUBTYPE_DOZER},
{"vehicle", waytous_perception_msgs::Object::SUBTYPE_TRUCK},
{"fence", waytous_perception_msgs::Object::SUBTYPE_UNKNOWN_STATIC},
{"stone", waytous_perception_msgs::Object::SUBTYPE_TRAFFIC_CONE},
{"dust", waytous_perception_msgs::Object::SUBTYPE_UNKNOWN_MOVABLE},
{"cone", waytous_perception_msgs::Object::SUBTYPE_TRAFFIC_CONE},
{"road", waytous_perception_msgs::Object::SUBTYPE_UNKNOWN_STATIC},
{"sky", waytous_perception_msgs::Object::SUBTYPE_UNKNOWN_STATIC},
// others that may be used in the future
{"curb", waytous_perception_msgs::Object::SUBTYPE_UNKNOWN_STATIC}, // curb
{"puddle", waytous_perception_msgs::Object::SUBTYPE_UNKNOWN_STATIC}, // puddle
{"pit", waytous_perception_msgs::Object::SUBTYPE_UNKNOWN_STATIC}, // pit
{"rut", waytous_perception_msgs::Object::SUBTYPE_UNKNOWN_STATIC} // rut
};
/*
static const eon::proto::perception::Classification_Type Perception_Default_Classification = eon::proto::perception::Classification::OBSTACLE_UNKNOWN;
static const eon::proto::perception::Classification_SubType Perception_Default_SubClassification = eon::proto::perception::Classification::SUB_STATIC_UNKNOWN;
static const cv::Scalar Perception_Default_Color = cv::Scalar(255, 255, 255);
static const int Perception_Default_Maxsize = 100; // m
static std::map<std::string, eon::proto::perception::Classification_Type> name2type = {
{"pedestrian", eon::proto::perception::Classification::PERSON},
{"two_wheel", eon::proto::perception::Classification::NON_VEHICLE},
{"car", eon::proto::perception::Classification::CAR},
{"truck", eon::proto::perception::Classification::TRUCK},
{"construction_machine", eon::proto::perception::Classification::CONSTRUCTION_MACHINERY},
{"fence", eon::proto::perception::Classification::FENCE},
{"stone", eon::proto::perception::Classification::STONE},
{"dust", eon::proto::perception::Classification::UNKNOWN},
{"cone", eon::proto::perception::Classification::CONE},
{"road", eon::proto::perception::Classification::ROAD},
{"sky", eon::proto::perception::Classification::UNKNOWN},
// others that may be used in the future
{"curb", eon::proto::perception::Classification::CURB}, // curb
{"puddle", eon::proto::perception::Classification::PUDDLE}, // puddle
{"pit", eon::proto::perception::Classification::OBSTACLE_UNKNOWN}, // pit
{"rut", eon::proto::perception::Classification::OBSTACLE_UNKNOWN} // rut
};
// all subtypes may not be correct
static std::map<std::string, eon::proto::perception::Classification_SubType> name2subtype = {
{"pedestrian", eon::proto::perception::Classification::SUB_PEDESTRIAN},
{"two_wheel", eon::proto::perception::Classification::SUB_MOTORCYCLE},
{"car", eon::proto::perception::Classification::SUB_CAR},
{"truck", eon::proto::perception::Classification::SUB_WIDE_BODY_TRUCK},
{"construction_machine", eon::proto::perception::Classification::SUB_DOZER},
{"fence", eon::proto::perception::Classification::SUB_FENCE},
{"stone", eon::proto::perception::Classification::SUB_STONE},
{"dust", eon::proto::perception::Classification::SUB_MOVABLE_UNKNOWN},
{"cone", eon::proto::perception::Classification::SUB_TRAFFIC_CONE},
{"road", eon::proto::perception::Classification::SUB_STATIC_UNKNOWN},
{"sky", eon::proto::perception::Classification::SUB_STATIC_UNKNOWN},
// others that may be used in the future
{"curb", eon::proto::perception::Classification::SUB_STATIC_UNKNOWN}, // curb
{"puddle", eon::proto::perception::Classification::SUB_STATIC_UNKNOWN}, // puddle
{"pit", eon::proto::perception::Classification::SUB_STATIC_UNKNOWN}, // pit
{"rut", eon::proto::perception::Classification::SUB_STATIC_UNKNOWN} // rut
};
*/
static std::map<std::string, cv::Scalar> name2color = {
{"pedestrian", cv::Scalar(0, 0, 128)},
{"two_wheel", cv::Scalar(0, 255, 128)},
{"car", cv::Scalar(0, 255, 255)},
{"truck", cv::Scalar(0, 128, 0)},
{"construction_machine", cv::Scalar(0, 255, 0)},
{"vehicle", cv::Scalar(0, 255, 0)},
{"fence", cv::Scalar(128, 255, 0)},
{"stone", cv::Scalar(255, 0, 255)},
{"dust", cv::Scalar(255, 0, 0)},
{"cone", cv::Scalar(0, 0, 255)},
{"road", cv::Scalar(0, 128, 128)},
{"sky", cv::Scalar(0, 128, 255)},
// others that may be used in the future
{"curb", cv::Scalar(255, 255, 255)}, // curb
{"puddle", cv::Scalar(255, 255, 255)}, // puddle
{"pit", cv::Scalar(255, 255, 255)}, // pit
{"rut", cv::Scalar(255, 255, 255)} // rut
};
static std::map<std::string, int> name2max_size = {
{"pedestrian", 1},
{"two_wheel", 2},
{"car", 10},
{"truck", 10},
{"construction_machine", 15},
{"vehicle", 10},
{"fence", 5},
{"stone", 2},
{"dust", Perception_Default_Maxsize},
{"cone", 1},
{"road", Perception_Default_Maxsize},
{"sky", Perception_Default_Maxsize},
// others that may be used in the future
{"curb", Perception_Default_Maxsize}, // curb
{"puddle", 10}, // puddle
{"pit", 10}, // pit
{"rut", 10} // rut
};
static std::vector<std::string> VehicleTypes = {"car", "truck", "construction_machine", "vehicle"};
static std::vector<std::string> StaticTypes = {"fence", "stone", "cone", "road", "curb", "puddle", "pit", "rut"};
}
#endif //COMMON_MAP_H_
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-09-03 01:23:43
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-09-27 06:26:04
*/
#include <unistd.h>
#include <string>
#include <opencv2/opencv.hpp>
#include <ros/ros.h>
#include <glog/logging.h>
#include <interfaces/base_model.h>
#include "sensor_manager.h"
#include "vision_transform/vision_3d.h"
int main(int argc, char** argv)
{
google::InitGoogleLogging("WARN");
ros::init(argc, argv, "vision_bev_node");
ros::NodeHandle nh;
// read parameters
perception::camera::ImageInferNodeParam param;
char buffer[1024];
auto r = getcwd(buffer, 1024);
std::string root = std::string(buffer) + "/";
std::string param_path = root + "src/camera_bev_infer/configs/CameraBevParam.yaml";
if(!waytous::deepinfer::common::PathExists(param_path)){
std::cout << param_path << " does not exist." << std::endl;
return 0;
}
if(!perception::camera::readInferParam(param, param_path)){
// eon::log::info() << "Error: node param setting error, node exit.";
std::cout<< "Error: node param setting error, node exit." << std::endl;
return 0;
};
// int rate = 1000 / param.period_time;
// ros::Rate loop_rate(rate);
while(ros::ok()){
// initialize the sensor manager
auto manager = std::make_shared<perception::camera::sensorManager>(nh, param);
ros::spin();
// ros::spinOnce();
// loop_rate.sleep();
};
return 0;
}
\ No newline at end of file
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-09-03 01:53:20
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-11-22 07:01:31
*/
#ifndef SENSORMANAGER_H
#define SENSORMANAGER_H
#include <string>
#include <boost/thread/thread.hpp>
#include <opencv2/opencv.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/common/transforms.h>
#include <pcl_conversions/pcl_conversions.h>
#include <interfaces/base_model.h>
#include <libs/ios/detection.h>
#include <libs/ios/camera_ios.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/PointCloud2.h>
#include <geometry_msgs/Pose.h>
#include <geometry_msgs/Vector3.h>
#include <geometry_msgs/Polygon.h>
// msg
#include "waytous_perception_msgs/ObjectArray.h"
#include "waytous_perception_msgs/Object.h"
#include "waytous_perception_msgs/Rect.h"
#include "common_maps.h"
#include "utils.h"
#include "camera_frame.h"
#include "undistort/undistort_v2.h"
#include "vision_transform/ros_visualization.h"
typedef pcl::PointCloud<pcl::PointXYZI> Cloud;
typedef Cloud::ConstPtr CloudConstPtr;
namespace perception::camera{
class sensorManager
{
public:
/**
* @name: sensorManager
* @msg: Constructor
* @param {ImageInferNodeParam&} param node parameters
* @return {*}
*/
sensorManager(ros::NodeHandle& nodehandle, ImageInferNodeParam& param);
~sensorManager(){
delete pub_thread_;
pub_thread_ = nullptr;
}
/**
* @name: infer
* @msg: Run model inference
* @return {cameraFramePtr} inference result
*/
cameraFramePtr infer();
/**
* @name: updateImage
* @msg: Update the subscribed image data
* @param {sensor_msgs::ImageConstPtr} img_msg image data
* @return {*}
*/
void updateImage(sensor_msgs::ImageConstPtr img_msg){
{
std::lock_guard<std::mutex> lock_(image_mutex);
img_timestamp_sys_ = GetMillSecondTimeStamp();
current_image_ = img_msg;
}
};
/**
* @name: getCurImage
* @msg: Get the latest image data
* @param {*}
* @return {sensor_msgs::ImageConstPtr} pointer to the image data
*/
sensor_msgs::ImageConstPtr getCurImage(){
{
std::lock_guard<std::mutex> lock_(image_mutex);
if(current_image_ == nullptr){
return nullptr;
}
auto temp = current_image_;
current_image_ = nullptr;
std::cout<<"image waiting time: " << GetMillSecondTimeStamp() - img_timestamp_sys_ << "ms" << std::endl;
return temp;
}
};
/**
* @name: updatePointcloud
* @msg: Update the point cloud data
* @param {sensor_msgs::PointCloud2ConstPtr} pdata point cloud data
* @return {*}
*/
void updatePointcloud(sensor_msgs::PointCloud2ConstPtr pdata){
{
std::lock_guard<std::mutex> lock_(pointcloud_mutex);
pc_timestamp_sys_ = GetMillSecondTimeStamp();
current_pointcloud_ = pdata;
}
};
/**
* @name: getCurPointcloud
* @msg: Get the latest point cloud data
* @param {*}
* @return {sensor_msgs::PointCloud2ConstPtr} pointer to the point cloud data
*/
sensor_msgs::PointCloud2ConstPtr getCurPointcloud(){
{
std::lock_guard<std::mutex> lock_(pointcloud_mutex);
auto temp = current_pointcloud_;
int64_t tm_now = GetMillSecondTimeStamp();
// discard point clouds older than 200 ms
if(std::abs(tm_now - pc_timestamp_sys_) > pc_timevalid_interval_){
current_pointcloud_ = nullptr;
}
return temp;
}
}
/**
* @name: publishDetection
* @msg: Publish traffic obstacle data
* @return {*}
*/
void publishDetection();
/**
* @name: getLatestTimestamp
* @msg: Get the timestamp of the latest data
* @return {int64_t}
*/
inline int64_t getLatestTimestamp(){
std::lock_guard<std::mutex> lock_(ts_mutex);
return latest_timestamp_;
}
private:
// parameters required by the node
ImageInferNodeParam infer_param;
ros::Publisher obj_publisher_; // obstacle publisher
ros::Subscriber image_subscriber_; // image subscriber
ros::Subscriber pointcloud_subscriber_; // point cloud subscriber
boost::thread* pub_thread_; // publishing thread
std::mutex image_mutex; // mutex guarding the image data
std::mutex pointcloud_mutex; // mutex guarding the point cloud data
std::mutex ts_mutex; // mutex guarding the timestamp
int64_t img_timestamp_sys_ = 0; // system time when the image message was received
int64_t pc_timestamp_sys_ = 0; // system time when the point cloud message was received
int64_t pc_timevalid_interval_ = 200; // point cloud validity window, ms
// timestamp: ms
int64_t latest_timestamp_ = 0; // timestamp of the latest data
int64_t frame_num_count = 0; // number of frames published since startup
// pointer to the inference model
std::shared_ptr<waytous::deepinfer::interfaces::BaseModel> multi_model = nullptr;
// 2D-to-BEV conversion class
std::shared_ptr<Visione_3D> vision_converter_ = nullptr;
// visualization class
std::shared_ptr<RosVisualization> ros_visualization_ptr_ = nullptr;
// whether the model initialized successfully
bool inited = false;
// image and point cloud data pointers
sensor_msgs::ImageConstPtr current_image_ = nullptr;
sensor_msgs::PointCloud2ConstPtr current_pointcloud_ = nullptr;
};
}
#endif //SENSORMANAGER_H
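For orientation, a hypothetical sketch of how the subscribers and publisher declared above are typically wired up; this is an illustration only, not the actual sensorManager constructor (its .cpp is not shown expanded in this commit):

// Hypothetical constructor wiring -- not the actual sensor_manager.cpp implementation.
perception::camera::sensorManager::sensorManager(ros::NodeHandle& nodehandle, ImageInferNodeParam& param)
{
    infer_param = param;
    image_subscriber_ = nodehandle.subscribe<sensor_msgs::Image>(
        param.rgb_sub_topic_name, 1,
        [this](const sensor_msgs::ImageConstPtr& msg) { updateImage(msg); });
    pointcloud_subscriber_ = nodehandle.subscribe<sensor_msgs::PointCloud2>(
        param.pointcloud_sub_topic_name, 1,
        [this](const sensor_msgs::PointCloud2ConstPtr& msg) { updatePointcloud(msg); });
    obj_publisher_ = nodehandle.advertise<waytous_perception_msgs::ObjectArray>(
        param.detection_pub_topic_name, 1);
    // assuming publishDetection runs the periodic publishing loop
    pub_thread_ = new boost::thread(&sensorManager::publishDetection, this);
}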
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-09-03 08:10:01
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-09-03 09:36:43
*/
#ifndef DEEPINFER_CAMERA_PARAM_H_
#define DEEPINFER_CAMERA_PARAM_H_
#include <vector>
#include <string>
#include <Eigen/Dense>
#include <common/common.h>
#include <interfaces/base_io.h>
namespace waytous {
namespace deepinfer {
namespace ios {
class CameraParam: public interfaces::BaseIO{
public:
// camera intrinsics
Eigen::Matrix<float, 3, 3, Eigen::RowMajor> camera_intrinsic;
Eigen::Matrix<float, 3, 3, Eigen::RowMajor> new_camera_intrinsic;
Eigen::Matrix<float, 1, 14, Eigen::RowMajor> distortion_coefficients;
// camera extrinsics
Eigen::Matrix4d camera2lidar_extrinsic;
// original camera image size and the size after rectification
int src_width_ = 0;
int src_height_ = 0;
int dst_width_ = 0;
int dst_height_ = 0;
};
using CameraParamPtr = std::shared_ptr<CameraParam>;
} // namespace ios
} // namespace deepinfer
} // namespace waytous
#endif
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-09-03 08:07:19
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-09-03 11:37:24
*/
#include "undistort/undistort_v2.h"
namespace waytous {
namespace deepinfer {
namespace preprocess {
/**
* @name: Init
* @msg: Initialize the undistortion unit
* @param {Node&} node by default parameters are passed in through this node
* @param {BaseIOMapPtr} pmap map holding the model pipeline IOs
* @return {bool} whether initialization succeeded
*/
bool UndistortV2::Init(YAML::Node& node, interfaces::BaseIOMapPtr pmap) {
if(!BaseUnit::Init(node, pmap)){
LOG_WARN << "Init undistortv2 error";
return false;
};
inited_ = InitParam();
return true;
}
/**
* @name: InitParam
* @msg: Fetch the parameters from pmap and initialize the undistortion
* @return {bool} whether the parameters were fetched successfully
*/
bool UndistortV2::InitParam(){
param_ = std::dynamic_pointer_cast<ios::CameraParam>(pMap->GetIOPtr(inputNames[0]));
if (param_ == nullptr){
LOG_INFO << "Undistort input " << inputNames[0] << ": camera param has not been initialized or does not exist.";
return false;
}
d_mapx_.Reshape({param_->dst_height_, param_->dst_width_});
d_mapy_.Reshape({param_->dst_height_, param_->dst_width_});
InitUndistortRectifyMap();
dst_img = std::make_shared<base::Image8U>(param_->dst_height_, param_->dst_width_, base::Color::BGR);
auto output = std::make_shared<ios::CameraSrcOut>(dst_img);
pMap->SetIOPtr(outputNames[0], output);
return true;
}
/**
* @name: InitUndistortRectifyMap
* @msg: Build the undistortion remap tables
* @return {*}
*/
void UndistortV2::InitUndistortRectifyMap() {
float fx = param_->camera_intrinsic(0, 0);
float fy = param_->camera_intrinsic(1, 1);
float cx = param_->camera_intrinsic(0, 2);
float cy = param_->camera_intrinsic(1, 2);
float nfx = param_->new_camera_intrinsic(0, 0);
float nfy = param_->new_camera_intrinsic(1, 1);
float ncx = param_->new_camera_intrinsic(0, 2);
float ncy = param_->new_camera_intrinsic(1, 2);
float k1 = param_->distortion_coefficients(0, 0);
float k2 = param_->distortion_coefficients(0, 1);
float p1 = param_->distortion_coefficients(0, 2);
float p2 = param_->distortion_coefficients(0, 3);
float k3 = param_->distortion_coefficients(0, 4);
float k4 = param_->distortion_coefficients(0, 5);
float k5 = param_->distortion_coefficients(0, 6);
float k6 = param_->distortion_coefficients(0, 7);
float s1 = param_->distortion_coefficients(0, 8);
float s2 = param_->distortion_coefficients(0, 9);
float s3 = param_->distortion_coefficients(0, 10);
float s4 = param_->distortion_coefficients(0, 11);
Eigen::Matrix3f R;
R << 1.f, 0.f, 0.f, 0.f, 1.f, 0.f, 0.f, 0.f, 1.f;
Eigen::Matrix3f Rinv = R.inverse();
for (int v = 0; v < param_->dst_height_; ++v) {
float *x_ptr = d_mapx_.mutable_cpu_data() + d_mapx_.offset(v);
float *y_ptr = d_mapy_.mutable_cpu_data() + d_mapy_.offset(v);
for (int u = 0; u < param_->dst_width_; ++u) {
Eigen::Matrix<float, 3, 1> xy1;
xy1 << (static_cast<float>(u) - ncx) / nfx,
(static_cast<float>(v) - ncy) / nfy, 1;
auto XYW = Rinv * xy1;
double nx = XYW(0, 0) / XYW(2, 0);
double ny = XYW(1, 0) / XYW(2, 0);
double r_square = nx * nx + ny * ny;
// double kr = (1 + ((k3*r2 + k2)*r2 + k1)*r2)/(1 + ((k6*r2 + k5)*r2 + k4)*r2);
// double xd = (x*kr + p1*_2xy + p2*(r2 + 2*x2) + s1*r2+s2*r2*r2);
// double yd = (y*kr + p1*(r2 + 2*y2) + p2*_2xy + s3*r2+s4*r2*r2);
double scale = (1 + r_square * (k1 + r_square * (k2 + r_square * k3)));
scale = scale / (1 + r_square * (k4 + r_square * (k5 + r_square * k6)));
double nnx =
nx * scale + 2 * p1 * nx * ny + p2 * (r_square + 2 * nx * nx) + s1 * r_square + s2 * r_square * r_square;
double nny =
ny * scale + p1 * (r_square + 2 * ny * ny) + 2 * p2 * nx * ny + s3 * r_square + s4 * r_square * r_square;
x_ptr[u] = static_cast<float>(nnx * fx + cx);
y_ptr[u] = static_cast<float>(nny * fy + cy);
}
}
}
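For reference, the loop above is the inverse mapping of the rational + thin-prism distortion model, with the same coefficient layout that readInferParam zero-pads to 14 entries: each undistorted pixel (u, v) is back-projected with the new intrinsics (primed), distorted, and re-projected with the original intrinsics. In the notation of the code:

$$
\begin{aligned}
x &= \frac{u - c_x'}{f_x'}, \quad y = \frac{v - c_y'}{f_y'}, \quad r^2 = x^2 + y^2,\\
s &= \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6},\\
x_d &= x s + 2 p_1 x y + p_2 (r^2 + 2x^2) + s_1 r^2 + s_2 r^4,\\
y_d &= y s + p_1 (r^2 + 2y^2) + 2 p_2 x y + s_3 r^2 + s_4 r^4,\\
\mathrm{map}_x(u, v) &= f_x x_d + c_x, \quad \mathrm{map}_y(u, v) = f_y y_d + c_y,
\end{aligned}
$$

where the primed values come from new_camera_intrinsic, the unprimed ones from camera_intrinsic, and R is the identity here.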
/**
* @name: Exec
* @msg: Run the unit: perform the undistortion
* @return {bool} whether execution succeeded
*/
bool UndistortV2::Exec(){
auto iptr = std::dynamic_pointer_cast<ios::CameraSrcOut>(pMap->GetIOPtr(inputNames[1]));
if (iptr == nullptr){
LOG_ERROR << "Undistort input " << inputNames[1] << " has not been initialized or does not exist.";
return false;
}
if (!inited_) {
inited_ = InitParam();
if(!inited_){
LOG_WARN << "Undistortion param not init.";
return false;
}
}
auto src_img = iptr->img_ptr_;
NppiInterpolationMode remap_mode = NPPI_INTER_LINEAR;
NppiSize src_image_size, dst_image_size;
src_image_size.width = param_->src_width_;
src_image_size.height = param_->src_height_;
dst_image_size.width = param_->dst_width_;
dst_image_size.height = param_->dst_height_;
NppiRect remap_roi = {0, 0, param_->src_width_, param_->src_height_};
NppStatus status;
int d_map_step = static_cast<int>(d_mapx_.shape(1) * sizeof(float));
switch (src_img->channels()) {
case 1:
status = nppiRemap_8u_C1R(
src_img->gpu_data(), src_image_size, src_img->width_step(), remap_roi,
d_mapx_.gpu_data(), d_map_step, d_mapy_.gpu_data(), d_map_step,
dst_img->mutable_gpu_data(), dst_img->width_step(), dst_image_size,
remap_mode);
break;
case 3:
status = nppiRemap_8u_C3R(
src_img->gpu_data(), src_image_size, src_img->width_step(), remap_roi,
d_mapx_.gpu_data(), d_map_step, d_mapy_.gpu_data(), d_map_step,
dst_img->mutable_gpu_data(), dst_img->width_step(), dst_image_size,
remap_mode);
break;
default:
LOG_ERROR << "Invalid number of channels: " << src_img->channels();
return false;
}
if (status != NPP_SUCCESS) {
LOG_ERROR << "NPP_CHECK_NPP - status = " << status;
return false;
}
return true;
}
/**
* @name: Name
* @msg: Unit name
* @return {std::string}
*/
std::string UndistortV2::Name() {
return "UndistortV2";
};
} // namespace preprocess
} // namespace deepinfer
} // namespace waytous
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-09-03 07:59:23
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-09-03 12:24:49
*/
#ifndef DEEPINFER_PREPROCESS_UNDISTORT_PARAM_H_
#define DEEPINFER_PREPROCESS_UNDISTORT_PARAM_H_
#include <memory>
#include <string>
#include <algorithm>
#include <Eigen/Dense>
#include <npp.h>
#include <common/file.h>
#include <interfaces/base_unit.h>
#include <base/image.h>
#include <libs/ios/camera_ios.h>
#include "undistort/camera_param.h"
namespace waytous {
namespace deepinfer {
namespace preprocess {
class UndistortV2: public interfaces::BaseUnit{
public:
/**
* @name: Init
* @msg: Initialize the undistortion unit
* @param {Node&} node by default parameters are passed in through this node
* @param {BaseIOMapPtr} pmap map holding the model pipeline IOs
* @return {bool} whether initialization succeeded
*/
bool Init(YAML::Node& node, interfaces::BaseIOMapPtr pmap) override;
/**
* @name: Exec
* @msg: Run the unit: perform the undistortion
* @return {bool} whether execution succeeded
*/
bool Exec() override;
/**
* @name: Name
* @msg: Unit name
* @return {std::string}
*/
std::string Name() override;
/**
* @name: InitParam
* @msg: Fetch the parameters from pmap and initialize the undistortion
* @return {bool} whether the parameters were fetched successfully
*/
bool InitParam();
/**
* @name: InitUndistortRectifyMap
* @msg: Build the undistortion remap tables
* @return {*}
*/
void InitUndistortRectifyMap();
UndistortV2(){};
~UndistortV2(){};
public:
// undistortion parameters
ios::CameraParamPtr param_ = nullptr;
// x/y remap tables for undistortion
base::Blob<float> d_mapx_;
base::Blob<float> d_mapy_;
// undistorted image
base::Image8UPtr dst_img;
bool inited_ = false;
};
DEEPINFER_REGISTER_UNIT(UndistortV2);
} // namespace preprocess
} // namespace deepinfer
} // namespace waytous
#endif //DEEPINFER_PREPROCESS_UNDISTORT_PARAM_H_
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-07-25 06:16:52
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-10-11 02:48:38
*/
#include "utils.h"
namespace perception::camera{
bool readInferParam(ImageInferNodeParam& param, const std::string& param_path){
auto node = YAML::LoadFile(param_path);
param.rgb_sub_topic_name = node["rgb_sub_topic_name"].as<std::string>();
param.pointcloud_sub_topic_name = node["pointcloud_sub_topic_name"].as<std::string>();
param.detection_pub_topic_name = node["detection_pub_topic_name"].as<std::string>();
param.creator_id = node["creator_id"].as<std::string>();
param.creator_name = node["creator_name"].as<std::string>();
param.sensor_frame_id = node["sensor_frame_id"].as<std::string>();
param.config_root_path = node["config_root_path"].as<std::string>();
if(!waytous::deepinfer::common::PathExists(param.config_root_path)){
// eon::log::info() << "Error: param setting: " << param.config_root_path << " does not exist, node exit.";
std::cout << "Error: param setting: " << param.config_root_path << " does not exist, node exit." << std::endl;
return false;
}
param.rgb_config_path = node["rgb_config_path"].as<std::string>();
param.device = node["device"].as<int>();
param.period_time = node["period_time"].as<int>();
param.vis_res = node["vis_res"].as<bool>();
param.vis_frame_id = node["vis_frame_id"].as<std::string>();
param.obstacle_height_threshold = node["obstacle_height_threshold"].as<float>();
param.cameraParam_ = std::make_shared<waytous::deepinfer::ios::CameraParam>();
param.cameraParam_->src_height_ = node["src_img_height"].as<int>();
param.cameraParam_->src_width_ = node["src_img_width"].as<int>();
param.cameraParam_->dst_height_ = node["dst_img_height"].as<int>();
param.cameraParam_->dst_width_ = node["dst_img_width"].as<int>();
// intrinsics
std::vector<float> camera_intrinsic = node["camera_intrinsic"].as<std::vector<float>>();
if(camera_intrinsic.size() != 9){
// eon::log::info() << "Error: camera intrinsic number != 9, cannot init node.";
std::cout << "Error: camera intrinsic number != 9, cannot init node." << std::endl;
return false;
}
float fx = float(param.cameraParam_->dst_width_) / float(param.cameraParam_->src_width_);
float fy = float(param.cameraParam_->dst_height_) / float(param.cameraParam_->src_height_);
for(int i=0; i<9; i++){
param.cameraParam_->camera_intrinsic(i) = camera_intrinsic[i];
// build the new intrinsic matrix according to the dst size
if(i == 0 || i == 2){
param.cameraParam_->new_camera_intrinsic(i) = camera_intrinsic[i] * fx;
}else if (i == 4 || i == 5){
param.cameraParam_->new_camera_intrinsic(i) = camera_intrinsic[i] * fy;
}else{
param.cameraParam_->new_camera_intrinsic(i) = camera_intrinsic[i];
}
}
// distortion coefficients
auto effs = node["distortion_coefficients"].as<std::vector<float>>();
int eff_size = effs.size();
if(eff_size != 4 && eff_size != 5 && eff_size != 8 && eff_size != 12){
// eon::log::info() << "Error: camera distortion coeff number != 4, 5, 8, 12, cannot init node.";
std::cout << "Error: camera distortion coeff number != 4, 5, 8, 12, cannot init node." << std::endl;
return false;
}
for(int i=0; i<14; i++){
if(i < eff_size){
param.cameraParam_->distortion_coefficients(i) = effs[i];
}else{
param.cameraParam_->distortion_coefficients(i) = 0;
}
}
auto ex_param = node["camera2lidar_extrinsic"].as<std::vector<float>>();
int ex_size = ex_param.size();
if(ex_size != 16){
// eon::log::info() << "Error: camera2lidar extrinsic number != 16, cannot init node.";
std::cout << "Error: camera2lidar extrinsic number != 16, cannot init node." << std::endl;
return false;
}
cv::Mat matTemp = cv::Mat::zeros(cv::Size(4, 4), CV_32FC1);
for(int i=0; i<16; i++){
matTemp.at<float>(i) = ex_param[i];
}
cv::cv2eigen(matTemp, param.cameraParam_->camera2lidar_extrinsic);
return true;
};
uint64_t GetMillSecondTimeStamp()
{
struct timeval curTime;
gettimeofday(&curTime, NULL);
uint64_t milli = curTime.tv_sec*1000 + curTime.tv_usec / 1000;
return milli;
}
}
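As a quick sanity check of the intrinsic rescaling above: with a 1920x1080 source and the 960x540 destination used in most of the configs, both scale factors are 0.5, so the new intrinsics are simply

$$
f_x' = 0.5 f_x,\quad c_x' = 0.5 c_x,\quad f_y' = 0.5 f_y,\quad c_y' = 0.5 c_y .
$$

For the first config, for example, fx = 969.80 becomes 484.90 and cx = 992.98 becomes 496.49; the skew entry and the homogeneous row are copied unchanged.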
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-09-03 03:14:28
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-10-11 02:48:24
*/
#ifndef UTILS_H_
#define UTILS_H_
#include <string>
#include <map>
#include <sys/time.h>
#include <vector>
#include <opencv2/opencv.hpp>
#include <Eigen/Dense>
#include <opencv2/core/eigen.hpp>
#include <yaml-cpp/yaml.h>
#include <common/file.h>
#include "undistort/camera_param.h"
namespace perception::camera{
struct ImageInferNodeParam{
// Subscriber
std::string rgb_sub_topic_name;
std::string pointcloud_sub_topic_name;
// Publisher
std::string detection_pub_topic_name;
// std::string static_gridmap_pub_topic_name;
// publish-header
std::string creator_id = "0000";
std::string creator_name = "camera_bev_node";
std::string sensor_frame_id = "camera_01";
// model param
std::string config_root_path;
std::string rgb_config_path;
int device = 0;
int period_time = 50; // ms
// visualization: whether to visualize the results
bool vis_res = true;
std::string vis_frame_id = "Lidar";
// camera parameters
waytous::deepinfer::ios::CameraParamPtr cameraParam_ = nullptr;
// height-difference threshold for falling-rock obstacles, cm
float obstacle_height_threshold = 30;
};
// bool ImageProto2CV(const eon::proto::sensor::Image& msg, cv::Mat& img);
bool readInferParam(ImageInferNodeParam& param, const std::string& param_path);
uint64_t GetMillSecondTimeStamp();
}
#endif //UTILS_H_
#include "vision_transform/ComFunction.h"
#include <unistd.h>
#include <sys/stat.h>
/**
* Connect nearby vertical gaps column by column
* binImage: [in] binary image
* nLength: [in] maximum gap length to connect
* bConnectEdge: [in] whether to also connect gaps touching the image border
*/
void ConnectSmallHeightGap(cv::Mat &binImage, int nLength, int bConnectEdge)
{
int nWidth = binImage.cols;
int nHeight = binImage.rows;
int start = 0;
int end;
int i, j;
int bFlag;
for (i = 0; i < nWidth; ++i)
{
bFlag = 0;
for (j = 0; j < nHeight; ++j)
{
auto& pbySrc = binImage.at<unsigned char>(j, i);
if (bFlag && (pbySrc != 0 || j == nHeight - 1))
{
end = j;
bFlag = 0;
if (bConnectEdge || (!bConnectEdge && start != 0 && end != nHeight - 1))
{
if ((end - start) <= nLength)
{
for(int m = start; m < end; m++ ){
binImage.at<unsigned char>(m, i) = 255;
}
}
}
}
if (!bFlag && pbySrc== 0)
{
start = j;
bFlag = 1;
}
}
}
}
/*******************************************************************
* Function: Findlabel
* Inputs:  label   matrix produced by connected-component labeling
*          number  number of labels
* Output:  vvPt    coordinates of the points belonging to each label
* Description: collect, per label, the coordinates of its connected component
********************************************************************/
int Findlabel(const cv::Mat &label, int number, std::vector<std::vector<cv::Point>> &vvPt)
{
vvPt.clear();
std::vector<cv::Point> vpt;
for (int i = 0; i <= number; i++)
{
vvPt.push_back(vpt);
}
int width = label.cols;
int height = label.rows;
if (CV_16U == label.type())
{
for (int j = 0; j < height; j++)
{
auto pDest = label.ptr<unsigned short>(j);
for (int i = 0; i < width; i++)
{
if (*pDest)
{
vvPt[*pDest].push_back(cv::Point(i, j));
}
pDest++;
}
}
}
else if (CV_32S == label.type())
{
for (int j = 0; j < height; j++)
{
auto pDest = label.ptr<int>(j);
for (int i = 0; i < width; i++)
{
if (*pDest)
{
vvPt[*pDest].push_back(cv::Point(i, j));
}
pDest++;
}
}
}
return 0;
}
cv::Rect GetBundingRect(std::vector<cv::Point> &vPt)
{
if (vPt.empty())
return cv::Rect(0, 0, 0, 0);
int xMax, xMin, yMax, yMin;
xMax = xMin = vPt[0].x;
yMax = yMin = vPt[0].y;
for (int i = 0; i < vPt.size(); i++)
{
xMax = MAX(xMax, vPt[i].x);
xMin = MIN(xMin, vPt[i].x);
yMax = MAX(yMax, vPt[i].y);
yMin = MIN(yMin, vPt[i].y);
}
return cv::Rect(xMin, yMin, xMax - xMin+1, yMax - yMin+1);
}
void removeSmallArea(cv::Mat &imageBin, int nLen)
{
std::vector<std::vector<cv::Point>> vvBinPt;
cv::Mat bin = imageBin.clone();
cv::Mat labels;
int n_comps = connectedComponents(bin, labels, 8, CV_16U);
Findlabel(labels, n_comps, vvBinPt);
for (int i = 1; i < n_comps; i++)
{
std::vector<cv::Point> &vBinPt = vvBinPt[i];
if (vBinPt.size() == 0)
continue;
cv::Rect rect = GetBundingRect(vBinPt);
if (rect.width < nLen) // small object
{
for (int m = 0; m < vBinPt.size(); m++)
{
imageBin.at<uchar>(vBinPt[m].y, vBinPt[m].x) = 0;
//*(pDst + vBinPt[m].y*pImageBin->widthStep + vBinPt[m].x) = 0;
}
}
}
}
/**
* Region-growing image segmentation
* mat [in] source image
* seedThr [in] seed threshold: pixels with seedThr <= value <= nMaxvalue are used as seed points
* edgeThr [in] growth threshold: pixels inside a grown region have value >= edgeThr
* imgBin [out] resulting binary segmentation
* nMaxvalue [in] maximum pixel value; pixels above it are never segmented
*/
int AreaGrow_segment(cv::Mat& mat, int seedThr, int edgeThr, cv::Mat & imgBin, int nMaxvalue)
{
imgBin = cv::Mat::zeros(mat.size(), CV_8UC1); // grown region
cv::Point waitSeed; // candidate seed point to grow
int waitSeed_value = 0; // value already written at the candidate position in imgBin
int opp_waitSeed_value = 0; // pixel value in mat at the candidate position
std::vector<cv::Point> seedVector; // seed stack
/////// extract seed points using seedThr and nMaxvalue
for(int j = 0; j < mat.rows; j++){
for(int i = 0; i < mat.cols; i++){
if( mat.at<uchar>(j,i) >= seedThr && mat.at<uchar>(j,i) <= nMaxvalue )
{
imgBin.at<uchar>(j,i) = 255;
seedVector.push_back(cv::Point(i,j));
}
}
}
/////// region growing according to edgeThr
//int direct[4][2] = { {0,-1},{1,0}, {0,1}, {-1,0} }; // 4-neighborhood; using it would lower the time cost
int direct[8][2] = { {-1,-1}, {0,-1}, {1,-1}, {1,0}, {1,1}, {0,1}, {-1,1}, {-1,0} }; // 8-neighborhood
while (!seedVector.empty()) // grow while the seed stack is non-empty, i.e. stop once every stacked seed has been processed
{
cv::Point seed = seedVector.back(); // take the last element
seedVector.pop_back(); // pop it to avoid rescanning
for (int i = 0; i < 8; i++) // visit the 8 neighbors of the seed point
{
waitSeed.x = seed.x + direct[i][0]; // column 0 of the i-th direction, i.e. the x offset
waitSeed.y = seed.y + direct[i][1]; // column 1 of the i-th direction, i.e. the y offset
// skip points outside the image
if (waitSeed.x < 0 || waitSeed.y < 0 ||
waitSeed.x >(mat.cols - 1) || (waitSeed.y > mat.rows - 1))
continue;
waitSeed_value = imgBin.at<uchar>(cv::Point(waitSeed.x, waitSeed.y)); // value already written at the candidate position
opp_waitSeed_value = mat.at<uchar>(cv::Point(waitSeed.x, waitSeed.y));
if (waitSeed_value == 0) // skip candidates that were already grown, to avoid re-growing in an endless loop
{
if (opp_waitSeed_value >= edgeThr && opp_waitSeed_value <= nMaxvalue ) // growth condition
{
imgBin.at<uchar>(cv::Point(waitSeed.x, waitSeed.y)) = 255;
seedVector.push_back(waitSeed); // push candidates that satisfy the growth condition onto the seed stack
}
}
}
}
return 0;
}
/**
* Fill holes in a binary image
* srcBw: [in] input image
* dstBw: [in/out] output image
* sz: [in] holes smaller than sz will be filled
*/
void fillHole(const cv::Mat& srcBw, cv::Mat &dstBw, cv::Size sz)
{
cv::Size m_Size = srcBw.size();
int nExtend = 4;
cv::Mat Temp = cv::Mat::zeros(m_Size.height+nExtend,m_Size.width+nExtend,srcBw.type()); // padded image
srcBw.copyTo(Temp(cv::Range(nExtend/2, m_Size.height + nExtend/2), cv::Range(nExtend/2, m_Size.width + nExtend/2)));
cv::floodFill(Temp, cv::Point(0, 0), cv::Scalar(255));
cv::Mat cutImg; // crop of the padded image
Temp(cv::Range(nExtend/2, m_Size.height + nExtend/2), cv::Range(nExtend/2, m_Size.width + nExtend/2)).copyTo(cutImg);
cv::Mat fill = ~cutImg;
//removeLargeArea( fill, sz );
dstBw = srcBw | fill;
}
/**
* @name: changeImageToBev
* @msg: Copy image colors into an RGB point cloud; the image and the point cloud must be allocated beforehand
* @param {Mat&} imageRgb input image
* @param {PointCloud<PointXYZRGB>&} pointBEV input/output point cloud
* @return {*}
*/
int changeImageToBev(const cv::Mat& imageRgb, pcl::PointCloud<pcl::PointXYZRGB>& pointBEV)
{
int count = 0;
for(int j = 0; j < imageRgb.rows; j++)
{
for(int i = 0; i < imageRgb.cols; i++)
{
auto& rgb = imageRgb.at<cv::Vec3b>(j,i);
if( rgb[0] || rgb[1] || rgb[2]){
pointBEV[count].r = rgb[0];
pointBEV[count].g = rgb[1];
pointBEV[count].b = rgb[2];
pointBEV[count].z = 0.1;
}
count++;
}
}
return 0;
}
int KeepThrLargestAreaRect(cv::Mat& imgBin)
{
cv::Mat temp;
cv::Mat labels;
imgBin.copyTo(temp);
//1. label connected components
int n_comps = connectedComponents(temp, labels, 4, CV_16U);
std::vector<int> histogram_of_labels(n_comps, 0);
// for (int i = 0; i < n_comps; i++) // initialize each label count to 0
// {
// histogram_of_labels.push_back(0);
// }
int rows = labels.rows;
int cols = labels.cols;
for (int row = 0; row < rows; row++) // count pixels per label
{
for (int col = 0; col < cols; col++)
{
histogram_of_labels.at(labels.at<unsigned short>(row, col)) += 1;
}
}
histogram_of_labels.at(0) = 0; // set the background label count to 0
//2. find the label index of the largest connected component
int maximum = 0;
int max_idx = 0;
for (int i = 0; i < n_comps; i++)
{
if (histogram_of_labels.at(i) > maximum)
{
maximum = histogram_of_labels.at(i);
max_idx = i;
}
}
//3. zero out pixels outside the largest connected component
for (int row = 0; row < rows; row++)
{
for (int col = 0; col < cols; col++)
{
if (labels.at<unsigned short>(row, col) != max_idx && imgBin.at<uchar>(row,col) )
{
imgBin.at<uchar>(row,col) = 0;
}
}
}
return 0;
}
/*
* @Description: basic utility functions
* @version:
* @Author: chengqingshui
* @Date: 2023-03-24 17:49:15
* @email: qingshui.cheng@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-11-07 02:34:13
*/
#ifndef COM_FUNCTION_H
#define COM_FUNCTION_H
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
/**
* Connect nearby vertical gaps column by column
* binImage: [in] binary image
* nLength: [in] maximum gap length to connect
* bConnectEdge: [in] whether to also connect gaps touching the image border
*/
void ConnectSmallHeightGap(cv::Mat &binImage, int nLength, int bConnectEdge =1);
int Findlabel(const cv::Mat &label, int number, std::vector<std::vector<cv::Point>> &vvPt);
cv::Rect GetBundingRect(std::vector<cv::Point>& vPt);
void removeSmallArea(cv::Mat &imageBin, int nLen);
/**
* Region-growing image segmentation
* mat [in] source image
* seedThr [in] seed threshold: pixels with seedThr <= value <= nMaxvalue are used as seed points
* edgeThr [in] growth threshold: pixels inside a grown region have value >= edgeThr
* imgBin [out] resulting binary segmentation
* nMaxvalue [in] maximum pixel value; pixels above it are never segmented
*/
int AreaGrow_segment(cv::Mat& srcMat, int seedThr, int edgeThr, cv::Mat & imgBin, int nMaxvalue);
/**
* Fill holes in a binary image
* srcBw: [in] input image
* dstBw: [in/out] output image
* sz: [in] holes smaller than sz will be filled
*/
void fillHole(const cv::Mat & srcBw, cv::Mat &dstBw, cv::Size sz);
/**
* @name: changeImageToBev
* @msg: Copy image colors into an RGB point cloud; the image and the point cloud must be allocated beforehand
* @param {Mat&} imageRgb input image
* @param {PointCloud<PointXYZRGB>&} pointBEV input/output point cloud
* @return {*}
*/
int changeImageToBev(const cv::Mat& imageRgb, pcl::PointCloud<pcl::PointXYZRGB>& pointBEV);
int KeepThrLargestAreaRect(cv::Mat& imgBin);
#endif
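A minimal sketch of how the helpers above can be chained on a single-channel image; segmentAndClean and the threshold values are hypothetical, chosen only to show the call order:

// Illustrative values only; these thresholds are not taken from the node configs.
#include <opencv2/opencv.hpp>
#include "vision_transform/ComFunction.h"

cv::Mat segmentAndClean(cv::Mat& gray)
{
    cv::Mat bin;
    AreaGrow_segment(gray, 200, 120, bin, 255); // seeds >= 200, grow down to values >= 120
    ConnectSmallHeightGap(bin, 5);              // bridge vertical gaps of <= 5 px
    fillHole(bin, bin, cv::Size(20, 20));       // fill enclosed holes
    removeSmallArea(bin, 10);                   // drop components narrower than 10 px
    KeepThrLargestAreaRect(bin);                // keep only the largest component
    return bin;
}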
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-11-07 01:52:07
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-11-23 02:22:56
*/
#include "vision_transform/ros_visualization.h"
RosVisualization::RosVisualization(ros::NodeHandle &nodehandle, std::string frame_id){
m_nodehandle = nodehandle;
current_frame_id = frame_id;
// m_pubCloudPoint_3D = m_nodehandle.advertise<sensor_msgs::PointCloud2>("vision_points_3d", 5);
m_pubCloudPoint_BEV = m_nodehandle.advertise<sensor_msgs::PointCloud2>("/perception/camera/vision_points_bev", 5);
m_pub_bounding_boxes = m_nodehandle.advertise<jsk_recognition_msgs::BoundingBoxArray>("/perception/camera/bounding_boxes", 5);
m_pub_image = m_nodehandle.advertise<sensor_msgs::Image>("/perception/camera/vision_image_result", 1);
}
void RosVisualization::Visualization(BEV_GridMap& vGrid, cv::Mat& image, const std::vector<object_Seg_info> &objs){
pcl::PointCloud<pcl::PointXYZRGB> lidar_acc_cloud; // accumulated object point cloud
if(objs.empty())
{
ShowCloudPoints(lidar_acc_cloud);
ShowBoundingBoxArray(vGrid.vInsObjs);
ShowImage(image, objs);
return;
}
lidar_acc_cloud.points.reserve(200*200); // get 200x200 points at most
vGrid.initVisualInfo();
vGrid.drawInsMasks();
vGrid.drawInsRects();
changeImageToBev(vGrid.m_imgBEV_static, vGrid.m_cloudBEV_static);
for( auto& pt : vGrid.m_cloudBEV_static){
if(pt.z <= 0) continue;
// pt.y += yshift;
lidar_acc_cloud.points.emplace_back(pt);
}
changeImageToBev(vGrid.m_imgBEV_Instance, vGrid.m_cloudBEV_Instance);
for( auto& pt : vGrid.m_cloudBEV_Instance){
if(pt.z <= 0) continue;
lidar_acc_cloud.points.emplace_back(pt);
}
lidar_acc_cloud.width = lidar_acc_cloud.points.size();
lidar_acc_cloud.height = 1;
ShowCloudPoints(lidar_acc_cloud);
ShowBoundingBoxArray(vGrid.vInsObjs);
ShowImage(image, objs);
return;
};
void RosVisualization::ShowImage(cv::Mat& image, const std::vector<object_Seg_info>& objs){
// image visualization
for(auto& obj: objs){
cv::Scalar color = obj.valid > 0 ? obj.color : cv::Scalar(255, 255, 255);
if(obj.valid == 2){ // from depth estimation
color = cv::Scalar(color[0] / 2, color[1] / 2, color[2] / 2);
}
cv::putText(image, obj.name + ":" + waytous::deepinfer::common::formatValue(obj.fConfidence, 2),
cv::Point(obj.rc.x, obj.rc.y - 5),
0, 0.6, color, 2, cv::LINE_8);
auto& rt = obj.rc;
cv::rectangle(image, rt, color, 2);
cv::Mat instance_mask = obj.mask;
for (int x = rt.x; x < rt.x + rt.width; x++) {
for (int y = rt.y; y < rt.y + rt.height; y++) {
float val = instance_mask.at<uchar>(y - rt.y, x - rt.x);
if (val <= 0.5) continue;
// LOG_INFO<<val<<" ";
image.at<cv::Vec3b>(y, x)[0] = image.at<cv::Vec3b>(y, x)[0] / 2 + color[0] / 2;
image.at<cv::Vec3b>(y, x)[1] = image.at<cv::Vec3b>(y, x)[1] / 2 + color[1] / 2;
image.at<cv::Vec3b>(y, x)[2] = image.at<cv::Vec3b>(y, x)[2] / 2 + color[2] / 2;
}
}
}
sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg();
m_pub_image.publish(msg);
}
int RosVisualization::ShowBoundingBoxArray(const std::vector<InstanceBevObject>& bevObjs)
{
// box fitting
jsk_recognition_msgs::BoundingBoxArray arraybox;
for (auto& obj: bevObjs)
{
jsk_recognition_msgs::BoundingBox box;
box.header.stamp = ros::Time::now();
box.header.frame_id = current_frame_id;
cv::Point2f pos = BEV_GridMap::convertmap_2_3d((int)obj.rrc.center.x, (int)obj.rrc.center.y);
box.pose.position.x = pos.x;
box.pose.position.y = pos.y;
box.pose.position.z = 0;
auto orient = tf::createQuaternionFromYaw((-obj.rrc.angle+90)* 3.14 / 180);
box.pose.orientation.x = orient.x();
box.pose.orientation.y = orient.y();
box.pose.orientation.z = orient.z();
box.pose.orientation.w = orient.w();
box.dimensions.x = obj.rrc.size.width*BEV_GRID_RESOLUTION;
box.dimensions.y = obj.rrc.size.height*BEV_GRID_RESOLUTION;
box.dimensions.z = 0.1;
box.label = obj.color.rgba;
//filter object in the air
arraybox.boxes.emplace_back(box);
}
// publish
arraybox.header.stamp = ros::Time::now();
arraybox.header.frame_id = current_frame_id;
m_pub_bounding_boxes.publish(arraybox);
return 0;
}
template <typename PointT>
void RosVisualization::ShowCloudPoints(const pcl::PointCloud<PointT> &pt_cloud)
{
sensor_msgs::PointCloud2 map_point_pub;
pcl::toROSMsg(pt_cloud, map_point_pub);
map_point_pub.header.frame_id = current_frame_id;
map_point_pub.header.stamp = ros::Time::now();
m_pubCloudPoint_BEV.publish(map_point_pub);
}
/*
* @Description:
* @version:
* @Author: wxin
* @Date: 2023-11-07 01:52:23
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-11-07 08:30:49
*/
#ifndef ROS_VISUAL_H_
#define ROS_VISUAL_H_
#include <vector>
#include <opencv2/opencv.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/PointCloud2.h>
#include <jsk_recognition_msgs/BoundingBoxArray.h>
#include <tf/transform_broadcaster.h>
// deepinfer
#include <common/common.h>
#include <common/file.h>
// vision3d
#include "vision_transform/vision_3d.h"
class RosVisualization{
public:
RosVisualization(ros::NodeHandle &nodehandle, std::string frame_id="Lidar");
void Visualization(BEV_GridMap& vGrid, cv::Mat& image, const std::vector<object_Seg_info>& objs);
/**
* @name: ShowBoundingBoxArray
* @msg: display the objects' bounding boxes
* @param {const std::vector<InstanceBevObject>&} bevObjs
* @return {*}
*/
int ShowBoundingBoxArray(const std::vector<InstanceBevObject>& bevObjs);
/**
* @name: ShowImage
* @msg: visualize the image
* @param {cv::Mat&} image
* @param {const std::vector<object_Seg_info>&} objs
* @return {*}
*/
void ShowImage(cv::Mat& image, const std::vector<object_Seg_info>& objs);
/**
* @name: ShowCloudPoints
* @msg: visualize the point cloud
* @param {const pcl::PointCloud<PointT> &} pt_cloud
* @return {*}
*/
template <typename PointT>
void ShowCloudPoints(const pcl::PointCloud<PointT> &pt_cloud);
private:
// ros::Publisher m_pubCloudPoint_3D;
std::string current_frame_id = "Lidar";
ros::Publisher m_pubCloudPoint_BEV;
ros::Publisher m_pub_bounding_boxes;
ros::NodeHandle m_nodehandle;
ros::Publisher m_pub_image;
};
#endif // ROS_VISUAL_H_
/*
* @Description:
* @version:
* @Author: chengqingshui
* @Date: 2023-04-13 14:37:39
* @email: qingshui.cheng@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-11-23 08:53:21
*/
#ifndef VISION_3D_H
#define VISION_3D_H
#include<iostream>
#include <string>
#include <vector>
#include <time.h>
#include <algorithm>
#include <opencv2/opencv.hpp>
#include <Eigen/Dense>
#include <opencv2/core/eigen.hpp>
#include <pcl_conversions/pcl_conversions.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/filters/voxel_grid.h>
#include <pcl/common/transforms.h>
#include <pcl/filters/approximate_voxel_grid.h>
// #include "vision_transform/lshaped_fitting.h"
#include "vision_transform/ComFunction.h"
#include "common_maps.h"
#include "utils.h"
typedef pcl::PointCloud<pcl::PointXYZI> Cloud;
typedef Cloud::ConstPtr CloudConstPtr;
const int IMAGE_RESIZE_WIDTH = 960; // resized image width
const int IMAGE_RESIZE_HEIGHT = 540; // resized image height
const int DISPARTY_MAX = 128; // maximum disparity
const int DISPARTY_MAGNIFICATE = 256; // disparity magnification factor
const int BEV_SIZE_WIDTH = 60; // BEV image extent, width
const int BEV_SIZE_HEIGHT = 85; // BEV image extent, height
const int BEV_CAR_LENGTH = 5; // vehicle length in the BEV image
const int CAMERA_PERCEPT_LENGTH = BEV_SIZE_HEIGHT-BEV_CAR_LENGTH; // camera perception range
const float BEV_GRID_RESOLUTION = 0.2f; // BEV grid resolution
const int DEPTH_TABEL_MAX_INDEX = (int)(CAMERA_PERCEPT_LENGTH/BEV_GRID_RESOLUTION);
const int BEV_IAGE_WIDTH = (int)(BEV_SIZE_WIDTH/BEV_GRID_RESOLUTION);
const int BEV_IAGE_HEIGHT = (int)(BEV_SIZE_HEIGHT/BEV_GRID_RESOLUTION);
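// Derived sizes, worked out from the constants above (assuming the BEV extents are
// in meters and BEV_GRID_RESOLUTION is meters per cell, which matches how the
// resolution is used when converting BEV rect sizes to box dimensions):
//   CAMERA_PERCEPT_LENGTH = 85 - 5   = 80  (forward perception range)
//   DEPTH_TABEL_MAX_INDEX = 80 / 0.2 = 400 (depth index bins)
//   BEV_IAGE_WIDTH        = 60 / 0.2 = 300 (BEV image columns)
//   BEV_IAGE_HEIGHT       = 85 / 0.2 = 425 (BEV image rows)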
struct BEV_COLOR
{
PCL_ADD_UNION_RGB
};
/**
* Object segmentation information
*/
struct object_Seg_info
{
object_Seg_info()
{
labelIdx = 0;
color = cv::Scalar(255,0,255);
}
int labelIdx; // object class index
std::string name; // object name
cv::Scalar color; // annotation color
float fConfidence; // confidence score
int bDetectType; // 0 instance 1 semantics
cv::Rect rc; // object bounding rectangle
cv::Mat mask; // binary object mask, same size as the rectangle
uint8_t valid = 0;
};
// BEV instance object information
struct InstanceBevObject
{
cv::Rect rc; // object rectangle in the image
cv::RotatedRect rrc; // rotated bounding rectangle in the BEV image
BEV_COLOR color; // color
std::string names; // object class name
std::vector <cv::Point > vPoints; // object mask points in the BEV image
float fConfidence; // confidence score
bool fromDepth = false;
};
class BEV_GridMap
{
public:
BEV_GridMap();
void initVisualInfo();
static cv::Point convert3d_2_map(float fx, float fy);
static cv::Point2f convertmap_2_3d(int x, int y);
/**
* @name: drawInsMasks
* @msg: draw instance object masks on the BEV instance image
* @return {*}
*/
void drawInsMasks();
/**
* @name: drawInsRects
* @msg: draw instance object bounding rectangles on the BEV instance image
* @return {*}
*/
void drawInsRects();
public:
// instance objects
std::vector<InstanceBevObject> vInsObjs;
// BEV semantic image
cv::Mat m_imgBEV_static;
// BEV semantic map class labels
pcl::PointCloud<pcl::PointXYZI> m_pointcloud_static_class;
// visualization info
// BEV instance image
cv::Mat m_imgBEV_Instance;
// BEV instance object point cloud
pcl::PointCloud<pcl::PointXYZRGB> m_cloudBEV_Instance;
// original BEV instance object point cloud
// pcl::PointCloud<pcl::PointXYZRGB> m_cloudBEV_ori;
// BEV static semantic point cloud
pcl::PointCloud<pcl::PointXYZRGB> m_cloudBEV_static;
static int m_nCenterX; // X coordinate of the BEV image center
static int m_nCenterY; // Y coordinate of the BEV image center
};
class Visione_3D
{
public:
Visione_3D(const perception::camera::ImageInferNodeParam& param);
~Visione_3D();
/**
* @name: Process_objects
* @msg: core processing function
* @param {Mat} image RGB image
* @param {Mat} imgdist disparity map or depth map, indicated by bdepth
* @param {vector<object_Seg_info>} list of instance / semantic object info
* @param {BEV_GridMap&} vGrid grid map that stores the BEV result
* @return {*}
*/
int Process_objects(const cv::Mat& image, const cv::Mat& imgdist, std::vector<object_Seg_info>&objs, BEV_GridMap& vGrid);
/**
* @name: Process_objects
* @msg: obtain the BEV result by projecting the point cloud
* @param {Mat} image RGB image
* @param {Cloud::Ptr} pcl_cloud point cloud
* @param {vector<object_Seg_info>} list of instance / semantic object info
* @param {BEV_GridMap&} vGrid grid map that stores the BEV result
* @return {*}
*/
int Process_objects(const cv::Mat& image, Cloud::Ptr& pcl_cloud, std::vector<object_Seg_info>&objs, BEV_GridMap& vGrid);
/**
* @name: Process_objects
* @msg: obtain the BEV result from point cloud projection and depth estimation
* @param {Mat} image RGB image
* @param {Mat} depth depth map
* @param {Cloud::Ptr} pcl_cloud point cloud
* @param {vector<object_Seg_info>} list of instance / semantic object info
* @param {BEV_GridMap&} vGrid grid map that stores the BEV result
* @return {*}
*/
int Process_objects(const cv::Mat& image, const cv::Mat& depth, Cloud::Ptr& pcl_cloud, std::vector<object_Seg_info>&objs, BEV_GridMap& vGrid);
private:
int init(waytous::deepinfer::ios::CameraParamPtr camera_param_ptr);
int initConvertMap();
/**
* @name: Calc_Depth_Clouds
* @msg: compute the point cloud corresponding to a disparity map
* @param {Mat &} disparty disparity map
* @param {PointCloud<PointXYZ>} pointCloud converted point cloud
* @return {*}
*/
int Calc_Depth_Clouds(const cv::Mat & disparty, pcl::PointCloud<pcl::PointXYZ>& pointCloud );
int Process_Segmantic_Objects(object_Seg_info& obj, const cv::Mat& dispart, const unsigned short * pTable, BEV_GridMap& vGrid ) ;
int Process_Instance_Objects(object_Seg_info& obj, const cv::Mat& dispart, const unsigned short * pTable, BEV_GridMap& vGrid );
int Process_Segmantic_Objects(object_Seg_info& obj, const cv::Mat& indices, Cloud::Ptr& pcl_cloud, BEV_GridMap& vGrid);
int Process_Instance_Objects(object_Seg_info& obj, const cv::Mat& indices, Cloud::Ptr& pcl_cloud, BEV_GridMap& vGrid);
cv::RotatedRect getObjectAreaRect(const cv::Mat& imgBin, const cv::Mat& imgCount, cv::Rect objRc,
std::vector<cv::Point >& vPoints, cv::Vec3b color, const std::string& objClassName);
int conver_worldPt_pixelPt(const pcl::PointXYZ& ptworld, cv::Point2f& ptImg );
std::string current_frame_id = "Lidar";
cv::Mat m_matrixQ;
Eigen::Matrix4d m_lidar2camera;
// Eigen::Matrix4d m_lidar2odom;
float obstacle_height_threshold = 30; // obstacle height-difference threshold
float g_depthScaleFactor = 256.0/65536.0f; //256/65536.0f; //
// float g_depthScaleFactor = 100.0/65536.0f;
// uvd -> x y lookup table
short m_Index2D_to_bev[IMAGE_RESIZE_HEIGHT][IMAGE_RESIZE_WIDTH][DEPTH_TABEL_MAX_INDEX][2]; // the last two dimensions store x and y
// distance index for each disparity value
// unsigned short m_Dist2Index[DISPARTY_MAX*DISPARTY_MAGNIFICATE];
// distance index for each depth value
unsigned short m_Depth2Index[65536];
// disparity value for each BEV grid distance
// int m_nZ2dis[DEPTH_TABEL_MAX_INDEX];
// number of image pixels covered by a BEV_GRID_RESOLUTION x BEV_GRID_RESOLUTION meter grid cell
int m_nGridPixelNum [BEV_IAGE_HEIGHT][BEV_IAGE_WIDTH];
// reference pixel count for the BEV image
cv::Mat m_imgPixelNum;
// reference pixel count for the BEV image
//cv::Mat m_imgPixelValid;
int m_nPixelValidHeight;
// maximum vehicle length
int m_nMaxBEVVehicleLength;
};
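// End-to-end sketch of how these pieces are expected to fit together (illustrative
// only; `param`, `image`, `cloud` and `objs` are hypothetical placeholders):
//
//   Visione_3D vision3d(param);                           // build lookup tables from camera params
//   BEV_GridMap grid;
//   vision3d.Process_objects(image, cloud, objs, grid);   // project detections onto the BEV grid
//   // grid.vInsObjs and grid.m_imgBEV_* now hold the BEV result, ready for
//   // RosVisualization::Visualization(...)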
#endif
cmake_minimum_required(VERSION 2.8.3)
project(waytous_perception_msgs)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
geometry_msgs
message_generation
message_runtime
nav_msgs
std_msgs
grid_map_msgs
roscpp
sensor_msgs
tf
tf2
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
add_message_files(
FILES
Rect.msg
FlatnessMap.msg
NormalMap.msg
Object.msg
ObjectArray.msg
ObjectTemp.msg
ObjectArrayTemp.msg
Road.msg
RoadArray.msg
Scene.msg
WallMap.msg
FleetObject.msg
)
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
generate_messages(
DEPENDENCIES
geometry_msgs
nav_msgs
sensor_msgs
std_msgs
grid_map_msgs
)
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
INCLUDE_DIRS
# LIBRARIES waytous_perception_msgs
CATKIN_DEPENDS geometry_msgs std_msgs message_generation message_runtime grid_map_msgs sensor_msgs nav_msgs roscpp sensor_msgs tf tf2
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
#include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/waytous_perception_msgs.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/waytous_perception_msgs_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# install(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables and/or libraries for installation
# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_waytous_perception_msgs.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)
Header header
grid_map_msgs/GridMap elevation_map # elevation map
grid_map_msgs/GridMap flatness_map # flatness map
grid_map_msgs/GridMap slope_map # slope map
grid_map_msgs/GridMap occupancy_map # drivable area
#---------------------fitness_level--------------------
int8 FLATNESS_LEVEL0 = 0
int8 FLATNESS_LEVEL1 = 1
int8 FLATNESS_LEVEL2 = 2
int8 FLATNESS_LEVEL3 = 3
int8 FLATNESS_LEVEL4 = 4
#---------------------occupancy_level--------------------
int8 OCC_UNKNOWN = 0
int8 OCC_FREE = 1
int8 OCC_MOVABLE = 2
int8 OCC_STATIC = 3
Header header
int8 weather_type # weather
int8 light_type # lighting condition
int8 place_type # place
#-------------------weather_type-----------------
int8 WEATHER_UNKNOWN = 0 # unknown
int8 WEATHER_SUNNY = 1 # sunny
int8 WEATHER_CLOUDY = 2 # cloudy
int8 WEATHER_RAINY = 3 # rainy
int8 WEATHER_HAZE = 4 # haze
int8 WEATHER_SNOWY = 5 # snowy
#-------------------light_type-----------------
int8 LIGHT_DAYTIME = 0 # daytime
int8 LIGHT_DUSK = 1 # dusk
int8 LIGHT_NIGHT = 2 # night
#-------------------place_type-----------------
int8 PLACE_COMMON_ROAD = 0
int8 PLACE_PARKING_SPACE = 1
int8 PLACE_CROSSWALK = 2
int8 PLACE_GAS_STATION = 3
int8 PLACE_WEIGHT_STATION = 4
std_msgs/Header header
waytous_perception_msgs/ObjectArray objects
waytous_perception_msgs/DrivableAreaMap maps
waytous_perception_msgs/DrivingScene scene
waytous_perception_msgs/TrafficLight[] trafficlight_array
Header header
grid_map_msgs/GridMap elevation_map # elevation map
grid_map_msgs/GridMap flatness_map # flatness map
#---------------------fitness_level--------------------
int8 FLATNESS_LEVEL0 = 0
int8 FLATNESS_LEVEL1 = 1
int8 FLATNESS_LEVEL2 = 2
int8 FLATNESS_LEVEL3 = 3
int8 FLATNESS_LEVEL4 = 4
int8 id # vehicle ID, required
int8 property # vehicle ownership, required
int16 weight # vehicle weight
geometry_msgs/Pose pose # vehicle pose, required
geometry_msgs/Vector3 dimensions # vehicle size
geometry_msgs/Vector3 velocity # vehicle velocity, required
geometry_msgs/Vector3 accelerate # vehicle acceleration
int8[] next_map_id # map IDs of the future driving path
float32[] id_probs # prediction probabilities
#---------------------property--------------------
int8 PROPERTY_OUTSIDER = 0 # default: vehicle outside the fleet system
int8 PROPERTY_MGV = 1 # manned vehicle in the system
int8 PROPERTY_UGV = 2 # unmanned vehicle in the system
std_msgs/Header header
FleetObject[] fleet_object_list
\ No newline at end of file
Header header
float32 ogmwidth # 2D occupancy grid map width
float32 ogmheight # 2D occupancy grid map height
int16 vehicle_x # horizontal position of the vehicle in the grid map
int16 vehicle_y # vertical position of the vehicle in the grid map
float32 rectangle_minx # left boundary of the fitted rectangle
float32 rectangle_maxx # right boundary of the fitted rectangle
float32 rectangle_miny # bottom boundary of the fitted rectangle
float32 rectangle_maxy # top boundary of the fitted rectangle
float32 slopeupdegree # slope value
float32 slopedestination # slope direction (0-360 degrees)
bool vehiclepositionflag # slope type (used to judge whether this is a slope point, and positive/negative slope)
geometry_msgs/Vector3 normal #normal
sensor_msgs/PointCloud2 pointcloud_with_normal
int8 id # Object ID, required
int8 property # vehicle property, optional
float32 exist_prob # exist probability
############### 2D Rect
waytous_perception_msgs/Rect obb2d # [x,y,w,h,orientation], required
############### 3D Dimension
geometry_msgs/Pose pose # the pose of the center of the object, position is required, quaternion is optional
geometry_msgs/Vector3 dimensions # required
geometry_msgs/Polygon convex_hull # the hull of the object, required
sensor_msgs/PointCloud2 cloud # the cloud of the object, optional
############### classification info
int8 type # major type, optional
float32[] type_probs
int8 sub_type # sub type, optional
float32[] sub_type_probs
############### tracking info
int8 track_id # required
geometry_msgs/Vector3 velocity # required
float32[] velocity_covariance # optional
geometry_msgs/Vector3 accelerate # required
geometry_msgs/Vector3[] trace # optional
geometry_msgs/Vector3[] trajectory # optional
FleetObject fleet_object # optional
#---------------------major_type--------------------
int8 TYPE_UNKNOWN = 0
int8 TYPE_PEDESTRIAN = 1
int8 TYPE_BICYCLE = 2
int8 TYPE_VEHICLE = 3
int8 TYPE_MACHINERY = 4
int8 TYPE_UNKNOWN_MOVABLE = 5
int8 TYPE_UNKNOWN_STATIC = 6
#---------------------sub_type--------------------
int8 SUBTYPE_UNKNOWN = 0
int8 SUBTYPE_CAR = 1
int8 SUBTYPE_VAN = 2
int8 SUBTYPE_TRUCK = 3
int8 SUBTYPE_DUMPER = 4
int8 SUBTYPE_FLUSHER = 5
int8 SUBTYPE_DOZER = 6
int8 SUBTYPE_EXCAVATOR = 7
int8 SUBTYPE_CYCLIST = 8
int8 SUBTYPE_MOTORCYCLIST = 9
int8 SUBTYPE_TRICYCLIST = 10
int8 SUBTYPE_PEDESTRIAN = 11
int8 SUBTYPE_TRAFFIC_CONE = 12
int8 SUBTYPE_UNKNOWN_MOVABLE = 13
int8 SUBTYPE_UNKNOWN_STATIC = 14
#---------------------property--------------------
int8 PROPERTY_OUTSIDER = 0
int8 PROPERTY_MGV = 1
int8 PROPERTY_UGV = 2
Header header
int8 sensor_type # Object sensor source, required
sensor_msgs/Image image
waytous_perception_msgs/Object[] foreground_objects
waytous_perception_msgs/Object[] background_objects
#---------------------sensor_type--------------------
int8 SENSOR_INVALID = 0
int8 SENSOR_RADAR = 1
int8 SENSOR_LIDAR = 2
int8 SENSOR_CAMERA = 4
int8 SENSOR_VEHICLE = 8
int8 SENSOR_V2X = 16
int8 SENSOR_FLEET = 32
\ No newline at end of file
Header header
sensor_msgs/Image roi_image
waytous_perception_msgs/ObjectTemp[] foreground_objects
waytous_perception_msgs/ObjectTemp[] background_objects
waytous_perception_msgs/Scene current_scene
int8 id # Object ID, required
int8 property # vehicle property, optional
float32 exist_prob # exist probability
############### 2D Rect
waytous_perception_msgs/Rect obb2d # [x,y,w,h,orientation], required
############### 3D Dimension
geometry_msgs/Pose pose # the pose of the center of the object, position is required, quaternion is optional
geometry_msgs/Vector3 dimensions # required
geometry_msgs/Polygon convex_hull # the hull of the object, required
sensor_msgs/PointCloud2 cloud # the cloud of the object, optional
############### classification info
int8 type # major type, optional
float32[] type_probs
int8 sub_type # sub type, optional
float32[] sub_type_probs
############### tracking info
uint32 track_id # required
geometry_msgs/Vector3 velocity # required
float32[] velocity_covariance # optional
geometry_msgs/Vector3 accelerate # required
geometry_msgs/Vector3[] trace # optional
geometry_msgs/Vector3[] trajectory # optional
FleetObject fleet_object # optional
#---------------------major_type--------------------
int8 TYPE_UNKNOWN = 0
int8 TYPE_PEDESTRIAN = 1
int8 TYPE_BICYCLE = 2
int8 TYPE_VEHICLE = 3
int8 TYPE_MACHINERY = 4
int8 TYPE_UNKNOWN_MOVABLE = 5
int8 TYPE_UNKNOWN_STATIC = 6
#---------------------sub_type--------------------
int8 SUBTYPE_UNKNOWN = 0
int8 SUBTYPE_CAR = 1
int8 SUBTYPE_VAN = 2
int8 SUBTYPE_TRUCK = 3
int8 SUBTYPE_DUMPER = 4
int8 SUBTYPE_FLUSHER = 5
int8 SUBTYPE_DOZER = 6
int8 SUBTYPE_EXCAVATOR = 7
int8 SUBTYPE_CYCLIST = 8
int8 SUBTYPE_MOTORCYCLIST = 9
int8 SUBTYPE_TRICYCLIST = 10
int8 SUBTYPE_PEDESTRIAN = 11
int8 SUBTYPE_TRAFFIC_CONE = 12
int8 SUBTYPE_UNKNOWN_MOVABLE = 13
int8 SUBTYPE_UNKNOWN_STATIC = 14
#---------------------property--------------------
int8 PROPERTY_OUTSIDER = 0
int8 PROPERTY_MGV = 1
int8 PROPERTY_UGV = 2
int8 MOTION_UNKNOWN = 0
int8 MOTION_MOVING = 1
int8 MOTION_STATIONARY = 2
float64 x
float64 y
float64 w
float64 h
float64 orientation
int8 label
geometry_msgs/Vector3[] points
#---------------------label--------------------
int8 TYPE_ROAD = 0
int8 TYPE_ROAD_EDGE = 1
int8 TYPE_NON_ROAD = 2
Header header
waytous_perception_msgs/Road[] road_list
Header header
int8 type
waytous_perception_msgs/Rect rect
float64 reliable
#---------------------type--------------------
int8 TYPE_NORMAL = 0
int8 TYPE_FOG = 1
int8 TYPE_DUST = 2
int8 TYPE_RAIN = 3
int8 TYPE_SNOW = 4
int8 TYPE_DARK = 5
int8 TYPE_TRAFFIC_LIGHT_GREEN = 6
int8 TYPE_TRAFFIC_LIGHT_RED = 7
int8 TYPE_TRAFFIC_LIGHT_YELLOW = 8
int8 TYPE_TRAFFIC_LIGHT_UNKNOWN = 9
std_msgs/Header header
string id # traffic light ID in the map
int8 sensor_type # information source
int8 color # traffic light color
waytous_perception_msgs/Rect rect # 2D position of the traffic light
geometry_msgs/Polygon polygon # 3D position of the traffic light
int8 confidence # detection confidence
float32 remaining_time # remaining time
#----------------sensor_type--------------------------
int8 SENSOR_INVALID = 0
int8 SENSOR_CAMERA = 1
int8 SENSOR_V2X = 2
#----------------color--------------------------
int8 COLOR_UNKNOWN = 0
int8 COLOR_RED = 1
int8 COLOR_YELLOW = 2
int8 COLOR_GREEN = 3
int8 COLOR_BLACK = 4
std_msgs/Header header
int16 ogmwidth # 2D occupancy grid map width
int16 ogmheight # 2D occupancy grid map height
int16 vehicle_x # horizontal position of the vehicle in the grid map
int16 vehicle_y # vertical position of the vehicle in the grid map
float32 ogmresolution # grid resolution
int16[] data # grid cells (cells with value 1 or 2 are retaining walls)
\ No newline at end of file
<?xml version="1.0"?>
<package format="2">
<name>waytous_perception_msgs</name>
<version>0.0.0</version>
<description>The waytous_perception_msgs package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="yul@todo.todo">yul</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>TODO</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/waytous_perception_msgs</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<build_depend>geometry_msgs</build_depend>
<build_depend>message_generation</build_depend>
<build_depend>message_runtime</build_depend>
<build_depend>nav_msgs</build_depend>
<build_depend>roscpp</build_depend>
<build_depend>sensor_msgs</build_depend>
<build_depend>tf</build_depend>
<build_depend>tf2</build_depend>
<build_depend>grid_map_msgs</build_depend>
<build_depend>std_msgs</build_depend>
<build_export_depend>geometry_msgs</build_export_depend>
<build_export_depend>grid_map_msgs</build_export_depend>
<build_export_depend>nav_msgs</build_export_depend>
<build_export_depend>roscpp</build_export_depend>
<build_export_depend>sensor_msgs</build_export_depend>
<build_export_depend>tf</build_export_depend>
<build_export_depend>tf2</build_export_depend>
<build_export_depend>std_msgs</build_export_depend>
<exec_depend>geometry_msgs</exec_depend>
<exec_depend>grid_map_msgs</exec_depend>
<exec_depend>message_runtime</exec_depend>
<exec_depend>nav_msgs</exec_depend>
<exec_depend>roscpp</exec_depend>
<exec_depend>sensor_msgs</exec_depend>
<exec_depend>tf</exec_depend>
<exec_depend>tf2</exec_depend>
<exec_depend>std_msgs</exec_depend>
<exec_depend>message_generation</exec_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>
Panels:
- Class: rviz/Displays
Help Height: 169
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /PointCloud22
- /PointCloud22/Status1
Splitter Ratio: 0.5
Tree Height: 191
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Name: Time
SyncMode: 0
SyncSource: Image
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 10
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 1
Class: rviz/Axes
Enabled: true
Length: 5
Name: Axes
Radius: 0.10000000149011612
Reference Frame: <Fixed Frame>
Show Trail: false
Value: true
- Class: rviz/Image
Enabled: true
Image Topic: /perception/camera/vision_image_result
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Queue Size: 2
Transport Hint: raw
Unreliable: false
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /perception/camera/vision_points_bev
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: Intensity
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Min Color: 0; 0; 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 2
Size (m): 0.009999999776482582
Style: Points
Topic: /vehicle_all_pointcloud
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: Lidar
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 78.24835205078125
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Field of View: 0.7853981852531433
Focal Point:
X: 22.903078079223633
Y: -17.76011085510254
Z: -0.23071250319480896
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.9147961735725403
Target Frame: <Fixed Frame>
Yaw: 0.16403070092201233
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1017
Hide Left Dock: false
Hide Right Dock: true
Image:
collapsed: false
QMainWindow State: 000000ff00000000fd00000004000000000000021800000366fc020000000efb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000001a3000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006501000001e4000001bd0000001600fffffffb0000000a0049006d0061006700650000000108000000820000000000000000fb0000000a0049006d0061006700650000000192000000a70000000000000000fb0000000a0049006d00610067006500000002150000018c0000000000000000fb0000000a0049006d006100670065000000023b000000980000000000000000fb0000000a0049006d00610067006501000002ac000000f50000000000000000000000010000010f000001f7fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003b000001f7000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000078000000037fc0100000002fb0000000800540069006d00650100000000000007800000030700fffffffb0000000800540069006d00650100000000000004500000000000000000000005620000036600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1920
X: 1920
Y: 31