Commit 0b481d62 authored by ChengQingshui

1. Add fisheye camera support. 2. Add logic to run detection on the original image, controlled by the `original_image_detect` field in CameraBevParam.yaml (enabled when set to true).

parent 6a19c2fd
@@ -15,7 +15,10 @@
 # gnome-terminal --tab --title="driver" --working-directory="/home/nvidia" \
 # -- bash -c "awe run --killall; awe run -a; exec bash "
-gnome-terminal --tab --title="ros_camera_bev" --working-directory="/home/nvidia/Projects/ros_camera_bev" \
+#gnome-terminal --tab --title="ros_camera_bev" --working-directory="/home/nvidia/Projects/ros_camera_bev" \
+#-- bash -c "source ./devel/setup.bash; rosrun perception_camera_bev camera_bev_infer; exec bash "
+gnome-terminal --tab --title="ros_camera_bev" --working-directory="/home/cqs/projects/camera_detect/ros_camera_bev/" \
 -- bash -c "source ./devel/setup.bash; rosrun perception_camera_bev camera_bev_infer; exec bash "
 # gnome-terminal --tab --title="camera_back_warn" --working-directory="/home/nvidia/Projects/perception/ros_camera_bev" \
...
-rgb_sub_topic_name: /sensor/camera/inner/image # /sensor/camera/front_middle/image/bgr
+rgb_sub_topic_name: /front/image/bgr # /sensor/camera/front_middle/image/bgr
-pointcloud_sub_topic_name: /livox/lidar_hap # /total_calib_pointcloud
+pointcloud_sub_topic_name: /merged_lidar_points # /total_calib_pointcloud
 detection_pub_topic_name: /camera/objects
 # static_gridmap_pub_topic_name: /perception/camera/bev_static_gridmap
 creator_id: 0000
 creator_name: camera_bev_node
 sensor_frame_id: camera_01
 # config_root_path: /home/nvidia/Projects/perception/eon_camera_bev
-config_root_path: /home/ubuntu/projects/ros_camera_bev/src/camera_bev_infer
+config_root_path: /home/cqs/projects/camera_detect/ros_camera_bev/src/camera_bev_infer
 rgb_config_path: configs/weights/multi_yolov5m.yaml
 device: 0
 period_time: 5
 vis_res: true
-vis_frame_id: livox_frame_hap # vehicle_link
+vis_frame_id: vehicle_link # livox_frame_hap
-vis_project: false
+vis_project: true
 # ["pedestrian", "vehicle", "stone"]
 pub_classes: ["pedestrian", "vehicle"] # ["stone", "cone"]
 obstacle_height_threshold: 25 # cm
-camera_intrinsic: [1018.532796, 0, 1001.098828,
-                   0, 1017.127611, 599.751335,
-                   0, 0, 1 ]
+camera_intrinsic: [513.2025464568752, 0., 967.0902739804869,
+                   0., 513.0840642375775, 541.0220773650327,
+                   0., 0., 1. ]
-distortion_coefficients: [-0.385858, 0.125180, -0.004831, -0.001959, 0.000000]
+distortion_coefficients: [0.1307926007445786, -0.02713495183259862, -0.007965716375255127, 0.002596178751112769]
-camera2lidar_extrinsic: [-0.05073, -0.993745, 0.0994831, -0.255167,
-                         -0.130468, -0.0921635, -0.98716, -0.0198193,
-                         0.990154, -0.063058, -0.124977, 0.76145,
+camera2lidar_extrinsic: [-0.0052204, 0.999982, -0.00280128, 0.0881569,
+                         -0.999893, -0.0051816, 0.0136814, 0.0371389,
+                         0.0136667, 0.0028724, 0.999902, -0.0601866,
                          0, 0, 0, 1 ]
+camera2lidar_extrinsic1: [-0.0052204, -0.999893, 0.0136667, 0.0384177,
+                          0.999982, -0.00518161, 0.0028724, -0.08779,
+                          -0.00280128, 0.0136814, 0.999902, 0.0599195,
+                          0, 0, 0, 1 ]
+original_image_detect: false
 src_img_width: 1920
 src_img_height: 1080
 dst_img_width: 960
...
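Incidentally, the two extrinsic blocks above are mutual inverses: the rotation of camera2lidar_extrinsic1 is the transpose of camera2lidar_extrinsic's rotation, and its translation is -Rᵀt, i.e. the same calibration expressed in the opposite direction. A minimal standalone check, assuming Eigen is available:

#include <iostream>
#include <Eigen/Dense>

int main() {
    // values copied verbatim from CameraBevParam.yaml above
    Eigen::Matrix4d cam2lidar, cam2lidar_inv;
    cam2lidar << -0.0052204,   0.999982,  -0.00280128,  0.0881569,
                 -0.999893,   -0.0051816,  0.0136814,   0.0371389,
                  0.0136667,   0.0028724,  0.999902,   -0.0601866,
                  0,           0,          0,           1;
    cam2lidar_inv << -0.0052204,  -0.999893,    0.0136667,  0.0384177,
                      0.999982,   -0.00518161,  0.0028724, -0.08779,
                     -0.00280128,  0.0136814,   0.999902,   0.0599195,
                      0,           0,           0,          1;
    // if the two blocks are mutual inverses, the product is ~identity
    double err = (cam2lidar * cam2lidar_inv - Eigen::Matrix4d::Identity()).norm();
    std::cout << "||A*B - I|| = " << err << std::endl;  // near zero, up to calibration rounding
    return 0;
}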
@@ -177,8 +177,8 @@ void sensorManager::publishDetection()
 }
 // 2d->bev, results are saved into vGrid (frame->objs_ carries track_id, vGrid->vInsObjs does not)
 // vision_converter_->Process_objects(frame_ptr->img_src_, pcl_cloud, frame_ptr->objs_, *vGrid);
 vision_converter_->Process_objects(frame_ptr->img_src_, frame_ptr->depth_, pcl_cloud, frame_ptr->objs_, *vGrid);
 }else{
 std::cout<<"--------------------------------------------- no pcl, using all depth -------------------------------------------- "<<std::endl;
 vision_converter_->Process_objects(frame_ptr->img_src_, frame_ptr->depth_, frame_ptr->objs_, *vGrid);
...
@@ -17,12 +17,10 @@
 #include <common/common.h>
 #include <interfaces/base_io.h>
 namespace waytous {
 namespace deepinfer {
 namespace ios {
 class CameraParam: public interfaces::BaseIO{
 public:
@@ -31,6 +29,12 @@ public:
 Eigen::Matrix<float, 3, 3, Eigen::RowMajor> new_camera_intrinsic;
 Eigen::Matrix<float, 1, 14, Eigen::RowMajor> distortion_coefficients;
+// added 2025-05-14 by chengqingshui
+cv::Mat mat_distortion_coeffs_;
+cv::Mat mat_camera_matrix_;
+cv::Mat mat_camera_matrix_estimate_;
+bool original_image_detect = true; // whether to run detection on the original image
 // camera extrinsics
 Eigen::Matrix4d camera2lidar_extrinsic;
...
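The three cv::Mat members mirror the existing Eigen intrinsics and distortion coefficients so the OpenCV undistortion APIs can consume them directly; readInferParam fills them element by element (see the hunk further down). As a hedged aside, OpenCV's Eigen bridge could do the same conversion in one call:

#include <opencv2/core/eigen.hpp>  // cv::eigen2cv

// Hypothetical helper (not part of the commit): populate the cv::Mat mirror
// of the intrinsic matrix from the existing Eigen member of a CameraParam `p`.
inline void FillCvIntrinsics(waytous::deepinfer::ios::CameraParam& p) {
    Eigen::Matrix3d K = p.new_camera_intrinsic.cast<double>();
    cv::eigen2cv(K, p.mat_camera_matrix_);  // yields a 3x3 CV_64F matrix
}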
@@ -45,7 +45,8 @@ bool UndistortV2::InitParam(){
 }
 d_mapx_.Reshape({param_->dst_height_, param_->dst_width_});
 d_mapy_.Reshape({param_->dst_height_, param_->dst_width_});
-InitUndistortRectifyMap();
+//InitUndistortRectifyMap();
+InitUndistortRectifyMap_opencv();
 dst_img = std::make_shared<base::Image8U>(param_->dst_height_, param_->dst_width_, base::Color::BGR);
 auto output = std::make_shared<ios::CameraSrcOut>(dst_img);
@@ -111,6 +112,45 @@ void UndistortV2::InitUndistortRectifyMap() {
 }
 }
+void UndistortV2::InitUndistortRectifyMap_opencv() {
+    if(param_->original_image_detect){
+        // original-image mode: build a pure rescale map that takes each destination
+        // pixel center back to source-image coordinates (no undistortion applied)
+        float fx = float(param_->src_width_) / float(param_->dst_width_);
+        float fy = float(param_->src_height_) / float(param_->dst_height_);
+        for (int v = 0; v < param_->dst_height_; ++v) {
+            float *x_ptr = d_mapx_.mutable_cpu_data() + d_mapx_.offset(v);
+            float *y_ptr = d_mapy_.mutable_cpu_data() + d_mapy_.offset(v);
+            for (int u = 0; u < param_->dst_width_; ++u) {
+                x_ptr[u] = (u + 0.5f) * fx;
+                y_ptr[u] = (v + 0.5f) * fy;
+            }
+        }
+    }
+    else
+    {
+        cv::Size image_size(param_->dst_width_, param_->dst_height_);
+        cv::Mat R = cv::Mat::eye(3, 3, CV_32FC1);
+        cv::Mat map_x, map_y;
+        // exactly four distortion coefficients -> fisheye (equidistant) model
+        if( 4 == param_->mat_distortion_coeffs_.rows*param_->mat_distortion_coeffs_.cols) {
+            cv::fisheye::initUndistortRectifyMap(param_->mat_camera_matrix_, param_->mat_distortion_coeffs_, R,
+                                                 param_->mat_camera_matrix_estimate_, image_size, CV_32FC1, map_x, map_y);
+        }
+        else {
+            cv::initUndistortRectifyMap(param_->mat_camera_matrix_, param_->mat_distortion_coeffs_, R,
+                                        param_->mat_camera_matrix_estimate_, image_size, CV_32FC1, map_x, map_y);
+        }
+        for (int v = 0; v < param_->dst_height_; ++v) {
+            float *x_ptr = d_mapx_.mutable_cpu_data() + d_mapx_.offset(v);
+            float *y_ptr = d_mapy_.mutable_cpu_data() + d_mapy_.offset(v);
+            for (int u = 0; u < param_->dst_width_; ++u) {
+                x_ptr[u] = map_x.at<float>(v, u) * 2;  // scale by 2, matching the 2x src/dst resolution ratio
+                y_ptr[u] = map_y.at<float>(v, u) * 2;
+            }
+        }
+    }
+}
 /**
  * @name: Exec
...
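For reference, the d_mapx_/d_mapy_ pair produced above is an ordinary remap table: one source-image coordinate per destination pixel. A minimal CPU-side sketch of consuming such maps, assuming cv::Mat views of the two blobs (the node itself presumably applies them on the GPU):

#include <opencv2/imgproc.hpp>

// Sketch: undistort (or merely rescale, in original_image_detect mode) one frame.
// map_x/map_y are dst-sized CV_32FC1 mats holding source-image coordinates.
cv::Mat RemapFrame(const cv::Mat& src, const cv::Mat& map_x, const cv::Mat& map_y) {
    cv::Mat dst;
    cv::remap(src, dst, map_x, map_y, cv::INTER_LINEAR, cv::BORDER_CONSTANT);
    return dst;
}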
@@ -69,6 +69,9 @@ public:
 */
 void InitUndistortRectifyMap();
+// added 2025-05-14 by chengqingshui: InitUndistortRectifyMap() rewritten using OpenCV
+void InitUndistortRectifyMap_opencv();
 UndistortV2(){};
 ~UndistortV2(){};
@@ -80,7 +83,7 @@ public:
 base::Blob<float> d_mapy_;
 // undistorted image
 base::Image8UPtr dst_img;
-bool inited_ = 0;
+bool inited_ = false;
 };
...
@@ -49,6 +49,7 @@ bool readInferParam(ImageInferNodeParam& param, const std::string& param_path){
 param.vis_project = node["vis_project"].as<bool>();
 }
 param.obstacle_height_threshold = node["obstacle_height_threshold"].as<float>();
 param.cameraParam_ = std::make_shared<waytous::deepinfer::ios::CameraParam>();
@@ -56,11 +57,13 @@ bool readInferParam(ImageInferNodeParam& param, const std::string& param_path){
 param.cameraParam_->src_width_ = node["src_img_width"].as<int>();
 param.cameraParam_->dst_height_ = node["dst_img_height"].as<int>();
 param.cameraParam_->dst_width_ = node["dst_img_width"].as<int>();
+if(node["original_image_detect"].IsDefined() && (!node["original_image_detect"].IsNull())){
+    param.cameraParam_->original_image_detect = node["original_image_detect"].as<bool>();
+}
 // intrinsics
 std::vector<float> camera_intrinsic = node["camera_intrinsic"].as<std::vector<float>>();
 if(camera_intrinsic.size() != 9){
-    // eon::log::info() << "Error: camera intrinsic number != 9, cannot init node.";
     std::cout << "Error: camera intrinsic number != 9, cannot init node." << std::endl;
     return false;
 }
@@ -95,6 +98,37 @@ bool readInferParam(ImageInferNodeParam& param, const std::string& param_path){
 }
 }
+// added 2025-05-14 by chengqingshui: matrix-form camera distortion coefficients,
+// intrinsics, and the estimated intrinsics
+// build the intrinsic matrix
+param.cameraParam_->mat_camera_matrix_ = cv::Mat::zeros(3, 3, CV_64F);
+param.cameraParam_->mat_camera_matrix_.at<double>(0, 0) = param.cameraParam_->new_camera_intrinsic(0); // fx
+param.cameraParam_->mat_camera_matrix_.at<double>(0, 2) = param.cameraParam_->new_camera_intrinsic(2); // cx
+param.cameraParam_->mat_camera_matrix_.at<double>(1, 1) = param.cameraParam_->new_camera_intrinsic(4); // fy
+param.cameraParam_->mat_camera_matrix_.at<double>(1, 2) = param.cameraParam_->new_camera_intrinsic(5); // cy
+param.cameraParam_->mat_camera_matrix_.at<double>(2, 2) = 1.0;
+// build the distortion-coefficient matrix
+param.cameraParam_->mat_distortion_coeffs_ = cv::Mat::zeros(1, effs.size(), CV_64F);
+for (int i = 0; i < effs.size(); ++i) {
+    param.cameraParam_->mat_distortion_coeffs_.at<double>(0, i) = effs[i];
+}
+// for the undistort-first logic, estimate a new intrinsic matrix;
+// the no-undistort path uses the scaled intrinsic matrix
+if(param.cameraParam_->original_image_detect){
+    param.cameraParam_->mat_camera_matrix_estimate_ = param.cameraParam_->mat_camera_matrix_.clone();
+}
+else
+{
+    cv::Size image_size(param.cameraParam_->dst_width_, param.cameraParam_->dst_height_);
+    cv::Mat R = cv::Mat::eye(3, 3, CV_32FC1);
+    if( 4 == param.cameraParam_->mat_distortion_coeffs_.rows*param.cameraParam_->mat_distortion_coeffs_.cols) {
+        cv::fisheye::estimateNewCameraMatrixForUndistortRectify(param.cameraParam_->mat_camera_matrix_, param.cameraParam_->mat_distortion_coeffs_,
+            image_size, R, param.cameraParam_->mat_camera_matrix_estimate_, 0, image_size);
+    }
+    else {
+        param.cameraParam_->mat_camera_matrix_estimate_ = cv::getOptimalNewCameraMatrix(param.cameraParam_->mat_camera_matrix_,
+            param.cameraParam_->mat_distortion_coeffs_, image_size, 0, image_size);
+    }
+}
 auto ex_param = node["camera2lidar_extrinsic"].as<std::vector<float>>();
 int ex_size = ex_param.size();
 if(ex_size != 16){
...
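Putting the two hunks together, the dispatch rule is: exactly four distortion coefficients select OpenCV's fisheye (equidistant) model with k1..k4, while any other count falls through to the standard pinhole model with k1, k2, p1, p2[, k3]. A self-contained sketch of the same flow for checking a calibration offline; the file names and numbers here are placeholders, not values from this repo:

#include <opencv2/opencv.hpp>

int main() {
    // placeholder calibration (real values would come from CameraBevParam.yaml)
    cv::Mat K = (cv::Mat_<double>(3, 3) << 513.2, 0, 967.1,
                                           0, 513.1, 541.0,
                                           0, 0, 1);
    cv::Mat D = (cv::Mat_<double>(1, 4) << 0.1308, -0.0271, -0.0080, 0.0026);
    cv::Mat src = cv::imread("sample.png");  // placeholder input image
    cv::Size size(src.cols, src.rows);
    cv::Mat R = cv::Mat::eye(3, 3, CV_64F), newK, mapx, mapy, dst;

    if (D.total() == 4) {  // fisheye model, as in the commit's dispatch
        cv::fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, size, R, newK, 0, size);
        cv::fisheye::initUndistortRectifyMap(K, D, R, newK, size, CV_32FC1, mapx, mapy);
    } else {               // standard pinhole model with k1,k2,p1,p2[,k3]
        newK = cv::getOptimalNewCameraMatrix(K, D, size, 0, size);
        cv::initUndistortRectifyMap(K, D, R, newK, size, CV_32FC1, mapx, mapy);
    }
    cv::remap(src, dst, mapx, mapy, cv::INTER_LINEAR);
    cv::imwrite("undistorted.png", dst);
    return 0;
}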
@@ -172,7 +172,7 @@ public:
 * @param {BEV_GridMap&} vGrid gridmap holding the BEV result
 * @return {*}
 */
-int Process_objects(const cv::Mat& image, Cloud::Ptr& pcl_cloud, std::vector<object_Seg_info>&objs, BEV_GridMap& vGrid);
+//int Process_objects(const cv::Mat& image, Cloud::Ptr& pcl_cloud, std::vector<object_Seg_info>&objs, BEV_GridMap& vGrid);
 /**
 * @name: Process_objects
@@ -232,6 +232,7 @@ private:
 // std::map<int, std::pair<int, int>> unvalidTracks;
 cv::Mat m_matrixQ;
 Eigen::Matrix4d m_lidar2camera;
+waytous::deepinfer::ios::CameraParamPtr camera_param_ptr_;
 // Eigen::Matrix4d m_lidar2odom;
 float obstacle_height_threshold = 30; // obstacle height-difference threshold
@@ -262,6 +263,8 @@ private:
 // maximum vehicle length
 int m_nMaxBEVVehicleLength;
+// color lookup table
+cv::Mat mat_colormap_;
 };
 #endif