Commit 0b481d62 authored by ChengQingshui

1. Add fisheye camera support. 2. Add logic to run detection on the original image, controlled by the original_image_detect field in CameraBevParam.yaml (true enables it).

parent 6a19c2fd
@@ -15,7 +15,10 @@
 # gnome-terminal --tab --title="driver" --working-directory="/home/nvidia" \
 # -- bash -c "awe run --killall; awe run -a; exec bash "
-gnome-terminal --tab --title="ros_camera_bev" --working-directory="/home/nvidia/Projects/ros_camera_bev" \
+#gnome-terminal --tab --title="ros_camera_bev" --working-directory="/home/nvidia/Projects/ros_camera_bev" \
+#-- bash -c "source ./devel/setup.bash; rosrun perception_camera_bev camera_bev_infer; exec bash "
+gnome-terminal --tab --title="ros_camera_bev" --working-directory="/home/cqs/projects/camera_detect/ros_camera_bev/" \
 -- bash -c "source ./devel/setup.bash; rosrun perception_camera_bev camera_bev_infer; exec bash "
 # gnome-terminal --tab --title="camera_back_warn" --working-directory="/home/nvidia/Projects/perception/ros_camera_bev" \
......
-rgb_sub_topic_name: /sensor/camera/inner/image # /sensor/camera/front_middle/image/bgr
+rgb_sub_topic_name: /front/image/bgr # /sensor/camera/front_middle/image/bgr
-pointcloud_sub_topic_name: /livox/lidar_hap # /total_calib_pointcloud
+pointcloud_sub_topic_name: /merged_lidar_points # /total_calib_pointcloud
 detection_pub_topic_name: /camera/objects
 # static_gridmap_pub_topic_name: /perception/camera/bev_static_gridmap
 creator_id: 0000
 creator_name: camera_bev_node
 sensor_frame_id: camera_01
 # config_root_path: /home/nvidia/Projects/perception/eon_camera_bev
-config_root_path: /home/ubuntu/projects/ros_camera_bev/src/camera_bev_infer
+config_root_path: /home/cqs/projects/camera_detect/ros_camera_bev/src/camera_bev_infer
 rgb_config_path: configs/weights/multi_yolov5m.yaml
 device: 0
 period_time: 5
 vis_res: true
-vis_frame_id: livox_frame_hap # vehicle_link
+vis_frame_id: vehicle_link # livox_frame_hap
-vis_project: false
+vis_project: true
 # ["pedestrian", "vehicle", "stone"]
 pub_classes: ["pedestrian", "vehicle"] # ["stone", "cone"]
 obstacle_height_threshold: 25 # cm
-camera_intrinsic: [1018.532796, 0, 1001.098828,
-                   0, 1017.127611, 599.751335,
-                   0, 0, 1 ]
+camera_intrinsic: [513.2025464568752, 0., 967.0902739804869,
+                   0., 513.0840642375775, 541.0220773650327,
+                   0., 0., 1.]
-distortion_coefficients: [-0.385858, 0.125180, -0.004831, -0.001959, 0.000000]
+distortion_coefficients: [0.1307926007445786, -0.02713495183259862, -0.007965716375255127, 0.002596178751112769]
-camera2lidar_extrinsic: [-0.05073, -0.993745, 0.0994831, -0.255167,
-                         -0.130468, -0.0921635, -0.98716, -0.0198193,
-                         0.990154, -0.063058, -0.124977, 0.76145,
+camera2lidar_extrinsic: [-0.0052204, 0.999982, -0.00280128, 0.0881569,
+                         -0.999893, -0.0051816, 0.0136814, 0.0371389,
+                         0.0136667, 0.0028724, 0.999902, -0.0601866,
                          0, 0, 0, 1 ]
+camera2lidar_extrinsic1: [-0.0052204, -0.999893, 0.0136667, 0.0384177,
+                          0.999982, -0.00518161, 0.0028724, -0.08779,
+                          -0.00280128, 0.0136814, 0.999902, 0.0599195,
+                          0, 0, 0, 1 ]
+original_image_detect: false
 src_img_width: 1920
 src_img_height: 1080
 dst_img_width: 960
......
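Note on the distortion vectors above: the old 5-element vector follows OpenCV's pinhole model (k1, k2, p1, p2, k3), while the new 4-element vector is treated throughout this commit as the equidistant fisheye model (k1..k4); the coefficient count is the only model switch. A minimal sketch of that convention (useFisheyeModel is a hypothetical helper, not in the commit; the sample values are copied from the config above):

    #include <opencv2/core.hpp>
    #include <iostream>

    // Hypothetical helper mirroring the test used throughout this commit:
    // exactly 4 coefficients -> fisheye (equidistant) model.
    static bool useFisheyeModel(const cv::Mat& distortion_coeffs) {
        return distortion_coeffs.total() == 4;  // same as rows*cols == 4 in the diff
    }

    int main() {
        // New 4-element fisheye vector from CameraBevParam.yaml above.
        cv::Mat fisheye_d = (cv::Mat_<double>(1, 4) << 0.1307926, -0.0271350, -0.0079657, 0.0025962);
        // Old 5-element pinhole vector (k1, k2, p1, p2, k3).
        cv::Mat pinhole_d = (cv::Mat_<double>(1, 5) << -0.385858, 0.125180, -0.004831, -0.001959, 0.0);
        std::cout << useFisheyeModel(fisheye_d) << " " << useFisheyeModel(pinhole_d) << "\n";  // prints: 1 0
    }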
@@ -17,12 +17,10 @@
 #include <common/common.h>
 #include <interfaces/base_io.h>
 namespace waytous {
 namespace deepinfer {
 namespace ios {
 class CameraParam: public interfaces::BaseIO{
 public:
@@ -31,6 +29,12 @@ public:
     Eigen::Matrix<float, 3, 3, Eigen::RowMajor> new_camera_intrinsic;
     Eigen::Matrix<float, 1, 14, Eigen::RowMajor> distortion_coefficients;
+    // added 2025-05-14 by chengqingshui
+    cv::Mat mat_distortion_coeffs_;
+    cv::Mat mat_camera_matrix_;
+    cv::Mat mat_camera_matrix_estimate_;
+    bool original_image_detect = true; // whether to run detection on the original image
     // camera extrinsics
     Eigen::Matrix4d camera2lidar_extrinsic;
......
@@ -45,7 +45,8 @@ bool UndistortV2::InitParam(){
     }
     d_mapx_.Reshape({param_->dst_height_, param_->dst_width_});
     d_mapy_.Reshape({param_->dst_height_, param_->dst_width_});
-    InitUndistortRectifyMap();
+    //InitUndistortRectifyMap();
+    InitUndistortRectifyMap_opencv();
     dst_img = std::make_shared<base::Image8U>(param_->dst_height_, param_->dst_width_, base::Color::BGR);
     auto output = std::make_shared<ios::CameraSrcOut>(dst_img);
@@ -111,6 +112,45 @@ void UndistortV2::InitUndistortRectifyMap() {
     }
 }
+void UndistortV2::InitUndistortRectifyMap_opencv() {
+    if (param_->original_image_detect) {
+        // Detection on the original image: the remap degenerates to a plain
+        // src -> dst resize, with no undistortion applied.
+        float fx = float(param_->src_width_) / float(param_->dst_width_);
+        float fy = float(param_->src_height_) / float(param_->dst_height_);
+        for (int v = 0; v < param_->dst_height_; ++v) {
+            float *x_ptr = d_mapx_.mutable_cpu_data() + d_mapx_.offset(v);
+            float *y_ptr = d_mapy_.mutable_cpu_data() + d_mapy_.offset(v);
+            for (int u = 0; u < param_->dst_width_; ++u) {
+                x_ptr[u] = (u + 0.5f) * fx;
+                y_ptr[u] = (v + 0.5f) * fy;
+            }
+        }
+    } else {
+        cv::Size image_size(param_->dst_width_, param_->dst_height_);
+        cv::Mat R = cv::Mat::eye(3, 3, CV_32FC1);
+        cv::Mat map_x, map_y;
+        // 4 distortion coefficients -> fisheye (equidistant) model, otherwise pinhole.
+        if (4 == param_->mat_distortion_coeffs_.rows * param_->mat_distortion_coeffs_.cols) {
+            cv::fisheye::initUndistortRectifyMap(param_->mat_camera_matrix_, param_->mat_distortion_coeffs_, R,
+                param_->mat_camera_matrix_estimate_, image_size, CV_32FC1, map_x, map_y);
+        } else {
+            cv::initUndistortRectifyMap(param_->mat_camera_matrix_, param_->mat_distortion_coeffs_, R,
+                param_->mat_camera_matrix_estimate_, image_size, CV_32FC1, map_x, map_y);
+        }
+        for (int v = 0; v < param_->dst_height_; ++v) {
+            float *x_ptr = d_mapx_.mutable_cpu_data() + d_mapx_.offset(v);
+            float *y_ptr = d_mapy_.mutable_cpu_data() + d_mapy_.offset(v);
+            for (int u = 0; u < param_->dst_width_; ++u) {
+                // Scale dst-resolution map coordinates back to source pixels
+                // (src is 2x dst in this commit's config).
+                x_ptr[u] = map_x.at<float>(v, u) * 2;
+                y_ptr[u] = map_y.at<float>(v, u) * 2;
+            }
+        }
+    }
+}
 /**
  * @name: Exec
......
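A note on the *2 in InitUndistortRectifyMap_opencv above: the remap tables are computed at destination resolution (960x540 in this config) and the factor converts them back into source-pixel coordinates of the 1920x1080 input. A hedged sketch deriving the factor from the configured sizes instead of hardcoding it (scaleMapToSource is a hypothetical helper, not in the commit):

    #include <opencv2/core.hpp>

    // Hypothetical helper: rescale remap tables (computed at dst resolution)
    // so they index into the full-resolution source image. With this commit's
    // config (1920x1080 -> 960x540) both factors are exactly 2, matching the
    // hardcoded "*2" in the diff.
    static void scaleMapToSource(cv::Mat& map_x, cv::Mat& map_y,
                                 cv::Size src, cv::Size dst) {
        const double sx = double(src.width)  / dst.width;   // e.g. 1920/960 = 2
        const double sy = double(src.height) / dst.height;  // e.g. 1080/540 = 2
        map_x.convertTo(map_x, map_x.type(), sx);  // scale in place
        map_y.convertTo(map_y, map_y.type(), sy);
    }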
@@ -69,6 +69,9 @@ public:
      */
     void InitUndistortRectifyMap();
+    // added 2025-05-14 by chengqingshui: OpenCV-based rewrite of InitUndistortRectifyMap()
+    void InitUndistortRectifyMap_opencv();
     UndistortV2(){};
     ~UndistortV2(){};
@@ -80,7 +83,7 @@ public:
     base::Blob<float> d_mapy_;
     // undistorted output image
     base::Image8UPtr dst_img;
-    bool inited_ = 0;
+    bool inited_ = false;
 };
......
@@ -49,6 +49,7 @@ bool readInferParam(ImageInferNodeParam& param, const std::string& param_path){
         param.vis_project = node["vis_project"].as<bool>();
     }
     param.obstacle_height_threshold = node["obstacle_height_threshold"].as<float>();
     param.cameraParam_ = std::make_shared<waytous::deepinfer::ios::CameraParam>();
@@ -56,11 +57,13 @@ bool readInferParam(ImageInferNodeParam& param, const std::string& param_path){
     param.cameraParam_->src_width_ = node["src_img_width"].as<int>();
     param.cameraParam_->dst_height_ = node["dst_img_height"].as<int>();
     param.cameraParam_->dst_width_ = node["dst_img_width"].as<int>();
+    if(node["original_image_detect"].IsDefined() && (!node["original_image_detect"].IsNull())){
+        param.cameraParam_->original_image_detect = node["original_image_detect"].as<bool>();
+    }
     // intrinsics
     std::vector<float> camera_intrinsic = node["camera_intrinsic"].as<std::vector<float>>();
     if(camera_intrinsic.size() != 9){
-        // eon::log::info() << "Error: camera intrinsic number != 9, cannot init node.";
         std::cout << "Error: camera intrinsic number != 9, cannot init node." << std::endl;
         return false;
     }
@@ -95,6 +98,37 @@ bool readInferParam(ImageInferNodeParam& param, const std::string& param_path){
         }
     }
+    // added 2025-05-14 by chengqingshui: matrix forms of the camera distortion
+    // coefficients and intrinsics, plus the estimated intrinsics
+    // build the intrinsic matrix
+    param.cameraParam_->mat_camera_matrix_ = cv::Mat::zeros(3, 3, CV_64F);
+    param.cameraParam_->mat_camera_matrix_.at<double>(0, 0) = param.cameraParam_->new_camera_intrinsic(0); // fx
+    param.cameraParam_->mat_camera_matrix_.at<double>(0, 2) = param.cameraParam_->new_camera_intrinsic(2); // cx
+    param.cameraParam_->mat_camera_matrix_.at<double>(1, 1) = param.cameraParam_->new_camera_intrinsic(4); // fy
+    param.cameraParam_->mat_camera_matrix_.at<double>(1, 2) = param.cameraParam_->new_camera_intrinsic(5); // cy
+    param.cameraParam_->mat_camera_matrix_.at<double>(2, 2) = 1.0;
+    // build the distortion-coefficient matrix
+    param.cameraParam_->mat_distortion_coeffs_ = cv::Mat::zeros(1, effs.size(), CV_64F);
+    for (int i = 0; i < effs.size(); ++i) {
+        param.cameraParam_->mat_distortion_coeffs_.at<double>(0, i) = effs[i];
+    }
+    // For the undistort-first path, estimate a new intrinsic matrix;
+    // when not undistorting, use the scaled intrinsic matrix as-is.
+    if(param.cameraParam_->original_image_detect){
+        param.cameraParam_->mat_camera_matrix_estimate_ = param.cameraParam_->mat_camera_matrix_.clone();
+    }
+    else
+    {
+        cv::Size image_size(param.cameraParam_->dst_width_, param.cameraParam_->dst_height_);
+        cv::Mat R = cv::Mat::eye(3, 3, CV_32FC1);
+        if( 4 == param.cameraParam_->mat_distortion_coeffs_.rows*param.cameraParam_->mat_distortion_coeffs_.cols) {
+            cv::fisheye::estimateNewCameraMatrixForUndistortRectify(param.cameraParam_->mat_camera_matrix_, param.cameraParam_->mat_distortion_coeffs_,
+                image_size, R, param.cameraParam_->mat_camera_matrix_estimate_, 0, image_size);
+        }
+        else {
+            param.cameraParam_->mat_camera_matrix_estimate_ = cv::getOptimalNewCameraMatrix(param.cameraParam_->mat_camera_matrix_,
+                param.cameraParam_->mat_distortion_coeffs_, image_size, 0, image_size);
+        }
+    }
     auto ex_param = node["camera2lidar_extrinsic"].as<std::vector<float>>();
     int ex_size = ex_param.size();
     if(ex_size != 16){
......
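For reference on the branch above: OpenCV ships separate new-camera-matrix estimators for the two models, which is why the diff calls cv::fisheye::estimateNewCameraMatrixForUndistortRectify for 4 coefficients and cv::getOptimalNewCameraMatrix otherwise; with balance/alpha at 0, as here, both favour a crop that keeps only valid pixels. A minimal sketch under those assumptions (estimateNewK is a hypothetical wrapper, not in the commit):

    #include <opencv2/calib3d.hpp>

    // Hypothetical wrapper around the two estimators used in this commit.
    static cv::Mat estimateNewK(const cv::Mat& K, const cv::Mat& D, cv::Size size) {
        cv::Mat newK;
        if (D.total() == 4) {  // fisheye model, as in the diff
            cv::fisheye::estimateNewCameraMatrixForUndistortRectify(
                K, D, size, cv::Mat::eye(3, 3, CV_64F), newK, /*balance=*/0.0, size);
        } else {               // pinhole model
            newK = cv::getOptimalNewCameraMatrix(K, D, size, /*alpha=*/0.0, size);
        }
        return newK;
    }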
@@ -107,6 +107,13 @@ Visione_3D::Visione_3D(const perception::camera::ImageInferNodeParam& param)
     init(param.cameraParam_);
     initConvertMap();
+    // initialise the colour map (JET)
+    cv::Mat graymap(256, 1, CV_8UC1);  // single-channel ramp 0..255 (a CV_8UC3 ramp would skew applyColorMap)
+    for (int i = 0; i < 256; ++i) {
+        graymap.at<uchar>(i) = i;
+    }
+    cv::applyColorMap(graymap, mat_colormap_, cv::COLORMAP_JET);
 }
 Visione_3D::~Visione_3D()
@@ -116,8 +123,20 @@ Visione_3D::~Visione_3D()
 int Visione_3D::init(waytous::deepinfer::ios::CameraParamPtr camera_param_ptr)
 {
+    camera_param_ptr_ = camera_param_ptr;
     // fs["new_camera_matrix"] >> m_matrixQ;
+#if 0 // added 2025-05-14 by chengqingshui: use the estimated intrinsics; must stay consistent with where the maps are generated
     cv::eigen2cv(camera_param_ptr->new_camera_intrinsic, m_matrixQ);
+#else
+    m_matrixQ = cv::Mat::zeros(3, 3, CV_32FC1);
+    for(int j = 0; j < 3; j++){
+        for(int i = 0; i < 3; i++){
+            m_matrixQ.at<float>(j, i) = static_cast<float>(camera_param_ptr->mat_camera_matrix_estimate_.at<double>(j, i));
+        }
+    }
+#endif
     // cv::Mat matTemp;
     // fs["Matrix_lidar01_camera01"] >> matTemp;
     // Eigen::Matrix4d m_lidar2camera_;
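As an aside, the element-wise double-to-float copy in the #else branch above can be expressed with cv::Mat::convertTo; a hedged one-call equivalent (toFloatK is a hypothetical helper, K64 stands in for mat_camera_matrix_estimate_):

    #include <opencv2/core.hpp>

    // Hedged equivalent of the nested loop above: one conversion call.
    static cv::Mat toFloatK(const cv::Mat& K64) {
        cv::Mat K32;
        K64.convertTo(K32, CV_32FC1);  // double 3x3 -> float 3x3
        return K32;
    }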
@@ -168,8 +187,6 @@ int Visione_3D::Calc_Depth_Clouds(const cv::Mat & disparty, pcl::PointCloud<pcl:
 }
 int Visione_3D::conver_worldPt_pixelPt(const pcl::PointXYZ& ptworld, cv::Point2f& ptImg )
 {
     float tx = m_matrixQ.at<float>(0, 2);
@@ -765,6 +782,8 @@ cv::RotatedRect Visione_3D::getObjectAreaRect(const cv::Mat& imgBin, const cv::
  * @param {BEV_GridMap&} vGrid gridmap storing the BEV result
  * @return {*}
  */
+// This function is no longer used; commented out on 2025-05-15 so it does not affect the analysis.
+/****
 int Visione_3D::Process_objects(const cv::Mat& image, Cloud::Ptr& pcl_cloud, std::vector<object_Seg_info>&objs, BEV_GridMap& vGrid){
     if( pcl_cloud == nullptr || objs.empty()){
         return -1;
@@ -778,12 +797,58 @@ int Visione_3D::Process_objects(const cv::Mat& image, Cloud::Ptr& pcl_cloud, std
     // Project into the image; keep the indices of all projected points plus those of points lower than 1.5 m, used for fallen-rock handling
     // pixel coord [y,x] -> 3d point index; 0 channel for all points, 1 channel for near-ground point
     cv::Mat point_index_in_camera = cv::Mat::zeros(image.size(), CV_32SC2);// cv::Vec2i ushort
+    cv::Mat cv_image = image;
+    int sep = camera_cloud->size() / 60000 + 1;
+    float depthSegmentLength = 30;
+    float depthScale = 256.f / depthSegmentLength;
+    if( camera_param_ptr_->original_image_detect)
+    {
+        // zero rotation and translation vectors
+        cv::Mat rvec = cv::Mat::zeros(3, 1, CV_32F);
+        cv::Mat tvec = cv::Mat::zeros(3, 1, CV_32F);
+        std::vector<cv::Point2f> imagePoints; // projected 2D image points
+        std::vector<cv::Point3d> object_points;
+        for(auto & p3d: camera_cloud->points) {
+            object_points.emplace_back(p3d.x, p3d.y, p3d.z);
+        }
+        // simplified projection
+        if(4 == camera_param_ptr_->mat_distortion_coeffs_.rows*camera_param_ptr_->mat_distortion_coeffs_.cols){
+            cv::fisheye::projectPoints(object_points, imagePoints, rvec, tvec, camera_param_ptr_->mat_camera_matrix_estimate_, camera_param_ptr_->mat_distortion_coeffs_);
+        }
+        else{
+            cv::projectPoints(object_points, rvec, tvec, camera_param_ptr_->mat_camera_matrix_estimate_, camera_param_ptr_->mat_distortion_coeffs_, imagePoints);
+        }
+        for(int i=1; i<camera_cloud->size(); i++){
+            auto& p3d = camera_cloud->points[i];
+            if(p3d.z <= 0) continue;
+            auto & p2d = imagePoints[i];
+            if(p2d.x >= 0 && p2d.y >= 0 && p2d.x < point_index_in_camera.cols && p2d.y < point_index_in_camera.rows){
+                auto indexs = point_index_in_camera.at<cv::Vec2i>(p2d.y, p2d.x); // must be a copy, not a reference
+                if(indexs[0] == 0 || camera_cloud->points[indexs[0]].z > p3d.z){ // keep the nearer point
+                    point_index_in_camera.at<cv::Vec2i>(p2d.y, p2d.x)[0] = i;
+                    if(pcl_cloud->points[i].z < 1.5){ // second index keeps points below 1.5 m
+                        point_index_in_camera.at<cv::Vec2i>(p2d.y, p2d.x)[1] = i;
+                    }
+                    if(vis_project && i % sep == 0){
+                        //auto color = indexs[0] == 0 ? cv::Scalar(0, 0, 255) : cv::Scalar(255, 255, 0);
+                        int color_idx = static_cast<int>(p3d.z * depthScale) % 256;
+                        cv::Vec3b color = mat_colormap_.at<cv::Vec3b>(color_idx);
+                        cv::circle(cv_image, cv::Point2f(p2d.x, p2d.y), 1, cv::Scalar(color[0], color[1], color[2]), -1);
+                    }
+                }
+            }
+        }
+    }
+    else{
     float tx = m_matrixQ.at<float>(0, 2);
     float ty = m_matrixQ.at<float>(1, 2);
     float fx = m_matrixQ.at<float>(0, 0);
     float fy = m_matrixQ.at<float>(1, 1);
-    cv::Mat cv_image = image;
-    int sep = camera_cloud->size() / 60000 + 1;
     for(int i=1; i<camera_cloud->size(); i++){
         auto& p3d = camera_cloud->points[i];
         if(p3d.z <= 0) continue;
@@ -800,12 +865,16 @@ int Visione_3D::Process_objects(const cv::Mat& image, Cloud::Ptr& pcl_cloud, std
                 point_index_in_camera.at<cv::Vec2i>(y, x)[1] = i;
             }
             if(vis_project && i % sep == 0){
-                auto color = indexs[0] == 0 ? cv::Scalar(0, 0, 255) : cv::Scalar(255, 255, 0);
-                cv::circle(cv_image, cv::Point(x, y), 1, color, -1);
+                //auto color = indexs[0] == 0 ? cv::Scalar(0, 0, 255) : cv::Scalar(255, 255, 0);
+                int color_idx = static_cast<int>(p3d.z * depthScale) % 256;
+                cv::Vec3b color = mat_colormap_.at<cv::Vec3b>(color_idx);
+                cv::circle(cv_image, cv::Point(x, y), 1, cv::Scalar(color[0], color[1], color[2]), -1);
             }
         }
     }
     }
+    }
std::cout<<" pcl2camera with "<<camera_cloud->size()<<" points: "<<double(clock()-start_)/CLOCKS_PER_SEC<<"s"<<std::endl; //输出时间(单位:s) std::cout<<" pcl2camera with "<<camera_cloud->size()<<" points: "<<double(clock()-start_)/CLOCKS_PER_SEC<<"s"<<std::endl; //输出时间(单位:s)
// 使用dust-box去除部分点云index // 使用dust-box去除部分点云index
// TODO // TODO
...@@ -847,7 +916,7 @@ int Visione_3D::Process_objects(const cv::Mat& image, Cloud::Ptr& pcl_cloud, std ...@@ -847,7 +916,7 @@ int Visione_3D::Process_objects(const cv::Mat& image, Cloud::Ptr& pcl_cloud, std
} }
return 0; return 0;
}; };
******/
 int Visione_3D::Process_Segmantic_Objects(object_Seg_info& obj, const cv::Mat& indices, Cloud::Ptr& pcl_cloud, BEV_GridMap& vGrid){
     cv::Scalar objColor = obj.color;
@@ -1094,7 +1163,7 @@ int Visione_3D::Process_objects(const cv::Mat& image, const cv::Mat& depth, Clou
     if(pcl_cloud == nullptr){
         return Process_objects(image, depth, objs, vGrid);
     }
-    if(objs.empty()){
+    if(objs.empty() && !vis_project){ // with vis_project on, continue so the projection overlay is still drawn
         return -1;
     }
@@ -1105,34 +1174,96 @@ int Visione_3D::Process_objects(const cv::Mat& image, const cv::Mat& depth, Clou
     pcl::transformPointCloud(*pcl_cloud, *camera_cloud, c2l);
     // Project into the image; keep the indices of all projected points plus those of points lower than 1 m, used for fallen-rock handling
     cv::Mat point_index_in_camera = cv::Mat::zeros(image.size(), CV_32SC2);// cv::Vec2i ushort
float tx = m_matrixQ.at<float>(0, 2);
float ty = m_matrixQ.at<float>(1, 2);
float fx = m_matrixQ.at<float>(0, 0);
float fy = m_matrixQ.at<float>(1, 1);
cv::Mat cv_image = image; cv::Mat cv_image = image;
int sep = camera_cloud->size() / 60000 + 1; int sep = camera_cloud->size() / 60000 + 1;
float depthSegmentLength = 30;
float depthScale = 256.f / depthSegmentLength;
cv::Mat pro_temp;
if(vis_project){
pro_temp = image.clone();
}
if( camera_param_ptr_->original_image_detect)
{
// 零旋转和平移向量
cv::Mat rvec = cv::Mat::zeros(3, 1, CV_32F);
cv::Mat tvec = cv::Mat::zeros(3, 1, CV_32F);
std::vector<cv::Point2f> imagePoints; // 输出的2D图像点
std::vector<cv::Point3f> object_points;
for(auto & p3d: camera_cloud->points) {
object_points.emplace_back(p3d.x, p3d.y, p3d.z);
}
// 简化后的投影
if(4 == camera_param_ptr_->mat_distortion_coeffs_.rows*camera_param_ptr_->mat_distortion_coeffs_.cols){
cv::fisheye::projectPoints(object_points, imagePoints, rvec, tvec, camera_param_ptr_->mat_camera_matrix_estimate_, camera_param_ptr_->mat_distortion_coeffs_);
}
else{
cv::projectPoints(object_points, rvec, tvec, camera_param_ptr_->mat_camera_matrix_estimate_, camera_param_ptr_->mat_distortion_coeffs_, imagePoints);
}
for(int i=1; i<camera_cloud->size(); i++){ for(int i=1; i<camera_cloud->size(); i++){
auto& p3d = camera_cloud->points[i]; auto& p3d = camera_cloud->points[i];
if(p3d.z <= 0) continue; if(p3d.z <= 0) continue;
auto & p2d = imagePoints[i];
if(p2d.x >= 0 && p2d.y >= 0 && p2d.x < point_index_in_camera.cols && p2d.y < point_index_in_camera.rows){
auto indexs = point_index_in_camera.at<cv::Vec2i>(p2d.y, p2d.x); // 不能使用引用
if(indexs[0] == 0 || camera_cloud->points[indexs[0]].z > p3d.z){ // 保留近处点
point_index_in_camera.at<cv::Vec2i>(p2d.y, p2d.x)[0] = i;
if(pcl_cloud->points[i].z < 1.5){ // 第二个index保存小于1.5m的点云
point_index_in_camera.at<cv::Vec2i>(p2d.y, p2d.x)[1] = i;
}
if(vis_project && i % sep == 0){
//auto color = indexs[0] == 0 ? cv::Scalar(0, 0, 255) : cv::Scalar(255, 255, 0);
int color_idx = static_cast<int>(p3d.z * depthScale) % 256;
#if 1
cv::Vec3b color = mat_colormap_.at<cv::Vec3b>(color_idx);
cv::circle(cv_image, cv::Point2f(p2d.x, p2d.y), 1, cv::Scalar(color[0], color[1], color[2]), -1);
#else
cv_image.at<cv::Vec3b>(p2d.y, p2d.x ) = mat_colormap_.at<cv::Vec3b>(color_idx);
#endif
}
}
}
}
}
else {
float tx = m_matrixQ.at<float>(0, 2);
float ty = m_matrixQ.at<float>(1, 2);
float fx = m_matrixQ.at<float>(0, 0);
float fy = m_matrixQ.at<float>(1, 1);
for (int i = 1; i < camera_cloud->size(); i++) {
auto &p3d = camera_cloud->points[i];
if (p3d.z <= 0)
continue;
// if(p3d.intensity <= 20) continue; // filter dust // if(p3d.intensity <= 20) continue; // filter dust
int x = (p3d.x * fx) / p3d.z + tx; int x = (p3d.x * fx) / p3d.z + tx;
int y = (p3d.y * fy) / p3d.z + ty; int y = (p3d.y * fy) / p3d.z + ty;
if(x >= 0 && y >= 0 && x < point_index_in_camera.cols && y < point_index_in_camera.rows){ if (x >= 0 && y >= 0 && x < point_index_in_camera.cols && y < point_index_in_camera.rows) {
// auto index = point_index_in_camera.at<uint16_t>(y, x); // auto index = point_index_in_camera.at<uint16_t>(y, x);
auto indexs = point_index_in_camera.at<cv::Vec2i>(y, x); auto indexs = point_index_in_camera.at<cv::Vec2i>(y, x);
if(indexs[0] == 0 || camera_cloud->points[indexs[0]].z > p3d.z){ // 保留近处点 if (indexs[0] == 0 || camera_cloud->points[indexs[0]].z > p3d.z) { // 保留近处点
// point_index_in_camera.at<uint16_t>(y, x) = i; // point_index_in_camera.at<uint16_t>(y, x) = i;
point_index_in_camera.at<cv::Vec2i>(y, x)[0] = i; point_index_in_camera.at<cv::Vec2i>(y, x)[0] = i;
if(pcl_cloud->points[i].z < 1.5){ // 第二个index保存小于1.5m的点云 if (pcl_cloud->points[i].z < 1.5) { // 第二个index保存小于1.5m的点云
point_index_in_camera.at<cv::Vec2i>(y, x)[1] = i; point_index_in_camera.at<cv::Vec2i>(y, x)[1] = i;
} }
if(vis_project && i % sep == 0){ if (vis_project && i % sep == 0) {
auto color = indexs[0] == 0 ? cv::Scalar(0, 0, 100) : cv::Scalar(255, 255, 0); //auto color = indexs[0] == 0 ? cv::Scalar(0, 0, 100) : cv::Scalar(255, 255, 0);
cv::circle(cv_image, cv::Point(x, y), 1, color, -1); int color_idx = static_cast<int>(p3d.z * depthScale) % 256;
cv::Vec3b color = mat_colormap_.at<cv::Vec3b>(color_idx);
cv::circle(pro_temp, cv::Point(x, y), 1, cv::Scalar(color[0], color[1], color[2]), -1);
}
} }
} }
} }
} }
if(vis_project){
cv_image = 0.5*cv_image + 0.5*pro_temp;
}
std::cout<<" pcl2camera with "<<camera_cloud->size()<<" points: "<<double(clock()-start_)/CLOCKS_PER_SEC<<"s"<<std::endl; //输出时间(单位:s) std::cout<<" pcl2camera with "<<camera_cloud->size()<<" points: "<<double(clock()-start_)/CLOCKS_PER_SEC<<"s"<<std::endl; //输出时间(单位:s)
std::cout << "[Vision_3d.cpp] objs size = " << objs.size() << std::endl; std::cout << "[Vision_3d.cpp] objs size = " << objs.size() << std::endl;
......
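On the projection step above: the cloud is first moved into the camera frame (pcl::transformPointCloud with c2l), so projectPoints receives identity rotation and zero translation, and the distortion model maps camera-frame points straight to raw-image pixels; the p3d.z <= 0 guard then drops points behind the camera, which cv::fisheye::projectPoints would otherwise still place in the image. A condensed sketch of that step (projectToRawImage is a hypothetical wrapper, not in the commit):

    #include <opencv2/calib3d.hpp>
    #include <vector>

    // Hypothetical wrapper: project camera-frame points onto the raw
    // (distorted) image, picking the model by coefficient count as the
    // commit does.
    static std::vector<cv::Point2f> projectToRawImage(
            const std::vector<cv::Point3f>& pts_cam,  // already in camera frame
            const cv::Mat& K, const cv::Mat& D) {
        std::vector<cv::Point2f> px;
        const cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64F);  // identity rotation
        const cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64F);  // zero translation
        if (D.total() == 4)
            cv::fisheye::projectPoints(pts_cam, px, rvec, tvec, K, D);
        else
            cv::projectPoints(pts_cam, rvec, tvec, K, D, px);
        return px;
    }

One more observation on the overlay colouring: with depthScale = 256 / depthSegmentLength, the colour index int(p3d.z * depthScale) % 256 wraps, so the JET ramp repeats every 30 m of depth and distant points reuse near-point colours.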
@@ -172,7 +172,7 @@ public:
      * @param {BEV_GridMap&} vGrid gridmap storing the BEV result
      * @return {*}
      */
-    int Process_objects(const cv::Mat& image, Cloud::Ptr& pcl_cloud, std::vector<object_Seg_info>&objs, BEV_GridMap& vGrid);
+    //int Process_objects(const cv::Mat& image, Cloud::Ptr& pcl_cloud, std::vector<object_Seg_info>&objs, BEV_GridMap& vGrid);
     /**
      * @name: Process_objects
@@ -232,6 +232,7 @@ private:
     // std::map<int, std::pair<int, int>> unvalidTracks;
     cv::Mat m_matrixQ;
     Eigen::Matrix4d m_lidar2camera;
+    waytous::deepinfer::ios::CameraParamPtr camera_param_ptr_;
     // Eigen::Matrix4d m_lidar2odom;
     float obstacle_height_threshold = 30; // obstacle height-difference threshold
@@ -262,6 +263,8 @@ private:
     // maximum vehicle length
     int m_nMaxBEVVehicleLength;
+    // colour lookup table
+    cv::Mat mat_colormap_;
 };
 #endif