Commit 67f18462 authored by xin.wang.waytous

camera-back-warn-flip+rotate

parent 0f05e448
# /sensor/back_middle/image/bgr
back_rgb_sub_topic_name: /sensor/camera/rgb/data
back_rgb_sub_topic_name: /front_rgb/image
berm_signal_sub_topic_name: /berm/signal
# Publisher
vis_pub_topic_name: /perception/back/vis_image
......@@ -8,6 +8,8 @@ vis_pub_topic_name: /perception/back/vis_image
config_root_path: /home/ubuntu/projects/ros_camera_bev/src/camera_back_warn
rgb_config_path: configs/weights/rgb_yolov5.yaml
device: 0
flip: false
rotate: -1 # -1 = no rotation, 0 = 90°, 1 = 180°, 2 = 270° (cv::RotateFlags)
range_ratio: 0.01
# // visualization: whether to visualize the results
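The new `flip` and `rotate` options are passed straight to OpenCV (see the `cv::flip`/`cv::rotate` calls in `sensorManager::infer()` below), so the mapping in the comment follows cv::RotateFlags: 0 = 90° clockwise, 1 = 180°, 2 = 90° counter-clockwise (270°). A minimal sketch of how the two options are typically applied, assuming only the documented OpenCV API (not code from this repository):

```cpp
#include <opencv2/core.hpp>

// Apply the optional flip/rotate preprocessing described by the config above.
// rotate < 0 means "leave the image as-is"; otherwise the value is used
// directly as a cv::RotateFlags enum (0 = 90° CW, 1 = 180°, 2 = 270°).
void applyFlipRotate(cv::Mat& img, bool flip, int rotate) {
    if (flip) {
        cv::flip(img, img, 0);        // 0 = flip around the x-axis (vertical flip)
    }
    if (rotate >= 0) {
        cv::rotate(img, img, rotate); // value interpreted as cv::RotateFlags
    }
}
```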
......
......@@ -5,7 +5,7 @@
* @Date: 2023-09-03 02:18:07
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-12-14 10:14:39
* @LastEditTime: 2023-12-25 07:50:18
*/
......@@ -86,6 +86,14 @@ void sensorManager::infer()
continue;
}
// rotate + flip
if(infer_param.flip){
cv::flip(rgb, rgb, 0);
}
if(infer_param.rotate >= 0){
cv::rotate(rgb, rgb, infer_param.rotate);
}
// define the model's input/output vectors
std::vector<cv::Mat*> inputs;
std::vector<waytous::deepinfer::interfaces::BaseIOPtr> outputs;
......@@ -111,7 +119,7 @@ void sensorManager::infer()
if((det->x2 - det->x1) / rgb.cols > infer_param.range_ratio ||
(det->y2 - det->y1) / rgb.rows > infer_param.range_ratio){
// TODO
eon::log::warn(1000) << "Warning! Pedestrian detected in back.";
eon::log::error(0x05400000) << "Warning! Pedestrian detected in back.";
std::cout << "Warning! Pedestrian detected in back." << std::endl;
break;
}
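The warning fires only when the detected pedestrian is large relative to the frame, i.e. when the box width or height exceeds `range_ratio` of the image dimensions. A minimal sketch of the same gate as a standalone predicate (hypothetical helper, not part of this repository), with explicit float casts in case the box coordinates are stored as integers:

```cpp
#include <opencv2/core.hpp>

// Returns true when the box width or height exceeds range_ratio of the
// image width or height, mirroring the check in sensorManager::infer().
static bool exceedsRangeRatio(float x1, float y1, float x2, float y2,
                              const cv::Mat& img, float range_ratio) {
    const float w = (x2 - x1) / static_cast<float>(img.cols);
    const float h = (y2 - y1) / static_cast<float>(img.rows);
    return w > range_ratio || h > range_ratio;
}
```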
......
......@@ -5,7 +5,7 @@
* @Date: 2023-09-03 01:53:20
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-12-14 10:13:19
* @LastEditTime: 2023-12-19 03:05:34
*/
#ifndef SENSORMANAGER_H
......@@ -91,7 +91,7 @@ public:
};
/**
* @name: updatePointcloud
* @name: updateBermSignal
* @msg: update the berm detection signal
* @param std_msgs::StringConstPtr berm_data
* @return {*}
......
......@@ -5,7 +5,7 @@
* @Date: 2023-07-25 06:16:52
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-12-14 09:02:45
* @LastEditTime: 2023-12-25 07:52:07
*/
......@@ -32,6 +32,9 @@ bool readInferParam(ImageInferNodeParam& param, const std::string& param_path){
param.rgb_config_path = node["rgb_config_path"].as<std::string>();
param.device = node["device"].as<int>();
param.flip = node["flip"].as<bool>();
param.rotate = node["rotate"].as<int>();
param.range_ratio = node["range_ratio"].as<float>();
param.vis_res = node["vis_res"].as<bool>();
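Since `flip` and `rotate` are new keys, an older config file that lacks them would make `node["flip"].as<bool>()` throw during parsing. A hedged sketch of a more tolerant variant using yaml-cpp's fallback overload of `as<T>()` (the struct and function names here are illustrative, not the project's API):

```cpp
#include <yaml-cpp/yaml.h>
#include <string>

// Hypothetical tolerant loader: each key falls back to the struct default
// when the YAML file predates the flip/rotate options.
struct InferParamSketch {
    bool  flip = false;
    int   rotate = -1;          // -1 = no rotation
    float range_ratio = 0.01f;  // warning threshold, fraction of image size
};

InferParamSketch loadParams(const std::string& path) {
    YAML::Node node = YAML::LoadFile(path);
    InferParamSketch p;
    p.flip        = node["flip"].as<bool>(p.flip);
    p.rotate      = node["rotate"].as<int>(p.rotate);
    p.range_ratio = node["range_ratio"].as<float>(p.range_ratio);
    return p;
}
```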
......
......@@ -5,7 +5,7 @@
* @Date: 2023-09-03 03:14:28
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-12-14 09:01:47
* @LastEditTime: 2023-12-25 07:46:10
*/
#ifndef UTILS_H_
......@@ -35,6 +35,9 @@ struct ImageInferNodeParam{
std::string rgb_config_path;
int device = 0;
bool flip = false;
int rotate = -1; // -1 = no rotation; 0/1/2 follow cv::RotateFlags
float range_ratio = 0.01; // warn when the detected pedestrian's width or height exceeds this fraction of the image
// visualization: whether to visualize the results
......
......@@ -44,7 +44,7 @@ units:
fixAspectRatio: false
# instance-seg
nmsThreshold: 0.45
scoreThreshold: 0.1 # used when inference, can be modified
scoreThreshold: 0.15 # used when inference, can be modified
truncatedThreshold: 0.05
maxOutputNum: 1000
rawDetectionLength: 32130 # 25200
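`scoreThreshold` is the confidence cut applied before NMS, so raising it from 0.1 to 0.15 drops more low-confidence boxes earlier in postprocessing. An illustrative sketch of how the two thresholds are typically combined, using the generic OpenCV DNN helper rather than this repository's own postprocessing:

```cpp
#include <opencv2/dnn.hpp>
#include <vector>

// Keep only boxes whose score passes scoreThreshold, then suppress overlapping
// boxes whose IoU exceeds nmsThreshold; returns the indices of survivors.
std::vector<int> filterDetections(const std::vector<cv::Rect>& boxes,
                                  const std::vector<float>& scores,
                                  float scoreThreshold, float nmsThreshold) {
    std::vector<int> kept;
    cv::dnn::NMSBoxes(boxes, scores, scoreThreshold, nmsThreshold, kept);
    return kept;
}
```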
......
......@@ -5,7 +5,7 @@
* @Date: 2023-09-03 02:18:07
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-12-06 03:43:35
* @LastEditTime: 2024-01-18 04:29:42
*/
......@@ -169,6 +169,7 @@ void sensorManager::publishDetection()
// vision_converter_->Process_objects(frame_ptr->img_src_, pcl_cloud, frame_ptr->objs_, *vGrid);
vision_converter_->Process_objects(frame_ptr->img_src_, frame_ptr->depth_, pcl_cloud, frame_ptr->objs_, *vGrid);
}else{
std::cout<<"--------------------------------------------- no pcl, using all depth -------------------------------------------- "<<std::endl;
vision_converter_->Process_objects(frame_ptr->img_src_, frame_ptr->depth_, frame_ptr->objs_, *vGrid);
}
......
......@@ -5,7 +5,7 @@
* @Date: 2023-09-03 03:13:41
* @email: xin.wang@waytous.com
* @LastEditors: wxin
* @LastEditTime: 2023-12-13 07:25:53
* @LastEditTime: 2024-01-18 04:25:22
*/
......@@ -963,8 +963,8 @@ int Visione_3D::Process_Instance_Objects(object_Seg_info& obj, const cv::Mat& in
}
// std::cout<<" instance find table vision "<<double(clock()-start_)/CLOCKS_PER_SEC<<"s"<<std::endl; // print elapsed time (unit: s)
float project_ratio = project_count / (mask_count + 1e-5);
std::cout<<"obj: "<< obj.name <<"with project ratio: "<< project_ratio << std::endl;
bool valid = validTracks.find(obj.track_id) != validTracks.end();
// std::cout<<"obj: "<< obj.name <<" with project ratio: "<< project_ratio << ", " << valid << std::endl;
if(valid){
validTracks[obj.track_id] = 0; // update
}
......@@ -1022,10 +1022,10 @@ int Visione_3D::Process_Instance_Objects(object_Seg_info& obj, const cv::Mat& in
min_z = p3d_.z;
}
}
std::cout << "stone with "<<points3d.size() << " points, height diff: " << max_z - min_z << "m." << std::endl;
filtered = (max_z - min_z < obstacle_height_threshold * 0.01);
if(!filtered){
validTracks[obj.track_id] = 0; // init
std::cout << "------------------------ "<< obj.track_id <<" stone with "<<points3d.size() << " points, height diff: " << max_z - min_z << "m." << std::endl;
}
/*else{// occluded by dust, too few points
if(unvalidTracks.find(obj.track_id) != unvalidTracks.end()){
......@@ -1056,11 +1056,11 @@ int Visione_3D::Process_Instance_Objects(object_Seg_info& obj, const cv::Mat& in
if(unvalidTracks.find(obj.track_id) != unvalidTracks.end()){
unvalidTracks[obj.track_id].second++;
// tracked for a long time but not yet activated; activate it via depth estimation
if(unvalidTracks[obj.track_id].second >= 10){
unvalidTracks.erase(obj.track_id);
validTracks[obj.track_id] = 0; // init
return -1; // using depth estimination
}
// if(unvalidTracks[obj.track_id].second >= 10){
// unvalidTracks.erase(obj.track_id);
// validTracks[obj.track_id] = 0; // init
// return -1; // using depth estimination
// }
}else{
unvalidTracks[obj.track_id] = std::make_pair(0, 0);
}
......