I. C++ Image: Image Enhancement
1. CMakeLists.txt
- cmake_minimum_required(VERSION 2.8.3)
- project(image_enhance)
-
- add_compile_options(-std=c++11)
- find_package(catkin REQUIRED COMPONENTS roscpp cv_bridge image_transport sensor_msgs rospy std_msgs)
- find_package(OpenCV REQUIRED)
-
- catkin_package(
- # INCLUDE_DIRS include
- # LIBRARIES my_image_transport
- # CATKIN_DEPENDS cv_bridge image_transport sensor_msgs
- # DEPENDS system_lib
- )
-
- include_directories( ${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} )
-
-
- add_executable(${PROJECT_NAME}_node src/${PROJECT_NAME}.cpp)
- add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
- target_link_libraries(${PROJECT_NAME}_node ${catkin_LIBRARIES} ${OpenCV_LIBRARIES})
-
-
-
"image_enhance" type="image_enhance_node" name="image_enhance_node" output="screen"> - "image_topic" value="/pub_t"/>
- "frame_rate" value="30"/>
- "mode" value="2"/>
-
-
-
-
"image_enhance" type="image_enhance_node" name="image_enhance_node" output="screen"> - "image_topic" value="/pub_t"/>
- "frame_rate" value="30"/>
- "mode" value="2"/>
-
-
- #include <ros/ros.h>
- #include <image_transport/image_transport.h>
- #include <cv_bridge/cv_bridge.h> // converts between ROS image messages and OpenCV images
- #include <sensor_msgs/image_encodings.h> // sensor message encodings
- #include <std_msgs/Header.h>
- #include <opencv2/imgproc/imgproc.hpp>
- #include <opencv2/highgui/highgui.hpp>
- #define _NODE_NAME_ "image_enhancement" // node name
- // Every OpenCV function lives in the cv namespace, so each call would normally need a cv:: prefix. The using directive below tells the compiler to also look up unqualified names in cv, which saves that boilerplate.
- using namespace cv;
- using namespace std;
-
- class ImageEnhancement // node class
- {
- private: // members
- ros::Publisher enhance_image_pub_; // publisher
- ros::Subscriber image_sub_; // subscriber
- std::string image_topic_; // input image topic name
- bool is_show_result_;
- bool image_ok_, image_enhanced_;
- int frame_rate_;
- int mode_;
- // 0: histogram-equalization-based enhancement
- // 1: log-transform-based enhancement
- // 2: gamma-transform-based enhancement
- // 3: hybrid enhancement
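- // For reference, the point operations behind these modes are:
- //   histogram equalization: s = (L-1) * CDF(r)
- //   log transform:          s = c * log(1 + r)
- //   gamma transform:        s = c * r^gamma   (mode 2 below uses gamma = 2)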
- cv_bridge::CvImagePtr cv_ptr_;
- cv_bridge::CvImagePtr cv_ptr2;
- ros::Timer timer_;
-
- public: // methods
- bool init();
- void loadimage(const sensor_msgs::ImageConstPtr& msg);
- void enhancepub0(const ros::TimerEvent&);
- void enhancepub1(const ros::TimerEvent&);
- void enhancepub2(const ros::TimerEvent&);
- void enhancepub3(const ros::TimerEvent&);
- };
-
- int main(int argc, char** argv)
- {
- ros::init(argc, argv, _NODE_NAME_);
- ImageEnhancement enhance; // create the node object
- enhance.init(); // initialize it
- ros::spin(); // hand control to the ROS event loop
- return 0;
- }
-
-
-
- bool ImageEnhancement::init() // member function of ImageEnhancement
- {
- ros::NodeHandle nh, nh_private("~"); // node handles (nh_private reads the private namespace)
- // Parameters come from the parameter server; they are set in the launch file and can be changed without recompiling.
- nh_private.param<std::string>("image_topic", image_topic_, "");
- nh_private.param<int>("frame_rate", frame_rate_, 30);
- nh_private.param<int>("mode", mode_, 0);
-
- image_ok_ = false; // becomes true once the first image arrives
- image_enhanced_ = false;
- enhance_image_pub_ = nh.advertise<sensor_msgs::Image>("/image_enhancement", 1); // publisher for the enhanced image
-
- image_sub_ = nh.subscribe(image_topic_, 1, &ImageEnhancement::loadimage, this); // subscribe to image_topic_; the callback is a member function, so `this` is passed as the object
-
- if(mode_ == 0)
- timer_ = nh.createTimer(ros::Duration(0.01), &ImageEnhancement::enhancepub0, this);
- else if(mode_ == 1)
- timer_ = nh.createTimer(ros::Duration(0.01), &ImageEnhancement::enhancepub1, this);
- else if(mode_ == 2)
- timer_ = nh.createTimer(ros::Duration(0.01), &ImageEnhancement::enhancepub2, this);
- else
- ROS_ERROR("no valid mode selected!"); // error log (mode 3 / enhancepub3 is declared but not implemented in this post)
- ROS_INFO("image_enhancement initial ok."); // info log
- return true; // init() is declared bool, so return a status
- }
-
- void ImageEnhancement::loadimage(const sensor_msgs::ImageConstPtr& msg)
- {
- ROS_INFO("[%s]: getting image!",_NODE_NAME_);
- cv_bridge::CvImagePtr cv; // cv_bridge image pointer
- cv = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8); // convert and copy the ROS image into an OpenCV image
- cv_ptr_ = cv;
-
- image_ok_ = true;
- image_enhanced_ = false;
- }
-
- void ImageEnhancement::enhancepub0(const ros::TimerEvent&)
- {
- if (image_ok_ == false)
- {
- ROS_ERROR("[%s]: no image input!",_NODE_NAME_);
- return;
- }
- else if (image_enhanced_ == true)
- {
- ROS_INFO("[%s]: waiting for new image!",_NODE_NAME_);
- return;
- }
- else
- ROS_INFO("[%s]: image enhancement start! mode:0",_NODE_NAME_);
-
- static int width, height;
- width = cv_ptr_->image.cols;
- height = cv_ptr_->image.rows;
- // create the output image
- cv::Mat enhanced_image(height, width, CV_8UC3); // cv::Mat is OpenCV's basic image type: rows, cols and pixel format
- enhanced_image.setTo(0); // zero-initialize the enhanced image
- cv_ptr_->image.copyTo(enhanced_image(Rect(0, 0, width, height)));
-
- cv::Mat imageRGB[3];
- split(enhanced_image, imageRGB);
- for (int i=0; i<3; ++i)
- {
- equalizeHist(imageRGB[i], imageRGB[i]);
- }
- merge(imageRGB, 3, enhanced_image);
- // build the outgoing message
- sensor_msgs::ImagePtr imageMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", enhanced_image).toImageMsg();
- imageMsg->header.frame_id = std::string("enhance image");
- imageMsg->header.stamp = ros::Time::now();
- enhance_image_pub_.publish(imageMsg); // publish
- ROS_INFO("[%s]: image enhancement done!",_NODE_NAME_);
- image_enhanced_ = true;
- }
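
Equalizing the B, G and R channels independently, as enhancepub0 does, can shift hues because each channel is stretched with a different mapping. A common variant (a sketch, not part of the original node) equalizes only the luma channel:

- #include <opencv2/opencv.hpp>
- #include <vector>
-
- // Sketch: histogram equalization on the luma channel only, preserving hue.
- void equalizeLuma(cv::Mat& bgr)
- {
-     cv::Mat ycrcb;
-     cv::cvtColor(bgr, ycrcb, cv::COLOR_BGR2YCrCb);
-     std::vector<cv::Mat> ch;
-     cv::split(ycrcb, ch);
-     cv::equalizeHist(ch[0], ch[0]); // stretch brightness, leave chroma untouched
-     cv::merge(ch, ycrcb);
-     cv::cvtColor(ycrcb, bgr, cv::COLOR_YCrCb2BGR);
- }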
-
- void ImageEnhancement::enhancepub1(const ros::TimerEvent&)
- {
- if (image_ok_ == false)
- {
- ROS_ERROR("[%s]: no image input!",_NODE_NAME_);
- return;
- }
- else if (image_enhanced_ == true)
- {
- ROS_INFO("[%s]: waiting for new image!",_NODE_NAME_);
- return;
- }
- else
- ROS_INFO("[%s]: image enhancement start! mode:0",_NODE_NAME_);
-
- static int width, height;
- width = cv_ptr_->image.cols;
- height = cv_ptr_->image.rows;
- cv::Mat enhanced_image(height, width, CV_8UC3);
- enhanced_image.setTo(0);
- cv_ptr_->image.copyTo(enhanced_image(Rect(0, 0, width, height)));
-
- cv::Mat srcLog(enhanced_image.size(), CV_32FC3);
-
- for (int i=0; i<enhanced_image.rows; ++i)
- {
- for (int j=0; j<enhanced_image.cols; ++j)
- {
- srcLog.at<Vec3f>(i, j)[0] = log(1 + enhanced_image.at<Vec3b>(i, j)[0]);
- srcLog.at<Vec3f>(i, j)[1] = log(1 + enhanced_image.at<Vec3b>(i, j)[1]);
- srcLog.at<Vec3f>(i, j)[2] = log(1 + enhanced_image.at<Vec3b>(i, j)[2]);
- }
- }
- // min-max normalize to [0, 255]
- normalize(srcLog, srcLog, 0, 255, NORM_MINMAX);
- // convert the float image back to 8-bit
- convertScaleAbs(srcLog, srcLog);
-
- sensor_msgs::ImagePtr imageMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", srcLog).toImageMsg();
- imageMsg->header.frame_id = std::string("enhance image");
- imageMsg->header.stamp = ros::Time::now();
- enhance_image_pub_.publish(imageMsg);
- ROS_INFO("[%s]: image enhancement done!",_NODE_NAME_);
- image_enhanced_ = true;
- }
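
The loop above applies the log transform and the two OpenCV calls rescale the result, i.e.

$$ s = \log(1+r), \qquad s' = 255 \cdot \frac{s - s_{\min}}{s_{\max} - s_{\min}}, $$

where the minimum and maximum are taken over the whole float image (that is what NORM_MINMAX does) before convertScaleAbs truncates to 8 bits.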
-
- void ImageEnhancement::enhancepub2(const ros::TimerEvent&)
- {
- if (image_ok_ == false)
- {
- ROS_ERROR("[%s]: no image input!",_NODE_NAME_);
- return;
- }
- else if (image_enhanced_ == true)
- {
- ROS_INFO("[%s]: waiting for new image!",_NODE_NAME_);
- return;
- }
- else
- ROS_INFO("[%s]: image enhancement start! mode:0",_NODE_NAME_);
-
- static int width, height;
- width = cv_ptr_->image.cols;
- height = cv_ptr_->image.rows;
- cv::Mat enhanced_image(height, width, CV_8UC3);
- enhanced_image.setTo(0);
- cv_ptr_->image.copyTo(enhanced_image(Rect(0, 0, width, height)));
-
-
- float pixels[256]; // lookup table for the gamma transform (gamma = 2)
- for (int i=0; i<256; ++i)
- {
- pixels[i] = powf(i,2);
- }
-
- cv::Mat srcLog(enhanced_image.size(), CV_32FC3);
- for (int i=0; i<enhanced_image.rows; ++i)
- {
- for (int j=0; j<enhanced_image.cols; ++j)
- {
- srcLog.at<Vec3f>(i, j)[0] = pixels[enhanced_image.at<Vec3b>(i, j)[0]];
- srcLog.at<Vec3f>(i, j)[1] = pixels[enhanced_image.at<Vec3b>(i, j)[1]];
- srcLog.at<Vec3f>(i, j)[2] = pixels[enhanced_image.at<Vec3b>(i, j)[2]];
- }
- }
- normalize(srcLog, srcLog, 0, 255, NORM_MINMAX);
- //Mat img;
- //resize(srcLog, img, Size(640, 360), 0, 0, INTER_CUBIC);
- convertScaleAbs(srcLog, srcLog, 4, 30); // scale by 4 and add 30 while converting to 8-bit
- resize(srcLog, srcLog, Size(1920,1440), 0, 0, INTER_LINEAR); // fx/fy come before the interpolation flag
- medianBlur(srcLog, srcLog, 3);
-
-
- sensor_msgs::ImagePtr imageMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8",srcLog).toImageMsg();
- imageMsg->header.frame_id = std::string("enhance image");
- imageMsg->header.stamp = ros::Time::now();
- enhance_image_pub_.publish(imageMsg);
- image_enhanced_ = true;
- ROS_INFO("[%s]: image enhancement done!",_NODE_NAME_);
- }
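
The per-pixel powf() loop in enhancepub2 is the gamma step written out by hand. For reference, the same transform can be expressed with a 256-entry lookup table and cv::LUT, which OpenCV applies to every channel at once. This is a minimal sketch (the normalization keeps the output in [0, 255]), not part of the original node:

- #include <opencv2/opencv.hpp>
- #include <cmath>
-
- // Gamma transform via LUT: dst = 255 * (src/255)^gamma, applied per channel.
- cv::Mat gammaTransform(const cv::Mat& src, double gamma)
- {
-     cv::Mat lut(1, 256, CV_8U);
-     for (int i = 0; i < 256; ++i)
-         lut.at<uchar>(0, i) = cv::saturate_cast<uchar>(std::pow(i / 255.0, gamma) * 255.0);
-     cv::Mat dst;
-     cv::LUT(src, lut, dst); // table lookup for every pixel
-     return dst;
- }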
II. C++ Point Cloud: Point Cloud Projection
1. CMakeLists.txt
- cmake_minimum_required(VERSION 2.8.3)
- project(kittivelo_cam)
- add_compile_options(-std=c++11)
- set(CMAKE_BUILD_TYPE Release)
-
- find_package(catkin REQUIRED COMPONENTS
- cv_bridge
- image_transport
- message_filters
- roscpp
- rospy
- std_msgs
- tf
- pcl_conversions
- tf2
- tf2_ros
- tf2_geometry_msgs
- )
-
- catkin_package(
- # INCLUDE_DIRS include
- # LIBRARIES colored_pointcloud
- # CATKIN_DEPENDS cv_bridge image_transport message_filters roscpp rospy std_msgs
- # DEPENDS system_lib
- )
-
- include_directories(
- include
- ${catkin_INCLUDE_DIRS}
- )
-
- # opencv
- find_package(OpenCV 3 REQUIRED)
- include_directories(${OpenCV_INCLUDE_DIRS})
-
- # pcl
- find_package(PCL 1.7 REQUIRED)
- include_directories(${PCL_INCLUDE_DIRS})
- add_definitions(${PCL_DEFINITIONS})
-
- # boost
- find_package(Boost REQUIRED)
- include_directories(${Boost_INCLUDE_DIRS})
-
- ## Declare a C++ executable
- ## With catkin_make all packages are built within a single CMake context
-
-
-
- add_executable(kittivelo_cam src/kittivelo_cam.cpp)
- ## Specify libraries to link a library or executable target against
- target_link_libraries(kittivelo_cam ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} ${Boost_LIBRARIES})
2. package.xml
- "1.0"?>
"2"> -
kittivelo_cam -
0.0.0 -
The colored_pointcloud package -
-
-
-
-
"imrs@todo.todo">imrs -
-
-
-
-
-
TODO -
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
catkin -
cv_bridge -
image_transport -
message_filters -
roscpp -
rospy -
std_msgs -
cv_bridge -
image_transport -
message_filters -
roscpp -
rospy -
std_msgs -
cv_bridge -
image_transport -
message_filters -
roscpp -
rospy -
std_msgs -
-
-
- <export>
-
-
-
3. launch
- <launch>
-   <node pkg="kittivelo_cam" type="kittivelo_cam" name="kittivelo_cam" output="screen"/>
- </launch>
4. cpp
- //
- // Created by cai on 2021/8/26.
- //
-
- #include <iostream>
- #include <string>
- #include <vector>
- #include <cmath>
- #include <ros/ros.h>
- #include <cv_bridge/cv_bridge.h>
- #include <image_transport/image_transport.h>
- #include <sensor_msgs/Image.h>
- #include <sensor_msgs/image_encodings.h>
- #include <sensor_msgs/PointCloud2.h>
- //#include "fssim_common/Cmd.h"
- #include <Eigen/Core>
- // dense-matrix algebra (inverse, eigenvalues, ...)
- #include <Eigen/Dense>
- #include "sensor_msgs/LaserScan.h"
- #include "geometry_msgs/PoseWithCovarianceStamped.h"
- #include <opencv2/opencv.hpp>
- #include <opencv2/highgui/highgui.hpp>
- #include <opencv2/imgproc/imgproc.hpp>
-
- #include <message_filters/subscriber.h>
- #include <message_filters/synchronizer.h>
- #include <message_filters/sync_policies/approximate_time.h>
- #include <boost/bind.hpp>
-
- #include <pcl/point_cloud.h>
- #include <pcl/point_types.h>
- #include <pcl_conversions/pcl_conversions.h>
- #include <pcl/filters/filter.h> // pcl::removeNaNFromPointCloud
-
- using namespace Eigen;
- using namespace cv;
- using namespace std;
-
- #include "colored_pointcloud/colored_pointcloud.h"
-
- cv::Mat P_rect_00(3,4,cv::DataType<double>::type); // 3x4 projection matrix after rectification
- cv::Mat R_rect_00(4,4,cv::DataType<double>::type); // rectifying rotation to make the image planes co-planar (3x3, stored homogeneous as 4x4)
- cv::Mat RT(4,4,cv::DataType<double>::type); // rotation matrix and translation vector (velodyne -> camera)
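
Together these matrices implement the KITTI projection model used in transformpoint() below: a homogeneous Velodyne point $X$ is mapped into pixel coordinates by

$$ Y = P_{rect}^{(00)} \, R_{rect}^{(00)} \, T_{velo}^{cam} \, X, \qquad u = Y_1 / Y_3, \quad v = Y_2 / Y_3 . $$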
- class RsCamFusion
- {
- //**********************************************************************************************************
- // 1. member variables
- private:
- typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::PointCloud2> slamSyncPolicy;
- message_filters::Synchronizer<slamSyncPolicy>* sync_;
- message_filters::Subscriber<sensor_msgs::Image>* camera_sub1;
- message_filters::Subscriber<sensor_msgs::PointCloud2>* lidar_sub;
-
- pcl::PointCloud<PointXYZRGBI>::Ptr colored_cloud_toshow;
- pcl::PointCloud<PointXYZRGBI>::Ptr colored_cloud;
- pcl::PointCloud<PointXYZRGBI>::Ptr cloud_toshow;
- /*
- pcl::PointCloud<pcl::PointXYZRGB>::Ptr colored_cloud_toshow;
- pcl::PointCloud<pcl::PointXYZRGB>::Ptr colored_cloud;
- pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_toshow;
- */
- pcl::PointCloud<pcl::PointXYZI>::Ptr input_cloud;
- pcl::PointCloud<pcl::PointXYZI>::Ptr input_cloud_ptr;
- pcl::PointCloud<pcl::PointXYZI>::Ptr raw_cloud;
::Ptr raw_cloud; - cv::Mat input_image;
- cv::Mat image_to_show,image_to_show1;
- int frame_count = 0;
- static cv::Size imageSize;
- static ros::Publisher pub;
- //store calibration data in Opencv matrices
-
- image_transport::Publisher depth_pub ;
- sensor_msgs::ImagePtr depth_msg;
- ros::NodeHandle nh;
- ros::Publisher colored_cloud_showpub;
- ros::Subscriber sub;
- ros::Publisher fused_image_pub1;
-
- public:
- // constructor
- RsCamFusion():
-
- nh("~"){
-
- RT.at<double>(0,0) = 7.533745e-03;RT.at<double>(0,1) = -9.999714e-01;RT.at<double>(0,2) = -6.166020e-04;RT.at<double>(0,3) = -4.069766e-03;
- RT.at<double>(1,0) = 1.480249e-02;RT.at<double>(1,1) = 7.280733e-04;RT.at<double>(1,2) = -9.998902e-01;RT.at<double>(1,3) = -7.631618e-02;
- RT.at<double>(2,0) = 9.998621e-01;RT.at<double>(2,1) = 7.523790e-03;RT.at<double>(2,2) = 1.480755e-02;RT.at<double>(2,3) = -2.717806e-01;
- RT.at<double>(3,0) = 0.0;RT.at<double>(3,1) = 0.0;RT.at<double>(3,2) = 0.0;RT.at<double>(3,3) = 1.0;
-
- R_rect_00.at<double>(0,0) = 9.999239e-01;R_rect_00.at<double>(0,1) = 9.837760e-03;R_rect_00.at<double>(0,2) = -7.445048e-03;R_rect_00.at<double>(0,3) = 0.0;
- R_rect_00.at<double>(1,0) = -9.869795e-03;R_rect_00.at<double>(1,1) = 9.999421e-01;R_rect_00.at<double>(1,2) = -4.278459e-03;R_rect_00.at<double>(1,3) = 0.0;
- R_rect_00.at<double>(2,0) = 7.402527e-03;R_rect_00.at<double>(2,1) = 4.351614e-03;R_rect_00.at<double>(2,2) = 9.999631e-01;R_rect_00.at<double>(2,3) = 0.0;
- R_rect_00.at<double>(3,0) = 0.0;R_rect_00.at<double>(3,1) = 0.0;R_rect_00.at<double>(3,2) = 0.0;R_rect_00.at<double>(3,3) = 1.0;
-
- P_rect_00.at<double>(0,0) = 7.215377e+02;P_rect_00.at<double>(0,1) = 0.000000e+00;P_rect_00.at<double>(0,2) = 6.095593e+02;P_rect_00.at<double>(0,3) = 0.000000e+00;
- P_rect_00.at<double>(1,0) = 0.000000e+00;P_rect_00.at<double>(1,1) = 7.215377e+02;P_rect_00.at<double>(1,2) = 1.728540e+02;P_rect_00.at<double>(1,3) = 0.000000e+00;
- P_rect_00.at<double>(2,0) = 0.000000e+00;P_rect_00.at<double>(2,1) = 0.000000e+00;P_rect_00.at<double>(2,2) = 1.000000e+00;P_rect_00.at<double>(2,3) = 0.000000e+00;
- camera_sub1 = new message_filters::Subscriber<sensor_msgs::Image>(nh, "/forward", 300);
- lidar_sub = new message_filters::Subscriber<sensor_msgs::PointCloud2>(nh, "/kitti/velo/pointcloud", 100);
- sync_ = new message_filters::Synchronizer<slamSyncPolicy>(slamSyncPolicy(100), *camera_sub1, *lidar_sub);
- sync_->registerCallback(boost::bind(&RsCamFusion::callback, this, _1, _2));
- cout << "waiting for images" << endl;
- allocateMemory(); // initialize the cloud pointers
- }
-
- void allocateMemory()
- {
- raw_cloud.reset(new pcl::PointCloud<pcl::PointXYZI>());
- colored_cloud_toshow.reset(new pcl::PointCloud<PointXYZRGBI>());
- colored_cloud.reset(new pcl::PointCloud<PointXYZRGBI>());
- cloud_toshow.reset(new pcl::PointCloud<PointXYZRGBI>());
- input_cloud.reset(new pcl::PointCloud<pcl::PointXYZI>());
- input_cloud_ptr.reset(new pcl::PointCloud<pcl::PointXYZI>());
-
- }
- void resetParameters(){
- raw_cloud->clear();
- input_cloud_ptr->clear();
- input_cloud->clear();
- colored_cloud_toshow->clear();
- colored_cloud->clear();
- cloud_toshow->clear();
- }
- void callback(const sensor_msgs::ImageConstPtr input_image_msg1,
- const sensor_msgs::PointCloud2ConstPtr input_cloud_msg)
- {
- resetParameters();
- cv::Mat input_image1;
- cv_bridge::CvImagePtr cv_ptr1;
-
- std_msgs::Header image_header1 = input_image_msg1->header;
- std_msgs::Header cloud_header = input_cloud_msg->header;
-
- // grab the data
- // convert the ROS image message
-
- cv_ptr1 = cv_bridge::toCvCopy(input_image_msg1, sensor_msgs::image_encodings::BGR8);
- input_image1 = cv_ptr1->image;
-
- // grab the point cloud
-
- pcl::fromROSMsg(*input_cloud_msg, *input_cloud_ptr); // deserialize input_cloud_msg into a PCL cloud
- std::vector<int> indices;
- pcl::removeNaNFromPointCloud(*input_cloud_ptr, *input_cloud_ptr, indices); // drop invalid points
- transformpoint(input_cloud_ptr, input_image1, P_rect_00, R_rect_00, RT);
- cout << "start" << endl;
- colored_cloud_showpub = nh.advertise<sensor_msgs::PointCloud2>("colored_cloud_toshow", 10); // (re)advertising inside the callback works, but is usually done once in the constructor
- publishCloudtoShow(colored_cloud_showpub, cloud_header, colored_cloud_toshow);
- fused_image_pub1 = nh.advertise<sensor_msgs::Image>("image_to_show", 10);
- publishImage(fused_image_pub1, image_header1, image_to_show1);
- frame_count = frame_count + 1;
- }
-
- //××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××
- void transformpoint(const pcl::PointCloud<pcl::PointXYZI>::ConstPtr& input_cloud, const cv::Mat input_image, cv::Mat &P_rect_00, cv::Mat &R_rect_00, cv::Mat &RT)
- {
-
- cv::Mat X(4,1,cv::DataType<double>::type);
- cv::Mat Y(4,1,cv::DataType<double>::type);
- cv::Point pt;
- std::vector<cv::Point3d> rawPoints; // unused (the element type was lost in the original post)
-
- *raw_cloud = *input_cloud;
- image_to_show = input_image.clone();
- for (size_t i = 0; i < raw_cloud->points.size(); i++) {
-
- // convert each 3D point into homogeneous coordinates and store it in a 4D variable X
- X.at<double>(0, 0) = raw_cloud->points[i].x;
- X.at<double>(1, 0) = raw_cloud->points[i].y;
- X.at<double>(2, 0) = raw_cloud->points[i].z;
- X.at<double>(3, 0) = 1;
-
- //apply the projection equation to map X onto the image plane of the camera. Store the result in Y
-
- // apply the calibration chain
- Y=P_rect_00*R_rect_00*RT*X;
- pt.x=Y.at<double>(0, 0) / Y.at<double>(2, 0);
- pt.y=Y.at<double>(1, 0) / Y.at<double>(2, 0);
- // transform Y back into Euclidean coordinates and store the result in the variable pt
- float d = Y.at<double>(2, 0)*1000.0;
- float val = raw_cloud->points[i].x;
- float maxVal = 20.0;
- int red = min(255, (int) (255 * abs((val - maxVal) / maxVal)));
- int green = min(255, (int) (255 * (1 - abs((val - maxVal) / maxVal))));
-
- if(pt.x<1240 &&pt.x>0 &&pt.y<375 &&pt.y>0 &&d>0)
- {
- /*
- if (int(input_image.at<Vec3b>(pt.y, pt.x)[2]) == 128 || int(input_image.at<Vec3b>(pt.y, pt.x)[2]) == 152
- ||int(input_image.at<Vec3b>(pt.y, pt.x)[2]) == 107 || int(input_image.at<Vec3b>(pt.y, pt.x)[2]) == 244
- ||int(input_image.at<Vec3b>(pt.y, pt.x)[2]) == 70 )
- {
- */
-
-
- cv::circle(image_to_show, pt, 1, cv::Scalar(0, green, red), cv::FILLED);
- image_to_show1 = image_to_show;
- PointXYZRGBI point;
- point.x = raw_cloud->points[i].x;
- point.y = raw_cloud->points[i].y; //to create colored point clouds
- point.z = raw_cloud->points[i].z;
- point.intensity = raw_cloud->points[i].intensity;
- point.g = input_image.at<Vec3b>(pt.y, pt.x)[1];
- point.b = input_image.at<Vec3b>(pt.y, pt.x)[0];
- point.r = input_image.at<Vec3b>(pt.y, pt.x)[2];
- colored_cloud->points.push_back(point);
-
-
-
- }
- /*
- else
- {
- pcl::PointXYZRGB point;
- point.x = raw_cloud->points[i].x;
- point.y = raw_cloud->points[i].y; //to create colored point clouds
- point.z = raw_cloud->points[i].z;
- //point.intensity = raw_cloud->points[i].intensity;
- point.g = 0;
- point.b = 0;
- point.r = 0;
- cloud_toshow->points.push_back(point);
- }
- */
-
- }
- *colored_cloud_toshow=*colored_cloud+*cloud_toshow;
- }
-
- void publishCloudtoShow(const ros::Publisher& cloudtoshow_pub, const std_msgs::Header& header,
- const pcl::PointCloud<PointXYZRGBI>::ConstPtr& cloud)
::ConstPtr& cloud) - {
- sensor_msgs::PointCloud2 output_msg;
- pcl::toROSMsg(*cloud, output_msg);
- output_msg.header = header;
- cloudtoshow_pub.publish(output_msg);
- }
- void publishImage(const ros::Publisher& image_pub, const std_msgs::Header& header, const cv::Mat image)
- {
- cv_bridge::CvImage output_image;
- output_image.header = header;
- output_image.encoding = sensor_msgs::image_encodings::TYPE_8UC3; // raw 8UC3; "bgr8" would also record the channel order
- output_image.image = image;
- image_pub.publish(output_image.toImageMsg()); // convert to a sensor_msgs::Image before publishing
- }
- };
- //*****************************************************************************************************
- //
- // entry point
- //
- //×××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××
- int main(int argc, char** argv)
- {
- // 1. initialize the node
- ros::init(argc, argv, "kitti3D2");
- RsCamFusion RF;
- ros::spin();
- return 0;
- }
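
The post never shows colored_pointcloud/colored_pointcloud.h, which defines the PointXYZRGBI type used above. As a purely hypothetical sketch (the real header may differ), a custom XYZ + intensity + RGB point for PCL would be declared and registered roughly like this:

- #include <pcl/point_types.h>
- #include <pcl/register_point_struct.h>
-
- // Hypothetical stand-in for the PointXYZRGBI declared in colored_pointcloud.h.
- struct PointXYZRGBI
- {
-     PCL_ADD_POINT4D;   // float x, y, z (plus SSE padding)
-     PCL_ADD_RGB;       // packed rgb/rgba union with r, g, b, a accessors
-     float intensity;
-     EIGEN_MAKE_ALIGNED_OPERATOR_NEW
- } EIGEN_ALIGN16;
-
- POINT_CLOUD_REGISTER_POINT_STRUCT(PointXYZRGBI,
-     (float, x, x)(float, y, y)(float, z, z)
-     (float, rgb, rgb)(float, intensity, intensity))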
III. Python: YOLOv5 Neural Network
- #!/usr/bin/env python
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
- """
- Run inference on images, videos, directories, streams, etc.
- Usage - sources:
- $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam
- img.jpg # image
- vid.mp4 # video
- path/ # directory
- path/*.jpg # glob
- 'https://youtu.be/Zgi9g1ksQHc' # YouTube
- 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
- Usage - formats:
- $ python path/to/detect.py --weights yolov5s.pt # PyTorch
- yolov5s.torchscript # TorchScript
- yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
- yolov5s.xml # OpenVINO
- yolov5s.engine # TensorRT
- yolov5s.mlmodel # CoreML (MacOS-only)
- yolov5s_saved_model # TensorFlow SavedModel
- yolov5s.pb # TensorFlow GraphDef
- yolov5s.tflite # TensorFlow Lite
- yolov5s_edgetpu.tflite # TensorFlow Edge TPU
- """
- import os
- import roslib
- import rospy
- from std_msgs.msg import Header
- from std_msgs.msg import String
- from sensor_msgs.msg import Image
- import numpy as np
- import argparse
- import sys
- from pathlib import Path
-
- import cv2
- import torch
- import torch.backends.cudnn as cudnn
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[0] # YOLOv5 root directory
- if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
-
- from models.common import DetectMultiBackend
- from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
- from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
- increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
- from utils.plots import Annotator, colors, save_one_box
- from utils.torch_utils import select_device, time_sync
-
-
- class SubscribeAndPublish:
- def __init__(self):
- self.all_obstacle_str=''
- self.sub1_name="/cam_rgb1/usb_cam/image_raw"
- self.sub1= rospy.Subscriber(self.sub1_name, Image,self.callback_rgb)
- self.pub1_name="detect_rgb"
- self.pub1= rospy.Publisher(self.pub1_name, Image,queue_size=1)
- self.model=model
- self.device=device
- self.img_rgb=[]
- def callback_rgb(self,data):
- print('callback1')
- img_rgb = np.frombuffer(data.data, dtype=np.uint8).reshape(data.height, data.width, -1)
- img_rgb=img_rgb[:,:,::-1]
- self.img_rgb=img_rgb
- cv2.imwrite('./temp/rgb/rgb.jpg',img_rgb)
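- # note: the frame is round-tripped through disk: imwrite() saves it to ./temp/rgb/,
- # and run() below reloads that directory as its --source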
- img_rgb=self.run(**vars(opt))
- if len(img_rgb)>0:
- print('send img')
- self.publish_image(self.pub1,img_rgb,'base_link')
- def publish_image(self,pub, data, frame_id='base_link'):
- assert len(data.shape) == 3, 'len(data.shape) must be equal to 3.'
- header = Header(stamp=rospy.Time.now())
- header.frame_id = frame_id
-
- msg = Image()
- msg.height = data.shape[0]
- msg.width = data.shape[1]
- msg.encoding = 'rgb8'
- msg.data = np.array(data).tostring()
- msg.header = header
- msg.step = msg.width * 1 * 3
-
- pub.publish(msg)
-
-
- @torch.no_grad() # inference only, so gradients are disabled (moved here from the class; it guards this method)
- def run(self, weights=ROOT / 'yolov5s.pt', # model.pt path(s); note the added self, since run() lives inside the class
- source=ROOT / './temp/rgb/', # file/dir/URL/glob, 0 for webcam
- data=ROOT / 'data/coco128.yaml', # dataset.yaml path
- imgsz=(640, 640), # inference size (height, width)
- conf_thres=0.25, # confidence threshold
- iou_thres=0.45, # NMS IOU threshold
- max_det=1000, # maximum detections per image
- device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
- view_img=False, # show results
- save_txt=False, # save results to *.txt
- save_conf=False, # save confidences in --save-txt labels
- save_crop=False, # save cropped prediction boxes
- nosave=False, # do not save images/videos
- classes=None, # filter by class: --class 0, or --class 0 2 3
- agnostic_nms=False, # class-agnostic NMS
- augment=False, # augmented inference
- visualize=False, # visualize features
- update=False, # update all models
- project=ROOT / 'runs/detect', # save results to project/name
- name='exp', # save results to project/name
- exist_ok=False, # existing project/name ok, do not increment
- line_thickness=3, # bounding box thickness (pixels)
- hide_labels=False, # hide labels
- hide_conf=False, # hide confidences
- half=False, # use FP16 half-precision inference
- dnn=False, # use OpenCV DNN for ONNX inference
- ):
- source = str(source)
- save_img = not nosave and not source.endswith('.txt') # save inference images
- is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
- is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
- webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
- if is_url and is_file:
- source = check_file(source) # download
-
- # Directories
- save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
- # (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
-
- # Load model
-
- stride, names, pt = model.stride, model.names, model.pt
- imgsz = check_img_size(imgsz, s=stride) # check image size
-
- # Dataloader
- if webcam:
- view_img = check_imshow()
- cudnn.benchmark = True # set True to speed up constant image size inference
- dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
- bs = len(dataset) # batch_size
- else:
- dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
- bs = 1 # batch_size
- vid_path, vid_writer = [None] * bs, [None] * bs
-
- # Run inference
- model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
- dt, seen = [0.0, 0.0, 0.0], 0
- for path, im, im0s, vid_cap, s in dataset:
- t1 = time_sync()
- im=torch.from_numpy(im).to(self.device)
- im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
- im /= 255 # 0 - 255 to 0.0 - 1.0
- if len(im.shape) == 3:
- im = im[None] # expand for batch dim
- t2 = time_sync()
- dt[0] += t2 - t1
-
- # Inference
- visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
- pred = model(im, augment=augment, visualize=visualize)
- t3 = time_sync()
- dt[1] += t3 - t2
-
- # NMS
- pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
- dt[2] += time_sync() - t3
-
- # Second-stage classifier (optional)
- # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
-
- # Process predictions
- for i, det in enumerate(pred): # per image
- seen += 1
- if webcam: # batch_size >= 1
- p, im0, frame = path[i], im0s[i].copy(), dataset.count
- s += f'{i}: '
- else:
- p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
-
- p = Path(p) # to Path
- save_path = str(save_dir / p.name) # im.jpg
- txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt
- s += '%gx%g ' % im.shape[2:] # print string
- gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
- imc = im0.copy() if save_crop else im0 # for save_crop
- annotator = Annotator(im0, line_width=line_thickness, example=str(names))
- if len(det):
- # Rescale boxes from img_size to im0 size
- det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
-
- # Print results
- for c in det[:, -1].unique():
- n = (det[:, -1] == c).sum() # detections per class
- s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
-
- # Write results
- for *xyxy, conf, cls in reversed(det):
- if save_txt: # Write to file
- xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
- line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
- with open(txt_path + '.txt', 'a') as f:
- f.write(('%g ' * len(line)).rstrip() % line + '\n')
-
- if save_img or save_crop or view_img: # Add bbox to image
- c = int(cls) # integer class
- label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
- annotator.box_label(xyxy, label, color=colors(c, True))
- if save_crop:
- save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
-
- # Stream results
- im0 = annotator.result()
- if view_img:
- cv2.imshow(str(p), im0)
- cv2.waitKey(1) # 1 millisecond
-
- # Save results (image with detections)
- if save_img:
- if dataset.mode == 'image':
- cv2.imwrite(save_path, im0)
- else: # 'video' or 'stream'
- if vid_path[i] != save_path: # new video
- vid_path[i] = save_path
- if isinstance(vid_writer[i], cv2.VideoWriter):
- vid_writer[i].release() # release previous video writer
- if vid_cap: # video
- fps = vid_cap.get(cv2.CAP_PROP_FPS)
- w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
- h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
- else: # stream
- fps, w, h = 30, im0.shape[1], im0.shape[0]
- save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
- vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
- vid_writer[i].write(im0)
-
- # Print time (inference-only)
- LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
-
- # Print results
- t = tuple(x / seen * 1E3 for x in dt) # speeds per image
- LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
- if save_txt or save_img:
- s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
- LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
- if update:
- strip_optimizer(weights) # update model (to fix SourceChangeWarning)
- return im0
-
-
- def parse_opt():
- parser = argparse.ArgumentParser()
- parser.add_argument('--weights', type=str, default=ROOT / '/home/cxl/ros_yolov5/src/yolov5/weights/yolov5x.pt', help='model path(s)')
- parser.add_argument('--source', type=str, default=ROOT / './temp/rgb/', help='file/dir/URL/glob, 0 for webcam')
- parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
- parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
- parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') # only detections above 0.25 confidence are kept
- parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') # overlap threshold for merging boxes
- parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
- parser.add_argument('--view-img', action='store_true', help='show results') # view results live
- parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') # save label files
- parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') # save confidences too
- parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') # save box crops
- parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
- parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') # detect only the given classes
- parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
- parser.add_argument('--augment', action='store_true', help='augmented inference') # test-time augmentation
- parser.add_argument('--visualize', action='store_true', help='visualize features')
- parser.add_argument('--update', action='store_true', help='update all models')
- parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') # where results are written
- parser.add_argument('--name', default='exp', help='save results to project/name')
- parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') # reuse the folder instead of creating a new one
- parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
- parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
- parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
- parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
- parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
- opt = parser.parse_args() # all arguments end up in opt
- opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
- print_args(FILE.stem, opt)
- return opt
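
The two thresholds drive non-maximum suppression: a box is kept only if its confidence exceeds conf-thres, and two boxes of the same class count as duplicates when their intersection over union

$$ \mathrm{IoU}(A, B) = \frac{|A \cap B|}{|A \cup B|} $$

exceeds iou-thres, in which case only the higher-scoring one survives.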
-
-
- def main(opt,model,device):
- rospy.init_node('yolov5', anonymous=True)
-
- #####################
- t=SubscribeAndPublish()
- #####################
- rospy.spin()
-
-
- if __name__ == "__main__":
- opt = parse_opt()
- device = ''
- weights = '/home/cxl/ros_yolov5/src/yolov5/weights/yolov5x.pt'
- dnn=False
- device = select_device(device)
- model = DetectMultiBackend(weights, device=device, dnn=dnn)
- main(opt,model,device)
IV. Python: Image Dehazing
- #!/usr/bin/env python
- import os
- import roslib
- import rospy
-
- from std_msgs.msg import Header
- from std_msgs.msg import String
- from sensor_msgs.msg import Image
-
- import cv2
- import numpy as np
- import argparse
-
-
- class SubscribeAndPublish:
- def __init__(self):
- print("waiting image")
- self.all_obstacle_str=''
- self.sub1_name="/image_get"
- self.sub1= rospy.Subscriber(self.sub1_name, Image,self.callback)
- self.pub1_name="refog_image"
- self.pub1= rospy.Publisher(self.pub1_name, Image,queue_size=1)
- self.img = []
-
- def callback(self,data):
- print('callback')
- global i
- img = np.frombuffer(data.data, dtype=np.uint8).reshape(data.height, data.width, -1)
- img = img[:,:,::-1]
- img = cv2.resize(img, (720, 405), interpolation=cv2.INTER_LINEAR)
- #img = cv2.medianBlur(img, 5)
-
- #kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32) # sharpening kernel
- #img = cv2.filter2D(img, -1, kernel=kernel)
- #print(img.shape)
- m = self.deHaze(img/255)*255
- # deHaze returns float64 and may contain negative values
- m = np.clip(m, a_min=0, a_max=255) # clamp to [0, 255] first to remove the negatives
- #print(m)
- cv2.imwrite("/home/cxl/ros_yolov5/src/datasets/result/2.png",m)
- m = m.astype(np.uint8) # then convert to 8-bit (uint8, not int8, or values above 127 would wrap)
- i+=1
- #print(m.dtype)
- self.publish_image(self.pub1,m,'base_link')
- #print('finish remove fog')
-
- def publish_image(self,pub, data, frame_id='base_link'):
- assert len(data.shape) == 3, 'len(data.shape) must be equal to 3.'
- print(data.shape)
- header = Header(stamp=rospy.Time.now())
- header.frame_id = frame_id
-
- msg = Image()
- msg.height = data.shape[0]
- msg.width = data.shape[1]
- msg.encoding = 'rgb8' # the payload must be 8-bit
- msg.data = np.array(data).tobytes()
- msg.header = header
- msg.step = msg.width *1*3
- #print("**************************")
-
- pub.publish(msg)
- #print("---------------------------")
-
- def zmMinFilterGray(self,src, r=7):
- '''minimum filter; r is the filter radius'''
- '''if r <= 0:
- return src
- h, w = src.shape[:2]
- I = src
- res = np.minimum(I , I[[0]+range(h-1) , :])
- res = np.minimum(res, I[range(1,h)+[h-1], :])
- I = res
- res = np.minimum(I , I[:, [0]+range(w-1)])
- res = np.minimum(res, I[:, range(1,w)+[w-1]])
- return zmMinFilterGray(res, r-1)'''
- return cv2.erode(src, np.ones((2*r+1, 2*r+1))) # cv2.erode does the same thing more efficiently
- def guidedfilter(self,I, p, r, eps):
- '''guided filter, following the reference MATLAB implementation'''
- height, width = I.shape
- m_I = cv2.boxFilter(I, -1, (r,r))
- m_p = cv2.boxFilter(p, -1, (r,r))
- m_Ip = cv2.boxFilter(I*p, -1, (r,r))
- cov_Ip = m_Ip-m_I*m_p
-
- m_II = cv2.boxFilter(I*I, -1, (r,r))
- var_I = m_II-m_I*m_I
-
- a = cov_Ip/(var_I+eps)
- b = m_p-a*m_I
-
- m_a = cv2.boxFilter(a, -1, (r,r))
- m_b = cv2.boxFilter(b, -1, (r,r))
- return m_a*I+m_b
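
In each box-filter window the method fits a local linear model between the guide I and the input p; the code above computes exactly

$$ a_k = \frac{\overline{Ip}_k - \bar I_k \bar p_k}{\sigma_k^2 + \epsilon}, \qquad b_k = \bar p_k - a_k \bar I_k, \qquad q_i = \bar a_i I_i + \bar b_i , $$

where bars denote box-filter means and $\sigma_k^2$ is the local variance of I.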
-
- def getV1(self,m, r, eps, w, maxV1): # input: RGB image with values in [0, 1]
- '''estimate the atmospheric veil V1 and the airlight A, where V1 = (1 - t) * A'''
- V1 = np.min(m,2) # dark channel image
- V1 = self.guidedfilter(V1, self.zmMinFilterGray(V1,7), r, eps) # refine it with the guided filter
- bins = 2000
- ht = np.histogram(V1, bins) # estimate the airlight A
- d = np.cumsum(ht[0])/float(V1.size)
- for lmax in range(bins-1, 0, -1):
- if d[lmax]<=0.999:
- break
- A = np.mean(m,2)[V1>=ht[1][lmax]].max()
-
- V1 = np.minimum(V1*w, maxV1) # cap the veil
-
- return V1,A
-
- def imgBrightness(self,img1, a, b):
- h, w, ch = img1.shape # height, width and channel count
- # build an all-zero image src2 of the same shape and dtype (all black)
- src2 = np.zeros([h, w, ch], img1.dtype)
- dst = cv2.addWeighted(img1, a, src2, 1-a, b) # blend: a*img1 + (1-a)*src2 + b
- return dst
-
-
- def deHaze(self,img, r=81, eps=0.001, w=0.95, maxV1=0.80, bGamma=False):
-
- Y = np.zeros(img.shape)
- V1,A = self.getV1(img, r, eps, w, maxV1) # veil image and airlight
- for k in range(3):
- Y[:,:,k] = (img[:,:,k]-V1)/(1-V1/A) # color correction
- Y = np.clip(Y, 0, 1)
- #if bGamma:
- #Y = Y**(np.log(0.3)/np.log(Y.mean())) # gamma correction; larger means brighter (0.3)
- #Y = self.imgBrightness(Y, 2.8, -0.15) # first value is brightness, second is contrast (2.8, -0.15)
- return Y
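
The recovery step implements the standard atmospheric scattering model. With transmission t and airlight A, a hazy pixel is I = J*t + A*(1 - t); writing the veil as V1 = (1 - t) * A and solving for the scene radiance J gives

$$ J = \frac{I - V_1}{1 - V_1 / A} = \frac{I - A}{t} + A , $$

which is exactly the per-channel expression in the loop above.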
-
-
- def init():
- rospy.init_node('remove_fog',anonymous=True)
- t = SubscribeAndPublish()
- rospy.spin()
-
- if __name__ == '__main__':
- i=1
- init()
-
V. Python: Receiving Point Clouds
- #! /usr/bin/env python
-
- from sensor_msgs.msg import PointCloud2
- from sensor_msgs import point_cloud2
- import rospy
- import time
-
- def callback_pointcloud(data):
- assert isinstance(data, PointCloud2)
- gen = point_cloud2.read_points(data, field_names=("x", "y", "z"), skip_nans=True)
- time.sleep(1)
- print(type(gen))
- for p in gen:
- print(" x : %.3f y: %.3f z: %.3f" % (p[0], p[1], p[2]))
-
- def main():
- rospy.init_node('pcl_listener', anonymous=True)
- rospy.Subscriber('/my_camera/depth/points', PointCloud2, callback_pointcloud)
- rospy.spin()
-
- if __name__ == "__main__":
- main()
-
Original article: https://blog.csdn.net/HUASHUDEYANJING/article/details/126503128