• ROS常用模板


    目录

    一、C++图片——图片增强

    1.CMakeLists.txt

    2.package.xml

    3.launch 文件

    4.cpp

    二、C++点云——点云投影

    1.CMakeLists.txt

    2.package.xml

    3. launch

    4. cpp

    三、python——yolov5神经网络

    四、python——图片除雾

    五、python——接收点云


    一、C++图片——图片增强

    1.CMakeLists.txt

    1. cmake_minimum_required(VERSION 2.8.3)
    2. project(image_enhance)
    3. add_compile_options(-std=c++11)
    4. find_package(catkin REQUIRED COMPONENTS roscpp cv_bridge image_transport sensor_msgs rospy std_msgs)
    5. find_package(OpenCV REQUIRED)
    6. catkin_package(
    7. # INCLUDE_DIRS include
    8. # LIBRARIES my_image_transport
    9. # CATKIN_DEPENDS cv_bridge image_transport sensor_msgs
    10. # DEPENDS system_lib
    11. )
    12. include_directories( ${catkin_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} )
    13. add_executable(${PROJECT_NAME}_node src/${PROJECT_NAME}.cpp)
    14. add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
    15. target_link_libraries(${PROJECT_NAME}_node ${catkin_LIBRARIES} ${OpenCV_LIBRARIES})

    2.package.xml

    1. <node pkg="image_enhance" type="image_enhance_node" name="image_enhance_node" output="screen">
    2. <param name="image_topic" value="/pub_t"/>
    3. <param name="frame_rate" value="30"/>
    4. <param name="mode" value="2"/>
    (NOTE: this section is labeled "package.xml" but shows launch-file content identical to section 3 below — the real package.xml listing appears to have been lost when the page was scraped.)

    3.launch 文件

    1. <node pkg="image_enhance" type="image_enhance_node" name="image_enhance_node" output="screen">
    2. <param name="image_topic" value="/pub_t"/>
    3. <param name="frame_rate" value="30"/>
    4. <param name="mode" value="2"/>
    5. </node>

    4.cpp

    1. #include <ros/ros.h>
    2. #include <cv_bridge/cv_bridge.h> // conversion between ROS images and OpenCV images
    3. #include <sensor_msgs/image_encodings.h> // sensor message encodings
    4. #include <sensor_msgs/Image.h>
    5. #include <opencv2/opencv.hpp>
    6. #include <opencv2/highgui/highgui.hpp>
    7. #include <opencv2/imgproc/imgproc.hpp>
    (NOTE: header names reconstructed from usage — the scraper stripped everything inside angle brackets.)
    8. #define _NODE_NAME_ "image_enhancement" //定义节点的名称
    9. //OpenCV的函数都位于cv这一命名空间下,为了调用OpenCV的函数,需要在每个函数前加上cv::,向编译器说明你所调用的函数处于cv命名空间。为了摆脱这种繁琐的工作,可以使用using namespace cv;指令,告诉编译器假设所有函数都位于cv命名空间下。
    10. using namespace cv;
    11. using namespace std;
    12. class ImageEnhancement //节点参数类
    13. {
    14. private://基本参数
    15. ros::Publisher enhance_image_pub_;//发布者
    16. ros::Subscriber image_sub_;//接收者
    17. std::string image_topic_;//信息类型
    18. bool is_show_result_;
    19. bool image_ok_, image_enhanced_;
    20. int frame_rate_;
    21. int mode_;
    22. //0 for 基于直方图均衡化的图像增强
    23. //1 for 基于对数Log变换的图像增强
    24. //2 for 基于伽马变换的图像增强
    25. //3 for 混合增强
    26. cv_bridge::CvImagePtr cv_ptr_;
    27. cv_bridge::CvImagePtr cv_ptr2;
    28. ros::Timer timer_;
    29. public://基本函数
    30. bool init();
    31. void loadimage(const sensor_msgs::ImageConstPtr& msg);
    32. void enhancepub0(const ros::TimerEvent&);
    33. void enhancepub1(const ros::TimerEvent&);
    34. void enhancepub2(const ros::TimerEvent&);
    35. void enhancepub3(const ros::TimerEvent&);
    36. };
    37. int main(int argc, char** argv)
    38. {
    39. ros::init(argc, argv, _NODE_NAME_);
    40. ImageEnhancement enhance;//创建一个类的对象
    41. enhance.init();//调用类的成员函数
    42. ros::spin();//循环
    43. return 0;
    44. }
    45. bool ImageEnhancement::init()//定义ImageEnhancement类的成员函数
    46. {
    47. ros::NodeHandle nh, nh_private("~");//开启节点对象nh
    48. //节点的参数服务器,写在launch文件中的可以随时修改的参数
    49. nh_private.param<std::string>("image_topic", image_topic_, ""); // explicit <std::string>: a bare "" is const char* and breaks template deduction
    50. nh_private.param<int>("frame_rate",frame_rate_,30);
    51. nh_private.param<int>("mode",mode_,0);
    52. image_ok_ = false;//一个关闭标志
    53. image_enhanced_ = false;
    54. enhance_image_pub_ = nh.advertise<sensor_msgs::Image>("/image_enhancement", 1);// define the publisher (message type restored — stripped by page formatting)
    55. image_sub_ = nh.subscribe(image_topic_, 1, &ImageEnhancement::loadimage, this);//定义订阅image_topic_话题的消息,回调函数,以指针形式存储数据 回调函数在一个类中,后面加上this
    56. if(mode_ == 0)
    57. timer_ = nh.createTimer(ros::Duration(0.01), &ImageEnhancement::enhancepub0, this);
    58. else if(mode_ == 1)
    59. timer_ = nh.createTimer(ros::Duration(0.01), &ImageEnhancement::enhancepub1, this);
    60. else if(mode_ == 2)
    61. timer_ = nh.createTimer(ros::Duration(0.01), &ImageEnhancement::enhancepub2, this);
    62. else
    63. ROS_ERROR("none mode is starting!");//报错打印指令
    64. ROS_INFO("image_enhancement initial ok.");// info log
    65. return true; } // init() is declared bool and must return a value on all paths
    66. void ImageEnhancement::loadimage(const sensor_msgs::ImageConstPtr& msg)
    67. {
    68. ROS_INFO("[%s]: getting image!",_NODE_NAME_);
    69. cv_bridge::CvImagePtr cv;//在CVbridge类中创建一个对象cv
    70. cv = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);//转化储存图像
    71. cv_ptr_ = cv;
    72. image_ok_ = true;
    73. image_enhanced_ = false;
    74. }
    75. void ImageEnhancement::enhancepub0(const ros::TimerEvent&)
    76. {
    77. if (image_ok_ == false)
    78. {
    79. ROS_ERROR("[%s]: no image input!",_NODE_NAME_);
    80. return;
    81. }
    82. else if (image_enhanced_ == true)
    83. {
    84. ROS_INFO("[%s]: waiting for new image!",_NODE_NAME_);
    85. return;
    86. }
    87. else
    88. ROS_INFO("[%s]: image enhancement start! mode:0",_NODE_NAME_);
    89. static int width, height;
    90. width = cv_ptr_->image.cols;
    91. height = cv_ptr_->image.rows;
    92. //创建一个enhanced_image对象
    93. cv::Mat enhanced_image(height, width, CV_8UC3);//cv::Mat是OpenCV2和OpenCV3中基本的数据类型长宽和文件格式
    94. enhanced_image.setTo(0);//初始化增强后的图
    95. cv_ptr_->image.copyTo(enhanced_image(Rect(0, 0, width, height)));
    96. cv::Mat imageRGB[3];
    97. split(enhanced_image, imageRGB);
    98. for (int i=0; i<3; ++i)
    99. {
    100. equalizeHist(imageRGB[i], imageRGB[i]);
    101. }
    102. merge(imageRGB, 3, enhanced_image);
    103. //定义发布消息的内容
    104. sensor_msgs::ImagePtr imageMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", enhanced_image).toImageMsg();
    105. imageMsg->header.frame_id = std::string("enhance image");
    106. imageMsg->header.stamp = ros::Time::now();
    107. enhance_image_pub_.publish(imageMsg);//发布消息
    108. ROS_INFO("[%s]: image enhancement done!",_NODE_NAME_);
    109. image_enhanced_ = true;
    110. }
    111. void ImageEnhancement::enhancepub1(const ros::TimerEvent&)
    112. {
    113. if (image_ok_ == false)
    114. {
    115. ROS_ERROR("[%s]: no image input!",_NODE_NAME_);
    116. return;
    117. }
    118. else if (image_enhanced_ == true)
    119. {
    120. ROS_INFO("[%s]: waiting for new image!",_NODE_NAME_);
    121. return;
    122. }
    123. else
    124. ROS_INFO("[%s]: image enhancement start! mode:1",_NODE_NAME_); // fixed copy-paste: this handler is mode 1, not 0
    125. static int width, height;
    126. width = cv_ptr_->image.cols;
    127. height = cv_ptr_->image.rows;
    128. cv::Mat enhanced_image(height, width, CV_8UC3);
    129. enhanced_image.setTo(0);
    130. cv_ptr_->image.copyTo(enhanced_image(Rect(0, 0, width, height)));
    131. cv::Mat srcLog(enhanced_image.size(), CV_32FC3);
    132. for (int i=0; i<enhanced_image.rows; i++)
    133. {
    134. for (int j=0; j<enhanced_image.cols; j++)
    135. {
    136. srcLog.at<Vec3f>(i, j)[0] = log(1 + enhanced_image.at<Vec3b>(i, j)[0]);
    137. srcLog.at<Vec3f>(i, j)[1] = log(1 + enhanced_image.at<Vec3b>(i, j)[1]);
    138. srcLog.at<Vec3f>(i, j)[2] = log(1 + enhanced_image.at<Vec3b>(i, j)[2]);
    139. }
    140. }
    141. //归一化
    142. normalize(srcLog, srcLog, 0, 255, NORM_MINMAX);
    143. //
    144. convertScaleAbs(srcLog, srcLog);
    145. sensor_msgs::ImagePtr imageMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", srcLog).toImageMsg();
    146. imageMsg->header.frame_id = std::string("enhance image");
    147. imageMsg->header.stamp = ros::Time::now();
    148. enhance_image_pub_.publish(imageMsg);
    149. ROS_INFO("[%s]: image enhancement done!",_NODE_NAME_);
    150. image_enhanced_ = true;
    151. }
    152. void ImageEnhancement::enhancepub2(const ros::TimerEvent&)
    153. {
    154. if (image_ok_ == false)
    155. {
    156. ROS_ERROR("[%s]: no image input!",_NODE_NAME_);
    157. return;
    158. }
    159. else if (image_enhanced_ == true)
    160. {
    161. ROS_INFO("[%s]: waiting for new image!",_NODE_NAME_);
    162. return;
    163. }
    164. else
    165. ROS_INFO("[%s]: image enhancement start! mode:2",_NODE_NAME_); // fixed copy-paste: this handler is mode 2, not 0
    166. static int width, height;
    167. width = cv_ptr_->image.cols;
    168. height = cv_ptr_->image.rows;
    169. cv::Mat enhanced_image(height, width, CV_8UC3);
    170. enhanced_image.setTo(0);
    171. cv_ptr_->image.copyTo(enhanced_image(Rect(0, 0, width, height)));
    172. float pixels[256];
    173. for (int i=0; i<256; ++i)
    174. {
    175. pixels[i] = powf(i,2);
    176. }
    177. cv::Mat srcLog(enhanced_image.size(), CV_32FC3);
    178. for (int i=0; i<enhanced_image.rows; i++)
    179. {
    180. for (int j=0; j<enhanced_image.cols; j++)
    181. {
    182. srcLog.at<Vec3f>(i, j)[0] = pixels[enhanced_image.at<Vec3b>(i, j)[0]];
    183. srcLog.at<Vec3f>(i, j)[1] = pixels[enhanced_image.at<Vec3b>(i, j)[1]];
    184. srcLog.at<Vec3f>(i, j)[2] = pixels[enhanced_image.at<Vec3b>(i, j)[2]];
    185. }
    186. }
    187. normalize(srcLog, srcLog, 0, 255, NORM_MINMAX);
    188. //Mat img;
    189. //resize(srcLog, img, Size(640, 360), 0, 0, INTER_CUBIC);
    190. convertScaleAbs(srcLog, srcLog, 4, 30);
    191. resize(srcLog, srcLog,Size(1920,1440),INTER_LINEAR);
    192. medianBlur(srcLog,srcLog, 3);
    193. sensor_msgs::ImagePtr imageMsg = cv_bridge::CvImage(std_msgs::Header(), "bgr8",srcLog).toImageMsg();
    194. imageMsg->header.frame_id = std::string("enhance image");
    195. imageMsg->header.stamp = ros::Time::now();
    196. enhance_image_pub_.publish(imageMsg);
    197. image_enhanced_ = true;
    198. ROS_INFO("[%s]: image enhancement done!",_NODE_NAME_);
    199. }

    二、C++点云——点云投影

    1.CMakeLists.txt

    1. cmake_minimum_required(VERSION 2.8.3)
    2. project(kittivelo_cam)
    3. add_compile_options(-std=c++11)
    4. set(CMAKE_BUILD_TYPE Release)
    5. find_package(catkin REQUIRED COMPONENTS
    6. cv_bridge
    7. image_transport
    8. message_filters
    9. roscpp
    10. rospy
    11. std_msgs
    12. tf
    13. pcl_conversions
    14. tf2
    15. tf2_ros
    16. tf2_geometry_msgs
    17. )
    18. catkin_package(
    19. # INCLUDE_DIRS include
    20. # LIBRARIES colored_pointcloud
    21. # CATKIN_DEPENDS cv_bridge image_transport message_filters roscpp rospy std_msgs
    22. # DEPENDS system_lib
    23. )
    24. include_directories(
    25. include
    26. ${catkin_INCLUDE_DIRS}
    27. )
    28. # opencv
    29. find_package(OpenCV 3 REQUIRED)
    30. include_directories(${OpenCV_INCLUDE_DIRS})
    31. # pcl
    32. find_package(PCL 1.7 REQUIRED)
    33. include_directories(${PCL_INCLUDE_DIRS})
    34. add_definitions(${PCL_DEFINITIONS})
    35. # boost
    36. find_package(Boost REQUIRED)
    37. include_directories(${Boost_INCLUDE_DIRS})
    38. ## Declare a C++ executable
    39. ## With catkin_make all packages are built within a single CMake context
    40. add_executable(kittivelo_cam src/kittivelo_cam.cpp)
    41. ## Specify libraries to link a library or executable target against
    42. target_link_libraries(kittivelo_cam ${catkin_LIBRARIES} ${PCL_LIBRARIES} ${OpenCV_LIBRARIES} ${Boost_LIBRARIES})

    2.package.xml

    1. <?xml version="1.0"?>
    2. <package format="2">
    3. <name>kittivelo_cam</name>
    4. <version>0.0.0</version>
    5. <description>The colored_pointcloud package</description>
    6. <maintainer email="imrs@todo.todo">imrs</maintainer>
    7. <license>TODO</license>
    8. <buildtool_depend>catkin</buildtool_depend>
    9. <build_depend>cv_bridge</build_depend>
    10. <build_depend>image_transport</build_depend>
    11. <build_depend>message_filters</build_depend>
    12. <build_depend>roscpp</build_depend>
    13. <build_depend>rospy</build_depend>
    14. <build_depend>std_msgs</build_depend>
    15. <build_export_depend>cv_bridge</build_export_depend>
    16. <build_export_depend>image_transport</build_export_depend>
    17. <build_export_depend>message_filters</build_export_depend>
    18. <build_export_depend>roscpp</build_export_depend>
    19. <build_export_depend>rospy</build_export_depend>
    20. <build_export_depend>std_msgs</build_export_depend>
    21. <exec_depend>cv_bridge</exec_depend>
    22. <exec_depend>image_transport</exec_depend>
    23. <exec_depend>message_filters</exec_depend>
    24. <exec_depend>roscpp</exec_depend>
    25. <exec_depend>rospy</exec_depend>
    26. <exec_depend>std_msgs</exec_depend>
    27. <export></export>
    28. </package>
    (XML tags reconstructed — the scraper stripped everything inside angle brackets; the triple dependency list maps to build / build_export / exec depends in a standard format-2 package.xml.)

    3. launch

    1. <node pkg="kittivelo_cam" type="kittivelo_cam" name="kittivelo_cam" output="screen"/>

    4. cpp

    1. //
    2. // Created by cai on 2021/8/26.
    3. //
    4. #include
    5. #include
    6. #include
    7. #include
    8. #include
    9. #include
    10. #include
    11. #include
    12. #include
    13. #include
    14. //#include "fssim_common/Cmd.h"
    15. #include
    16. // 稠密矩阵的代数运算(逆,特征值等)
    17. #include
    18. #include
    19. #include "sensor_msgs/LaserScan.h"
    20. #include "geometry_msgs/PoseWithCovarianceStamped.h"
    21. #include
    22. #include
    23. #include
    24. #include
    25. #include
    26. #include
    27. #include
    28. #include
    29. #include
    30. #include
    31. #include
    32. #include
    33. #include
    34. #include
    35. #include
    36. #include
    37. #include
    38. #include
    39. #include
    40. #include
    41. #include
    42. #include
    43. using namespace Eigen;
    44. using namespace cv;
    45. using namespace std;
    46. #include "colored_pointcloud/colored_pointcloud.h"
    47. #include
    48. #include
    49. cv::Mat P_rect_00(3,4,cv::DataType<double>::type);//3x4 projection matrix after rectification
    50. cv::Mat R_rect_00(4,4,cv::DataType<double>::type);//rectifying rotation to make image planes co-planar (stored 4x4 in homogeneous form, matching the allocation)
    51. cv::Mat RT(4,4,cv::DataType<double>::type);//rotation matrix and translation vector
    52. class RsCamFusion
    53. {
    54. //**********************************************************************************************************
    55. //1、定义成员变量
    56. private:
    57. typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::PointCloud2> slamSyncPolicy;
    58. message_filters::Synchronizer<slamSyncPolicy>* sync_;
    59. message_filters::Subscriber<sensor_msgs::Image>* camera_sub1;
    60. message_filters::Subscriber<sensor_msgs::PointCloud2>* lidar_sub;
    61. pcl::PointCloud<PointXYZRGBI>::Ptr colored_cloud_toshow;
    62. pcl::PointCloud<PointXYZRGBI>::Ptr colored_cloud;
    63. pcl::PointCloud<PointXYZRGBI>::Ptr cloud_toshow;
    (NOTE: template parameters reconstructed — stripped by the scraper. cloud_toshow must share colored_cloud's point type because the two clouds are summed in transformpoint; verify against colored_pointcloud.h.)
    64. /*
    65. pcl::PointCloud::Ptr colored_cloud_toshow;
    66. pcl::PointCloud::Ptr colored_cloud;
    67. pcl::PointCloud::Ptr cloud_toshow;
    68. */
    69. pcl::PointCloud::Ptr input_cloud;
    70. pcl::PointCloud::Ptr input_cloud_ptr;
    71. pcl::PointCloud::Ptr raw_cloud;
    72. cv::Mat input_image;
    73. cv::Mat image_to_show,image_to_show1;
    74. int frame_count = 0;
    75. static cv::Size imageSize;
    76. static ros::Publisher pub;
    77. //store calibration data in Opencv matrices
    78. image_transport::Publisher depth_pub ;
    79. sensor_msgs::ImagePtr depth_msg;
    80. ros::NodeHandle nh;
    81. ros::Publisher colored_cloud_showpub;
    82. ros::Subscriber sub;
    83. ros::Publisher fused_image_pub1;
    84. public:
    85. //构造函数
    86. RsCamFusion():
    87. nh("~"){
    88. RT.at<double>(0,0) = 7.533745e-03;RT.at<double>(0,1) = -9.999714e-01;RT.at<double>(0,2) = -6.166020e-04;RT.at<double>(0,3) = -4.069766e-03; // fixed: the last assignment wrote (0,2) twice, leaving translation entry (0,3) uninitialized
    89. RT.at<double>(1,0) = 1.480249e-02;RT.at<double>(1,1) = 7.280733e-04;RT.at<double>(1,2) = -9.998902e-01;RT.at<double>(1,3) = -7.631618e-02;
    90. RT.at<double>(2,0) = 9.998621e-01;RT.at<double>(2,1) = 7.523790e-03;RT.at<double>(2,2) = 1.480755e-02;RT.at<double>(2,3) = -2.717806e-01;
    91. RT.at<double>(3,0) = 0.0;RT.at<double>(3,1) = 0.0;RT.at<double>(3,2) = 0.0;RT.at<double>(3,3) = 1.0;
    92. R_rect_00.at<double>(0,0) = 9.999239e-01;R_rect_00.at<double>(0,1) = 9.837760e-03;R_rect_00.at<double>(0,2) = -7.445048e-03;R_rect_00.at<double>(0,3) = 0.0;
    93. R_rect_00.at<double>(1,0) = -9.869795e-03;R_rect_00.at<double>(1,1) = 9.999421e-01;R_rect_00.at<double>(1,2) = -4.278459e-03;R_rect_00.at<double>(1,3) = 0.0;
    94. R_rect_00.at<double>(2,0) = 7.402527e-03;R_rect_00.at<double>(2,1) = 4.351614e-03;R_rect_00.at<double>(2,2) = 9.999631e-01;R_rect_00.at<double>(2,3) = 0.0;
    95. R_rect_00.at<double>(3,0) = 0.0;R_rect_00.at<double>(3,1) = 0.0;R_rect_00.at<double>(3,2) = 0.0;R_rect_00.at<double>(3,3) = 1.0;
    96. P_rect_00.at<double>(0,0) = 7.215377e+02;P_rect_00.at<double>(0,1) = 0.000000e+00;P_rect_00.at<double>(0,2) = 6.095593e+02;P_rect_00.at<double>(0,3) = 0.000000e+00;
    97. P_rect_00.at<double>(1,0) = 0.000000e+00;P_rect_00.at<double>(1,1) = 7.215377e+02;P_rect_00.at<double>(1,2) = 1.728540e+02;P_rect_00.at<double>(1,3) = 0.000000e+00;
    98. P_rect_00.at<double>(2,0) = 0.000000e+00;P_rect_00.at<double>(2,1) = 0.000000e+00;P_rect_00.at<double>(2,2) = 1.000000e+00;P_rect_00.at<double>(2,3) = 0.000000e+00;
    99. camera_sub1 = new message_filters::Subscriber<sensor_msgs::Image>(nh, "/forward",300);
    100. lidar_sub = new message_filters::Subscriber<sensor_msgs::PointCloud2>(nh, "/kitti/velo/pointcloud",100);
    101. sync_ = new message_filters::Synchronizer<slamSyncPolicy>(slamSyncPolicy(100), *camera_sub1,*lidar_sub);
    102. sync_->registerCallback(boost::bind(&RsCamFusion::callback,this, _1, _2));
    103. cout<<"waite_image"<<endl;
    104. allocateMemory(); //初始化
    105. }
    106. void allocateMemory()
    107. {
    108. raw_cloud.reset(new pcl::PointCloud<pcl::PointXYZI>());
    109. colored_cloud_toshow.reset(new pcl::PointCloud<PointXYZRGBI>());
    110. colored_cloud.reset(new pcl::PointCloud<PointXYZRGBI>());
    111. cloud_toshow.reset(new pcl::PointCloud<PointXYZRGBI>());
    112. input_cloud.reset(new pcl::PointCloud<pcl::PointXYZI>());
    113. input_cloud_ptr.reset(new pcl::PointCloud<pcl::PointXYZI>());
    114. }
    115. void resetParameters(){
    116. raw_cloud->clear();
    117. input_cloud_ptr->clear();
    118. input_cloud->clear();
    119. colored_cloud_toshow->clear();
    120. colored_cloud->clear();
    121. cloud_toshow->clear();
    122. }
    123. void callback(const sensor_msgs::ImageConstPtr input_image_msg1,
    124. const sensor_msgs::PointCloud2ConstPtr input_cloud_msg)
    125. {
    126. resetParameters();
    127. cv::Mat input_image1;
    128. cv_bridge::CvImagePtr cv_ptr1;
    129. std_msgs::Header image_header1 = input_image_msg1->header;
    130. std_msgs::Header cloud_header = input_cloud_msg->header;
    131. //数据获取
    132. //图像ROS消息转化
    133. cv_ptr1 = cv_bridge::toCvCopy(input_image_msg1,sensor_msgs::image_encodings::BGR8);
    134. input_image1 = cv_ptr1->image;
    135. //获取点云
    136. pcl::fromROSMsg(*input_cloud_msg, *input_cloud_ptr);//把input_cloud_msg放
    137. std::vector<int> indices;
    138. pcl::removeNaNFromPointCloud(*input_cloud_ptr, *input_cloud_ptr, indices);//去除无效点
    139. transformpoint(input_cloud_ptr,input_image1,P_rect_00,R_rect_00,RT);
    140. cout<<"start"<<endl;
    141. colored_cloud_showpub = nh.advertise<sensor_msgs::PointCloud2>("colored_cloud_toshow",10);
    142. publishCloudtoShow(colored_cloud_showpub, cloud_header, colored_cloud_toshow);
    143. fused_image_pub1 = nh.advertise<sensor_msgs::Image>("image_to_show",10);
    144. publishImage(fused_image_pub1, image_header1, image_to_show1);
    145. frame_count = frame_count + 1;
    146. }
    147. //××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××
    148. void transformpoint(const pcl::PointCloud<pcl::PointXYZI>::ConstPtr& input_cloud, const cv::Mat input_image, cv::Mat &P_rect_00,cv::Mat &R_rect_00,cv::Mat &RT)
    149. {
    150. cv::Mat X(4,1,cv::DataType<double>::type);
    151. cv::Mat Y(4,1,cv::DataType<double>::type);
    152. cv::Point pt;
    153. std::vector<cv::Point3f> rawPoints; // NOTE(review): element type reconstructed; this vector is never used below
    154. *raw_cloud = *input_cloud;
    155. image_to_show = input_image.clone();
    156. for(int i=0;i<raw_cloud->size();i++) {
    157. // convert each 3D point into homogeneous coordinates and store it in a 4D variable X
    158. X.at<double>(0, 0) = raw_cloud->points[i].x;
    159. X.at<double>(1, 0) = raw_cloud->points[i].y;
    160. X.at<double>(2, 0) = raw_cloud->points[i].z;
    161. X.at<double>(3, 0) = 1;
    162. //apply the projection equation to map X onto the image plane of the camera. Store the result in Y
    163. //计算矩阵
    164. Y=P_rect_00*R_rect_00*RT*X;
    165. pt.x=Y.at<double>(0, 0) / Y.at<double>(2, 0);
    166. pt.y=Y.at<double>(1, 0) / Y.at<double>(2, 0);
    167. // transform Y back into Euclidean coordinates and store the result in the variable pt
    168. float d = Y.at<double>(2, 0)*1000.0;
    169. float val = raw_cloud->points[i].x;
    170. float maxVal = 20.0;
    171. int red = min(255, (int) (255 * abs((val - maxVal) / maxVal)));
    172. int green = min(255, (int) (255 * (1 - abs((val - maxVal) / maxVal))));
    173. if(pt.x<1240 &&pt.x>0 &&pt.y<375 &&pt.y>0 &&d>0)
    174. {
    175. /*
    176. if (int(input_image.at(pt.y, pt.x)[2]) == 128 || int(input_image.at(pt.y, pt.x)[2]) == 152
    177. ||int(input_image.at(pt.y, pt.x)[2]) == 107 || int(input_image.at(pt.y, pt.x)[2]) == 244
    178. ||int(input_image.at(pt.y, pt.x)[2]) == 70 )
    179. {
    180. */
    181. cv::circle(image_to_show, pt, 1, cv::Scalar(0, green, red), cv::FILLED);
    182. image_to_show1 = image_to_show;
    183. PointXYZRGBI point;
    184. point.x = raw_cloud->points[i].x;
    185. point.y = raw_cloud->points[i].y; //to create colored point clouds
    186. point.z = raw_cloud->points[i].z;
    187. point.intensity = raw_cloud->points[i].intensity;
    188. point.g = input_image.at<cv::Vec3b>(pt.y, pt.x)[1];
    189. point.b = input_image.at<cv::Vec3b>(pt.y, pt.x)[0];
    190. point.r = input_image.at<cv::Vec3b>(pt.y, pt.x)[2];
    191. colored_cloud->points.push_back(point);
    192. }
    193. /*
    194. else
    195. {
    196. pcl::PointXYZRGB point;
    197. point.x = raw_cloud->points[i].x;
    198. point.y = raw_cloud->points[i].y; //to create colored point clouds
    199. point.z = raw_cloud->points[i].z;
    200. //point.intensity = raw_cloud->points[i].intensity;
    201. point.g = 0;
    202. point.b = 0;
    203. point.r = 0;
    204. cloud_toshow->points.push_back(point);
    205. }
    206. */
    207. }
    208. *colored_cloud_toshow=*colored_cloud+*cloud_toshow;
    209. }
    210. void publishCloudtoShow(const ros::Publisher& cloudtoshow_pub, const std_msgs::Header& header,
    211. const pcl::PointCloud<PointXYZRGBI>::ConstPtr& cloud)
    212. {
    213. sensor_msgs::PointCloud2 output_msg;
    214. pcl::toROSMsg(*cloud, output_msg);
    215. output_msg.header = header;
    216. cloudtoshow_pub.publish(output_msg);
    217. }
    218. void publishImage(const ros::Publisher& image_pub, const std_msgs::Header& header, const cv::Mat image)
    219. {
    220. cv_bridge::CvImage output_image;
    221. output_image.header = header;
    222. output_image.encoding = sensor_msgs::image_encodings::TYPE_8UC3;
    223. output_image.image = image;
    224. image_pub.publish(output_image);
    225. }
    226. };
    227. //*****************************************************************************************************
    228. //
    229. // 程序入口
    230. //
    231. //×××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××××
    232. int main(int argc, char** argv)
    233. {
    234. //1、节点初始化 及定义参数
    235. ros::init(argc, argv, "kitti3D2");
    236. RsCamFusion RF;
    237. ros::spin();
    238. return 0;
    239. }

    三、python——yolov5神经网络

    1. #!/usr/bin/env python
    2. # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
    3. """
    4. Run inference on images, videos, directories, streams, etc.
    5. Usage - sources:
    6. $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam
    7. img.jpg # image
    8. vid.mp4 # video
    9. path/ # directory
    10. path/*.jpg # glob
    11. 'https://youtu.be/Zgi9g1ksQHc' # YouTube
    12. 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
    13. Usage - formats:
    14. $ python path/to/detect.py --weights yolov5s.pt # PyTorch
    15. yolov5s.torchscript # TorchScript
    16. yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
    17. yolov5s.xml # OpenVINO
    18. yolov5s.engine # TensorRT
    19. yolov5s.mlmodel # CoreML (MacOS-only)
    20. yolov5s_saved_model # TensorFlow SavedModel
    21. yolov5s.pb # TensorFlow GraphDef
    22. yolov5s.tflite # TensorFlow Lite
    23. yolov5s_edgetpu.tflite # TensorFlow Edge TPU
    24. """
    25. import os
    26. import roslib
    27. import rospy
    28. from std_msgs.msg import Header
    29. from std_msgs.msg import String
    30. from sensor_msgs.msg import Image
    31. import numpy as np
    32. import argparse
    33. import os
    34. import sys
    35. from pathlib import Path
    36. import cv2
    37. import torch
    38. import torch.backends.cudnn as cudnn
    39. FILE = Path(__file__).resolve()
    40. ROOT = FILE.parents[0] # YOLOv5 root directory
    41. if str(ROOT) not in sys.path:
    42. sys.path.append(str(ROOT)) # add ROOT to PATH
    43. ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
    44. from models.common import DetectMultiBackend
    45. from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
    46. from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr,
    47. increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
    48. from utils.plots import Annotator, colors, save_one_box
    49. from utils.torch_utils import select_device, time_sync
    50. @torch.no_grad()  # NOTE(review): upstream YOLOv5 applies this decorator to run(), not to a class — decorating the class is likely unintended; verify
    51. class SubscribeAndPublish:
    52. def __init__(self):
    53. self.all_obstacle_str=''
    54. self.sub1_name="/cam_rgb1/usb_cam/image_raw"
    55. self.sub1= rospy.Subscriber(self.sub1_name, Image,self.callback_rgb)
    56. self.pub1_name="detect_rgb"
    57. self.pub1= rospy.Publisher(self.pub1_name, Image,queue_size=1)
    58. self.model=model
    59. self.device=device
    60. self.img_rgb=[]
    61. def callback_rgb(self,data):
    62. print('callback1')
    63. img_rgb = np.frombuffer(data.data, dtype=np.uint8).reshape(data.height, data.width, -1)
    64. img_rgb=img_rgb[:,:,::-1]
    65. self.img_rgb=img_rgb
    66. cv2.imwrite('./temp/rgb/rgb.jpg',img_rgb)
    67. img_rgb=self.run(**vars(opt))
    68. if len(img_rgb)>0:
    69. print('send img')
    70. self.publish_image(self.pub1,img_rgb,'base_link')
    71. def publish_image(self,pub, data, frame_id='base_link'):
    72. assert len(data.shape) == 3, 'len(data.shape) must be equal to 3.'
    73. header = Header(stamp=rospy.Time.now())
    74. header.frame_id = frame_id
    75. msg = Image()
    76. msg.height = data.shape[0]
    77. msg.width = data.shape[1]
    78. msg.encoding = 'rgb8'
    79. msg.data = np.array(data).tobytes()  # tostring() is deprecated in NumPy; tobytes() is the byte-identical replacement
    80. msg.header = header
    81. msg.step = msg.width * 1 * 3
    82. pub.publish(msg)
    83. def run(self, weights=ROOT / 'yolov5s.pt', # model.pt path(s) #在类里面+self
    84. source=ROOT / './temp/rgb/', # file/dir/URL/glob, 0 for webcam
    85. data=ROOT / 'data/coco128.yaml', # dataset.yaml path
    86. imgsz=(640, 640), # inference size (height, width)
    87. conf_thres=0.25, # confidence threshold
    88. iou_thres=0.45, # NMS IOU threshold
    89. max_det=1000, # maximum detections per image
    90. device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
    91. view_img=False, # show results
    92. save_txt=False, # save results to *.txt
    93. save_conf=False, # save confidences in --save-txt labels
    94. save_crop=False, # save cropped prediction boxes
    95. nosave=False, # do not save images/videos
    96. classes=None, # filter by class: --class 0, or --class 0 2 3
    97. agnostic_nms=False, # class-agnostic NMS
    98. augment=False, # augmented inference
    99. visualize=False, # visualize features
    100. update=False, # update all models
    101. project=ROOT / 'runs/detect', # save results to project/name
    102. name='exp', # save results to project/name
    103. exist_ok=False, # existing project/name ok, do not increment
    104. line_thickness=3, # bounding box thickness (pixels)
    105. hide_labels=False, # hide labels
    106. hide_conf=False, # hide confidences
    107. half=False, # use FP16 half-precision inference
    108. dnn=False, # use OpenCV DNN for ONNX inference
    109. ):
    110. source = str(source)
    111. save_img = not nosave and not source.endswith('.txt') # save inference images
    112. is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    113. is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    114. webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
    115. if is_url and is_file:
    116. source = check_file(source) # download
    117. # Directories
    118. save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
    119. # (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
    120. # Load model
    121. stride, names, pt = model.stride, model.names, model.pt
    122. imgsz = check_img_size(imgsz, s=stride) # check image size
    123. # Dataloader
    124. if webcam:
    125. view_img = check_imshow()
    126. cudnn.benchmark = True # set True to speed up constant image size inference
    127. dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
    128. bs = len(dataset) # batch_size
    129. else:
    130. dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
    131. bs = 1 # batch_size
    132. vid_path, vid_writer = [None] * bs, [None] * bs
    133. # Run inference
    134. model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
    135. dt, seen = [0.0, 0.0, 0.0], 0
    136. for path, im, im0s, vid_cap, s in dataset:
    137. t1 = time_sync()
    138. im=torch.from_numpy(im).to(self.device)
    139. im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
    140. im /= 255 # 0 - 255 to 0.0 - 1.0
    141. if len(im.shape) == 3:
    142. im = im[None] # expand for batch dim
    143. t2 = time_sync()
    144. dt[0] += t2 - t1
    145. # Inference
    146. visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
    147. pred = model(im, augment=augment, visualize=visualize)
    148. t3 = time_sync()
    149. dt[1] += t3 - t2
    150. # NMS
    151. pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
    152. dt[2] += time_sync() - t3
    153. # Second-stage classifier (optional)
    154. # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
    155. # Process predictions
    156. for i, det in enumerate(pred): # per image
    157. seen += 1
    158. if webcam: # batch_size >= 1
    159. p, im0, frame = path[i], im0s[i].copy(), dataset.count
    160. s += f'{i}: '
    161. else:
    162. p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
    163. p = Path(p) # to Path
    164. save_path = str(save_dir / p.name) # im.jpg
    165. txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt
    166. s += '%gx%g ' % im.shape[2:] # print string
    167. gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
    168. imc = im0.copy() if save_crop else im0 # for save_crop
    169. annotator = Annotator(im0, line_width=line_thickness, example=str(names))
    170. if len(det):
    171. # Rescale boxes from img_size to im0 size
    172. det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
    173. # Print results
    174. for c in det[:, -1].unique():
    175. n = (det[:, -1] == c).sum() # detections per class
    176. s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
    177. # Write results
    178. for *xyxy, conf, cls in reversed(det):
    179. if save_txt: # Write to file
    180. xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
    181. line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
    182. with open(txt_path + '.txt', 'a') as f:
    183. f.write(('%g ' * len(line)).rstrip() % line + '\n')
    184. if save_img or save_crop or view_img: # Add bbox to image
    185. c = int(cls) # integer class
    186. label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
    187. annotator.box_label(xyxy, label, color=colors(c, True))
    188. if save_crop:
    189. save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
    190. # Stream results
    191. im0 = annotator.result()
    192. if view_img:
    193. cv2.imshow(str(p), im0)
    194. cv2.waitKey(1) # 1 millisecond
    195. # Save results (image with detections)
    196. if save_img:
    197. if dataset.mode == 'image':
    198. cv2.imwrite(save_path, im0)
    199. else: # 'video' or 'stream'
    200. if vid_path[i] != save_path: # new video
    201. vid_path[i] = save_path
    202. if isinstance(vid_writer[i], cv2.VideoWriter):
    203. vid_writer[i].release() # release previous video writer
    204. if vid_cap: # video
    205. fps = vid_cap.get(cv2.CAP_PROP_FPS)
    206. w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    207. h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    208. else: # stream
    209. fps, w, h = 30, im0.shape[1], im0.shape[0]
    210. save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
    211. vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    212. vid_writer[i].write(im0)
    213. # Print time (inference-only)
    214. LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')
    215. # Print results
    216. t = tuple(x / seen * 1E3 for x in dt) # speeds per image
    217. LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
    218. if save_txt or save_img:
    219. s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
    220. LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    221. if update:
    222. strip_optimizer(weights) # update model (to fix SourceChangeWarning)
    223. return im0
    224. def parse_opt():
    225. parser = argparse.ArgumentParser()
    226. parser.add_argument('--weights', type=str, default=ROOT / '/home/cxl/ros_yolov5/src/yolov5/weights/yolov5x.pt', help='model path(s)')
    227. parser.add_argument('--source', type=str, default=ROOT / './temp/rgb/', help='file/dir/URL/glob, 0 for webcam')
    228. parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
    229. parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    230. parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')#概率大于0.25显示出来
    231. parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')#检测框的概率
    232. parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
    233. parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    234. parser.add_argument('--view-img', action='store_true', help='show results')#实时查看结果
    235. parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')#保存标注结果
    236. parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')#保存标注结果
    237. parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') #保存置信度
    238. parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    239. parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') #只检测特定类别
    240. parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    241. parser.add_argument('--augment', action='store_true', help='augmented inference')#增强算法
    242. parser.add_argument('--visualize', action='store_true', help='visualize features')
    243. parser.add_argument('--update', action='store_true', help='update all models')
    244. parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')#结果保存在什么位置
    245. parser.add_argument('--name', default='exp', help='save results to project/name')
    246. parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')#保存在原文件夹还是新文件夹
    247. parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
    248. parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    249. parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    250. parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    251. parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    252. opt = parser.parse_args() #参数都会放到opt
    253. opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
    254. print_args(FILE.stem, opt)
    255. return opt
    256. def main(opt,model,device):
    257. rospy.init_node('yolov5', anonymous=True)
    258. #####################
    259. t=SubscribeAndPublish()
    260. #####################
    261. rospy.spin()
    262. if __name__ == "__main__":
    263. opt = parse_opt()
    264. device = ''
    265. weights = '/home/cxl/ros_yolov5/src/yolov5/weights/yolov5x.pt'
    266. dnn=False
    267. device = select_device(device)
    268. model = DetectMultiBackend(weights, device=device, dnn=dnn)
    269. main(opt,model,device)

    四、python——图片除雾

    1. #!/usr/bin/env python
    2. import os
    3. import roslib
    4. import rospy
    5. from std_msgs.msg import Header
    6. from std_msgs.msg import String
    7. from sensor_msgs.msg import Image
    8. import cv2
    9. import numpy as np
    10. import argparse
    11. class SubscribeAndPublish:
    12. def __init__(self):
    13. print("waiting image")
    14. self.all_obstacle_str=''
    15. self.sub1_name="/image_get"
    16. self.sub1= rospy.Subscriber(self.sub1_name, Image,self.callback)
    17. self.pub1_name="refog_image"
    18. self.pub1= rospy.Publisher(self.pub1_name, Image,queue_size=1)
    19. self.img = []
    20. def callback(self,data):
    21. print('callback')
    22. global i
    23. img = np.frombuffer(data.data, dtype=np.uint8).reshape(data.height, data.width, -1)
    24. img = img[:,:,::-1]
    25. img = cv2.resize(img, (720, 405), interpolation=cv2.INTER_LINEAR)
    26. #img = cv2.medianBlur(img, 5)
    27. #kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32) #定义一个核
    28. #img = cv2.filter2D(img, -1, kernel=kernel)
    29. #print(img.shape)
    30. m = self.deHaze(img/255)*255
    31. #print(dtype) # 处理完要float64 还有负值
    32. m = np.clip(m,a_min=0,a_max=255) #先归一化到0-2500消除负数
    33. #print(m)
    34. cv2.imwrite("/home/cxl/ros_yolov5/src/datasets/result/2.png",m)
    35. m = m.astype(np.int8) #在转化为8位数
    36. i+=1
    37. #print(m.dtype)
    38. self.publish_image(self.pub1,m,'base_link')
    39. #print('finish remove fog')
    40. def publish_image(self,pub, data, frame_id='base_link'):
    41. assert len(data.shape) == 3, 'len(data.shape) must be equal to 3.'
    42. print(data.shape)
    43. header = Header(stamp=rospy.Time.now())
    44. header.frame_id = frame_id
    45. msg = Image()
    46. msg.height = data.shape[0]
    47. msg.width = data.shape[1]
    48. msg.encoding = 'rgb8' #传入数据要求是8位
    49. msg.data = np.array(data).tostring()
    50. msg.header = header
    51. msg.step = msg.width *1*3
    52. #print("**************************")
    53. pub.publish(msg)
    54. #print("---------------------------")
    55. def zmMinFilterGray(self,src, r=7):
    56. '''最小值滤波,r是滤波器半径'''
    57. '''if r <= 0:
    58. return src
    59. h, w = src.shape[:2]
    60. I = src
    61. res = np.minimum(I , I[[0]+range(h-1) , :])
    62. res = np.minimum(res, I[range(1,h)+[h-1], :])
    63. I = res
    64. res = np.minimum(I , I[:, [0]+range(w-1)])
    65. res = np.minimum(res, I[:, range(1,w)+[w-1]])
    66. return zmMinFilterGray(res, r-1)'''
    67. return cv2.erode(src, np.ones((2*r+1, 2*r+1))) #使用opencv的erode函数更高效
    68. def guidedfilter(self,I, p, r, eps):
    69. '''引导滤波,直接参考网上的matlab代码'''
    70. height, width = I.shape
    71. m_I = cv2.boxFilter(I, -1, (r,r))
    72. m_p = cv2.boxFilter(p, -1, (r,r))
    73. m_Ip = cv2.boxFilter(I*p, -1, (r,r))
    74. cov_Ip = m_Ip-m_I*m_p
    75. m_II = cv2.boxFilter(I*I, -1, (r,r))
    76. var_I = m_II-m_I*m_I
    77. a = cov_Ip/(var_I+eps)
    78. b = m_p-a*m_I
    79. m_a = cv2.boxFilter(a, -1, (r,r))
    80. m_b = cv2.boxFilter(b, -1, (r,r))
    81. return m_a*I+m_b
    82. def getV1(self,m, r, eps, w, maxV1): #输入rgb图像,值范围[0,1]
    83. '''计算大气遮罩图像V1和光照值A, V1 = 1-t/A'''
    84. V1 = np.min(m,2) #得到暗通道图像
    85. V1 = self.guidedfilter(V1, self.zmMinFilterGray(V1,7), r, eps) #使用引导滤波优化
    86. bins = 2000 #2000
    87. ht = np.histogram(V1, bins) #计算大气光照A
    88. d = np.cumsum(ht[0])/float(V1.size)
    89. for lmax in range(bins-1, 0, -1):
    90. if d[lmax]<=0.999:
    91. break
    92. A = np.mean(m,2)[V1>=ht[1][lmax]].max()
    93. V1 = np.minimum(V1*w, maxV1) #对值范围进行限制
    94. return V1,A
    95. def imgBrightness(self,img1, a, b):
    96. h, w, ch = img1.shape#获取shape的数值,height和width、通道
    97. #新建全零图片数组src2,将height和width,类型设置为原图片的通道类型(色素全为零,输出为全黑图片)
    98. src2 = np.zeros([h, w, ch], img1.dtype)
    99. dst = cv2.addWeighted(img1, a, src2, 1-a, b)#addWeighted函数说明如下
    100. return dst
    101. def deHaze(self,img, r=81, eps=0.001, w=0.95, maxV1=0.80, bGamma=False): #r=81, eps=0.001, w=0.95, maxV1=0.80, bGamma=False
    102. Y = np.zeros(img.shape)
    103. V1,A = self.getV1(img, r, eps, w, maxV1) #得到遮罩图像和大气光照
    104. for k in range(3):
    105. Y[:,:,k] = (img[:,:,k]-V1)/(1-V1/A) #颜色校正
    106. Y = np.clip(Y, 0, 1)
    107. #if bGamma:
    108. #Y = Y**(np.log(0.3)/np.log(Y.mean())) #gamma校正, 越大越亮 0.3
    109. #Y = self.imgBrightness(Y, 2.8, -0.15) #第一个值是亮度,第二个是对比度 2.8 -0.15
    110. return Y
    111. def init():
    112. rospy.init_node('remove_fog',anonymous=True)
    113. t = SubscribeAndPublish()
    114. rospy.spin()
    115. if __name__ == '__main__':
    116. i=1
    117. init()

    五、python——接收点云

    1. #! /usr/bin/env python
    2. from sensor_msgs.msg import PointCloud2
    3. from sensor_msgs import point_cloud2
    4. import rospy
    5. import time
    6. def callback_pointcloud(data):
    7. assert isinstance(data, PointCloud2)
    8. gen = point_cloud2.read_points(data, field_names=("x", "y", "z"), skip_nans=True)
    9. time.sleep(1)
    10. print type(gen)
    11. for p in gen:
    12. print " x : %.3f y: %.3f z: %.3f" %(p[0],p[1],p[2])
    13. def main():
    14. rospy.init_node('pcl_listener', anonymous=True)
    15. rospy.Subscriber('/my_camera/depth/points', PointCloud2, callback_pointcloud)
    16. rospy.spin()
    17. if __name__ == "__main__":
    18. main()

     

  • 相关阅读:
    【代码源每日一题Div1】平方计数「nlogn巧妙枚举倍数优化」
    一生一芯10——verilator v5.008环境搭建
    【优化组合】基于遗传算法求解不同投资比例的收益附matlab代码
    .NET 9 预览版 3 发布
    EFLK日志收集
    金仓数据库KingbaseES客户端编程接口指南-ado.net(14. 示例)
    java毕业设计智慧问诊系统Mybatis+系统+数据库+调试部署
    【亲测有效】hive sql DML语句优化思路 hive表查询优化 优化你的hive任务,all you need,持续更新中
    发现广告的是什么?
    MFC Windows 程序设计[151]之色彩控件之谜
  • 原文地址:https://blog.csdn.net/HUASHUDEYANJING/article/details/126503128