• How rviz obtains the 3D coordinates of a point cloud point selected in the image


    I used to assume that, since rviz renders with OpenGL, recovering the real 3D coordinates behind a pixel in the image would be done with the APIs OpenGL provides. Reading the code shows otherwise: rviz only uses OpenGL (through Ogre) for rendering and implements everything else itself. The image/view layer follows the MVC design pattern, and rviz provides its own classes for perspective projection, coordinate transformation, and everything related. The code that obtains the 3D coordinates of a selected point cloud point is here:

    src/rviz/selection/selection_manager.cpp:

    bool SelectionManager::getPatchDepthImage(Ogre::Viewport* viewport,
                                              int x,
                                              int y,
                                              unsigned width,
                                              unsigned height,
                                              std::vector<float>& depth_vector)
    {
      unsigned int num_pixels = width * height;
      depth_vector.reserve(num_pixels);

      setDepthTextureSize(width, height);

      M_CollisionObjectToSelectionHandler::iterator handler_it = objects_.begin();
      M_CollisionObjectToSelectionHandler::iterator handler_end = objects_.end();

      for (; handler_it != handler_end; ++handler_it)
      {
        handler_it->second->preRenderPass(0);
      }

      if (render(viewport, depth_render_texture_, x, y, x + width, y + height, depth_pixel_box_,
                 "Depth", depth_texture_width_, depth_texture_height_))
      {
        uint8_t* data_ptr = (uint8_t*)depth_pixel_box_.data;

        for (uint32_t pixel = 0; pixel < num_pixels; ++pixel)
        {
          uint8_t a = data_ptr[4 * pixel];
          uint8_t b = data_ptr[4 * pixel + 1];
          uint8_t c = data_ptr[4 * pixel + 2];

          int int_depth = (c << 16) | (b << 8) | a;
          float normalized_depth = ((float)int_depth) / (float)0xffffff;
          depth_vector.push_back(normalized_depth * camera_->getFarClipDistance());
        }
      }
      else
      {
        ROS_WARN("Failed to render depth patch\n");
        return false;
      }

      handler_it = objects_.begin();
      handler_end = objects_.end();

      for (; handler_it != handler_end; ++handler_it)
      {
        handler_it->second->postRenderPass(0);
      }

      return true;
    }
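
    The depth pass above renders the scene into an offscreen texture whose R/G/B bytes hold a 24-bit depth value normalized to [0, 1]; the loop reassembles that integer and rescales it to metres with the camera's far clip distance. A standalone sketch of just that decode step (byte values and far clip distance are made up for illustration):

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      // Hypothetical raw RGBA pixel: a, b, c are the low, middle and high
      // bytes of the packed 24-bit depth, exactly as read in the loop above.
      uint8_t a = 0x00, b = 0x80, c = 0x3f; // packed value 0x3f8000
      float far_clip = 100.0f;              // camera far clip distance, metres

      int int_depth = (c << 16) | (b << 8) | a;                    // 4161536
      float normalized_depth = (float)int_depth / (float)0xffffff; // ~0.2480
      float depth = normalized_depth * far_clip;                   // ~24.80 m

      printf("depth = %.2f m\n", depth);
      return 0;
    }
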
    bool SelectionManager::get3DPatch(Ogre::Viewport* viewport,
                                      int x,
                                      int y,
                                      unsigned width,
                                      unsigned height,
                                      bool skip_missing,
                                      std::vector<Ogre::Vector3>& result_points)
    {
      boost::recursive_mutex::scoped_lock lock(global_mutex_);
      ROS_DEBUG("SelectionManager.get3DPatch()");

      std::vector<float> depth_vector;

      if (!getPatchDepthImage(viewport, x, y, width, height, depth_vector))
        return false;

      unsigned int pixel_counter = 0;
      Ogre::Matrix4 projection = camera_->getProjectionMatrix();
      float depth;

      for (unsigned y_iter = 0; y_iter < height; ++y_iter)
      {
        for (unsigned x_iter = 0; x_iter < width; ++x_iter)
        {
          depth = depth_vector[pixel_counter];

          // Deal with missing or invalid points
          if ((depth > camera_->getFarClipDistance()) || (depth == 0))
          {
            ++pixel_counter;
            if (!skip_missing)
            {
              result_points.push_back(Ogre::Vector3(NAN, NAN, NAN));
            }
            continue;
          }

          Ogre::Vector3 result_point;
          // We want to shoot rays through the center of pixels, not the corners,
          // so add .5 pixels to the x and y coordinate to get to the center
          // instead of the top left of the pixel.
          Ogre::Real screenx = float(x_iter + .5) / float(width);
          Ogre::Real screeny = float(y_iter + .5) / float(height);
          if (projection[3][3] == 0.0) // If this is a perspective projection
          {
            // get world-space ray from camera & mouse coord
            Ogre::Ray vp_ray = camera_->getCameraToViewportRay(screenx, screeny);

            // transform ray direction back into camera coords
            Ogre::Vector3 dir_cam = camera_->getDerivedOrientation().Inverse() * vp_ray.getDirection();

            // normalize, so dir_cam.z == -depth
            dir_cam = dir_cam / dir_cam.z * depth * -1;

            // compute 3d point from camera origin and direction
            result_point = camera_->getDerivedPosition() + camera_->getDerivedOrientation() * dir_cam;
          }
          else // else this must be an orthographic projection.
          {
            // For orthographic projection, getCameraToViewportRay() does
            // the right thing for us, and the above math does not work.
            Ogre::Ray ray;
            camera_->getCameraToViewportRay(screenx, screeny, &ray);

            result_point = ray.getPoint(depth);
          }

          result_points.push_back(result_point);
          ++pixel_counter;
        }
      }

      return !result_points.empty();
    }
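
    In the perspective branch, the key step is rescaling the camera-space ray direction so that its z component equals -depth (Ogre cameras look down the negative z axis), which turns the rendered depth back into a full 3D point. A minimal sketch of the same math without Ogre, assuming a symmetric frustum given by a vertical field of view and aspect ratio (all names here are illustrative, not rviz API):

    #include <cmath>
    #include <cstdio>

    struct Vec3
    {
      float x, y, z;
    };

    // Back-project a screen position plus depth into a camera-space point.
    // screenx/screeny are in [0, 1] with (0, 0) at the top-left, matching the
    // coordinates rviz passes to getCameraToViewportRay().
    Vec3 unproject(float screenx, float screeny, float depth, float fovy, float aspect)
    {
      // Normalized device coordinates in [-1, 1]; screen y grows downward, so flip it.
      float ndc_x = 2.0f * screenx - 1.0f;
      float ndc_y = 1.0f - 2.0f * screeny;

      // Camera-space ray direction; the camera looks down -z.
      float tan_half = std::tan(fovy / 2.0f);
      Vec3 dir = {ndc_x * tan_half * aspect, ndc_y * tan_half, -1.0f};

      // Scale so dir.z == -depth; this mirrors
      // dir_cam = dir_cam / dir_cam.z * depth * -1 in get3DPatch().
      float s = depth / -dir.z;
      return Vec3{dir.x * s, dir.y * s, dir.z * s};
    }

    int main()
    {
      float fovy = 3.14159265f / 2.0f; // 90-degree vertical FOV
      // Center pixel of the viewport at 24.80 m -> (0, 0, -24.80).
      Vec3 p = unproject(0.5f, 0.5f, 24.80f, fovy, 4.0f / 3.0f);
      printf("camera-space point: %.2f %.2f %.2f\n", p.x, p.y, p.z);
      return 0;
    }

    The world-space point is then camera position + camera orientation * this camera-space point, which is exactly the last line of the perspective branch above.
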
    bool SelectionManager::get3DPoint(Ogre::Viewport* viewport, int x, int y, Ogre::Vector3& result_point)
    {
      ROS_DEBUG("SelectionManager.get3DPoint()");

      std::vector<Ogre::Vector3> result_points_temp;
      bool success = get3DPatch(viewport, x, y, 1, 1, true, result_points_temp);
      if (result_points_temp.empty())
      {
        // return result_point unmodified if get point fails.
        return false;
      }
      result_point = result_points_temp[0];

      return success;
    }

    The world 3D coordinates are computed with the ray method: a view ray is cast through the selected pixel and pushed out to the rendered depth. SelectionManager::get3DPoint() is called from many places in rviz; wherever the UI needs to show the coordinates of a point, this is the function doing the work.
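
    As a usage sketch, this is roughly how a tool would query the point under the mouse cursor, modeled on rviz's built-in point tools (InspectPointTool is a made-up name; context_, get3DPoint() and the event fields are the rviz API shown above):

    #include <ros/console.h>
    #include <rviz/tool.h>
    #include <rviz/display_context.h>
    #include <rviz/selection/selection_manager.h>
    #include <rviz/viewport_mouse_event.h>
    #include <OgreVector3.h>

    // Hypothetical tool: print the 3D point under the cursor on mouse events.
    class InspectPointTool : public rviz::Tool
    {
    public:
      void activate() override {}
      void deactivate() override {}

      int processMouseEvent(rviz::ViewportMouseEvent& event) override
      {
        Ogre::Vector3 pos;
        if (context_->getSelectionManager()->get3DPoint(event.viewport, event.x, event.y, pos))
        {
          ROS_INFO("point under cursor: %.3f %.3f %.3f", pos.x, pos.y, pos.z);
        }
        return Render;
      }
    };
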

  • Original article: https://blog.csdn.net/XCCCCZ/article/details/134469493