• RealSense D435 Development Diary (a small hands-on pyrealsense project)


    🌞Welcome to the world of machine learning
    🌈Blog homepage: 卿云阁

    💌You are welcome to follow🎉, like👍, bookmark⭐️ and leave a comment📝

    🌟This article is an original work by 卿云阁!

    🌠This stage is the Qi Refining stage; may every fellow cultivator complete the breakthrough smoothly

    📆First published: 🌹June 22, 2022🌹

    ✉️I hope we can complete this journey of advancement together!

    🙏The author's skill is quite limited, so if you find any mistakes, please bombard the comments! Many thanks!

     June 22, Wednesday, sunny


    🍈 1. Rendering depth and color images with OpenCV and Numpy

    ## License: Apache 2.0. See LICENSE file in root directory.
    ## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.

    ###############################################
    ##      Open CV and Numpy integration        ##
    ###############################################

    import pyrealsense2 as rs
    import numpy as np
    import cv2

    # Configure depth and color streams
    pipeline = rs.pipeline()
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    found_rgb = False
    for s in device.sensors:
        if s.get_info(rs.camera_info.name) == 'RGB Camera':
            found_rgb = True
            break
    if not found_rgb:
        print("The demo requires Depth camera with Color sensor")
        exit(0)

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    pipeline.start(config)

    try:
        while True:
            # Wait for a coherent pair of frames: depth and color
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert images to numpy arrays
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            depth_colormap_dim = depth_colormap.shape
            color_colormap_dim = color_image.shape

            # If depth and color resolutions are different, resize color image to match depth image for display
            if depth_colormap_dim != color_colormap_dim:
                resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
                images = np.hstack((resized_color_image, depth_colormap))
            else:
                images = np.hstack((color_image, depth_colormap))

            # Show images
            cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('RealSense', images)
            cv2.waitKey(1)

    finally:
        # Stop streaming
        pipeline.stop()
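
    One extra note of my own (this snippet is not part of the Intel example above): the colormapped image is only a visualization, and the depth frame can report the real metric distance directly through get_distance(). Below is a minimal standalone sketch; the pixel (320, 240) is just an arbitrary point near the image center.

    # Sketch: read the metric distance at one pixel of a single depth frame.
    # Assumes only that a RealSense depth camera is connected.
    import pyrealsense2 as rs

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    pipeline.start(config)
    try:
        frames = pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        if depth_frame:
            # get_distance() applies the depth scale internally and returns meters
            print("Distance at (320, 240): {:.3f} m".format(depth_frame.get_distance(320, 240)))
    finally:
        pipeline.stop()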

     

     🍉 2. Background removal by aligning the depth image to the color image and applying a simple depth-clipping computation

    ## License: Apache 2.0. See LICENSE file in root directory.
    ## Copyright(c) 2017 Intel Corporation. All Rights Reserved.

    #####################################################
    ##              Align Depth to Color               ##
    #####################################################

    # First import the library
    import pyrealsense2 as rs
    # Import Numpy for easy array manipulation
    import numpy as np
    # Import OpenCV for easy image rendering
    import cv2

    # Create a pipeline
    pipeline = rs.pipeline()

    # Create a config and configure the pipeline to stream
    # different resolutions of color and depth streams
    config = rs.config()

    # Get device product line for setting a supporting resolution
    pipeline_wrapper = rs.pipeline_wrapper(pipeline)
    pipeline_profile = config.resolve(pipeline_wrapper)
    device = pipeline_profile.get_device()
    device_product_line = str(device.get_info(rs.camera_info.product_line))

    found_rgb = False
    for s in device.sensors:
        if s.get_info(rs.camera_info.name) == 'RGB Camera':
            found_rgb = True
            break
    if not found_rgb:
        print("The demo requires Depth camera with Color sensor")
        exit(0)

    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)

    if device_product_line == 'L500':
        config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
    else:
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming
    profile = pipeline.start(config)

    # Getting the depth sensor's depth scale (see rs-align example for explanation)
    depth_sensor = profile.get_device().first_depth_sensor()
    depth_scale = depth_sensor.get_depth_scale()
    print("Depth Scale is: ", depth_scale)

    # We will be removing the background of objects more than
    # clipping_distance_in_meters meters away
    clipping_distance_in_meters = 1  # 1 meter
    clipping_distance = clipping_distance_in_meters / depth_scale

    # Create an align object
    # rs.align allows us to perform alignment of depth frames to others frames
    # The "align_to" is the stream type to which we plan to align depth frames.
    align_to = rs.stream.color
    align = rs.align(align_to)

    # Streaming loop
    try:
        while True:
            # Get frameset of color and depth
            frames = pipeline.wait_for_frames()
            # frames.get_depth_frame() is a 640x360 depth image

            # Align the depth frame to color frame
            aligned_frames = align.process(frames)

            # Get aligned frames
            aligned_depth_frame = aligned_frames.get_depth_frame()  # aligned_depth_frame is a 640x480 depth image
            color_frame = aligned_frames.get_color_frame()

            # Validate that both frames are valid
            if not aligned_depth_frame or not color_frame:
                continue

            depth_image = np.asanyarray(aligned_depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Remove background - Set pixels further than clipping_distance to grey
            grey_color = 153
            depth_image_3d = np.dstack((depth_image, depth_image, depth_image))  # depth image is 1 channel, color is 3 channels
            bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)

            # Render images:
            #   depth align to color on left
            #   depth on right
            depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
            images = np.hstack((bg_removed, depth_colormap))

            cv2.namedWindow('Align Example', cv2.WINDOW_NORMAL)
            cv2.imshow('Align Example', images)
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break
    finally:
        pipeline.stop()
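
    A follow-up sketch of my own (not from the original align example): once the depth frame is aligned to the color stream as above, any pixel can be deprojected into a 3D point in meters using the aligned stream's intrinsics together with rs.rs2_deproject_pixel_to_point. The helper name pixel_to_point and the example pixel (320, 240) are my own choices.

    # Sketch: deproject one aligned depth pixel to a 3D point in meters.
    # `aligned_depth_frame` is assumed to come from the streaming loop above.
    import pyrealsense2 as rs

    def pixel_to_point(aligned_depth_frame, u, v):
        # Intrinsics of the stream the depth was aligned to (here: the color stream)
        intrin = aligned_depth_frame.profile.as_video_stream_profile().intrinsics
        depth_m = aligned_depth_frame.get_distance(u, v)   # depth at (u, v) in meters
        if depth_m == 0:
            return None                                    # no valid depth at this pixel
        # Returns [X, Y, Z] in the camera coordinate system, in meters
        return rs.rs2_deproject_pixel_to_point(intrin, [float(u), float(v)], depth_m)

    # Example use inside the loop: point = pixel_to_point(aligned_depth_frame, 320, 240)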

     

    🍊 3. A simple demo that uses multiple cameras to compute an object's length, width, and height

    librealsense/wrappers/python/examples at master · IntelRealSense/librealsense (github.com): https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python/examples
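
    The link points to Intel's Python examples, which include the multi-camera box dimensioning demo this section refers to; I am not reproducing it here. As a rough single-camera illustration of the underlying idea only (my own simplification, not the linked demo), the sketch below deprojects the depth pixels inside an object mask into 3D points and reads length, width, and height off an axis-aligned bounding box. The helper name measure_extent and the boolean numpy mask are assumptions of mine, and a real multi-camera setup additionally needs extrinsic calibration between the devices.

    # Rough single-camera sketch of box measurement (illustrative only):
    # deproject masked depth pixels to 3D and take the axis-aligned extents.
    import numpy as np
    import pyrealsense2 as rs

    def measure_extent(depth_frame, mask):
        # Intrinsics of the depth stream (or of the color stream if depth was aligned to it)
        intrin = depth_frame.profile.as_video_stream_profile().intrinsics
        points = []
        vs, us = np.nonzero(mask)  # row (v) and column (u) indices of the object's pixels
        for v, u in zip(vs, us):
            d = depth_frame.get_distance(int(u), int(v))  # meters; 0 means no valid depth
            if d > 0:
                points.append(rs.rs2_deproject_pixel_to_point(intrin, [float(u), float(v)], d))
        if not points:
            return None
        pts = np.array(points)                    # (N, 3) array of XYZ points in meters
        return pts.max(axis=0) - pts.min(axis=0)  # axis-aligned extents ~ length, width, height

    # Example: extent = measure_extent(depth_frame, object_mask)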

• Original article: https://blog.csdn.net/zzqingyun/article/details/125410831