• The SfM algorithm: triangulation


The typical SfM pipeline is feature extraction, feature matching, computation of the essential/fundamental matrix, and finally triangulation. When a robotic arm is used to observe the surroundings, however, the pose change between consecutive frames is already available from the arm, so it does not need to be recovered from the fundamental matrix (a sketch of that conventional step is shown below for contrast).
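
For contrast, here is a minimal sketch of the pose-recovery step that the arm replaces, using standard OpenCV calls. pts1, pts2, and K are assumed inputs (matched Nx2 pixel arrays and the 3x3 intrinsic matrix); note that the translation recovered this way is only known up to scale, whereas the arm's poses provide a metric baseline.

    import cv2
    import numpy as np

    def relative_pose_from_matches(pts1, pts2, K):
        # Essential matrix from matched features, with RANSAC to reject outliers
        E, mask = cv2.findEssentialMat(pts1, pts2, K, method=cv2.RANSAC)
        # Decompose E into the relative rotation R and a unit-scale translation t
        _, R, t, mask = cv2.recoverPose(E, pts1, pts2, K, mask=mask)
        return R, t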

In other words, the arm's kinematic information is used directly for depth estimation. Given the hand-eye calibration, the camera extrinsics (obtainable from the arm), and the pixel positions of the target in two captured images, estimate the target's depth. From a few tests so far the results look plausible, though the error has not been quantified.

The formula used is as follows:
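
(Reconstructed here from the implementation below; it is the standard DLT linear system.) With projection matrices $M_1 = K[I \mid 0]$ and $M_2 = K[R \mid t]$, where the view-1 camera frame is taken as the world frame, and the target at pixel $(u_1, v_1)$ in view 1 and $(u_2, v_2)$ in view 2, the homogeneous point $X$ satisfies

$$
AX = 0, \qquad
A =
\begin{bmatrix}
u_1 m_1^{3} - m_1^{1} \\
v_1 m_1^{3} - m_1^{2} \\
u_2 m_2^{3} - m_2^{1} \\
v_2 m_2^{3} - m_2^{2}
\end{bmatrix},
$$

where $m_i^{k}$ is the $k$-th row of $M_i$. $X$ is the right singular vector of $A$ associated with the smallest singular value, and dividing by its fourth component gives the 3D point.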

     

    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    # @Time :2022/7/27 14:59
    # @Author :weiz
    # @ProjectName :robotVision3
    # @File :triangulation.py
    # @Description :
    import cv2
    import numpy as np
    from triangulation_data import *


    def get_M(R, t):
        """
        Build the 3x4 projection-style matrix [R | t].
        :param R: 3x3 rotation matrix
        :param t: translation vector of length 3
        :return: 3x4 numpy array
        """
        M = [[R[0][0], R[0][1], R[0][2], t[0]],
             [R[1][0], R[1][1], R[1][2], t[1]],
             [R[2][0], R[2][1], R[2][2], t[2]]]
        return np.array(M)


    def get_M_homo(R, t):
        """
        Build the 4x4 homogeneous transform from a rotation and a translation.
        :param R: 3x3 rotation matrix
        :param t: translation vector of length 3
        :return: 4x4 numpy array
        """
        M = [[R[0][0], R[0][1], R[0][2], t[0]],
             [R[1][0], R[1][1], R[1][2], t[1]],
             [R[2][0], R[2][1], R[2][2], t[2]],
             [0, 0, 0, 1]]
        return np.array(M)


    def get_world2point2_R_t(R_camera2gripper, t_camera2gripper, point1_gripper, point2_gripper):
        """
        Get the R, t from the view-1 camera frame (taken as the world frame) to the view-2 camera frame.
        :param R_camera2gripper: R from the hand-eye calibration
        :param t_camera2gripper: t from the hand-eye calibration
        :param point1_gripper: end-effector pose [x, y, z, rx, ry, rz] at view 1
        :param point2_gripper: end-effector pose [x, y, z, rx, ry, rz] at view 2
        :return: R, t, and the full 4x4 transform
        """
        # Rodrigues vectors -> rotation matrices for the two gripper poses
        R_gripper2base_point1, _ = cv2.Rodrigues(np.array(point1_gripper[3:6]))
        t_gripper2base_point1 = np.array(point1_gripper[0:3])
        R_gripper2base_point2, _ = cv2.Rodrigues(np.array(point2_gripper[3:6]))
        t_gripper2base_point2 = np.array(point2_gripper[0:3])

        # Invert camera->gripper (hand-eye) and gripper->base (view 2)
        R_gripper2camera = np.linalg.inv(np.array(R_camera2gripper))
        t_gripper2camera = -np.dot(R_gripper2camera, t_camera2gripper)
        R_base2gripper_point2 = np.linalg.inv(R_gripper2base_point2)
        t_base2gripper_point2 = -np.dot(R_base2gripper_point2, t_gripper2base_point2)

        # Chain the transforms: camera1 -> gripper1 -> base -> gripper2 -> camera2
        M = get_M_homo(R_camera2gripper, t_camera2gripper)
        M = np.matmul(get_M_homo(R_gripper2base_point1, t_gripper2base_point1), M)
        M = np.matmul(get_M_homo(R_base2gripper_point2, t_base2gripper_point2), M)
        M = np.matmul(get_M_homo(R_gripper2camera, t_gripper2camera), M)

        R_world2point2 = M[0:3, 0:3]
        t_world2point2 = M[0:3, 3]
        return R_world2point2, t_world2point2, M


    def triangulation(point1, point2, ipm, R_world2point2, t_world2point2):
        """
        Triangulation: estimate the 3D coordinates directly from the arm's pose information.
        :param point1: target pixel in view 1 (the view-1 camera frame is the world frame)
        :param point2: target pixel in view 2
        :param ipm: camera intrinsic matrix
        :param R_world2point2: rotation from the world frame to the view-2 camera frame
        :param t_world2point2: translation from the world frame to the view-2 camera frame
        :return: the triangulated 3D point
        """
        ipm = np.array(ipm)
        M1 = np.matmul(ipm, get_M(np.eye(3), [0, 0, 0]))
        M2 = np.matmul(ipm, get_M(R_world2point2, t_world2point2))

        # DLT: each view contributes the two rows u*m3 - m1 and v*m3 - m2
        A = [point1[0] * M1[2] - M1[0],
             point1[1] * M1[2] - M1[1],
             point2[0] * M2[2] - M2[0],
             point2[1] * M2[2] - M2[1]]

        # The homogeneous solution is the right singular vector of the smallest
        # singular value; divide by the fourth component to get the 3D point.
        U, sigma, Vh = np.linalg.svd(A)
        p = [Vh[3][0] / Vh[3][3], Vh[3][1] / Vh[3][3], Vh[3][2] / Vh[3][3]]

        # Reprojection error in both views as a sanity check
        rvec2, _ = cv2.Rodrigues(np.asarray(R_world2point2))
        point1_pro, _ = cv2.projectPoints(np.array([p]), np.zeros((3, 1)), np.zeros((3, 1)), ipm, None)
        point2_pro, _ = cv2.projectPoints(np.array([p]), rvec2,
                                          np.asarray(t_world2point2, dtype=np.float64).reshape(3, 1), ipm, None)
        total_error = cv2.norm(np.array([[point1]], dtype=np.float64), point1_pro, cv2.NORM_L2)
        total_error += cv2.norm(np.array([[point2]], dtype=np.float64), point2_pro, cv2.NORM_L2)
        print(total_error)

        return p


    def triangulation_opencv(point1, point2, ipm, R_world2point2, t_world2point2):
        """
        The same triangulation via cv2.triangulatePoints, for cross-checking.
        """
        point1 = np.array([point1], dtype=np.float32)
        point2 = np.array([point2], dtype=np.float32)

        M1 = np.zeros((3, 4))
        M2 = np.zeros((3, 4))
        M1[0:3, 0:3] = np.eye(3)
        M2[0:3, 0:3] = np.float32(R_world2point2)
        M2[:, 3] = np.float32(t_world2point2)
        M1 = np.matmul(ipm, M1)
        M2 = np.matmul(ipm, M2)

        p_homo = cv2.triangulatePoints(M1, M2, point1.T, point2.T)

        # Homogeneous -> Euclidean: divide the first three components by the fourth
        p = []
        for i in range(len(p_homo[0])):
            col = p_homo[:, i]
            col /= col[3]
            p.append([col[0], col[1], col[2]])
        return p[0]


    R_camera2gripper = [[-0.999266726198567, -0.016016251269208765, -0.0347777171142492],
                        [-0.03487664683144269, 0.005939423009193905, 0.99937397542667],
                        [-0.015799665129108013, 0.9998540908300567, -0.00649366092498505]]
    t_camera2gripper = [0.057164748537088694, 0.10581519613132581, 0.1255394553568957]
    ipm = np.array([[535.05, 0, 653.005], [0, 535.01, 386.191], [0, 0, 1]])

    if __name__ == "__main__":
        R_world2point2, t_world2point2, M = get_world2point2_R_t(
            R_camera2gripper, t_camera2gripper, point1_gripper, point2_gripper)
        p = triangulation(point1, point2, ipm, R_world2point2, t_world2point2)
        print(p)
        p = triangulation_opencv(point1, point2, ipm, R_world2point2, t_world2point2)
        print(p)
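
A quick way to cross-check the two implementations against each other (a sketch, assuming the listing above is saved as triangulation.py next to triangulation_data.py, which is given next):

    import numpy as np
    from triangulation import (get_world2point2_R_t, triangulation,
                               triangulation_opencv, R_camera2gripper,
                               t_camera2gripper, ipm)
    from triangulation_data import point1, point2, point1_gripper, point2_gripper

    R, t, _ = get_world2point2_R_t(R_camera2gripper, t_camera2gripper,
                                   point1_gripper, point2_gripper)
    p_svd = triangulation(point1, point2, ipm, R, t)
    p_cv = triangulation_opencv(point1, point2, ipm, R, t)

    # The two solvers should agree to within numerical noise (float32 inside
    # cv2.triangulatePoints); the z component is the depth in the view-1 camera frame.
    assert np.allclose(p_svd, p_cv, atol=1e-3)
    print("depth estimate:", p_svd[2])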

The data (triangulation_data.py):

    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    # @Time :2022/7/29 9:28
    # @Author :weiz
    # @ProjectName :Sfm-python-master
    # @File :triangulation_data.py
    # @Description :
    # Copyright (C) 2021-2025 Jiangxi Institute Of Intelligent Industry Technology Innovation
    import numpy as np

    # Target pixel in each view and the corresponding end-effector pose
    # [x, y, z, rx, ry, rz]; the commented pairs below are alternate test views.
    point1 = [738, 389]  # 0.5410932
    point1_gripper = [0.5122332451863898, 0.5128182769039566, 0.9630757531852082, -0.7115522167741687, -1.7916510038378088, 1.7013068394360287]
    point2 = [797, 374]
    point2_gripper = [0.3820852469525631, 0.5235437570806256, 0.9711089613285708, -0.7071726699555351, -1.9459688091738208, 1.7263544531858268]

    # point1 = [797, 374]  # 0.54940385
    # point1_gripper = [0.3820852469525631, 0.5235437570806256, 0.9711089613285708, -0.7071726699555351, -1.9459688091738208, 1.7263544531858268]
    # point2 = [715, 343]
    # point2_gripper = [0.5056936674804589, 0.4650883838281726, 1.0634729391460807, -0.6999271853483486, -1.749012465435611, 1.6882826312357782]

    # point1 = [715, 343]  # 0.64878213
    # point1_gripper = [0.5056936674804589, 0.4650883838281726, 1.0634729391460807, -0.6999271853483486, -1.749012465435611, 1.6882826312357782]
    # point2 = [733, 368]
    # point2_gripper = [0.5485518509025477, 0.5157901201998042, 0.9837568437877331, -0.6609672121210775, -1.7084733902118903, 1.7261963960916609]

    # point1 = [733, 368]  # 0.5727386
    # point1_gripper = [0.5485518509025477, 0.5157901201998042, 0.9837568437877331, -0.6609672121210775, -1.7084733902118903, 1.7261963960916609]
    # point2 = [738, 389]
    # point2_gripper = [0.5122332451863898, 0.5128182769039566, 0.9630757531852082, -0.7115522167741687, -1.7916510038378088, 1.7013068394360287]


    def pixel_normalization(point1, point2):
        """Jointly normalize the two pixel coordinates to zero mean and unit std."""
        points_norm = [point1, point2]
        points_norm = np.array(points_norm)
        points_norm = (points_norm - np.mean(points_norm)) / np.std(points_norm)
        return points_norm[0], points_norm[1]


    if __name__ == "__main__":
        p1, p2 = pixel_normalization(point1, point2)
        print(p1)
        print(p2)
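
Note that pixel_normalization is not called by triangulation.py; it jointly zero-centers and rescales the two pixels, the kind of coordinate conditioning usually applied before estimating a fundamental matrix. A minimal check of that behavior:

    import numpy as np
    from triangulation_data import pixel_normalization, point1, point2

    p1, p2 = pixel_normalization(point1, point2)
    # By construction the four coordinates jointly have ~zero mean and unit std
    print(np.mean([p1, p2]), np.std([p1, p2]))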

• Original post: https://blog.csdn.net/qq_31112205/article/details/126466206