当前位置:   article > 正文

机器人手眼标定原理与python实现_手眼标定程序 python

手眼标定程序 python

一、标定原理

机器人手眼标定分为eye in hand与eye to hand两种。介绍之前进行变量定义说明:

{b}: base基坐标系

{g}: gripper夹具坐标系

{t}: target标定板坐标系

{c}: camera相机坐标系

1、眼在手上(eye in hand)

眼在手上,相机固定在机器人上。

图1. eye in hand示意图

由以上两公式得(标定板与基座均固定,故 $T^{b}_{t}$ 为常量,对任意两个机械臂位姿 $i,j$ 成立):

$$T^{b}_{g(i)}\,T^{g}_{c}\,T^{c}_{t(i)} = T^{b}_{g(j)}\,T^{g}_{c}\,T^{c}_{t(j)}$$

经变换得:

$$\left(T^{b}_{g(j)}\right)^{-1} T^{b}_{g(i)}\; T^{g}_{c} = T^{g}_{c}\; T^{c}_{t(j)} \left(T^{c}_{t(i)}\right)^{-1}$$

可得经典手眼方程:

$$AX = XB,\quad A=\left(T^{b}_{g(j)}\right)^{-1} T^{b}_{g(i)},\quad B=T^{c}_{t(j)}\left(T^{c}_{t(i)}\right)^{-1}$$

求解 $X = T^{g}_{c}$(相机相对于夹具的位姿)即完成标定。

2、眼在手外(eye to hand)

眼在手外,相机固定在机器人本体之外,标定板固定在机械臂末端。

图2. eye to hand示意图

由以上两公式可得(标定板固定在夹具上,故 $T^{g}_{t}$ 为常量,对任意两个机械臂位姿 $i,j$ 成立):

$$\left(T^{b}_{g(i)}\right)^{-1} T^{b}_{c}\, T^{c}_{t(i)} = \left(T^{b}_{g(j)}\right)^{-1} T^{b}_{c}\, T^{c}_{t(j)}$$

经变换得:

$$T^{b}_{g(j)} \left(T^{b}_{g(i)}\right)^{-1}\; T^{b}_{c} = T^{b}_{c}\; T^{c}_{t(j)} \left(T^{c}_{t(i)}\right)^{-1}$$

可得经典手眼方程:

$$AX = XB,\quad A=T^{b}_{g(j)}\left(T^{b}_{g(i)}\right)^{-1},\quad B=T^{c}_{t(j)}\left(T^{c}_{t(i)}\right)^{-1}$$

求解 $X = T^{b}_{c}$(相机相对于基座的位姿)即完成标定。

二 、标定步骤

  1. 将标定板固定至机械臂末端;

  2. 在位置1采集标定板图像,并记录机械臂在位置1下的位置与姿态;

  3. 在位置2采集标定板图像,并记录机械臂在位置2下的位置与姿态;

  4. 移动机械臂更换不同位置,采集25-40张图像,并记录机械臂在每个位置下的位姿;

  5. 相机标定,获取25-40组 Tt_c;

  6. 位姿读取,获取25-40组 Tb_g;

  7. 根据步骤5、6调用标定接口,获取 Tc_b。

三、标定代码

  1. import os
  2. import cv2
  3. import xlrd2
  4. from math import *
  5. import numpy as np
  6. class Calibration:
  7. def __init__(self):
  8. self.K = np.array([[2.54565632e+03, 0.00000000e+00, 9.68119560e+02],
  9. [0.00000000e+00, 2.54565632e+03, 5.31897821e+02],
  10. [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]], dtype=np.float64)
  11. self.distortion = np.array([[-0.2557898, 0.81056366, 0.0, 0.0, -8.39153683]])
  12. self.target_x_number = 12
  13. self.target_y_number = 8
  14. self.target_cell_size = 40
  15. def angle2rotation(self, x, y, z):
  16. Rx = np.array([[1, 0, 0], [0, cos(x), -sin(x)], [0, sin(x), cos(x)]])
  17. Ry = np.array([[cos(y), 0, sin(y)], [0, 1, 0], [-sin(y), 0, cos(y)]])
  18. Rz = np.array([[cos(z), -sin(z), 0], [sin(z), cos(z), 0], [0, 0, 1]])
  19. R = Rz @ Ry @ Rx
  20. return R
  21. def gripper2base(self, x, y, z, tx, ty, tz):
  22. thetaX = x / 180 * pi
  23. thetaY = y / 180 * pi
  24. thetaZ = z / 180 * pi
  25. R_gripper2base = self.angle2rotation(thetaX, thetaY, thetaZ)
  26. T_gripper2base = np.array([[tx], [ty], [tz]])
  27. Matrix_gripper2base = np.column_stack([R_gripper2base, T_gripper2base])
  28. Matrix_gripper2base = np.row_stack((Matrix_gripper2base, np.array([0, 0, 0, 1])))
  29. R_gripper2base = Matrix_gripper2base[:3, :3]
  30. T_gripper2base = Matrix_gripper2base[:3, 3].reshape((3, 1))
  31. return R_gripper2base, T_gripper2base
  32. def target2camera(self, img):
  33. gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  34. ret, corners = cv2.findChessboardCorners(gray, (self.target_x_number, self.target_y_number), None)
  35. corner_points = np.zeros((2, corners.shape[0]), dtype=np.float64)
  36. for i in range(corners.shape[0]):
  37. corner_points[:, i] = corners[i, 0, :]
  38. object_points = np.zeros((3, self.target_x_number * self.target_y_number), dtype=np.float64)
  39. count = 0
  40. for i in range(self.target_y_number):
  41. for j in range(self.target_x_number):
  42. object_points[:2, count] = np.array(
  43. [(self.target_x_number - j - 1) * self.target_cell_size,
  44. (self.target_y_number - i - 1) * self.target_cell_size])
  45. count += 1
  46. retval, rvec, tvec = cv2.solvePnP(object_points.T, corner_points.T, self.K, distCoeffs=distortion)
  47. Matrix_target2camera = np.column_stack(((cv2.Rodrigues(rvec))[0], tvec))
  48. Matrix_target2camera = np.row_stack((Matrix_target2camera, np.array([0, 0, 0, 1])))
  49. R_target2camera = Matrix_target2camera[:3, :3]
  50. T_target2camera = Matrix_target2camera[:3, 3].reshape((3, 1))
  51. return R_target2camera, T_target2camera
  52. def process(self, img_path, pose_path):
  53. image_list = []
  54. for root, dirs, files in os.walk(img_path):
  55. if files:
  56. for file in files:
  57. image_name = os.path.join(root, file)
  58. image_list.append(image_name)
  59. R_target2camera_list = []
  60. T_target2camera_list = []
  61. for img_path in image_list:
  62. img = cv2.imread(img_path)
  63. R_target2camera, T_target2camera = self.target2camera(img)
  64. R_target2camera_list.append(R_target2camera)
  65. T_target2camera_list.append(T_target2camera)
  66. R_gripper2base_list = []
  67. T_gripper2base_list = []
  68. data = xlrd2.open_workbook(pose_path)
  69. table = data.sheets()[0]
  70. for row in range(table.nrows):
  71. x = table.cell_value(row, 0)
  72. y = table.cell_value(row, 1)
  73. z = table.cell_value(row, 2)
  74. tx = table.cell_value(row, 3)
  75. ty = table.cell_value(row, 4)
  76. tz = table.cell_value(row, 5)
  77. R_gripper2base, T_gripper2base = self.gripper2base(x, y, z, tx, ty, tz)
  78. R_gripper2base_list.append(R_gripper2base)
  79. T_gripper2base_list.append(T_gripper2base)
  80. R_camera2base, T_camera2base = cv2.calibrateHandEye(R_gripper2base_list, T_gripper2base_list,
  81. R_target2camera_list, T_target2camera_list)
  82. return R_camera2base, T_camera2base, R_gripper2base_list, T_gripper2base_list, R_target2camera_list, T_target2camera_list
  83. def check_result(self, R_cb, T_cb, R_gb, T_gb, R_tc, T_tc):
  84. for i in range(len(R_gb)):
  85. RT_gripper2base = np.column_stack((R_gb[i], T_gb[i]))
  86. RT_gripper2base = np.row_stack((RT_gripper2base, np.array([0, 0, 0, 1])))
  87. RT_base2gripper = np.linalg.inv(RT_gripper2base)
  88. print(RT_base2gripper)
  89. RT_camera_to_base = np.column_stack((R_cb, T_cb))
  90. RT_camera_to_base = np.row_stack((RT_camera_to_base, np.array([0, 0, 0, 1])))
  91. print(RT_camera_to_base)
  92. RT_target_to_camera = np.column_stack((R_tc[i], T_tc[i]))
  93. RT_target_to_camera = np.row_stack((RT_target_to_camera, np.array([0, 0, 0, 1])))
  94. RT_camera2target = np.linalg.inv(RT_target_to_camera)
  95. print(RT_camera2target)
  96. RT_target_to_gripper = RT_base2gripper @ RT_camera_to_base @ RT_camera2target
  97. print("第{}次验证结果为:".format(i))
  98. print(RT_target_to_gripper)
  99. print('')
  100. if __name__ == "__main__":
  101. image_path = r"D\code\img"
  102. pose_path = r"D\code\pose.xlsx"
  103. calibrator = Calibration()
  104. R_cb, T_cb, R_gb, T_gb, R_tc, T_tc = calibrator.process(image_path, pose_path)
  105. calibrator.check_result(R_cb, T_cb, R_gb, T_gb, R_tc, T_tc)

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/IT小白/article/detail/659908
推荐阅读
相关标签
  

闽ICP备14008679号