
Two Sim2Real source files

The two core scripts of the ICRA2024 Sim2Real AXS baseline: AXS_baseline.py, which runs the whole pick-and-place task, and ros_base_control.py, which implements the chassis motion control.

1. AXS_baseline.py

# %%
import warnings

warnings.filterwarnings("ignore")

import copy
import math
import os
import time
from threading import Thread, Lock

import cv2
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation

from airbot.backend import Arm, Camera, Base, Gripper
from airbot.backend.utils.utils import camera2base, armbase2world
from airbot.lm import Detector, Segmentor
from airbot.lm.utils import depth2cloud
from airbot.grasp.graspmodel import GraspPredictor
from airbot.example.utils.draw import draw_bbox, obb2poly
from airbot.example.utils.vis_depth import vis_image_and_depth

os.environ['LM_CONFIG'] = "/root/Workspace/AXS_baseline/ICRA2024-Sim2Real-AXS/local.yaml"
os.environ['CKPT_DIR'] = '/root/Workspace/AXS_baseline/ckpt'
class Solution:
    # camera mounting offset on the end effector: (translation, xyzw quaternion)
    CAMERA_SHIFT = np.array([-0.093, 0, 0.07]), np.array([0.5, -0.5, 0.5, -0.5])

    BEFORE_MW_BASE_POSE = (np.array([-0.02596185755302699, -0.5065354084074988, -0.17365354084074988]),
                           np.array([0.00179501, 0.06667202, 0.04863613, 0.99658725]))
    ARM_POSE_TO_MICROWAVE = (np.array([0.67335, 0.2, -0.07678]), np.array([0.0, 0.0, 0.0, 1.0]))
    POSE_CLOSE_MICROWAVE = (np.array([-0.05931371154832413, 0.10621326743872146, 0.052710225767309826]),
                            np.array([0.00476639, 0.08115882, 0.05789078, 0.99500713]))
    ARM_POSE_CLOSE_MICROWAVE = (np.array([0.47335, 0.1, 0.05]), np.array([0.0, 0.0, 0.0, 1.0]))
    ARM_POSE_CLOSE_MICROWAVE_END = (np.array([0.4, -0.55, 0.05]), np.array([0.0, 0.0, 0.0, 1.0]))
    POSE_TO_BOWL = (np.array([-0.01226429675289562, 0.11259263609492792, -0.042619463119529605]),
                    np.array([-0.05418989389554988, 0.056031992518506414, 0.707, 0.707]))
    ARM_POSE_TO_LOWER_CABINET = (np.array([0.62335, 0.25, -0.05678]), np.array([0.0, 0.0, 0.0, 1.0]))
    ARM_POSE_PUT_LOWER = (np.array([0.62335, 0.25, -0.09678]), np.array([0.0, 0.0, 0.0, 1.0]))
    ARM_POSE_TO_UPPER_CABINET = (np.array([0.62335, 0.25, 0.22]), np.array([0.0, 0.0, 0.0, 1.0]))
    ARM_POSE_PUT_UPPER = (np.array([0.62335, 0.25, 0.17]), np.array([0.0, 0.0, 0.0, 1.0]))
    POSE_OPEN_CAB = (np.array([-0.2741637241544763, 0.3241416117180577, -0.07743623649227918]),
                     np.array([-0.026456952502619244, 0.022510511678367467, 0.6642090190392874, 0.7467393692280592]))
    ARM_POSE_STANDARD_MOVING = (np.array([0.3225, 0.00, 0.219]), np.array([0.0, 0.0, 0.0, 1.0]))
    GRASP_POSE_1 = (np.array([0.345502073568463, 0.49365995419306763, 0.07947950001408821]),
                    np.array([-0.0051582, 0.09461529, 0.05903872, 0.99374834]))
    GRASP_POSE_2 = (np.array([0.40524870062559315, 0.4479303912730242, 0.004671858854359251]),
                    np.array([-0.04836788, 0.0417043, 0.66597635, 0.74323402]))
    OBSERVE_ARM_POSE_1 = (np.array([0.2565699, 0.2, 0.171663168]),
                          np.array([-0.13970062182177911, 0.6487791800204252, 0.032918235938941776, 0.7473190092439113]))
    OBSERVE_ARM_POSE_2 = (np.array([0.2565699, -0.2, 0.171663168]),
                          np.array([-0.13970062182177911, 0.6487791800204252, 0.032918235938941776, 0.7473190092439113]))
    END_POSE = (np.array([-1.2317611908186263, -0.9177336410440479, -0.2374505629662929]),
                np.array([0.006592923324957343, -0.012942241749323641, 0.014944697147015459, 0.9997828203003477]))
    def __init__(self):
        self.arm = Arm(backend='ros')
        self.base = Base(backend='ros')
        self.gripper = Gripper(backend='ros')
        self.gripper.open()
        self.camera = Camera(backend='ros')
        self.detector = Detector(model='grounding-dino')
        # self.detector = Detector(model='yolo-v7')
        self.segmentor = Segmentor(model='segment-anything')
        self.grasper = GraspPredictor(model='graspnet')
        self.image_lock = Lock()
        self.result_lock = Lock()
        self.prompt_lock = Lock()
        self.running = True
        self.prompt = 'sky'  # initial detection prompt; overwritten at each task stage
        self.update_once()
        # background threads: visualization and continuous detection/segmentation
        self.t_vis = Thread(target=self.vis, daemon=True)
        self.t_vis.start()
        self.t_update = Thread(target=self.update, daemon=True)
        self.t_update.start()
    @property
    def image(self):
        with self.image_lock:
            return copy.deepcopy(self._image)

    @image.setter
    def image(self, value):
        with self.image_lock:
            self._image = copy.deepcopy(value)

    @property
    def prompt(self):
        with self.prompt_lock:
            return copy.deepcopy(self._prompt)

    @prompt.setter
    def prompt(self, value):
        with self.prompt_lock:
            self._prompt = copy.deepcopy(value)

    @property
    def depth(self):
        with self.image_lock:
            return copy.deepcopy(self._depth)

    @depth.setter
    def depth(self, value):
        with self.image_lock:
            self._depth = copy.deepcopy(value)

    @property
    def bbox(self):
        with self.result_lock:
            return copy.deepcopy(self._bbox)

    @bbox.setter
    def bbox(self, value):
        with self.result_lock:
            self._bbox = copy.deepcopy(value)

    @property
    def mask(self):
        with self.result_lock:
            return copy.deepcopy(self._mask)

    @mask.setter
    def mask(self, value):
        with self.result_lock:
            self._mask = copy.deepcopy(value)
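
    # Each property above returns a deep copy taken under its lock, so the camera
    # thread (update) and consumer threads never share a mutable numpy buffer:
    # readers always get a consistent snapshot of the latest frame and result.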
    def update_once(self):
        with self.image_lock, self.result_lock:
            self._image = copy.deepcopy(self.camera.get_rgb())
            self._depth = copy.deepcopy(self.camera.get_depth())
            self._det_result = self.detector.infer(self._image, self._prompt)
            self._bbox = self._det_result['bbox'].numpy().astype(int)
            self._sam_result = self.segmentor.infer(self._image, self._bbox[None, :2][:, [1, 0]])
            self._mask = self._sam_result['mask']

    def update(self):
        while self.running:
            self.update_once()
            time.sleep(0.005)
    def vis(self):
        try:
            # infinite loop to display images
            while self.running:
                image_draw = self.image
                image_draw = image_draw * (self.mask[:, :, None].astype(np.uint8) * 0.75 + 0.25)
                image_draw = draw_bbox(image_draw, obb2poly(self.bbox[None, ...]).astype(int))
                image_draw = image_draw.astype(np.uint8)
                image_show = cv2.cvtColor(image_draw, cv2.COLOR_RGB2BGR)
                cv2.imshow('RGB', image_show)
                _flag = np.any(self.mask)
                if _flag:
                    cv2.putText(image_show,
                                f"det score: {self._det_result['score']}, sam score: {self._sam_result['score']}",
                                (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                    cv2.imshow('RGB', image_show)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        except KeyboardInterrupt:
            print("Exiting due to user interruption.")
        finally:
            cv2.destroyAllWindows()
    @staticmethod
    def _bbox2mask(image, bbox):
        # bbox is (row center, col center, height, width): fill a rectangle around the center
        mask = np.zeros_like(image[:, :, 0], dtype=bool)
        mask[
            bbox[0] - bbox[2] // 2:bbox[0] + bbox[2] // 2,
            bbox[1] - bbox[3] // 2:bbox[1] + bbox[3] // 2,
        ] = True
        return mask

    @staticmethod
    def base_cloud(image, depth, intrinsic, shift, end_pose):
        # organized point cloud in the camera frame, with RGB appended,
        # then transformed into the arm-base frame
        cam_cloud = depth2cloud(depth, intrinsic)
        cam_cloud = np.copy(np.concatenate((cam_cloud, image), axis=2))
        return camera2base(cam_cloud, shift, end_pose)
    def grasp(self):
        # find the center of self.mask
        # center_cam = np.array(np.nonzeros(self.mask)).mean(axis=1)
        # center_frame = self.base_cloud[center_cam]
        # self.base.move_to(*self.OPERATE_BASE_POSE_1)
        # self.arm.move_end_to_pose(*self.OPERATE_ARM_POSE)
        # time.sleep(3)
        # method = input('method')
        method = "2"
        with self.image_lock, self.result_lock:
            _depth = copy.deepcopy(self._depth)
            _image = copy.deepcopy(self._image)
            _bbox = copy.deepcopy(self._bbox)
            _mask = copy.deepcopy(self._mask)
        cloud = self.base_cloud(_image, _depth, self.camera.INTRINSIC, self.CAMERA_SHIFT, self.arm.end_pose)
        if method == "1":
            # approach along the ray from the end effector to the top edge of the bbox
            direction = cloud[_bbox[0] - _bbox[2] // 2, _bbox[1]][:3] - self.arm.end_pose[0]
            direction = direction / np.linalg.norm(direction)
            grasp_position = cloud[_bbox[0] - _bbox[2] // 2 + 9, _bbox[1]][:3] - 0.12 * direction
            grasp_rotation = Rotation.from_euler('xyz', [0, np.pi / 2, np.pi / 2], degrees=False).as_quat()
        elif method == "2":
            # grasp a point near the left edge of the bbox at a fixed height
            grasp_position = cloud[_bbox[0], _bbox[1] - _bbox[3] // 2 + 8][:3]
            grasp_position[2] = -0.165
            grasp_rotation = Rotation.from_euler('xyz', [0, np.pi / 2, 0], degrees=False).as_quat()
        else:
            # let GraspNet propose a grasp inside the bbox mask
            bbox_mask = self._bbox2mask(_image, _bbox)
            (grasp_position, grasp_rotation), _ = self.grasper.infer(cloud, bbox_mask)
            grasp_rotation = Rotation.from_euler("yz", [np.pi / 2, np.pi / 2], degrees=False).as_quat()
        self.arm.move_end_to_pose(grasp_position, grasp_rotation)
        time.sleep(2)
        self.gripper.close()
        time.sleep(4)
        self.arm.move_end_to_pose(*self.ARM_POSE_STANDARD_MOVING)
    @staticmethod
    def _vis_grasp(cloud, position, rotation):
        import open3d as o3d
        from graspnetAPI.grasp import GraspGroup
        o3d_cloud = o3d.geometry.PointCloud()
        cloud = copy.deepcopy(cloud)
        o3d_cloud.points = o3d.utility.Vector3dVector(cloud[:, :, :3].reshape(-1, 3).astype(np.float32))
        o3d_cloud.colors = o3d.utility.Vector3dVector(cloud[:, :, 3:].reshape(-1, 3).astype(np.float32) / 255.)
        # GraspGroup row layout: score, width, height, depth, 3x3 rotation (flattened), translation, object id
        gg = GraspGroup(
            np.array([1., 0.06, 0.01, 0.06, *Rotation.from_quat(rotation).as_matrix().flatten(), *position,
                      0]).reshape(1, -1))
        coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0])
        o3d.visualization.draw_geometries([o3d_cloud, *gg.to_open3d_geometry_list(), coordinate_frame])
    def place_microwave(self):
        self.base.move_to(*self.BEFORE_MW_BASE_POSE, 'world', False)
        self.arm.move_end_to_pose(*self.ARM_POSE_TO_MICROWAVE)
        input_pose = (np.array([0.25, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0]))
        self.base.move_to(*input_pose, 'robot', True)
        self.gripper.open()
        time.sleep(2)
        output_pose = (np.array([-0.3, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0]))
        self.base.move_to(*output_pose, 'robot', True)

    def close_microwave(self):
        self.arm.move_end_to_pose(*self.ARM_POSE_STANDARD_MOVING)
        self.base.move_to(*self.POSE_CLOSE_MICROWAVE, 'world', False)
        self.arm.move_end_to_pose(*self.ARM_POSE_CLOSE_MICROWAVE)
        self.arm.move_end_to_pose(*self.ARM_POSE_CLOSE_MICROWAVE_END)
        output_pose1 = (np.array([-0.25, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0]))
        self.base.move_to(*output_pose1, 'robot', True)
        self.arm.move_end_to_pose(*self.ARM_POSE_STANDARD_MOVING)

    def place_bowl_lower(self):
        self.arm.move_end_to_pose(*self.ARM_POSE_STANDARD_MOVING)
        self.base.move_to(*self.POSE_TO_BOWL, 'world', False)
        self.arm.move_end_to_pose(*self.ARM_POSE_TO_LOWER_CABINET)
        input_pose = (np.array([0.35, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0]))
        self.base.move_to(*input_pose, 'robot', True)
        self.gripper.open()
        self.arm.move_end_to_pose(*self.ARM_POSE_PUT_LOWER)
        time.sleep(2)
        output_pose = (np.array([-0.35, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0]))
        self.base.move_to(*output_pose, 'robot', True)
        self.arm.move_end_to_pose(*self.ARM_POSE_STANDARD_MOVING)

    def place_bowl_upper(self):
        self.arm.move_end_to_pose(*self.ARM_POSE_STANDARD_MOVING)
        self.base.move_to(*self.POSE_TO_BOWL, 'world', False)
        self.arm.move_end_to_pose(*self.ARM_POSE_TO_UPPER_CABINET)
        input_pose = (np.array([0.35, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0]))
        self.base.move_to(*input_pose, 'robot', True)
        self.gripper.open()
        self.arm.move_end_to_pose(*self.ARM_POSE_PUT_UPPER)
        time.sleep(2)
        output_pose = (np.array([-0.35, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0]))
        self.base.move_to(*output_pose, 'robot', True)
        self.arm.move_end_to_pose(*self.ARM_POSE_STANDARD_MOVING)
    def lookforonce(self, det_th, sam_th):
        with self.image_lock, self.result_lock:
            _rgb = copy.deepcopy(self.camera.get_rgb())
            _depth = copy.deepcopy(self.camera.get_depth())
            _det_result = copy.deepcopy(self._det_result)
            _sam_result = copy.deepcopy(self._sam_result)
            _bbox = _det_result['bbox'].numpy().astype(int)
            _mask = _sam_result['mask']
        if not np.any(_mask):
            print(f"'{self._prompt}' not found")
        print("det score:", _det_result['score'])
        print("sam score:", _sam_result['score'])
        if _det_result['score'] > det_th and _sam_result['score'] > sam_th:
            print(f"Found the {self._prompt}")
            centerpoint = depth2cloud(_depth, self.camera.INTRINSIC, organized=True)[_bbox[0], _bbox[1]]
            centerpoint = camera2base(centerpoint, self.CAMERA_SHIFT, self.arm.end_pose)
            centerpoint = armbase2world(centerpoint, (self.base.position, self.base.rotation)).squeeze()
            # mean color of the central quarter of the bbox, used later to sort the bowls
            object_rgb = _rgb[_bbox[0] - np.int32(_bbox[2] / 4):_bbox[0] + np.int32(_bbox[2] / 4),
                              _bbox[1] - np.int32(_bbox[3] / 4):_bbox[1] + np.int32(_bbox[3] / 4)]
            mean_rgb = np.mean(np.mean(object_rgb, axis=0), axis=0).astype(int)
            print('-' * 50)
            print('centerpoint is', centerpoint)
            print('object rgb is', mean_rgb)
            print('-' * 50)
            return centerpoint, mean_rgb
if __name__ == '__main__':
    s = Solution()

    # 1) open the cabinet: grab the door handle and swing it open along an arc
    s.base.move_to(*s.POSE_OPEN_CAB, 'world', False)
    time.sleep(1)
    POS_DOOR_HANDLE = np.array([0.30353946626186371, 1.230472918510437, 0])
    centerp_car = np.linalg.inv(np.array(Rotation.from_quat(s.base.rotation).as_matrix())).dot(POS_DOOR_HANDLE - s.base.position)
    ARM_POSE_DOOR_HANDLE = (np.array([
        centerp_car[0] - 0.2975 - 0.01,
        centerp_car[1] + 0.17309 - 0.01,
        0.2,
    ]), np.array([0.0, 0.0, 0.0, 1.0]))
    s.arm.move_end_to_pose(*ARM_POSE_DOOR_HANDLE)
    s.gripper.close()
    time.sleep(5)
    for i in range(7):
        # follow a 0.4 m-radius arc in 0.1 rad steps, yawing the gripper with it
        d = 0.1 * (i + 1)
        new_pos = ARM_POSE_DOOR_HANDLE[0] + np.array([-0.4 * math.sin(d), 0.4 - 0.4 * math.cos(d), 0])
        new_ori = Rotation.from_euler("xyz", np.array([0, 0, -d]), degrees=False).as_quat()
        s.arm.move_end_to_pose(new_pos, np.array(new_ori))
        time.sleep(0.5)
    s.gripper.open()
    time.sleep(3)
    s.arm.move_end_to_pose(np.array([0.3225, 0.00, 0.219]), np.array([0.0, 0.0, 0.0, 1.0]))
    s.arm.move_end_to_pose(np.array([0.3225, -0.25, 0.219]), np.array([0.0, 0.0, 0.0, 1.0]))
    s.arm.move_end_to_pose(np.array([0.5615004168820418, -0.2, 0.35123932220414126]), np.array([0.0, 0.0, 0.2953746452532359, 0.9547541169761965]))
    s.arm.move_end_to_pose(np.array([0.6015004168820418, -0.15, 0.35123932220414126]), np.array([0.0, 0.0, 0.2953746452532359, 0.9547541169761965]))
    s.arm.move_end_to_pose(np.array([0.4882092425581316, 0.2917225555849343, 0.3515424067641672]), np.array([0.0, 0.0, 0.6045684271573144, 0.7957869908463996]))
    back_pose = (np.array([-0.05, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0]))
    s.base.move_to(*back_pose, 'robot', True)
    s.arm.move_end_to_pose(*s.ARM_POSE_STANDARD_MOVING)

    # 2) find the mug, grasp it and put it in the microwave
    s._prompt = 'white mug'
    cp = None
    s.base.move_to(*s.GRASP_POSE_1, 'world', False)
    look_num = 0
    while cp is None:
        for direction in [1, 2]:
            if direction == 1:
                s.arm.move_end_to_pose(*s.OBSERVE_ARM_POSE_1)
            else:
                s.arm.move_end_to_pose(*s.OBSERVE_ARM_POSE_2)
            cp = s.lookforonce(0.65, 0.65)
            if cp is not None:
                break
        look_num += 1
        if look_num > 3:
            break
    if cp is not None:
        centerpoint, object_mean_rgb = cp
        centerp_car = np.linalg.inv(np.array(Rotation.from_quat(s.base.rotation).as_matrix())).dot(centerpoint - s.base.position)
        OBSERVE_ARM_POSE_TOP = (np.array([
            centerp_car[0] - 0.2975 - 0.05,
            centerp_car[1] + 0.17309,
            0.018713334665877806,
        ]), np.array([
            -0.13970062182177911,
            0.6487791800204252,
            0.032918235938941776,
            0.7473190092439113,
        ]))
        s.arm.move_end_to_pose(*OBSERVE_ARM_POSE_TOP)
        time.sleep(1)
        s.grasp()
        s.place_microwave()
        s.close_microwave()

    # 3) sort bowls from the first grasp area: the first bowl goes to the lower
    #    cabinet, later bowls go up or down depending on their mean color
    obj_rgb = []
    for j in range(5):
        s._prompt = 'bowl'
        cp = None
        s.base.move_to(*s.GRASP_POSE_1, 'world', False)
        look_num = []
        while cp is None:
            for direction in [1, 2]:
                if direction == 1:
                    s.arm.move_end_to_pose(*s.OBSERVE_ARM_POSE_1)
                else:
                    s.arm.move_end_to_pose(*s.OBSERVE_ARM_POSE_2)
                cp = s.lookforonce(0.6, 0.6)
                if cp is not None:
                    break
            look_num.append(1)
            if len(look_num) > 2:
                break
        if len(look_num) > 2:
            break
        centerpoint, object_mean_rgb = cp
        centerp_car = np.linalg.inv(np.array(Rotation.from_quat(s.base.rotation).as_matrix())).dot(centerpoint - s.base.position)
        OBSERVE_ARM_POSE_TOP = (np.array([
            centerp_car[0] - 0.2975 - 0.05,
            centerp_car[1] + 0.17309,
            0.018713334665877806,
        ]), np.array([
            -0.13970062182177911,
            0.6487791800204252,
            0.032918235938941776,
            0.7473190092439113,
        ]))
        s.arm.move_end_to_pose(*OBSERVE_ARM_POSE_TOP)
        time.sleep(1)
        s.grasp()
        obj_rgb.append(object_mean_rgb)
        if j != 0:
            print("color", abs(sum(obj_rgb[j] - obj_rgb[0])))
        if j == 0:
            s.place_bowl_lower()
        elif abs(sum(obj_rgb[j] - obj_rgb[0])) > 30:
            s.place_bowl_upper()
        else:
            s.place_bowl_lower()

    # 4) repeat for the second grasp area, comparing against the first area's
    #    reference color when it is available
    obj_rgb_2 = []
    for j in range(5):
        s._prompt = 'bowl'
        cp = None
        s.base.move_to(*s.GRASP_POSE_2, 'world', False)
        look_num = []
        while cp is None:
            for direction in [1, 2]:
                if direction == 1:
                    s.arm.move_end_to_pose(*s.OBSERVE_ARM_POSE_1)
                else:
                    s.arm.move_end_to_pose(*s.OBSERVE_ARM_POSE_2)
                cp = s.lookforonce(0.6, 0.6)
                if cp is not None:
                    break
            look_num.append(1)
            if len(look_num) > 2:
                break
        if len(look_num) > 2:
            break
        centerpoint, object_mean_rgb = cp
        centerp_car = np.linalg.inv(np.array(Rotation.from_quat(s.base.rotation).as_matrix())).dot(centerpoint - s.base.position)
        OBSERVE_ARM_POSE_TOP = (np.array([
            centerp_car[0] - 0.2975 - 0.05,
            centerp_car[1] + 0.17309,
            0.018713334665877806,
        ]), np.array([
            -0.13970062182177911,
            0.6487791800204252,
            0.032918235938941776,
            0.7473190092439113,
        ]))
        s.arm.move_end_to_pose(*OBSERVE_ARM_POSE_TOP)
        time.sleep(1)
        s.grasp()
        obj_rgb_2.append(object_mean_rgb)
        if len(obj_rgb) != 0:
            if abs(sum(obj_rgb_2[j] - obj_rgb[0])) > 30:
                s.place_bowl_upper()
            else:
                s.place_bowl_lower()
        else:
            if j == 0:
                s.place_bowl_lower()
            elif abs(sum(obj_rgb_2[j] - obj_rgb_2[0])) > 30:
                s.place_bowl_upper()
            else:
                s.place_bowl_lower()

    print("finish the task")
    s.base.move_to(*s.END_POSE, 'world', False)
    time.sleep(20)
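
lookforonce above chains three airbot helpers (depth2cloud, camera2base and armbase2world) to lift a detected pixel into a world-frame point. Below is a minimal sketch of that chain under the pose convention used throughout the script, i.e. (position, xyzw-quaternion) pairs. The apply_pose helper, the intrinsic K, the pixel (u, v, d) and the two example poses are illustrative assumptions, not the robot's real calibration, and the real airbot helpers may compose the transforms with extra details:

import numpy as np
from scipy.spatial.transform import Rotation

def apply_pose(points, pose):
    # transform Nx3 points by a (position, xyzw-quaternion) pose
    position, quat = pose
    return Rotation.from_quat(quat).apply(points) + position

# back-project one pixel, which is what depth2cloud does for every pixel
K = np.array([[600.0, 0.0, 320.0], [0.0, 600.0, 240.0], [0.0, 0.0, 1.0]])  # assumed intrinsic
u, v, d = 400, 260, 0.8  # hypothetical detection center and its depth in meters
point_cam = d * np.linalg.inv(K) @ np.array([u, v, 1.0])

CAMERA_SHIFT = (np.array([-0.093, 0, 0.07]), np.array([0.5, -0.5, 0.5, -0.5]))  # from the script
end_pose = (np.array([0.3225, 0.0, 0.219]), np.array([0.0, 0.0, 0.0, 1.0]))  # example arm end pose
base_pose = (np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0]))  # example base pose in world

point_end = apply_pose(point_cam[None], CAMERA_SHIFT)  # camera frame -> end-effector frame
point_base = apply_pose(point_end, end_pose)  # roughly what camera2base composes
point_world = apply_pose(point_base, base_pose)  # roughly what armbase2world composes
print(point_world)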

2. ros_base_control.py

from . import transformations, CoordinateTools
import numpy as np
import time
from typing import Union
from threading import Thread


class BaseControlTools(object):
    """Utility class for robot chassis motion control.

    pose (tuple):
        0: position: (x, y, z)
        1: rotation: (r, p, y) Euler angles in xyz order, with r in [-pi, pi],
           p in [-pi/2, pi/2] and y in [-pi, pi]; function arguments may also be
           given as quaternions in xyzw order
    velocity (tuple):
        0: linear: (x, y, z)
        1: angular: (r, p, y), matching the rotation convention
    """

    coor = CoordinateTools()
    _TEST_ = False
    @staticmethod
    def target_pose_to_velocity(
        posi_or_rota: Union[float, np.ndarray], kp, limits: tuple, dead_zone: float
    ) -> Union[float, np.ndarray]:
        """Compute the robot's linear or angular velocity from the target's
        position or orientation in the robot frame and the given parameters."""
        if limits[0] > limits[1]:
            limits = (limits[1], limits[0])
        if dead_zone > limits[0]:
            dead_zone = limits[0]
        if isinstance(posi_or_rota, np.ndarray):
            raw_velocity_norm = np.linalg.norm(posi_or_rota)
        else:
            raw_velocity_norm = abs(posi_or_rota)
        target_linear_velocity_norm = kp * raw_velocity_norm
        if target_linear_velocity_norm == 0:
            target_velocity = posi_or_rota
        elif target_linear_velocity_norm > limits[1]:
            target_velocity = limits[1] / raw_velocity_norm * posi_or_rota
        elif target_linear_velocity_norm < dead_zone:
            target_velocity = posi_or_rota * 0.0  # inside the dead zone: command zero
        elif target_linear_velocity_norm < limits[0]:
            target_velocity = limits[0] / raw_velocity_norm * posi_or_rota
        else:
            target_velocity = (
                target_linear_velocity_norm / raw_velocity_norm * posi_or_rota
            )
        return target_velocity
    @staticmethod
    def min_rotation_move(distance: float, direction: float) -> tuple:
        """Turn the robot's heading axis toward the target through the smaller
        rotation, so that the heading axis (e.g. the x axis) ends up collinear
        with the target direction (same or opposite sense).

        Input:
            distance: distance from the robot to the target, >= 0
            direction: bearing of the target from the robot, in [-pi, pi]
        Output:
            distance: distance to the target; positive if the heading points at
                the target after the turn, negative if it points away from it
            direction: bearing to turn through, in [-pi/2, pi/2]
        """
        if direction < -np.pi / 2:
            direction += np.pi
            distance *= -1
        elif direction > np.pi / 2:
            direction -= np.pi
            distance *= -1
        return distance, direction
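
    # Worked example of the minimal-rotation logic: distance=1.0 m toward bearing
    # direction=2.8 rad (behind-left). Since 2.8 > pi/2, the method returns
    # (-1.0, 2.8 - pi) which is about (-1.0, -0.34): turn only -0.34 rad and back up
    # 1 m instead of turning 2.8 rad and driving forward.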
    @classmethod
    def composit_velocity(
        cls, target_pose, current_pose, kp, limits, dead_zone
    ) -> tuple:
        """Compute the linear and angular velocities from the target pose, the
        current pose and the given parameters."""
        raw_linear_velocity, raw_angular_velocity = cls.coor.to_robot_coordinate(
            target_pose, current_pose
        )
        target_linear_velocity = cls.target_pose_to_velocity(
            raw_linear_velocity, kp[0], limits[0], dead_zone[0]
        )
        target_angular_velocity = cls.target_pose_to_velocity(
            raw_angular_velocity, kp[1], limits[1], dead_zone[1]
        )
        return target_linear_velocity, target_angular_velocity
    @classmethod
    def three_stage_control(
        cls, target_pose, current_pose, tolerance, kp, limits, dead_zone
    ) -> tuple:
        """Three-stage chassis position-velocity control law: first rotate to face
        the target, then translate to the target position, then rotate again to the
        target orientation, adapting dynamically to the error feedback."""
        target_linear_velocity = np.zeros(3, dtype=np.float64)
        target_angular_velocity = np.zeros(3, dtype=np.float64)
        stage, bias = cls.get_stage_and_bias(
            target_pose, current_pose, tolerance[:2], tolerance[2], improve=False
        )
        if stage in [1, 3]:
            target_angular_velocity[2] = cls.target_pose_to_velocity(
                bias, kp[1], limits[1], dead_zone[1]
            )
        elif stage == 2:
            target_linear_velocity[0] = cls.target_pose_to_velocity(
                bias, kp[0], limits[0], dead_zone[0]
            )
        return target_linear_velocity, target_angular_velocity
    @classmethod
    def get_stage_and_bias(
        cls,
        target_pose: tuple,
        current_pose: tuple,
        pose_tolerance: tuple,
        direction_tolerance: Union[float, tuple, None] = None,
        improve=False,
        last_stage=None,
        new_target=1,
        avoid_321=False,
        same_ignore=False,
        avoid_swing=False,
    ) -> tuple:
        """Determine the current stage of the three-stage control law and the
        associated bias (error) term."""
        position_error, rotation_error = cls.coor.get_pose_error_in_axis(
            target_pose, current_pose
        )  # rotation_error lies in [-pi, pi]
        # position distance and rotation distance (within the minor arc)
        position_distance, rotation_distance = cls.coor.norm(
            position_error
        ), cls.coor.norm(rotation_error)
        same_target = same_ignore and new_target == 0
        if cls._TEST_:
            print("position_error:", position_error)
            print("rotation_error:", rotation_error)
            print("position_distance:", position_distance)
            print("rotation_distance:", rotation_distance)
            print("new_target:", new_target)
            print("same_target:", same_target)
        if (
            position_distance <= pose_tolerance[0]
            or (avoid_321 and last_stage == 3 and new_target == 0)
            or (last_stage == -1 and same_target)
        ):
            if rotation_distance <= pose_tolerance[1]:
                return -1, 0  # reached: end and stop
            else:
                return 3, rotation_error[2]
        else:
            pose_in_robot = cls.coor.to_robot_coordinate(target_pose, current_pose)
            position_in_robot = pose_in_robot[0]
            direction_error = cls.coor.get_spherical(position_in_robot)[2]
            # fold the major arc into the minor arc (position_distance becomes signed)
            position_distance, direction_error = cls.min_rotation_move(
                position_distance, direction_error
            )
            if cls._TEST_:
                print("direction_error:", direction_error)
            direction_error_abs = abs(direction_error)
            if improve:  # the improved three-stage control law
                if (
                    ((last_stage in [None, 1, -1]) or not same_target)
                    and direction_error_abs > direction_tolerance[0]
                ) or direction_error_abs > direction_tolerance[1]:
                    return 1, direction_error
                elif direction_error_abs <= 0:  # drive perfectly straight only once fully aligned
                    return 2, position_distance
                elif direction_error_abs <= direction_tolerance[1]:
                    if avoid_swing:
                        direction_error = 0  # stop correcting the heading to avoid oscillation
                    if position_distance < 0:
                        direction_error *= -1  # reversing: flip the heading correction
                    return 1.5, (position_distance, direction_error)  # stage 1.5 bias is a tuple
            else:
                if (
                    direction_tolerance is None
                    and direction_error_abs <= pose_tolerance[2]
                ) or (direction_error_abs <= direction_tolerance):
                    return 2, position_distance
                else:
                    return 1, direction_error
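
    # Stage map at a glance: badly misaligned with the goal bearing -> stage 1
    # (rotate in place); roughly aligned -> stage 2 (translate), with stage 1.5 in
    # the improved law blending translation with small heading corrections; within
    # the position tolerance but not the heading tolerance -> stage 3 (rotate to the
    # target yaw); within both tolerances -> stage -1 (stop).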
    @classmethod
    def three_stage_control_improved(
        cls,
        target_pose,
        current_pose,
        pose_tolerance,
        direction_tolerance,
        kp,
        limits,
        dead_zone,
        enhance=1,
        last_stage=None,
        new_target=1,
        avoid_321=False,
        same_ignore=False,
        avoid_swing=False,
    ) -> tuple:
        """Improved three-stage chassis position-velocity control law (adds a stage
        that superimposes single-axis translation and rotation)."""
        target_linear_velocity = np.zeros(3, dtype=np.float64)
        target_angular_velocity = np.zeros(3, dtype=np.float64)
        stage, bias = cls.get_stage_and_bias(
            target_pose,
            current_pose,
            pose_tolerance,
            direction_tolerance,
            improve=True,
            last_stage=last_stage,
            new_target=new_target,
            avoid_321=avoid_321,
            same_ignore=same_ignore,
            avoid_swing=avoid_swing,
        )
        if cls._TEST_:
            print("stage:", stage)
        if stage in [1, 1.5, 3]:
            bias_r = bias[1] if stage == 1.5 else bias
            target_angular_velocity[2] = (
                cls.target_pose_to_velocity(bias_r, kp[1], limits[1], dead_zone[1])
                / enhance
            )
        if stage in [1.5, 2]:
            bias_t = bias[0] if stage == 1.5 else bias
            target_linear_velocity[0] = (
                cls.target_pose_to_velocity(bias_t, kp[0], limits[0], dead_zone[0])
                * enhance
            )
        return (target_linear_velocity, target_angular_velocity), stage
    @staticmethod
    def four_steering_wheel_ik(
        linear_vx, linear_vy, angular_vz, width, lenth, radius=1, reduction=1
    ) -> tuple:
        """Inverse kinematics of a four-steering-wheel chassis: compute the four
        steering angles and wheel speeds from the commanded body velocity."""
        r = np.hypot(lenth, width)
        # per-wheel velocity components
        A = linear_vy - angular_vz * lenth / r  # y component at the right wheels
        B = linear_vy + angular_vz * lenth / r  # y component at the left wheels
        C = linear_vx - angular_vz * width / r  # x component at the front wheels
        D = linear_vx + angular_vz * width / r  # x component at the rear wheels
        # resultant speed of each wheel
        ws1 = np.sqrt(B**2 + C**2)
        ws2 = np.sqrt(B**2 + D**2)
        ws3 = np.sqrt(A**2 + D**2)
        ws4 = np.sqrt(A**2 + C**2)
        # resultant direction of each wheel
        wa1 = np.arctan2(B, C)
        wa2 = np.arctan2(B, D)
        wa3 = np.arctan2(A, D)
        wa4 = np.arctan2(A, C)
        # wheel spin scale factor (reduction ratio / wheel radius)
        rotating_factor = reduction / radius
        # wheel speeds (linear speeds when rotating_factor is 1)
        speeds = np.array([ws1, ws2, ws3, ws4], dtype=np.float64) * rotating_factor
        # wheel steering angles
        angles = np.array([wa1, wa2, wa3, wa4], dtype=np.float64)
        return speeds, angles
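
    # Quick sanity check for the IK above: a pure spin (vx=vy=0, wz=1 rad/s) on a
    # chassis with lenth=0.6 and width=0.4 gives r=hypot(0.6, 0.4), about 0.721, so
    # A=-B and C=-D with |A|=0.832 and |C|=0.555; every wheel speed is then
    # sqrt(0.832**2 + 0.555**2) = 1.0 and the four steering angles point tangent to
    # the circle around the chassis center, exactly what an in-place rotation requires.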
class BaseControl(object):
    """Top-level chassis motion control class."""

    _TEST_ = False

    def __init__(self) -> None:
        self._move_kp = (100, 100)
        self._velocity_dead_zone = (0.0, 0.0)
        self._linear_limits = (0.2, 0.3)
        self._angular_limits = (0.17, 0.18)
        self._direction_tolerance = 0.2
        self._direction_tolerance_improved = (0.1, 0.3)
        self._wait_tolerance = (0.1, 0.1)
        self._improve_enhance = 1
        self._last_stage = None
        self._wait_timeout = 5
        self._wait_period = 0.01
        self._move_stop = True
        self._move_method = "three_stage_improved"
        # initialize here so the default improved method works before avoid_swing() is called
        self._avoid_swing = False
        self._vel_cmd = (np.zeros(3, dtype=np.float64), np.zeros(3, dtype=np.float64))
        self._current_position = self._current_rotation = None
        BaseControlTools._TEST_ = self._TEST_
        self._tools = BaseControlTools()
        self._muilti_avoid = {"set": False, "get": False}
        self._new_target = 1
        self._avoid_321 = False
        self._same_ignore = False
        self._position_target = np.zeros(3, dtype=np.float64)
        self._rotation_target = np.zeros(3, dtype=np.float64)
        self._last_orientation_cmd = np.zeros(4, dtype=np.float64)
        self._last_pose_ref = "world"
    def move_to(self, position: np.ndarray, rotation: np.ndarray) -> bool:
        """Set a target pose and move the robot to it."""
        self.set_target_pose(position, rotation)
        result = self.move()
        return result

    def shift_pose(self, axis: int, target: float) -> bool:
        """Shift the robot by a given amount along/about one axis of its current pose."""
        position, rotation = self.get_current_world_pose()
        if axis < 3:
            position[axis] += target
        else:
            rotation[axis - 3] += target
        result = self.move_to(position, rotation)
        return result

    def set_target_pose(
        self, position: np.ndarray, rotation: np.ndarray, ref: str = "world"
    ) -> None:
        """Set the target pose in the world frame (ref="world", the default) or in
        the robot's current frame (ref="robot")."""
        len_rotation = len(rotation)
        # optionally ignore a repeated identical target
        if self._avoid_321:
            if self._last_pose_ref == ref and (position == self._position_target).all():
                if len_rotation == 4:
                    if (rotation == self._last_orientation_cmd).all():
                        return
                elif (rotation == self._rotation_target).all():
                    return
            else:
                self._new_target += 1
        # warn when the pose is set again before the previous call finished
        if self._muilti_avoid["set"]:
            print("Warning: set_target_pose is called before the last is finished!")
            return
        else:
            self._muilti_avoid["set"] = True
        # set the target pose
        if ref == "robot":
            position, rotation = self._tools.coor.to_world_coordinate(
                (position, rotation), self.get_current_world_pose()
            )
        else:
            if ref != "world":
                print("ref is not 'world' or 'robot', by default 'world' is used!")
                ref = "world"
        if len_rotation == 4:
            self._last_orientation_cmd = rotation
            rotation = transformations.euler_from_quaternion(rotation)
            rotation = np.array(rotation, dtype=np.float64)
        self._last_pose_ref = ref
        self._position_target, self._rotation_target = position, rotation
        self._muilti_avoid["set"] = False
    def get_target_pose(self) -> tuple:
        """Get the robot's target pose in the world frame."""
        return self._position_target, self._rotation_target

    def get_velocity_cmd(self, ignore_stop=False) -> tuple:
        """Get the robot's current velocity command."""
        if self._muilti_avoid["get"]:
            print("Warning: get_velocity_cmd is called before the last is finished!")
            return self._vel_cmd
        else:
            self._muilti_avoid["get"] = True
        if self._move_stop and not ignore_stop:
            self._vel_cmd = (np.array((0, 0, 0)), np.array((0, 0, 0)))
        elif self._move_method == "three_stage":
            self._three_stage_control()
        elif self._move_method == "three_stage_improved":
            self._three_stage_control_improved()
        else:
            self._composit_velocity()
        self._muilti_avoid["get"] = False
        if self._new_target > 0:
            self._new_target -= 1
        return self._vel_cmd
    def move(self, time_out=None) -> bool:
        """Move the robot to the most recently set target pose."""
        self._move_stop = False
        start_time = time.time()
        self.get_velocity_cmd()  # avoid the first velocity command being zero
        wait_time = time_out if time_out is not None else self._wait_timeout
        # keep waiting while either the linear or the angular command is nonzero
        while (self._vel_cmd[0] != 0).any() or (self._vel_cmd[1] != 0).any():
            if (time.time() - start_time) > wait_time:
                print(
                    "Move timeout, the robot may be stuck! The motion will be stopped and the program will continue!"
                )
                self._move_stop = True
                return False
            time.sleep(self._wait_period)
        self._move_stop = True
        return True
    def set_wait_tolerance(
        self, position: float, orientation: float, timeout: float, frequency: float
    ):
        """Set the waiting tolerances and timeout, plus the polling frequency."""
        self._wait_tolerance = np.array([position, orientation], dtype=np.float64)
        self._wait_timeout = timeout
        self._wait_period = 1 / frequency

    def avoid_321(self, avoid: bool = True, same_ignore: bool = True):
        """Avoid the stage regression that stage 3's residual rotation error can
        cause during and after stage 3 when identical commands are repeated."""
        self._avoid_321 = avoid
        self._same_ignore = same_ignore

    def avoid_swing(self, avoid: bool = True):
        self._avoid_swing = avoid

    def set_move_kp(self, trans: float, rotat: float):
        """Set the proportional gains of the motion controller."""
        self._move_kp = (trans, rotat)

    def set_velocity_limits(self, linear: tuple, angular: tuple):
        """Set the lower and upper bounds of the linear and angular velocities."""
        self._linear_limits = linear
        self._angular_limits = angular

    def set_velocity_dead_zone(self, linear_dead_zone: float, angular_dead_zone: float):
        """Set the linear and angular velocity dead zones."""
        self._velocity_dead_zone = (linear_dead_zone, angular_dead_zone)

    def set_direction_tolerance(self, tolerance: Union[float, tuple, list]):
        """Set the heading tolerance of the motion controller."""
        if isinstance(tolerance, (tuple, list)):
            self._direction_tolerance_improved = tolerance
        else:
            self._direction_tolerance = tolerance

    def set_improve_enhance(self, enhance: float):
        """Set the enhancement factor of the improved three-stage control law."""
        self._improve_enhance = enhance

    def set_move_method(self, method: str):
        """Select the motion control method."""
        self._move_method = method
    def _composit_velocity(self):
        velocity = self._tools.composit_velocity(
            self.get_target_pose(),
            self.get_current_world_pose(),
            self._move_kp,
            (self._linear_limits, self._angular_limits),
            self._velocity_dead_zone,
        )
        self._vel_cmd = velocity
        return velocity

    def _three_stage_control(self):
        velocity = self._tools.three_stage_control(
            self.get_target_pose(),
            self.get_current_world_pose(),
            list(self._wait_tolerance) + [self._direction_tolerance],
            self._move_kp,
            (self._linear_limits, self._angular_limits),
            self._velocity_dead_zone,
        )
        self._vel_cmd = velocity
        return velocity

    def _three_stage_control_improved(self) -> tuple:
        velocity, self._last_stage = self._tools.three_stage_control_improved(
            self.get_target_pose(),
            self.get_current_world_pose(),
            self._wait_tolerance,
            self._direction_tolerance_improved,
            self._move_kp,
            (self._linear_limits, self._angular_limits),
            self._velocity_dead_zone,
            enhance=self._improve_enhance,
            last_stage=self._last_stage,
            new_target=self._new_target,
            avoid_321=self._avoid_321,
            same_ignore=self._same_ignore,
            avoid_swing=self._avoid_swing,
        )
        if self._TEST_:
            print("target_rotation:", self.get_target_pose()[1])
            print("current_rotation:", self.get_current_world_pose()[1])
            print("target_position:", self.get_target_pose()[0])
            print("current_position:", self.get_current_world_pose()[0])
            print("velocity cmd:", velocity)
            print(" ")
        self._vel_cmd = velocity
        return velocity
    def _get_wait_error(self) -> np.ndarray:
        """Get the (Euclidean) pose error between the current and target poses."""
        wait_error = self._tools.coor.get_pose_distance(
            self.get_target_pose(), self.get_current_world_pose()
        )
        return wait_error

    def set_control_handel(self, handle, frequency=None):
        """Register an external control callback; a daemon thread will call it in a
        loop with the latest velocity command."""

        def thread_handle():
            while True:
                handle(self.get_velocity_cmd())
                if frequency is not None:
                    time.sleep(1 / frequency)

        Thread(target=thread_handle, daemon=True).start()

    def set_current_pose_handle(self, handle):
        """Register an external callback that supplies the current pose
        (overrides get_current_world_pose)."""
        self.get_current_world_pose = handle
    def set_current_world_pose(self, position, rotation):
        """Set the robot's current pose."""
        if len(rotation) == 4:
            rotation = transformations.euler_from_quaternion(rotation)
            rotation = np.array(rotation, dtype=np.float64)
        self._current_position, self._current_rotation = position, rotation

    def get_current_world_pose(self) -> tuple:
        """Get the robot's position and orientation in the world frame; if the
        current pose has never been set, the target pose is returned (whether or
        not a target has actually been set)."""
        if self._current_position is None or self._current_rotation is None:
            return self._position_target, self._rotation_target
        else:
            return self._current_position, self._current_rotation

    def set_model_four_steering_wheel(self, width, lenth, radius=1, reduction=1):
        """Set the chassis model parameters of a four-steering-wheel robot."""
        self.chasis_width = width
        self.chasis_lenth = lenth
        self.wheel_radius = radius
        self.wheel_reduction = reduction

    def ik_four_steering_wheel(self, linear_vx, linear_vy, angular_vz):
        """Inverse kinematics of the four-steering-wheel chassis (compute the four
        steering angles and wheel speeds from the commanded velocity)."""
        speeds, angles = self._tools.four_steering_wheel_ik(
            linear_vx,
            linear_vy,
            angular_vz,
            self.chasis_width,
            self.chasis_lenth,
            self.wheel_radius,
            self.wheel_reduction,
        )
        return speeds, angles

    def pose_ik_four_steering_wheel(self):
        """Solve the four wheel angles and speeds directly from the current and
        target poses."""
        vel = self._composit_velocity()
        speeds, angles = self.ik_four_steering_wheel(vel[0][0], vel[0][1], vel[1][2])
        return speeds, angles
if __name__ == "__main__":
    import rospy
    from geometry_msgs.msg import Pose, Twist
    from . import to_ros_msgs

    rospy.init_node("test_base_control")
    base_control = BaseControl()
    base_control.set_move_method("three_stage_improved")
    base_control.set_move_kp(1.5, 0.2)
    base_control.set_velocity_limits((0.0, 0.3), (0.0, 0.18))
    base_control.set_velocity_dead_zone(0.0, 0.0)
    base_control.set_direction_tolerance(0.2)
    base_control.set_wait_tolerance(0.1, 0.1, 60, 200)
    base_control.set_improve_enhance(2)

    # register the external chassis control interface
    pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)

    def vel_cmd_pub(raw_cmd):
        pub.publish(to_ros_msgs.to_Twist(raw_cmd))

    base_control.set_control_handel(vel_cmd_pub, frequency=200)

    # implement the current-pose update interface
    def pose_cb(msg: Pose):
        euler = transformations.euler_from_quaternion(
            [msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w]
        )
        base_control.set_current_world_pose(
            np.array(
                [msg.position.x, msg.position.y, msg.position.z], dtype=np.float64
            ),
            np.array(euler, dtype=np.float64),
        )

    rospy.Subscriber("/airbot/pose", Pose, pose_cb)
    rospy.sleep(1)
    target_pose0 = base_control.get_current_world_pose()
    target_pose1 = (target_pose0[0], np.array([0.0, 0.0, 1.5], dtype=np.float64))
    target_pose2 = (np.array([-2.5, -4, 0.5], dtype=np.float64), target_pose1[1])
    base_control.set_target_pose(*target_pose2)
    print("*************************************")
    print("Start moving!")
    base_control.move()
    print("Move finished!")
    print("*************************************")
    rospy.sleep(0.5)
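
The core helpers are plain static methods, so they can be exercised without ROS. A minimal sketch, assuming ros_base_control.py has been made importable on its own (the relative "from . import ..." at the top means it normally lives inside a package, so the import below is hypothetical):

import numpy as np
from ros_base_control import BaseControlTools  # hypothetical standalone import

# velocity shaping: a 0.5 m/s raw x command clamped into the (0.1, 0.3) m/s band
v = BaseControlTools.target_pose_to_velocity(
    np.array([0.5, 0.0, 0.0]), kp=1.0, limits=(0.1, 0.3), dead_zone=0.05)
print(v)  # [0.3 0.  0. ]: the command is scaled down to the upper limit

# four-steering-wheel IK for a gentle arc: 0.2 m/s forward plus 0.3 rad/s of yaw
speeds, angles = BaseControlTools.four_steering_wheel_ik(
    0.2, 0.0, 0.3, width=0.4, lenth=0.6, radius=0.05, reduction=1)
print(speeds)  # per-wheel spin rates (rad/s with a 5 cm wheel radius)
print(angles)  # per-wheel steering angles in radians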
