#!/usr/bin/env python
"""
Holistic mobile manipulation with a Fetch robot: a QP controller drives the
base and arm towards a target while keeping the target in the head camera's
view and avoiding self-occlusion by the arm.

@author Kerry He and Rhys Newbury
"""

import swift
import spatialgeometry as sg
import roboticstoolbox as rtb
import spatialmath as sm
import numpy as np
import qpsolvers as qp
import math


def transform_between_vectors(a, b):
    # Find the shortest rotation between two vectors using the angle-axis
    # representation, returned as an SE3 transformation
    a = a / np.linalg.norm(a)
    b = b / np.linalg.norm(b)

    angle = np.arccos(np.dot(a, b))
    axis = np.cross(a, b)
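    # Note: a zero cross product (parallel or anti-parallel input vectors)
    # leaves the rotation axis undefined; this helper assumes that case does
    # not occur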

    return sm.SE3.AngleAxis(angle, axis)


# Launch the simulator Swift
env = swift.Swift()
env.launch()

# Create a Fetch robot and a Fetch head-camera model
fetch = rtb.models.Fetch()
fetch_camera = rtb.models.FetchCamera()

# Set joint angles to the zero configuration
fetch.q = fetch.qz
fetch_camera.q = fetch_camera.qz

# Make a target object for the end-effector to reach
target = sg.Sphere(radius=0.05, base=sm.SE3(-2.0, 0.0, 0.5))

# Make a line-of-sight object to visualize where the camera is looking
sight_base = sm.SE3.Ry(np.pi / 2) * sm.SE3(0.0, 0.0, 2.5)
centroid_sight = sg.Cylinder(
    radius=0.001,
    length=5.0,
    base=fetch_camera.fkine(fetch_camera.q).A @ sight_base.A,
)
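# (Ry(pi/2) aligns the cylinder's long axis with the camera's viewing x-axis,
# and the 2.5 offset shifts the 5 m cylinder so it extends forward from the
# camera origin rather than being centred on it)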

# Add the Fetch robots and other shapes to the simulator
env.add(fetch)
env.add(fetch_camera)
env.add(centroid_sight)
env.add(target)

# Set the desired end-effector pose to the location of the target
Tep = fetch.fkine(fetch.q)
Tep.A[:3, :3] = sm.SE3.Rz(np.pi).R
Tep.A[:3, 3] = target.T[:3, -1]

env.step()

n_base = 2  # Virtual joints of the non-holonomic mobile base
n_arm = 8  # Torso lift plus the seven arm joints
n_camera = 2  # Head pan and tilt
n = n_base + n_arm + n_camera
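
# The QP decision vector is [qd (n = 12), slack (10)]: 6 slack variables for
# the end-effector velocity, 3 for the camera angular velocity, and 1 for
# self-occlusion avoidance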


def step():

    # Find end-effector pose in world frame
    wTe = fetch.fkine(fetch.q).A
    # Find camera pose in world frame
    wTc = fetch_camera.fkine(fetch_camera.q).A

    # Find transform between end-effector and goal
    eTep = np.linalg.inv(wTe) @ Tep.A
    # Find transform between camera and goal
    cTep = np.linalg.inv(wTc) @ Tep.A

    # Spatial error: the L1 norm of the translational error between the
    # end-effector and the target
    et = np.sum(np.abs(eTep[:3, -1]))

    # Weighting function used for the objective function
    def w_lambda(et, alpha, gamma):
        return alpha * np.power(et, gamma)

    # Quadratic component of objective function
    Q = np.eye(n + 10)

    Q[: n_base + n_arm, : n_base + n_arm] *= 0.01  # Robotic manipulator
    Q[:n_base, :n_base] *= w_lambda(et, 1.0, -1.0)  # Mobile base
    Q[n_base + n_arm : n, n_base + n_arm : n] *= 0.01  # Camera
    Q[n : n + 3, n : n + 3] *= w_lambda(et, 1000.0, -2.0)  # Slack arm linear
    Q[n + 3 : n + 6, n + 3 : n + 6] *= w_lambda(et, 0.01, -5.0)  # Slack arm angular
    Q[n + 6 : -1, n + 6 : -1] *= 100  # Slack camera
    Q[-1, -1] *= w_lambda(et, 1000.0, 3.0)  # Slack self-occlusion
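
    # With negative exponents gamma, these weights grow as the error et
    # shrinks, penalising base motion and slack usage more heavily near
    # the target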

    # Calculate target velocities for end-effector to reach target
    v_manip, _ = rtb.p_servo(wTe, Tep, 1.5)
    v_manip[3:] *= 1.3

    # Calculate target angular velocity for camera to rotate towards target
    head_rotation = transform_between_vectors(np.array([1, 0, 0]), cTep[:3, 3])
    v_camera, _ = rtb.p_servo(sm.SE3(), head_rotation, 25)
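
    # Only the angular part of v_camera is used below; the head can only pan
    # and tilt, so the linear velocity rows are discarded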

    # The equality constraints to achieve the velocity targets
    # (columns: 10 fetch joints | 2 head joints | 6 end-effector slacks |
    # 4 remaining slacks for the camera and self-occlusion variables)
    Aeq = np.c_[fetch.jacobe(fetch.q), np.zeros((6, 2)), np.eye(6), np.zeros((6, 4))]
    beq = v_manip.reshape((6,))

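    # Stack the camera's angular velocity constraint: the camera chain shares
    # the base and torso joints (first three columns) with the arm, skips the
    # seven arm-only joints, then adds head pan/tilt columns and its own slack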
    jacobe_cam = fetch_camera.jacobe(fetch_camera.q)[3:, :]
    Aeq_cam = np.c_[
        jacobe_cam[:, :3],
        np.zeros((3, 7)),
        jacobe_cam[:, 3:],
        np.zeros((3, 6)),
        np.eye(3),
        np.zeros((3, 1)),
    ]
    Aeq = np.r_[Aeq, Aeq_cam]
    beq = np.r_[beq, v_camera[3:].reshape((3,))]

    # The inequality constraints for joint limit avoidance
    Ain = np.zeros((n + 10, n + 10))
    bin = np.zeros(n + 10)

    # The minimum angle (in radians) by which a joint is allowed to approach
    # its limit
    ps = 0.1

    # The influence angle (in radians) within which the velocity damper
    # becomes active
    pi = 0.9

    # Form the joint limit velocity damper
    Ain[: fetch.n, : fetch.n], bin[: fetch.n] = fetch.joint_velocity_damper(
        ps, pi, fetch.n
    )

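    # Override the torso lift entry (joint index 2, shared by the arm and
    # camera chains) with a less restrictive damper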
    Ain_torso, bin_torso = fetch_camera.joint_velocity_damper(0.0, 0.05, fetch_camera.n)
    Ain[2, 2] = Ain_torso[2, 2]
    bin[2] = bin_torso[2]

    # Form the joint limit damper for the head pan and tilt joints
    Ain_cam, bin_cam = fetch_camera.joint_velocity_damper(ps, pi, fetch_camera.n)
    Ain[n_base + n_arm : n, n_base + n_arm : n] = Ain_cam[3:, 3:]
    bin[n_base + n_arm : n] = bin_cam[3:]

    # Form inequality constraints that keep the arm links from occluding the
    # line of sight between the camera and the target
    c_Ain, c_bin = fetch.vision_collision_damper(
        target,
        camera=fetch_camera,
        camera_n=2,
        q=fetch.q[: fetch.n],
        di=0.3,
        ds=0.2,
        xi=1.0,
        end=fetch.link_dict["gripper_link"],
        start=fetch.link_dict["shoulder_pan_link"],
    )
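    # (di and ds are the influence and stopping distances from the sight
    # line, and xi is the damper gain)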

    if c_Ain is not None and c_bin is not None:
        # Pad with zeros for the first 9 slack variables and couple the
        # constraint to the final self-occlusion slack variable
        c_Ain = np.c_[
            c_Ain, np.zeros((c_Ain.shape[0], 9)), -np.ones((c_Ain.shape[0], 1))
        ]

        Ain = np.r_[Ain, c_Ain]
        bin = np.r_[bin, c_bin]

    # Linear component of the objective function: a manipulability Jacobian
    # term for the arm is available but currently disabled
    c = np.concatenate(
        (
            np.zeros(n_base),
            # -fetch.jacobm(start=fetch.links[3]).reshape((n_arm,)),
            np.zeros(n_arm),
            np.zeros(n_camera),
            np.zeros(10),
        )
    )

    # Get base to face end-effector
    kε = 0.5
    bTe = fetch.fkine(fetch.q, include_base=False).A
    θε = math.atan2(bTe[1, -1], bTe[0, -1])
    ε = kε * θε
    c[0] = -ε
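    # (θε is the bearing of the end-effector in the base frame; the linear
    # cost on the base rotation steers the base to face the end-effector)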

    # The lower and upper bounds on the joint velocities and slack variables
    lb = -np.r_[
        fetch.qdlim[: fetch.n],
        fetch_camera.qdlim[3 : fetch_camera.n],
        100 * np.ones(9),
        0,
    ]
    ub = np.r_[
        fetch.qdlim[: fetch.n],
        fetch_camera.qdlim[3 : fetch_camera.n],
        100 * np.ones(9),
        100,
    ]
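    # (The self-occlusion slack is bounded to [0, 100], so it can only relax
    # its constraint in one direction)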

    # Solve for the joint velocities qd
    qd = qp.solve_qp(Q, c, Ain, bin, Aeq, beq, lb=lb, ub=ub)

    # Split the solution: the base and torso entries (first 3) are shared by
    # both kinematic chains; the head pan/tilt follow the 10 fetch joints
    qd_cam = np.concatenate((qd[:3], qd[fetch.n : fetch.n + 2]))
    qd = qd[: fetch.n]

    # Scale the joint velocities: cap the speed while far from the target and
    # speed up the final approach when close
    if et > 0.5:
        qd *= 0.7 / et
        qd_cam *= 0.7 / et
    else:
        qd *= 1.4
        qd_cam *= 1.4

    arrived = et < 0.02

    # Apply the joint velocities to the robots and update the sight line
    fetch.qd = qd
    fetch_camera.qd = qd_cam
    centroid_sight.T = fetch_camera.fkine(fetch_camera.q).A @ sight_base.A

    return arrived


# Run the simulation until the end-effector arrives at the target
arrived = False
while not arrived:
    arrived = step()
    env.step(0.01)