 print(robot)
 T = robot.fkine(robot.qz)
 print(T)
-qt = rtb.trajectory.jtraj(robot.qz, robot.qr, 50)
-robot.plot(qt.q, movie="panda1.gif")
 
 # IK
 from spatialmath import SE3
 
-T = SE3(0.1, 0.2, 0.5) * SE3.OA([0, 1, 0], [0, 0, -1])
-q, *_ = robot.ikunc(T)
-print(q)
-print(robot.fkine(q))
+T = SE3(0.8, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])
+q_pickup, *_ = robot.ikine(T)  # solve IK, ignore additional outputs
+print(q_pickup)  # display joint angles
+print(robot.fkine(q_pickup))  # FK shows that the desired end-effector pose was achieved
+
+
+qt = rtb.trajectory.jtraj(robot.qz, q_pickup, 50)
+robot.plot(qt.q, movie="panda1.gif")
 
-# URDF model
+# URDF + Swift version
+dt = 0.050  # simulation timestep in seconds
 robot = rtb.models.URDF.Panda()
 print(robot)
-env = rtb.backends.Swift()
-env.launch()
-env.add(robot)
-for qk in qt.q:
-    robot.q = qk
-    env.step()
+
+env = rtb.backends.Swift()  # instantiate 3D browser-based visualizer
+env.launch("chrome")  # activate it
+env.add(robot)  # add robot to the 3D scene
+env.start_recording("panda2", 1 / dt)  # record the animation at the simulation frame rate
+for qk in qt.q:  # for each joint configuration on trajectory
+    robot.q = qk  # update the robot state
+    env.step()  # update visualization
+env.stop_recording()  # finalize the recording, written as panda2.webm
+
+# convert the recording to a half-resolution animated GIF:
+# ffmpeg -i panda2.webm -vf "scale=iw*.5:ih*.5" panda2.gif
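
As a sanity check on the new IK block, here is a minimal standalone sketch that confirms the solver actually reached the target pose. It assumes the DH Panda model created earlier in the example and the `ikine` call shown in the diff (newer toolbox releases rename this method, e.g. `ikine_LM`), so treat it as illustrative rather than canonical:

    import numpy as np
    import roboticstoolbox as rtb
    from spatialmath import SE3

    robot = rtb.models.DH.Panda()  # assumed model; the full example creates the robot before this excerpt
    T = SE3(0.8, 0.2, 0.1) * SE3.OA([0, 1, 0], [0, 0, -1])  # target pose from the diff
    q_pickup, *_ = robot.ikine(T)  # solve IK as in the diff, discarding extra outputs
    T_achieved = robot.fkine(q_pickup)  # forward kinematics at the IK solution
    print(np.allclose(T_achieved.A, T.A, atol=1e-6))  # True if the target pose was reached

`T.A` is the underlying 4x4 NumPy array of an `SE3`, so `np.allclose` compares the achieved and desired poses numerically rather than by eye.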
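Similarly, the `jtraj` call returns a joint-space trajectory object whose rows the plotting and Swift loops iterate over; a quick way to inspect it (attribute names per the toolbox's trajectory module, assumed stable here):

    qt = rtb.trajectory.jtraj(robot.qz, q_pickup, 50)  # 50-step polynomial trajectory between configurations
    print(qt.q.shape)   # (50, 7): one row per timestep, one column per Panda joint
    print(qt.qd.shape)  # joint velocities along the trajectory, same shape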