# Simulated bi-manual manipulation environments with end-effector control.
import numpy as np
import collections
import os
from constants import DT, XML_DIR, START_ARM_POSE
from constants import PUPPET_GRIPPER_POSITION_CLOSE
from constants import PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN
from constants import PUPPET_GRIPPER_POSITION_NORMALIZE_FN
from constants import PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN
from utils import sample_box_pose, sample_insertion_pose
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
import IPython
e = IPython.embed
def make_ee_sim_env(task_name):
    """
    Build a simulated bi-manual manipulation environment driven by end-effector
    control (finer-grained than the joint-space control used by make_sim_env).

    Note: the action definition differs from make_sim_env:
      - make_sim_env: 6 joint positions + 1 normalized gripper position per arm
      - make_ee_sim_env: a 7-DoF end-effector pose (position + quaternion)
        + 1 normalized gripper position per arm

    Action space: [left_arm_pose (7),           # position and quaternion for end effector
                   left_gripper_positions (1),  # normalized gripper position (0: close, 1: open)
                   right_arm_pose (7),          # position and quaternion for end effector
                   right_gripper_positions (1)] # normalized gripper position (0: close, 1: open)

    Observation space: {"qpos": Concat[ left_arm_qpos (6),          # absolute joint position
                                        left_gripper_position (1),  # normalized gripper position (0: close, 1: open)
                                        right_arm_qpos (6),         # absolute joint position
                                        right_gripper_qpos (1)]     # normalized gripper position (0: close, 1: open)
                        "qvel": Concat[ left_arm_qvel (6),          # absolute joint velocity (rad)
                                        left_gripper_velocity (1),  # normalized gripper velocity (pos: opening, neg: closing)
                                        right_arm_qvel (6),         # absolute joint velocity (rad)
                                        right_gripper_qvel (1)]     # normalized gripper velocity (pos: opening, neg: closing)
                        "images": {"main": (480x640x3)}}            # h, w, c, dtype='uint8'

    :param task_name: task identifier; must contain 'sim_transfer_cube'
                      (left gripper receives a cube from the right gripper) or
                      'sim_insertion' (right-hand peg inserted into left-hand socket)
    :return: a dm_control ``control.Environment`` wrapping the selected task
    :raises NotImplementedError: if task_name matches neither task family
    """
    if 'sim_transfer_cube' in task_name:
        xml_path = os.path.join(XML_DIR, 'bimanual_viperx_ee_transfer_cube.xml')
        physics = mujoco.Physics.from_xml_path(xml_path)
        task = TransferCubeEETask(random=False)
    elif 'sim_insertion' in task_name:
        xml_path = os.path.join(XML_DIR, 'bimanual_viperx_ee_insertion.xml')
        physics = mujoco.Physics.from_xml_path(xml_path)
        task = InsertionEETask(random=False)
    else:
        raise NotImplementedError(f'Unknown task name: {task_name}')
    # Both task families share the same episode parameters.
    env = control.Environment(physics, task, time_limit=20, control_timestep=DT,
                              n_sub_steps=None, flat_observation=False)
    return env
class BimanualViperXEETask(base.Task):
    """
    Base dm_control task for bi-manual ViperX manipulation with end-effector
    (mocap-driven) control. Subclasses implement `get_env_state` and
    `get_reward` for a concrete task.
    """

    def __init__(self, random=None):
        """
        :param random: optional seed / RandomState forwarded to base.Task.
        """
        super().__init__(random=random)

    def before_step(self, action, physics):
        """
        Apply one action before the physics step.

        The action is split in half: the first half commands the left arm,
        the second half the right arm. Each half is
        [xyz position (3), quaternion (4), normalized gripper position (1)].

        :param action: flat array of length 16 (8 per arm)
        :param physics: mujoco Physics instance
        """
        half = len(action) // 2
        left_action = action[:half]
        right_action = action[half:]
        # Write each arm's target pose into its mocap body (index 0: left, 1: right).
        for mocap_idx, arm_action in enumerate((left_action, right_action)):
            np.copyto(physics.data.mocap_pos[mocap_idx], arm_action[:3])
            np.copyto(physics.data.mocap_quat[mocap_idx], arm_action[3:7])
        # Un-normalize gripper commands; each gripper uses mirrored +/- ctrl values.
        left_ctrl = PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(left_action[7])
        right_ctrl = PUPPET_GRIPPER_POSITION_UNNORMALIZE_FN(right_action[7])
        np.copyto(physics.data.ctrl, np.array([left_ctrl, -left_ctrl, right_ctrl, -right_ctrl]))

    def initialize_robots(self, physics):
        """
        Reset both arms to the start pose, align the mocap bodies with the
        end effectors, and command the grippers closed.

        :param physics: mujoco Physics instance
        """
        # Reset arm + gripper joint positions (first 16 qpos entries).
        physics.named.data.qpos[:16] = START_ARM_POSE
        # Hard-coded mocap poses align with the grippers at START_ARM_POSE.
        # To re-derive: reset an ee_sim env to the start pose and read
        # env._physics.named.data.xpos/xquat['vx300s_left/gripper_link']
        # (and the right-side equivalent).
        np.copyto(physics.data.mocap_pos[0], [-0.31718881 + 0.1, 0.5, 0.29525084])
        np.copyto(physics.data.mocap_quat[0], [1, 0, 0, 0])
        np.copyto(physics.data.mocap_pos[1], np.array([0.31718881 - 0.1, 0.49999888, 0.29525084]))
        np.copyto(physics.data.mocap_quat[1], [1, 0, 0, 0])
        # Command both grippers closed (mirrored +/- ctrl per gripper).
        closed = PUPPET_GRIPPER_POSITION_CLOSE
        np.copyto(physics.data.ctrl, np.array([closed, -closed, closed, -closed]))

    def initialize_episode(self, physics):
        """Sets the state of the environment at the start of each episode."""
        super().initialize_episode(physics)

    @staticmethod
    def get_qpos(physics):
        """
        Return [left arm qpos (6), left gripper (1, normalized),
        right arm qpos (6), right gripper (1, normalized)].

        :param physics: mujoco Physics instance
        :return: np.ndarray of length 14
        """
        raw = physics.data.qpos.copy()
        left_raw, right_raw = raw[:8], raw[8:16]
        return np.concatenate([
            left_raw[:6],
            [PUPPET_GRIPPER_POSITION_NORMALIZE_FN(left_raw[6])],
            right_raw[:6],
            [PUPPET_GRIPPER_POSITION_NORMALIZE_FN(right_raw[6])],
        ])

    @staticmethod
    def get_qvel(physics):
        """
        Return [left arm qvel (6), left gripper vel (1, normalized),
        right arm qvel (6), right gripper vel (1, normalized)].

        :param physics: mujoco Physics instance
        :return: np.ndarray of length 14
        """
        raw = physics.data.qvel.copy()
        left_raw, right_raw = raw[:8], raw[8:16]
        return np.concatenate([
            left_raw[:6],
            [PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(left_raw[6])],
            right_raw[:6],
            [PUPPET_GRIPPER_VELOCITY_NORMALIZE_FN(right_raw[6])],
        ])

    @staticmethod
    def get_env_state(physics):
        """
        Task-specific (non-robot) environment state; implemented by subclasses.

        :param physics: mujoco Physics instance
        """
        raise NotImplementedError

    def get_observation(self, physics):
        """
        Assemble the observation dict: proprioception, task state, camera
        image, mocap poses, and raw gripper ctrl.

        :param physics: mujoco Physics instance
        :return: collections.OrderedDict of observations
        """
        # note: it is important to do .copy() on arrays read from physics.data
        obs = collections.OrderedDict()
        obs['qpos'] = self.get_qpos(physics)
        obs['qvel'] = self.get_qvel(physics)
        obs['env_state'] = self.get_env_state(physics)
        obs['images'] = {'top': physics.render(height=480, width=640, camera_id='top')}
        # Additional cameras available in the model: 'angle', 'front_close'.
        # Used in scripted policy to obtain starting pose.
        obs['mocap_pose_left'] = np.concatenate(
            [physics.data.mocap_pos[0], physics.data.mocap_quat[0]]).copy()
        obs['mocap_pose_right'] = np.concatenate(
            [physics.data.mocap_pos[1], physics.data.mocap_quat[1]]).copy()
        # Used when replaying joint trajectory.
        obs['gripper_ctrl'] = physics.data.ctrl.copy()
        return obs

    def get_reward(self, physics):
        """
        Per-step reward; implemented by subclasses.

        :param physics: mujoco Physics instance
        """
        raise NotImplementedError
class TransferCubeEETask(BimanualViperXEETask):
    """
    Task: the right gripper picks up a red cube and hands it over to the
    left gripper. Reward stages 0-4 track progress toward the transfer.
    """

    def __init__(self, random=None):
        super().__init__(random=random)
        # Reward reached on a successful transfer.
        self.max_reward = 4

    def initialize_episode(self, physics):
        """Sets the state of the environment at the start of each episode."""
        self.initialize_robots(physics)
        # Randomize the cube's starting pose (7 qpos entries: xyz + quat).
        cube_pose = sample_box_pose()
        start = physics.model.name2id('red_box_joint', 'joint')
        np.copyto(physics.data.qpos[start:start + 7], cube_pose)
        super().initialize_episode(physics)

    @staticmethod
    def get_env_state(physics):
        # Everything after the 16 robot qpos entries is task state (the cube pose).
        return physics.data.qpos.copy()[16:]

    def get_reward(self, physics):
        """
        Staged reward from current contacts:
          1: right gripper touches cube, 2: cube lifted by right gripper,
          3: left gripper touches cube (attempted transfer),
          4: left gripper holds cube off the table (successful transfer).
        """
        pairs = [
            (physics.model.id2name(physics.data.contact[i].geom1, 'geom'),
             physics.model.id2name(physics.data.contact[i].geom2, 'geom'))
            for i in range(physics.data.ncon)
        ]
        touch_left_gripper = ("red_box", "vx300s_left/10_left_gripper_finger") in pairs
        touch_right_gripper = ("red_box", "vx300s_right/10_right_gripper_finger") in pairs
        touch_table = ("red_box", "table") in pairs

        if touch_left_gripper and not touch_table:  # successful transfer
            return 4
        if touch_left_gripper:                      # attempted transfer
            return 3
        if touch_right_gripper and not touch_table: # lifted
            return 2
        if touch_right_gripper:
            return 1
        return 0
class InsertionEETask(BimanualViperXEETask):
    """
    Task: the right gripper holds a red peg and inserts it into the blue
    socket held by the left gripper. Reward stages 0-4 track progress.
    """

    #: Geom names of the four socket pieces in the MJCF model.
    SOCKET_GEOMS = ("socket-1", "socket-2", "socket-3", "socket-4")

    def __init__(self, random=None):
        super().__init__(random=random)
        # Reward reached on a successful insertion.
        self.max_reward = 4

    def initialize_episode(self, physics):
        """Sets the state of the environment at the start of each episode."""
        self.initialize_robots(physics)
        # Randomize peg and socket starting poses.
        peg_pose, socket_pose = sample_insertion_pose()

        def joint_id_to_qpos_index(joint_id):
            # First 16 qpos entries are the robots; each free joint is 7-dim. # hacky
            return 16 + (joint_id - 16) * 7

        for joint_name, pose in (('red_peg_joint', peg_pose),
                                 ('blue_socket_joint', socket_pose)):
            start = joint_id_to_qpos_index(physics.model.name2id(joint_name, 'joint'))
            np.copyto(physics.data.qpos[start:start + 7], pose)
        super().initialize_episode(physics)

    @staticmethod
    def get_env_state(physics):
        # Everything after the 16 robot qpos entries is task state (peg + socket poses).
        return physics.data.qpos.copy()[16:]

    def get_reward(self, physics):
        """
        Staged reward from current contacts:
          1: both grippers touch their objects, 2: both objects lifted,
          3: peg touches socket with both off the table,
          4: peg touches the pin (successful insertion).
        """
        pairs = [
            (physics.model.id2name(physics.data.contact[i].geom1, 'geom'),
             physics.model.id2name(physics.data.contact[i].geom2, 'geom'))
            for i in range(physics.data.ncon)
        ]
        touch_right_gripper = ("red_peg", "vx300s_right/10_right_gripper_finger") in pairs
        touch_left_gripper = any(
            (geom, "vx300s_left/10_left_gripper_finger") in pairs for geom in self.SOCKET_GEOMS)
        peg_touch_table = ("red_peg", "table") in pairs
        socket_touch_table = any((geom, "table") in pairs for geom in self.SOCKET_GEOMS)
        peg_touch_socket = any(("red_peg", geom) in pairs for geom in self.SOCKET_GEOMS)
        pin_touched = ("red_peg", "pin") in pairs

        lifted = (not peg_touch_table) and (not socket_touch_table)
        if pin_touched:                                        # successful insertion
            return 4
        if peg_touch_socket and lifted:                        # peg and socket touching
            return 3
        if touch_left_gripper and touch_right_gripper and lifted:  # grasp both
            return 2
        if touch_left_gripper and touch_right_gripper:         # touch both
            return 1
        return 0