diff --git a/examples/teleop/README.md b/examples/teleop/README.md
new file mode 100644
index 00000000..d4a56f2c
--- /dev/null
+++ b/examples/teleop/README.md
@@ -0,0 +1,60 @@
+# Teleoperation with Meta Quest
+Teleoperate your robot (optionally dual-arm) with the Meta Quest.
+
+## How does it work?
+In the script [`quest_iris_dual_arm.py`](quest_iris_dual_arm.py) we use the [IRIS platform](https://intuitive-robots.github.io/iris-project-page/index.html) to get controller poses from the Meta Quest.
+With the relative action space wrapper and the relative-to-configured-origin setting, these poses are then applied to the robot in a delta fashion whenever the trigger button is pressed.
+The buttons are used to start and stop data recording with the [`StorageWrapper`](robot-control-stack/python/rcs/envs/storage_wrapper.py).
+
+## Installation
+[Install RCS](https://robotcontrolstack.org/getting_started/index.html) and the [FR3 extension](https://robotcontrolstack.org/extensions/rcs_fr3.html) (the script is written for the FR3 as an example but can easily be adapted to other robots).
+Install the IRIS APK on your Quest following [these instructions](https://github.com/intuitive-robots/IRIS-Meta-Quest3).
+Finally, install [SimPub](https://github.com/intuitive-robots/SimPublisher), the IRIS Python client, with
+
+```shell
+pip install -r requirements.txt
+```
+
+## Configuration
+
+### Teleoperating in sim
+
+1. Go to [`quest_iris_dual_arm.py`](quest_iris_dual_arm.py) and set `ROBOT_INSTANCE = RobotPlatform.SIMULATION`.
+
+### Teleoperating a real robot
+Note that dual arm is only supported for an ALOHA-like setup where the robots face each other (for more advanced setups you need to adjust the transformation between the robots yourself).
+1. Put your robots into FCI mode.
+2. Go to [`quest_iris_dual_arm.py`](quest_iris_dual_arm.py), set `ROBOT_INSTANCE = RobotPlatform.HARDWARE` and set the IP addresses of your robots. Remove the left robot if you only have one.
+
+
+## Running
+1. Make sure your computer and your Quest are in the same subnet and can ping each other.
+2. Start the IRIS Meta Quest app on your Quest (after installation it should be located in the Library under "Unknown Sources").
+3. Run the [`quest_align_frame.py`](quest_align_frame.py) script once. Navigate to the link printed at the top, likely [http://127.0.0.1:7000](http://127.0.0.1:7000).
+   - Click scan.
+   - Your Meta Quest should show up.
+   - Click change name, type "RCSNode" and click ok.
+   - The script should now print a stream of numbers (the controller poses).
+4. Put on your Quest (don't take it off until you are done with teleop; otherwise the axes might change and you will need to recalibrate). You should see a white ball with coordinate axes somewhere in your room (red is x, green is y, and blue is z).
+5. Use the right controller to change the orientation of the coordinate axes to fit your right robot (for the Franka: x front, y left, z up).
+6. Click the "teleportation scene" button on the still-open website.
+7. Stop the script.
+8. Start the teleoperation script [`quest_iris_dual_arm.py`](quest_iris_dual_arm.py) and enjoy.
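+
+For intuition, here is a minimal sketch of the delta logic described above (illustrative only; `offset` and `current` stand in for the poses the script tracks internally, and the `Pose` helpers are the ones `quest_iris_dual_arm.py` itself uses):
+
+```python
+from rcs._core.common import Pose
+
+# while the trigger is held, the action is the controller pose relative to the
+# pose captured at the moment the trigger was pressed (the "offset" pose)
+offset = Pose()   # in the script: controller pose captured on trigger press
+current = Pose()  # in the script: latest controller pose, updated at 90 Hz
+delta = Pose(
+    translation=current.translation() - offset.translation(),
+    quaternion=(current * offset.inverse()).rotation_q(),
+)
+# the relative action space wrapper applies this delta w.r.t. the configured
+# origin; releasing the trigger re-anchors the origin at the current pose
+```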
+
diff --git a/examples/teleop/quest_align_frame.py b/examples/teleop/quest_align_frame.py
new file mode 100644
index 00000000..9a4bda17
--- /dev/null
+++ b/examples/teleop/quest_align_frame.py
@@ -0,0 +1,8 @@
+from quest_iris_dual_arm import MQ3_ADDR, MySimPublisher, MySimScene
+from simpub.xr_device.meta_quest3 import MetaQuest3
+
+MySimPublisher(MySimScene(), MQ3_ADDR)
+reader = MetaQuest3("RCSNode")
+while True:
+    data = reader.get_controller_data()
+    print(data)
diff --git a/examples/teleop/quest_iris_dual_arm.py b/examples/teleop/quest_iris_dual_arm.py
new file mode 100644
index 00000000..a6746a39
--- /dev/null
+++ b/examples/teleop/quest_iris_dual_arm.py
@@ -0,0 +1,348 @@
+import logging
+import threading
+from time import sleep
+
+import numpy as np
+from rcs._core.common import RPY, Pose, RobotPlatform
+from rcs._core.sim import SimConfig
+from rcs.camera.hw import HardwareCameraSet
+from rcs.envs.base import (
+    ControlMode,
+    GripperDictType,
+    LimitedTQuatRelDictType,
+    RelativeActionSpace,
+    RelativeTo,
+)
+from rcs.envs.creators import SimMultiEnvCreator
+from rcs.envs.storage_wrapper import StorageWrapper
+from rcs.envs.utils import default_sim_gripper_cfg, default_sim_robot_cfg
+from rcs.utils import SimpleFrameRate
+from rcs_fr3.creators import RCSFR3MultiEnvCreator
+from rcs_fr3.utils import default_fr3_hw_gripper_cfg, default_fr3_hw_robot_cfg
+from rcs_realsense.utils import default_realsense
+from simpub.core.simpub_server import SimPublisher
+from simpub.parser.simdata import SimObject, SimScene
+from simpub.sim.mj_publisher import MujocoPublisher
+from simpub.xr_device.meta_quest3 import MetaQuest3
+
+# from rcs_xarm7.creators import RCSXArm7EnvCreator
+
+logger = logging.getLogger(__name__)
+
+# download the IRIS APK from the releases of https://github.com/intuitive-robots/IRIS-Meta-Quest3
+# to use a USB connection, install adb:
+#   sudo apt install android-tools-adb
+# then install the APK on your Quest with
+#   adb install IRIS-Meta-Quest3.apk
+
+
+INCLUDE_ROTATION = True
+ROBOT2IP = {
+    "left": "192.168.102.1",
+    "right": "192.168.101.1",
+}
+
+
+# ROBOT_INSTANCE = RobotPlatform.SIMULATION
+ROBOT_INSTANCE = RobotPlatform.HARDWARE
+RECORD_FPS = 30
+# set the camera dict to None to disable cameras
+# CAMERA_DICT = {
+#     "side_right": "244222071045",
+#     "bird_eye": "243522070364",
+#     "arro": "243522070385",
+# }
+CAMERA_DICT = None
+MQ3_ADDR = "10.42.0.1"
+
+DATASET_PATH = "test_data_iris_dual_arm"
+INSTRUCTION = "build a tower with the blocks in front of you"
+
+
+class MySimPublisher(SimPublisher):
+    def get_update(self):
+        return {}
+
+
+class MySimScene(SimScene):
+    def __init__(self):
+        super().__init__()
+        self.root = SimObject(name="root")
+
+
+class QuestReader(threading.Thread):
+
+    transform_to_robot = Pose()
+
+    def __init__(self, env: RelativeActionSpace):
+        super().__init__()
+        self._reader = MetaQuest3("RCSNode")
+
+        self._resource_lock = threading.Lock()
+        self._env_lock = threading.Lock()
+        self._env = env
+
+        self.controller_names = ROBOT2IP.keys() if ROBOT_INSTANCE == RobotPlatform.HARDWARE else ["right"]
+        self._trg_btn = {"left": "index_trigger", "right": "index_trigger"}
+        self._grp_btn = {"left": "hand_trigger", "right": "hand_trigger"}
+        self._start_btn = "A"
+        self._stop_btn = "B"
+        self._unsuccessful_btn = "Y"
+
+        self._prev_data = None
+        self._exit_requested = False
+        self._grp_pos = {key: 1.0 for key in self.controller_names}  # start with opened gripper
+        self._last_controller_pose = {key: Pose() for key in self.controller_names}
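+        # _offset_pose holds the controller pose captured when the trigger is
+        # pressed; deltas are computed against it until the trigger is released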
+        self._offset_pose = {key: Pose() for key in self.controller_names}
+
+        for robot in ROBOT2IP:
+            self._env.envs[robot].set_origin_to_current()
+
+        self._step_env = False
+        self._set_frame = {key: Pose() for key in self.controller_names}
+
+    def next_action(self) -> tuple[dict[str, Pose], dict[str, float]]:
+        with self._resource_lock:
+            transforms = {}
+            for controller in self.controller_names:
+                transform = Pose(
+                    translation=(
+                        self._last_controller_pose[controller].translation()
+                        - self._offset_pose[controller].translation()
+                    ),
+                    quaternion=(
+                        self._last_controller_pose[controller] * self._offset_pose[controller].inverse()
+                    ).rotation_q(),
+                )
+
+                set_axes = Pose(quaternion=self._set_frame[controller].rotation_q())
+
+                transform = (
+                    QuestReader.transform_to_robot
+                    * set_axes.inverse()
+                    * transform
+                    * set_axes
+                    * QuestReader.transform_to_robot.inverse()
+                )
+                if not INCLUDE_ROTATION:
+                    transform = Pose(translation=transform.translation())  # identity rotation
+                transforms[controller] = transform
+            return transforms, {key: self._grp_pos[key] for key in self.controller_names}
+
+    def run(self):
+        rate_limiter = SimpleFrameRate(90, "teleop readout")
+        warning_raised = False
+        while not self._exit_requested:
+            # pos, buttons = self._reader.get_transformations_and_buttons()
+            input_data = self._reader.get_controller_data()
+            if not warning_raised and input_data is None:
+                logger.warning("[Quest Reader] packets empty")
+                warning_raised = True
+                sleep(0.5)
+                continue
+            if input_data is None:
+                sleep(0.5)
+                continue
+            if warning_raised:
+                logger.warning("[Quest Reader] packets arriving again")
+                warning_raised = False
+
+            # start recording
+            if input_data[self._start_btn] and (self._prev_data is None or not self._prev_data[self._start_btn]):
+                print("start button pressed")
+                with self._env_lock:
+                    self._env.get_wrapper_attr("start_record")()
+
+            if input_data[self._stop_btn] and (self._prev_data is None or not self._prev_data[self._stop_btn]):
+                print("reset successful pressed: resetting env")
+                with self._env_lock:
+                    # set successful
+                    self._env.get_wrapper_attr("success")()
+                    # this might also move the robot to the home position
+                    self._env.reset()
+
+            # reset unsuccessful
+            if input_data[self._unsuccessful_btn] and (
+                self._prev_data is None or not self._prev_data[self._unsuccessful_btn]
+            ):
+                print("reset unsuccessful pressed: resetting env")
+                with self._env_lock:
+                    self._env.reset()
+
+            for controller in self.controller_names:
+                last_controller_pose = Pose(
+                    translation=np.array(input_data[controller]["pos"]),
+                    quaternion=np.array(input_data[controller]["rot"]),
+                )
+                if controller == "left":
+                    last_controller_pose = (
+                        Pose(translation=np.array([0, 0, 0]), rpy=RPY(roll=0, pitch=0, yaw=np.deg2rad(180)))
+                        * last_controller_pose
+                    )
+
+                if input_data[controller][self._trg_btn[controller]] and (
+                    self._prev_data is None or not self._prev_data[controller][self._trg_btn[controller]]
+                ):
+                    # trigger just pressed (first data sample with button pressed)
+
+                    with self._resource_lock:
+                        self._offset_pose[controller] = last_controller_pose
+                        self._last_controller_pose[controller] = last_controller_pose
+
+                elif not input_data[controller][self._trg_btn[controller]] and (
+                    self._prev_data is None or self._prev_data[controller][self._trg_btn[controller]]
+                ):
+                    # released
+                    transform = Pose(
+                        translation=(
+                            self._last_controller_pose[controller].translation()
+                            - self._offset_pose[controller].translation()
+                        ),
+                        quaternion=(
+                            self._last_controller_pose[controller] * self._offset_pose[controller].inverse()
+                        ).rotation_q(),
+                    )
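+                    # log the residual delta at release: translation (m) and rotation (deg)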
+                    print(np.linalg.norm(transform.translation()))
+                    print(np.rad2deg(transform.total_angle()))
+                    with self._resource_lock:
+                        self._last_controller_pose[controller] = Pose()
+                        self._offset_pose[controller] = Pose()
+                    with self._env_lock:
+                        self._env.envs[controller].set_origin_to_current()
+
+                elif input_data[controller][self._trg_btn[controller]]:
+                    # button is pressed
+                    with self._resource_lock:
+                        self._last_controller_pose[controller] = last_controller_pose
+                else:
+                    # trg button is not pressed
+                    # TODO: deactivated for now
+                    # if data["buttons"][self._home_btn] and (
+                    #     self._prev_data is None or not self._prev_data["buttons"][self._home_btn]
+                    # ):
+                    #     # home button just pressed
+                    #     with self._env_lock:
+                    #         self._env.unwrapped.robot.move_home()
+                    #         self._env.set_origin_to_current()
+                    pass
+
+                if input_data[controller][self._grp_btn[controller]] and (
+                    self._prev_data is None or not self._prev_data[controller][self._grp_btn[controller]]
+                ):
+                    # just pressed
+                    self._grp_pos[controller] = 0
+                if not input_data[controller][self._grp_btn[controller]] and (
+                    self._prev_data is None or self._prev_data[controller][self._grp_btn[controller]]
+                ):
+                    # just released
+                    self._grp_pos[controller] = 1
+
+            self._prev_data = input_data
+            rate_limiter()
+
+    def stop(self):
+        self._exit_requested = True
+        self.join()
+
+    def __enter__(self):
+        self.start()
+        return self
+
+    def __exit__(self, *_):
+        self.stop()
+
+    def stop_env_loop(self):
+        self._step_env = False
+
+    def environment_step_loop(self):
+        rate_limiter = SimpleFrameRate(RECORD_FPS, "env loop")
+        self._step_env = True
+        while self._step_env:
+            if self._exit_requested:
+                self._step_env = False
+                break
+            transforms, grippers = self.next_action()
+            actions = {}
+            for robot, transform in transforms.items():
+                action = dict(
+                    LimitedTQuatRelDictType(tquat=np.concatenate([transform.translation(), transform.rotation_q()]))
+                )
+
+                action.update(GripperDictType(gripper=grippers[robot]))
+                actions[robot] = action
+
+            with self._env_lock:
+                self._env.step(actions)
+            rate_limiter()
+
+
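+# Each tick of QuestReader.environment_step_loop() above sends one relative
+# action per robot: a 7-vector "tquat" (translation + quaternion) plus a
+# gripper command, rate-limited to RECORD_FPS.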
+def main():
+    if ROBOT_INSTANCE == RobotPlatform.HARDWARE:
+
+        camera_set = HardwareCameraSet([default_realsense(CAMERA_DICT)]) if CAMERA_DICT is not None else None  # type: ignore
+        env_rel = RCSFR3MultiEnvCreator()(
+            name2ip=ROBOT2IP,
+            camera_set=camera_set,
+            robot_cfg=default_fr3_hw_robot_cfg(async_control=True),
+            control_mode=ControlMode.CARTESIAN_TQuat,
+            gripper_cfg=default_fr3_hw_gripper_cfg(async_control=True),
+            max_relative_movement=(0.5, np.deg2rad(90)),
+            relative_to=RelativeTo.CONFIGURED_ORIGIN,
+        )
+        env_rel = StorageWrapper(
+            env_rel, DATASET_PATH, INSTRUCTION, batch_size=32, max_rows_per_group=100, max_rows_per_file=1000
+        )
+        MySimPublisher(MySimScene(), MQ3_ADDR)
+
+        # DIGITAL TWIN: comment out the line above and uncomment the lines below to use a digital twin
+        # Note: only supported for one arm at the moment
+
+        # from rcs.envs.sim import DigitalTwin
+        # robot_cfg = default_sim_robot_cfg("fr3_empty_world")
+        # sim_cfg = SimConfig()
+        # sim_cfg.async_control = True
+        # twin_env = SimMultiEnvCreator()(
+        #     name2id=ROBOT2IP,
+        #     robot_cfg=robot_cfg,
+        #     control_mode=ControlMode.JOINTS,
+        #     gripper_cfg=default_sim_gripper_cfg(),
+        #     sim_cfg=sim_cfg,
+        # )
+        # sim = env_rel.unwrapped.envs[next(iter(ROBOT2IP))].sim
+        # sim.open_gui()
+        # MujocoPublisher(sim.model, sim.data, MQ3_ADDR, visible_geoms_groups=list(range(1, 3)))
+        # env_rel = DigitalTwin(env_rel, twin_env)
+
+    else:
+        # FR3
+        robot_cfg = default_sim_robot_cfg("fr3_empty_world")
+
+        sim_cfg = SimConfig()
+        sim_cfg.async_control = True
+        env_rel = SimMultiEnvCreator()(
+            name2id=ROBOT2IP,
+            robot_cfg=robot_cfg,
+            control_mode=ControlMode.CARTESIAN_TQuat,
+            gripper_cfg=default_sim_gripper_cfg(),
+            # cameras=default_mujoco_cameraset_cfg(),
+            max_relative_movement=0.5,
+            relative_to=RelativeTo.CONFIGURED_ORIGIN,
+            sim_cfg=sim_cfg,
+        )
+        sim = env_rel.unwrapped.envs[next(iter(ROBOT2IP))].sim  # type: ignore
+        sim.open_gui()
+        MujocoPublisher(sim.model, sim.data, MQ3_ADDR, visible_geoms_groups=list(range(1, 3)))
+
+    env_rel.reset()
+
+    with env_rel, QuestReader(env_rel) as action_server:  # type: ignore
+        action_server.environment_step_loop()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/teleop/requirements.txt b/examples/teleop/requirements.txt
new file mode 100644
index 00000000..db25a19a
--- /dev/null
+++ b/examples/teleop/requirements.txt
@@ -0,0 +1 @@
+simpub @ git+https://github.com/intuitive-robots/SimPublisher.git
\ No newline at end of file
diff --git a/extensions/rcs_fr3/src/hw/Franka.cpp b/extensions/rcs_fr3/src/hw/Franka.cpp
index 1b6972a0..2014ccaf 100644
--- a/extensions/rcs_fr3/src/hw/Franka.cpp
+++ b/extensions/rcs_fr3/src/hw/Franka.cpp
@@ -174,6 +174,7 @@ void Franka::controller_set_joint_position(const common::Vector7d& desired_q) {
   if (this->running_controller == Controller::none) {
     this->controller_time = 0.0;
     this->get_joint_position();
+    this->joint_interpolator = common::LinearJointPositionTrajInterpolator();
   } else if (this->running_controller != Controller::jsc) {
     // runtime error
     throw std::runtime_error(
@@ -208,7 +209,7 @@ void Franka::osc_set_cartesian_position(
 
   if (this->running_controller == Controller::none) {
     this->controller_time = 0.0;
-    this->get_cartesian_position();
+    this->traj_interpolator = common::LinearPoseTrajInterpolator();
   } else if (this->running_controller != Controller::osc) {
     throw std::runtime_error(
         "Controller type must but osc but is " +
@@ -586,6 +587,7 @@ void Franka::zero_torque_controller()
 
 void Franka::move_home() {
   // sync
+  this->stop_control_thread();
   FrankaMotionGenerator motion_generator(
       this->cfg.speed_factor,
       common::robots_meta_config.at(this->cfg.robot_type).q_home);
diff --git a/pyproject.toml b/pyproject.toml
index 81f4b465..b317ef2b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,6 +28,8 @@ dependencies = ["websockets>=11.0",
     "pyopengl>=3.1.9",
     "ompl>=1.7.0",
     "rpyc==6.0.2",
+    "pyarrow",
+    "simplejpeg",
 ]
 readme = "README.md"
 maintainers = [
diff --git a/python/rcs/envs/creators.py b/python/rcs/envs/creators.py
index e336ae80..64bcce19 100644
--- a/python/rcs/envs/creators.py
+++ b/python/rcs/envs/creators.py
@@ -14,6 +14,7 @@
     ControlMode,
     GripperWrapper,
     HandWrapper,
+    MultiRobotWrapper,
     RelativeActionSpace,
     RelativeTo,
     RobotEnv,
@@ -132,6 +133,55 @@ def __call__(  # type: ignore
         return env
+
+
+class SimMultiEnvCreator(RCSHardwareEnvCreator):
+    def __call__(  # type: ignore
+        self,
+        name2id: dict[str, str],
+        control_mode: ControlMode,
+        robot_cfg: rcs.sim.SimRobotConfig,
+        gripper_cfg: rcs.sim.SimGripperConfig | None = None,
+        sim_cfg: rcs.sim.SimConfig | None = None,
+        hand_cfg: rcs.sim.SimTilburgHandConfig | None = None,
+        cameras: dict[str, SimCameraConfig] | None = None,
+        max_relative_movement: float | tuple[float, float] | None = None,
+        relative_to: RelativeTo = RelativeTo.LAST_STEP,
+        sim_wrapper: Type[SimWrapper] | None = None,
+    ) -> gym.Env:
+
+        simulation = sim.Sim(robot_cfg.mjcf_scene_path, sim_cfg)
+        ik = rcs.common.Pin(
+            robot_cfg.kinematic_model_path,
+            robot_cfg.attachment_site,
+            urdf=robot_cfg.kinematic_model_path.endswith(".urdf"),
+        )
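+        # NOTE: all simulated robots share one kinematic model and one robot config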
+        # ik = rcs_robotics_library._core.rl.RoboticsLibraryIK(robot_cfg.kinematic_model_path)
+
+        robots: dict[str, rcs.sim.SimRobot] = {}
+        for key in name2id:
+            robots[key] = rcs.sim.SimRobot(sim=simulation, ik=ik, cfg=robot_cfg)
+
+        envs = {}
+        for key in name2id:
+            env: gym.Env = RobotEnv(robots[key], control_mode)
+            env = RobotSimWrapper(env, simulation, sim_wrapper)
+            if gripper_cfg is not None:
+                gripper = rcs.sim.SimGripper(simulation, gripper_cfg)
+                env = GripperWrapper(env, gripper, binary=True)
+
+            if max_relative_movement is not None:
+                env = RelativeActionSpace(env, max_mov=max_relative_movement, relative_to=relative_to)
+            envs[key] = env
+
+        env = MultiRobotWrapper(envs)
+        if cameras is not None:
+            camera_set = typing.cast(
+                BaseCameraSet, SimCameraSet(simulation, cameras, physical_units=True, render_on_demand=True)
+            )
+            env = CameraSetWrapper(env, camera_set, include_depth=True)
+        return env
 
 
 class SimTaskEnvCreator(EnvCreator):
     def __call__(  # type: ignore
         self,
diff --git a/python/rcs/envs/sim.py b/python/rcs/envs/sim.py
index 74eb2c78..3b1cd523 100644
--- a/python/rcs/envs/sim.py
+++ b/python/rcs/envs/sim.py
@@ -449,3 +449,17 @@ def step(self, action: dict[str, Any]):
         # normalize
         reward /= 5  # type: ignore
         return obs, reward, success, truncated, info
+
+
+class DigitalTwin(gym.Wrapper):
+
+    def __init__(self, env, twin_env):
+        super().__init__(env)
+        self.twin_env = twin_env
+
+    def step(self, action):
+        obs, reward, terminated, truncated, info = super().step(action)
+
+        twin_obs, _, _, _, _ = self.twin_env.step(obs)
+        info["twin_obs"] = twin_obs
+        return obs, reward, terminated, truncated, info
diff --git a/python/rcs/envs/storage_wrapper.py b/python/rcs/envs/storage_wrapper.py
new file mode 100644
index 00000000..2ef0847b
--- /dev/null
+++ b/python/rcs/envs/storage_wrapper.py
@@ -0,0 +1,210 @@
+import operator
+from concurrent.futures import ThreadPoolExecutor, wait
+from itertools import chain
+from queue import Queue
+from typing import Any, Optional
+from uuid import uuid4
+
+import gymnasium as gym
+import numpy as np
+import pyarrow as pa
+import pyarrow.dataset as ds
+import simplejpeg
+
+
+class StorageWrapper(gym.Wrapper):
+    QueueSentinel = None
+
+    def __init__(
+        self,
+        env: gym.Env,
+        base_dir: str,
+        instruction: str,
+        batch_size: int = 32,
+        schema: Optional[pa.Schema] = None,
+        start_record: bool = False,
+        basename_template: Optional[str] = None,
+        max_rows_per_group: Optional[int] = None,
+        max_rows_per_file: Optional[int] = None,
+    ):
+        """
+        Asynchronously log environment transitions to a Parquet
+        dataset on disk.
+
+        Observation handling:
+        - Expects observations to be dictionaries.
+        - RGB camera frames are JPEG-encoded.
+        - Numpy arrays with ndim > 1 inside the observation dict are flattened
+          in-place, and their original shapes are stored alongside as
+          ``"<key>_shape"``. Nested dicts are traversed recursively.
+        - Lists/tuples of arrays are not supported.
+        - ``close()`` must be called to flush the final batch.
+
+        Parameters
+        ----------
+        env : gym.Env
+            The environment to wrap.
+        base_dir : str
+            Output directory where the Parquet dataset will be written.
+        instruction : str
+            Language instruction stored with every logged transition.
+        batch_size : int
+            Number of transitions to accumulate before flushing a RecordBatch
+            to the writer queue.
+        schema : Optional[pa.Schema], default=None
+            Optional Arrow schema to enforce for all batches. If None, the
+            schema is inferred from the first flushed batch and then reused.
+        start_record : bool, default=False
+            If True, recording is active immediately; otherwise it starts
+            paused until ``start_record()`` is called.
+        basename_template : Optional[str], default=None
+            Template controlling Parquet file basenames. Passed through to
+            ``pyarrow.dataset.write_dataset``.
+        max_rows_per_group : Optional[int], default=None
+            Maximum row count per Parquet row group. Passed through to
+            ``pyarrow.dataset.write_dataset``.
+        max_rows_per_file : Optional[int], default=None
+            Maximum row count per Parquet file. Passed through to
+            ``pyarrow.dataset.write_dataset``.
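+
+        Example
+        -------
+        A sketch of reading a recorded dataset back with pyarrow:
+
+        >>> import pyarrow.dataset as ds
+        >>> table = ds.dataset("my_dataset_dir", format="parquet").to_table()
+        >>> table.column("instruction")[0]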
+        """
+
+        super().__init__(env)
+        self.base_dir = base_dir
+        self.batch_size = batch_size
+        self.schema = schema
+        self.basename_template = basename_template
+        self.max_rows_per_group = max_rows_per_group
+        self.max_rows_per_file = max_rows_per_file
+        self.buffer: list[dict[str, Any]] = []
+        self.step_cnt = 0
+        self._pause = not start_record
+        self.instruction = instruction
+        self._success = False
+        self._prev_action = None
+        self.thread_pool = ThreadPoolExecutor()
+        self.queue: Queue[pa.Table | pa.RecordBatch] = Queue(maxsize=2)
+        self.uuid = uuid4()
+        self._writer_future = self.thread_pool.submit(self._writer_worker)
+
+    def _generator_from_queue(self):
+        while (batch := self.queue.get()) is not self.QueueSentinel:
+            yield batch
+
+    def _writer_worker(self):
+        gen = self._generator_from_queue()
+        first = next(gen)
+        ds.write_dataset(
+            data=chain([first], gen),
+            base_dir=self.base_dir,
+            format="parquet",
+            schema=self.schema,
+            existing_data_behavior="overwrite_or_ignore",
+            basename_template=self.basename_template,
+            max_rows_per_group=self.max_rows_per_group,
+            max_rows_per_file=self.max_rows_per_file,
+            partitioning=ds.partitioning(
+                schema=pa.schema(fields=[pa.field("uuid", pa.string())]),
+                flavor="filename",
+            ),
+        )
+
+    def _flush(self):
+        batch = pa.RecordBatch.from_pylist(self.buffer, schema=self.schema)
+        if self.schema is None:
+            self.schema = batch.schema
+        self.queue.put(batch)
+        self.buffer.clear()
+
+    def _flatten_arrays(self, d: dict[str, Any]):
+        # NOTE: lists / tuples of arrays are not supported
+        updates = {}
+        for k, v in d.items():
+            if isinstance(v, dict):
+                self._flatten_arrays(v)
+            elif isinstance(v, np.ndarray) and len(v.shape) > 1:
+                d[k] = v.flatten()
+                updates[f"{k}_shape"] = v.shape
+        d.update(updates)
+
+    def _encode_images(self, obs: dict[str, Any]):
+        _ = [
+            *self.thread_pool.map(
+                lambda cam: operator.setitem(
+                    obs["frames"][cam]["rgb"],
+                    "data",
+                    simplejpeg.encode_jpeg(np.ascontiguousarray(obs["frames"][cam]["rgb"]["data"])),
+                ),
+                obs["frames"],
+            )
+        ]
+
+    def step(self, action):
+        # NOTE: expects the observation to be a dictionary
+        if self._writer_future.done():
+            exc = self._writer_future.exception()
+            assert exc is not None
+            msg = "Writer thread failed"
+            raise RuntimeError(msg) from exc
+        obs, reward, terminated, truncated, info = self.env.step(action)
+        if not self._pause:
+            assert isinstance(obs, dict)
+            if "frames" in obs:
+                self._encode_images(obs)
+            self._flatten_arrays(obs)
+            if info.get("success"):
+                self.success()
+            self.buffer.append(
+                {
+                    "obs": obs,
+                    "reward": reward,
+                    "step": self.step_cnt,
+                    "uuid": self.uuid.hex,
+                    "success": self._success,
+                    "action": self._prev_action,
+                    "instruction": self.instruction,
+                }
+            )
+            self._prev_action = action
+            self.step_cnt += 1
+            if len(self.buffer) == self.batch_size:
+                self._flush()
+        return obs, reward, terminated, truncated, info
+
+    def success(self):
+        self._success = True
+
+    def stop_record(self):
+        self._pause = True
+        if len(self.buffer) > 0:
+            self._flush()
+
+    def start_record(self):
+        self._pause = False
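+    # reset() flushes any partially filled batch, pauses recording, and starts
+    # a fresh episode (new uuid, step counter back to zero)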
+    def reset(self, *, seed: int | None = None, options: dict[str, Any] | None = None):
+        if len(self.buffer) > 0:
+            self._flush()
+        self._pause = True
+        self._success = False
+        self._prev_action = None
+        obs, info = self.env.reset(seed=seed, options=options)
+        self.step_cnt = 0
+        self.uuid = uuid4()
+        return obs, info
+
+    def close(self):
+        if len(self.buffer) > 0:
+            self._flush()
+        self.queue.put(self.QueueSentinel)
+        wait([self._writer_future])
+        super().close()
diff --git a/python/rcs/envs/utils.py b/python/rcs/envs/utils.py
index 6f1e4460..1d440098 100644
--- a/python/rcs/envs/utils.py
+++ b/python/rcs/envs/utils.py
@@ -2,6 +2,7 @@
 from os import PathLike
 
 from digit_interface import Digit
+from rcs._core import common
 from rcs._core.common import BaseCameraConfig
 from rcs._core.sim import CameraType, SimCameraConfig
 from rcs.camera.digit_cam import DigitCam
@@ -17,8 +18,12 @@
 def default_sim_robot_cfg(scene: str = "fr3_empty_world", idx: str = "0") -> sim.SimRobotConfig:
     robot_cfg = rcs.sim.SimRobotConfig()
     robot_cfg.robot_type = rcs.scenes[scene].robot_type
+    robot_cfg.tcp_offset = common.Pose(common.FrankaHandTCPOffset())
     robot_cfg.add_id(idx)
-    robot_cfg.mjcf_scene_path = rcs.scenes[scene].mjb
+    if rcs.scenes[scene].mjb is not None:
+        robot_cfg.mjcf_scene_path = rcs.scenes[scene].mjb
+    else:
+        robot_cfg.mjcf_scene_path = rcs.scenes[scene].mjcf_scene
     robot_cfg.kinematic_model_path = rcs.scenes[scene].mjcf_robot
     # robot_cfg.kinematic_model_path = rcs.scenes[scene].urdf
     return robot_cfg