| """
|
| Example:
|
| ```shell
|
| python src/lerobot/scripts/server/policy_server.py \
|
| --host=127.0.0.1 \
|
| --port=8080 \
|
| --fps=30 \
|
| --inference_latency=0.033 \
|
| --obs_queue_timeout=1
|
| ```
|
| """
|
|
|
| import logging
|
| import pickle
|
| import threading
|
| import time
|
| from concurrent import futures
|
| from dataclasses import asdict
|
| from pprint import pformat
|
| from queue import Empty, Queue
|
|
|
| import draccus
|
| import grpc
|
| import torch
|
|
|
| from lerobot.policies.factory import get_policy_class
|
| from lerobot.scripts.server.configs import PolicyServerConfig
|
| from lerobot.scripts.server.constants import SUPPORTED_POLICIES
|
| from lerobot.scripts.server.helpers import (
|
| FPSTracker,
|
| Observation,
|
| RemotePolicyConfig,
|
| TimedAction,
|
| TimedObservation,
|
| get_logger,
|
| observations_similar,
|
| raw_observation_to_observation,
|
| receive_bytes_in_chunks,
|
| )
|
| from lerobot.transport import (
|
| async_inference_pb2,
|
| async_inference_pb2_grpc,
|
| )
|
|
|
|
|
| class PolicyServer(async_inference_pb2_grpc.AsyncInferenceServicer):
|
| prefix = "policy_server"
|
| logger = get_logger(prefix)
|
|
|
| def __init__(self, config: PolicyServerConfig):
|
| self.config = config
|
| self._running_event = threading.Event()
|
|
|
|
|
| self.fps_tracker = FPSTracker(target_fps=config.fps)
|
|
|
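# A queue of size 1 keeps only the most recent observation:
# _enqueue_observation evicts the stale entry when a newer one arrives, so
# inference always runs on the freshest data available.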
| self.observation_queue = Queue(maxsize=1)
|
|
|
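# Timesteps for which an action chunk was already produced; guarded by a
# lock because SendObservations and GetActions touch it from different
# handler threads.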
| self._predicted_timesteps_lock = threading.Lock()
|
| self._predicted_timesteps = set()
|
|
|
| self.last_processed_obs = None
|
|
|
|
|
| self.device = None
|
| self.policy_type = None
|
| self.lerobot_features = None
|
| self.actions_per_chunk = None
|
| self.policy = None
|
|
|
| @property
|
| def running(self):
|
| return self._running_event.is_set()
|
|
|
| @property
|
| def policy_image_features(self):
|
| return self.policy.config.image_features
|
|
|
| def _reset_server(self) -> None:
|
| """Flushes server state when new client connects."""
|
|
|
| self._running_event.clear()
|
| self.observation_queue = Queue(maxsize=1)
|
|
|
| with self._predicted_timesteps_lock:
|
| self._predicted_timesteps = set()
|
|
|
| def Ready(self, request, context):
|
| client_id = context.peer()
|
| self.logger.info(f"Client {client_id} connected and ready")
|
| self._reset_server()
|
| self._running_event.set()
|
|
|
| return async_inference_pb2.Empty()
|
|
|
| def SendPolicyInstructions(self, request, context):
|
| """Receive policy instructions from the robot client"""
|
|
|
| if not self.running:
|
| self.logger.warning("Server is not running. Ignoring policy instructions.")
|
| return async_inference_pb2.Empty()
|
|
|
| client_id = context.peer()
|
|
|
| policy_specs = pickle.loads(request.data)
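
# NOTE: unpickling client-supplied bytes assumes a trusted client; pickle can
# execute arbitrary code when deserializing untrusted data.
#
# Illustrative sketch (hypothetical field values; the actual dataclass lives
# in lerobot.scripts.server.helpers) of what a client sends:
#
#     RemotePolicyConfig(
#         policy_type="act",                    # must be in SUPPORTED_POLICIES
#         pretrained_name_or_path="user/model", # hypothetical checkpoint id
#         lerobot_features=...,                 # the robot's feature spec
#         actions_per_chunk=20,                 # illustrative value
#         device="cuda",
#     )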
|
|
|
| if not isinstance(policy_specs, RemotePolicyConfig):
|
| raise TypeError(f"Policy specs must be a RemotePolicyConfig. Got {type(policy_specs)}")
|
|
|
| if policy_specs.policy_type not in SUPPORTED_POLICIES:
|
| raise ValueError(
|
| f"Policy type {policy_specs.policy_type} not supported. "
|
| f"Supported policies: {SUPPORTED_POLICIES}"
|
| )
|
|
|
| self.logger.info(
|
| f"Receiving policy instructions from {client_id} | "
|
| f"Policy type: {policy_specs.policy_type} | "
|
| f"Pretrained name or path: {policy_specs.pretrained_name_or_path} | "
|
| f"Actions per chunk: {policy_specs.actions_per_chunk} | "
|
| f"Device: {policy_specs.device}"
|
| )
|
|
|
| self.device = policy_specs.device
|
| self.policy_type = policy_specs.policy_type
|
| self.lerobot_features = policy_specs.lerobot_features
|
| self.actions_per_chunk = policy_specs.actions_per_chunk
|
|
|
| policy_class = get_policy_class(self.policy_type)
|
|
|
| start = time.perf_counter()
|
| self.policy = policy_class.from_pretrained(policy_specs.pretrained_name_or_path)
|
| self.policy.to(self.device)
|
| end = time.perf_counter()
|
|
|
| self.logger.info(f"Time taken to put policy on {self.device}: {end - start:.4f} seconds")
|
|
|
| return async_inference_pb2.Empty()
|
|
|
| def SendObservations(self, request_iterator, context):
|
| """Receive observations from the robot client"""
|
| client_id = context.peer()
|
| self.logger.debug(f"Receiving observations from {client_id}")
|
|
|
| receive_time = time.time()
|
| start_deserialize = time.perf_counter()
|
| received_bytes = receive_bytes_in_chunks(
|
| request_iterator, self._running_event, self.logger
|
| )
|
| timed_observation = pickle.loads(received_bytes)
|
| deserialize_time = time.perf_counter() - start_deserialize
|
|
|
| self.logger.debug(f"Received observation #{timed_observation.get_timestep()}")
|
|
|
| obs_timestep = timed_observation.get_timestep()
|
| obs_timestamp = timed_observation.get_timestamp()
|
|
|
|
|
| fps_metrics = self.fps_tracker.calculate_fps_metrics(obs_timestamp)
|
|
|
| self.logger.info(
|
| f"Received observation #{obs_timestep} | "
|
| f"Avg FPS: {fps_metrics['avg_fps']:.2f} | "
|
| f"Target: {fps_metrics['target_fps']:.2f} | "
|
| f"One-way latency: {(receive_time - obs_timestamp) * 1000:.2f}ms"
|
| )
|
|
|
| self.logger.debug(
|
| f"Server timestamp: {receive_time:.6f} | "
|
| f"Client timestamp: {obs_timestamp:.6f} | "
|
| f"Deserialization time: {deserialize_time:.6f}s"
|
| )
|
|
|
if not self._enqueue_observation(timed_observation):
|
| self.logger.info(f"Observation #{obs_timestep} has been filtered out")
|
|
|
| return async_inference_pb2.Empty()
|
|
|
| def GetActions(self, request, context):
|
| """Returns actions to the robot client. Actions are sent as a single
|
| chunk, containing multiple actions."""
|
| client_id = context.peer()
|
| self.logger.debug(f"Client {client_id} connected for action streaming")
|
|
|
|
|
| try:
|
| getactions_starts = time.perf_counter()
|
| obs = self.observation_queue.get(timeout=self.config.obs_queue_timeout)
|
| self.logger.info(
|
| f"Running inference for observation #{obs.get_timestep()} (must_go: {obs.must_go})"
|
| )
|
|
|
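# Mark the timestep as predicted before running inference, so a duplicate
# observation arriving in the meantime gets filtered out.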
| with self._predicted_timesteps_lock:
|
| self._predicted_timesteps.add(obs.get_timestep())
|
|
|
| start_time = time.perf_counter()
|
| action_chunk = self._predict_action_chunk(obs)
|
| inference_time = time.perf_counter() - start_time
|
|
|
| start_time = time.perf_counter()
|
| actions_bytes = pickle.dumps(action_chunk)
|
| serialize_time = time.perf_counter() - start_time
|
|
|
|
|
| actions = async_inference_pb2.Actions(data=actions_bytes)
|
|
|
| self.logger.info(
|
| f"Action chunk #{obs.get_timestep()} generated | "
|
| f"Total time: {(inference_time + serialize_time) * 1000:.2f}ms"
|
| )
|
|
|
| self.logger.debug(
|
| f"Action chunk #{obs.get_timestep()} generated | "
|
| f"Inference time: {inference_time:.2f}s |"
|
| f"Serialize time: {serialize_time:.2f}s |"
|
| f"Total time: {inference_time + serialize_time:.2f}s"
|
| )
|
|
|
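# Pace the reply: if the chunk was produced faster than the configured
# inference_latency budget, sleep for the remainder so the client observes
# a roughly constant per-request latency.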
| time.sleep(
|
| max(0, self.config.inference_latency - max(0, time.perf_counter() - getactions_starts))
|
| )
|
|
|
| return actions
|
|
|
| except Empty:
|
| return async_inference_pb2.Empty()
|
|
|
| except Exception as e:
|
| self.logger.error(f"Error in StreamActions: {e}")
|
|
|
| return async_inference_pb2.Empty()
|
|
|
| def _obs_sanity_checks(self, obs: TimedObservation, previous_obs: TimedObservation) -> bool:
|
| """Check if the observation is valid to be processed by the policy"""
|
with self._predicted_timesteps_lock:

# Copy under the lock so the membership check below is thread-safe.
predicted_timesteps = set(self._predicted_timesteps)
|
|
|
| if obs.get_timestep() in predicted_timesteps:
|
| self.logger.debug(f"Skipping observation #{obs.get_timestep()} - Timestep predicted already!")
|
| return False
|
|
|
| elif observations_similar(obs, previous_obs, lerobot_features=self.lerobot_features):
|
| self.logger.debug(
|
| f"Skipping observation #{obs.get_timestep()} - Observation too similar to last obs predicted!"
|
| )
|
| return False
|
|
|
| else:
|
| return True
|
|
|
| def _enqueue_observation(self, obs: TimedObservation) -> bool:
|
| """Enqueue an observation if it must go through processing, otherwise skip it.
|
| Observations not in queue are never run through the policy network"""
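
# must_go observations bypass the similarity/dedup filters and are always
# enqueued; otherwise an observation is enqueued only if it passes
# _obs_sanity_checks against the last processed observation.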
|
|
|
| if (
|
| obs.must_go
|
| or self.last_processed_obs is None
|
| or self._obs_sanity_checks(obs, self.last_processed_obs)
|
| ):
|
| last_obs = self.last_processed_obs.get_timestep() if self.last_processed_obs else "None"
|
| self.logger.debug(
|
| f"Enqueuing observation. Must go: {obs.must_go} | Last processed obs: {last_obs}"
|
| )
|
|
|
|
|
| if self.observation_queue.full():
|
|
|
| _ = self.observation_queue.get_nowait()
|
| self.logger.debug("Observation queue was full, removed oldest observation")
|
|
|
|
|
| self.observation_queue.put(obs)
|
| return True
|
|
|
| return False
|
|
|
| def _time_action_chunk(self, t_0: float, action_chunk: list[torch.Tensor], i_0: int) -> list[TimedAction]:
|
| """Turn a chunk of actions into a list of TimedAction instances,
|
| with the first action corresponding to t_0 and the rest corresponding to
|
| t_0 + i*environment_dt for i in range(len(action_chunk))
|
| """
|
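# Worked example: with environment_dt = 1/30, t_0 = 100.0 and i_0 = 12, a
# 3-action chunk is stamped (100.000, 12), (100.033, 13), (100.067, 14):
# each action is scheduled one control period after the previous one.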
| return [
|
| TimedAction(timestamp=t_0 + i * self.config.environment_dt, timestep=i_0 + i, action=action)
|
| for i, action in enumerate(action_chunk)
|
| ]
|
|
|
| def _prepare_observation(self, observation_t: TimedObservation) -> Observation:
|
| """
|
Prepare an observation so it is ready for policy inference.

E.g.: to keep the observation sampling rate high (and network packets small), the client sends

uint8 [0, 255] images, which are converted here to float32 [0, 1] before running inference.
|
| """
|
|
|
| observation: Observation = raw_observation_to_observation(
|
| observation_t.get_observation(),
|
| self.lerobot_features,
|
| self.policy_image_features,
|
| self.device,
|
| )
|
|
|
|
|
| return observation
|
|
|
| def _get_action_chunk(self, observation: dict[str, torch.Tensor]) -> torch.Tensor:
|
| """Get an action chunk from the policy. The chunk contains only"""
|
| chunk = self.policy.predict_action_chunk(observation)
|
| if chunk.ndim != 3:
|
| chunk = chunk.unsqueeze(0)
|
|
|
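# The chunk appears to follow a (batch, chunk_length, action_dim) layout: a
# batch dim is added when missing, then the time axis is truncated to
# actions_per_chunk.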
| return chunk[:, : self.actions_per_chunk, :]
|
|
|
| def _predict_action_chunk(self, observation_t: TimedObservation) -> list[TimedAction]:
|
| """Predict an action chunk based on an observation"""
|
| inference_starts = time.perf_counter()
|
|
|
| """1. Prepare observation"""
|
| start_time = time.perf_counter()
|
| observation = self._prepare_observation(observation_t)
|
| preprocessing_time = time.perf_counter() - start_time
|
|
|
| self.last_processed_obs: TimedObservation = observation_t
|
|
|
| """2. Get action chunk"""
|
| start_time = time.perf_counter()
|
| action_tensor = self._get_action_chunk(observation)
|
| inference_time = time.perf_counter() - start_time
|
|
|
| """3. Post-inference processing"""
|
| start_time = time.perf_counter()
|
|
|
| action_tensor = action_tensor.cpu().squeeze(0)
|
|
|
| action_chunk = self._time_action_chunk(
|
| observation_t.get_timestamp(), list(action_tensor), observation_t.get_timestep()
|
| )
|
| postprocessing_time = time.perf_counter() - start_time
|
| inference_stops = time.perf_counter()
|
|
|
| self.logger.info(
|
| f"Observation {observation_t.get_timestep()} |"
|
| f"Inference time: {1000 * (inference_stops - inference_starts):.2f}ms"
|
| )
|
|
|
|
|
| self.logger.debug(
|
| f"Observation {observation_t.get_timestep()} | "
|
| f"Preprocessing time: {1000 * (preprocessing_time - inference_starts):.2f}ms | "
|
| f"Inference time: {1000 * (inference_time - preprocessing_time):.2f}ms | "
|
| f"Postprocessing time: {1000 * (postprocessing_time - inference_time):.2f}ms | "
|
| f"Total time: {1000 * (postprocessing_time - inference_starts):.2f}ms"
|
| )
|
|
|
| return action_chunk
|
|
|
| def stop(self):
|
| """Stop the server"""
|
| self._reset_server()
|
| self.logger.info("Server stopping...")
|
|
|
|
|
| @draccus.wrap()
|
| def serve(cfg: PolicyServerConfig):
|
| """Start the PolicyServer with the given configuration.
|
|
|
| Args:
|
cfg: PolicyServerConfig instance, parsed from the command line by draccus.
|
| """
|
| logging.info(pformat(asdict(cfg)))
|
|
|
|
|
| policy_server = PolicyServer(cfg)
|
|
|
|
|
| server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
|
| async_inference_pb2_grpc.add_AsyncInferenceServicer_to_server(policy_server, server)
|
| server.add_insecure_port(f"{cfg.host}:{cfg.port}")
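# add_insecure_port serves plaintext gRPC, which is fine on a trusted local
# network; TLS would require grpc.ssl_server_credentials together with
# add_secure_port instead.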
|
|
|
| policy_server.logger.info(f"PolicyServer started on {cfg.host}:{cfg.port}")
|
| server.start()
|
|
|
| server.wait_for_termination()
|
|
|
| policy_server.logger.info("Server terminated")
|
|
|
|
|
| if __name__ == "__main__":
|
| serve()
|
|
|