Francesco Capuano committed on
Commit
cb01674
·
1 Parent(s): 8e3c75f

ahahah gpu too brr

Browse files
lerobot/scripts/server/policy_server.py CHANGED
@@ -267,6 +267,7 @@ class PolicyServer(async_inference_pb2_grpc.AsyncInferenceServicer):
267
  f"Action chunk size: {len(action_chunk)}"
268
  )
269
 
 
270
  return action
271
 
272
  def _stream_action_chunks_from_dataset(self) -> Generator[List[torch.Tensor], None, None]:
 
267
  f"Action chunk size: {len(action_chunk)}"
268
  )
269
 
270
+ time.sleep(inference_latency)
271
  return action
272
 
273
  def _stream_action_chunks_from_dataset(self) -> Generator[List[torch.Tensor], None, None]: