in src/sagemaker_pytorch_serving_container/ts_environment.py [0:0]
def batch_size(self):
    # type: () -> int
    """int: number of requests to batch before running inference on the server."""
    return self._batch_size
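For context, a minimal sketch of how a property-backed environment object like this could be wired up, assuming the batch size is parsed from an environment variable. The class shape, the variable name SAGEMAKER_TS_BATCH_SIZE, and the default of 1 are assumptions for illustration, not necessarily the toolkit's actual implementation:

import os


class TorchServeEnvironment:
    """Hypothetical sketch: TorchServe-related settings parsed from environment variables."""

    def __init__(self):
        # Assumed env var name; a default of 1 means requests are not batched.
        self._batch_size = int(os.environ.get("SAGEMAKER_TS_BATCH_SIZE", 1))

    @property
    def batch_size(self):
        # type: () -> int
        """int: number of requests to batch before running inference on the server."""
        return self._batch_size


# Usage: read the configured batch size once at startup.
env = TorchServeEnvironment()
print(env.batch_size)  # 1 unless SAGEMAKER_TS_BATCH_SIZE is set

Exposing the value through a read-only property keeps the parsed setting immutable after construction while still allowing attribute-style access (env.batch_size rather than env.batch_size()).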