def mock_handle_with_batching()

in dynalab/tasks/task_io.py
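
Given a model's handle_func, this method simulates a single batched TorchServe request: all mock datapoints are packed into one request body as newline-delimited JSON, the handler is invoked once against a mock TorchServe context, and each line of the returned response is parsed back and verified against its corresponding input datapoint.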


    def mock_handle_with_batching(self, model_name: str, use_gpu: bool, handle_func):
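        # Build a mock TorchServe context so handle_func can run outside a
        # real TorchServe server.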
        mock_torchserve_context = TaskIO._get_mock_torchserve_context(
            model_name, use_gpu
        )
        mock_datapoints, _ = self.get_mock_data()
        N = len(mock_datapoints)
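        # Pack all mock datapoints into a single batched request whose body is
        # newline-delimited JSON, one line per datapoint.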
        mock_data = [
            {
                "body": "\n".join(
                    json.dumps(sample, ensure_ascii=False) for sample in mock_datapoints
                )
            }
        ]
        print("Getting model response ...")
        responses = handle_func(mock_data, mock_torchserve_context)
        assert len(responses) == 1, "The model should return a single TorchServe response!"

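        # The single response string is expected to contain one JSON line per
        # datapoint; parse each line back into a dict.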
        try:
            responses = [json.loads(r) for r in responses[0].splitlines()]
        except Exception as e:
            raise RuntimeError("The model response isn't valid JSON!") from e

        assert len(responses) == N, "The model should return one response per datapoint!"
        for i, (data, response) in enumerate(zip(mock_datapoints, responses)):
            print(f"Test data {i+1} / {N}")
            print("Mock input data is:", data)
            print(f"Your model response is {response}")
            print("Verifying model response ...")
            self.verify_response(response, data)
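
For context, a handle_func compatible with this method must accept a list containing a single batched request (a dict whose "body" is newline-delimited JSON) and return a list containing a single newline-delimited JSON string, one line per datapoint. The sketch below illustrates that contract; the echo_handle name and the echo field are hypothetical and not part of dynalab:

    import json

    def echo_handle(data, context):
        # Parse the newline-delimited JSON body of the single batched request.
        samples = [json.loads(line) for line in data[0]["body"].splitlines()]
        # A real model would run inference here; this sketch just echoes inputs.
        predictions = [{"echo": sample} for sample in samples]
        # Return one newline-delimited JSON string, one line per datapoint.
        return ["\n".join(json.dumps(p, ensure_ascii=False) for p in predictions)]

    request = [
        {"body": "\n".join(json.dumps(s) for s in [{"uid": "a"}, {"uid": "b"}])}
    ]
    responses = echo_handle(request, context=None)
    assert len(responses) == 1
    assert len(responses[0].splitlines()) == 2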