def mock_handle_individually()

in dynalab/tasks/task_io.py [0:0]


    def mock_handle_individually(self, model_name: str, use_gpu: bool, handle_func):
        """Run ``handle_func`` on each mock datapoint individually and verify output.

        Feeds the mock datapoints one at a time (batch size 1) through the
        model's torchserve-style handler and checks that every response is a
        single, JSON-compatible sample before passing it to
        ``self.verify_response``.

        Args:
            model_name: Model name used to build the mock torchserve context.
            use_gpu: Whether the mock context should target a GPU.
            handle_func: Torchserve-style handler callable taking
                ``(data, context)`` and returning a list of responses.

        Raises:
            AssertionError: If the handler does not return exactly one response
                per input sample.
            RuntimeError: If a string response is not valid JSON, or a
                non-string response is not JSON-serializable.
        """
        mock_torchserve_context = TaskIO._get_mock_torchserve_context(
            model_name, use_gpu
        )
        mock_datapoints, _ = self.get_mock_data()
        total = len(mock_datapoints)
        for i, data in enumerate(mock_datapoints):
            print(f"Test data {i + 1} / {total}")
            print(f"Mock input data is: {data}")
            mock_data = [{"body": data}]
            print("Getting model response ...")
            responses = handle_func(mock_data, mock_torchserve_context)
            # Explicit raise (instead of a bare assert) so the check still
            # runs under `python -O`, which strips assert statements.
            if len(responses) != 1:
                raise AssertionError(
                    "The model should return one torchserve sample !"
                )
            response = responses[0]
            print(f"Your model response is {response}")
            if isinstance(response, str):
                # Model can return either dict or string. Torchserve will
                # handle serialization, so a string must itself be valid JSON.
                try:
                    response = json.loads(response)
                except Exception as e:
                    raise RuntimeError("The model response isn't valid json !") from e
            else:
                try:
                    json.dumps(response)
                except Exception as e:
                    raise RuntimeError(
                        "The model response isn't serializable to json !"
                    ) from e
            print("Verifying model response ...")
            self.verify_response(response, data)