tfx/components/infra_validator/model_server_clients/tensorflow_serving_client.py
def _GetServingStatus(self) -> types.ModelServingStatus:
"""Check whether the model is available for query or not.
In TensorFlow Serving, model is READY if and only if the state from
_GetModelStatus() is AVAILABLE. If returned state is END, it will never
become READY therefore returns UNAVAILABLE. Otherwise it will return
NOT_READY.
Returns:
A ModelState.
"""
try:
resp = self._GetModelStatus()
except grpc.RpcError as e:
logging.info('Model status is not available yet:\n%s', e)
return types.ModelServingStatus.NOT_READY
# No model versions are available yet (the list is empty).
if not resp.model_version_status:
return types.ModelServingStatus.NOT_READY
# The model is READY only when every serving model version is in the
# AVAILABLE state. In TensorFlow Serving, the model state lifecycle is
#   START -> LOADING -> AVAILABLE -> UNLOADING -> END
# on a successful load, or
#   START -> LOADING -> END
# on a failed load. A version is available iff its state is AVAILABLE, and
# unavailable for good iff its state is END.
# https://github.com/tensorflow/serving/blob/master/tensorflow_serving/apis/get_model_status.proto
if all(mvs.state == State.AVAILABLE
for mvs in resp.model_version_status):
return types.ModelServingStatus.READY
if any(mvs.state == State.END
for mvs in resp.model_version_status):
return types.ModelServingStatus.UNAVAILABLE
return types.ModelServingStatus.NOT_READY
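
# A minimal sketch, for illustration only, of the _GetModelStatus() helper
# used above, built on the public TensorFlow Serving ModelService gRPC API
# (get_model_status_pb2). The member names self._client (a ModelServiceStub)
# and self._model_name are assumptions made for this sketch, not necessarily
# the real attributes of this class.
from tensorflow_serving.apis import get_model_status_pb2

def _GetModelStatus(self) -> get_model_status_pb2.GetModelStatusResponse:
  """Fetches the per-version status of the model from ModelService."""
  request = get_model_status_pb2.GetModelStatusRequest()
  request.model_spec.name = self._model_name  # Assumed attribute.
  # The response's model_version_status list is what _GetServingStatus()
  # inspects to decide READY / NOT_READY / UNAVAILABLE.
  return self._client.GetModelStatus(request)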
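
# Hypothetical caller-side usage: poll _GetServingStatus() until the model
# becomes READY or is declared UNAVAILABLE. The helper name, the polling
# interval, and the deadline handling are assumptions for this sketch.
import time

def _wait_until_ready(client, deadline_secs: float = 60.0) -> bool:
  """Polls until READY; returns False on UNAVAILABLE or deadline expiry."""
  deadline = time.time() + deadline_secs
  while time.time() < deadline:
    status = client._GetServingStatus()
    if status == types.ModelServingStatus.READY:
      return True
    if status == types.ModelServingStatus.UNAVAILABLE:
      return False  # A version hit END; the model will never become READY.
    time.sleep(1)  # NOT_READY: still loading, retry shortly.
  return False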