in backends/python/server/text_embeddings_server/server.py
def __init__(self, model: Model):
    self.model = model
    # Force inference mode for the lifetime of EmbeddingService
    self._inference_mode_raii_guard = torch._C._InferenceMode(True)
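Holding the torch._C._InferenceMode(True) guard keeps PyTorch's inference mode enabled for as long as the EmbeddingService instance lives, so no autograd state is recorded for any forward pass it runs. Below is a minimal sketch of the same effect using the public torch.inference_mode API, applied per call rather than held for the object's lifetime; the embed method and model.embed call are illustrative assumptions, not the file's actual handlers.

import torch


class EmbeddingService:
    def __init__(self, model):
        self.model = model

    @torch.inference_mode()  # autograd tracking disabled for every call
    def embed(self, batch):
        # Hypothetical forward pass; the real service dispatches gRPC requests
        return self.model.embed(batch)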