in backends/python/server/text_embeddings_server/utils/device.py [0:0]
import torch

# is_hpu(), use_ipex(), and ALLOW_REDUCED_PRECISION are module-level helpers
# defined elsewhere in this file.


def get_device():
    # Default to CPU, then upgrade to the best available accelerator.
    device = torch.device("cpu")
    if torch.cuda.is_available():
        device = torch.device("cuda")
    elif is_hpu():
        # Importing the Habana bridge registers the "hpu" device with torch.
        import habana_frameworks.torch.core as htcore  # noqa: F401

        # Workaround for the perf degradation seen with PyTorch 2.5.
        if ALLOW_REDUCED_PRECISION:
            torch._C._set_math_sdp_allow_fp16_bf16_reduction(True)
        if hasattr(torch, "hpu") and torch.hpu.is_available():  # type: ignore
            device = torch.device("hpu")
    elif use_ipex():
        # IPEX exposes the "xpu" device for Intel GPUs.
        import intel_extension_for_pytorch as ipex  # noqa: F401

        if hasattr(torch, "xpu") and torch.xpu.is_available():
            device = torch.device("xpu")

    return device
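
A minimal usage sketch, not taken from the repository: it only illustrates that the torch.device returned by get_device() can be passed to standard PyTorch APIs to place a module and its inputs. The Linear layer and tensor shapes are hypothetical placeholders.

import torch
import torch.nn as nn

# Illustrative only: consume get_device() and place a model plus inputs on it.
device = get_device()
print(f"Selected device: {device}")

model = nn.Linear(16, 4).to(device)
inputs = torch.randn(2, 16, device=device)
outputs = model(inputs)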