in optimum/onnxruntime/base.py [0:0]
def to(self, *args, **kwargs):
    """
    Moves the session to the specified device by updating the execution provider and its options.

    Args:
        device (`str`, `int` or `torch.device`):
            The device to move the session to. It can be a string (e.g. "cuda", "cpu"), an integer
            (e.g. 0 for GPU 0), or a `torch.device` object.

    Returns:
        `ORTSessionMixin`: The updated session.

    Raises:
        ValueError: If the device is not supported or if the provider is not available.
    """
    dtype = None
    device = None

    # Positional arguments may carry a device, a dtype, or both, mirroring `torch.nn.Module.to`.
    # `torch.device` instances are handled by the first branch, so no separate branch is needed.
    for arg in args:
        if isinstance(arg, (str, torch.device)):
            device = arg
        elif isinstance(arg, int):
            device = torch.device(arg)
        elif isinstance(arg, torch.dtype):
            dtype = arg

    # Keyword arguments take precedence over positional ones.
    for key, value in kwargs.items():
        if key == "device":
            device = value
        elif key == "dtype":
            dtype = value
    if dtype is not None:
        # we don't support changing the dtype of the model
        return self

    if device is None:
        # no device was provided, we don't change the device
        return self

    # Resolve the target device, its provider options, and the matching execution provider.
    device, provider_option = parse_device(device)
    provider = get_provider_for_device(device)
    validate_provider_availability(provider)

    if device == self.device:
        # already on the requested device, nothing to do
        return self

    self.session.set_providers([provider], provider_options=[provider_option])
    if self.use_io_binding is None and self.provider == "CUDAExecutionProvider":
        logger.info(
            "`use_io_binding` was set to `None` before the provider was changed to CUDAExecutionProvider. "
            "Setting `use_io_binding=True` to leverage IO Binding and improve performance. "
            "You can disable it by setting `model.use_io_binding=False`."
        )
        self.use_io_binding = True

    self._device = device
    return self
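
A minimal usage sketch for this method, assuming `onnxruntime-gpu` is installed (so CUDAExecutionProvider is available) and using `ORTModelForFeatureExtraction` as an example class that exposes this `to` method; the checkpoint name is a placeholder, not a real model.

import torch
from optimum.onnxruntime import ORTModelForFeatureExtraction

# "my-org/my-onnx-model" is a placeholder for any exported ONNX checkpoint.
model = ORTModelForFeatureExtraction.from_pretrained("my-org/my-onnx-model")

# Equivalent ways to move the session to the first GPU. Each call switches the
# execution provider to CUDAExecutionProvider and, if `use_io_binding` was None,
# enables IO binding.
model = model.to("cuda")
model = model.to(0)
model = model.to(torch.device("cuda:0"))

# A torch.dtype argument is accepted but ignored: the session's dtype is not changed.
model = model.to(torch.float16)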