optimum_benchmark/trackers/latency.py [213:224]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.device = device
        self.backend = backend

        self.is_pytorch_cuda = (self.backend, self.device) == ("pytorch", "cuda")

        if self.is_pytorch_cuda:
            LOGGER.info("\t\t+ Tracking latency using Pytorch CUDA events")
        else:
            LOGGER.info("\t\t+ Tracking latency using CPU performance counter")

        self.start_events: List[Union[float, torch.cuda.Event]] = []
        self.end_events: List[Union[float, torch.cuda.Event]] = []
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
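
For context, the sketch below illustrates how start/end lists with this shape are typically filled on the two paths: torch.cuda.Event objects on the PyTorch CUDA path (GPU kernels launch asynchronously, so a host-side clock alone would mostly measure launch overhead) and time.perf_counter() timestamps otherwise. SimpleLatencyTracker, record_start, and record_end are assumed names used only for illustration, not the API defined in latency.py.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import time
from typing import List, Union

import torch


class SimpleLatencyTracker:
    """Hypothetical sketch of the start/end event pattern above; not the class in latency.py."""

    def __init__(self, backend: str, device: str) -> None:
        self.is_pytorch_cuda = (backend, device) == ("pytorch", "cuda")
        self.start_events: List[Union[float, torch.cuda.Event]] = []
        self.end_events: List[Union[float, torch.cuda.Event]] = []

    def record_start(self) -> None:
        if self.is_pytorch_cuda:
            # GPU work is asynchronous, so time it with CUDA events recorded
            # on the current stream rather than with a host-side clock.
            event = torch.cuda.Event(enable_timing=True)
            event.record()
            self.start_events.append(event)
        else:
            # On CPU, a monotonic performance counter (in seconds) is sufficient.
            self.start_events.append(time.perf_counter())

    def record_end(self) -> None:
        if self.is_pytorch_cuda:
            event = torch.cuda.Event(enable_timing=True)
            event.record()
            self.end_events.append(event)
        else:
            self.end_events.append(time.perf_counter())
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -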



optimum_benchmark/trackers/latency.py [600:611]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.device = device
        self.backend = backend

        self.is_pytorch_cuda = (self.backend, self.device) == ("pytorch", "cuda")

        if self.is_pytorch_cuda:
            LOGGER.info("\t\t+ Tracking latency using Pytorch CUDA events")
        else:
            LOGGER.info("\t\t+ Tracking latency using CPU performance counter")

        self.start_events: List[Union[float, torch.cuda.Event]] = []
        self.end_events: List[Union[float, torch.cuda.Event]] = []
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
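
Continuing the sketch above, the paired markers can be reduced to per-call latencies: CUDA events report elapsed time in milliseconds via Event.elapsed_time after a torch.cuda.synchronize(), while the CPU path is a plain difference of perf_counter timestamps already in seconds. compute_latencies is a hypothetical helper name, not part of latency.py.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
from typing import List, Union

import torch


def compute_latencies(
    is_pytorch_cuda: bool,
    start_events: List[Union[float, torch.cuda.Event]],
    end_events: List[Union[float, torch.cuda.Event]],
) -> List[float]:
    """Hypothetical helper: turn paired start/end markers into latencies in seconds."""
    if is_pytorch_cuda:
        # Ensure every recorded event has completed before querying it.
        torch.cuda.synchronize()
        # Event.elapsed_time returns milliseconds; convert to seconds.
        return [start.elapsed_time(end) / 1e3 for start, end in zip(start_events, end_events)]
    # perf_counter timestamps are already in seconds; a plain difference suffices.
    return [end - start for start, end in zip(start_events, end_events)]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -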



