def __init__()

in optimum_benchmark/trackers/energy.py [0:0]


    def __init__(self, device: str, backend: str, device_ids: Optional[Union[str, int, List[int]]] = None):
        self.device = device
        self.backend = backend
        self.device_ids = device_ids

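        # Convenience flags derived from the configuration: any CUDA device,
        # and the PyTorch + CUDA combination specifically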
        self.is_gpu = self.device == "cuda"
        self.is_pytorch_cuda = (self.backend, self.device) == ("pytorch", "cuda")

        LOGGER.info("\t\t+ Tracking RAM and CPU energy consumption")

        if self.is_gpu:
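            # normalize device_ids into a list of integer GPU indices,
            # e.g. "0,1" -> [0, 1], 0 -> [0], and a list is kept as-is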
            if isinstance(self.device_ids, str):
                self.device_ids = list(map(int, self.device_ids.split(",")))
            elif isinstance(self.device_ids, int):
                self.device_ids = [self.device_ids]
            elif isinstance(self.device_ids, list):
                pass  # already a list of GPU indices
            elif self.device_ids is None:
                raise ValueError("GPU device IDs must be provided for energy tracking on GPUs")
            else:
                raise ValueError("GPU device IDs must be a string, an integer, or a list of integers")

            LOGGER.info(f"\t\t+ Tracking GPU energy consumption on devices {self.device_ids}")

        if not is_codecarbon_available():
            raise ValueError(
                "The library codecarbon is required to run energy benchmark, but is not installed. "
                "Please install it through `pip install codecarbon`."
            )

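        # Prefer the online EmissionsTracker, which tries to resolve location and carbon-intensity data over the network;
        # if its initialization fails (e.g., no internet access), fall back to the OfflineEmissionsTracker below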
        try:
            self.emission_tracker = EmissionsTracker(
                log_level="warning",
                # tracking_mode="process" only tries to track memory consumption of current process
                # but computes cpu and gpu energy consumption based on the machine-level tracking
                tracking_mode="machine",
                gpu_ids=self.device_ids,
                # allow multiple trackers to run on the same machine (e.g., for distributed inference/training)
                # and for testing purposes (we run many benchmarks in parallel);
                # https://github.com/mlco2/codecarbon/pull/562 added this feature,
                # but it doesn't explain why one tracker would be better than multiple
                allow_multiple_runs=True,
                output_file="codecarbon.csv",
                measure_power_secs=POWER_CONSUMPTION_SAMPLING_RATE,
            )
        except Exception:
            LOGGER.warning("\t\t+ Falling back to Offline Emissions Tracker")

            if os.environ.get("COUNTRY_ISO_CODE", None) is None:
                LOGGER.warning(
                    "\t\t+ Offline Emissions Tracker requires COUNTRY_ISO_CODE to be set. "
                    "We will set it to USA but the carbon footprint might be inaccurate."
                )

            self.emission_tracker = OfflineEmissionsTracker(
                log_level="warning",
                # tracking_mode="process" only tries to track memory consumption of current process
                # but computes cpu and gpu energy consumption based on the machine-level tracking
                tracking_mode="machine",
                gpu_ids=self.device_ids,
                # allow multiple trackers to run on the same machine (e.g., for distributed inference/training)
                # and for testing purposes (we run many benchmarks in parallel);
                # https://github.com/mlco2/codecarbon/pull/562 added this feature,
                # but it doesn't explain why one tracker would be better than multiple
                allow_multiple_runs=True,
                output_file="codecarbon.csv",
                measure_power_secs=POWER_CONSUMPTION_SAMPLING_RATE,
                country_iso_code=os.environ.get("COUNTRY_ISO_CODE", "USA"),
            )

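        # Per-component energy readings, populated once tracking has run
        # (codecarbon reports energy in kWh)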
        self.total_energy: Optional[float] = None
        self.cpu_energy: Optional[float] = None
        self.gpu_energy: Optional[float] = None
        self.ram_energy: Optional[float] = None
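
A minimal instantiation sketch based only on the signature above. The class name EnergyTracker is assumed from the module path and is not shown in this excerpt; only the constructor arguments (device, backend, optional device_ids) are confirmed by the code.

    # hypothetical import; the actual class name is not visible in this excerpt
    from optimum_benchmark.trackers.energy import EnergyTracker

    # CPU-only tracking: no GPU ids are needed
    cpu_tracker = EnergyTracker(device="cpu", backend="pytorch")

    # CUDA tracking: device_ids may be a comma-separated string, an int, or a list of ints
    gpu_tracker = EnergyTracker(device="cuda", backend="pytorch", device_ids="0,1")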