fairscale/optim/adam.py [32:40]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
            self.master = master_tensor
            self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}

        def get(self, device: torch.device) -> torch.Tensor:
            retval = self._per_device_tensors.get(device, None)
            if retval is None:
                retval = self.master.to(device=device, non_blocking=True, copy=True)
                self._per_device_tensors[device] = retval
            return retval
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -



fairscale/optim/grad_scaler.py [32:40]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.master = master_tensor
        self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}

    def get(self, device: torch.device) -> torch.Tensor:
        retval = self._per_device_tensors.get(device, None)
        if retval is None:
            retval = self.master.to(device=device, non_blocking=True, copy=True)
            self._per_device_tensors[device] = retval
        return retval
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
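Note: both excerpts are the tail of the same lazy per-device caching helper. A master tensor is stored once, and get() returns a cached copy for each requested device, creating that copy only on first use. Below is a minimal self-contained sketch of the pattern for reference; the class name LazyDeviceCache, the docstring, and the usage lines are illustrative only and are not taken from either file.

from typing import Dict

import torch


class LazyDeviceCache:
    """Illustrative sketch (hypothetical name): lazily serve per-device
    copies of a single master tensor, caching one copy per device."""

    def __init__(self, master_tensor: torch.Tensor) -> None:
        self.master = master_tensor
        self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}

    def get(self, device: torch.device) -> torch.Tensor:
        retval = self._per_device_tensors.get(device, None)
        if retval is None:
            # First request for this device: copy the master over once and
            # cache it; non_blocking lets the copy overlap other work.
            retval = self.master.to(device=device, non_blocking=True, copy=True)
            self._per_device_tensors[device] = retval
        return retval


# Usage sketch: one cache per master tensor, queried from any device.
scale = torch.full((1,), 65536.0)
cache = LazyDeviceCache(scale)
cpu_copy = cache.get(torch.device("cpu"))   # first call makes a copy
same_copy = cache.get(torch.device("cpu"))  # second call hits the cache
assert cpu_copy is same_copy

Since the two bodies are byte-for-byte identical, the duplication could presumably be removed by having both modules import one shared helper of this shape, assuming the two copies are not expected to diverge.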