def _register_grad_hook()

in src/fairseq/fairseq/legacy_distributed_data_parallel.py


    def _register_grad_hook(self):
        """
        This function registers the callback all-reduction function for the
        NCCL backend. All gradients will be all reduced in one single step.
        The NCCL reduction will directly be enqueued into the default CUDA
        stream. Therefore, no synchronization is needed.
        """

        def all_reduce(params):
            buffer = self.buffer
            nonzero_buffer = False
            if len(params) > 1:
                # pack each grad into a contiguous slice of the flat buffer;
                # params without a grad contribute zeros
                offset = 0
                for p in params:
                    sz = p.numel()
                    if p.grad is not None:
                        buffer[offset:offset+sz].copy_(p.grad.data.view(-1))
                        nonzero_buffer = True
                    else:
                        buffer[offset:offset+sz].zero_()
                    offset += sz
            else:
                # we only have a single grad to all-reduce
                p = params[0]
                if p.grad is not None:
                    # all-reduce the grad in place, no packing needed
                    buffer = p.grad.data
                    nonzero_buffer = True
                elif p.numel() <= self.buffer.numel():
                    # reuse a zeroed slice of the shared buffer
                    buffer = buffer[:p.numel()]
                    buffer.zero_()
                else:
                    # param is larger than the buffer; use a temporary
                    buffer = torch.zeros_like(p)

            if nonzero_buffer:
                # pre-divide by the world size so the summing all-reduce
                # yields an average
                buffer.div_(self.world_size)

            distributed_utils.all_reduce(buffer, self.process_group)

            # copy all-reduced grads back into their original place
            offset = 0
            for p in params:
                sz = p.numel()
                if p.grad is not None:
                    p.grad.data.copy_(buffer[offset:offset+sz].view_as(p))
                else:
                    p.grad = buffer[offset:offset+sz].view_as(p).clone()
                offset += sz

        def reduction_fn():
            # reduction_fn may be queued several times (once per grad hook),
            # but it only needs to run once per backward pass
            if not self.need_reduction or self.accumulate_grads:
                return
            self.need_reduction = False

            if self.buffer is None:
                # lazily allocate the flat buffer with the same dtype and
                # device as the model parameters
                self.buffer = next(self.module.parameters()).new(self.buffer_size)

            # All-reduce the gradients in buckets
            offset = 0
            buffered_params = []
            for param in self.module.parameters():
                if not param.requires_grad:
                    continue
                if param.grad is None:
                    param.grad = torch.zeros_like(param)
                if param.grad.requires_grad:
                    raise RuntimeError("DistributedDataParallel only works "
                                       "with gradients that don't require "
                                       "grad")
                sz = param.numel()
                if sz > self.buffer.numel():
                    # all-reduce big params directly
                    all_reduce([param])
                else:
                    if offset + sz > self.buffer.numel():
                        # flush the current bucket before it overflows
                        all_reduce(buffered_params)
                        offset = 0
                        buffered_params.clear()
                    buffered_params.append(param)
                    offset += sz

            if len(buffered_params) > 0:
                all_reduce(buffered_params)

        # Now register the reduction hook on the parameters
        for p in self.module.parameters():

            def allreduce_hook(*unused):
                # mark that a reduction is pending and queue reduction_fn to
                # run once after the backward pass finishes
                self.need_reduction = True
                Variable._execution_engine.queue_callback(reduction_fn)

            if p.requires_grad:
                p.register_hook(allreduce_hook)
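
For readers who want to see the pattern in isolation, below is a minimal,
self-contained sketch of the same bucketed gradient all-reduce, written
against plain torch.distributed rather than fairseq's distributed_utils. It
assumes a process group has already been initialized with
torch.distributed.init_process_group; the names all_reduce_grads_bucketed,
model, and bucket_size are illustrative and are not fairseq APIs.

    import torch
    import torch.distributed as dist


    def all_reduce_grads_bucketed(model, bucket_size=2 ** 20):
        """Average gradients across workers by packing them into flat buckets."""
        world_size = dist.get_world_size()
        params = [p for p in model.parameters()
                  if p.requires_grad and p.grad is not None]
        if not params:
            return
        buffer = params[0].grad.new_zeros(bucket_size)

        def flush(bucket):
            # pack -> all-reduce -> unpack one bucket of gradients
            offset = 0
            for p in bucket:
                sz = p.grad.numel()
                buffer[offset:offset + sz].copy_(p.grad.reshape(-1))
                offset += sz
            buffer[:offset].div_(world_size)  # pre-divide so the sum is an average
            dist.all_reduce(buffer[:offset])
            offset = 0
            for p in bucket:
                sz = p.grad.numel()
                p.grad.copy_(buffer[offset:offset + sz].view_as(p.grad))
                offset += sz

        bucket, used = [], 0
        for p in params:
            sz = p.grad.numel()
            if sz > bucket_size:
                # oversized grads are all-reduced on their own
                p.grad.div_(world_size)
                dist.all_reduce(p.grad)
                continue
            if used + sz > bucket_size:
                flush(bucket)
                bucket, used = [], 0
            bucket.append(p)
            used += sz
        if bucket:
            flush(bucket)

The second sketch illustrates the hook/queue_callback mechanism used at the
end of _register_grad_hook: a per-parameter gradient hook queues a callback
that the autograd engine runs once, after the backward pass has finished. It
relies on the same internal Variable._execution_engine API as the code above;
demo_queue_callback is a made-up name for illustration only.

    import torch
    from torch.autograd import Variable


    def demo_queue_callback():
        w = torch.randn(3, requires_grad=True)

        def on_backward_done():
            # runs once, after the whole backward pass has finished
            print("backward finished, grad norm:", w.grad.norm().item())

        def grad_hook(*unused):
            # queued callbacks are executed when backward() completes
            Variable._execution_engine.queue_callback(on_backward_done)

        w.register_hook(grad_hook)
        (w * 2).sum().backward()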