forward() — excerpt from crypten/nn/privacy/dp_split.py


    def forward(self, input):
        """Run a forward pass of the wrapped model under the DP-split protocol.

        In eval mode this is a plain forward pass: the feature-source party
        returns ``self.model(input)`` and every other party returns ``None``.

        In training mode the feature-source party computes and caches
        ``self.logits`` and ``self.preds`` (sigmoid of logits) on ``self``;
        non-source parties set those attributes to ``None``. When prediction
        sizes are being cached, the size is exchanged via
        ``self._communicate_and_cache`` so that non-source parties can hold a
        correctly-shaped placeholder.

        Args:
            input: model input; only meaningful on the feature-source party
                (non-source parties ignore it). Exact tensor type is not
                visible from this excerpt — presumably a torch tensor.

        Returns:
            The logits tensor on the feature-source party, ``None`` elsewhere
            (in training mode, non-source parties also get ``self.preds`` set
            to an uninitialized placeholder tensor).

        Raises:
            ValueError: if the computed prediction size disagrees with the
                size previously stored in ``self.cache``.
        """
        # During eval mode, just conduct forward pass.
        if not self.training:
            if self.is_feature_src():
                return self.model(input)
            # Parties without model should return None
            return None

        if self.is_feature_src():
            self.logits = self.model(input)
            self.preds = self.logits.sigmoid()

            # Extract saved input to last layer from autograd tape if we need it
            # NOTE(review): `_saved_mat1` is a private autograd attribute of the
            # addmm backward node — this assumes the model ends in a linear
            # layer and depends on PyTorch internals; confirm across versions.
            if cfg.nn.dpsmpc.protocol == "layer_estimation":
                self.last_input = self.logits.grad_fn._saved_mat1

            # Check that prediction size matches cached size
            preds_size = self.preds.size()
            if "preds_size" in self.cache:
                cache_size = self.cache["preds_size"]
                if preds_size != cache_size:
                    raise ValueError(
                        f"Logit size does not match cached size: {preds_size} vs. {cache_size}"
                    )
        else:
            # Non-source parties hold no logits/predictions of their own.
            self.logits = None
            self.preds = None
            preds_size = None

        # Cache predictions size - Note batch size must match here
        # TODO: Handle batch dimension here
        # Presumably a collective call that all parties must reach in lockstep
        # so the source party's size is shared with the others — confirm.
        if self.cache_pred_size:
            preds_size = self._communicate_and_cache("preds_size", preds_size)

        # Placeholder (uninitialized) predictions for non-source parties.
        # NOTE(review): if `cache_pred_size` is False on a non-source party,
        # `preds_size` stays None and `torch.empty(None)` would fail — looks
        # like config guarantees this combination never occurs; verify.
        self.preds = torch.empty(preds_size) if self.preds is None else self.preds
        return self.logits