def jacobians()

in theseus/core/cost_function.py


    def jacobians(self) -> Tuple[List[torch.Tensor], torch.Tensor]:
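        # evaluate the error once to obtain its current value and the
        # optimization and auxiliary variables of this cost function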
        err, optim_vars, aux_vars = self._compute_error()

        # jac_fn receives a tuple of torch tensors whose data is used to update
        # the internal copies in self._tmp_optim_vars
        def jac_fn(*optim_vars_data_):
            assert len(optim_vars_data_) == len(self._tmp_optim_vars)
            for i, tensor in enumerate(optim_vars_data_):
                self._tmp_optim_vars[i].update(tensor)

            return self._err_fn(optim_vars=self._tmp_optim_vars, aux_vars=aux_vars)

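        # differentiate jac_fn wrt the optimization variables' data;
        # create_graph=True makes the jacobians themselves differentiable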
        jacobians_full = autogradF.jacobian(
            jac_fn,
            tuple(v.data for v in optim_vars),
            create_graph=True,
            strict=self._autograd_strict,
            vectorize=self._autograd_vectorize,
        )
        aux_idx = torch.arange(err.shape[0])  # batch_size

        # torch autograd returns, per variable, a tensor of shape
        # (batch_size, dim, batch_size, var_dim), which includes cross-derivatives
        # between different batch elements.
        # this indexing keeps only each batch element's derivatives wrt itself
        jacobians = [jac[aux_idx, :, aux_idx, :] for jac in jacobians_full]
        return jacobians, err
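
For reference, a minimal standalone sketch of the same batch-diagonal indexing trick. The residual `toy_err`, the `weight` matrix, and the dimensions below are illustrative assumptions, not part of theseus:

    import torch
    from torch.autograd import functional as autogradF

    batch_size, var_dim, err_dim = 4, 3, 2
    weight = torch.randn(err_dim, var_dim)

    def toy_err(x):
        # x: (batch_size, var_dim) -> err: (batch_size, err_dim)
        return x @ weight.t()

    x = torch.randn(batch_size, var_dim)
    # full jacobian has shape (batch_size, err_dim, batch_size, var_dim)
    jac_full = autogradF.jacobian(toy_err, x)

    # cross-batch derivatives are zero; keep only the block diagonal
    aux_idx = torch.arange(batch_size)
    jac = jac_full[aux_idx, :, aux_idx, :]  # (batch_size, err_dim, var_dim)

    # for this linear residual, each per-sample jacobian equals `weight`
    assert torch.allclose(jac, weight.expand(batch_size, -1, -1))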