def matvec()

in automl21/scs_neural/solver/linear_operator.py [0:0]
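
In the reading below (inferred from the eigendecomposition stored in lambd_full/Q_full and the entrywise eigenvalue ratios, not stated in the file), this is a batched matrix-vector product with the derivative of the projection onto the positive semidefinite cone. Batch entries whose k == -1 are passed through unchanged; every other entry is expanded from its packed lower-triangular form, rotated into the eigenbasis Q, scaled blockwise by ratios of the clamped eigenvalues, rotated back, and re-packed.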


    def matvec(self, rhs):
        # Split the batch: entries with k == -1 are passed through unchanged
        # at the end, entries with k > -1 go through the eigenbasis computation.
        index_pos = (self.k == -1)
        index_neg = (self.k > -1)

        self.lambd, self.Q = self.lambd_full[index_neg], self.Q_full[index_neg]
        reduced_k = self.k[index_neg]
        rhs_neg = rhs[index_neg]
    
        # Rotate rhs into the eigenbasis: tmp = Q^T * mat(rhs) * Q, where mat(.)
        # expands the packed lower-triangular vector into an n x n matrix;
        # with n(n+1)/2 packed entries, n is recovered as int(sqrt(2 * length)).
        Q_t = self.Q.transpose(1, 2)
        dim = int(np.sqrt(2 * rhs_neg.size(-1)))
        tmp = Q_t @ MatrixUtils.matrix_from_lower_triangular(rhs_neg, dim) @ self.Q
        tmp_rows, tmp_cols = tmp.size(1), tmp.size(2)

        # Scalar zero on the right dtype/device, reused by the clamping below.
        zero_tensor = torch.tensor(0, dtype=rhs.dtype, device=rhs.device)
        for a in range(rhs_neg.size(0)):
            # Eigenvalues 0..k form the "negative" block; the corresponding
            # top-left block of tmp is zeroed out entirely.
            k_plus_1 = reduced_k[a] + 1
            tmp[a, 0:k_plus_1, 0:k_plus_1] = 0

            # Scale the lower-left block entrywise by
            # max(lambda_i, 0) / (max(lambda_i, 0) + max(-lambda_j, 0)).
            for i in range(k_plus_1, tmp_rows):
                for j in range(0, k_plus_1):
                    lambd_i_pos = torch.max(self.lambd[a, i], zero_tensor)
                    lambd_j_neg = -torch.min(self.lambd[a, j], zero_tensor)
                    tmp[a, i, j] *= lambd_i_pos / (lambd_i_pos + lambd_j_neg)

            # Symmetric scaling for the upper-right block, with the roles of
            # the positive and negative eigenvalues swapped.
            for i in range(0, k_plus_1):
                for j in range(k_plus_1, tmp_cols):
                    lambd_i_neg = -torch.min(self.lambd[a, i], zero_tensor)
                    lambd_j_pos = torch.max(self.lambd[a, j], zero_tensor)
                    tmp[a, i, j] *= lambd_j_pos / (lambd_j_pos + lambd_i_neg)

        # Rotate back out of the eigenbasis and re-pack the result into the
        # lower-triangular vector format of rhs.
        result2 = self.Q @ tmp @ Q_t
        dim = result2.size(-1)
        result_neg = MatrixUtils.lower_triangular_from_matrix(result2, dim)

        # Assemble the full batched result: pass-through entries keep rhs,
        # the remaining entries receive the transformed values.
        result = torch.zeros_like(rhs)
        result[index_pos] = rhs[index_pos]
        result[index_neg] = result_neg

        return result
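
For reference, the entrywise scaling inside the batch loop above can also be written without the Python double loops. The sketch below is a hypothetical, vectorized equivalent for a single batch element; the helper name scale_offdiagonal_blocks, its arguments, and the assumption that eigenvalues 0..k are the negative ones are illustrative and not part of the original code.

    import torch

    def scale_offdiagonal_blocks(tmp_a, lambd_a, k_a):
        # tmp_a:   (n, n) matrix, i.e. Q^T mat(rhs) Q for one batch element
        # lambd_a: (n,) eigenvalues; entries 0..k_a assumed negative
        # k_a:     index of the last negative eigenvalue (plain int)
        pos = torch.clamp(lambd_a, min=0)    # max(lambda, 0)
        neg = torch.clamp(-lambd_a, min=0)   # max(-lambda, 0)
        denom = pos.unsqueeze(1) + neg.unsqueeze(0)
        # B[i, j] = max(lambda_i, 0) / (max(lambda_i, 0) + max(-lambda_j, 0)),
        # set to 0 wherever the denominator vanishes (those entries are unused).
        B = torch.where(denom > 0, pos.unsqueeze(1) / denom, torch.zeros_like(denom))

        k1 = k_a + 1
        out = tmp_a.clone()
        out[:k1, :k1] = 0                 # negative-negative block is zeroed
        out[k1:, :k1] *= B[k1:, :k1]      # lower-left block
        out[:k1, k1:] *= B.t()[:k1, k1:]  # upper-right block (transposed ratios)
        return out

Under that eigenvalue-ordering assumption, the body of the batch loop would reduce to something like tmp[a] = scale_offdiagonal_blocks(tmp[a], self.lambd[a], int(reduced_k[a])).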