def _linearize_jacobian_impl()

in theseus/optimizer/sparse_linearization.py [0:0]


    def _linearize_jacobian_impl(self):
        # these tensors are fully overwritten below, so there is no need to zero them:
        self.A_val = torch.empty(
            size=(self.objective.batch_size, len(self.A_col_ind)),
            device=self.objective.device,
        )
        self.b = torch.empty(
            size=(self.objective.batch_size, self.num_rows),
            device=self.objective.device,
        )

        # running row offset of the current cost function inside the stacked residual
        err_row_idx = 0
        for f_idx, cost_function in enumerate(self.objective):
            jacobians, error = cost_function.weighted_jacobians_error()
            num_rows = cost_function.dim()
            row_slice = slice(err_row_idx, err_row_idx + num_rows)

            # view this cost function's block of rows inside `A_val` as a batched
            # `num_rows` x `stride` matrix
            block_start = self.cost_function_row_block_starts[f_idx]
            stride = self.cost_function_stride[f_idx]
            block = self.A_val[:, block_start : block_start + stride * num_rows].view(
                -1, num_rows, stride
            )
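            # column offset, within this block, of each variable optimized by this cost function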
            block_pointers = self.cost_function_block_pointers[f_idx]

            for var_idx_in_cost_function, var_jacobian in enumerate(jacobians):
                # write the variable's Jacobian into its column block, at the
                # precomputed offset stored in `block_pointers`
                num_cols = var_jacobian.shape[2]
                pointer = block_pointers[var_idx_in_cost_function]
                block[:, :, pointer : pointer + num_cols] = var_jacobian

            # the right-hand side is the negative of the weighted error
            self.b[:, row_slice] = -error
            err_row_idx += cost_function.dim()
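
The key pattern above is writing each cost function's dense Jacobian blocks into a flat, per-batch value array through a strided view. Below is a minimal, self-contained sketch of that pattern with made-up sizes and offsets; the names `block_start`, `stride`, and `block_pointers` mirror the attributes used above, but their values here are illustrative and do not come from the real precomputed sparsity structure.

    import torch

    batch_size = 2
    num_rows = 3     # rows contributed by one cost function (its dim())
    stride = 5       # number of value slots per row reserved for this cost function
    block_start = 4  # start of this cost function's slots in the flat value array
    num_vals = 20    # total nonzeros per batch element (len(A_col_ind) in the class)

    # flat per-batch value array, analogous to self.A_val (zeros here for visibility;
    # the real code uses torch.empty because every slot is overwritten)
    A_val = torch.zeros(batch_size, num_vals)

    # view this cost function's slice as a (batch, num_rows, stride) block
    block = A_val[:, block_start : block_start + stride * num_rows].view(
        -1, num_rows, stride
    )

    # two variables with 2 and 3 columns, placed at column offsets 0 and 2 of the block
    jacobians = [
        torch.randn(batch_size, num_rows, 2),
        torch.randn(batch_size, num_rows, 3),
    ]
    block_pointers = [0, 2]

    for pointer, jac in zip(block_pointers, jacobians):
        num_cols = jac.shape[2]
        block[:, :, pointer : pointer + num_cols] = jac

    # `block` is a view, so the assignments above filled A_val in place
    print(A_val[0, block_start : block_start + stride * num_rows])

Because the block is a view into `A_val`, each per-cost-function write is a single batched copy into the value array; together with the column indices in `A_col_ind` (and the row pointer structure built elsewhere in the class), each batch element's row of `A_val` can then be interpreted as the values of a sparse Jacobian matrix.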