syne_tune/optimizer/schedulers/searchers/bayesopt/models/gp_model.py [79:115]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        super().__init__(state, active_metric, filter_observed_data)
        self._gpmodel = gpmodel
        self.mean = normalize_mean
        self.std = normalize_std
        self.fantasy_samples = fantasy_samples

    def predict(self, inputs: np.ndarray) -> List[Dict[str, np.ndarray]]:
        """
        Returns predictive means and stddevs for `inputs`, with the target
        normalization undone.

        :param inputs: Input features
        :return: Predictive means, stddevs
        """
        predictions_list = []
        for post_mean, post_variance in self._gpmodel.predict(inputs):
            assert post_mean.shape[0] == inputs.shape[0], \
                (post_mean.shape, inputs.shape)
            assert post_variance.shape == (inputs.shape[0],), \
                (post_variance.shape, inputs.shape)
            # Undo normalization applied to targets
            mean_denorm = post_mean * self.std + self.mean
            std_denorm = np.sqrt(post_variance) * self.std
            predictions_list.append(
                {'mean': mean_denorm, 'std': std_denorm})
        return predictions_list

    def backward_gradient(
            self, input: np.ndarray,
            head_gradients: List[Dict[str, np.ndarray]]) -> List[np.ndarray]:
        poster_states = self.posterior_states
        assert poster_states is not None, \
            "Cannot run backward_gradient without a posterior state"
        assert len(poster_states) == len(head_gradients), \
            "len(posterior_states) = {} != {} = len(head_gradients)".format(
                len(poster_states), len(head_gradients))
        return [
            poster_state.backward_gradient(
                input, head_gradient, self.mean, self.std)
            for poster_state, head_gradient in zip(
                poster_states, head_gradients)]

    def does_mcmc(self):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
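The denormalization in `predict` above undoes the target standardization
y_norm = (y - normalize_mean) / normalize_std applied before the GP was fit:
posterior means are mapped back via post_mean * std + mean, and posterior
stddevs via sqrt(post_variance) * std. A minimal sketch with made-up numbers
(the constants and arrays below are purely illustrative, not taken from the
library):

import numpy as np

# Hypothetical normalization constants and (normalized) posterior outputs
normalize_mean, normalize_std = 0.75, 0.2
post_mean = np.array([-0.5, 0.0, 1.2])
post_variance = np.array([0.04, 0.09, 0.01])

# Undo the target normalization, as in predict() above
mean_denorm = post_mean * normalize_std + normalize_mean   # [0.65, 0.75, 0.99]
std_denorm = np.sqrt(post_variance) * normalize_std        # [0.04, 0.06, 0.02]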



syne_tune/optimizer/schedulers/searchers/bayesopt/models/gpiss_model.py [73:115]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        super().__init__(state, active_metric, filter_observed_data)
        self._gpmodel = gpmodel
        self.mean = normalize_mean
        self.std = normalize_std
        self.fantasy_samples = fantasy_samples

    def predict(self, inputs: np.ndarray) -> List[Dict[str, np.ndarray]]:
        """
        Input features `inputs` are w.r.t. extended configs (x, r).

        :param inputs: Input features
        :return: Predictive means, stddevs
        """
        predictions_list = []
        for post_mean, post_variance in self._gpmodel.predict(inputs):
            assert post_mean.shape[0] == inputs.shape[0], \
                (post_mean.shape, inputs.shape)
            assert post_variance.shape == (inputs.shape[0],), \
                (post_variance.shape, inputs.shape)
            # Undo normalization applied to targets
            mean_denorm = post_mean * self.std + self.mean
            std_denorm = np.sqrt(post_variance) * self.std
            predictions_list.append(
                {'mean': mean_denorm, 'std': std_denorm})
        return predictions_list

    def backward_gradient(
            self, input: np.ndarray,
            head_gradients: List[Dict[str, np.ndarray]]) -> List[np.ndarray]:
        poster_states = self.posterior_states
        assert poster_states is not None, \
            "Cannot run backward_gradient without a posterior state"
        assert len(poster_states) == len(head_gradients), \
            "len(posterior_states) = {} != {} = len(head_gradients)".format(
                len(poster_states), len(head_gradients))
        return [
            poster_state.backward_gradient(
                input, head_gradient, self.mean, self.std)
            for poster_state, head_gradient in zip(
                poster_states, head_gradients)]

    def does_mcmc(self):
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
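In both snippets, `backward_gradient` hands `self.mean` and `self.std` to the
posterior state so that head gradients, taken w.r.t. the denormalized
predictive mean and stddev, can be chained back through the affine
denormalization. Assuming that is the role these arguments play (a sketch of
the chain rule, not the library's internal implementation), the rescaling
amounts to:

import numpy as np

# Hypothetical head gradients of an acquisition function w.r.t. the
# denormalized predictive mean and stddev (illustration only)
head_gradient = {'mean': np.array([0.3]), 'std': np.array([-1.1])}
normalize_std = 0.2

# mean_denorm = normalize_std * post_mean + normalize_mean and
# std_denorm = normalize_std * post_std are affine in the normalized
# statistics, so gradients w.r.t. the normalized posterior quantities
# are the head gradients scaled by normalize_std
grad_wrt_post_mean = normalize_std * head_gradient['mean']  # [0.06]
grad_wrt_post_std = normalize_std * head_gradient['std']    # [-0.22]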



