def get_batch_configs()

in syne_tune/optimizer/schedulers/searchers/gp_fifo_searcher.py


    def get_batch_configs(
            self, batch_size: int,
            num_init_candidates_for_batch: Optional[int] = None,
            **kwargs) -> List[Configuration]:
        """
        Asks for a batch of `batch_size` configurations to be suggested. This
        is roughly equivalent to calling `get_config` `batch_size` times,
        marking the suggested configs as pending in the state (but the state
        is not modified here).
        If `num_init_candidates_for_batch` is given, it is used instead
        of `num_init_candidates` for the selection of all but the first
        config in the batch. In order to speed up batch selection, choose
        `num_init_candidates_for_batch` smaller than
        `num_init_candidates`.

        If less than `batch_size` configs are returned, the search space
        has been exhausted.
        """
        assert round(batch_size) == batch_size and batch_size >= 1, \
            "batch_size must be a positive integer"
        configs = []
        if batch_size == 1:
            config = self.get_config(**kwargs)
            if config is not None:
                configs.append(config)
        else:
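            # Configs which must not be suggested again (suggested earlier,
            # or pending in the state)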
            exclusion_candidates = self._get_exclusion_candidates(**kwargs)
            pick_random = True
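            # Draw configs one at a time as long as decisions are random
            # (e.g., during the initial phase). Once
            # `_get_config_not_modelbased` signals a model-based decision,
            # the remaining configs are selected jointly below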
            while pick_random and len(configs) < batch_size:
                config, pick_random = self._get_config_not_modelbased(
                    exclusion_candidates)
                if pick_random:
                    if config is not None:
                        configs.append(config)
                        exclusion_candidates.add(config)
                    else:
                        break  # Space exhausted
            if not pick_random:
                # Model-based decision for remaining ones
                num_requested_candidates = batch_size - len(configs)
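                # Surrogate model, conditioned on all data observed so far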
                model = self.state_transformer.model()
                # Select and fix target resource attribute (relevant in subclasses)
                self._fix_resource_attribute(**kwargs)
                # Create BO algorithm
                initial_candidates_scorer = create_initial_candidates_scorer(
                    initial_scoring=self.initial_scoring,
                    model=model,
                    acquisition_class=self.acquisition_class,
                    random_state=self.random_state)
                local_optimizer = self.local_minimizer_class(
                    hp_ranges=self._hp_ranges_for_prediction(),
                    model=model,
                    acquisition_class=self.acquisition_class,
                    active_metric=INTERNAL_METRIC_NAME)
                pending_candidate_state_transformer = None
                if num_requested_candidates > 1:
                    # If `num_requested_candidates > 1`, candidates are
                    # selected greedily. The model must be updated after each
                    # greedy selection, since every selection adds one more
                    # pending evaluation.
                    model_factory = self.state_transformer._model_factory
                    if isinstance(model_factory, dict):
                        model_factory = model_factory[INTERNAL_METRIC_NAME]
                    # We need a copy of the state here, since
                    # `pending_candidate_state_transformer` modifies the state (it
                    # appends pending trials)
                    temporary_state = copy.deepcopy(
                        self.state_transformer.state)
                    pending_candidate_state_transformer = ModelStateTransformer(
                        model_factory=model_factory,
                        init_state=temporary_state,
                        skip_optimization=AlwaysSkipPredicate())
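                # The BO algorithm scores randomly drawn initial candidates,
                # then runs local optimization starting from the best ones;
                # configs in `exclusion_candidates` are filtered out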
                bo_algorithm = BayesianOptimizationAlgorithm(
                    initial_candidates_generator=self.random_generator,
                    initial_candidates_scorer=initial_candidates_scorer,
                    num_initial_candidates=self.num_initial_candidates,
                    num_initial_candidates_for_batch=num_init_candidates_for_batch,
                    local_optimizer=local_optimizer,
                    pending_candidate_state_transformer=pending_candidate_state_transformer,
                    exclusion_candidates=exclusion_candidates,
                    num_requested_candidates=num_requested_candidates,
                    greedy_batch_selection=True,
                    duplicate_detector=DuplicateDetectorIdentical(),
                    sample_unique_candidates=False,
                    debug_log=self.debug_log)
                # Run the BO algorithm to decide on the remaining candidates
                _configs = bo_algorithm.next_candidates()
                configs.extend(
                    self._postprocess_config(config) for config in _configs)
        return configs
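
A minimal usage sketch (not from the source): it assumes `searcher` is an
already constructed `GPFIFOSearcher` which has been updated with observations;
`schedule_trial` and all argument values are hypothetical.

    # Hypothetical sketch: ask the searcher for a batch of 4 suggestions
    batch = searcher.get_batch_configs(
        batch_size=4,
        # Smaller than `num_init_candidates`, to speed up batch selection
        num_init_candidates_for_batch=50)
    if len(batch) < 4:
        # Fewer configs than requested: the search space is exhausted
        print(f"Search space exhausted after {len(batch)} configs")
    for config in batch:
        schedule_trial(config)  # hypothetical helper that starts a trial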