def evaluation_function()

in ax/benchmark/benchmark.py


    def evaluation_function(x: List[float]) -> float:
        # Check whether the evaluation budget has been exhausted
        if len(experiment.trials) >= max_trials:
            raise ValueError(f"Evaluation budget ({max_trials} trials) exhausted.")

        # Build ObservationFeatures from the raw parameter vector x
        param_dict = {
            pname: x[i]
            for i, pname in enumerate(problem.search_space.parameters.keys())
        }
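        # Example (illustrative): for a search space with parameters "x1" and
        # "x2", x = [0.5, 1.0] maps to {"x1": 0.5, "x2": 1.0}. This relies on
        # the dict preserving the search space's parameter order.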
        obsf = ObservationFeatures(parameters=param_dict)  # pyre-ignore
        # Compute the time elapsed since the previous trial was created
        num_trials = len(experiment.trials)
        if num_trials == 0:
            gen_time = None
        else:
            previous_ts = experiment.trials[num_trials - 1].time_created.timestamp()
            gen_time = time.time() - previous_ts
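        # Note: gen_time is wall-clock time since the previous trial was
        # created, so it includes the external optimizer's own deliberation;
        # it is recorded on the GeneratorRun below.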
        # Create a GeneratorRun for this externally proposed point
        arms, candidate_metadata_by_arm_signature = gen_arms(
            observation_features=[obsf], arms_by_signature=experiment.arms_by_signature
        )
        gr = GeneratorRun(
            arms=arms,
            gen_time=gen_time,
            candidate_metadata_by_arm_signature=candidate_metadata_by_arm_signature,
        )
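        # The GeneratorRun lets Ax treat this externally proposed point like
        # any candidate it generated itself, including bookkeeping such as
        # gen_time and candidate metadata.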
        # Attach the generator run to a new trial and run it
        trial = experiment.new_trial().add_generator_run(gr).run()
        # Fetch the trial's data and extract the objective value
        df = trial.fetch_data().df
        if len(df) > 1:
            raise Exception("Multiple outcomes are not supported")  # pragma: no cover
        obj = float(df["mean"].values[0])
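        # External optimizers minimize by convention, so negate the value for
        # maximization objectives.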
        if not problem.optimization_config.objective.minimize:
            obj = -obj
        return obj
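
A minimal usage sketch of the closure above, assuming it is returned together with its experiment by the enclosing benchmark helper (as a benchmark_minimize_callable-style API would do). The use of scipy.optimize.minimize, the starting point x0, and the variable names here are illustrative assumptions, not part of this function:

    import numpy as np
    from scipy.optimize import minimize

    # Dimensionality of the search space; problem is the benchmark problem
    # captured by the closure above.
    x0 = np.zeros(len(problem.search_space.parameters))
    try:
        # Each call to evaluation_function adds (and runs) one trial.
        minimize(evaluation_function, x0=x0, method="Nelder-Mead")
    except ValueError:
        # Raised by evaluation_function once max_trials is exhausted.
        pass
    # Every evaluated point is now recorded as a trial on the experiment.
    results = experiment.fetch_data().df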