def evaluate_x()

in benchmarks/nasbench_evaluation.py


import numpy as np

# Assumed module-level dependencies, defined elsewhere in this file:
#   nasbench - a loaded nasbench.api.NASBench instance
#   get_spec - helper mapping selected edges and op indices to a ModelSpec
#     (a hedged sketch is given after the function)


def evaluate_x(x):
    """
    Evaluate NASBench on the model defined by x.

    x is a 36-d array.
    The first 21 are for the adjacency matrix. Largest entries will have the
    corresponding element in the adjacency matrix set to 1, with as many 1s as
    possible within the NASBench model space.
    The last 15 are for the ops in each of the five NASBench model components.
    One-hot encoded for each of the 5 components, 3 options.
    """
    assert len(x) == 36
    x_adj = x[:21]  # edge scores
    x_op = x[-15:]  # op scores
    # Candidate edges, ranked from highest to lowest score
    x_ord = x_adj.argsort()[::-1]
    # Rows index the 3 op options, columns the 5 components; take the
    # highest-scoring option for each component.
    op_indxs = x_op.reshape(3, 5).argmax(axis=0).tolist()
    last_good = None
    # Add edges one at a time, best-scoring first, and keep the last spec
    # that stays within the model space.
    for i in range(1, 22):
        model_spec = get_spec(x_ord[:i], op_indxs)
        if model_spec.matrix is not None:
            # We have a connected graph.
            # Stop if it has too many edges.
            if model_spec.matrix.sum() > 9:
                break
            last_good = model_spec
    if last_good is None:
        # Could not get a valid spec from this x. Return bad metric values
        # (low accuracy, long training time), matching the scalar return
        # types of the success path below.
        return 0.80, 50 * 60.0
    fixed_metrics, computed_metrics = nasbench.get_metrics_from_spec(last_good)
    # Average the final metrics over the evaluation repeats at the
    # 108-epoch budget.
    test_acc = [r['final_test_accuracy'] for r in computed_metrics[108]]
    train_time = [r['final_training_time'] for r in computed_metrics[108]]
    return np.mean(test_acc), np.mean(train_time)
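
For reference, a minimal sketch of the get_spec helper assumed above, built
on the NASBench-101 API's ModelSpec. The mapping of edge indices onto the
7x7 upper-triangular adjacency matrix and the op names follow NASBench-101
conventions; the actual helper in this file may differ in its details.


from nasbench import api

NUM_VERTICES = 7
OPS = ['conv3x3-bn-relu', 'conv1x1-bn-relu', 'maxpool3x3']


def get_spec(edge_indices, op_indxs):
    # Set the selected entries of the upper-triangular adjacency matrix.
    matrix = np.zeros((NUM_VERTICES, NUM_VERTICES), dtype=int)
    rows, cols = np.triu_indices(NUM_VERTICES, k=1)
    for e in edge_indices:
        matrix[rows[e], cols[e]] = 1
    # Map each component's op index to its NASBench op name.
    ops = ['input'] + [OPS[i] for i in op_indxs] + ['output']
    # ModelSpec prunes the graph on construction; spec.matrix is None when
    # the input and output vertices end up disconnected, which is what
    # evaluate_x checks for.
    return api.ModelSpec(matrix=matrix, ops=ops)


Usage (hypothetical), assuming a NASBench record file has been loaded:

nasbench = api.NASBench('nasbench_only108.tfrecord')
x = np.random.rand(36)
mean_acc, mean_time = evaluate_x(x)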