def run_experiment()

in online_attacks/experiments/hyper_search_stochastic_toy.py


import numpy as np

# OnlineParams, Dataset, create_online_algorithm, compute_indices,
# compute_competitive_ratio and compute_knapsack_online_value are assumed to be
# defined or imported elsewhere in this module.
def run_experiment(params: OnlineParams, train_loader: Dataset, knapsack: bool):
    offline_algorithm, online_algorithm = create_online_algorithm(params)
    num_perms = len(train_loader)
    comp_ratio_list, online_knapsack_list, offline_value_list = [], [], []
    for dataset in train_loader:
        # Each entry is an (offline_dataset, online_dataset) pair; only the
        # online stream is needed here.
        _, online_dataset = dataset
        indices = compute_indices(online_dataset, [online_algorithm, offline_algorithm])
        comp_ratio_list.append(
            compute_competitive_ratio(
                indices[online_algorithm.name], indices[offline_algorithm.name]
            )
        )

        if knapsack:
            # Accumulate the offline value of this permutation and the value
            # achieved by the online algorithm on the same stream.
            offline_value_list.append(
                sum(x[0] for x in indices[offline_algorithm.name])
            )
            online_knapsack_list.append(
                compute_knapsack_online_value(indices[online_algorithm.name])
            )

    # Indicator competitive ratio: fraction of the K offline picks that the
    # online algorithm also selects, averaged over all permutations.
    comp_ratio = np.sum(comp_ratio_list) / (params.K * num_perms)
    print(
        "Competitive Ratio for %s with K = %d is %f"
        % (online_algorithm.name, params.K, comp_ratio)
    )

    # Knapsack competitive ratio: total online value over total offline value
    # across all permutations. When knapsack is True this overwrites the
    # indicator ratio, so the knapsack ratio is what gets returned.
    if knapsack:
        comp_ratio = np.sum(online_knapsack_list) / np.sum(offline_value_list)
        print(
            "Knapsack Competitive Ratio for %s with K = %d is %f"
            % (online_algorithm.name, params.K, comp_ratio)
        )

    return comp_ratio
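
For intuition, here is a minimal numeric sketch of the two aggregations performed above; all values are made up for illustration and do not come from the repository.

import numpy as np

# Toy numbers (illustrative only): 4 permutations, K = 5 picks each.
K, num_perms = 5, 4
comp_ratio_list = [3, 4, 2, 5]                 # offline picks recovered online, per permutation
online_knapsack_list = [7.0, 6.5, 5.0, 8.0]    # online knapsack value per permutation
offline_value_list = [9.0, 8.0, 7.5, 9.5]      # offline knapsack value per permutation

# Same aggregation as run_experiment.
indicator_ratio = np.sum(comp_ratio_list) / (K * num_perms)                   # 14 / 20 = 0.70
knapsack_ratio = np.sum(online_knapsack_list) / np.sum(offline_value_list)    # 26.5 / 34.0 ≈ 0.78
print(indicator_ratio, knapsack_ratio)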