def FT()

in src/smclarify/bias/metrics/posttraining.py [0:0]


def FT(df: pd.DataFrame, sensitive_facet_index: pd.Series, positive_predicted_label_index: pd.Series) -> float:
    r"""
    Flip Test (FT)

    The Flip Test (FT) is an approximation, adapted to tabular data, of the test described by Black et al. In this
    test, we train a k-Nearest Neighbors (k-NN) classifier on the advantaged samples, run prediction on the
    disadvantaged samples, and compute the metric FT = (FTp - FTn) / number of disadvantaged samples, where FTp is
    the number of samples whose prediction flipped from negative to positive, and FTn is the number of samples whose
    prediction flipped from positive to negative.

    :param df: the dataset, excluding facet and label columns
    :param sensitive_facet_index: boolean facet column indicating sensitive group
    :param positive_predicted_label_index: boolean column indicating positive predicted labels
    :return: FT metric
    """
    # FlipTest - binary case
    # a = adv facet, d = disadv facet
    require(sensitive_facet_index.dtype == bool, "sensitive_facet_index must be of type bool")
    require(positive_predicted_label_index.dtype == bool, "positive_predicted_label_index must be of type bool")

    if len(df[sensitive_facet_index]) == 0:
        raise ValueError("Facet set is empty")
    if len(df[~sensitive_facet_index]) == 0:
        raise ValueError("Negated Facet set is empty")
    if len(df.columns) != len(df.select_dtypes([np.number, bool]).columns):
        raise ValueError("FlipTest does not support non-numeric columns")

    dataset = np.array(df)

    data_a = (
        [el for idx, el in enumerate(dataset) if ~sensitive_facet_index.iat[idx]],
        [el for idx, el in enumerate(positive_predicted_label_index) if ~sensitive_facet_index.iat[idx]],
        [el for idx, el in enumerate(sensitive_facet_index) if ~sensitive_facet_index.iat[idx]],
    )
    data_d = (
        [el for idx, el in enumerate(dataset) if sensitive_facet_index.iat[idx]],
        [el for idx, el in enumerate(positive_predicted_label_index) if sensitive_facet_index.iat[idx]],
        [el for idx, el in enumerate(sensitive_facet_index) if sensitive_facet_index.iat[idx]],
    )
    # Fall back to a single neighbor when the advantaged set has 10 or fewer samples,
    # so that prediction still has enough neighbors to draw from
    n_neighbors = FT_DEFAULT_NEIGHBOR if len(data_a[0]) > FT_SAMPLES_COUNT_THRESHOLD else FT_MIN_NEIGHBOR
    knn = KNeighborsClassifier(
        n_neighbors=n_neighbors,
        weights="uniform",
        algorithm="auto",
        leaf_size=30,
        p=2,
        metric="minkowski",
        metric_params=None,
        n_jobs=None,
    )

    # Fit kNN on the advantaged samples (a), using the model's predicted labels as targets
    knn.fit(np.array(data_a[0]), np.array(data_a[1]))
    # kNN prediction over d
    d_y_if_a = knn.predict(data_d[0])
    # Model predictions over the same test d
    d_y_model = data_d[1]

    FS_pos = FlipSet_pos(dataset=data_d[1], labels=d_y_model, predicted_labels=d_y_if_a)
    FS_neg = FlipSet_neg(dataset=data_d[1], labels=d_y_model, predicted_labels=d_y_if_a)
    FS = FlipSet(dataset=data_d[1], labels=d_y_model, predicted_labels=d_y_if_a)

    # if verbose > 0:
    #     print('Data with', len(dataset), 'examples -- ', len(data_d[0]), 'female examples')
    #     print('Length of FlipSet positive (i.e. positive bias to females w.r.t. males):', len(FS_pos), '(',
    #           100 * len(FS_pos) / len(data_d[0]), '%)')
    #     print('Length of FlipSet negative (i.e. negative bias to females w.r.t. males):', len(FS_neg), '(',
    #           100 * len(FS_neg) / len(data_d[0]), '%)')
    #     print('Length of FlipSet:', len(FS), '(', 100 * len(FS) / len(data_d[0]), '%)')

    FTd = divide(len(FS_pos) - len(FS_neg), len(data_d[0]))

    return FTd
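
A minimal usage sketch (an assumption, not taken from the library's documentation): it assumes FT is importable from smclarify.bias.metrics.posttraining as shown in the path above, and uses synthetic feature columns ("age", "income") and random facet/label columns purely for illustration.

import numpy as np
import pandas as pd
from smclarify.bias.metrics.posttraining import FT

rng = np.random.default_rng(0)
n = 200

# Numeric-only feature frame; the facet and predicted-label columns are passed separately.
df = pd.DataFrame({"age": rng.integers(18, 70, n), "income": rng.normal(50_000, 10_000, n)})

# Boolean facet column: True marks the sensitive (disadvantaged) group.
sensitive_facet_index = pd.Series(rng.random(n) < 0.3)

# Boolean column: True marks a positive predicted label from the model.
positive_predicted_label_index = pd.Series(rng.random(n) < 0.5)

print(FT(df, sensitive_facet_index, positive_predicted_label_index))

Values near 0 indicate that few disadvantaged samples would receive a different label under the k-NN model trained on the advantaged group; positive values mean more flips toward the positive label, negative values more flips toward the negative label.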