def grade()

in mlebench/competitions/tgs-salt-identification-challenge/grade.py [0:0]
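
Grades a submission for the TGS Salt Identification Challenge. The submission must have exactly the columns `id` and `rle_mask`, the same number of rows as the answers, only string (or missing) RLE values, and the same set of IDs. Missing masks are normalized to empty strings, each RLE string is decoded into a binary mask, and the returned score is the mean of `iou_metric` over all images. `rle_decode`, `iou_metric`, `IMG_HEIGHT`, `IMG_WIDTH`, and `InvalidSubmissionError` are assumed to be provided elsewhere in the module or the grading harness.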


def grade(submission: DataFrame, answers: DataFrame) -> float:
    expected_columns = ["id", "rle_mask"]

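    # Column check: malformed answers indicate a harness bug (assert), while a
    # malformed submission raises InvalidSubmissionError back to the caller.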
    assert set(answers.columns) == set(
        expected_columns
    ), f"Expected answers to have columns {expected_columns}, but got {answers.columns} instead!"

    if set(submission.columns) != set(expected_columns):
        raise InvalidSubmissionError(
            f"Expected submission to have columns {expected_columns}, but got {submission.columns} instead!"
        )

    if len(submission) != len(answers):
        raise InvalidSubmissionError(
            f"Expected submission to have {len(answers)} rows, but got {len(submission)} instead!"
        )

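    # Missing RLE values are treated as empty strings, i.e. empty masks.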
    transformed_submission = submission.fillna("")
    transformed_answers = answers.fillna("")

    assert transformed_answers.applymap(
        lambda x: isinstance(x, str)
    ).values.all(), "Expected all items in `answers` to be strings, but found non-string items!"

    if not transformed_submission.applymap(lambda x: isinstance(x, str)).values.all():
        raise InvalidSubmissionError(
            "Expected all items in `submission` to be strings, but found non-string items!"
        )

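    # Align the two frames row-by-row by sorting on id before comparing masks.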
    sorted_submission = transformed_submission.sort_values(by="id")
    sorted_answers = transformed_answers.sort_values(by="id")

    if (sorted_submission["id"].values != sorted_answers["id"].values).any():
        raise InvalidSubmissionError(
            f"Expected submission to have the same IDs as answers, but got a different set of IDs!"
        )

    y_preds_rle = sorted_submission["rle_mask"].values
    y_trues_rle = sorted_answers["rle_mask"].values

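    # Decode each RLE string into an (IMG_HEIGHT, IMG_WIDTH) binary mask; a
    # malformed submission RLE surfaces as an AssertionError from rle_decode.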
    try:
        y_preds = np.array(
            [rle_decode(rle, height=IMG_HEIGHT, width=IMG_WIDTH) for rle in y_preds_rle]
        )
    except AssertionError as e:
        raise InvalidSubmissionError(f"Error decoding RLE masks: {e}")

    y_trues = np.array(
        [rle_decode(rle, height=IMG_HEIGHT, width=IMG_WIDTH) for rle in y_trues_rle]
    )

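    # Score each image with iou_metric and report the mean over the test set.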
    scores = [iou_metric(y_trues[i], y_preds[i]) for i in range(len(y_trues))]
    score = np.mean(scores)

    return score
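
For context, here is a minimal, self-contained sketch of how the grader might be exercised. The `toy_rle_decode` helper, the `IMG_H`/`IMG_W` constants, and the 101x101 image size are hypothetical stand-ins, assuming the standard Kaggle RLE convention (space-separated, 1-indexed `start length` pairs in column-major order); the repository's actual `rle_decode` and `iou_metric` live elsewhere in grade.py and may differ.

import numpy as np
import pandas as pd

IMG_H, IMG_W = 101, 101  # assumed TGS Salt image size


def toy_rle_decode(rle: str, height: int, width: int) -> np.ndarray:
    """Hypothetical decoder: 1-indexed `start length` pairs, column-major order."""
    mask = np.zeros(height * width, dtype=np.uint8)
    if rle:
        nums = [int(n) for n in rle.split()]
        assert len(nums) % 2 == 0, "RLE must be start/length pairs"
        for start, length in zip(nums[0::2], nums[1::2]):
            mask[start - 1 : start - 1 + length] = 1
    return mask.reshape((height, width), order="F")  # column-major layout


# Tiny example: image "a" is fully covered by salt, image "b" is empty.
full_mask = f"1 {IMG_H * IMG_W}"
answers = pd.DataFrame({"id": ["a", "b"], "rle_mask": [full_mask, ""]})
submission = pd.DataFrame({"id": ["b", "a"], "rle_mask": ["", full_mask]})

# grade(submission, answers) sorts both frames by id, decodes each rle_mask,
# and returns the mean iou_metric; a submission identical to the answers
# should therefore receive the metric's maximum score.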