leaderboard/irt/evaluate.py [110:129]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                # 3PL response: probability this subject answers this item correctly.
                prob = irt_3pl(
                    ability=model_stats.skill,
                    diff=example_stats.diff,
                    disc=example_stats.disc,
                    lambda_=example_stats.lambda_,
                )
            else:
                raise ValueError(f"Invalid model type: {model_type}")
            # Threshold the predicted probability at 0.5 for a hard 0/1 label.
            pred = 1 if prob > 0.5 else 0
            pred_probs.append(prob)
            pred_labels.append(pred)
            labels.append(score)

    # Stack per-item predictions and gold labels, then write the metrics report.
    pred_probs = np.array(pred_probs)
    pred_labels = np.array(pred_labels)
    labels = np.array(labels)
    name = f"{model_family}-{model_type}-{evaluation}"
    evaluate_item_predictions(
        report_dir=irt_base_dir, pred_probs=pred_probs, labels=labels, name=name
    )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
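
A minimal sketch of the irt_3pl call both ranges make, assuming lambda_ acts as
a feasibility (upper-asymptote) parameter as in some IRT implementations; the
repository's actual definition may differ:

    import numpy as np

    def irt_3pl(ability: float, diff: float, disc: float, lambda_: float) -> float:
        # Sketch only: logistic response scaled by discrimination (disc) and
        # shifted by difficulty (diff), with lambda_ capping the upper asymptote.
        # The classic psychometric 3PL instead uses a guessing lower asymptote:
        # c + (1 - c) * sigmoid(disc * (ability - diff)).
        return lambda_ / (1 + np.exp(-disc * (ability - diff)))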



leaderboard/irt/evaluate.py [183:202]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                # 3PL response: probability this subject answers this item correctly.
                prob = irt_3pl(
                    ability=model_stats.skill,
                    diff=example_stats.diff,
                    disc=example_stats.disc,
                    lambda_=example_stats.lambda_,
                )
            else:
                raise ValueError(f"Invalid model type: {model_type}")
            # Threshold the predicted probability at 0.5 for a hard 0/1 label.
            pred = 1 if prob > 0.5 else 0
            pred_probs.append(prob)
            pred_labels.append(pred)
            labels.append(score)

    # Stack per-item predictions and gold labels, then write the metrics report.
    pred_probs = np.array(pred_probs)
    pred_labels = np.array(pred_labels)
    labels = np.array(labels)
    name = f"{model_family}-{model_type}-{evaluation}"
    evaluate_item_predictions(
        report_dir=irt_base_dir, pred_probs=pred_probs, labels=labels, name=name
    )
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
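
Both ranges share the same tail: convert the accumulated lists to arrays, build
the report name, and call evaluate_item_predictions. One way to remove the
duplication is to factor that tail into a shared helper; a sketch, with the
helper name hypothetical:

    import numpy as np

    def _report_item_predictions(irt_base_dir, pred_probs, pred_labels,
                                 labels, model_family, model_type, evaluation):
        # Hypothetical helper holding the tail duplicated across both ranges;
        # evaluate_item_predictions is the existing function in this module.
        pred_probs = np.array(pred_probs)
        pred_labels = np.array(pred_labels)  # kept to mirror the original tail
        labels = np.array(labels)
        name = f"{model_family}-{model_type}-{evaluation}"
        evaluate_item_predictions(
            report_dir=irt_base_dir, pred_probs=pred_probs, labels=labels, name=name
        )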



