mm_action_prediction/tools/data_support.py [74:100]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def sort_eval_metrics(eval_metrics):
    """Sort a dictionary of evaluation metrics.

    Args:
        eval_metrics: Dict of evaluation metrics.

    Returns:
        sorted_evals: Sorted evaluation metrics, best first.
    """
    # Sort based on 'perplexity' (lower is better).
    # sorted_evals = sorted(eval_metrics.items(), key=lambda x: x[1]['perplexity'])
    # return sorted_evals

    # Sort based on average % increase across all metrics (higher is better).
    def mean_relative_increase(arg1, arg2):
        _, metric1 = arg1
        _, metric2 = arg2
        rel_gain = []
        # higher_better is +1 if a larger value is better, -1 otherwise.
        for higher_better, key in [
            (-1, "perplexity"),
            (1, "action_accuracy"),
            (1, "action_attribute"),
        ]:
            rel_gain.append(
                higher_better
                * (metric1[key] - metric2[key])
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
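
The excerpt ends mid-expression at the report's line window. Below is a minimal sketch of how the comparator plausibly completes, assuming the per-metric difference is normalized by the sum of the two values (a symmetric relative gain), reduced with np.mean, and wired into sorted() via functools.cmp_to_key. The normalizer, the mean reduction, and the final reversal are assumptions filled in from the surrounding comments, not code shown in the excerpt:

import functools

import numpy as np


def sort_eval_metrics(eval_metrics):
    """Sort a dictionary of evaluation metrics, best first (sketch)."""

    def mean_relative_increase(arg1, arg2):
        _, metric1 = arg1
        _, metric2 = arg2
        rel_gain = []
        # higher_better is +1 if a larger value is better, -1 otherwise.
        for higher_better, key in [
            (-1, "perplexity"),
            (1, "action_accuracy"),
            (1, "action_attribute"),
        ]:
            rel_gain.append(
                higher_better
                * (metric1[key] - metric2[key])
                # Assumed normalizer; makes each term a symmetric relative gain.
                / (metric1[key] + metric2[key])
            )
        # Positive mean => metric1 beats metric2 on average.
        return np.mean(rel_gain)

    # cmp_to_key sorts ascending by the comparator; reverse so the
    # best entry comes first.
    sorted_evals = sorted(
        eval_metrics.items(), key=functools.cmp_to_key(mean_relative_increase)
    )
    return sorted_evals[::-1]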



mm_action_prediction/tools/support.py [78:104]:
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def sort_eval_metrics(eval_metrics):
    """Sort a dictionary of evaluation metrics.

    Args:
        eval_metrics: Dict of evaluation metrics.

    Returns:
        sorted_evals: Sorted evaluation metrics, best first.
    """
    # Sort based on 'perplexity' (lower is better).
    # sorted_evals = sorted(eval_metrics.items(), key=lambda x: x[1]['perplexity'])
    # return sorted_evals

    # Sort based on average % increase across all metrics (higher is better).
    def mean_relative_increase(arg1, arg2):
        _, metric1 = arg1
        _, metric2 = arg2
        rel_gain = []
        # higher_better is +1 if a larger value is better, -1 otherwise.
        for higher_better, key in [
            (-1, "perplexity"),
            (1, "action_accuracy"),
            (1, "action_attribute"),
        ]:
            rel_gain.append(
                higher_better
                * (metric1[key] - metric2[key])
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
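
This second block is a verbatim duplicate of the first, so the same completion sketch applies. For illustration, a hypothetical call, assuming the completed function above; the checkpoint names and metric values are invented:

eval_metrics = {
    "epoch_3": {"perplexity": 12.1, "action_accuracy": 0.81, "action_attribute": 0.66},
    "epoch_5": {"perplexity": 10.4, "action_accuracy": 0.84, "action_attribute": 0.70},
}
ranked = sort_eval_metrics(eval_metrics)
# "epoch_5" wins on all three metrics (lower perplexity, higher
# accuracy and attribute scores), so it sorts first.
print(ranked[0][0])  # -> epoch_5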



