def process_evaluation()

in ml/eval/reward_eval.py


import json
import os
from typing import Any, Dict, List


def process_evaluation(args, model_name: str, eval_data_list_dict) -> List[Dict[str, Any]]:
    """
    Load the model and tokenizer for `model_name`, run reward evaluation over
    `eval_data_list_dict`, write the results to a JSON file, and return them.
    """
    # mixed_precision = 'bf16' if args.bfloat16 else 'fp16'

    # Initialize model and tokenizer (accelerator setup is currently disabled)
    # accelerator = MyAccelerator(mixed_precision)
    model = create_model(model_name)
    tokenizer = create_tokenizer(model_name)

    # Inference only: disable dropout and other training-time behavior
    model.eval()

    eval_data = evaluate_data(args, model, tokenizer, eval_data_list_dict)

    # Default filename is derived from the basename of the output filepath,
    # e.g. "outputs/run1.json" -> "run1_reward_results.json"
    result_filename = args.result_filename or f"{os.path.basename(args.output_filepath).split('.')[0]}_reward_results.json"
    with open(result_filename, "w") as f:
        json.dump(eval_data, f)

    return eval_data
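
A minimal usage sketch, assuming `args` is an `argparse.Namespace` carrying the fields the function reads (`result_filename`, `output_filepath`), that `create_model`, `create_tokenizer`, and `evaluate_data` are defined elsewhere in `ml/eval/reward_eval.py`, and with an illustrative model name and record schema:

import argparse

# Hypothetical args; only the fields accessed by process_evaluation are set.
args = argparse.Namespace(
    bfloat16=True,                          # consumed only by the commented-out precision logic
    result_filename=None,                   # None -> derive name from output_filepath
    output_filepath="outputs/run1.json",
)

# Illustrative evaluation records; the actual schema is defined by evaluate_data.
eval_data_list_dict = [
    {"prompt": "Explain dropout.", "output": "Dropout randomly zeroes units..."},
]

results = process_evaluation(args, "my-reward-model", eval_data_list_dict)
# Results are also written to "run1_reward_results.json".

Note that because the derived filename uses only `os.path.basename`, the results file is written to the current working directory rather than alongside `args.output_filepath`.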