# scripts/launcher_distributed.py
def run_eval() -> None:
    """
    Run evaluation on the model.

    Sets up the distributed environment, exports the required Hugging Face
    environment variables, and runs the torchtune evaluation command.

    Uses the module-level ``args`` object (parsed command-line arguments);
    the function itself takes no parameters.

    Returns:
        None

    Raises:
        subprocess.CalledProcessError: If any subprocess command fails.
    """
print("***** Starting model evaluation *****")
if LOCAL_RANK != -1 and not dist.is_initialized():
dist.init_process_group(backend="nccl" if dist.is_nccl_available() else "gloo")
# Set custom environment variables
custom_env: Dict[str, str] = {
"HF_DATASETS_TRUST_REMOTE_CODE": "TRUE",
"HF_TOKEN": args.hf_token,
}
set_custom_env(custom_env)
# Construct the evaluation command
full_command = f"tune run eleuther_eval --config {args.tune_eval_yaml}"
print("Running evaluation command...")
run_command(full_command)
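

# -- Assumed helpers -----------------------------------------------------------
# `set_custom_env` and `run_command` are defined elsewhere in this script; the
# versions below are only a minimal, hypothetical sketch of what run_eval()
# expects from them: exporting key/value pairs into the process environment and
# running a shell command that raises subprocess.CalledProcessError on failure.

import os
import shlex
import subprocess
from typing import Dict


def set_custom_env(env_vars: Dict[str, str]) -> None:
    # Merge the provided variables into the current process environment so that
    # child processes (e.g. the `tune` CLI) inherit them.
    os.environ.update(env_vars)


def run_command(command: str) -> None:
    # Split the command string into argv, run it, and raise
    # subprocess.CalledProcessError if it exits with a non-zero status.
    subprocess.run(shlex.split(command), check=True)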