in benchmarks/benchmark/tools/locust-load-inference/locust-docker/locust-tasks/tasks.py [0:0]
@events.init.add_listener
def _(environment, **kwargs):
    # Per-worker setup; the MasterRunner only aggregates stats and does not send requests.
    if not isinstance(environment.runner, MasterRunner):
        global model_params
        global test_data
        global local_metric_collector
        global tokenizer
        # Tokenizer used to count prompt and output tokens for the collected metrics.
        tokenizer = AutoTokenizer.from_pretrained(
            environment.parsed_options.tokenizer)
        logging.info(
            "Loading test prompts from locust-tasks/filtered_prompts.txt.")
        test_data = []
        try:
            test_data = load_test_prompts()
        except Exception as e:
            logging.error(f"Failed to load test data: {e}")
        logging.info(f"Loaded {len(test_data)} test prompts.")
        # Benchmark parameters shared by the request-generating Locust tasks.
        model_params = {
            "backend": environment.parsed_options.backend,
            "best_of": environment.parsed_options.best_of,
            "max_output_len": environment.parsed_options.max_output_len,
            "sax_model": environment.parsed_options.sax_model,
            "use_beam_search": environment.parsed_options.use_beam_search,
            "tokenizer": environment.parsed_options.tokenizer,
        }
        logging.info(
            f"Using the following benchmark parameters:\n {model_params}")