in 1_synthetic-qa-generation/reasoningplaning/evolve.py [0:0]
import random
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List

from tqdm import tqdm


def generateApi(self, prompt: str, num_rollouts: int) -> List[str]:
    def send_request(prompt: str) -> str:
        # Sample a temperature per rollout to diversify the generated responses.
        temperature = random.choice([0.7, 1.0])  # alternatively: random.choice(self.temperature_range)
        if self.model_type == "aoai":
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=self.kwargs["max_tokens"],
                temperature=temperature,
            )
            return response.choices[0].message.content
        raise NotImplementedError("Only the Azure OpenAI ('aoai') model type is supported.")

    # Issue num_rollouts identical requests in parallel and collect the completions.
    responses = []
    with ThreadPoolExecutor(max_workers=num_rollouts) as executor:
        futures = [executor.submit(send_request, prompt) for _ in range(num_rollouts)]
        for future in tqdm(as_completed(futures), total=len(futures)):
            responses.append(future.result())
    return responses
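
A minimal usage sketch, assuming generateApi is a method of a class that holds an Azure OpenAI client in self.client along with self.model_name, self.model_type, and self.kwargs. None of that surrounding class is shown in this excerpt, so the Evolver name, its constructor arguments, and the endpoint/model values below are illustrative only:

    from openai import AzureOpenAI

    # Hypothetical wiring; replace placeholders with real credentials and deployment names.
    client = AzureOpenAI(
        azure_endpoint="https://<your-resource>.openai.azure.com/",
        api_key="<your-key>",
        api_version="2024-02-01",
    )
    evolver = Evolver(                      # hypothetical owner class of generateApi
        client=client,
        model_name="<your-deployment>",
        model_type="aoai",
        kwargs={"max_tokens": 1024},
    )
    # Fan out 4 parallel rollouts for one prompt; each completion is sampled at
    # temperature 0.7 or 1.0, and the results come back as a list of strings.
    candidates = evolver.generateApi("Evolve this question into a multi-step planning task.", num_rollouts=4)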