in connectors/aoai.py [0:0]
def _truncate_input(self, text, max_tokens):
    """Truncate *text* from the end until its estimated token count fits max_tokens.

    Uses GptTokenEstimator to measure the token count. Characters are chopped
    off the tail in growing steps (step doubles every 5 iterations, capped at
    100 chars) so very oversized inputs converge quickly without massively
    overshooting the limit.

    Args:
        text: The input string to (possibly) truncate.
        max_tokens: Maximum allowed token count.

    Returns:
        The original text if it already fits, otherwise a truncated prefix
        whose estimated token count is <= max_tokens (or "" in the degenerate
        case where even the empty string is reported over the limit).
    """
    # Hoist the estimator: constructing it per loop iteration is wasted work.
    estimator = GptTokenEstimator()
    input_tokens = estimator.estimate_tokens(text)
    if input_tokens > max_tokens:
        logging.info(f"[aoai] Input size {input_tokens} exceeded maximum token limit {max_tokens}, truncating...")
        step_size = 1   # Initial step size (chars removed per iteration)
        iteration = 0   # Iteration counter
        # Guard on `text` as well: slicing an empty string is a no-op, so
        # without it a pathological estimator result would loop forever.
        while text and estimator.estimate_tokens(text) > max_tokens:
            text = text[:-step_size]
            iteration += 1
            # Increase step size exponentially every 5 iterations, capped at 100
            if iteration % 5 == 0:
                step_size = min(step_size * 2, 100)
    return text