in bot/gemini_model.py [0:0]
def _build_content(self) -> tuple[list[Content], int]:
    """
    Prepare the whole Content structure to be sent to the AI model, containing the whole chat history so far
    (or as much as the token limit allows).

    The Gemini model accepts a sequence of Content objects, each Content object contains one or more Part objects.
    Content objects have the `role` attribute that tells Gemini who's the author of a given piece of conversation
    history. The model expects that the sequence of incoming Content objects is a conversation between "model" and
    "user" - in our case, we combine all consecutive same-role messages into a single Content object, with proper
    attribution, so that Gemini can recognize who said what. Model Content objects are sent as regular text.

    Side effect: trims `self._history` on the left, dropping entries that did not fit the token budget.

    Returns:
        A tuple of (contents, tokens): the ordered list of Content objects to send,
        and the total token count accumulated while building it.
    """
    # Buffer accumulates a run of consecutive (part, role) tuples that share one role.
    buffer = deque()
    contents = deque()
    tokens = 0
    parts_count = 0
    # Walk history newest-first so the most recent messages survive the token cap;
    # appendleft keeps chronological order inside both buffer and contents.
    for part in reversed(self._history):
        parts_count += 1
        # Role changed: flush the buffered same-role run into a single Content object.
        if buffer and buffer[0][1] != part.role:
            content = Content(role=buffer[0][1], parts=[b[0] for b in buffer])
            contents.appendleft(content)
            buffer.clear()
        buffer.appendleft((part.part, part.role))
        tokens += part.token_count or 0
        if tokens > _MAX_HISTORY_TOKEN_SIZE:
            _LOGGER.info("Memory full, will purge now.")
            break
    # Flush the last buffered run. If the loop above ran to completion, the whole
    # _history fit within the budget and nothing needs purging below.
    if buffer:
        user_content = Content(role=buffer[0][1], parts=[b[0] for b in buffer])
        contents.appendleft(user_content)
    # We need to forget the tail of history that exceeded the budget, so we don't waste memory.
    for _ in range(len(self._history) - parts_count):
        self._history.popleft()
    # Gemini requires the conversation to start with a "user" turn; also drop empty turns.
    # Guard on `contents` to avoid IndexError when history is empty or all-model.
    while contents and (contents[0].role == "model" or len(contents[0].parts) == 0):
        contents.popleft()
    return list(contents), tokens