# build() — excerpt from neuron_explainer/explanations/prompt_builder.py


    def build(self, prompt_format: PromptFormat) -> str | list[ChatMessage]:
        """
        Validate the messages added so far and render them in the requested format.

        Messages must follow the expected alternation: a system message first,
        then alternating user/assistant messages. Extra system messages are
        tolerated anywhere when ``self._allow_extra_system_messages`` is set.

        Returns either a regular string (with an ``<|endofprompt|>`` marker
        appended to the last user message for
        ``PromptFormat.INSTRUCTION_FOLLOWING``) or a list of ChatMessages
        suitable for use with the /chat/completions endpoint.

        Raises:
            AssertionError: if the message roles do not alternate as expected,
                or if INSTRUCTION_FOLLOWING is requested with no user message.
            ValueError: if ``prompt_format`` is not a recognized format.
        """
        # Copy each message dict so neither we nor the caller can mutate the
        # other's state. A shallow per-message copy suffices: the values are
        # immutable strings, so there is nothing deeper to share.
        messages = [message.copy() for message in self._messages]

        expected_next_role = Role.SYSTEM
        for message in messages:
            role = message["role"]
            # Raise explicitly (instead of using `assert`) so the validation
            # still runs when Python is started with -O; the exception type
            # callers see (AssertionError) is unchanged.
            if not (
                role == expected_next_role
                or (self._allow_extra_system_messages and role == Role.SYSTEM)
            ):
                raise AssertionError(
                    f"Expected message from {expected_next_role} but got message from {role}"
                )
            if role == Role.SYSTEM:
                expected_next_role = Role.USER
            elif role == Role.USER:
                expected_next_role = Role.ASSISTANT
            elif role == Role.ASSISTANT:
                expected_next_role = Role.USER

        if prompt_format == PromptFormat.INSTRUCTION_FOLLOWING:
            # The end-of-prompt marker goes on the *last* user message, so
            # search from the end instead of scanning the whole list forward.
            last_user_message = next(
                (m for m in reversed(messages) if m["role"] == Role.USER), None
            )
            if last_user_message is None:
                raise AssertionError(
                    "INSTRUCTION_FOLLOWING requires at least one user message"
                )
            last_user_message["content"] += "<|endofprompt|>"

        if prompt_format == PromptFormat.CHAT_MESSAGES:
            return messages
        elif prompt_format in [PromptFormat.NONE, PromptFormat.INSTRUCTION_FOLLOWING]:
            return "".join(message["content"] for message in messages)
        else:
            raise ValueError(f"Unknown prompt format: {prompt_format}")