def make_explanation_prompt()

in neuron-explainer/neuron_explainer/explanations/explainer.py


    def make_explanation_prompt(self, **kwargs: Any) -> Union[str, list[HarmonyMessage]]:
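        """Build the prompt asking the model to explain the neuron from its associated tokens.

        Expects exactly two keyword arguments: `tokens` (the tokens associated with the
        neuron) and `max_tokens_for_completion` (the completion budget used by the
        prompt-length check).
        """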
        tokens: list[str] = kwargs.pop("tokens")
        max_tokens_for_completion = kwargs.pop("max_tokens_for_completion")
        assert not kwargs, f"Unexpected kwargs: {kwargs}"
        # Note that this does not preserve the precise tokens, as e.g.
        # f" {token_with_no_leading_space}" may be tokenized as f"{token_with_leading_space}".
        # TODO(dan): Try out other variants, including "\n".join(...) and ",".join(...)
        stringified_tokens = ", ".join([f"'{t}'" for t in tokens])

        prompt_builder = PromptBuilder()
        prompt_builder.add_message(Role.SYSTEM, self.prompt_prefix)
        if self.use_few_shot:
            self._add_few_shot_examples(prompt_builder)
        self._add_neuron_specific_prompt(prompt_builder, stringified_tokens, explanation=None)

        if self._prompt_is_too_long(prompt_builder, max_tokens_for_completion):
            raise ValueError(f"Prompt too long: {prompt_builder.build(self.prompt_format)}")
        else:
            return prompt_builder.build(self.prompt_format)
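
As a rough illustrative sketch (not part of the repository), the stringification step above joins the tokens into a single quoted, comma-separated string; this is why the comment notes that exact token boundaries are not preserved once the prompt is re-tokenized:

    tokens = ["the", " cat", " sat"]
    stringified_tokens = ", ".join([f"'{t}'" for t in tokens])
    print(stringified_tokens)  # -> 'the', ' cat', ' sat'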