def text_completion_input()

in azure/functions/decorators/function_app.py


    def text_completion_input(self,
                              arg_name: str,
                              prompt: str,
                              model: Optional[OpenAIModels] = OpenAIModels.DefaultChatModel,  # NoQA
                              temperature: Optional[str] = "0.5",
                              top_p: Optional[str] = None,
                              max_tokens: Optional[str] = "100",
                              data_type: Optional[Union[DataType, str]] = None,
                              **kwargs) \
            -> Callable[..., Any]:
        """
        The textCompletion input binding can be used to invoke the
        OpenAI Chat Completions API and return the results to the function.

        Ref: https://platform.openai.com/docs/guides/text-generation/chat-completions-vs-completions  # NoQA

        The examples below define "who is" HTTP-triggered functions with a
        hardcoded `"who is {name}?"` prompt, where `{name}` is substituted
        with the value in the HTTP request path. The OpenAI input binding
        invokes the OpenAI GPT endpoint to answer the prompt and surfaces
        the result to the function, which then returns the result text as
        the response content.

        :param arg_name: The name of the binding parameter in the function
        code.
        :param prompt: The prompt to generate completions for, encoded as a
        string.
        :param model: The ID of the model to use.
        :param temperature: The sampling temperature to use, between 0 and 2.
        Higher values like 0.8 will make the output more random, while lower
        values like 0.2 will make it more focused and deterministic.
        :param top_p: An alternative to sampling with temperature, called
        nucleus sampling, where the model considers the results of the tokens
        with top_p probability mass. So 0.1 means only the tokens comprising
        the top 10% probability mass are considered. It's generally
        recommended to use this or temperature, but not both.
        :param max_tokens: The maximum number of tokens to generate in the
        completion. The token count of your prompt plus max_tokens cannot
        exceed the model's context length. Most models have a context length of
        2048 tokens (except for the newest models, which support 4096).
        :param data_type: Defines how the Functions runtime should treat the
        parameter value.
        :param kwargs: Keyword arguments for specifying additional binding
        fields to include in the binding JSON.

        :return: Decorator function.
        """

        @self._configure_function_builder
        def wrap(fb):
            def decorator():
                # Register the textCompletion input binding on the
                # underlying function builder.
                fb.add_binding(
                    binding=TextCompletionInput(
                        name=arg_name,
                        prompt=prompt,
                        model=model,
                        temperature=temperature,
                        top_p=top_p,
                        max_tokens=max_tokens,
                        data_type=parse_singular_param_to_enum(data_type,
                                                               DataType),
                        **kwargs))
                return fb

            return decorator()

        return wrap
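
Below is a minimal usage sketch of the "who is" example the docstring
describes, assuming the v2 Python programming model (`func.FunctionApp`)
and an HTTP route parameter named `name`. The shape of the binding
payload (a JSON string with a "content" field) is an assumption, not
confirmed by this section.

    import json

    import azure.functions as func

    app = func.FunctionApp()


    @app.route(route="whois/{name}", methods=["GET"])
    @app.text_completion_input(arg_name="response",
                               prompt="Who is {name}?",
                               max_tokens="100")
    def whois(req: func.HttpRequest, response: str) -> func.HttpResponse:
        # The binding delivers the completion as a JSON string; reading
        # the answer from a "content" field is an assumption.
        result = json.loads(response)
        return func.HttpResponse(result["content"], status_code=200)

The `{name}` placeholder in the prompt is resolved by the binding from
the HTTP request path before the prompt is sent to the model.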