def toLLM()

in supporting-blog-content/elasticsearch_llm_cache/elasticRAG_with_cache.py


def toLLM(resp, url, usr_prompt, sys_prompt, neg_resp, show_prompt, engine):
    # Requires `from string import Template` and `import streamlit as st`;
    # `genAI` and the user's `query` string are defined elsewhere in this file.
    # Fill the user prompt template with the question, the retrieved context,
    # and the fallback ("negative") response to use when the context is not relevant.
    prompt_template = Template(usr_prompt)
    prompt_formatted = prompt_template.substitute(
        query=query, resp=resp, negResponse=neg_resp
    )

    # Send the assembled prompt to the LLM
    answer = genAI(prompt_formatted, engine, sys_content=sys_prompt)

    # Display response from LLM
    st.header("Response from LLM")
    st.markdown(answer.strip())

    # Only show the source URL when the answer was grounded in the context
    # (i.e., the LLM did not fall back to the negative response)
    if neg_resp not in answer:
        st.write(url)

    # Display full prompt if checkbox was selected
    if show_prompt:
        st.divider()
        st.subheader("Full prompt sent to LLM")
        st.write(prompt_formatted)

    return answer
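
A minimal usage sketch, for orientation only: it shows how toLLM might be called from the surrounding Streamlit script. The prompt strings, the negative response, the example context/URL, and the engine name below are illustrative assumptions; in the actual app, resp and url come from the Elasticsearch query results and query is read from a Streamlit input widget.

# Illustrative wiring only: names and values below are assumptions, not part
# of the original script.
query = "How do I take regular snapshots of my cluster?"  # user's question
resp = "Snapshot lifecycle management (SLM) schedules snapshots ..."  # body text of the top Elasticsearch hit
url = "https://example.com/docs/snapshot-lifecycle-management"  # source URL of that hit

usr_prompt = (
    "Answer this question: $query\n"
    "Using only this context: $resp\n"
    "If the context does not contain the answer, reply with: $negResponse"
)
sys_prompt = "You are a helpful assistant. Answer only from the provided context."
neg_resp = "I'm unable to answer the question based on the provided context."

answer = toLLM(
    resp=resp,
    url=url,
    usr_prompt=usr_prompt,
    sys_prompt=sys_prompt,
    neg_resp=neg_resp,
    show_prompt=True,        # also render the full prompt under a divider
    engine="gpt-3.5-turbo",  # model/deployment name passed through to genAI
)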