def display_message()

in components/frontend_streamlit/src/pages/3_Chat.py [0:0]


def display_message(item, item_index):
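  """Render one chat history item as Streamlit chat messages.

  Depending on the keys present, this shows the user's input, the selected
  route, the AI output, query results, linked resources, query references,
  a generated plan (with an option to execute it), and agent logs.
  """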
  if "HumanInput" in item:
    with st.chat_message("user"):
      st.write(item["HumanInput"], is_user=True, key=f"human_{item_index}")

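  # Announce the chosen route when there is no AI answer to display yet.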
  if "route_name" in item and "AIOutput" not in item:
    route_name = item["route_name"]
    with st.chat_message("ai"):
      st.write(f"Using route **`{route_name}`** to respond.")

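  # Routing agent logs, shown in a collapsible expander when present.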
  route_logs = item.get("route_logs", None)
  if route_logs and route_logs.strip() != "":
    with st.expander("Expand to see Agent's thought process"):
      utils.print_ai_output(route_logs)

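  # The AI's main response for this turn.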
  if "AIOutput" in item:
    with st.chat_message("ai"):
      utils.print_ai_output(item["AIOutput"])

  # Append all query results.
  if item.get("db_result", None):
    with st.chat_message("ai"):
      st.write("Query result:")
      result_index = 1

      # Clean up empty rows.
      db_result = []
      for result in item["db_result"]:
        if len(result.keys()) > 0:
          db_result.append(result)

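      # Render each row with its first value in bold, followed by the
      # remaining values as a comma-separated list.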
      if len(db_result) > 0:
        for result in db_result:
          values = [str(x) for x in list(result.values())]
          if len(values) > 0:
            markdown_content = f"{result_index}. **{values[0]}**"
            markdown_content += " - " + ", ".join(values[1:])
            with stylable_container(
              key=f"ref_{result_index}",
              css_styles=REFERENCE_CSS_STYLE
            ):
              st.markdown(markdown_content)
          result_index = result_index + 1

      else:
        with stylable_container(
          key=f"ref_{result_index}",
          css_styles=REFERENCE_CSS_STYLE
        ):
          st.markdown("No result found.")

  # Append all resources.
  if item.get("resources", None):
    with st.chat_message("ai"):
      for name, link in item["resources"].items():
        st.markdown(f"Resource: [{name}]({link})")

  # Append all query references.
  if item.get("query_references", None):
    with st.chat_message("ai"):
      st.write("References:")
      reference_index = 1
      for reference in dedup_list(item["query_references"], "chunk_id"):
        document_url = render_cloud_storage_url(reference["document_url"])
        document_text = reference["document_text"]
        st.markdown(
            f"**{reference_index}.** [{document_url}]({document_url})")
        markdown_content = re.sub(
            r"<b>(.*?)</b>", r"**\1**", document_text, flags=re.IGNORECASE)

        with stylable_container(
          key=f"ref_{reference_index}",
          css_styles=REFERENCE_CSS_STYLE
        ):
          st.markdown(markdown_content)

        reference_index = reference_index + 1
      st.divider()

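  # Render the steps of the generated plan and offer to execute it.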
  if "plan" in item:
    with st.chat_message("ai"):
      plan_index = 1
      plan = get_plan(item["plan"]["id"])
      logging.info(plan)

      for step in plan["plan_steps"]:
        with stylable_container(
          key=f"ref_{plan_index}",
          css_styles=STEP_CSS_STYLE
        ):
          st.markdown(step["description"])
        plan_index += 1

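      # Execute the plan on demand and append the agent's output to the
      # chat history.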
      plan_id = plan["id"]
      if st.button("Execute this plan", key=f"plan-{plan_id}"):
        with st.spinner("Executing the plan..."):
          output = run_agent_execute_plan(
            plan_id=plan_id,
            chat_id=st.session_state.chat_id,
            auth_token=st.session_state.auth_token)
        st.session_state.messages.append({
          "AIOutput": f"Plan executed successfully. (plan_id={plan_id})",
        })

        agent_process_output = output.get("agent_process_output", "")
        agent_process_output = ansi_escape.sub("", agent_process_output)
        st.session_state.messages.append({
          "AIOutput": agent_process_output,
        })

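  # Agent execution logs, if any, in a collapsible expander.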
  agent_logs = item.get("agent_logs", None)
  if agent_logs:
    with st.expander("Expand to see Agent's thought process"):
      utils.print_ai_output(agent_logs)

  item_index += 1
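
For context, a minimal usage sketch (not part of the source file): the page is assumed to keep the conversation history in st.session_state.messages, the same list the plan-execution branch above appends to, and to re-render it on each run by calling display_message for every stored item.

# Minimal sketch, assuming the chat history lives in st.session_state.messages
# (the list the plan-execution branch appends to above). Each item is a dict
# that may carry any of the keys display_message reads: "HumanInput",
# "route_name", "route_logs", "AIOutput", "db_result", "resources",
# "query_references", "plan", and "agent_logs".
for i, item in enumerate(st.session_state.get("messages", [])):
  display_message(item, i)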