def on_submit(user_input)

in components/frontend_streamlit/src/pages/6_Custom_Chat.py
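
Chat submission handler for the Custom Chat page: it renders the user's
message, special-cases the literal prompt "i need help" with a canned help
flow, and otherwise dispatches the input to llm-service (a routing agent,
plain chat, or the Plan agent).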


def on_submit(user_input):
  """ Run dispatch agent when adding an user input prompt """
  st.session_state.error_msg = None
  st.session_state.messages.append({"HumanInput": user_input})

  # Handle user who needs help
  if user_input.lower() == "i need help":
    input_cont = st.empty()
    help_cont = st.empty()
    start_time = time.time()
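    # input_cont and help_cont are placeholders, presumably so the rendered
    # messages can be cleared or replaced later (both are handed to
    # help_form below).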

    with input_cont:
      with st.chat_message("user"):
        st.write(user_input)

    # Animate a "Loading..." indicator for roughly four seconds.
    count = 0
    time_elapsed = 0
    while time_elapsed < 4:
      count += 1
      with spinner_container:
        with st.chat_message("ai"):
          st.write("Loading." + "." * (count % 3))

      time.sleep(1)
      time_elapsed = time.time() - start_time
    hide_loading()

    with help_cont:
      with st.chat_message("ai"):
        st.write(
            "If it's an emergency, please dial 911. "
            "Otherwise, complete the form below."
        )
      with st.expander("Get Further Assistance", expanded=True):
        # help_form and the *_container placeholders are defined elsewhere
        # in this page module.
        help_form(messages_container, spinner_container, help_cont, input_cont)

  # Otherwise, handle the input as a normal chat turn.
  else:
    with st.chat_message("user"):
      st.write(user_input)

    show_loading()
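    # show_loading() presumably renders the shared spinner placeholder while
    # the dispatch call below is in flight.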

    # Send the request to the llm-service API.
    default_route = st.session_state.get("default_route")
    routing_agents = get_all_routing_agents()
    routing_agent_names = list(routing_agents.keys())
    chat_llm_type = st.session_state.get("chat_llm_type")
    logging.info("llm_type in session %s", chat_llm_type)

    if default_route is None:
      # No route selected: fall back to the first routing agent, or "default".
      routing_agent = routing_agent_names[0] if routing_agent_names else "default"
      response = run_dispatch(user_input,
                              routing_agent,
                              chat_id=st.session_state.get("chat_id"),
                              llm_type=chat_llm_type,
                              run_as_batch_job=True)

    elif default_route in routing_agent_names:
      response = run_dispatch(user_input,
                              default_route,
                              chat_id=st.session_state.get("chat_id"),
                              llm_type=chat_llm_type,
                              run_as_batch_job=True)

    elif default_route == "Chat":
      response = run_chat(user_input,
                          chat_id=st.session_state.get("chat_id"),
                          llm_type=chat_llm_type)
    elif default_route == "Plan":
      response = run_agent_plan("Plan", user_input,
                                chat_id=st.session_state.get("chat_id"),
                                llm_type=chat_llm_type)
    else:
      st.error(f"Unsupported route {default_route}")
      response = None

    if response:
      st.session_state.chat_id = response.get("chat", {}).get("id")

      # TODO: The "AIOutput" vs. "content" keys are currently inconsistent
      # between the API response and the UserChat history.
      if "content" in response:
        response["AIOutput"] = response["content"]
      # Use pop so a response without a "chat" key doesn't raise KeyError.
      response.pop("chat", None)

      # Append new message from the API response and display it.
      append_and_display_message(response)

      # If the response launched an async batch job, keep polling for the
      # job result.
      if "batch_job" in response:
        if st.session_state.get("debug"):
          with st.expander("batch_job info:"):
            st.write(response)
        update_async_job(response["batch_job"]["id"])
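
For reference, a minimal sketch of how this handler is typically wired into
the page body; the chat-input label and the container layout here are
assumptions, not taken from this excerpt:

# Hypothetical page wiring (sketch).
messages_container = st.container()  # chat history area referenced above
spinner_container = st.empty()       # placeholder used by the loading spinner

user_input = st.chat_input("Ask a question")  # label is an assumption
if user_input:
  on_submit(user_input)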