"""Streamlit chat helpers: render stored message history and handle chat input."""

from typing import Any, Dict, List

import streamlit as st

from llama_index.core.base.llms.types import ChatMessage


def show_previous_messages(framework: str, messages_container: Any) -> None:
    """Re-render the chat history stored in session state inside the container."""
    with messages_container:
        messages: List[Dict[str, Any]] = st.session_state[f"{framework}_messages"]
        for message in messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])


def show_chat_input(
    disabled: bool, framework: str, model: Any, messages_container: Any
) -> None:
    """Render the chat input; on submit, query the model and record both turns."""
    if disabled:
        st.info("Make sure to select a model and file to start chatting!")

    if prompt := st.chat_input("Say something", disabled=disabled):
        st.session_state[f"{framework}_messages"].append(
            {"role": "user", "content": prompt}
        )

        st.session_state[f"{framework}_chat_history"].append(
            ChatMessage.from_str(role="user", content=prompt)
        )

        # Echo the user's message, then render the assistant's reply in the
        # shared container.
        with messages_container:
            with st.chat_message("user"):
                st.write(prompt)

            with st.chat_message("assistant"):
                with st.spinner("Thinking..."):
                    try:
                        ai_response = model.get_response(
                            query_str=prompt,
                            chat_history=st.session_state[f"{framework}_chat_history"],
                        )
                        # Streaming alternative, disabled because streaming
                        # loses the structured response format:
                        # ai_response = model.get_stream_response(
                        #     query_str=prompt,
                        #     chat_history=st.session_state[f"{framework}_chat_history"],
                        # )
                    except Exception as e:
                        ai_response = f"An error occurred: {e}"

                    st.write(ai_response)
                    # With the streaming variant:
                    # ai_response = st.write_stream(ai_response)

                    st.session_state[f"{framework}_messages"].append(
                        {"role": "assistant", "content": ai_response}
                    )

                    st.session_state[f"{framework}_chat_history"].append(
                        ChatMessage.from_str(role="assistant", content=ai_response)
                    )
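

# --- Usage sketch (illustrative) ---
# A minimal example of wiring these helpers into a Streamlit page. The
# `EchoModel` class and the "llamaindex" framework key are assumptions for
# illustration, not part of the original module; the real app supplies its
# own model object exposing `get_response(query_str, chat_history)`.
if __name__ == "__main__":
    framework = "llamaindex"  # hypothetical framework key
    for key in (f"{framework}_messages", f"{framework}_chat_history"):
        st.session_state.setdefault(key, [])

    class EchoModel:
        """Stand-in model that just echoes the prompt (demo only)."""

        def get_response(
            self, query_str: str, chat_history: List[ChatMessage]
        ) -> str:
            return f"Echo: {query_str}"

    container = st.container()
    show_previous_messages(framework, container)
    show_chat_input(
        disabled=False,
        framework=framework,
        model=EchoModel(),
        messages_container=container,
    )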