File size: 6,919 Bytes
bb44eb0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c83be6d
bb44eb0
 
 
c83be6d
bb44eb0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c83be6d
 
bb44eb0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
import json
import os
import uuid
from streamlit_feedback import streamlit_feedback
import streamlit as st
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from langchain.memory import ChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.prompts import HumanMessagePromptTemplate
import tiktoken
#from agent import app
from crag import crag_app
from datetime import timedelta
from sqlalchemy import create_engine
#from cache import (write_to_db,current_time)


#load postgres engine (disabled; kept for reference)
#engine = create_engine("postgresql://postgres:sampath@localhost:5432/postgres")
# Load the OpenAI API key from Streamlit secrets into the environment so the
# langchain/OpenAI clients constructed downstream can find it.
os.environ['OPENAI_API_KEY'] = st.secrets["OPENAI_API_KEY"]
# NOTE(review): this history is module-level, so it is shared across every
# Streamlit rerun/session; the per-session history used by the bot lives in
# st.session_state["memory"] inside veda_bot().
chat_history = ChatMessageHistory()

# System persona/instructions for the RAG answer-generation prompt.
system_message = '''You are an AI assistant for answering questions about vedas and scriptures.
                    \nYou are given the following extracted documents from Svarupa Knowledge Base (https://svarupa.org/) and other documents and a question. 
                    Provide a conversational answer. If there are any unicode characters in the final answer, please encode and provide readable answer to the user.
                    \nIf you are not provided with any documents, say \"I did not get any relevant context for this but 
                    I will reply to the best of my knowledge\" and then write your answer\nIf you don't know the answer, just say \"Hmm, I'm not sure. \" Don't try to make up an answer.
                    \nIf the question is not about vedas and scriptures, politely inform them that you are tuned to only answer questions about that.\n\n'''
generate_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_message),
        # Fixed typo in the template sent to the LLM: "queation" -> "question".
        ("human", "Here is the given context {context}, question: {question} \n\n Formulate an answer."),
    ]
)
#@st.cache_resource(show_spinner=False)  # Set allow_output_mutation to True for mutable objects like instances
def bot_response(user_input):
    """Run *user_input* through the CRAG graph and return its raw state dict."""
    return crag_app.invoke({"question": user_input})


##======
# Main chatbot function
def veda_bot(sidebar: bool = True) -> None:
    """Render the Veda Bot chat UI and process one Streamlit interaction cycle.

    Streamlit re-executes this function on every user interaction, so all
    cross-run state (display messages, LLM memory, feedback) is kept in
    ``st.session_state``.

    Args:
        sidebar: Currently unused; kept for backward compatibility with
            existing callers.
    """
    # Custom CSS: tighten the heading's top margin and hide Streamlit chrome.
    custom_css = """
        <style>
            /* Adjust the selector as needed */
            .stHeadingContainer {
                margin-top: -100px; /* Reduce the top margin */
            }
            #MainMenu {visibility: hidden;}
            footer {visibility: hidden;}
            header {visibility: hidden;}
        </style>
    """

    # Apply the custom CSS
    st.markdown(custom_css, unsafe_allow_html=True)

    # Page header.
    st.title("Veda Bot")
    st.write("This bot is developed based on the content from the [Svarupa](https://svarupa.org/home) website.")

    # NOTE(review): chat_history is module-level (shared across sessions) and
    # gains one more system message on every rerun; it is not read back here.
    chat_history.add_message(SystemMessage(content="Welcome! I am your Veda Bot. How can I assist you today?"))

    # ---- Session-state initialization ------------------------------------
    if "messages" not in st.session_state:
        st.session_state.messages = [{"role": "assistant", "content": "Hi. I am an AI Assistant. Ask me a question about Vedas!"}]

    #if "session_uuid" not in st.session_state:
    #    st.session_state["session_uuid"] = f"{current_time()}-{str(uuid.uuid4())}"

    if "feedback" not in st.session_state:
        st.session_state["feedback"] = None

    if "chat_engine" not in st.session_state:
        st.session_state.chat_engine = bot_response

    if "memory" not in st.session_state:
        st.session_state["memory"] = ChatMessageHistory()
        st.session_state["memory"].add_message(generate_prompt)
        # Fixed typo: "Greating" -> "Greeting".
        st.session_state["memory"].add_message({"role": "user", "content": "Hi/Hello or Any Greeting"})
        st.session_state["memory"].add_message({"role": "assistant", "content": "Hi. Please ask the question about vedas!"})

    # ---- Replay the stored conversation -----------------------------------
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # ---- Read user input ---------------------------------------------------
    prompt = st.chat_input("Enter your question!")

    if prompt:
        # Echo the user's message immediately.
        with st.chat_message("user"):
            st.markdown(prompt)

        # Log the user turn in both display history and LLM memory.
        st.session_state["messages"].append({"role": "user", "content": prompt})
        st.session_state["memory"].add_message({"role": "user", "content": prompt})

    # ---- Generate bot response ---------------------------------------------
    # Guard on `prompt` too: previously, if the last stored message was a user
    # turn but no new input arrived (e.g. an earlier run failed before the
    # assistant reply was appended), the engine was invoked with prompt=None.
    if prompt and st.session_state.messages[-1]["role"] != "assistant":
        with st.spinner("Thinking..."):
            references = []
            message_placeholder = st.empty()
            full_response = ""

            # Invoke the CRAG pipeline with the raw user question.
            response_bot = st.session_state.chat_engine(prompt)
            generation = response_bot['generation']
            full_response += generation

            # Extract source references from any retrieved documents.
            if response_bot['messages']:
                try:
                    references.extend([doc.metadata['source'] for doc in response_bot['messages']])
                except Exception as e:
                    # Best effort: a document missing 'source' metadata should
                    # not prevent the answer from being shown.
                    print("Error:", e)
            #message_placeholder.markdown(full_response + "▌")

            # Append de-duplicated references (sorted for a stable order).
            if references:
                full_response += "\n\n**References:**\n\n"
                for reference in sorted(set(references)):
                    full_response += f"- {reference}\n"

            #message_placeholder.markdown(full_response + "▌")
            # Feedback widget (faces + optional text); result lands in
            # st.session_state["feedback"] on the next rerun.
            streamlit_feedback(
                feedback_type="faces",
                on_submit=None,
                optional_text_label="[Optional] Please provide an explanation",
                key="feedback",
            )
            message_placeholder.markdown(full_response)

        # Persist the assistant turn (without the references section).
        st.session_state["messages"].append({"role": "assistant", "content": generation})
        st.session_state["memory"].add_message({"role": "assistant", "content": generation})
        print(f"Response added to memory: {full_response}")

    # ---- Log feedback and messages -----------------------------------------
    if st.session_state['feedback']:
        # Assembled for persistence; the DB write is currently disabled.
        user_feedback = {
            "user_message": st.session_state["messages"][-2],
            "assistant_message": st.session_state["messages"][-1],
            "feedback_score": st.session_state["feedback"]["score"],
            "feedback_text": st.session_state["feedback"]["text"],
        }
        #write_to_db(u_message=user_feedback["user_message"],
        #            a_message=user_feedback["assistant_message"],
        #            f_score=user_feedback["feedback_score"],
        #            f_text=user_feedback["feedback_text"])

# Script entry point: launch the Streamlit chatbot UI.
if __name__ == "__main__":
    veda_bot()