samlonka committed on
Commit
bb44eb0
1 Parent(s): 35dcaa4

'gitignore'

Browse files
Files changed (3) hide show
  1. .gitignore +1 -0
  2. app.py +158 -0
  3. crag_app.py +0 -158
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .streamlit/*
app.py CHANGED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json
import os
import uuid
from streamlit_feedback import streamlit_feedback
import streamlit as st
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from langchain.memory import ChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.prompts import HumanMessagePromptTemplate
import tiktoken
#from agent import app
from crag import crag_app
from datetime import timedelta
from sqlalchemy import create_engine
from cache import (write_to_db,
                   current_time)


# Database engine used for feedback logging (see write_to_db in cache.py).
# SECURITY NOTE(review): the connection string with credentials was
# hard-coded in source. It can now be overridden via the DATABASE_URL
# environment variable; the original value is kept only as a
# backward-compatible fallback and should be rotated.
engine = create_engine(
    os.environ.get(
        "DATABASE_URL",
        "postgresql://postgres:sampath@localhost:5432/postgres",
    )
)

# OpenAI key comes from Streamlit secrets (.streamlit/secrets.toml),
# which the commit's .gitignore change keeps out of version control.
os.environ['OPENAI_API_KEY'] = st.secrets["OPENAI_API_KEY"]

# Module-level history; only used below for the one-off welcome message.
chat_history = ChatMessageHistory()

# System prompt that scopes the assistant to vedas/scriptures Q&A and
# defines its fallback phrasing when no context documents are retrieved.
system_message = '''You are an AI assistant for answering questions about vedas and scriptures.
\nYou are given the following extracted documents from Svarupa Knowledge Base (https://svarupa.org/) and other documents and a question.
Provide a conversational answer. If there are any unicode characters in the final answer, please encode and provide readable answer to the user.
\nIf you are not provided with any documents, say \"I did not get any relevant context for this but
I will reply to the best of my knowledge\" and then write your answer\nIf you don't know the answer, just say \"Hmm, I'm not sure. \" Don't try to make up an answer.
\nIf the question is not about vedas and scriptures, politely inform them that you are tuned to only answer questions about that.\n\n'''

# Prompt template used to generate an answer from retrieved {context}
# for the user's {question}. Fixed the "queation" typo in the human turn.
generate_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_message),
        ("human", "Here is the given context {context}, question: {question} \n\n Formulate an answer."),
    ]
)
# NOTE: caching via @st.cache_resource was tried and left disabled upstream.
def bot_response(user_input):
    """Run the CRAG graph on *user_input* and return its final state dict.

    The returned mapping carries (at least) the keys read by the UI:
    'generation', 'web_search' and 'messages'.
    """
    return crag_app.invoke({"question": user_input})
##======
# Main chatbot function
def veda_bot(sidebar: bool = True) -> None:
    """Render the Veda Bot Streamlit chat page and handle one interaction turn.

    Each Streamlit rerun: draws the chat history, accepts a new question,
    invokes the CRAG engine, appends the answer (plus source references),
    and collects optional user feedback.

    Args:
        sidebar: accepted for API compatibility.
            # NOTE(review): this parameter is never read in the body.
    """
    # Custom CSS: tighten the heading margin and hide Streamlit's default
    # menu/footer/header chrome.
    custom_css = """
    <style>
    /* Adjust the selector as needed */
    .stHeadingContainer {
        margin-top: -100px; /* Reduce the top margin */
    }
    #MainMenu {visibility: hidden;}
    footer {visibility: hidden;}
    header {visibility: hidden;}
    </style>
    """

    # Apply the custom CSS
    st.markdown(custom_css, unsafe_allow_html=True)

    # Streamlit Components Initialization
    st.title("Veda Bot")
    st.write("This bot is developed based on the content from the [Svarupa](https://svarupa.org/home) website.")
    # NOTE(review): chat_history is module-level, so this welcome message is
    # re-added on every rerun; it is never displayed anywhere in this file.
    chat_history.add_message(SystemMessage(content="Welcome! I am your Veda Bot. How can I assist you today?"))

    # Initialize session state variables (survive Streamlit reruns).
    # Visible transcript rendered in the chat area:
    if "messages" not in st.session_state.keys():
        st.session_state.messages = [{"role": "assistant", "content": "Hi. I am an AI Assistant. Ask me a question about Vedas!"}]

    # Per-browser-session identifier (timestamp + random UUID).
    if "session_uuid" not in st.session_state:
        st.session_state["session_uuid"] = f"{current_time()}-{str(uuid.uuid4())}"

    # Populated by the streamlit_feedback widget (key="feedback") on submit.
    if "feedback" not in st.session_state:
        st.session_state["feedback"] = None

    # The answer engine is just the bot_response wrapper around the CRAG app.
    if "chat_engine" not in st.session_state.keys():
        st.session_state.chat_engine = bot_response

    # Separate message log seeded with the prompt template and a canned
    # greeting exchange; written to but never sent to the model here.
    if "memory" not in st.session_state:
        st.session_state["memory"] = ChatMessageHistory()
        st.session_state["memory"].add_message(generate_prompt)
        st.session_state["memory"].add_message({"role":"user","content":"Hi/Hello or Any Greating"})
        st.session_state["memory"].add_message({"role":"assistant","content":"Hi. Please ask the question about vedas!"})

    # Display chat history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Get user input (None on reruns without a new submission).
    prompt = st.chat_input("Enter your question!")

    if prompt:
        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)

        # Log user message to both the transcript and the memory log.
        st.session_state["messages"].append({"role": "user", "content": prompt})
        st.session_state["memory"].add_message({"role": "user", "content": prompt})

    # Generate bot response whenever the last turn is the user's.
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.spinner("Thinking..."):
            references = []
            message_placeholder = st.empty()
            full_response = ""

            # Get bot response from the CRAG engine; a dict with
            # 'generation', 'web_search' and 'messages' keys.
            response_bot = st.session_state.chat_engine(prompt)
            generation = response_bot['generation']
            full_response += generation
            # NOTE(review): web_search is captured but never used below.
            web_search = response_bot['web_search']

            # Extract references from bot response; best-effort — documents
            # may lack a 'source' metadata key, hence the broad except.
            if response_bot['messages']:
                try:
                    references.extend([doc.metadata['source'] for doc in response_bot['messages']])
                except Exception as e:
                    print("Error:", e)
            #message_placeholder.markdown(full_response + "▌")

            # Append a de-duplicated reference list to the answer.
            # NOTE(review): set() ordering is arbitrary, so reference order
            # can vary between runs.
            if references:
                unique_references = set(references)
                full_response += "\n\n**References:**\n\n"
                for reference in unique_references:
                    full_response += f"- {reference}\n"

            #message_placeholder.markdown(full_response + "▌")
            # Render the feedback widget; on submit it reruns the script and
            # fills st.session_state["feedback"].
            streamlit_feedback(
                feedback_type="faces",
                on_submit=None,
                optional_text_label="[Optional] Please provide an explanation",
                key="feedback",
            )
            message_placeholder.markdown(full_response)
            # Only the bare generation (without references) is stored.
            st.session_state["messages"].append({"role": "assistant", "content": generation})
            st.session_state["memory"].add_message({"role": "assistant", "content": generation})
            print(f"Response added to memory: {full_response}")

    # Log feedback and messages once the widget has been submitted.
    if st.session_state['feedback']:
        user_feedback = {
            "user_message": st.session_state["messages"][-2],
            "assistant_message": st.session_state["messages"][-1],
            "feedback_score": st.session_state["feedback"]["score"],
            "feedback_text": st.session_state["feedback"]["text"],
        }
        # Persistence to Postgres is currently disabled.
        #write_to_db(u_message=user_feedback["user_message"],
        #            a_message=user_feedback["assistant_message"],
        #            f_score=user_feedback["feedback_score"],
        #            f_text=user_feedback["feedback_text"])


if __name__ == "__main__":
    veda_bot()
crag_app.py DELETED
@@ -1,158 +0,0 @@
1
- import json
2
- import os
3
- import uuid
4
- from streamlit_feedback import streamlit_feedback
5
- import streamlit as st
6
- from langchain_openai import ChatOpenAI
7
- from langchain_core.messages import HumanMessage
8
- from langchain.memory import ChatMessageHistory
9
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
10
- from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
11
- from langchain_core.prompts import HumanMessagePromptTemplate
12
- import tiktoken
13
- #from agent import app
14
- from crag import crag_app
15
- from datetime import timedelta
16
- from sqlalchemy import create_engine
17
- from cache import (write_to_db,
18
- current_time)
19
-
20
-
21
- #load postgres engine
22
- engine = create_engine("postgresql://postgres:sampath@localhost:5432/postgres")
23
- #load keys
24
- os.environ['OPENAI_API_KEY'] = st.secrets["OPENAI_API_KEY"]
25
- chat_history = ChatMessageHistory()
26
-
27
- system_message = '''You are an AI assistant for answering questions about vedas and scriptures.
28
- \nYou are given the following extracted documents from Svarupa Knowledge Base (https://svarupa.org/) and other documents and a question.
29
- Provide a conversational answer. If there are any unicode characters in the final answer, please encode and provide readable answer to the user.
30
- \nIf you are not provided with any documents, say \"I did not get any relevant context for this but
31
- I will reply to the best of my knowledge\" and then write your answer\nIf you don't know the answer, just say \"Hmm, I'm not sure. \" Don't try to make up an answer.
32
- \nIf the question is not about vedas and scriptures, politely inform them that you are tuned to only answer questions about that.\n\n'''
33
- generate_prompt = ChatPromptTemplate.from_messages(
34
- [
35
- ("system", system_message),
36
- ("human", "Here is the given context {context}, queation: {question} \n\n Formulate an answer."),
37
- ]
38
- )
39
- #@st.cache_resource(show_spinner=False) # Set allow_output_mutation to True for mutable objects like instances
40
- def bot_response(user_input):
41
- response = crag_app.invoke({"question": user_input})
42
- return response
43
-
44
-
45
- ##======
46
- # Main chatbot function
47
- def veda_bot(sidebar: bool = True) -> None:
48
- # Define custom CSS
49
- custom_css = """
50
- <style>
51
- /* Adjust the selector as needed */
52
- .stHeadingContainer {
53
- margin-top: -100px; /* Reduce the top margin */
54
- }
55
- #MainMenu {visibility: hidden;}
56
- footer {visibility: hidden;}
57
- header {visibility: hidden;}
58
- </style>
59
- """
60
-
61
- # Apply the custom CSS
62
- st.markdown(custom_css, unsafe_allow_html=True)
63
-
64
- # Streamlit Components Initialization
65
- st.title("Veda Bot")
66
- st.write("This bot is developed based on the content from the [Svarupa](https://svarupa.org/home) website.")
67
- chat_history.add_message(SystemMessage(content="Welcome! I am your Veda Bot. How can I assist you today?"))
68
- # Initialize session state variables
69
- if "messages" not in st.session_state.keys():
70
- st.session_state.messages = [{"role": "assistant", "content": "Hi. I am an AI Assistant. Ask me a question about Vedas!"}]
71
-
72
- if "session_uuid" not in st.session_state:
73
- st.session_state["session_uuid"] = f"{current_time()}-{str(uuid.uuid4())}"
74
-
75
- if "feedback" not in st.session_state:
76
- st.session_state["feedback"] = None
77
-
78
- if "chat_engine" not in st.session_state.keys():
79
- st.session_state.chat_engine = bot_response
80
-
81
- if "memory" not in st.session_state:
82
- st.session_state["memory"] = ChatMessageHistory()
83
- st.session_state["memory"].add_message(generate_prompt)
84
- st.session_state["memory"].add_message({"role":"user","content":"Hi/Hello or Any Greating"})
85
- st.session_state["memory"].add_message({"role":"assistant","content":"Hi. Please ask the question about vedas!"})
86
- # Display chat history
87
- for message in st.session_state.messages:
88
- with st.chat_message(message["role"]):
89
- st.markdown(message["content"])
90
-
91
- # Get user input
92
- prompt = st.chat_input("Enter your question!")
93
-
94
- if prompt:
95
- # Display user message in chat message container
96
- with st.chat_message("user"):
97
- st.markdown(prompt)
98
-
99
- # Log user message
100
- st.session_state["messages"].append({"role": "user", "content": prompt})
101
- st.session_state["memory"].add_message({"role": "user", "content": prompt})
102
-
103
- # Generate bot response
104
- if st.session_state.messages[-1]["role"] != "assistant":
105
- with st.spinner("Thinking..."):
106
- references = []
107
- message_placeholder = st.empty()
108
- full_response = ""
109
-
110
- # Get bot response
111
- response_bot = st.session_state.chat_engine(prompt)
112
- generation = response_bot['generation']
113
- full_response += generation
114
- web_search = response_bot['web_search']
115
-
116
- # Extract references from bot response
117
- if response_bot['messages']:
118
- try:
119
- references.extend([doc.metadata['source'] for doc in response_bot['messages']])
120
- except Exception as e:
121
- print("Error:", e)
122
- #message_placeholder.markdown(full_response + "▌")
123
-
124
- # Add references to the full response
125
- if references:
126
- unique_references = set(references)
127
- full_response += "\n\n**References:**\n\n"
128
- for reference in unique_references:
129
- full_response += f"- {reference}\n"
130
-
131
- #message_placeholder.markdown(full_response + "▌")
132
- # Submit Feedback
133
- streamlit_feedback(
134
- feedback_type="faces",
135
- on_submit=None,
136
- optional_text_label="[Optional] Please provide an explanation",
137
- key="feedback",
138
- )
139
- message_placeholder.markdown(full_response)
140
- st.session_state["messages"].append({"role": "assistant", "content": generation})
141
- st.session_state["memory"].add_message({"role": "assistant", "content": generation})
142
- print(f"Response added to memory: {full_response}")
143
-
144
- # Log feedback and messages
145
- if st.session_state['feedback']:
146
- user_feedback ={
147
- "user_message": st.session_state["messages"][-2],
148
- "assistant_message": st.session_state["messages"][-1],
149
- "feedback_score": st.session_state["feedback"]["score"],
150
- "feedback_text": st.session_state["feedback"]["text"],
151
- }
152
- #write_to_db(u_message=user_feedback["user_message"],
153
- # a_message=user_feedback["assistant_message"],
154
- # f_score=user_feedback["feedback_score"],
155
- # f_text=user_feedback["feedback_text"])
156
-
157
- if __name__ == "__main__":
158
- veda_bot()