Upload app_streamlit.py
app_streamlit.py +245 -0
ADDED
@@ -0,0 +1,245 @@
import streamlit as st
import os
from dotenv import load_dotenv  # Only needed if using a .env file

# LangChain and HuggingFace
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_groq import ChatGroq
from langchain.chains import RetrievalQA

# Load the .env file (if using it)
load_dotenv()
groq_api_key = os.getenv("GROQ_API_KEY")

# Load embeddings, model, and vector store
@st.cache_resource  # Singleton, prevents multiple initializations
def init_chain():
    model_kwargs = {'trust_remote_code': True}
    embedding = HuggingFaceEmbeddings(model_name='nomic-ai/nomic-embed-text-v1.5', model_kwargs=model_kwargs)
    llm = ChatGroq(groq_api_key=groq_api_key, model_name="llama3-70b-8192", temperature=0.2)
    vectordb = Chroma(persist_directory='updated_CSPCDB2', embedding_function=embedding)

    # Create chain; top-k must be passed through search_kwargs, since
    # as_retriever does not accept a bare k=... keyword
    chain = RetrievalQA.from_chain_type(llm=llm,
                                        chain_type="stuff",
                                        retriever=vectordb.as_retriever(search_kwargs={"k": 5}),
                                        return_source_documents=True)
    return chain
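# A minimal sketch of the .env file this app expects when load_dotenv() is used
# (the variable name matches the os.getenv call above; the value is a hypothetical
# placeholder, not a real key):
#
#   GROQ_API_KEY=gsk_your_key_here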
# Streamlit app layout
st.set_page_config(
    page_title="CSPC Citizens Charter Conversational Agent",
    page_icon="cspclogo.png"
)

with st.sidebar:
    st.title('CSPCean Conversational Agent')
    st.subheader('Ask anything CSPC Related here!')

    st.markdown('''**About CSPC:**
    History, Core Values, Mission and Vision''')

    st.markdown('''**Admission & Graduation:**
    Apply, Requirements, Process, Graduation''')

    st.markdown('''**Student Services:**
    Scholarships, Orgs, Facilities''')

    st.markdown('''**Academics:**
    Degrees, Courses, Faculty''')

    st.markdown('''**Officials:**
    President, VPs, Deans, Admin''')

    st.markdown('''
    Access the resources here:

    - [CSPC Citizen’s Charter](https://cspc.edu.ph/governance/citizens-charter/)
    - [About CSPC](https://cspc.edu.ph/about/)
    - [College Officials](https://cspc.edu.ph/college-officials/)
    ''')
    st.markdown('Team XceptionNet')

# Store LLM-generated responses
if "messages" not in st.session_state:
    st.session_state.chain = init_chain()
    st.session_state.messages = [{"role": "assistant", "content": "How may I help you today?"}]
# Function for generating a response using recent conversation turns.
# NOTE: this first draft is superseded by the generate_response defined further
# below; Python binds the later definition, so this version is never called.
def generate_response(prompt_input):
    # Initialize result
    result = ''

    # Prepare conversation history: take the last 3 messages
    # (each user/assistant exchange is 2 messages, so this is 1.5 exchanges)
    conversation_history = ""
    recent_messages = st.session_state.messages[-3:]

    for message in recent_messages:
        conversation_history += f"{message['role']}: {message['content']}\n"

    # Append the current user prompt to the conversation history
    conversation_history += f"user: {prompt_input}\n"

    # Invoke chain with the truncated conversation history
    res = st.session_state.chain.invoke(conversation_history)

    # Strip boilerplate prefixes from the response; each slice length equals the
    # matched prefix's length (e.g. len('According to the provided text, ') == 32)
    if res['result'].startswith('According to the provided context, '):
        res['result'] = res['result'][35:]
        res['result'] = res['result'][0].upper() + res['result'][1:]
    elif res['result'].startswith('Based on the provided context, '):
        res['result'] = res['result'][31:]
        res['result'] = res['result'][0].upper() + res['result'][1:]
    elif res['result'].startswith('According to the provided text, '):
        res['result'] = res['result'][32:]
        res['result'] = res['result'][0].upper() + res['result'][1:]
    elif res['result'].startswith('According to the context, '):
        res['result'] = res['result'][26:]
        res['result'] = res['result'][0].upper() + res['result'][1:]
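# (Aside, a sketch: on Python 3.9+ the hand-counted slices above can be avoided
# with str.removeprefix, which returns the string unchanged when the prefix is
# absent, e.g.:
#     res['result'] = res['result'].removeprefix('According to the provided context, ')
# )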
    # result += res['result']

    # # Process sources
    # result += '\n\nSources: '
    # sources = []
    # for source in res["source_documents"]:
    #     sources.append(source.metadata['source'][122:-4])  # Adjust as per your source format

    # sources = list(set(sources))  # Remove duplicates
    # source_list = ", ".join(sources)

    # result += source_list

    # return result, res['result'], source_list
    # return result, res['result']

# def generate_response(prompt_input):
#     # Prepare conversation history: get the last 3 messages
#     conversation_history = ""
#     recent_messages = st.session_state.messages[-3:]

#     for message in recent_messages:
#         conversation_history += f"{message['role']}: {message['content']}\n"

#     # Append the current user prompt to the conversation history
#     conversation_history += f"user: {prompt_input}\n"

#     # Invoke chain with the truncated conversation history
#     res = st.session_state.chain.invoke(conversation_history)

#     # Process response
#     result_text = res['result']
#     if result_text.startswith('According to the provided context, '):
#         result_text = result_text[35:].capitalize()
#     elif result_text.startswith('Based on the provided context, '):
#         result_text = result_text[31:].capitalize()
#     elif result_text.startswith('According to the provided text, '):
#         result_text = result_text[32:].capitalize()
#     elif result_text.startswith('According to the context, '):
#         result_text = result_text[26:].capitalize()

#     # Extract and format sources
#     sources = []
#     for source in res.get("source_documents", []):  # Safeguard with .get() in case sources are missing
#         source_path = source.metadata.get('source', '')
#         formatted_source = source_path[122:-4] if source_path else "Unknown source"
#         sources.append(formatted_source)

#     # Remove duplicates and combine into a single string
#     unique_sources = list(set(sources))
#     source_list = ", ".join(unique_sources)

#     # Combine response text with sources
#     result_text += f"\n\n**Sources:** {source_list}" if source_list else "\n\n**Sources:** None"

#     return result_text

#     return res['result']
def generate_response(prompt_input):
    # Retrieve vector database context using ONLY the current user input
    retriever = st.session_state.chain.retriever
    relevant_context = retriever.get_relevant_documents(prompt_input)  # Context for the current prompt only

    # Prepare the full conversation history for the LLM
    conversation_history = ""
    for message in st.session_state.messages:
        conversation_history += f"{message['role']}: {message['content']}\n"

    # Append the current user prompt to the conversation history
    conversation_history += f"user: {prompt_input}\n"

    # Format the input for the chain with the retrieved context
    formatted_input = (
        f"Context:\n"
        f"{' '.join([doc.page_content for doc in relevant_context])}\n\n"
        f"Conversation:\n{conversation_history}"
    )

    # Invoke the RetrievalQA chain directly with the formatted input.
    # Note: the chain's own retriever runs again internally on this query string,
    # so retrieval effectively happens twice; the sources reported below come
    # from the manual retrieval above.
    res = st.session_state.chain.invoke({"query": formatted_input})

    # Strip boilerplate prefixes from the response text, preserving the casing
    # of the rest of the answer (str.capitalize() would lowercase it)
    result_text = res['result']
    for prefix in ('According to the provided context, ',
                   'Based on the provided context, ',
                   'According to the provided text, ',
                   'According to the context, '):
        if result_text.startswith(prefix):
            result_text = result_text[len(prefix):]
            result_text = result_text[0].upper() + result_text[1:]
            break

    # Extract and format sources (if available)
    sources = []
    for doc in relevant_context:
        source_path = doc.metadata.get('source', '')
        formatted_source = source_path[122:-4] if source_path else "Unknown source"  # Adjust as per your source format
        sources.append(formatted_source)

    # Remove duplicates and combine into a single string
    unique_sources = list(set(sources))
    source_list = ", ".join(unique_sources)

    # Combine response text with sources
    result_text += f"\n\n**Sources:** {source_list}" if source_list else "\n\n**Sources:** None"

    return result_text
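# (Sketch, not exercised by the app: to avoid the double retrieval noted above,
# the pre-retrieved documents could be fed straight to the chain's underlying
# stuff chain, which classic LangChain RetrievalQA exposes as
# combine_documents_chain:
#
#     res = st.session_state.chain.combine_documents_chain.invoke(
#         {"input_documents": relevant_context, "question": formatted_input}
#     )
#     result_text = res["output_text"]
# )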
# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# User-provided prompt for the input box
if prompt := st.chat_input(placeholder="Ask a question..."):
    # Append the user query to session state
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

    # Generate and display a placeholder for the assistant response
    with st.chat_message("assistant"):
        message_placeholder = st.empty()  # Placeholder shown while the response is generated
        with st.spinner("Generating response..."):
            # Use conversation history when generating the response
            response = generate_response(prompt)
            message_placeholder.markdown(response)  # Replace the placeholder with the actual response
    st.session_state.messages.append({"role": "assistant", "content": response})
# Clear chat history function
def clear_chat_history():
    # Reset chat messages to the assistant greeting
    st.session_state.messages = [{"role": "assistant", "content": "How may I help you today?"}]

    # Reinitialize the chain to clear any stored history (ensures it forgets previous user inputs)
    st.session_state.chain = init_chain()

    # Clear any additional session state that might be remembering user inquiries
    # (no code in this file sets this key, so the guard is a defensive no-op here)
    if "recent_user_messages" in st.session_state:
        del st.session_state["recent_user_messages"]

st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
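# (A sketch of the third-party packages the imports above rely on; these are the
# usual PyPI distribution names, but the exact versions this repo was built
# against are not specified here:
#   pip install streamlit python-dotenv langchain langchain-community langchain-groq
#   pip install chromadb sentence-transformers  # backends for Chroma and HuggingFaceEmbeddings
# )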