import streamlit as st
from pypdf import PdfReader
# import replicate
import os
from pathlib import Path
from dotenv import load_dotenv
import pickle
import timeit
from PIL import Image
import datetime
import base64

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts.prompt import PromptTemplate
from langchain.llms import LlamaCpp
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.document_loaders import PyPDFDirectoryLoader
from langchain.retrievers import BM25Retriever, EnsembleRetriever
from langchain.chat_models import ChatOpenAI
from langchain.agents.agent_toolkits import create_retriever_tool
from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
from langchain.utilities import SerpAPIWrapper

from utils import build_embedding_model, build_llm
from utils import load_retriver, load_vectorstore, load_conversational_retrievel_chain
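# The utils helpers imported above are project-specific. A minimal sketch of the shapes this app
# assumes (illustration only, not the actual implementation):
#
#   def build_embedding_model():
#       return HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
#
#   def load_vectorstore(persist_directory, embeddings):
#       return Chroma(persist_directory=persist_directory, embedding_function=embeddings)
#
#   def load_retriver(chroma_vectorstore):
#       return chroma_vectorstore.as_retriever(search_kwargs={"k": 4})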

load_dotenv()
# Getting current timestamp to keep track of historical conversations
current_timestamp = datetime.datetime.now()
timestamp_string = current_timestamp.strftime("%Y-%m-%d %H:%M:%S")

# Directory paths
persist_directory = "Database/PDF_HTML_CHROMA_DB"
all_docs_pkl_directory = 'Database/text_chunks_html_pdf.pkl'

# Initialize Streamlit session state to cache expensive objects (LLM, embeddings, vector store, retriever, chain)
# so they are not re-initialized on every rerun of the script.
if "llm" not in st.session_state:
    st.session_state["llm"] = build_llm()

if "embeddings" not in st.session_state:
    st.session_state["embeddings"] = build_embedding_model()
    
if "vector_db" not in st.session_state:
    st.session_state["vector_db"] = load_vectorstore(persist_directory=persist_directory, embeddings=st.session_state["embeddings"])
 
# if "text_chunks" not in st.session_state:
#     st.session_state["text_chunks"] = load_text_chunks(text_chunks_pkl_dir=all_docs_pkl_directory)

if "retriever" not in st.session_state:
    st.session_state["retriever"] = load_retriver(chroma_vectorstore=st.session_state["vector_db"])

if "conversation_chain" not in st.session_state:
    st.session_state["conversation_chain"] = load_conversational_retrievel_chain(retriever=st.session_state["retriever"], llm=st.session_state["llm"])    



# App title
st.set_page_config(
    page_title="OMP Search Bot",
    layout="wide",
    initial_sidebar_state="expanded",
)

st.markdown("""
        <style>
               .block-container {
                    padding-top: 2.2rem}
        </style>
        """, unsafe_allow_html=True)
# Two-column layout used for the app header
col1, col2 = st.columns(2)

title1 = """
<p style="font-size: 26px;text-align: right; color: #0C3453; font-weight: bold">OPM Retirement Services Assistant</p>
"""


# Load and base64-encode the OPM logo so it can be embedded inline in the header HTML
with open("opm_logo.png", "rb") as file_:
    data_url = base64.b64encode(file_.read()).decode("utf-8")

st.markdown(
    f"""
    <div style="background-color: white; padding: 15px; border-radius: 10px;">
        <div style="display: flex; justify-content: space-between;">
            <div>
                <img src="data:image/png;base64,{data_url}" style="max-width: 100%;" alt="OPM Logo" />
            </div>
            <div style="flex: 1; padding: 15px;">
                {title1}
            </div>
        </div>
    </div>
    """,
    unsafe_allow_html=True
)
st.write("")


st.write('<p style="color: #B0B0B0;margin: 0;">OPM is here to help you transition from serving the American people to enjoying your retirement. This retirement services assistant shows our commitment to supporting new and existing retirees throughout the retirement journey.  Our assistant is trained on 1500+ documents related to OPM retirement services and can answer your questions in conversational style.  Just ask away..</p>', unsafe_allow_html=True)

st.markdown("""---""")

text_html = """
    <p style="font-size: 24px; text-align: center; color:blue; margin: 0;">
        Type your question below in conversational language.
    </p>
    <p style="font-size: 18px; text-align: center; color: blue; margin: 0;">
        Sample questions:<br>
        Can I work part-time and get retirement benefits?<br>
        Will I get paid for my unused annual leave?<br>
        How do I report the death of a federal employee?<br>
        What are interim benefits?
    </p>
"""

st.write(text_html, unsafe_allow_html=True)


with st.sidebar:
    st.subheader("")       
        
if st.session_state["vector_db"] and st.session_state["llm"]:
    # Store LLM-generated responses
    if "messages" not in st.session_state.keys():
        st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?", "Source":""}]

    # Display or clear chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])
            if message["Source"]=="":
                st.write("")
            else:
                with st.expander("source"):
                    for idx, item in enumerate(message["Source"]):
                        st.markdown(item["Page"])
                        st.markdown(item["Source"])
                        st.markdown(item["page_content"])
                        st.write("---")


    # Initialize the session state to store chat history
    if "stored_session" not in st.session_state:
        st.session_state["stored_session"] = []

    # Create a list to store expanders
    if "expanders" not in st.session_state:
        st.session_state["expanders"] = []
    
    # Define a function to add a new chat expander
    def add_chat_expander(chat_history):
        current_timestamp = datetime.datetime.now()
        timestamp_string = current_timestamp.strftime("%Y-%m-%d %H:%M:%S")
        st.session_state["expanders"].append({"timestamp": timestamp_string, "chat_history": chat_history})
                
    def clear_chat_history():
        """
        Archive the current conversation to the sidebar history and start a new chat.
        """
        stored_session = []
        for dict_message in st.session_state.messages:
            if dict_message["role"] == "user":
                string_dialogue = "User: " + dict_message["content"] + "\n\n"
                st.session_state["stored_session"].append(string_dialogue)

            else:
                string_dialogue = "Assistant: " + dict_message["content"] + "\n\n"
                st.session_state["stored_session"].append(string_dialogue)
            stored_session.append(string_dialogue)
        
        # Add a new chat expander
        add_chat_expander(stored_session)
        st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?", "Source":""}]
        
    st.sidebar.button('New chat', on_click=clear_chat_history, use_container_width=True)
    st.sidebar.text("")
    st.sidebar.write('<p style="font-size: 16px;text-align: center; color: #727477; font-weight: bold">Chat history</p>', unsafe_allow_html=True)
    # Display existing chat expanders
    for expander_info in st.session_state["expanders"]:
        with st.sidebar.expander("Conversation ended at:"+"\n\n"+expander_info["timestamp"]):
            for message in expander_info["chat_history"]:
                if message.startswith("User:"):
                    st.write(f'<span style="color: #EF6A6A;">{message}</span>', unsafe_allow_html=True)
                elif message.startswith("Assistant:"):
                    st.write(f'<span style="color: #F7BD45;">{message}</span>', unsafe_allow_html=True)
                else:
                    st.write(message)


    def generate_llm_response(conversation_chain, prompt_input):
        """Run the user's prompt through the conversational retrieval chain and return the answer text."""
        # output= conversation_chain({'question': prompt_input})
        res = conversation_chain(prompt_input)
        return res['result']
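    # NOTE (assumption): the chain built by load_conversational_retrievel_chain is expected to
    # behave like a RetrievalQA-style chain, i.e. calling it with a query string returns a dict
    # containing a 'result' key. A minimal sketch of such a chain, for illustration only:
    #
    #   from langchain.chains import RetrievalQA
    #   qa_chain = RetrievalQA.from_chain_type(
    #       llm=st.session_state["llm"],
    #       retriever=st.session_state["retriever"],
    #   )
    #   answer = qa_chain("what are interim benefits")["result"]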


    # User-provided prompt
    if prompt := st.chat_input(disabled= not st.session_state["vector_db"]):
        st.session_state.messages.append({"role": "user", "content": prompt, "Source":""})
        with st.chat_message("user"):
            st.write(prompt)

    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant"):
            with st.spinner("Searching..."):
                start = timeit.default_timer()
                response = generate_llm_response(conversation_chain=st.session_state["conversation_chain"], prompt_input=prompt)
                placeholder = st.empty()
                full_response = ''
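                # The chain returns the full answer as a plain string; copy it into full_response
                # and render it once in the placeholder.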
                for item in response:
                    full_response += item
                placeholder.markdown(full_response)
                if response:
                    st.text("-------------------------------------")
                    docs= st.session_state["retriever"].get_relevant_documents(prompt)   
                    source_doc_list= []  
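                    # Convert each retrieved Document to a dict (page_content + metadata) for display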
                    for doc in docs:
                        source_doc_list.append(doc.dict())                   
                    merged_source_doc= []   
                    with st.expander("source"): 
                        for idx, item in enumerate(source_doc_list):
                            source_doc = {"Page": f"Source {idx + 1}", "Source": f"**Source:** {item['metadata']['source'].split('/')[-1]}",
                                        "page_content":item["page_content"]}
                            merged_source_doc.append(source_doc)
                            st.markdown(f"Source {idx + 1}")
                            st.markdown(f"**Source:** {item['metadata']['source'].split('/')[-1]}")
                            st.markdown(item["page_content"])
                            st.write("---")  # Add a separator between entries
                    message = {"role": "assistant", "content": full_response, "Source":merged_source_doc}
                    st.session_state.messages.append(message)
                    st.markdown("👍  👎  Create Ticket")
                # else:
                    # with st.expander("source"):
                    #     message = {"role": "assistant", "content": full_response, "Source":""}
                    #     st.session_state.messages.append(message)
        end = timeit.default_timer()
        print(f"Time to retrieve response: {end - start}")