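"""ProHelper: a Streamlit front end for a retrieval-augmented (RAG) chat assistant
over a chemical-engineering knowledge base. Sidebar pages: Info, About, Settings,
and Bots (the chat itself)."""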
import streamlit as st
from streamlit_option_menu import option_menu
from streamlit_lottie import st_lottie
import json
from streamlit_chat import message
from emb import EmbeddingsManager
from history import HistoryManager
from llm import LLMManager
from settings import SettingManager
import os


# App-wide parameters
vector_store = "prohelper"  # identifier shared by the history and embeddings managers
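
# The get_* helpers below are wrapped in @st.cache_resource so each manager
# (embeddings, history, LLM, settings) is constructed once per session and
# reused across Streamlit reruns instead of being rebuilt on every interaction.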

@st.cache_resource
def get_EMB():
    # Cached embeddings manager; the instructor-large model is loaded once per session
    emb = EmbeddingsManager(get_settings(), emb="hkunlp/instructor-large")
    emb.set = get_settings()  # attach the shared settings object
    return emb

@st.cache_resource
def get_History():
    return HistoryManager()

@st.cache_resource
def get_llm():
    llm = LLMManager(get_settings())
    llm.set = get_settings()  # attach the shared settings object
    return llm

@st.cache_resource
def get_settings():
    return SettingManager()
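
# Assumption (SettingManager lives in settings.py, not shown here): it exposes
# plain mutable attributes read and written throughout this file, e.g.
# ai_assisted_search, max_new_token, top_p, temperature, repetition_penalty,
# listLLMMap, defaultLLM, system_prompt, n_doc_return, search_method,
# available_search_methods, default_ai_search_prompt and the RAG_* variants.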

#Main page config
st.set_page_config(
    page_title="ProHelper",
    layout="wide",
    initial_sidebar_state="expanded")

#Data Pull and Functions
st.markdown("""
<style>
.big-font {
    font-size:80px !important;
}
</style>
""", unsafe_allow_html=True)

#HideStreamlit
hide_streamlit_style = """
            <style>
            #MainMenu {visibility: hidden;}
            footer {visibility: hidden;}
            </style>
            """
st.markdown(hide_streamlit_style, unsafe_allow_html=True) 

@st.cache_data
def load_lottiefile(filepath: str):
    with open(filepath, "r", encoding="utf-8") as f:
        return json.load(f)

# Display conversation history using streamlit_chat messages
def display_conversation(history_manager):
    if not history_manager.chat_exists(vector_store):
        history_manager.add_message(vector_store, "Assistant", "Hi, how can I help you?")
        message("Hi, how can I help you?", is_user=False)
    else:
        for sender, mess in history_manager.get_messages(vector_store):
            is_user = not (sender == "Assistant")
            message(mess, is_user=is_user)

def clear_text():
    st.session_state.my_text = st.session_state.input
    st.session_state.input = ""
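
# Note: display_conversation and clear_text rely on the older streamlit_chat API
# and are not called from main() below; the Bots page renders chat with st.chat_message.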

def process_answer(user_input):
    # RAG pipeline: format the stored chat history, retrieve matching context
    # from the vector store, build the final prompt, and generate the answer.
    history = get_History().format_chat(vector_store)
    context = get_EMB().get_formatted_context(vector_store, user_input, history)
    prompt = get_llm().get_prompt(user_input, context, history)
    return get_llm().get_text(prompt), context, history

def create_setting():
    st.title('ProHelper Settings')
    st.divider()

    st.subheader('*Main LLM Settings*')

    # Create a main container with two columns
    left_column, right_column = st.columns([1,  2])

    # Add AI assisted search toggle
    checked = left_column.checkbox('AI assisted search', value=get_settings().ai_assisted_search, help="An additional LLM will pre-process the question to produce improved search terms for RAG")

    #Add max new token settings
    max_new_token = left_column.number_input(label='Max new token', value=get_settings().max_new_token,help="The maximum number of tokens that the LLM can generate")
    get_settings().max_new_token=max_new_token

    #Add topP setting
    top_p = left_column.slider('Top p', min_value=0.0, max_value=1.0, value=get_settings().top_p, step=0.01, help="Used to control the randomness of the generated text.")
    get_settings().top_p=top_p

    #Add temperature setting
    temperature = left_column.slider('Temperature', min_value=0.01, max_value=1.0, value=get_settings().temperature, step=0.01, help="Another parameter used to control the randomness of the generated text.")
    get_settings().temperature=temperature

    #Add repetition penalty setting
    repetition_penalty = left_column.slider('Repetition penalty', min_value=0.7, max_value=2.0, value=get_settings().repetition_penalty, step=0.01, help="Used to discourage the model from repeating the same phrases or content.")
    get_settings().repetition_penalty = repetition_penalty

    #Add LLM settings
    llm_options = get_settings().listLLMMap
    selected_option = left_column.selectbox('Chat LLM', llm_options, index=get_settings().defaultLLM)
    selected_llm = llm_options.index(selected_option)
    get_settings().defaultLLM=selected_llm
    get_llm().selectLLM(selected_option)

    # Persist the checkbox state in the shared settings
    get_settings().ai_assisted_search = checked

    #Add prompt setting
    system_prompt = right_column.text_area('System prompt:', get_settings().system_prompt,height=500,help=r"Text that provides context and instructions to the model before it generates a response. The special fields {context}, {history} and {question} will be replaced with their corresponding values")
    get_settings().system_prompt=system_prompt

    st.divider()
    st.subheader('*RAG Settings*')

    # Create a main container with two columns
    left_column_RAG, right_column_RAG = st.columns([1,  2])

    #Add return documents settings
    n_doc_return = left_column_RAG.number_input(label='N doc return', value=get_settings().n_doc_return,help="The number of documents to be returned by the search")
    get_settings().n_doc_return=n_doc_return

    #Add default search method
    rag_methods = get_settings().available_search_methods
    selected_option_RAG = left_column_RAG.selectbox('Search Methods', rag_methods, index=rag_methods.index(get_settings().search_method))
    get_settings().search_method = selected_option_RAG

    #Add prompt setting
    RAG_prompt = right_column_RAG.text_area('RAG prompt:', get_settings().default_ai_search_prompt,height=500,help=r"Text that provides context and instructions to the model used for generating search terms. The special fields {history} and {question} will be replaced with their corresponding values")
    get_settings().default_ai_search_prompt = RAG_prompt

    #Add max new token settings
    RAG_max_new_token = left_column_RAG.number_input(label='RAG max new token', value=get_settings().RAG_max_new_token,help="The maximum number of tokens that the LLM can generate")
    get_settings().RAG_max_new_token=RAG_max_new_token

    #Add topP setting
    RAG_top_p = left_column_RAG.slider('RAG top p', min_value=0.0, max_value=1.0, value=get_settings().RAG_top_p, step=0.01, help="Used to control the randomness of the generated text.")
    get_settings().RAG_top_p=RAG_top_p

    #Add temperature setting
    RAG_temperature = left_column_RAG.slider('RAG temperature', min_value=0.01, max_value=1.0, value=get_settings().RAG_temperature, step=0.01, help="Another parameter used to control the randomness of the generated text.")
    get_settings().RAG_temperature=RAG_temperature

    #Add repetition penalty setting for the RAG helper model
    RAG_repetition_penalty = left_column_RAG.slider('RAG repetition penalty', min_value=0.7, max_value=2.0, value=get_settings().RAG_repetition_penalty, step=0.01, help="Used to discourage the model from repeating the same phrases or content.")
    get_settings().RAG_repetition_penalty = RAG_repetition_penalty
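    # All widget values above are written straight back onto the cached
    # SettingManager, so they persist for the lifetime of the session.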




def create_info():
    # Header
    st.title('Welcome to ProHelper')
    st.subheader('*A new tool to help you with process problems*')

    st.divider()

    # Use cases
    with st.container():
        col1, col2 = st.columns(2)
        with col1:
            st.header('Knowledge base:')
            st.markdown(
                """
                - _Perry's Chemical Engineers' Handbook_
                - _Coulson & Richardson's Chemical Engineering Vol. 6: Chemical Engineering Design, 4th Edition_
                - _Chemical Process Equipment: Selection and Design, Stanley M. Walas_
                """
            )
        with col2:
            #current_dir = os.path.dirname(__file__)
            #lottie_path = os.path.join(current_dir, r"res/lottie/Piping.json")
            lottie2 = load_lottiefile(r"/home/user/app/res/lottie/Piping.json")
            st_lottie(lottie2, key='place', height=300, width=300)
    st.divider()

def main():

    #SIDE BAR------------------------------------------------------------------------------------------------------------------------------
    with st.sidebar:
        selected = option_menu('ProHelper', ['Info', 'About', 'Settings', 'Bots'],
            icons=['info-circle', 'question', 'gear', 'robot'],  # one Bootstrap icon per menu entry
            menu_icon='droplet-fill', default_index=0)
    
    #INTRO PAGE----------------------------------------------------------------------------------------------------------------------------
    if selected=="Info":
        create_info()
    
    #SETTING PAGE----------------------------------------------------------------------------------------------------------------------------
    if selected=="Settings":
        create_setting()
    
    #ABOUT PAGE----------------------------------------------------------------------------------------------------------------------------
    if selected=="About":
        # Header
        st.title('About ProHelper')
        
        # Content
        st.write("ProHelper is a testing program created by DFO.")
        st.write("This program is designed for testing purposes only.")
        st.write("For more information, please contact DFO.")


    #BOTS PAGE-----------------------------------------------------------------------------------------------------------------------------
    if selected=="Bots":
        if "messages" not in st.session_state.keys():
            st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]

            # Display or clear chat messages
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.write(message["content"])

        # User-provided prompt
        if prompt := st.chat_input("Ask me anything"):
            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.write(prompt)


        # Generate a reply whenever the last message is from the user
        if st.session_state.messages[-1]["role"] != "assistant":
            prompt = st.session_state.messages[-1]["content"]
            with st.chat_message("assistant"):
                with st.spinner("Thinking..."):
                    response, cont, hist = process_answer(prompt)
                    placeholder = st.empty()
                    full_response = ''
                    for item in response:
                        full_response += item
                        placeholder.markdown(full_response)
            assistant_message = {"role": "assistant", "content": full_response}
            st.session_state.messages.append(assistant_message)

            with st.expander("Sources"):
                st.write(f"Context: {cont}")
                st.write(f"History: {hist}")

            # Mirror the exchange into the shared HistoryManager so it is
            # available as formatted history on the next turn
            get_History().add_message(vector_store, "User", prompt)
            get_History().add_message(vector_store, "Assistant", full_response)




if __name__ == "__main__":
    main()