# ProHelper / app.py
import streamlit as st
from streamlit_option_menu import option_menu
from streamlit_lottie import st_lottie
import json
from streamlit_chat import message
from emb import EmbeddingsManager
from history import HistoryManager
from llm import LLMManager
from settings import SettingManager
import os
# Module-level parameters: name of the vector store / chat collection used throughout
vector_store = "prohelper"
@st.cache_resource
def get_EMB():
    emb = EmbeddingsManager(get_settings(), emb="hkunlp/instructor-large")
    emb.set = get_settings()
    return emb

@st.cache_resource
def get_History():
    return HistoryManager()

@st.cache_resource
def get_llm():
    llm = LLMManager(get_settings())
    llm.set = get_settings()
    return llm

@st.cache_resource
def get_settings():
    return SettingManager()
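# Note: @st.cache_resource caches a single instance across reruns and sessions,
# so the four managers above effectively behave as app-wide singletons.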
# Main page config
st.set_page_config(
    page_title="ProHelper",
    layout="wide",
    initial_sidebar_state="expanded",
)
# Custom CSS helper class (.big-font) injected via raw HTML
st.markdown("""
    <style>
    .big-font {
        font-size: 80px !important;
    }
    </style>
""", unsafe_allow_html=True)
# Hide Streamlit's default menu and footer
hide_streamlit_style = """
    <style>
    #MainMenu {visibility: hidden;}
    footer {visibility: hidden;}
    </style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
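# unsafe_allow_html=True is required so Streamlit renders the raw <style>
# blocks above instead of escaping them as text.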
@st.cache_data
def load_lottiefile(filepath: str):
    with open(filepath, "r") as f:
        return json.load(f)
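# st.cache_data fits here because the loaded Lottie JSON is plain serializable
# data, unlike the manager objects above, which hold live resources and are
# therefore cached with st.cache_resource.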
# Display conversation history using Streamlit messages
def display_conversation(history_manager):
    if not history_manager.chat_exists(vector_store):
        history_manager.add_message(vector_store, "Assistant", "Hi, how can I help you?")
        message("Hi, how can I help you?", is_user=False)
    else:
        for sender, mess in history_manager.get_messages(vector_store):
            is_user = sender != "Assistant"
            message(mess, is_user=is_user)
def clear_text():
    st.session_state.my_text = st.session_state.input
    st.session_state.input = ""
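# Note: display_conversation and clear_text are helpers for the legacy
# streamlit_chat UI (clear_text is the usual on_change callback pattern for
# emptying a text_input after submit); neither is referenced in main() below.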
def process_answer(user_input):
    history = get_History().format_chat(vector_store)
    context = get_EMB().get_formatted_context(vector_store, user_input, history)
    prompt = get_llm().get_prompt(user_input, context, history)
    return get_llm().get_text(prompt), context, history
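# process_answer implements the RAG round trip: format the stored chat history,
# retrieve formatted context from the vector store for the question, build the
# final prompt, and return the generated answer together with the context and
# history (surfaced later under the "Sources" expander on the Bots page).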
def create_setting():
    st.title('ProHelper Settings')
    st.divider()
    st.subheader('*Main LLM Settings*')
    # Create a main container with two columns
    left_column, right_column = st.columns([1, 2])
    # Add AI-assisted search checkbox
    checked = left_column.checkbox('AI assisted search', value=get_settings().ai_assisted_search, help="An additional LLM will pre-process the question to get improved search words for RAG")
    get_settings().ai_assisted_search = checked
    # Add max new tokens setting
    max_new_token = left_column.number_input(label='Max new token', value=get_settings().max_new_token, help="The maximum number of tokens that the LLM can generate")
    get_settings().max_new_token = max_new_token
    # Add top-p setting
    top_p = left_column.slider('Top p', min_value=0.0, max_value=1.0, value=get_settings().top_p, step=0.01, help="Used to control the randomness of the generated text.")
    get_settings().top_p = top_p
    # Add temperature setting
    temperature = left_column.slider('Temperature', min_value=0.01, max_value=1.0, value=get_settings().temperature, step=0.01, help="Another parameter used to control the randomness of the generated text.")
    get_settings().temperature = temperature
    # Add repetition penalty setting
    repetition_penalty = left_column.slider('Repetition penalty', min_value=0.7, max_value=2.0, value=get_settings().repetition_penalty, step=0.01, help="Used to discourage the model from repeating the same phrases or content.")
    get_settings().repetition_penalty = repetition_penalty
    # Add chat LLM selection
    llm_options = get_settings().listLLMMap
    selected_option = left_column.selectbox('Chat LLM', llm_options, index=get_settings().defaultLLM)
    get_settings().defaultLLM = llm_options.index(selected_option)
    get_llm().selectLLM(selected_option)
    # Add system prompt setting
    system_prompt = right_column.text_area('System prompt:', get_settings().system_prompt, height=500, help=r"Text that provides context and instructions to the model before it generates a response. The special fields {context}, {history} and {question} will be replaced with their corresponding values")
    get_settings().system_prompt = system_prompt
    st.divider()
    st.subheader('*RAG Settings*')
    # Create a main container with two columns
    left_column_RAG, right_column_RAG = st.columns([1, 2])
    # Add number of returned documents setting
    n_doc_return = left_column_RAG.number_input(label='N doc return', value=get_settings().n_doc_return, help="The number of documents to be returned by the search")
    get_settings().n_doc_return = n_doc_return
    # Add search method selection
    rag_methods = get_settings().available_search_methods
    selected_option_RAG = left_column_RAG.selectbox('Search Methods', rag_methods, index=rag_methods.index(get_settings().search_method))
    get_settings().search_method = selected_option_RAG
    # Add RAG prompt setting
    RAG_prompt = right_column_RAG.text_area('RAG prompt:', get_settings().default_ai_search_prompt, height=500, help=r"Text that provides context and instructions to the model used for search terms. The special fields {history} and {question} will be replaced with their corresponding values")
    get_settings().default_ai_search_prompt = RAG_prompt
    # Add RAG max new tokens setting
    RAG_max_new_token = left_column_RAG.number_input(label='RAG max new token', value=get_settings().RAG_max_new_token, help="The maximum number of tokens that the LLM can generate")
    get_settings().RAG_max_new_token = RAG_max_new_token
    # Add RAG top-p setting
    RAG_top_p = left_column_RAG.slider('RAG top p', min_value=0.0, max_value=1.0, value=get_settings().RAG_top_p, step=0.01, help="Used to control the randomness of the generated text.")
    get_settings().RAG_top_p = RAG_top_p
    # Add RAG temperature setting
    RAG_temperature = left_column_RAG.slider('RAG temperature', min_value=0.01, max_value=1.0, value=get_settings().RAG_temperature, step=0.01, help="Another parameter used to control the randomness of the generated text.")
    get_settings().RAG_temperature = RAG_temperature
    # Add RAG repetition penalty setting
    RAG_repetition_penalty = left_column_RAG.slider('RAG repetition penalty', min_value=0.7, max_value=2.0, value=get_settings().RAG_repetition_penalty, step=0.01, help="Used to discourage the model from repeating the same phrases or content.")
    get_settings().RAG_repetition_penalty = RAG_repetition_penalty
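# Note: settings changes take effect immediately because get_settings() returns
# the same cached SettingManager instance on every call, so the attribute
# assignments above mutate shared state rather than a per-widget copy.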
def create_info():
    # Header
    st.title('Welcome to ProHelper')
    st.subheader('*A new tool to help you with process problems*')
    st.divider()
    # Use cases
    with st.container():
        col1, col2 = st.columns(2)
        with col1:
            st.header('Knowledge base:')
            st.markdown(
                """
                - _Perry's Chemical Engineers' Handbook_
                - _Coulson & Richardson's Chemical Engineering Vol. 6: Chemical Engineering Design, 4th Edition_
                - _Chemical Process Equipment: Selection and Design by Stanley M. Walas_
                """
            )
        with col2:
            # Resolve the Lottie animation relative to this file, so the app
            # does not depend on the absolute Space path /home/user/app
            current_dir = os.path.dirname(__file__)
            lottie2 = load_lottiefile(os.path.join(current_dir, "res/lottie/Piping.json"))
            st_lottie(lottie2, key='place', height=300, width=300)
    st.divider()
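# main() renders one of four pages (Info, About, Settings, Bots) chosen from
# the sidebar; the Bots page implements the chat loop on top of process_answer.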
def main():
    # SIDE BAR ---------------------------------------------------------------------------------------------------------------------------
    with st.sidebar:
        selected = option_menu('ProHelper', ["Info", "About", "Settings", "Bots"],
                               icons=['info-circle', 'question', 'gear', 'robot'],  # one Bootstrap icon per menu entry
                               menu_icon='droplet-fill', default_index=0)
    # INFO PAGE --------------------------------------------------------------------------------------------------------------------------
    if selected == "Info":
        create_info()
    # SETTINGS PAGE ----------------------------------------------------------------------------------------------------------------------
    if selected == "Settings":
        create_setting()
    # ABOUT PAGE -------------------------------------------------------------------------------------------------------------------------
    if selected == "About":
        # Header
        st.title('About ProHelper')
        # Content
        st.write("ProHelper is a testing program created by DFO.")
        st.write("This program is designed for testing purposes only.")
        st.write("For more information, please contact DFO.")
    # BOTS PAGE --------------------------------------------------------------------------------------------------------------------------
    if selected == "Bots":
        if "messages" not in st.session_state:
            st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
        # Display chat history (msg avoids shadowing the imported message())
        for msg in st.session_state.messages:
            with st.chat_message(msg["role"]):
                st.write(msg["content"])
        # User-provided prompt
        if prompt := st.chat_input("Ask me anything"):
            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.write(prompt)
        # Generate a reply whenever the last message is from the user
        if st.session_state.messages[-1]["role"] != "assistant":
            prompt = st.session_state.messages[-1]["content"]
            with st.chat_message("assistant"):
                with st.spinner("Thinking..."):
                    response, cont, hist = process_answer(prompt)
                    placeholder = st.empty()
                    full_response = ''
                    # Stream the answer into the placeholder as it arrives
                    for item in response:
                        full_response += item
                        placeholder.markdown(full_response)
                    placeholder.markdown(full_response)
            st.session_state.messages.append({"role": "assistant", "content": full_response})
            with st.expander("Sources"):
                st.write(f"Context: {cont}")
                st.write(f"History: {hist}")
            get_History().add_message(vector_store, "User", prompt)
            get_History().add_message(vector_store, "Assistant", full_response)
if __name__ == "__main__":
main()