import json
import os

import streamlit as st
from streamlit_chat import message
from streamlit_lottie import st_lottie
from streamlit_option_menu import option_menu

from emb import EmbeddingsManager
from history import HistoryManager
from llm import LLMManager
from settings import SettingManager

# Name of the vector-store collection; also used as the key for chat history.
vector_store = "prohelper"


@st.cache_resource
def get_EMB():
    """Build the embeddings manager once; later calls return the cached instance."""
    emb = EmbeddingsManager(get_settings(), emb="hkunlp/instructor-large")
    emb.set = get_settings()  # also expose the shared settings on the manager
    return emb


@st.cache_resource
def get_History():
    """Build the chat history manager once; later calls return the cached instance."""
    return HistoryManager()


@st.cache_resource
def get_llm():
    """Build the LLM manager once; later calls return the cached instance."""
    llm = LLMManager(get_settings())
    llm.set = get_settings()  # also expose the shared settings on the manager
    return llm


@st.cache_resource
def get_settings():
    """Build the settings manager once; later calls return the cached instance."""
    return SettingManager()
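
# Note: @st.cache_resource makes the getters above behave like lazy singletons.
# Streamlit constructs each manager on first call and returns the very same
# instance on every rerun and in every session, so the managers (and the
# SettingManager they share) keep their state for the life of the server process.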

st.set_page_config(
    page_title="ProHelper",
    layout="wide",
    initial_sidebar_state="expanded")

# CSS helper class for oversized text.
st.markdown("""
<style>
.big-font {
    font-size: 80px !important;
}
</style>
""", unsafe_allow_html=True)

# Hide Streamlit's default hamburger menu and footer.
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)


@st.cache_data
def load_lottiefile(filepath: str):
    """Load a Lottie animation from disk; st.cache_data caches the parsed JSON per path."""
    with open(filepath, "r", encoding="utf-8") as f:
        return json.load(f)


def display_conversation(history_manager):
    """Render the stored chat history for the current vector store."""
    if not history_manager.chat_exists(vector_store):
        history_manager.add_message(vector_store, "Assistant", "Hi, how can I help you?")
        message("Hi, how can I help you?", is_user=False)
    else:
        for sender, mess in history_manager.get_messages(vector_store):
            is_user = sender != "Assistant"
            message(mess, is_user=is_user)
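
# streamlit_chat's message() draws a single chat bubble; is_user=True aligns
# the bubble as the user's side of the conversation. This helper renders the
# persisted history, but it is not called from main(), which renders the Bots
# tab with st.chat_message instead.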


def clear_text():
    # Widget callback: Streamlit only lets a widget's session-state value be
    # changed from inside a callback, so the text is moved to "my_text" and
    # the "input" box is cleared here.
    st.session_state.my_text = st.session_state.input
    st.session_state.input = ""


def process_answer(user_input):
    """Answer a question with RAG: format history, retrieve context, prompt the LLM."""
    history = get_History().format_chat(vector_store)
    context = get_EMB().get_formatted_context(vector_store, user_input, history)
    prompt = get_llm().get_prompt(user_input, context, history)
    return get_llm().get_text(prompt), context, history
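
# main() iterates over the first returned value to stream the answer, so
# get_text() may return a generator of chunks or a plain string (iterating a
# string streams it character by character). A minimal usage sketch, assuming
# the managers are configured and the vector store exists:
#
#   answer_stream, ctx, hist = process_answer("What is NPSH?")
#   print("".join(answer_stream))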


def create_setting():
    st.title('ProHelper Settings')
    st.divider()

    st.subheader('*Main LLM Settings*')

    left_column, right_column = st.columns([1, 2])

    checked = left_column.checkbox('AI assisted search', value=get_settings().ai_assisted_search,
                                   help="An additional LLM will pre-process the question to produce improved search terms for RAG")
    get_settings().ai_assisted_search = checked

    max_new_token = left_column.number_input(label='Max new tokens', value=get_settings().max_new_token,
                                             help="The maximum number of tokens that the LLM can generate")
    get_settings().max_new_token = max_new_token

    top_p = left_column.slider('Top p', min_value=0.0, max_value=1.0, value=get_settings().top_p, step=0.01,
                               help="Used to control the randomness of the generated text.")
    get_settings().top_p = top_p

    temperature = left_column.slider('Temperature', min_value=0.01, max_value=1.0, value=get_settings().temperature, step=0.01,
                                     help="Another parameter used to control the randomness of the generated text.")
    get_settings().temperature = temperature

    repetition_penalty = left_column.slider('Repetition penalty', min_value=0.7, max_value=2.0,
                                            value=get_settings().repetition_penalty, step=0.01,
                                            help="Used to discourage the model from repeating the same phrases or content.")
    get_settings().repetition_penalty = repetition_penalty

    llm_options = get_settings().listLLMMap
    selected_option = left_column.selectbox('Chat LLM', llm_options, index=get_settings().defaultLLM)
    get_settings().defaultLLM = llm_options.index(selected_option)
    get_llm().selectLLM(selected_option)

    system_prompt = right_column.text_area('System prompt:', get_settings().system_prompt, height=500,
                                           help=r"Text that provides context and instructions to the model before it generates a response. The special fields {context}, {history} and {question} will be replaced with their corresponding values")
    get_settings().system_prompt = system_prompt

    st.divider()
    st.subheader('*RAG Settings*')

    left_column_RAG, right_column_RAG = st.columns([1, 2])

    n_doc_return = left_column_RAG.number_input(label='N doc return', value=get_settings().n_doc_return,
                                                help="The number of documents to be returned by the search")
    get_settings().n_doc_return = n_doc_return

    rag_methods = get_settings().available_search_methods
    selected_option_RAG = left_column_RAG.selectbox('Search Methods', rag_methods,
                                                    index=rag_methods.index(get_settings().search_method))
    get_settings().search_method = selected_option_RAG

    RAG_prompt = right_column_RAG.text_area('RAG prompt:', get_settings().default_ai_search_prompt, height=500,
                                            help=r"Text that provides context and instructions to the model used for search terms. The special fields {history} and {question} will be replaced with their corresponding values")
    get_settings().default_ai_search_prompt = RAG_prompt

    RAG_max_new_token = left_column_RAG.number_input(label='RAG max new tokens', value=get_settings().RAG_max_new_token,
                                                     help="The maximum number of tokens that the LLM can generate")
    get_settings().RAG_max_new_token = RAG_max_new_token

    RAG_top_p = left_column_RAG.slider('RAG top p', min_value=0.0, max_value=1.0, value=get_settings().RAG_top_p, step=0.01,
                                       help="Used to control the randomness of the generated text.")
    get_settings().RAG_top_p = RAG_top_p

    RAG_temperature = left_column_RAG.slider('RAG temperature', min_value=0.01, max_value=1.0,
                                             value=get_settings().RAG_temperature, step=0.01,
                                             help="Another parameter used to control the randomness of the generated text.")
    get_settings().RAG_temperature = RAG_temperature

    RAG_repetition_penalty = left_column_RAG.slider('RAG repetition penalty', min_value=0.7, max_value=2.0,
                                                    value=get_settings().RAG_repetition_penalty, step=0.01,
                                                    help="Used to discourage the model from repeating the same phrases or content.")
    get_settings().RAG_repetition_penalty = RAG_repetition_penalty
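
# All of the widgets above write straight back onto the cached SettingManager,
# so changes apply immediately and persist across reruns for the lifetime of
# the server process. Nothing is written to disk in this file; any durable
# persistence would have to live inside SettingManager itself (an assumption
# about that module, which is not shown here).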


def create_info():
    st.title('Welcome to ProHelper')
    st.subheader('*A new tool to help you with process problems*')

    st.divider()

    with st.container():
        col1, col2 = st.columns(2)
        with col1:
            st.header('Knowledge base:')
            st.markdown(
                """
- _Perry's Chemical Engineers' Handbook_
- _Coulson & Richardson's Chemical Engineering, Vol. 6: Chemical Engineering Design, 4th Edition_
- _Chemical Process Equipment: Selection and Design, Stanley M. Walas_
                """
            )
        with col2:
            # Build an OS-independent path to the bundled Lottie animation.
            current_dir = os.path.dirname(__file__)
            lottie_path = os.path.join(current_dir, "res", "lottie", "Piping.json")
            lottie2 = load_lottiefile(lottie_path)
            st_lottie(lottie2, key='place', height=300, width=300)
    st.divider()


def main():
    with st.sidebar:
        # One Bootstrap icon per menu entry, in order.
        selected = option_menu('ProHelper', ["Info", "About", "Settings", "Bots"],
                               icons=['info-circle', 'question', 'gear', 'chat-dots'],
                               menu_icon='droplet-fill', default_index=0)

    if selected == "Info":
        create_info()

    if selected == "Settings":
        create_setting()

    if selected == "About":
        st.title('About ProHelper')
        st.write("ProHelper is a testing program created by DFO.")
        st.write("This program is designed for testing purposes only.")
        st.write("For more information, please contact DFO.")

    if selected == "Bots":
        if "messages" not in st.session_state:
            st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]

        # Replay the conversation so far. The loop variable is called msg to
        # avoid shadowing the message() function imported from streamlit_chat.
        for msg in st.session_state.messages:
            with st.chat_message(msg["role"]):
                st.write(msg["content"])

        if prompt := st.chat_input("Ask me anything"):
            st.session_state.messages.append({"role": "user", "content": prompt})
            with st.chat_message("user"):
                st.write(prompt)

        if st.session_state.messages[-1]["role"] != "assistant":
            prompt = st.session_state.messages[-1]["content"]
            with st.chat_message("assistant"):
                with st.spinner("Thinking..."):
                    response, cont, hist = process_answer(prompt)
                    placeholder = st.empty()
                    full_response = ''
                    # Stream the answer chunk by chunk into a single placeholder.
                    for item in response:
                        full_response += item
                        placeholder.markdown(full_response)
                    placeholder.markdown(full_response)
            st.session_state.messages.append({"role": "assistant", "content": full_response})

            with st.expander("Sources"):
                st.write(f"Context: {cont}")
                st.write(f"History: {hist}")

            get_History().add_message(vector_store, "User", prompt)
            get_History().add_message(vector_store, "Assistant", full_response)


if __name__ == "__main__":
    main()
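
# To launch the app (assuming this file is saved as app.py):
#   streamlit run app.py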