MahmoudRox committed on
Commit
0af588c
1 Parent(s): b813364

sidebar and memory clear

Browse files
Files changed (1) hide show
  1. app.py +28 -13
app.py CHANGED
@@ -172,17 +172,17 @@ qa = ConversationalRetrievalChain.from_llm(
172
 
173
 
174
  #---------------------------------------------------------
175
-
176
  import streamlit as st
177
  import time
178
 
 
179
  # App title
180
  st.set_page_config(page_title="🤖💼 🇲🇦 Financial advisor is Here")
181
 
182
  # Replicate Credentials
183
  with st.sidebar:
184
- st.title(' Mokawil.AI is Here 🤖💼 🇲🇦')
185
- st.markdown('📖 an AI-powered advisor designed to assist founders (or anyone aspiring to start their own company) with various aspects of business in Morocco, including legal considerations, budget planning, available investors, and strategies for success.')
186
 
187
  # Store LLM generated responses
188
  if "messages" not in st.session_state.keys():
@@ -191,33 +191,47 @@ if "messages" not in st.session_state.keys():
191
  # Display or clear chat messages
192
  for message in st.session_state.messages:
193
  if message["role"] == "user" :
194
- with st.chat_message(message["role"], avatar="👨‍💻"):
195
  st.write(message["content"])
196
  else :
197
- with st.chat_message(message["role"], avatar="🤖"):
198
  st.write(message["content"])
199
 
200
  def clear_chat_history():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
  st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
202
 
203
  st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
204
 
205
  # Function for generating LLaMA2 response
206
- def generate_llama2_response(prompt_input):
207
  res = qa(f'''{prompt_input}''')
208
  return res['answer']
209
 
210
  # User-provided prompt
211
  if prompt := st.chat_input("What is up?"):
212
  st.session_state.messages.append({"role": "user", "content": prompt})
213
- with st.chat_message("user", avatar="👨‍💻"):
214
  st.write(prompt)
215
 
216
  # Generate a new response if last message is not from assistant
217
  if st.session_state.messages[-1]["role"] != "assistant":
218
- with st.chat_message("assistant", avatar="🤖"):
219
  with st.spinner("Thinking..."):
220
- response = generate_llama2_response(st.session_state.messages[-1]["content"])
221
  placeholder = st.empty()
222
  full_response = ''
223
  for item in response:
@@ -230,19 +244,20 @@ if st.session_state.messages[-1]["role"] != "assistant":
230
 
231
  # Example prompt
232
  with st.sidebar :
233
- st.title('Input examples')
 
234
  def promptExample1():
235
- prompt = "how can I start my company example 1"
236
  st.session_state.messages.append({"role": "user", "content": prompt})
237
 
238
  # Example prompt
239
  def promptExample2():
240
- prompt = "how can I start my company example 2"
241
  st.session_state.messages.append({"role": "user", "content": prompt})
242
 
243
  # Example prompt
244
  def promptExample3():
245
- prompt = "how can I start my company example 3"
246
  st.session_state.messages.append({"role": "user", "content": prompt})
247
 
248
 
 
172
 
173
 
174
  #---------------------------------------------------------
 
175
  import streamlit as st
176
  import time
177
 
178
+
179
  # App title
180
  st.set_page_config(page_title="🤖💼 🇲🇦 Financial advisor is Here")
181
 
182
  # Replicate Credentials
183
  with st.sidebar:
184
+ st.title('Mokawil.AI is Here 🤖💼 🇲🇦')
185
+ st.markdown('🤖 an AI-powered advisor designed to assist founders (or anyone aspiring to start their own company) with various aspects of business in Morocco, including legal considerations, budget planning, available investors, and strategies for success.')
186
 
187
  # Store LLM generated responses
188
  if "messages" not in st.session_state.keys():
 
191
  # Display or clear chat messages
192
  for message in st.session_state.messages:
193
  if message["role"] == "user" :
194
+ with st.chat_message(message["role"], avatar="user.png"):
195
  st.write(message["content"])
196
  else :
197
+ with st.chat_message(message["role"], avatar="logo.png"):
198
  st.write(message["content"])
199
 
200
def clear_chat_history():
    """Sidebar-button callback: forget the conversation and reset the transcript.

    Rebuilds the retrieval QA chain with a fresh ConversationBufferMemory so
    earlier turns no longer influence answers, then resets the visible message
    list to the initial assistant greeting.

    Side effects: rebinds module-level ``qa`` and replaces
    ``st.session_state.messages``.
    """
    # BUG FIX: without this declaration, ``qa = ...`` below creates a
    # function-local that is discarded on return, so generate_llm_response()
    # would keep using the old chain (and its old memory) — the button would
    # clear the visible chat but not the model's memory.
    global qa

    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
        input_key='question',
        output_key='answer'
    )
    qa = ConversationalRetrievalChain.from_llm(
        llm,
        chain_type="stuff",
        retriever=VectorStore.as_retriever(search_kwargs={"k": 5}),
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
216
 
217
  st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
218
 
219
  # Function for generating LLaMA2 response
220
def generate_llm_response(prompt_input):
    """Run the user's prompt through the conversational retrieval chain.

    Returns the chain's answer text (the ``'answer'`` field of the chain
    output). Relies on the module-level ``qa`` chain being initialised.
    """
    chain_output = qa(f"{prompt_input}")
    return chain_output['answer']
223
 
224
  # User-provided prompt
225
  if prompt := st.chat_input("What is up?"):
226
  st.session_state.messages.append({"role": "user", "content": prompt})
227
+ with st.chat_message("user", avatar="user.png"):
228
  st.write(prompt)
229
 
230
  # Generate a new response if last message is not from assistant
231
  if st.session_state.messages[-1]["role"] != "assistant":
232
+ with st.chat_message("assistant", avatar="logo.png"):
233
  with st.spinner("Thinking..."):
234
+ response = generate_llm_response(st.session_state.messages[-1]["content"])
235
  placeholder = st.empty()
236
  full_response = ''
237
  for item in response:
 
244
 
245
  # Example prompt
246
  with st.sidebar :
247
+ st.title('Examples :')
248
+
249
def promptExample1():
    """Sidebar example-button callback: queue a canned question as a user message."""
    example = "how can I start my company in morocco?"
    st.session_state.messages.append({"role": "user", "content": example})
252
 
253
  # Example prompt
254
def promptExample2():
    """Sidebar example-button callback: queue a canned question as a user message."""
    example = "What are some recommended cities for starting a business in finance"
    st.session_state.messages.append({"role": "user", "content": example})
257
 
258
  # Example prompt
259
def promptExample3():
    """Sidebar example-button callback: queue a canned question as a user message."""
    example = "what is the estimate money I need for starting my company"
    st.session_state.messages.append({"role": "user", "content": example})
262
 
263