Pratik Dwivedi committed
Commit 727d805
1 Parent(s): 53f7f5d

health model

Files changed (2)
  1. .gitignore +2 -0
  2. app.py +15 -7
.gitignore ADDED
@@ -0,0 +1,2 @@
+.env
+.env.example
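The two ignored files suggest the Hugging Face token now lives in a local .env file read by the new load_dotenv() call in app.py. A minimal sketch of that flow, assuming the variable name HUGGINGFACEHUB_API_TOKEN (the environment variable the HuggingFaceHub wrapper looks for; the actual file contents are not part of this commit):

# Hypothetical .env contents (ignored by git as of this commit):
#   HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx
import os
from dotenv import load_dotenv

load_dotenv()  # copies key=value pairs from .env into os.environ
token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
print("token loaded:", token is not None)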
app.py CHANGED
@@ -3,7 +3,8 @@ from langchain.memory import ConversationBufferMemory
 from langchain.chains import LLMChain
 from langchain_community.llms import HuggingFaceHub
 from langchain.prompts import PromptTemplate
-
+from dotenv import load_dotenv
+import time
 
 def get_response(model, query):
     prompt_template = PromptTemplate(
@@ -12,27 +13,32 @@ def get_response(model, query):
     )
     # get the response
     memory = ConversationBufferMemory(memory_key="messages", return_messages=True)
+    print(memory)
     conversation_chain = LLMChain(
         llm=model,
         prompt=prompt_template,
         # retriever=vectorstore.as_retriever(),
         memory=memory)
-    # response = conversation_chain.invoke(query)
-    # response = response["result"]
-    # answer = response.split('\nHelpful Answer: ')[1]
     response = conversation_chain.invoke(query)
-    return response
+    answer = response["text"]
+    if "\n\n" in answer:
+        answer = answer.split("\n\n", 1)[1]
+    return answer
 
 def main():
     st.title("Health Chatbot")
-
+    # load the environment variables
+    load_dotenv()
     print("Loading LLM from HuggingFace")
     with st.spinner('Loading LLM from HuggingFace...'):
-        llm = HuggingFaceHub(repo_id="HuggingFaceH4/zephyr-7b-beta", model_kwargs={"temperature":0.7, "max_new_tokens":512, "top_p":0.95, "top_k":50})
+        # llm = HuggingFaceHub(repo_id="HuggingFaceH4/zephyr-7b-beta", model_kwargs={"temperature":0.7, "max_new_tokens":1028, "top_p":0.95})
+
+        llm = HuggingFaceHub(repo_id="epfl-llm/meditron-70b", model_kwargs={"temperature":0.7, "max_new_tokens":1028, "top_p":0.95})
 
     if "messages" not in st.session_state:
         st.session_state.messages = []
 
+
     if st.button("Clear Chat"):
         st.session_state.messages = []
 
@@ -47,7 +53,9 @@ def main():
         st.chat_message("user").markdown(user_prompt)
         st.session_state.messages.append({"role": "user", "content": user_prompt})
         with st.spinner('Thinking...'):
+            start_time = time.time()
             response = get_response(llm, user_prompt)
+            st.write("Response Time: ", time.time() - start_time)
         st.chat_message("bot").markdown(response)
         st.session_state.messages.append({"role": "bot", "content": response})
 
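The new post-processing in get_response assumes LLMChain.invoke returns a dict whose "text" field begins with the echoed prompt, separated from the model's answer by the first blank line; the split keeps only what follows. A self-contained sketch of that cleaning step (the sample payload is fabricated for illustration):

# The dict mirrors the {"text": ...} shape that LLMChain.invoke returns;
# the string itself is a made-up example of prompt-echoing model output.
response = {"text": "Answer the health question below.\n\nDrink fluids and rest."}
answer = response["text"]
if "\n\n" in answer:
    # keep only what follows the first blank line (drops the echoed prompt)
    answer = answer.split("\n\n", 1)[1]
print(answer)  # -> Drink fluids and rest.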