Update app.py
app.py CHANGED
@@ -1,99 +1,93 @@
-#from langchain import PromptTemplate
-from langchain_core.prompts import PromptTemplate
-import os
-from langchain_community.embeddings import HuggingFaceBgeEmbeddings
-from langchain_community.vectorstores import FAISS
-from langchain_community.llms.ctransformers import CTransformers
-#from langchain.chains import RetrievalQA
-from langchain.chains.retrieval_qa.base import RetrievalQA
-import chainlit as cl
-
-DB_FAISS_PATH = 'vectorstores/'
-
-custom_prompt_template = '''
-use the following pieces of information to answer the user's questions/
-If you don't know the answer, please just say that don't know the answer, don't try to make uo an answer.
-
-
-Question : {question}
-
-only return the helpful answer below and nothing else.
-'''
-
-def set_custom_prompt():
-    """
-    Prompt template for QA retrieval for vector stores
-    """
-    prompt = PromptTemplate(template = custom_prompt_template,
-                            input_variables = ['context','question'])
-
-    return prompt
-
-def load_llm():
-    llm = CTransformers(
-        model = 'TheBloke/Llama-2-7B-Chat-GGML',
-        #model = AutoModel.from_pretrained("TheBloke/Llama-2-7B-Chat-GGML"),
-        model_type = 'llama',
-        max_new_token = 512,
-        temperature = 0.5
-    )
-    return llm
-
-def retrieval_qa_chain(llm,prompt,db):
-    qa_chain = RetrievalQA.from_chain_type(
-        llm = llm,
-        chain_type = 'stuff',
-        retriever = db.as_retriever(search_kwargs= {'k': 2}),
-        return_source_documents = True,
-        chain_type_kwargs = {'prompt': prompt}
-    )
-
-    return qa_chain
-
-def qa_bot():
-    embeddings = HuggingFaceBgeEmbeddings(model_name = 'sentence-transformers/all-MiniLM-L6-v2',
-                                          model_kwargs = {'device':'cpu'})
-
-    return
-
-st.title('Medical Chatbot')
-
-user_input = st.text_input("Hi, welcome to the medical Bot. What is your query?")
-
-if user_input:
-    output = process_query(user_input)
+#from langchain import PromptTemplate
+from langchain_core.prompts import PromptTemplate
+import os
+from langchain_community.embeddings import HuggingFaceBgeEmbeddings
+from langchain_community.vectorstores import FAISS
+from langchain_community.llms.ctransformers import CTransformers
+#from langchain.chains import RetrievalQA
+from langchain.chains.retrieval_qa.base import RetrievalQA
+import chainlit as cl
+
+DB_FAISS_PATH = 'vectorstores/'
+
+custom_prompt_template = '''
+Use the following pieces of information to answer the user's question.
+If you don't know the answer, please just say that you don't know the answer; don't try to make up an answer.
+
+Context : {context}
+Question : {question}
+
+Only return the helpful answer below and nothing else.
+'''
+
+def set_custom_prompt():
+    """
+    Prompt template for QA retrieval for vector stores
+    """
+    prompt = PromptTemplate(template=custom_prompt_template,
+                            input_variables=['context', 'question'])
+
+    return prompt
+
+def load_llm():
+    llm = CTransformers(
+        model='TheBloke/Llama-2-7B-Chat-GGML',
+        #model = AutoModel.from_pretrained("TheBloke/Llama-2-7B-Chat-GGML"),
+        model_type='llama',
+        max_new_tokens=512,
+        temperature=0.5
+    )
+    return llm
+
+def retrieval_qa_chain(llm, prompt, db):
+    qa_chain = RetrievalQA.from_chain_type(
+        llm=llm,
+        chain_type='stuff',
+        retriever=db.as_retriever(search_kwargs={'k': 2}),
+        return_source_documents=True,
+        chain_type_kwargs={'prompt': prompt}
+    )
+
+    return qa_chain
+
+def qa_bot():
+    embeddings = HuggingFaceBgeEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
+                                          model_kwargs={'device': 'cpu'})
+
+    db = FAISS.load_local(DB_FAISS_PATH, embeddings, allow_dangerous_deserialization=True)
+    llm = load_llm()
+    qa_prompt = set_custom_prompt()
+    qa = retrieval_qa_chain(llm, qa_prompt, db)
+
+    return qa
+
+def final_result(query):
+    qa_result = qa_bot()
+    response = qa_result({'query': query})
+
+    return response
+
+
+import streamlit as st
+
+# Initialize the bot
+bot = qa_bot()
+
+def process_query(query):
+    # RetrievalQA is invoked with a dict keyed by 'query'; because the chain
+    # was built with return_source_documents=True, the output dict carries
+    # both 'result' and 'source_documents'
+    output = bot({'query': query})
+    response = output['result']
+    sources = output.get('source_documents', [])
+    if sources:
+        response += f"\nSources: {', '.join(str(doc.metadata.get('source', 'unknown')) for doc in sources)}"
+    else:
+        response += "\nNo Sources Found"
+    return response
+
+# Setting up the Streamlit app
+st.title('Medical Chatbot')
+
+user_input = st.text_input("Hi, welcome to the medical Bot. What is your query?")
+
+if user_input:
+    output = process_query(user_input)
     st.text_area("Response", output, height=300)
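
The new version loads a prebuilt FAISS index from vectorstores/, but the commit does not include the script that creates it. Below is a minimal sketch of that ingestion step, assuming the source PDFs live in a data/ directory; the ingest.py name, the data/ path, and the chunking parameters are illustrative and not part of this Space. The embedding model must match the one used in qa_bot(), or retrieval quality will silently degrade.

# ingest.py -- build the FAISS index that app.py expects (illustrative sketch)
from langchain_community.document_loaders import DirectoryLoader, PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import FAISS

DB_FAISS_PATH = 'vectorstores/'

# Load every PDF under data/ (assumed location of the source documents)
documents = DirectoryLoader('data/', glob='*.pdf', loader_cls=PyPDFLoader).load()

# Chunk size and overlap are assumptions; tune them for the corpus
texts = RecursiveCharacterTextSplitter(chunk_size=500,
                                       chunk_overlap=50).split_documents(documents)

# Must be the same embedding model app.py uses at query time
embeddings = HuggingFaceBgeEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                      model_kwargs={'device': 'cpu'})

# Build the index and write it where DB_FAISS_PATH points
db = FAISS.from_documents(texts, embeddings)
db.save_local(DB_FAISS_PATH)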
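
Two usage notes: allow_dangerous_deserialization=True is required by current LangChain releases to unpickle a locally stored FAISS index, and should only be enabled for index files you built yourself. And since this version drives the UI with Streamlit (the import chainlit as cl left at the top is now unused), the app is started with streamlit run app.py rather than chainlit run app.py.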