drkareemkamal commited on
Commit
bcc7923
·
verified ·
1 Parent(s): 986b020

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +92 -98
app.py CHANGED
@@ -1,99 +1,93 @@
1
- #from langchain import PromptTemplate
2
- from langchain_core.prompts import PromptTemplate
3
- import os
4
- from langchain_community.embeddings import HuggingFaceBgeEmbeddings
5
- from langchain_community.vectorstores import FAISS
6
- from langchain_community.llms.ctransformers import CTransformers
7
- #from langchain.chains import RetrievalQA
8
- from langchain.chains.retrieval_qa.base import RetrievalQA
9
- import chainlit as cl
10
-
11
- DB_FAISS_PATH = 'vectorstores/'
12
-
13
- custom_prompt_template = '''
14
- use the following pieces of information to answer the user's questions/
15
- If you don't know the answer, please just say that don't know the answer, don't try to make uo an answer.
16
-
17
- Content : {}
18
- Question : {question}
19
-
20
- only return the helpful answer below and nothing else.
21
- '''
22
-
23
- def set_custom_prompt():
24
- """
25
- Prompt template for QA retrieval for vector stores
26
- """
27
- prompt = PromptTemplate(template = custom_prompt_template,
28
- input_variables = ['context','question'])
29
-
30
- return prompt
31
-
32
- def load_llm():
33
- llm = CTransformers(
34
- model = 'TheBloke/Llama-2-7B-Chat-GGML',
35
- #model = AutoModel.from_pretrained("TheBloke/Llama-2-7B-Chat-GGML"),
36
- model_type = 'llama',
37
- max_new_token = 512,
38
- temperature = 0.5
39
- )
40
- return llm
41
-
42
- def retrieval_qa_chain(llm,prompt,db):
43
- qa_chain = RetrievalQA.from_chain_type(
44
- llm = llm,
45
- chain_type = 'stuff',
46
- retriever = db.as_retriever(search_kwargs= {'k': 2}),
47
- return_source_documents = True,
48
- chain_type_kwargs = {'prompt': prompt}
49
- )
50
-
51
- return qa_chain
52
-
53
- def qa_bot():
54
- embeddings = HuggingFaceBgeEmbeddings(model_name = 'sentence-transformers/all-MiniLM-L6-v2',
55
- model_kwargs = {'device':'cpu'})
56
-
57
- # index_path = os.path.join('vectorstores')
58
- # if not os.path.exists(index_path):
59
- # raise FileNotFoundError(f"FAISS index file not found at {index_path}")
60
-
61
- # Load the index if it exists
62
- #db = FAISS.load_local('vectorstores', embeddings, allow_dangerous_deserialization=True)
63
-
64
- db = FAISS.load_local(DB_FAISS_PATH, embeddings,allow_dangerous_deserialization=True)
65
- llm = load_llm()
66
- qa_prompt = set_custom_prompt()
67
- qa = retrieval_qa_chain(llm,qa_prompt, db)
68
-
69
- return qa
70
-
71
- def final_result(query):
72
- qa_result = qa_bot()
73
- response = qa_result({'quert' : query})
74
-
75
- return response
76
-
77
-
78
- import streamlit as st
79
-
80
- # Initialize the bot
81
- bot = qa_bot()
82
-
83
- def process_query(query):
84
- # Here you would include the logic to process the query and return a response
85
- response, sources = bot.answer_query(query) # Modify this according to your bot implementation
86
- if sources:
87
- response += f"\nSources: {', '.join(sources)}"
88
- else:
89
- response += "\nNo Sources Found"
90
- return response
91
-
92
- # Setting up the Streamlit app
93
- st.title('Medical Chatbot')
94
-
95
- user_input = st.text_input("Hi, welcome to the medical Bot. What is your query?")
96
-
97
- if user_input:
98
- output = process_query(user_input)
99
  st.text_area("Response", output, height=300)
 
#from langchain import PromptTemplate
from langchain_core.prompts import PromptTemplate
import os
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms.ctransformers import CTransformers
#from langchain.chains import RetrievalQA
from langchain.chains.retrieval_qa.base import RetrievalQA
import chainlit as cl

# Directory holding the FAISS index that was serialized offline.
DB_FAISS_PATH = 'vectorstores/'

# Prompt for the "stuff" QA chain.
# BUG FIX: the context slot was a bare "{}", which does not match the declared
# input variable 'context' and breaks PromptTemplate substitution — it must be
# "{context}". Also fixed the "make uo" typo.
custom_prompt_template = '''
use the following pieces of information to answer the user's questions/
If you don't know the answer, please just say that don't know the answer, don't try to make up an answer.

Context : {context}
Question : {question}

only return the helpful answer below and nothing else.
'''
22
+
def set_custom_prompt():
    """Build the PromptTemplate used for QA retrieval over the vector store.

    Returns:
        PromptTemplate: the module-level template wired to the 'context'
        and 'question' input variables.
    """
    return PromptTemplate(
        template=custom_prompt_template,
        input_variables=['context', 'question'],
    )
31
+
def load_llm():
    """Instantiate the local Llama-2 chat model via ctransformers.

    Returns:
        CTransformers: a GGML Llama-2-7B chat model wrapper.
    """
    # BUG FIX: 'max_new_token' was misspelled (should be 'max_new_tokens'),
    # and langchain's CTransformers ignores generation settings passed as
    # top-level kwargs — they must be supplied through the `config` dict.
    llm = CTransformers(
        model='TheBloke/Llama-2-7B-Chat-GGML',
        model_type='llama',
        config={'max_new_tokens': 512, 'temperature': 0.5},
    )
    return llm
41
+
def retrieval_qa_chain(llm, prompt, db):
    """Assemble a 'stuff' RetrievalQA chain over the given vector store.

    Args:
        llm: language model used to generate answers.
        prompt: PromptTemplate expecting 'context' and 'question'.
        db: vector store whose retriever supplies supporting documents.

    Returns:
        RetrievalQA: chain configured to also return its source documents.
    """
    retriever = db.as_retriever(search_kwargs={'k': 2})
    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type='stuff',
        retriever=retriever,
        return_source_documents=True,
        chain_type_kwargs={'prompt': prompt},
    )
52
+
def qa_bot():
    """Wire embeddings, the FAISS index, the LLM and the prompt into one QA chain.

    Returns:
        RetrievalQA: ready-to-query chain.
    """
    embeddings = HuggingFaceBgeEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L6-v2',
        model_kwargs={'device': 'cpu'},
    )
    # NOTE(review): requires an index to already exist at DB_FAISS_PATH;
    # allow_dangerous_deserialization trusts the pickled index, so only
    # load indexes you built yourself.
    db = FAISS.load_local(DB_FAISS_PATH, embeddings, allow_dangerous_deserialization=True)
    return retrieval_qa_chain(load_llm(), set_custom_prompt(), db)
64
+
def final_result(query):
    """Run *query* through a freshly built QA chain and return the raw response.

    Args:
        query: the user's question as plain text.

    Returns:
        dict: chain output containing 'result' and 'source_documents'.
    """
    qa_chain = qa_bot()
    # BUG FIX: the input key was misspelled 'quert'; RetrievalQA expects 'query'.
    return qa_chain({'query': query})
70
+
71
+
import streamlit as st

# Build the QA chain once at startup so it is reused by query handling below.
bot = qa_bot()

def process_query(query):
    """Answer *query* with the QA chain and append its sources.

    Args:
        query: the user's question as plain text.

    Returns:
        str: the model's answer followed by its sources, or a note that
        none were found.
    """
    # BUG FIX: RetrievalQA has no 'answer_query' method; invoke the chain
    # with its 'query' input key and unpack the standard output dict.
    result = bot({'query': query})
    response = result['result']
    sources = result.get('source_documents') or []
    if sources:
        # Each source is a Document; its origin lives under metadata['source'].
        names = ', '.join(str(doc.metadata.get('source', '?')) for doc in sources)
        response += f"\nSources: {names}"
    else:
        response += "\nNo Sources Found"
    return response

# Setting up the Streamlit app
st.title('Medical Chatbot')

user_input = st.text_input("Hi, welcome to the medical Bot. What is your query?")

if user_input:
    output = process_query(user_input)
    st.text_area("Response", output, height=300)