drkareemkamal committed
Commit
edb6ea6
1 Parent(s): 3724580

Upload 4 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ vectorstores/index.faiss filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,92 @@
+ #from langchain import PromptTemplate
+ from langchain_core.prompts import PromptTemplate
+ import os
+ from langchain_community.embeddings import HuggingFaceBgeEmbeddings
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.llms.ctransformers import CTransformers
+ #from langchain.chains import RetrievalQA
+ from langchain.chains.retrieval_qa.base import RetrievalQA
+
+ DB_FAISS_PATH = 'vectorstores/'
+
+ custom_prompt_template = '''Use the following pieces of information to answer the user's question.
+ If you don't know the answer, just say that you don't know; don't try to make up an answer.
+
+ Context : {context}
+ Question : {question}
+
+ Only return the helpful answer below and nothing else.
+ '''
+
+ def set_custom_prompt():
+     """
+     Prompt template for QA retrieval over the vector store.
+     """
+     prompt = PromptTemplate(template = custom_prompt_template,
+                             input_variables = ['context', 'question'])
+
+     return prompt
+
+
+ def load_llm():
+     # Quantized Llama 2 chat model loaded through ctransformers (CPU-friendly GGML weights)
+     llm = CTransformers(
+         model = 'TheBloke/Llama-2-7B-Chat-GGML',
+         model_type = 'llama',
+         max_new_tokens = 512,
+         temperature = 0.5
+     )
+     return llm
+
+ def retrieval_qa_chain(llm, prompt, db):
+     qa_chain = RetrievalQA.from_chain_type(
+         llm = llm,
+         chain_type = 'stuff',
+         retriever = db.as_retriever(search_kwargs = {'k': 2}),
+         return_source_documents = True,
+         chain_type_kwargs = {'prompt': prompt}
+     )
+
+     return qa_chain
+
+ def qa_bot():
+     embeddings = HuggingFaceBgeEmbeddings(model_name = 'sentence-transformers/all-MiniLM-L6-v2',
+                                           model_kwargs = {'device': 'cpu'})
+
+     db = FAISS.load_local(DB_FAISS_PATH, embeddings, allow_dangerous_deserialization=True)
+     llm = load_llm()
+     qa_prompt = set_custom_prompt()
+     qa = retrieval_qa_chain(llm, qa_prompt, db)
+
+     return qa
+
+ def final_result(query):
+     qa_result = qa_bot()
+     response = qa_result({'query': query})
+
+     return response
+
+
+ import streamlit as st
+
+ # Initialize the bot once at startup
+ bot = qa_bot()
+
+ def process_query(query):
+     # Run the RetrievalQA chain; it returns the answer and the retrieved source documents
+     result = bot({'query': query})
+     response, sources = result['result'], result.get('source_documents', [])
+     if sources:
+         response += f"\nSources: {', '.join(doc.metadata.get('source', 'unknown') for doc in sources)}"
+     else:
+         response += "\nNo Sources Found"
+     return response
+
+ # Setting up the Streamlit app
+ st.title('Medical Chatbot')
+
+ user_input = st.text_input("Hi, welcome to the medical Bot. What is your query?")
+
+ if user_input:
+     output = process_query(user_input)
+     st.text_area("Response", output, height=300)
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ pypdf
+ langchain
+ torch
+ accelerate
+ bitsandbytes
+ transformers
+ sentence_transformers
+ faiss_cpu
+ langchain-community
+ huggingface_hub
+ ctransformers
vectorstores/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ceac52af31d17a599afdeaa78b5309e58f242078efcd723b604cbdf2be45cb75
+ size 10983981
vectorstores/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bad34adb5061873cd15a1f7e86541e9818502b235c53793e8b284884d77336dd
+ size 3446300