import gradio as gr
from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.prompts import PromptTemplate
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import CTransformers
from langchain.chains import RetrievalQA
DB_FAISS_PATH = 'vectorstore/db_faiss'
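
# The FAISS index at DB_FAISS_PATH must exist before the app can answer
# queries. A minimal ingestion sketch is given below (using the PyPDFLoader
# and DirectoryLoader imports above) so the script is self-contained; the
# 'data/' PDF directory and the chunking parameters (chunk_size=500,
# chunk_overlap=50) are assumptions, not taken from the original pipeline.
def create_vector_db(data_path='data/'):
    """Build and persist the FAISS index from a directory of PDFs (sketch)."""
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    loader = DirectoryLoader(data_path, glob='*.pdf', loader_cls=PyPDFLoader)
    documents = loader.load()
    # Split long pages into overlapping chunks so retrieval stays focused
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    texts = splitter.split_documents(documents)
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                                       model_kwargs={'device': 'cpu'})
    db = FAISS.from_documents(texts, embeddings)
    db.save_local(DB_FAISS_PATH)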
custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context: {context}
Question: {question}
Only return the helpful answer below and nothing else.
Helpful answer:
"""
def set_custom_prompt():
    """
    Prompt template for QA retrieval for each vectorstore.
    """
    prompt = PromptTemplate(template=custom_prompt_template,
                            input_variables=['context', 'question'])
    return prompt
# Retrieval QA Chain
def retrieval_qa_chain(llm, prompt, db):
    qa_chain = RetrievalQA.from_chain_type(llm=llm,
                                           chain_type='stuff',
                                           retriever=db.as_retriever(search_kwargs={'k': 2}),
                                           return_source_documents=True,
                                           chain_type_kwargs={'prompt': prompt})
    return qa_chain
# Loading the model
def load_llm():
    # Download (or reuse a cached copy of) the quantized GGML model from the
    # Hugging Face Hub; generation settings go through the config dict, as
    # expected by the LangChain CTransformers wrapper.
    llm = CTransformers(
        model="TheBloke/Llama-2-7B-Chat-GGML",
        model_type="llama",
        config={'max_new_tokens': 512, 'temperature': 0.5}
    )
    return llm
# QA Model Function
def qa_bot(query):
    # NOTE: the embeddings, index, and LLM are reloaded on every query; for
    # production use, load them once at module level instead.
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                                       model_kwargs={'device': 'cpu'})
    db = FAISS.load_local(DB_FAISS_PATH, embeddings, allow_dangerous_deserialization=True)
    llm = load_llm()
    qa_prompt = set_custom_prompt()
    qa = retrieval_qa_chain(llm, qa_prompt, db)
    response = qa.invoke({'query': query})
    answer = response['result']
    sources = response['source_documents']
    if sources:
        # Report where each retrieved chunk came from instead of dumping raw Document reprs
        source_list = "\n".join(str(doc.metadata.get('source', doc.metadata)) for doc in sources)
        answer += f"\n\nSources:\n{source_list}"
    else:
        answer += "\nNo sources found"
    return answer
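
# Example usage (hypothetical query), independent of the Gradio UI:
#   print(qa_bot("What are the common symptoms of anemia?"))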
# Gradio interface
iface = gr.Interface(fn=qa_bot,
                     inputs=gr.Textbox(label="Enter your medical query"),
                     outputs=gr.Textbox(label="Answer"),
                     title="Medical Bot",
                     description="Ask any medical query and get an answer with sources if available.")
if __name__ == "__main__":
    iface.launch()