from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.prompts import PromptTemplate
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.llms import CTransformers
from langchain.chains import RetrievalQA
import gradio as gr

DB_FAISS_PATH = 'vectorstore/db_faiss'

custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.

Context: {context}
Question: {question}

Only return the helpful answer below and nothing else.
Helpful answer:
"""


def set_custom_prompt():
    """
    Prompt template for QA retrieval for each vectorstore
    """
    prompt = PromptTemplate(template=custom_prompt_template,
                            input_variables=['context', 'question'])
    return prompt


def load_llm():
    # Load the locally downloaded model here. Generation parameters go in the
    # `config` dict for the langchain_community CTransformers wrapper.
    llm = CTransformers(
        model="TheBloke/Llama-2-7B-Chat-GGML",
        model_type="llama",
        config={'max_new_tokens': 512, 'temperature': 0.5}
    )
    return llm


def qa_bot(query):
    # Note: the embeddings, index, and LLM are rebuilt on every call; for a
    # snappier UI, construct the chain once at startup and reuse it here.
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                                       model_kwargs={'device': 'cpu'})
    # Recent LangChain releases require opting in to deserializing a locally
    # built FAISS index, since it is loaded from a pickle on disk.
    db = FAISS.load_local(DB_FAISS_PATH, embeddings,
                          allow_dangerous_deserialization=True)
    llm = load_llm()
    qa_prompt = set_custom_prompt()
    qa_chain = RetrievalQA.from_chain_type(llm=llm,
                                           chain_type='stuff',
                                           retriever=db.as_retriever(search_kwargs={'k': 2}),
                                           return_source_documents=True,
                                           chain_type_kwargs={'prompt': qa_prompt}
                                           )
    result = qa_chain.invoke({'query': query})
    # RetrievalQA returns the generated answer under the 'result' key (plus
    # 'source_documents' when return_source_documents=True); there is no
    # 'answers' key in its output.
    response = result.get('result') or "Sorry, I don't have an answer for that."
    return response


iface = gr.Interface(
    fn=qa_bot,
    inputs="text",
    outputs="text",
    title="Medical Query Bot",
    description="Enter your medical query to get an answer."
)

if __name__ == '__main__':
    iface.launch()
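# ---------------------------------------------------------------------------
# ingest.py -- a minimal sketch of the separate ingestion step implied by the
# otherwise-unused PyPDFLoader / DirectoryLoader imports above: it builds the
# FAISS index that qa_bot() loads from DB_FAISS_PATH. The 'data/' folder, the
# '*.pdf' glob, and the chunk_size/chunk_overlap values are assumptions for
# illustration, not part of the original application.
# ---------------------------------------------------------------------------
from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

DATA_PATH = 'data/'  # assumed location of the source PDFs
DB_FAISS_PATH = 'vectorstore/db_faiss'


def create_vector_db():
    # Load every PDF found under DATA_PATH.
    loader = DirectoryLoader(DATA_PATH, glob='*.pdf', loader_cls=PyPDFLoader)
    documents = loader.load()

    # Split the pages into overlapping chunks sized for the embedding model.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,
                                                   chunk_overlap=50)
    texts = text_splitter.split_documents(documents)

    # Embed with the same model the app uses at query time, so the vectors
    # in the index match the vectors computed for incoming questions.
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={'device': 'cpu'})

    # Build the FAISS index and persist it where qa_bot() expects it.
    db = FAISS.from_documents(texts, embeddings)
    db.save_local(DB_FAISS_PATH)


if __name__ == '__main__':
    create_vector_db()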