"""Question-answering service over a pre-built local FAISS index.

Loads the persisted FAISS vector store from ``faiss_index``, wires it to a
chat LLM through a RetrievalQA chain, and exposes ``ask(user_query)`` which
returns the chain's answer text.

Prerequisites:
    * The ``OPENAI_API_KEY`` environment variable must be set.
    * A FAISS index must already exist at ``faiss_index`` (built previously
      from the source documents with ``FAISS.from_documents(...).save_local``).
"""
import os

from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS, ElasticVectorSearch, Pinecone, Weaviate

# Fail fast with a clear message instead of the bare KeyError the original
# `os.environ["OPENAI_API_KEY"]` expression raised when the key was missing.
if "OPENAI_API_KEY" not in os.environ:
    raise RuntimeError("OPENAI_API_KEY environment variable is not set")

# Embedding model used both to build and to query the index; it must match
# the model that produced the persisted vectors.
embeddings = OpenAIEmbeddings()

# Load the previously persisted FAISS index from local disk.
new_db = FAISS.load_local("faiss_index", embeddings)

# Chat model + retrieval chain: retrieves relevant chunks from the index and
# has the LLM answer from them.
llm = ChatOpenAI()
qa_chain = RetrievalQA.from_chain_type(llm, retriever=new_db.as_retriever())


def ask(user_query):
    """Answer *user_query* using retrieval-augmented generation.

    Args:
        user_query: Natural-language question to answer against the index.

    Returns:
        The answer string produced by the RetrievalQA chain.
    """
    res = qa_chain({"query": user_query})
    return res["result"]