# Gradio front end for retrieval-augmented QA over PDFs (or a pre-built index) using LangChain.
import os
import pickle
import pickletools

import faiss
import sentence_transformers
import gradio as gr
from datasets import load_dataset
from langchain.document_loaders import OnlinePDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceHubEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import HuggingFaceHub
from langchain.chains import RetrievalQA

# Hugging Face Hub API token, read from the RLS environment secret.
key = os.environ.get('RLS')
os.environ["HUGGINGFACEHUB_API_TOKEN"] = key


def loading_pdf():
    return "Loading..."


def pdf_changes(pdf_doc):
    # Load the uploaded PDF and split it into pages.
    loader = OnlinePDFLoader(pdf_doc.name)
    pages = loader.load_and_split()

    # Chunk the pages; '(?=\. )' is a regex-style sentence-boundary separator,
    # with plain-text fallbacks after it.
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1024,
        chunk_overlap=64,
        separators=['\n\n', '\n', '(?=\. )', ' ', '']
    )
    docs = text_splitter.split_documents(pages)

    # Embed the chunks and index them in FAISS.
    embeddings = HuggingFaceHubEmbeddings()
    db = FAISS.from_documents(docs, embeddings)

    llm = HuggingFaceHub(
        repo_id="google/flan-t5-xxl",
        model_kwargs={"temperature": 1, "max_length": 1000000}
    )

    # Build the retrieval QA chain and expose it globally to the chat handlers.
    global qa
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(search_kwargs={"k": 3})
    )
    return "Ready"


def book_changes(book):
    # The pickle file is assumed to hold a previously built FAISS vector store.
    with open(book, 'rb') as f:
        db = pickle.load(f)

    llm = HuggingFaceHub(
        repo_id="google/flan-t5-xxl",
        model_kwargs={"temperature": 1, "max_length": 1000000}
    )

    global qa
    qa = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(search_kwargs={"k": 3})
    )
    return "Ready"


def add_text(history, text):
    # Append the user's message to the chat history with an empty bot slot.
    history = history + [(text, None)]
    return history, ""


def bot(history):
    # Answer the latest user message and fill in the bot slot.
    response = infer(history[-1][0])
    history[-1][1] = response['result']
    return history


def infer(question):
    result = qa({"query": question})
    return result


css = """
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
"""

title = """