# rag.py
# https://github.com/vndee/local-rag-example/blob/main/rag.py
# ADAPTED TO USE AN HF LLM INSTEAD OF OLLAMA (originally self.model = ChatOllama(model="mistral")) BY J. BOURS, 01-03-2024
# EVERNOTE:
# https://www.evernote.com/shard/s313/nl/41973486/282c6fc8-9ed5-a977-9895-1eb23941bb4c?title=REQUIREMENTS%20FOR%20A%20LITERATURE%20BASED%20RESEARCH%20LBR%20SYSTEM%20-%20FUNCTIONAL%20AND%20TECHNICAL%20REQUIREMENTS%20-%20ALEXANDER%20UNZICKER%20-%2026-02-2024
#
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOllama  # kept for the commented-out original Ollama setup
from langchain.embeddings import FastEmbedEmbeddings
from langchain.schema.output_parser import StrOutputParser
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema.runnable import RunnablePassthrough
from langchain.prompts import PromptTemplate
from langchain.vectorstores.utils import filter_complex_metadata
from langchain.llms import HuggingFacePipeline  # added for the assumed HF adaptation in __init__
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline  # Auto* kept for the commented experiments below

class ChatPDF:
    vector_store = None
    retriever = None
    chain = None

    def __init__(self):
        # self.model = ChatOllama(model="mistral")  # ORIGINAL
        #
        # mistralai/Mistral-7B-v0.1 · Hugging Face
        # https://huggingface.co/mistralai/Mistral-7B-v0.1?library=true
        # TOO LARGE FOR THE FREE VERSION OF HF SPACES (max 16 GB):
        # tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
        # self.model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
        #
        # https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha?library=true
        # tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
        # self.model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
        #
        # https://huggingface.co/microsoft/phi-2?library=true
        # Intended Uses: given the nature of the training data, the Phi-2 model is best suited
        # for prompts using the QA format, the chat format, and the code format.
        # tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
        # self.model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", trust_remote_code=True)
        #
        # https://huggingface.co/meta-llama/Llama-2-7b-chat-hf?library=true
        # tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
        # self.model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
        #
        # TEMPORARILY TESTING WITHOUT LOADING AN LLM!
        # https://huggingface.co/stabilityai/stablelm-3b-4e1t?library=true
        # tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t")
        # self.model = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t")
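        # ASSUMED HF ADAPTATION (a minimal sketch, not code from the original
        # repo): the chain built in ingest() pipes the prompt into self.model,
        # so some model must be assigned or the app crashes. Wrapping a
        # transformers text-generation pipeline in LangChain's
        # HuggingFacePipeline makes it compose with the "|" runnable syntax.
        # The model choice (stabilityai/stablelm-3b-4e1t, the last candidate
        # above) is an assumption about what fits the 16 GB free tier.
        self.model = HuggingFacePipeline(
            pipeline=pipeline(
                "text-generation",
                model="stabilityai/stablelm-3b-4e1t",
                trust_remote_code=True,  # this model ships custom modeling code
                max_new_tokens=256,  # cap the generated answer length
            )
        )
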
        self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=100)
        self.prompt = PromptTemplate.from_template(
            """
            <s> [INST] You are an assistant for question-answering tasks. Use the following pieces of retrieved context
            to answer the question. If you don't know the answer, just say that you don't know. Use three sentences
            maximum and keep the answer concise. [/INST] </s>
            [INST] Question: {question}
            Context: {context}
            Answer: [/INST]
            """
        )
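        # NOTE: the <s>/[INST] markers above follow the Mistral/Llama-2
        # instruct format; a different base model may expect different markup.
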
    def ingest(self, pdf_file_path: str):
        docs = PyPDFLoader(file_path=pdf_file_path).load()
        chunks = self.text_splitter.split_documents(docs)
        chunks = filter_complex_metadata(chunks)

        # Store the vector store on self so clear() can actually release it.
        self.vector_store = Chroma.from_documents(documents=chunks, embedding=FastEmbedEmbeddings())
        self.retriever = self.vector_store.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={
                "k": 3,  # return at most 3 chunks
                "score_threshold": 0.5,  # ignore chunks scoring below 0.5
            },
        )

        # The mapping feeds the query to the retriever (as context) and passes
        # it through unchanged (as question) before filling the prompt.
        self.chain = ({"context": self.retriever, "question": RunnablePassthrough()}
                      | self.prompt
                      | self.model
                      | StrOutputParser())

    def ask(self, query: str):
        if not self.chain:
            return "Please add a PDF document first."
        return self.chain.invoke(query)

    def clear(self):
        self.vector_store = None
        self.retriever = None
        self.chain = None

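# Hypothetical usage sketch (not part of the original repo); the PDF path and
# the question are placeholder assumptions. Guarded so it never runs when the
# Space imports this module.
if __name__ == "__main__":
    chat = ChatPDF()
    chat.ingest("example.pdf")  # assumed local file
    print(chat.ask("What is this document about?"))
    chat.clear()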