# rag.py
# https://github.com/vndee/local-rag-example/blob/main/rag.py
# ADAPTED BY J. BOURS, 01-03-2024, TO USE A HUGGING FACE LLM INSTEAD OF OLLAMA'S self.model = ChatOllama(model="mistral")
# EVERNOTE:
# https://www.evernote.com/shard/s313/nl/41973486/282c6fc8-9ed5-a977-9895-1eb23941bb4c?title=REQUIREMENTS%20FOR%20A%20LITERATURE%20BASED%20RESEARCH%20LBR%20SYSTEM%20-%20FUNCTIONAL%20AND%20TECHNICAL%20REQUIREMENTS%20-%20ALEXANDER%20UNZICKER%20-%2026-02-2024
#
# mistralai/Mistral-7B-v0.1 · Hugging Face 
# https://huggingface.co/mistralai/Mistral-7B-v0.1?library=true
#
# Load model directly
# from transformers import AutoTokenizer, AutoModelForCausalLM
#
# tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
# model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")


from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOllama
from langchain.embeddings import FastEmbedEmbeddings
from langchain.schema.output_parser import StrOutputParser
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema.runnable import RunnablePassthrough
from langchain.prompts import PromptTemplate
from langchain.vectorstores.utils import filter_complex_metadata

from transformers import AutoTokenizer, AutoModelForCausalLM
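# The two imports below are only needed for the hedged model-loading sketch in __init__
# (an assumption of this adaptation, not part of the original ChatOllama-based example):
# transformers.pipeline builds a text-generation pipeline and HuggingFacePipeline wraps
# it as a LangChain-compatible LLM.
from transformers import pipeline
from langchain.llms import HuggingFacePipeline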



class ChatPDF:
    vector_store = None
    retriever = None
    chain = None

    def __init__(self):
        # self.model = ChatOllama(model="mistral") # ORIGINAL
        # mistralai/Mistral-7B-v0.1 · Hugging Face
        # https://huggingface.co/mistralai/Mistral-7B-v0.1?library=true
        # TOO LARGE FOR THE FREE VERSION OF HF SPACES (max 16 GB):
        # tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
        # self.model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
        #
        # https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha?library=true
        # tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
        # self.model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-alpha") 
        #
        # https://huggingface.co/microsoft/phi-2?library=true
        # Intended Uses
        # Given the nature of the training data, the Phi-2 model is best suited for prompts using the 
        # QA format, the chat format, and the code format.
        # tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
        # model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", trust_remote_code=True)
        # https://huggingface.co/meta-llama/Llama-2-7b-chat-hf?library=true
        #
        # tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
        # model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
        #
        # TESTING FOR NOW WITHOUT LOADING AN LLM!
        # https://huggingface.co/stabilityai/stablelm-3b-4e1t?library=true 
        # tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t")
        # self.model = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t")
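        #
        # Minimal sketch (an assumption, not taken from the original repo): despite the note
        # above about testing without an LLM, the chain built in ingest() pipes the prompt into
        # self.model, so some model has to be assigned here. microsoft/phi-2 is one of the
        # smaller candidates listed above; the model id and generation settings are illustrative
        # and not verified against the 16 GB limit of a free HF Space.
        phi2_tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2", trust_remote_code=True)
        phi2_model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", trust_remote_code=True)
        text_gen = pipeline(
            "text-generation",
            model=phi2_model,
            tokenizer=phi2_tokenizer,
            max_new_tokens=256,       # keep answers short, matching the three-sentence prompt
            return_full_text=False,   # return only the generated continuation, not the prompt
        )
        self.model = HuggingFacePipeline(pipeline=text_gen)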

        
        self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=100)
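        # The <s> [INST] ... [/INST] tags in the template below follow the Mistral / Llama-2
        # instruction format that the original ChatOllama(model="mistral") setup expects;
        # other models may use a different chat template.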
        self.prompt = PromptTemplate.from_template(
            """
            <s> [INST] You are an assistant for question-answering tasks. Use the following pieces of retrieved context 
            to answer the question. If you don't know the answer, just say that you don't know. Use three sentences
             maximum and keep the answer concise. [/INST] </s> 
            [INST] Question: {question} 
            Context: {context} 
            Answer: [/INST]
            """
        )

    def ingest(self, pdf_file_path: str):
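        # Load the PDF, split it into overlapping 1024-character chunks, embed them with
        # FastEmbed into a Chroma vector store, and assemble the retrieval-augmented QA chain.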
        docs = PyPDFLoader(file_path=pdf_file_path).load()
        chunks = self.text_splitter.split_documents(docs)
        chunks = filter_complex_metadata(chunks)

        self.vector_store = Chroma.from_documents(documents=chunks, embedding=FastEmbedEmbeddings())
        # Return at most 3 chunks whose similarity score is at least 0.5.
        self.retriever = self.vector_store.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={
                "k": 3,
                "score_threshold": 0.5,
            },
        )

        # LCEL pipeline: the retriever fills {context}, RunnablePassthrough forwards the raw
        # question into {question}, the prompt is rendered, self.model generates an answer,
        # and StrOutputParser returns it as a plain string.
        self.chain = ({"context": self.retriever, "question": RunnablePassthrough()}
                      | self.prompt
                      | self.model
                      | StrOutputParser())

    def ask(self, query: str):
        if not self.chain:
            return "Please, add a PDF document first."

        return self.chain.invoke(query)

    def clear(self):
        self.vector_store = None
        self.retriever = None
        self.chain = None
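

# Minimal usage sketch (not part of the original file; the PDF path and question are
# illustrative, and the original example drives ChatPDF from a separate UI script).
if __name__ == "__main__":
    chat = ChatPDF()
    chat.ingest("example.pdf")
    print(chat.ask("What is the main topic of this document?"))
    chat.clear()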