import streamlit as st
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.prompts import PromptTemplate
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import CTransformers
from langchain.chains import RetrievalQA

DB_FAISS_PATH = 'vectorstore/db_faiss'

custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.

Context: {context}
Question: {question}

Only return the helpful answer below and nothing else.
Helpful answer:
"""

def set_custom_prompt():
    """
    Prompt template for QA retrieval for each vectorstore
    """
    prompt = PromptTemplate(template=custom_prompt_template,
                            input_variables=['context', 'question'])
    return prompt

# Retrieval QA chain: 'stuff' the top-k retrieved chunks into the prompt
def retrieval_qa_chain(llm, prompt, db):
    qa_chain = RetrievalQA.from_chain_type(llm=llm,
                                           chain_type='stuff',
                                           # fetch the 2 most similar chunks
                                           retriever=db.as_retriever(search_kwargs={'k': 2}),
                                           return_source_documents=True,
                                           chain_type_kwargs={'prompt': prompt}
                                           )
    return qa_chain

# Loading the model
def load_llm(max_new_tokens, temperature):
    # Load the locally downloaded GGML model. Generation settings belong in
    # the `config` dict of LangChain's CTransformers wrapper; as top-level
    # kwargs they are not applied, so the UI sliders would have no effect.
    llm = CTransformers(
        model="llama-2-7b-chat.ggmlv3.q8_0.bin",
        model_type="llama",
        config={'max_new_tokens': max_new_tokens,
                'temperature': temperature}
    )
    return llm

# QA Model Function
# Cached so the 7B model and the FAISS index are loaded once per slider
# setting rather than on every Streamlit rerun.
@st.cache_resource
def qa_bot(max_new_tokens, temperature):
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                                       model_kwargs={'device': 'cpu'})
    db = FAISS.load_local(DB_FAISS_PATH, embeddings)
    llm = load_llm(max_new_tokens, temperature)
    qa_prompt = set_custom_prompt()
    qa = retrieval_qa_chain(llm, qa_prompt, db)

    return qa
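
# --- Ingest sketch (not in the original file; a minimal, hedged example) ---
# qa_bot() assumes a FAISS index already exists at DB_FAISS_PATH. One way it
# could be built, assuming the source PDFs live in a hypothetical 'data/'
# directory, reusing the PyPDFLoader/DirectoryLoader imports above and the
# same embedding model so indexing and querying stay consistent:
def build_vectorstore(data_path='data/'):
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    # Load every PDF under data_path.
    loader = DirectoryLoader(data_path, glob='*.pdf', loader_cls=PyPDFLoader)
    documents = loader.load()

    # Split into overlapping chunks small enough for top-k retrieval.
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    texts = splitter.split_documents(documents)

    # Embed with the same model used at query time and persist the index.
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
                                       model_kwargs={'device': 'cpu'})
    db = FAISS.from_documents(texts, embeddings)
    db.save_local(DB_FAISS_PATH)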

def main():
    st.title("AI ChatBot LLM")

    max_new_tokens = st.slider("Max New Tokens", min_value=1, max_value=1000, value=512)
    temperature = st.slider("Temperature", min_value=0.1, max_value=1.0, step=0.1, value=0.5)

    qa_result = qa_bot(max_new_tokens, temperature)

    user_input = st.text_input("Enter your question:", key="question")

    if st.button("Ask") and user_input:
        response = qa_result({'query': user_input})
        answer = response["result"]
        sources = response["source_documents"]

        st.write("Answer:", answer)
        if sources:
            st.write("Sources:", sources)
        else:
            st.write("No sources found")

    # Clearing must happen in an on_click callback: re-creating the text_input
    # with the same label (as the original did) raises DuplicateWidgetID, and
    # a widget's session_state key cannot be overwritten after it has rendered.
    def clear_question():
        st.session_state["question"] = ""

    st.button("Clear", on_click=clear_question)

if __name__ == "__main__":
    main()
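
# Launch with Streamlit (the filename below is an assumption; substitute
# whatever this file is saved as):
#   streamlit run chatbot_app.py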