Duplicate from Sakil/LLM_Question_Answering_ChatBot
Co-authored-by: Sakil Ansari <Sakil@users.noreply.huggingface.co>
- .gitattributes +36 -0
- README.md +14 -0
- app.py +85 -0
- llama-2-7b-chat.ggmlv3.q8_0.bin +3 -0
- requirements.txt +10 -0
- vectorstore/db_faiss/index.faiss +3 -0
- vectorstore/db_faiss/index.pkl +3 -0
.gitattributes
ADDED
@@ -0,0 +1,36 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+vectorstore/db_faiss/index.faiss filter=lfs diff=lfs merge=lfs -text
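The first 35 rules are the standard Space template defaults; the final rule is the one added for this project, tracking the binary FAISS index with LFS, while the existing *.bin rule is what keeps the ~7 GB Llama weights file out of regular Git history.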
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+title: LLM Question Answering ChatBot
+emoji: 🐢
+colorFrom: indigo
+colorTo: red
+sdk: streamlit
+sdk_version: 1.25.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+duplicated_from: Sakil/LLM_Question_Answering_ChatBot
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
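The frontmatter tells the Hub to run app.py under Streamlit 1.25.0. To try the Space locally, the usual Streamlit workflow should work: `pip install -r requirements.txt` followed by `streamlit run app.py`, assuming the GGML model file sits in the repository root where app.py expects it.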
app.py
ADDED
@@ -0,0 +1,85 @@
+import streamlit as st
+from langchain.document_loaders import PyPDFLoader, DirectoryLoader  # used offline to build the FAISS store, not in this app
+from langchain import PromptTemplate
+from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.vectorstores import FAISS
+from langchain.llms import CTransformers
+from langchain.chains import RetrievalQA
+
+DB_FAISS_PATH = 'vectorstore/db_faiss'
+
+custom_prompt_template = """Use the following pieces of information to answer the user's question.
+If you don't know the answer, just say that you don't know, don't try to make up an answer.
+Context: {context}
+Question: {question}
+Only return the helpful answer below and nothing else.
+Helpful answer:
+"""
+
+def set_custom_prompt():
+    """
+    Prompt template for QA retrieval for each vectorstore
+    """
+    prompt = PromptTemplate(template=custom_prompt_template,
+                            input_variables=['context', 'question'])
+    return prompt
+
+# Retrieval QA chain: "stuff" the top-k retrieved chunks into the prompt
+def retrieval_qa_chain(llm, prompt, db):
+    qa_chain = RetrievalQA.from_chain_type(llm=llm,
+                                           chain_type='stuff',
+                                           retriever=db.as_retriever(search_kwargs={'k': 2}),
+                                           return_source_documents=True,
+                                           chain_type_kwargs={'prompt': prompt})
+    return qa_chain
+
+# Loading the model
+def load_llm(max_new_tokens, temperature):
+    # Load the locally downloaded GGML model via ctransformers
+    llm = CTransformers(
+        model="llama-2-7b-chat.ggmlv3.q8_0.bin",
+        model_type="llama",
+        max_new_tokens=max_new_tokens,
+        temperature=temperature
+    )
+    return llm
+
+# QA model function: wire embeddings, vector store, LLM and prompt together
+def qa_bot(max_new_tokens, temperature):
+    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2",
+                                       model_kwargs={'device': 'cpu'})
+    db = FAISS.load_local(DB_FAISS_PATH, embeddings)
+    llm = load_llm(max_new_tokens, temperature)
+    qa_prompt = set_custom_prompt()
+    qa = retrieval_qa_chain(llm, qa_prompt, db)
+    return qa
+
+def clear_question():
+    # Reset the stored question; re-creating a second text_input with the same label would raise DuplicateWidgetID
+    st.session_state["question"] = ""
+
+def main():
+    st.title("AI ChatBot LLM")
+
+    max_new_tokens = st.slider("Max New Tokens", min_value=1, max_value=1000, value=512)
+    temperature = st.slider("Temperature", min_value=0.1, max_value=1.0, step=0.1, value=0.5)
+
+    qa_result = qa_bot(max_new_tokens, temperature)
+
+    user_input = st.text_input("Enter your question:", key="question")
+
+    if st.button("Ask"):
+        response = qa_result({'query': user_input})
+        answer = response["result"]
+        sources = response["source_documents"]
+
+        st.write("Answer:", answer)
+        if sources:
+            st.write("Sources:", sources)
+        else:
+            st.write("No sources found")
+
+    st.button("Clear", on_click=clear_question)
+
+if __name__ == "__main__":
+    main()
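app.py imports PyPDFLoader and DirectoryLoader but only ever reads the prebuilt index, so the vector store must have been created by a separate offline step that is not part of this commit. A minimal ingestion sketch, assuming the source PDFs live in a hypothetical data/ directory (the actual corpus behind the shipped index is not recorded here):

```python
# ingest.py -- build vectorstore/db_faiss from local PDFs (illustrative sketch)
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

DATA_PATH = 'data/'                     # hypothetical: directory of PDFs to index
DB_FAISS_PATH = 'vectorstore/db_faiss'  # same path app.py loads from

# Load every PDF in the data directory
loader = DirectoryLoader(DATA_PATH, glob='*.pdf', loader_cls=PyPDFLoader)
documents = loader.load()

# Chunk sizes here are assumptions; tune for the corpus
splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
texts = splitter.split_documents(documents)

# Must match the embedding model used at query time in app.py
embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                   model_kwargs={'device': 'cpu'})
db = FAISS.from_documents(texts, embeddings)
db.save_local(DB_FAISS_PATH)
```

Note that PyPDFLoader additionally needs the pypdf package, which requirements.txt does not list.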
llama-2-7b-chat.ggmlv3.q8_0.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3bfdde943555c78294626a6ccd40184162d066d39774bd2c98dae24943d32cc3
+size 7160799872
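The ~7.2 GB quantized weights are stored as a Git LFS pointer, so the actual file only arrives via `git lfs pull`. Anyone reproducing the Space can also fetch the file directly; a sketch assuming it comes from TheBloke/Llama-2-7B-Chat-GGML, whose naming convention this filename matches (an assumption, since the commit does not record the source):

```python
from huggingface_hub import hf_hub_download

# Assumed source repo; the commit itself does not say where the file came from
hf_hub_download(repo_id="TheBloke/Llama-2-7B-Chat-GGML",
                filename="llama-2-7b-chat.ggmlv3.q8_0.bin",
                local_dir=".")
```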
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+langchain
+torch
+accelerate
+bitsandbytes
+transformers
+sentence_transformers
+chainlit
+python-docx
+ctransformers
+faiss_cpu
vectorstore/db_faiss/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15dfc240a44f43797648bb6de6747514335fc4f68d9da7d91d41c4a90c074726
+size 1729581
vectorstore/db_faiss/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96663f6513fb640bf3002df1298cb5a2b4c1dab63601b05e8d96a43f23e375ca
+size 650320
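Together, index.faiss (the vector index) and index.pkl (the docstore metadata) make up the FAISS store that app.py loads. A quick sanity check that the shipped index deserializes and answers similarity queries, mirroring the calls app.py itself makes; the query string is a placeholder, since the indexed corpus is not described in this commit:

```python
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

# Same embedding model app.py pairs with this index
embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                   model_kwargs={'device': 'cpu'})
db = FAISS.load_local('vectorstore/db_faiss', embeddings)

# Placeholder query; k=2 matches the retriever setting in app.py
for doc in db.similarity_search("example question", k=2):
    print(doc.metadata, doc.page_content[:120])
```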