Chatbot

- README.md +7 -6
- app.py +147 -0
- htmlTemplates.py +44 -0
- requirements.txt +13 -0
README.md
CHANGED
@@ -1,12 +1,13 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk:
-sdk_version:
+title: Basic DAG AI Chatbot With Llama2
+emoji: 🔥
+colorFrom: green
+colorTo: pink
+sdk: streamlit
+sdk_version: 1.27.2
 app_file: app.py
 pinned: false
+license: apache-2.0
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,147 @@
import streamlit as st
from dotenv import load_dotenv
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings  # General embeddings from HuggingFace models.
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
from langchain.llms import LlamaCpp  # For loading transformer models.
from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
import tempfile  # Library for creating temporary files.
import os
from huggingface_hub import hf_hub_download  # Function for downloading models from the Hugging Face Hub.

# Function that extracts text from a PDF document.
def get_pdf_text(pdf_docs):
    temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
    temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)  # Build the temporary file path.
    with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
        f.write(pdf_docs.getvalue())  # Write the uploaded PDF's contents to the temporary file.
    pdf_loader = PyPDFLoader(temp_filepath)  # Load the PDF with PyPDFLoader.
    pdf_doc = pdf_loader.load()  # Extract the text.
    return pdf_doc  # Return the extracted documents.

# Assignment:
# implement the text-extraction functions below (one possible sketch follows the stubs).
def get_text_file(docs):
    pass

def get_csv_file(docs):
    pass

def get_json_file(docs):
    pass
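
# A possible implementation sketch (not part of the original commit): each loader
# mirrors get_pdf_text above, writing the uploaded file to a temporary path and
# handing it to the matching langchain loader. The jq_schema='.[]' and
# text_content=False arguments are assumptions about the JSON layout; adjust them
# to the actual structure of your data.
def get_text_file(docs):
    temp_dir = tempfile.TemporaryDirectory()
    temp_filepath = os.path.join(temp_dir.name, docs.name)
    with open(temp_filepath, "wb") as f:
        f.write(docs.getvalue())
    return TextLoader(temp_filepath).load()

def get_csv_file(docs):
    temp_dir = tempfile.TemporaryDirectory()
    temp_filepath = os.path.join(temp_dir.name, docs.name)
    with open(temp_filepath, "wb") as f:
        f.write(docs.getvalue())
    return CSVLoader(temp_filepath).load()

def get_json_file(docs):
    temp_dir = tempfile.TemporaryDirectory()
    temp_filepath = os.path.join(temp_dir.name, docs.name)
    with open(temp_filepath, "wb") as f:
        f.write(docs.getvalue())
    return JSONLoader(temp_filepath, jq_schema='.[]', text_content=False).load()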

# Function that splits documents into text chunks.
def get_text_chunks(documents):
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,  # Size of each chunk.
        chunk_overlap=200,  # Overlap between chunks.
        length_function=len  # Function used to measure text length.
    )

    documents = text_splitter.split_documents(documents)  # Split the documents into chunks.
    return documents  # Return the resulting chunks.


# Function that builds a vector store from the text chunks.
def get_vectorstore(text_chunks):
    # Load the desired embedding model.
    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
                                       model_kwargs={'device': 'cpu'})  # Configure the embedding model.
    vectorstore = FAISS.from_documents(text_chunks, embeddings)  # Create the FAISS vector store.
    return vectorstore  # Return the created vector store.


def get_conversation_chain(vectorstore):
    model_name_or_path = 'TheBloke/Llama-2-7B-chat-GGUF'
    model_basename = 'llama-2-7b-chat.Q2_K.gguf'
    model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)

    llm = LlamaCpp(model_path=model_path,
                   n_ctx=4086,
                   input={"temperature": 0.75, "max_length": 2000, "top_p": 1},
                   verbose=True, )
    # Create memory to store the conversation history.
    memory = ConversationBufferMemory(
        memory_key='chat_history', return_messages=True)
    # Create the conversational retrieval chain.
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory
    )
    return conversation_chain  # Return the created conversation chain.

# Function that handles user input.
def handle_userinput(user_question):
    print('user_question => ', user_question)
    # Use the conversation chain to generate a response to the user's question.
    response = st.session_state.conversation({'question': user_question})
    # Store the conversation history.
    st.session_state.chat_history = response['chat_history']

    for i, message in enumerate(st.session_state.chat_history):
        if i % 2 == 0:
            st.write(user_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)
        else:
            st.write(bot_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)


def main():
    load_dotenv()
    st.set_page_config(page_title="Chat with multiple Files",
                       page_icon=":books:")
    st.write(css, unsafe_allow_html=True)

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    st.header("Chat with multiple Files:")
    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        handle_userinput(user_question)

    with st.sidebar:
        st.subheader("Your documents")
        docs = st.file_uploader(
            "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
        if st.button("Process"):
            with st.spinner("Processing"):
                # get pdf text
                doc_list = []

                for file in docs:
                    print('file - type : ', file.type)
                    if file.type == 'text/plain':
                        # file is .txt
                        doc_list.extend(get_text_file(file))
                    elif file.type in ['application/octet-stream', 'application/pdf']:
                        # file is .pdf
                        doc_list.extend(get_pdf_text(file))
                    elif file.type == 'text/csv':
                        # file is .csv
                        doc_list.extend(get_csv_file(file))
                    elif file.type == 'application/json':
                        # file is .json
                        doc_list.extend(get_json_file(file))

                # get the text chunks
                text_chunks = get_text_chunks(doc_list)

                # create vector store
                vectorstore = get_vectorstore(text_chunks)

                # create conversation chain
                st.session_state.conversation = get_conversation_chain(
                    vectorstore)


if __name__ == '__main__':
    main()
htmlTemplates.py
ADDED
@@ -0,0 +1,44 @@
css = '''
<style>
.chat-message {
    padding: 1.5rem; border-radius: 0.5rem; margin-bottom: 1rem; display: flex
}
.chat-message.user {
    background-color: #2b313e
}
.chat-message.bot {
    background-color: #475063
}
.chat-message .avatar {
  width: 20%;
}
.chat-message .avatar img {
  max-width: 78px;
  max-height: 78px;
  border-radius: 50%;
  object-fit: cover;
}
.chat-message .message {
  width: 80%;
  padding: 0 1.5rem;
  color: #fff;
}
'''

bot_template = '''
<div class="chat-message bot">
    <div class="avatar">
        <img src="https://i.ibb.co/cN0nmSj/Screenshot-2023-05-28-at-02-37-21.png" style="max-height: 78px; max-width: 78px; border-radius: 50%; object-fit: cover;">
    </div>
    <div class="message">{{MSG}}</div>
</div>
'''

user_template = '''
<div class="chat-message user">
    <div class="avatar">
        <img src="https://i.ibb.co/rdZC7LZ/Photo-logo-1.png">
    </div>
    <div class="message">{{MSG}}</div>
</div>
'''
requirements.txt
ADDED
@@ -0,0 +1,13 @@
langchain
llama-cpp-python
PyPDF2==3.0.1
faiss-cpu==1.7.4
ctransformers
pypdf
chromadb
tiktoken
pysqlite3-binary
streamlit-extras
InstructorEmbedding
sentence-transformers
jq