import streamlit as st
from dotenv import load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings  # General embeddings from HuggingFace models.
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
from langchain.llms import LlamaCpp
from langchain.document_loaders import PyPDFLoader, TextLoader, JSONLoader, CSVLoader
import tempfile  # Used to write the uploaded documents to temporary files.
import os
from huggingface_hub import hf_hub_download  # Downloads model files from the Hugging Face Hub.

# Extract text from an uploaded PDF document.
def get_pdf_text(pdf_docs):
    temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
    temp_filepath = os.path.join(temp_dir.name, pdf_docs.name)  # Build the temporary file path.
    with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
        f.write(pdf_docs.getvalue())  # Write the uploaded PDF bytes to the temporary file.
    pdf_loader = PyPDFLoader(temp_filepath)  # Load the PDF with PyPDFLoader.
    pdf_doc = pdf_loader.load()  # Extract the text as Document objects.
    return pdf_doc  # Return the extracted documents.

# Extract text from an uploaded plain-text document.
def get_text_file(text_docs):
    temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
    temp_filepath = os.path.join(temp_dir.name, text_docs.name)  # Build the temporary file path.
    with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
        f.write(text_docs.getvalue())  # Write the uploaded text bytes to the temporary file.
    text_loader = TextLoader(temp_filepath)  # Load the text file with TextLoader.
    text_doc = text_loader.load()  # Extract the text as Document objects.
    return text_doc  # Return the extracted documents.

# Extract text from an uploaded CSV document.
def get_csv_file(csv_docs):
    temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
    temp_filepath = os.path.join(temp_dir.name, csv_docs.name)  # Build the temporary file path.
    with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
        f.write(csv_docs.getvalue())  # Write the uploaded CSV bytes to the temporary file.
    csv_loader = CSVLoader(temp_filepath)  # Load the CSV file with CSVLoader (one Document per row).
    csv_doc = csv_loader.load()  # Extract the rows as Document objects.
    return csv_doc  # Return the extracted documents.

# Extract text from an uploaded JSON document.
def get_json_file(json_docs):
    temp_dir = tempfile.TemporaryDirectory()  # Create a temporary directory.
    temp_filepath = os.path.join(temp_dir.name, json_docs.name)  # Build the temporary file path.
    with open(temp_filepath, "wb") as f:  # Open the temporary file in binary write mode.
        f.write(json_docs.getvalue())  # Write the uploaded JSON bytes to the temporary file.
    json_loader = JSONLoader(file_path=temp_filepath, jq_schema='.messages[].content', text_content=False)
    try:
        json_doc = json_loader.load()  # Extract the matching entries as Document objects.
    except Exception as e:
        # Handle malformed or unexpected JSON gracefully instead of crashing the app.
        print(f"Error while loading JSON: {e}")
        json_doc = []  # Fall back to an empty list on error.
    return json_doc  # Return the extracted documents (empty list if loading failed).
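
# Note (assumption about the input data): the jq_schema '.messages[].content' above expects uploaded
# JSON shaped roughly like {"messages": [{"content": "first text"}, {"content": "second text"}, ...]}.
# Files with a different structure raise inside json_loader.load() and fall back to the empty list.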

# Split the loaded documents into overlapping text chunks.
def get_text_chunks(documents):
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,      # Maximum size of each chunk, in characters.
        chunk_overlap=200,    # Number of characters shared between consecutive chunks.
        length_function=len   # Function used to measure text length.
    )
    documents = text_splitter.split_documents(documents)  # Split the documents into chunks.
    return documents  # Return the chunks.
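
# Rough illustration of the settings above: with chunk_size=1000 and chunk_overlap=200, a single
# 2,500-character document is split into roughly three overlapping chunks (about 0-1000, 800-1800,
# and 1600-2500); exact boundaries depend on where the recursive splitter finds separators.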

# Build a FAISS vector store from the text chunks.
def get_vectorstore(text_chunks):
    if not text_chunks:
        return None  # Nothing to index; the caller must handle an empty upload.
    # Load the desired embedding model (runs on CPU).
    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L12-v2',
                                       model_kwargs={'device': 'cpu'})
    vectorstore = FAISS.from_documents(text_chunks, embeddings)  # Embed the chunks and build the FAISS index.
    return vectorstore  # Return the vector store.
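
# Illustrative usage (not called directly in this app): the retriever built from this store wraps
# vectorstore.similarity_search(query, k=4), which returns the k chunks closest to the query in
# embedding space.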

# Build the conversational retrieval chain on top of the vector store.
def get_conversation_chain(vectorstore):
    model_name_or_path = 'TheBloke/Llama-2-7B-chat-GGUF'
    model_basename = 'llama-2-7b-chat.Q2_K.gguf'
    # Download the GGUF weights from the Hugging Face Hub (cached locally after the first run).
    model_path = hf_hub_download(repo_id=model_name_or_path, filename=model_basename)
    llm = LlamaCpp(model_path=model_path,
                   n_ctx=4096,  # Context window; Llama-2 models use a 4096-token context.
                   temperature=0.75,
                   max_tokens=2000,
                   top_p=1,
                   verbose=True)
    # Create memory that stores the conversation history.
    memory = ConversationBufferMemory(
        memory_key='chat_history', return_messages=True)
    # Create the conversational retrieval chain.
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory
    )
    return conversation_chain  # Return the conversation chain.
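
# How the chain behaves at query time: it condenses the new question together with chat_history into
# a standalone question, retrieves the most similar chunks from the vector store, and asks the LLM to
# answer from those chunks; the memory object keeps chat_history updated between calls.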

# Handle a question typed by the user.
def handle_userinput(user_question):
    print('user_question => ', user_question)
    if st.session_state.conversation is None:
        st.warning("Please upload and process your documents first.")
        return
    # Run the conversation chain to generate a response to the user's question.
    response = st.session_state.conversation({'question': user_question})
    # Store the updated chat history in the session state.
    st.session_state.chat_history = response['chat_history']
    for i, message in enumerate(st.session_state.chat_history):
        if i % 2 == 0:
            st.write(user_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)
        else:
            st.write(bot_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)

def main():
    load_dotenv()
    st.set_page_config(page_title="Chat with multiple Files",
                       page_icon=":books:")
    st.write(css, unsafe_allow_html=True)
    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None
    st.header("Chat with multiple Files:")
    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        handle_userinput(user_question)
    with st.sidebar:
        st.subheader("Your documents")
        docs = st.file_uploader(
            "Upload your files here and click on 'Process'", accept_multiple_files=True)
        if st.button("Process"):
            with st.spinner("Processing"):
                doc_list = []
                for file in docs:
                    print('file - type : ', file.type)
                    if file.type == 'text/plain':
                        # .txt file
                        doc_list.extend(get_text_file(file))
                    elif file.type in ['application/octet-stream', 'application/pdf']:
                        # .pdf file (some browsers report PDFs as application/octet-stream)
                        doc_list.extend(get_pdf_text(file))
                    elif file.type == 'text/csv':
                        # .csv file
                        doc_list.extend(get_csv_file(file))
                    elif file.type == 'application/json':
                        # .json file
                        doc_list.extend(get_json_file(file))
                # Split the loaded documents into chunks.
                text_chunks = get_text_chunks(doc_list)
                # Build the vector store.
                vectorstore = get_vectorstore(text_chunks)
                if vectorstore is None:
                    st.warning("No text could be extracted from the uploaded files.")
                else:
                    # Build the conversation chain and keep it in the session state.
                    st.session_state.conversation = get_conversation_chain(vectorstore)


if __name__ == '__main__':
    main()