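"""Pennwick File Analyzer 2 -- a Streamlit app for chatting with uploaded PDFs.

Pipeline: extract text from the uploaded PDFs, split it into overlapping
chunks, embed the chunks with a HuggingFace Instructor model, index them in
a FAISS vector store, and answer questions through a LangChain
ConversationalRetrievalChain backed by a HuggingFace Hub LLM.
"""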
import base64
from datetime import datetime

import streamlit as st
from streamlit.components.v1 import html

from dotenv import load_dotenv
from PyPDF2 import PdfReader

from htmlTemplates import css, bot_template, user_template

from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain_community.llms import HuggingFaceHub
from langchain_community.vectorstores import FAISS
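
# HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN environment variable;
# main() calls load_dotenv() so the token can be kept in a local .env file.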


def extract_pdf_text(pdf_docs):
    """Concatenate the extracted text of every page of every uploaded PDF."""
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # extract_text() returns None for pages with no extractable text
            # (e.g. scanned images), so guard against concatenating None.
            text += page.extract_text() or ""
    return text


def extract_bitesize_pieces(text):
    """Split raw text into overlapping chunks sized for embedding."""
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=800,
        chunk_overlap=200,  # overlap preserves context across chunk boundaries
        length_function=len,
    )
    return text_splitter.split_text(text)


def prepare_embedding_vectors(text_chunks):
    """Embed the text chunks and index them in a FAISS vector store."""
    st.write("Building vector store...")
    embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
    st.write("Embedding model loaded...")
    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    st.write("FAISS index built.")
    return vectorstore
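
# Note: hkunlp/instructor-xl is a multi-gigabyte model that is downloaded and
# run locally the first time it is used; a smaller variant such as
# hkunlp/instructor-large trades some accuracy for faster startup.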


def prepare_conversation(vectorstore):
    """Wire the LLM, conversation memory, and retriever into a chat chain."""
    llm = HuggingFaceHub(
        repo_id="google/flan-t5-xxl",
        model_kwargs={"temperature": 0.5, "max_length": 512},
    )
    # return_messages=True keeps the history as message objects, which the
    # rendering loop in process_user_question() expects.
    memory = ConversationBufferMemory(
        memory_key='chat_history', return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
    )
    return conversation_chain


def process_user_question(user_question):
    """Send the user's question to the conversation chain and render the chat."""
    print('process_user_question called:\n')
    if not user_question:
        print('question is empty')
        return
    if st.session_state.conversation is None:
        # No documents have been processed yet, so there is no chain to query.
        print('conversation chain is not initialized')
        return

    print('question is:', user_question)

    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']

    results_size = len(response['chat_history'])
    results_string = ""
    print('results_size is:', results_size)

    for i, message in enumerate(st.session_state.chat_history):
        # Show only the six most recent messages.
        if results_size > 6 and i < results_size - 6:
            print('skipped message', i)
            continue
        if i % 2 == 0:
            # Even positions hold the user's questions.
            results_string += "<p>" + message.content + "</p>"
        else:
            # Odd positions hold the model's answers.
            results_string += "<p>" + "-- " + message.content + "</p>"

    html(results_string, height=300, scrolling=True)
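
# If htmlTemplates defines its templates with a {{MSG}} placeholder (as the
# imported user_template / bot_template suggest), the bare <p> tags above
# could be swapped for e.g.:
#     results_string += user_template.replace("{{MSG}}", message.content)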


def main():
    print('Pennwick Starting up...\n')

    # Load environment variables (including the HuggingFace Hub token) from .env.
    load_dotenv()

    # Base64-encode the icon so it can be passed inline as a data URL.
    with open("robot_icon.ico", "rb") as f:
        encoded_string = base64.b64encode(f.read()).decode()

    st.set_page_config(page_title="Pennwick File Analyzer 2",
                       page_icon=f"data:image/x-icon;base64,{encoded_string}")

    print('prepared page...\n')

    st.write(css, unsafe_allow_html=True)

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    st.header("Pennwick File Analyzer 2")

    # text_input returns "" until the user submits a question.
    user_question = st.text_input("Ask the Model a question about your uploaded documents:")
    if user_question:
        print('calling process question', user_question)
        process_user_question(user_question)

    with st.sidebar:
        st.subheader("Your documents")
        pdf_docs = st.file_uploader(
            "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)

        if st.button("Process these files"):
            with st.spinner("Processing..."):
                start_time = datetime.now()
                st.write("Vectorizing Files - Current Time =",
                         start_time.strftime("%H:%M:%S"))

                raw_text = extract_pdf_text(pdf_docs)
                text_chunks = extract_bitesize_pieces(raw_text)
                vectorstore = prepare_embedding_vectors(text_chunks)
                st.session_state.conversation = prepare_conversation(vectorstore)

                end_time = datetime.now()
                st.write("Files Vectorized - Total EXECUTION Time =",
                         end_time - start_time, end_time)
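
# Under `streamlit run`, Streamlit executes this script top to bottom and
# re-runs it on every user interaction; session_state preserves the chain
# and chat history across those re-runs.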
if __name__ == '__main__':
    main()