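"""Streamlit PDF chatbot.

Upload a PDF, build a vector store from it (utils.pdf_processor.PDFProcessor),
and answer questions about its content through a QA chain created by
utils.llm_handler.LLMHandler, which requires a Hugging Face API token.
"""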
import streamlit as st
from utils.pdf_processor import PDFProcessor
from utils.llm_handler import LLMHandler
from dotenv import load_dotenv
import os
import requests
from PIL import Image
from io import BytesIO

# Load environment variables and stop early if the Hugging Face API token is missing
load_dotenv()
if not os.getenv("HUGGINGFACEHUB_API_TOKEN"):
    st.error("HUGGINGFACEHUB_API_TOKEN is not set in the environment variables!")
    st.stop()
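
# Example .env entry read by load_dotenv() above (placeholder value, not a real token):
#   HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx
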
st.set_page_config(
    page_title="PDF Chatbot by phucbienvan",
    page_icon="📚",
    layout="wide"
)

# Display logo and title side by side
col1, col2 = st.columns([1, 5])
with col1:
    try:
        logo_url = "https://cdn-avatars.huggingface.co/v1/production/uploads/65129261353a60593b1dc353/PY40eVSt4jkYQinleKGua.jpeg"
        response = requests.get(logo_url, timeout=10)
        logo = Image.open(BytesIO(response.content))
        st.image(logo, width=150)
    except Exception as e:
        st.error(f"Could not load logo: {e}")
with col2:
    st.title("📚 PDF Chatbot")
    st.markdown("### Upload PDF files and ask questions about the content")
    st.markdown("##### Author: phucbienvan")
if "vector_store" not in st.session_state:
st.session_state["vector_store"] = None
if "qa_chain" not in st.session_state:
st.session_state["qa_chain"] = None
if "chat_history" not in st.session_state:
st.session_state["chat_history"] = []
if "pdf_name" not in st.session_state:
st.session_state["pdf_name"] = None
with st.sidebar:
    st.header("Upload Document")
    uploaded_file = st.file_uploader("Choose PDF file", type="pdf")

    # Only reprocess when a different file is uploaded
    if uploaded_file is not None and (st.session_state["pdf_name"] != uploaded_file.name):
        with st.spinner("Processing PDF file..."):
            pdf_processor = PDFProcessor()
            st.session_state["vector_store"] = pdf_processor.process_pdf(uploaded_file)

            llm_handler = LLMHandler()
            st.session_state["qa_chain"] = llm_handler.create_qa_chain(st.session_state["vector_store"])

            st.session_state["pdf_name"] = uploaded_file.name
            st.session_state["chat_history"] = []

            st.success(f"Processed file: {uploaded_file.name}")

    st.markdown("---")
    st.markdown("### User Guide")
    st.markdown("""
1. Upload a PDF file from your computer
2. Wait for the system to process the file
3. Ask questions about the file content
4. Get answers from the chatbot
""")
    # Display logo in sidebar
    st.markdown("---")
    try:
        logo_url = "https://cdn-avatars.huggingface.co/v1/production/uploads/65129261353a60593b1dc353/PY40eVSt4jkYQinleKGua.jpeg"
        response = requests.get(logo_url, timeout=10)
        logo = Image.open(BytesIO(response.content))
        st.image(logo, width=100, caption="phucbienvan")
    except Exception:
        # Skip the sidebar logo silently if it cannot be fetched
        pass
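
# Main area: render the conversation so far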
st.subheader("Conversation")
for question, answer in st.session_state["chat_history"]:
    message_container = st.container()
    with message_container:
        col1, col2 = st.columns([1, 9])
        with col1:
            st.markdown("🧑")
        with col2:
            st.markdown(f"**You:** {question}")

    message_container = st.container()
    with message_container:
        col1, col2 = st.columns([1, 9])
        with col1:
            st.markdown("🤖")
        with col2:
            st.markdown(f"**Bot:** {answer}")

    st.markdown("---")
question = st.text_input("Enter your question:", key="question_input")

if st.button("Send Question"):
    if st.session_state["qa_chain"] is None:
        st.error("Please upload a PDF file before asking questions!")
    elif not question:
        st.warning("Please enter a question!")
    else:
        with st.spinner("Finding answer..."):
            llm_handler = LLMHandler()
            answer, sources = llm_handler.get_answer(st.session_state["qa_chain"], question)
            st.session_state["chat_history"].append((question, answer))

        # Show the new exchange right away
        message_container = st.container()
        with message_container:
            col1, col2 = st.columns([1, 9])
            with col1:
                st.markdown("🧑")
            with col2:
                st.markdown(f"**You:** {question}")

        message_container = st.container()
        with message_container:
            col1, col2 = st.columns([1, 9])
            with col1:
                st.markdown("🤖")
            with col2:
                st.markdown(f"**Bot:** {answer}")

        if sources:
            with st.expander("View References"):
                for i, doc in enumerate(sources):
                    st.markdown(f"**Source {i+1}:**")
                    st.markdown(doc.page_content)
                    st.markdown("---")

        # Rerun so the conversation section above picks up the new exchange
        st.rerun()

st.markdown("---")
st.markdown("### 📚 PDF Chatbot | Author: phucbienvan")