# PdfQuerier — Streamlit app that indexes an uploaded PDF with LlamaIndex
# and answers natural-language questions about its content.
import streamlit as st
from PyPDF2 import PdfReader

from llama_index import ServiceContext, VectorStoreIndex
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.llms import HuggingFaceInferenceAPI
from llama_index.schema import Document
def read_pdf(uploaded_file):
    """Extract and concatenate the text of every page of a PDF.

    Args:
        uploaded_file: A binary file-like object readable by PyPDF2's
            ``PdfReader`` (e.g. a Streamlit ``UploadedFile``).

    Returns:
        str: The text of all pages joined in order; an empty string for a
        document with no extractable text.
    """
    pdf_reader = PdfReader(uploaded_file)
    # extract_text() may return None for pages with no extractable text
    # (e.g. scanned images), so coerce to "" to keep the join safe.
    # join() also avoids the quadratic cost of repeated string +=.
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)
st.title("PdfQuerier using LLAMA by Rahul Bhoyar")
st.markdown("Query your pdf file data with using this chatbot.")

# Token is a credential: mask it in the UI.
hf_token = st.text_input("Enter your Hugging Face token:", type="password")
# BUGFIX: the LLM client was previously constructed before the user had
# entered a token, sending unauthenticated requests to the Inference API
# on the very first render. Halt the script until a token is provided.
if not hf_token:
    st.info("Please enter your Hugging Face token to continue.")
    st.stop()

llm = HuggingFaceInferenceAPI(
    model_name="HuggingFaceH4/zephyr-7b-alpha", token=hf_token
)

uploaded_file = st.file_uploader("Choose a PDF file", type=["pdf"])

# Creation of Embedding model and the service context that controls how the
# document is chunked (800 chars, 20 overlap) and embedded.
embed_model_uae = HuggingFaceEmbedding(model_name="WhereIsAI/UAE-Large-V1")
service_context = ServiceContext.from_defaults(
    llm=llm, chunk_size=800, chunk_overlap=20, embed_model=embed_model_uae
)

if uploaded_file is not None:
    file_contents = read_pdf(uploaded_file)
    documents = [Document(text=file_contents)]
    st.success("Documents loaded successfully!")

    # Indexing the documents; the placeholder lets later steps reuse the
    # same status line instead of stacking messages.
    progress_container = st.empty()
    progress_container.text("Creating VectorStoreIndex...")
    index = VectorStoreIndex.from_documents(
        documents, service_context=service_context, show_progress=True
    )
    # Persist the storage context so the index could be reloaded later
    # without re-embedding the document.
    index.storage_context.persist()
    st.success("VectorStoreIndex created successfully!")

    # Create Query Engine and answer the user's question, if any.
    query = st.text_input("Ask a question:")
    query_engine = index.as_query_engine()
    if query:
        progress_container.text("Fetching the response...")
        response = query_engine.query(query)
        st.markdown(f"**Response:** {response}")