from dotenv import load_dotenv
import os
import tempfile

import streamlit as st
import openai
import fitz  # PyMuPDF, used for PDF text extraction
from llama_index.core import VectorStoreIndex, Document
from sentence_transformers import CrossEncoder

# Load environment variables for local development; the OpenAI key itself
# is read from Streamlit secrets (.streamlit/secrets.toml)
load_dotenv()
openai.api_key = st.secrets["api_key"]

# Sidebar: model configuration controls
st.sidebar.title("Model Configuration")

# File uploader lives in the sidebar
uploaded_file = st.sidebar.file_uploader("Upload a PDF", type=["pdf"])

# Model selection menu. NOTE: these two controls are collected here but not
# yet wired into the LlamaIndex query engine, which uses its default OpenAI LLM
model_selection = st.sidebar.selectbox("Model Selection", ["GPT 3.5", "Llama 2"])

# Temperature slider: min 0.0, max 1.0, default 0.5 (the default must lie
# inside the min/max range, or Streamlit raises a StreamlitAPIException)
model_temperature = st.sidebar.slider("Select model temperature", 0.0, 1.0, 0.5)

# Streamlit reruns this script on every interaction, so keep the LLM
# responses in session state; a plain list would be reset on each rerun
if "llm_responses" not in st.session_state:
    st.session_state.llm_responses = []
llm_responses = st.session_state.llm_responses


@st.cache_resource
def load_hhem_model():
    # Cache the HHEM (Hughes Hallucination Evaluation Model) so it is
    # downloaded and loaded only once, not on every rerun
    return CrossEncoder("vectara/hallucination_evaluation_model")


hhem_model = load_hhem_model()

if uploaded_file is not None:
    # Save the uploaded PDF to a temporary file so PyMuPDF can open it by path
    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf:
        temp_pdf.write(uploaded_file.read())
        temp_pdf_path = temp_pdf.name

    # Extract the text of every page with PyMuPDF
    pdf_document = fitz.open(temp_pdf_path)
    text = ""
    for page in pdf_document:
        text += page.get_text()

    # Build an in-memory vector index over the extracted text
    documents = [Document(text=text)]
    index = VectorStoreIndex.from_documents(documents)
    query_engine = index.as_query_engine()

    query = st.text_input("Ask your question")
    button = st.button("Ask")

    if button:
        response = query_engine.query(query)
        st.write(response.response)

        # Record the LLM response for scoring and display
        llm_responses.append(response.response)

        # Score each recorded response against the source text with HHEM.
        # CrossEncoder.predict expects a list of (premise, hypothesis) pairs;
        # scores near 1.0 indicate the response is consistent with the PDF,
        # scores near 0.0 indicate likely hallucination
        for i, llm_response in enumerate(llm_responses):
            score = hhem_model.predict([(text, llm_response)])[0]
            st.sidebar.write(f"Response {i + 1} - HHEM Score: {score:.3f}")

    # Close and remove the temporary PDF file
    pdf_document.close()
    os.remove(temp_pdf_path)

    # Display the stored LLM responses in the sidebar
    if llm_responses:
        st.sidebar.markdown("## LLM Responses")
        for i, llm_response in enumerate(llm_responses):
            st.sidebar.write(f"Response {i + 1}: {llm_response}")
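# ---------------------------------------------------------------------------
# Usage note (a sketch under stated assumptions, not part of the app logic):
# assuming this file is saved as app.py and the key is stored in
# .streamlit/secrets.toml as `api_key = "sk-..."`, run the app locally with:
#
#   streamlit run app.py
#
# then upload a PDF in the sidebar, type a question, and click "Ask".
#
# A minimal sketch of wiring the sidebar controls into the query engine,
# assuming the llama-index-llms-openai package is installed; it would go
# before VectorStoreIndex.from_documents(). Backing "Llama 2" with a concrete
# provider is omitted here, since it depends on where that model is hosted:
#
#   from llama_index.core import Settings
#   from llama_index.llms.openai import OpenAI
#
#   if model_selection == "GPT 3.5":
#       Settings.llm = OpenAI(model="gpt-3.5-turbo",
#                             temperature=model_temperature)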