import os

import openai
import streamlit as st
from dotenv import load_dotenv

# Load environment variables from a local .env file
load_dotenv()

# Set the OpenAI API key (uses the pre-1.0 openai SDK's module-level configuration)
openai.api_key = os.getenv("OPENAI_API_KEY")


# Function to interact with an OpenAI chat model.
# Note: GPT-4 is only served through the chat completions endpoint, so
# ChatCompletion.create is used here rather than the legacy Completion.create.
def get_openai_response(prompt, model="gpt-4", max_tokens=150):
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        n=1,
        temperature=0.7,
    )
    return response.choices[0].message["content"].strip()


# Streamlit UI
st.title("Chat with Your Document")

# Load chunked data from the .txt file (chunked_data.txt).
# Initialize to an empty list so the question handler below never hits an
# undefined variable when the file is missing.
chunked_data = []
try:
    with open("chunked_data.txt", "r") as f:
        # Chunks are separated by a "\n---\n" delimiter
        chunked_data = f.read().split("\n---\n")

    st.write("Document has been chunked into the following parts:")
    for i, chunk_part in enumerate(chunked_data, 1):
        st.write(f"**Chunk {i}:**\n{chunk_part}\n")
except FileNotFoundError:
    st.error("Chunked data not found. Please run the search and chunk process first.")

# Input field for the user to ask questions about the chunked document
st.subheader("Ask a question about the document:")
user_question = st.text_input("Your question")

# Button to submit the question
if st.button("Get Answer"):
    if not chunked_data:
        st.error("No document chunks are loaded. Please run the search and chunk process first.")
    elif user_question:
        # Build the prompt from the document chunks and the user's question
        joined_chunks = "\n---\n".join(chunked_data)
        prompt = f"Document Chunks:\n{joined_chunks}\n\nQuestion: {user_question}\nAnswer:"
        response = get_openai_response(prompt)
        st.write(f"**Answer**: {response}")
    else:
        st.error("Please provide a question.")
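
# --- Usage sketch (assumptions noted below) ---
# Assuming this file is saved as app.py and the dependencies seen in the imports
# are installed (streamlit, openai<1.0, python-dotenv), the app can be started with:
#
#   streamlit run app.py
#
# An illustrative chunked_data.txt, assuming the "\n---\n" delimiter used above:
#
#   First chunk of the source document...
#   ---
#   Second chunk of the source document...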