# File size: 1,636 Bytes
# 2992469 22bfc32 8759daa 22bfc32 8759daa 2992469
# NOTE: the two lines above are extraction artifacts (hash/line-number gutter), commented out so the file remains valid Python.
import os
import openai
import streamlit as st
# Load environment variables from a local .env file (python-dotenv)
from dotenv import load_dotenv
load_dotenv()
# Set the module-level OpenAI API key (openai<1.0 auth style).
# NOTE(review): os.getenv returns None when OPENAI_API_KEY is unset;
# API calls would then fail later at request time, not here.
openai.api_key = os.getenv("OPENAI_API_KEY")
# Function to interact with OpenAI GPT model
def get_openai_response(prompt, model="gpt-4", max_tokens=150):
    """Send *prompt* to the OpenAI chat API and return the reply text.

    Args:
        prompt: Full prompt text, including any document context.
        model: Chat model name. Defaults to "gpt-4".
        max_tokens: Cap on the length of the generated answer.

    Returns:
        The assistant's reply with surrounding whitespace stripped.
    """
    # BUG FIX: the original called openai.Completion.create(engine="gpt-4"),
    # but gpt-4 is a chat-only model and the legacy Completion endpoint
    # rejects it. Use the ChatCompletion endpoint (openai<1.0 style, matching
    # the module-level openai.api_key assignment above).
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        n=1,
        temperature=0.7,
    )
    return response.choices[0].message["content"].strip()
# Streamlit UI
st.title("Chat with Your Document")
# Load chunked data from the .txt file (chunked_data.txt).
# BUG FIX: bind chunked_data BEFORE the try block — the original left the
# name undefined when the file was missing, so the question handler below
# crashed with NameError instead of showing the error message.
chunked_data = []
try:
    # Explicit UTF-8 so the read does not depend on the platform default.
    with open("chunked_data.txt", "r", encoding="utf-8") as f:
        chunked_data = f.read().split("\n---\n")  # Split by delimiter to separate chunks
    st.write("Document has been chunked into the following parts:")
    for i, chunk_part in enumerate(chunked_data, 1):
        st.write(f"**Chunk {i}:**\n{chunk_part}\n")
except FileNotFoundError:
    st.error("Chunked data not found. Please run the search and chunk process first.")
# Input field for user to ask questions about the chunked document
st.subheader("Ask a question about the document:")
user_question = st.text_input("Your question")
# Button to submit the question
if st.button("Get Answer"):
    if user_question:
        # ROBUSTNESS: chunked_data is only bound when the file load above
        # succeeded; look it up defensively so a missing file produces a
        # friendly error instead of a NameError traceback.
        chunks = globals().get("chunked_data")
        if chunks is None:
            st.error("No document chunks are loaded. Please run the search and chunk process first.")
        else:
            # Create the prompt with the document chunks and user's question
            prompt = f"Document Chunks: {chunks}\n\nQuestion: {user_question}\nAnswer:"
            response = get_openai_response(prompt)
            st.write(f"**Answer**: {response}")
    else:
        st.error("Please provide a question.")
# (stray extraction-gutter character removed)