import os

import openai
import streamlit as st
from dotenv import load_dotenv

# Load environment variables and configure the OpenAI API key
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")


def get_openai_response(prompt, model="gpt-4", max_tokens=150):
    # gpt-4 is a chat model, so it must be called through the ChatCompletion
    # endpoint (legacy pre-1.0 openai SDK, matching the openai.api_key setup above)
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        n=1,
        temperature=0.7,
    )
    return response.choices[0].message.content.strip()


st.title("Chat with Your Document")

# Load the pre-chunked document; chunks are separated by "\n---\n"
chunked_data = []
try:
    with open("chunked_data.txt", "r") as f:
        chunked_data = f.read().split("\n---\n")
    st.write("Document has been chunked into the following parts:")
    for i, chunk_part in enumerate(chunked_data, 1):
        st.write(f"**Chunk {i}:**\n{chunk_part}\n")
except FileNotFoundError:
    st.error("Chunked data not found. Please run the search and chunk process first.")

st.subheader("Ask a question about the document:")
user_question = st.text_input("Your question")

if st.button("Get Answer"):
    if user_question:
        # Combine the document chunks and the user's question into a single prompt
        prompt = (
            "Document Chunks:\n"
            + "\n---\n".join(chunked_data)
            + f"\n\nQuestion: {user_question}\nAnswer:"
        )
        response = get_openai_response(prompt)
        st.write(f"**Answer**: {response}")
    else:
        st.error("Please provide a question.")