Update chat_with_doc.py
chat with doc use chunked data
chat_with_doc.py  +11 -10  CHANGED
@@ -1,7 +1,7 @@
 import os
 import openai
 import streamlit as st
-import
+import json

 # Load environment variables
 from dotenv import load_dotenv
@@ -25,15 +25,15 @@ def get_openai_response(prompt, model="gpt-4", max_tokens=150):
 # Streamlit UI
 st.title("Chat with Your Document")

-#
-
-
-chunked_data =
-
-
-st.write("
-
-st.
+# Load chunked data from the file (chunked_data.json)
+try:
+    with open("chunked_data.json", "r") as f:
+        chunked_data = json.load(f)
+    st.write("Document has been chunked into the following parts:")
+    for i, chunk_part in enumerate(chunked_data, 1):
+        st.write(f"**Chunk {i}:**\n{chunk_part}\n")
+except FileNotFoundError:
+    st.error("Chunked data not found. Please run the search and chunk process first.")

 # Input field for user to ask questions about the chunked document
 st.subheader("Ask a question about the document:")
@@ -48,3 +48,4 @@ if st.button("Get Answer"):
         st.write(f"**Answer**: {response}")
     else:
         st.error("Please provide a question.")
+
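The added try block iterates directly over json.load(f), so chunked_data.json is evidently expected to be a flat JSON array of text chunks, e.g. ["first chunk ...", "second chunk ..."]. The "search and chunk process" named in the new error message is not part of this commit; a minimal sketch of what such a producer could look like, with the script name, source file name, and character-based chunk size all assumptions rather than anything shown in the diff:

# sketch_chunker.py (hypothetical; not part of this commit)
# Writes chunked_data.json in the shape chat_with_doc.py reads:
# a flat JSON list of strings.
import json

CHUNK_SIZE = 500  # characters per chunk; an assumed value

def chunk_text(text, size=CHUNK_SIZE):
    # Split the document into fixed-size character chunks.
    return [text[i:i + size] for i in range(0, len(text), size)]

with open("document.txt", "r") as f:  # source file name is an assumption
    chunks = chunk_text(f.read())

with open("chunked_data.json", "w") as f:  # name matches the diff
    json.dump(chunks, f)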
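The second hunk's header exposes the signature def get_openai_response(prompt, model="gpt-4", max_tokens=150), but the function body is elided from the diff. Given the module-level import openai, a sketch using the legacy (pre-1.0) OpenAI client might look like the following; the actual body in the repository may differ:

import openai  # already imported at the top of chat_with_doc.py

def get_openai_response(prompt, model="gpt-4", max_tokens=150):
    # Send the prompt as a single user message and return the reply text.
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
    )
    return response.choices[0].message["content"].strip()

For answers to actually reflect the document, the question-handling code elided between the second and third hunks presumably folds some or all of the loaded chunks into the prompt before calling this helper; otherwise the model would answer from its own knowledge alone.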