# app.py — Executive Editor Pro (Streamlit chat-based editing assistant)
# Provenance: NavyDevilDoc, "Update app.py", commit b5a978b (verified)
import streamlit as st
from openai import OpenAI
import os
import prompts
import file_processing
st.set_page_config(page_title="Executive Editor Pro", layout="wide", page_icon="⚓")

# --- API KEY ---
# Fail fast with a clear message when the key is missing; otherwise the first
# OpenAI request would only fail much later with an opaque authentication error.
api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
    st.error("OPENAI_API_KEY environment variable is not set.")
    st.stop()
client = OpenAI(api_key=api_key)
# --- SIDEBAR ---
with st.sidebar:
    st.header("⚙️ Controls")

    # Editing modes are defined centrally in prompts.MODE_CONFIG; the selected
    # key is used later to build the system prompt.
    selected_mode = st.radio("Editing Mode", list(prompts.MODE_CONFIG.keys()))

    st.divider()

    # Batch context uploader — extracted text is injected into the chat history.
    uploaded_files = st.file_uploader(
        "Upload Context",
        type=['pdf', 'docx', 'txt', 'csv', 'xlsx'],
        accept_multiple_files=True,
    )

    # Wipe the conversation and redraw immediately.
    if st.button("Clear Chat"):
        st.session_state.messages = []
        st.rerun()
# --- MAIN APP ---
st.title("⚓ Executive Editor Pro")

# Chat history lives in session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []
# --- MAIN LOGIC: HANDLE BATCH UPLOADS ---
# BUG FIX: the original code set a boolean `file_processed` flag that was never
# reset, so any batch uploaded after the first one was silently ignored (and an
# all-errors batch was re-extracted on every rerun). We instead remember a
# signature of the last ingested batch — the tuple of its file names — and
# reprocess only when it changes.
if uploaded_files:
    batch_signature = tuple(f.name for f in uploaded_files)
    if st.session_state.get("processed_batch") != batch_signature:
        master_text = ""
        file_names = []
        error_log = []
        with st.spinner(f"Processing {len(uploaded_files)} documents..."):
            for file in uploaded_files:
                text, error = file_processing.validate_and_extract(file)
                if error:
                    error_log.append(f"❌ {file.name}: {error}")
                else:
                    # XML-style tags delimit each document so the model can
                    # tell where one file ends and the next begins.
                    master_text += f"\n<document name='{file.name}'>\n{text}\n</document>\n"
                    file_names.append(file.name)
        if master_text:
            # The full extracted text goes to the model ('content'); the chat
            # UI shows only a tidy file list ('display_text').
            system_note = f"**[SYSTEM: User attached {len(file_names)} files]**\n\n{master_text}"
            display_message = "📂 **Batch Upload Processed:**\n"
            for name in file_names:
                display_message += f"- `{name}`\n"
            if error_log:
                display_message += "\n**Errors:**\n" + "\n".join(error_log)
            st.session_state.messages.append({
                "role": "user",
                "content": system_note,
                "display_text": display_message,
            })
            st.session_state.processed_batch = batch_signature
            st.rerun()
        elif error_log:
            # Every file failed validation: surface the errors, and still mark
            # the batch as seen so extraction is not retried on each rerun.
            st.session_state.processed_batch = batch_signature
            for e in error_log:
                st.error(e)
# --- DISPLAY CHAT ---
# Render the conversation. File-upload messages carry a compact
# 'display_text' so the raw extracted document text is never shown;
# ordinary messages fall back to their 'content'.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg.get("display_text", msg["content"]))
# --- CHAT INPUT & PROCESSING ---
if user_input := st.chat_input("Type instructions or paste text..."):
    # 1. Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    # 2. Stream the assistant's reply.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        # Build the system prompt for the currently selected editing mode from
        # the shared base prompt plus the mode's instruction and examples.
        mode_config = prompts.MODE_CONFIG[selected_mode]
        system_instruction = f"""
{prompts.BASE_SYSTEM_PROMPT}
CURRENT MODE: {selected_mode}
INSTRUCTION: {mode_config['instruction']}
EXAMPLES OF DESIRED OUTPUT:
{mode_config['examples']}
"""

        # Only 'role' and 'content' are sent to OpenAI; the custom
        # 'display_text' field used by the UI would be rejected by the API.
        api_messages = [{"role": "system", "content": system_instruction}]
        api_messages.extend(
            {"role": msg["role"], "content": msg["content"]}
            for msg in st.session_state.messages
        )

        try:
            stream = client.chat.completions.create(
                model="gpt-4o",
                messages=api_messages,
                temperature=0.3,
                stream=True,
            )
            # Accumulate streamed deltas, showing a cursor while in flight.
            for chunk in stream:
                delta = chunk.choices[0].delta.content
                if delta:
                    full_response += delta
                    message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(full_response)
            st.session_state.messages.append(
                {"role": "assistant", "content": full_response}
            )
            # Offer the finished edit as a Markdown download.
            st.download_button(
                label="💾 Download Result",
                data=full_response,
                file_name="edited_text.md",
                mime="text/markdown",
            )
        except Exception as e:
            # Surface API/network failures in the UI instead of crashing the app.
            st.error(f"An error occurred: {e}")