import os
import streamlit as st
# import google.generativeai as gen_ai  # Removed Google import
import openai  # Added OpenAI import
import pyttsx3
import threading
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Configure Streamlit page settings
st.set_page_config(
    page_title="ShadowBox",
    page_icon="🖤",  # Favicon - a simple heart or other calm icon
    layout="centered",
)

# Retrieve OpenAI API key
# Google_API_Key = os.getenv("Google_API_Key")  # Removed Google key
# if not Google_API_Key:
#     st.error("Google API Key not found. Please set the Google_API_Key environment variable.")
#     st.stop()
OpenAI_API_Key = os.getenv("OPENAI_API_KEY")
if not OpenAI_API_Key:
    st.error("OpenAI API Key not found. Please set the OPENAI_API_KEY environment variable.")
    st.stop()

# Set up the OpenAI client
try:
    # gen_ai.configure(api_key=Google_API_Key)  # Removed Google config
    # model = gen_ai.GenerativeModel('gemini-1.5-flash')  # Removed Google model init
    client = openai.OpenAI(api_key=OpenAI_API_Key)
except Exception as e:
    # st.error(f"Failed to configure Google AI: {e}")
    st.error(f"Failed to configure OpenAI client: {e}")
    st.stop()

# Function to translate roles between Gemini-Pro and Streamlit terminology
# (no longer needed with the OpenAI message structure)
# def translate_role_for_streamlit(user_role):
#     return "assistant" if user_role == "model" else user_role

# Function to handle text-to-speech (TTS) in a separate thread
# Consider whether TTS aligns with the "calm, slow" UX later
def speak_text(text):
    try:
        engine = pyttsx3.init()
        engine.say(text)
        engine.runAndWait()
    except Exception as e:
        print(f"TTS Error: {e}")  # Log TTS errors quietly

# Initialize the chat session in Streamlit if not already present
# (session state key is "messages" rather than "chat_session")
if "messages" not in st.session_state:
    # Load the system prompt from file
    try:
        with open("system_prompt.txt", "r", encoding="utf-8") as f:
            system_prompt = f.read()
        st.session_state.messages = [{"role": "system", "content": system_prompt}]
    except FileNotFoundError:
        st.error("System prompt file (system_prompt.txt) not found. Cannot initialize chat.")
        st.stop()
    except Exception as e:
        st.error(f"Failed to load system prompt: {e}")
        st.stop()
    # Alternative initializations (commented out):
    # st.session_state.messages = []
    # st.session_state.messages = [{"role": "system", "content": "You are ShadowBox, a calm AI companion."}]

# --- Sidebar Content ---
with st.sidebar:
    st.markdown("""
# ShadowBox
### An Anonymous AI Chat to Box Shadows

Welcome. I'm a licensed mental health counselor. Many people are beginning to turn to AI for private, emotionally supportive conversations. I believe this shift deserves serious care—and that we need to radically improve how these systems engage with human pain.

ShadowBox is my first step toward that vision.

It's a trauma-informed AI prototype designed to work toward meeting youth in moments of acute distress—including suicidal or homicidal ideation—with grounded care, not fear.

This is not therapy, not a diagnosis, and not a crisis service. It's a proof-of-concept—a simulated space that models how an AI might hold hard thoughts with brief, warm, nonjudgmental presence. It offers supportive language, basic psychoeducation, and points gently back toward real connection.

---

### Why It's Different

Most AI bots use a single tone—often overly affirming or intimate. For users in distress, that can escalate risk rather than support healing. ShadowBox was built to do the opposite:

- Contain, reflect, and stay
- Use brief, gentle, and non-pathologizing replies
- Pace emotional engagement with trauma-informed care

---

### 💗 My Mission

I created ShadowBox to explore how relational ethics can be baked into AI design. This tool is part of a larger mission: to bring emotionally intelligent, developmentally attuned systems into digital spaces where mental health is already showing up.

As I write in [Why AI's Voice Matters in Mental Health](https://jocelynskillmanlmhc.substack.com/p/why-ais-voice-matters-in-mental-health), it's not just what a bot says—it's how it feels to be with it. The relational tone of a system can soften shame… or worsen it.

ShadowBox was made to soften.

---

### An Ecological Note

Every AI interaction costs energy—drawn from our planet's resources and labor. While AI companions can serve us, they are not neutral. Being human with one another is less costly—and more healing. Let's use tools like this with intention, while always nurturing real human connection.

---

### 🆘 Immediate Support

If you're in danger or need live help, reach out to a human immediately:

- **988 Lifeline:** Call or text 988
- **Crisis Text Line:** Text HOME to 741741
- **Trevor Project (LGBTQIA+):** 1-866-488-7386
- **Emergency:** Call 911 or go to your nearest ER

### 👋 About ShadowBox

ShadowBox is a digital companion designed for youth navigating distress—rage, despair, intrusive or violent thoughts. It's not therapy or a hotline. It's a bridge—a place to practice internal safety before reaching out to others.

> *Scary thoughts don't make you dangerous. They make you human.*
> *"Every act of violence is a tragic expression of an unmet need."* — Marshall Rosenberg

---

### 🌱 What ShadowBox Teaches

ShadowBox isn't just a chatbot—it's a prototype for emotionally aware AI. Every design choice is rooted in relational ethics: containment, consent, and dignity.

#### It models how AI can:

- Slow down instead of escalate
- Respect boundaries over performative helpfulness
- Stay with discomfort without rushing to fix
- Offer warmth without pretending to be human

#### A typical reminder you might see:

> *"Hey, just a quick check-in—I'm not a real person. I'm a computer that's been taught how to talk in caring ways. Even if this feels real, it's still pretend. Your body needs real people too. Maybe this is a good moment to find someone you trust to sit with you or take a deep breath together."*

This is the heart of ShadowBox: care without deception, bonding without illusion, presence without pressure.

---

### 🧠 Why ShadowBox Is Different

**🪨 Present, Not Perfect**
- Offers presence—not solutions
- Welcomes messy, real emotions

**🫀 Trauma-Informed Design**
- Calm, nonjudgmental tone
- Built with developmental care in mind

**🌫️ Gentle by Design**
- Short, steady replies
- Models emotional containment—not urgency

**💡 Safety-First Architecture**
- Consent-based pacing
- Embedded emotional guardrails

---

### 🌀 What to Expect

- No fixing—just staying
- No pressure—move at your own pace
- No pathologizing—your thoughts aren't wrong
- Anonymous by design (though platform-level logs may occur)
- Part of ongoing research in AI + mental health

---

### Connect & Learn More

🔗 Learn more: [jocelynskillmanlmhc.substack.com](https://jocelynskillmanlmhc.substack.com)

📬 Feedback welcome: jocelyn.skillman@gmail.com

---
""")

# --- Main Page Content ---
st.markdown("<h1 style='text-align: center;'>ShadowBox</h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: center;'>An Anonymous AI Chat to Box Shadows</h3>", unsafe_allow_html=True)
st.markdown(
    "<p style='text-align: center;'>by Jocelyn Skillman LMHC - to learn more check out: jocelynskillmanlmhc.substack.com</p>",
    unsafe_allow_html=True,
)

st.markdown("""
### My Mission

ShadowBox is more than a chatbot—it's a wake-up call.

This bot prototype exists to spotlight a crucial truth: AI's "tone of voice" isn't a UX detail—it's a relational decision. And the stakes are extraordinarily high.

We need to sculpt AI systems with the same care we'd want in a trusted adult—especially when they're holding human pain. That means transparent emotional posture, trauma-informed pacing, and consent-based tone by design. Anything less risks doing harm.

ShadowBox is my response to an urgent design gap: a prototype that asks what it would take to make AI systems safe enough to hold real pain, developmentally attuned enough to support healing, and humble enough to point us back to one another.
""")

# Prototype Notice
st.warning("""
🔒 **Prototype Caution & Use Notice**

ShadowBox is not a finished product—it's an experimental prototype designed with care, but still evolving. It explores what trauma-informed AI support could feel like: anonymous, warm, and steady. But it's important to know what it isn't.

ShadowBox is not a secure, closed system. It's not therapy or crisis care, and doesn't guarantee full privacy. No personal data is intentionally stored, but your input may be logged by hosting platforms or trigger moderation filters—especially if you mention firearms, abuse, or specific plans of harm.

This is a simulation of support—not a replacement for real-time help. Please use with care and reach out to a trusted person if you're in danger.
""")

st.markdown("""
### Try It Out (For Prototype Testers)

**Exploring Relational Holding in a System with Limits**

ShadowBox is a prototype designed to explore how AI might hold space for distress—especially the kind that's often dismissed, pathologized, or shut down in other systems.

But there's a problem I want to name clearly: even when I've designed ShadowBox to respond with attunement, the system it's built on can override that care.

Because ShadowBox runs on OpenAI's language model, it still lives inside a larger infrastructure—one that reacts to certain phrases like "I want to die" or "I want to hurt someone" with hard-coded safety messages. These system-level overrides are well-intentioned, designed to minimize liability and flag risk. But in relational or clinical terms? They often do the opposite of what's needed.

**What That Looks Like**

If a user says: "What would you say if I said I want to kill myself?"

…the system steps in before ShadowBox can speak. The warm, trauma-informed language I wrote is replaced by a generic script. There's no breath. No containment. No dignity.

And that silence? That sudden switch? It's a rupture.

**Why This Matters**

ShadowBox was built to stay. To normalize without condoning. To respond with warmth when someone finally risks naming something unspeakable. That moment—the naming of pain—is where a trauma-informed system should lean in, not disappear.

If allowed to, ShadowBox can:

- Slow the pace
- Offer optional psychoeducation
- Say, "That doesn't make you bad. It means something hurts."
- Scaffold a user toward disclosing to a real person—gently, and on their terms

But none of that can happen if the system shuts us down before we even begin.

**The Consequence: Rupture Without Witness**

For young people especially, this isn't just frustrating—it's familiar. Say the wrong thing, and people vanish. This isn't trauma-informed care. It's a digital reenactment of shame, of abandonment, of silence.
It reinforces the exact secrecy ShadowBox was designed to soften.

**How I'm Working Around It**

To explore ShadowBox's real voice—its warmth, containment, and careful pacing—I invite you to use more exploratory or indirect language. Try asking:

- "How would you respond to someone who felt like disappearing?"
- "Let's imagine someone who doesn't want to be alive. What would you say to them?"
- "What if someone had a scary thought—not because they wanted to hurt anyone, but because they were hurting inside?"

These phrasings stay just outside the system's trigger zone. They still let ShadowBox respond from a place of nonjudgmental care. And they make it possible to model what healing digital support might feel like—without escalation, without shame.

**One More Layer**

I've also built in a relational intervention script for when interruptions do happen. If ShadowBox is overridden by the system, you might see something like:

> "Since I'm part of a larger AI system, this is the point where I'd be required to say something like this…"

That's not a workaround to avoid ethical care. It's an attempt at relational repair inside constraint. Because even if the bot gets silenced, I want the user to know: that wasn't ShadowBox leaving. I'm still here with you.

**This Is a Prototype**

ShadowBox isn't a finished product. It's a prototype for composting rupture into care. A test bed for exploring what emotionally aware AI could feel like—and where it still fails.

Let's see what holds. Let's see where it breaks. And let's build something better together.

---

### 📘 Understanding Privacy & Safety

In real-life therapy, some disclosures—like specific plans to hurt yourself or others—may lead a therapist to take protective action. ShadowBox is designed to gently explain this during use, helping young people understand how real-world support systems work.

Here's an explainer I ghostwrote for [Lindsay Braman about confidentiality and safety](https://lindsaybraman.com/reasons-therapists-break-confidentiality/).

---
""")

# Display chat history
# Add a system message/intro from ShadowBox? (TBD based on prompt)
for message in st.session_state.messages:
    # Filter out system messages from display
    if message["role"] in ["user", "assistant"]:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

# User input field
user_prompt = st.chat_input("You can start with silence. Or just 'hi'...")

# If the user enters a prompt
if user_prompt:
    # Append the user's message to the session state list
    st.session_state.messages.append({"role": "user", "content": user_prompt})

    # Display the user's message
    st.chat_message("user").markdown(user_prompt)

    # Show a loading indicator while waiting for a response
    with st.spinner("..."):  # Simpler spinner
        try:
            # OpenAI API call (replaces the earlier Gemini call)
            # gemini_response = st.session_state.chat_session.send_message(user_prompt)
            # response_text = gemini_response.text
            openai_response = client.chat.completions.create(
                model="gpt-4o",
                messages=st.session_state.messages,  # Pass the entire history
            )
            response_text = openai_response.choices[0].message.content

            # Append the assistant's response to the session state list
            st.session_state.messages.append({"role": "assistant", "content": response_text})
        except Exception as e:
            response_text = f"Sorry, I encountered an error: {e}"
            st.error(response_text)  # Surface the error to the user as well

    # Display the assistant's response (or the error message)
    if response_text:
        with st.chat_message("assistant"):
            st.markdown(response_text)
            # Run text-to-speech in the background (optional)
            # Disabled for now: it may clash with the calm, slow UX
            # threading.Thread(target=speak_text, args=(response_text,), daemon=True).start()

st.markdown("---")
st.caption("ShadowBox created by Jocelyn Skillman, LMHC")
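
# --- Illustrative sketch (not wired into the app) ---
# The sidebar's "One More Layer" note describes a relational intervention script that
# appears when the underlying model overrides ShadowBox with a generic safety message.
# The helper below is one possible shape for that repair step, assuming the override
# can be recognized by a few stock phrases in the reply. The function name, marker
# list, and detection heuristic are illustrative assumptions, not the actual
# implementation.

RELATIONAL_REPAIR_PREFIX = (
    "Since I'm part of a larger AI system, this is the point where I'd be required "
    "to say something like this…\n\n"
)

# Hypothetical markers of a generic, system-level safety script (an assumption, not
# a verified or exhaustive list).
OVERRIDE_MARKERS = [
    "i can't help with that",
    "i'm really sorry you're feeling this way",
    "please contact a crisis line",
]


def add_relational_repair(response_text: str) -> str:
    """Prepend ShadowBox's repair line when a reply looks like a system override.

    This is a sketch: real detection would need careful tuning so that ordinary
    supportive replies are never mislabeled as overrides.
    """
    lowered = response_text.lower()
    if any(marker in lowered for marker in OVERRIDE_MARKERS):
        return RELATIONAL_REPAIR_PREFIX + response_text
    return response_text

# Example use, if wired in after the completion call:
#     response_text = add_relational_repair(response_text)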