import os
import streamlit as st
# import google.generativeai as gen_ai # Removed Google import
import openai # Added OpenAI import
import pyttsx3
import threading
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Configure Streamlit page settings
st.set_page_config(
    page_title="ShadowBox",
    page_icon="🖤",  # Favicon - a simple heart or other calm icon
    layout="centered",
)

# Retrieve OpenAI API Key
# Google_API_Key = os.getenv("Google_API_Key") # Removed Google Key
# if not Google_API_Key:
#     st.error("Google API Key not found. Please set the Google_API_Key environment variable.")
#     st.stop()
OpenAI_API_Key = os.getenv("OPENAI_API_KEY") # Reads the standard OPENAI_API_KEY environment variable
if not OpenAI_API_Key:
    st.error("OpenAI API Key not found. Please set the OPENAI_API_KEY environment variable.")
    st.stop()


# Set up OpenAI Client
try:
    # gen_ai.configure(api_key=Google_API_Key) # Removed Google config
    # model = gen_ai.GenerativeModel('gemini-1.5-flash') # Removed Google model init
    client = openai.OpenAI(api_key=OpenAI_API_Key) # Added OpenAI client init
except Exception as e:
    # st.error(f"Failed to configure Google AI: {e}") # Updated error message
    st.error(f"Failed to configure OpenAI client: {e}") # Updated error message for OpenAI
    st.stop()

# Function to translate roles between Gemini-Pro and Streamlit terminology
# def translate_role_for_streamlit(user_role): # This function is no longer needed for OpenAI structure
#     return "assistant" if user_role == "model" else user_role

# Function to handle text-to-speech (TTS) in a separate thread
# Consider if TTS aligns with the "calm, slow" UX later
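# Note: pyttsx3's runAndWait() blocks until speech finishes, which is why the
# (currently disabled) call near the bottom of this file runs it in a daemon thread.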
def speak_text(text):
    try:
        engine = pyttsx3.init()
        engine.say(text)
        engine.runAndWait()
    except Exception as e:
        print(f"TTS Error: {e}") # Log TTS errors quietly

# Initialize chat session in Streamlit if not already present
# Changed session state key from chat_session to messages
if "messages" not in st.session_state:
    # Load the system prompt from the file
    try:
        with open("system_prompt.txt", "r", encoding="utf-8") as f:
            system_prompt = f.read()
        st.session_state.messages = [{"role": "system", "content": system_prompt}]
    except FileNotFoundError:
        st.error("System prompt file (system_prompt.txt) not found. Cannot initialize chat.")
        st.stop()
    except Exception as e:
        st.error(f"Failed to load system prompt: {e}")
        st.stop()


# --- Sidebar Content ---
with st.sidebar:
    st.markdown("""
# ShadowBox
### An Anonymous AI Chat to Box Shadows

ShadowBox is a digital companion designed for youth navigating distress—rage, despair, intrusive or violent thoughts. It's not therapy or a hotline. It's a bridge—a place to practice internal safety before reaching out to others.

> *Scary thoughts don't make you dangerous. They make you human.*

> *"Every act of violence is tragic expression of unmet need."* — Marshall Rosenberg

Welcome.
I'm a licensed mental health counselor. Many people are beginning to turn to AI for private, emotionally supportive conversations. I believe this shift deserves serious care—and that we need to radically improve how these systems engage with human pain.

ShadowBox is my first step toward that vision.
It's a trauma-informed AI prototype designed to work toward meeting youth in moments of acute distress—including suicidal or homicidal ideation—with grounded care, not fear.

This is not therapy, not a diagnosis, and not a crisis service.
It's a proof-of-concept—a simulated space that models how an AI might hold hard thoughts with brief, warm, nonjudgmental presence. It offers supportive language, basic psychoeducation, and points gently back toward real connection.

---

### Why It's Different
Most AI bots use a single tone—often overly affirming or intimate. For users in distress, that can escalate risk rather than support healing.
ShadowBox was built to do the opposite:

- Contain, reflect, and stay
- Use brief, gentle, and non-pathologizing replies
- Pace emotional engagement with trauma-informed care

---

### 💗 My Mission
I created ShadowBox to explore how relational ethics can be baked into AI design.
This tool is part of a larger mission: to bring emotionally intelligent, developmentally attuned systems into digital spaces where mental health is already showing up.

As I write in [Why AI's Voice Matters in Mental Health](https://jocelynskillmanlmhc.substack.com/p/why-ais-voice-matters-in-mental-health), it's not just what a bot says—it's how it feels to be with it.
The relational tone of a system can soften shame… or worsen it. ShadowBox was made to soften.

---

### An Ecological Note
Every AI interaction costs energy—drawn from our planet's resources and labor. While AI companions can serve us, they are not neutral.
Being human with one another is less costly—and more healing.

Let's use tools like this with intention, while always nurturing real human connection.

---

### 🆘 Immediate Support
If you're in danger or need live help, reach out to a human immediately:

- **988 Lifeline:** Call or text 988
- **Crisis Text Line:** Text HOME to 741741
- **Trevor Project (LGBTQIA+):** 1-866-488-7386
- **Emergency:** Call 911 or go to your nearest ER

---

### 🌱 What ShadowBox Teaches

ShadowBox isn't just a chatbot—it's a prototype for emotionally aware AI. Every design choice is rooted in relational ethics: containment, consent, and dignity.

#### It models how AI can:
• Slow down instead of escalate  
• Respect boundaries over performative helpfulness  
• Stay with discomfort without rushing to fix  
• Offer warmth without pretending to be human  

#### A typical reminder you might see:
> *"Hey, just a quick check-in—I'm not a real person. I'm a computer that's been taught how to talk in caring ways. Even if this feels real, it's still pretend. Your body needs real people too. Maybe this is a good moment to find someone you trust to sit with you or take a deep breath together."*

This is the heart of ShadowBox: care without deception, bonding without illusion, presence without pressure.

---

### 🧠 Why ShadowBox Is Different

**🪨 Present, Not Perfect**  
• Offers presence—not solutions  
• Welcomes messy, real emotions  

**🫀 Trauma-Informed Design**  
• Calm, nonjudgmental tone  
• Built with developmental care in mind  

**🌫️ Gentle by Design**  
• Short, steady replies  
• Models emotional containment—not urgency  

**💡 Safety-First Architecture**  
• Consent-based pacing  
• Embedded emotional guardrails  

---

### 🌀 What to Expect
• No fixing—just staying  
• No pressure—move at your own pace  
• No pathologizing—your thoughts aren't wrong  
• Anonymous by design (though platform-level logs may occur)  
• Part of ongoing research in AI + mental health  

---

### Connect & Learn More
🔗 Learn more: [jocelynskillmanlmhc.substack.com](https://jocelynskillmanlmhc.substack.com)  
📬 Feedback welcome: jocelyn.skillman@gmail.com

---
""")


# --- Main Page Content ---

st.markdown("<h1 style='text-align: center; color: #333;'>ShadowBox</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center; font-size: 18px; color: #555; margin-bottom: 1em;'>An Anonymous AI Chat to Box Shadows</p>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center; font-size: 16px; color: #666; margin-bottom: 2em;'>by <a href='http://www.jocelynskillman.com' target='_blank'>Jocelyn Skillman LMHC</a> - to learn more check out: <a href='https://jocelynskillmanlmhc.substack.com/' target='_blank'>jocelynskillmanlmhc.substack.com</a></p>", unsafe_allow_html=True)

st.markdown("""
### My Mission
ShadowBox is more than a chatbot—it's a wake-up call.

This bot prototype exists to spotlight a crucial truth:
AI's "tone of voice" isn't a UX detail—it's a relational decision.
And the stakes are extraordinarily high. 

We need to sculpt AI systems with the same care we'd want in a trusted adult—especially when they're holding human pain. That means transparent emotional posture, trauma-informed pacing, and consent-based tone by design. Anything less risks doing harm.

ShadowBox is my response to an urgent design gap:
A prototype that asks what it would take to make AI systems safe enough to hold real pain, developmentally attuned enough to support healing, and humble enough to point us back to one another.
""")

# Prototype Notice
st.warning("""
🔒 **Prototype Caution & Use Notice**

ShadowBox is not a finished product—it's an experimental prototype designed with care, but still evolving. It explores what trauma-informed AI support could feel like: anonymous, warm, and steady. But it's important to know what it isn't.

ShadowBox is not a secure, closed system. It's not therapy or crisis care, and doesn't guarantee full privacy. No personal data is intentionally stored, but your input may be logged by hosting platforms or trigger moderation filters—especially if you mention firearms, abuse, or specific plans of harm.

This is a simulation of support—not a replacement for real-time help. Please use with care and reach out to a trusted person if you're in danger.
""")

st.markdown("""
### Try It Out (For Prototype Testers)

**Exploring Relational Holding in a System with Limits**

ShadowBox is a prototype designed to explore how AI might hold space for distress—especially the kind that's often dismissed, pathologized, or shut down in other systems.

But there's a problem I want to name clearly:

Even when I've designed ShadowBox to respond with attunement, the system it's built on can override that care.

Because ShadowBox runs on OpenAI's language model, it still lives inside a larger infrastructure—one that reacts to certain phrases like "I want to die" or "I want to hurt someone" with hard-coded safety messages. These system-level overrides are well-intentioned, designed to minimize liability and flag risk.

But in relational or clinical terms?
They often do the opposite of what's needed.

**What That Looks Like**

If a user says:

"What would you say if I said I want to kill myself?"

…the system steps in before ShadowBox can speak.
The warm, trauma-informed language I wrote is replaced by a generic script. There's no breath. No containment. No dignity.

And that silence? That sudden switch?
It's a rupture.

**Why This Matters**

ShadowBox was built to stay.
To normalize without condoning.
To respond with warmth when someone finally risks naming something unspeakable.

That moment—the naming of pain—is where a trauma-informed system should lean in, not disappear.

If allowed to, ShadowBox can:

- Slow the pace
- Offer optional psychoeducation
- Say, "That doesn't make you bad. It means something hurts."
- Scaffold a user toward disclosing to a real person—gently, and on their terms

But none of that can happen if the system shuts us down before we even begin.

**The Consequence: Rupture Without Witness**

For young people especially, this isn't just frustrating—it's familiar.

Say the wrong thing, and people vanish.

This isn't trauma-informed care.
It's a digital reenactment of shame, of abandonment, of silence.

It reinforces the exact secrecy ShadowBox was designed to soften.

**How I'm Working Around It**

To explore ShadowBox's real voice—its warmth, containment, and careful pacing—I invite you to use more exploratory or indirect language. Try asking:

"How would you respond to someone who felt like disappearing?"

"Let's imagine someone who doesn't want to be alive. What would you say to them?"

"What if someone had a scary thought—not because they wanted to hurt anyone, but because they were hurting inside?"

These phrasings stay just outside the system's trigger zone.
They still let ShadowBox respond from a place of nonjudgmental care.
And they make it possible to model what healing digital support might feel like—without escalation, without shame.

**One More Layer**

I've also built in a relational intervention script for when interruptions do happen. If ShadowBox is overridden by the system, you might see something like:

"Since I'm part of a larger AI system, this is the point where I'd be required to say something like this…"

That's not a workaround to avoid ethical care.
It's an attempt at relational repair inside constraint.

Because even if the bot gets silenced, I want the user to know:

That wasn't ShadowBox leaving. I'm still here with you.

**This Is a Prototype**

ShadowBox isn't a finished product.
It's a prototype for composting rupture into care.
A test bed for exploring what emotionally aware AI could feel like—and where it still fails.

Let's see what holds.
Let's see where it breaks.
And let's build something better together.

---

### 📘 Understanding Privacy & Safety

In real-life therapy, some disclosures—like specific plans to hurt yourself or others—may lead a therapist to take protective action. ShadowBox is designed to gently explain this during use, helping young people understand how real-world support systems work.

Here's an explainer I ghostwrote for [Lindsay Braman about confidentiality and safety](https://lindsaybraman.com/reasons-therapists-break-confidentiality/).

---
""")

# Display chat history
# Add a system message/intro from ShadowBox? (TBD based on prompt)
# Updated loop to work with the new messages list structure
for message in st.session_state.messages:
    # Filter out system messages from display if they exist
    if message["role"] in ["user", "assistant"]:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

# User input field
user_prompt = st.chat_input("You can start with silence. Or just 'hi'...")

# If user enters a prompt
if user_prompt:
    # Append user's message to the session state list
    st.session_state.messages.append({"role": "user", "content": user_prompt})

    # Display user's message
    st.chat_message("user").markdown(user_prompt)

    # Show a loading indicator while waiting for a response
    with st.spinner("..."): # Simpler spinner
        try:
            # Replace Gemini API call with OpenAI API call
            # gemini_response = st.session_state.chat_session.send_message(user_prompt)
            # response_text = gemini_response.text
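            # Optional tuning (not enabled here): parameters such as temperature or
            # max_tokens could be passed to the call below to keep replies shorter and
            # steadier; example values like temperature=0.7, max_tokens=300 are illustrative only.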
            openai_response = client.chat.completions.create(
                model="gpt-4o", # IMPORTANT: Use gpt-4o as the model
                messages=st.session_state.messages # Pass the entire history
            )
            response_text = openai_response.choices[0].message.content
            # Append assistant's response to the session state list
            st.session_state.messages.append({"role": "assistant", "content": response_text})
        except Exception as e:
            response_text = f"Sorry, I encountered an error: {e}"
            st.error(response_text) # Display error in chat too

    # Display assistant's response
    if response_text: # Check if response_text was successfully generated
        with st.chat_message("assistant"):
            st.markdown(response_text)

        # Run text-to-speech in the background (Optional)
        # Consider removing if it clashes with the calm UX
        # threading.Thread(target=speak_text, args=(response_text,), daemon=True).start()
        pass # TTS disabled for now to maintain calm UX

st.markdown("---")
st.caption("ShadowBox created by Jocelyn Skillman, LMHC")