import gradio as gr
from transformers import pipeline

# In-memory store for per-person memory (context + interpretation history)
memory_db = {}
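# Each entry is keyed by the person's name; the values below are illustrative only:
# memory_db["Alex"] = {
#     "closeness": "Friend",
#     "personality": "Sarcastic, hates small talk",
#     "relationship": "We used to date, now friends",
#     "history": [{"message": ..., "prompt_used": ..., "interpretation": ...}],
# }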

# Load the model
interpreter = pipeline("text2text-generation", model="google/flan-t5-base")
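# The text2text-generation pipeline returns a list of dicts,
# e.g. [{"generated_text": "..."}], which is indexed below.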

# Few-shot examples to guide the model
few_shot_examples = """
Example 1:
"sure. whatever you want"
→ Passive-aggressive, likely annoyed but masking it with false agreement.
Example 2:
"we’ll see"
→ Avoidant, doesn’t want to commit or say no directly.
Example 3:
"it's fine, don't worry about it"
→ Likely masking frustration or disappointment, doesn’t want to express upset directly.
"""

# Interpret message and return explanation + memory log
def interpret_message(msg, name, closeness, traits, relation, past_events, feelings, use_memory):
    if not msg.strip() or not name.strip():
        return "Please enter both a message and a person’s name.", ""
    name = name.strip().title()
    if use_memory:
        if name in memory_db:
            # Backfill any fields the user left blank from what is already stored
            existing = memory_db[name]
            closeness = closeness or existing.get("closeness")
            traits = traits or existing.get("personality")
            relation = relation or existing.get("relationship")
        else:
            memory_db[name] = {
                "closeness": closeness,
                "personality": traits,
                "relationship": relation,
                "history": []
            }
prompt = f"""{few_shot_examples} | |
New message from {name}: "{msg}" | |
Known context: | |
- Closeness: {closeness} | |
- Personality: {traits} | |
- Relationship: {relation} | |
- Similar Past Events: {past_events} | |
- Your Feelings: {feelings} | |
Based on the message and context, explain what the person is *really thinking* or feeling but not saying directly. | |
Infer their hidden intention, emotional state, and subtext. Then suggest how I should respond. | |
""" | |
    # Generate the interpretation
    result = interpreter(prompt, max_new_tokens=80)[0]["generated_text"]
    # Refresh the stored context and log this exchange
    if use_memory:
        memory_db[name]["closeness"] = closeness
        memory_db[name]["personality"] = traits
        memory_db[name]["relationship"] = relation
        memory_db[name]["history"].append({
            "message": msg,
            "prompt_used": prompt,
            "interpretation": result
        })
memory_log = "" | |
if use_memory and memory_db[name]["history"]: | |
memory_log = "\n\n".join( | |
[f"• \"{entry['message']}\"\n → {entry['interpretation']}" for entry in memory_db[name]["history"]] | |
) | |
return result, memory_log | |

# Memory Advisor chatbot
def memory_chat(user_input, person_name):
    name = person_name.strip().title()
    if name not in memory_db:
        return "There's no memory for this person yet. Try interpreting a message first."
    history = memory_db[name]["history"]
    past_text = "\n\n".join([f"- {h['message']}\n → {h['interpretation']}" for h in history])
    chat_prompt = f"""You are a relationship and social communication expert trained in human behavior.
Here is what you know about {name}'s recent messages and past behavior:
{past_text}
The user now asks: {user_input}
Respond with a supportive and emotionally intelligent answer based on past messages.
Avoid repeating the user's input. Be specific. If {name} is likely upset, say it.
If the user should give space or check in, say so.
Only output your advice in 2–4 sentences. No explanation of what you’re doing.
"""
    response = interpreter(chat_prompt, max_new_tokens=80)[0]["generated_text"]
    return response

# Gradio UI
with gr.Blocks(title="Textual Chemistry") as app:
    gr.Markdown("## 💬 Textual Chemistry\n_What do they really mean?_")
    with gr.Row():
        person_name = gr.Textbox(label="Person’s Name", placeholder="e.g., Alex")
        message = gr.Textbox(label="Message Received", placeholder="Paste the message here...", lines=3)
    with gr.Row():
        closeness = gr.Dropdown(
            choices=["Stranger", "Acquaintance", "Friend", "Close Friend", "Partner", "Ex", "Boss", "Teacher"],
            label="Closeness Level")
        use_memory = gr.Checkbox(label="Use memory for this person?", value=True)
    sender_traits = gr.Textbox(label="Background on the Sender", placeholder="e.g., Sarcastic, hates small talk...")
    relationship = gr.Textbox(label="Relationship History", placeholder="e.g., We used to date, now friends")
    past_events = gr.Textbox(label="Similar Past Situations", placeholder="e.g., Last time I canceled they acted cold")
    user_feelings = gr.Textbox(label="Your Current Feelings", placeholder="e.g., I feel confused or anxious")
    output = gr.Textbox(label="🧠 Interpretation", lines=6)
    history_output = gr.Textbox(label="🧠 Memory Log for This Person", lines=10)
    submit_btn = gr.Button("Interpret Message")
    submit_btn.click(
        fn=interpret_message,
        inputs=[message, person_name, closeness, sender_traits, relationship, past_events, user_feelings, use_memory],
        outputs=[output, history_output]
    )
    gr.Markdown("---")
    gr.Markdown("## 🧠 Memory Advisor Chatbot")
    with gr.Row():
        chatbot_input = gr.Textbox(label="Ask a follow-up question about this person")
        chatbot_output = gr.Textbox(label="🤖 Advisor Response", lines=6)
    chatbot_btn = gr.Button("Ask Memory Bot")
    chatbot_btn.click(fn=memory_chat, inputs=[chatbot_input, person_name], outputs=chatbot_output)

app.launch()
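# On a hosted Space, launch() is enough; when running locally you can pass
# share=True (a standard Gradio launch option) to get a temporary public URL.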