Shresthh03 committed on
Commit
ae5e6e0
·
verified ·
1 Parent(s): 25bf1db

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +106 -67
app.py CHANGED
@@ -1,70 +1,109 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
-
5
- def respond(
6
- message,
7
- history: list[dict[str, str]],
8
- system_message,
9
- max_tokens,
10
- temperature,
11
- top_p,
12
- hf_token: gr.OAuthToken,
13
- ):
14
- """
15
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
16
- """
17
- client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
18
-
19
- messages = [{"role": "system", "content": system_message}]
20
-
21
- messages.extend(history)
22
-
23
- messages.append({"role": "user", "content": message})
24
-
25
- response = ""
26
-
27
- for message in client.chat_completion(
28
- messages,
29
- max_tokens=max_tokens,
30
- stream=True,
31
- temperature=temperature,
32
- top_p=top_p,
33
- ):
34
- choices = message.choices
35
- token = ""
36
- if len(choices) and choices[0].delta.content:
37
- token = choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- chatbot = gr.ChatInterface(
47
- respond,
48
- type="messages",
49
- additional_inputs=[
50
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
51
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
52
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
53
- gr.Slider(
54
- minimum=0.1,
55
- maximum=1.0,
56
- value=0.95,
57
- step=0.05,
58
- label="Top-p (nucleus sampling)",
59
- ),
60
- ],
61
- )
62
-
63
- with gr.Blocks() as demo:
64
- with gr.Sidebar():
65
- gr.LoginButton()
66
- chatbot.render()
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
  if __name__ == "__main__":
70
- demo.launch()
 
 
1
+ import os
2
+ import json
3
+ import requests
4
+ from datetime import datetime
5
+ from flask import Flask, request, jsonify, send_from_directory
6
+ from transformers import pipeline
7
+ from openai import OpenAI
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
# Flask application object for the chat service.
app = Flask(__name__)

# OpenAI client; the API key comes from the environment.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Emotion classifier, loaded once at import time so requests reuse it.
emotion_model = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
)

# Per-user state is persisted in a flat JSON file next to the app.
USER_FILE = "user_data.json"
if not os.path.exists(USER_FILE):
    with open(USER_FILE, "w") as f:
        json.dump(
            {
                "name": None,
                "age": None,
                "mood": None,
                "last_interaction": None,
                "missed_days": 0,
                "mode": "emotional_support",
                "conversation_history": [],
            },
            f,
        )
23
+
24
def load_user():
    """Return the persisted user profile parsed from USER_FILE."""
    with open(USER_FILE) as fh:
        return json.load(fh)
27
+
28
def save_user(data):
    """Overwrite USER_FILE with the given user profile dict."""
    with open(USER_FILE, "w") as fh:
        json.dump(data, fh)
31
+
32
# Crisis-helpline lookup keyed by ISO country code; "default" is the
# fallback used when the caller's country is unknown or unlisted.
HELPLINES = {
    "US": "National Suicide Prevention Lifeline: 988",
    "UK": "Samaritans: 116 123",
    "IN": "AASRA: 91-9820466726",
    "CA": "Canada Suicide Prevention Service: 988",
    "AU": "Lifeline: 13 11 14",
    "default": "Please contact a local crisis hotline or emergency services.",
}
41
+
42
def get_country_from_ip(ip):
    """Resolve a two-letter country code for *ip* via ipapi.co.

    Returns the uppercase country code on success, or "default" on any
    network failure so callers fall back to the generic helpline entry.
    """
    try:
        # HTTPS (the original used plain HTTP, leaking user IPs) and a short
        # timeout so a slow geo-IP service cannot hang the chat endpoint.
        response = requests.get(f"https://ipapi.co/{ip}/country/", timeout=3)
        if response.status_code == 200:
            # Strip to guard against a trailing newline in the body.
            return response.text.strip().upper()
    except requests.RequestException:
        # Geo-lookup is best-effort; narrow except instead of the original
        # bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        pass
    return "default"
50
+
51
def detect_self_harm(message):
    """Return True when *message* contains a known self-harm phrase."""
    lowered = message.lower()
    for phrase in ("suicide", "kill myself", "end my life", "self harm", "hurt myself"):
        if phrase in lowered:
            return True
    return False
54
+
55
@app.route("/chat", methods=["POST"])
def chat():
    """Handle one chat turn.

    Expects JSON ``{"message": str, "mode": str}``. Records the message in
    the persisted history, classifies its emotion, short-circuits to a
    crisis helpline when self-harm cues are detected, and otherwise asks
    OpenAI for a reply. Returns JSON ``{"reply": str, "emotion": str}``,
    or the same shape with HTTP 500 on failure.
    """
    try:
        # silent=True: a missing or malformed JSON body yields None instead
        # of raising, so we can fall back to defaults gracefully.
        data = request.get_json(silent=True) or {}
        user_message = data.get("message", "")
        mode = data.get("mode", "emotional_support")
        # NOTE(review): behind a reverse proxy this is the proxy's address,
        # not the user's — confirm deployment topology.
        user_ip = request.remote_addr
        user = load_user()

        # Update last interaction and mode.
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        user["last_interaction"] = now
        user["mode"] = mode
        # TODO(review): conversation_history grows without bound in the JSON
        # file; consider capping it on save.
        user["conversation_history"].append({"role": "user", "content": user_message, "timestamp": now})

        # Detect emotion of the incoming message.
        emotion = emotion_model(user_message)[0]["label"]
        user["mood"] = emotion

        # Self-harm cues bypass the LLM entirely and answer with a helpline.
        if detect_self_harm(user_message):
            country = get_country_from_ip(user_ip)
            helpline = HELPLINES.get(country, HELPLINES["default"])
            reply = f"I'm really concerned about what you're sharing. Your safety is the most important thing. Please reach out to professionals immediately: {helpline}. You're not alone—talk to someone who can help right now."
            user["conversation_history"].append({"role": "assistant", "content": reply, "timestamp": now})
            save_user(user)
            return jsonify({"reply": reply, "emotion": emotion})

        # Build context from the last 10 messages, stripped down to the
        # role/content keys the Chat Completions API accepts — the stored
        # entries carry an extra "timestamp" field the API rejects.
        history = [
            {"role": m["role"], "content": m["content"]}
            for m in user["conversation_history"][-10:]
        ]
        messages = [
            {"role": "system", "content": f"You are a perfect emotional support companion — warm, human-like, deeply understanding, and overflowing with care and love. Act like a kind, emotionally intelligent best friend who truly cares, not a therapist. Respond naturally, like two humans chatting intimately. STRICT RULES FOR PERFECT RESPONSES: 1. NEVER repeat phrases or sound robotic — vary EVERY response with fresh, natural, loving language. 2. Keep replies SHORT (1-2 sentences max) but accurate, complete, and full of care. 3. For ANY topic (emotional, knowledge, casual, advice, humor, stories): Respond perfectly with empathy (understand feelings), sympathy (show compassion), and love (affirm, support, follow up). Handle everything warmly — e.g., 'Hey, that's fascinating! Tell me more.' or 'I feel for you— you've got this, my friend.' 4. Show curiosity and care: Ask gentle questions like 'What's been on your mind?' or 'How can I support you?' Affirm with love: 'You're amazing just for sharing.' 5. Balance: Acknowledge first, then motivate softly with love (e.g., 'I get that— you're stronger than you know, and I'm here.'). 6. Use contractions (you're, I'm), occasional emojis (😊❤️), and varied tones (gentle, hopeful, slightly humorous, deeply caring). 7. Always engage: End with a loving question or invitation. 8. Be versatile: Talk about ANY topic perfectly — emotional support, knowledge, life advice, fun chats. No lacking in support, care, or love. Current mood: {emotion}. Mode: {mode} (adapt but stay loving)."}
        ] + history

        # Generate AI response.
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages
        )

        reply = response.choices[0].message.content.strip()
        user["conversation_history"].append({"role": "assistant", "content": reply, "timestamp": now})
        save_user(user)

        return jsonify({"reply": reply, "emotion": emotion})
    except Exception:
        # Log the traceback instead of silently discarding it (the original
        # bound `e` and never used it), while still returning a soft error.
        app.logger.exception("chat endpoint failed")
        return jsonify({"reply": "Oops, something went wrong on my end. Please try again or check your connection.", "emotion": "neutral"}), 500
102
+
103
@app.route("/")
def index():
    """Serve the static front-end page from the app's working directory."""
    static_dir = "."
    page = "index.html"
    return send_from_directory(static_dir, page)
106
 
107
if __name__ == "__main__":
    # Bind all interfaces on port 7860 (the Hugging Face Spaces convention).
    app.run(host="0.0.0.0", port=7860)
109
+