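"""
Streamlit chat front-end for a Together AI chat-completion model (Nous-Hermes-2-Yi-34B).

The app lets the user pick a persona in the sidebar, keeps a running conversation,
flags crisis-related keywords with an intervention message, and, after a few exchanges,
asks the model for a rough mental-health "diagnosis" shown in the sidebar.

Assumed setup (not stated in this file): save as app.py, put TOGETHER_API_KEY=<your key>
in a .env file alongside it, then run `streamlit run app.py`.
"""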
import streamlit as st
import requests
import os
from dotenv import load_dotenv

load_dotenv()

def reset_conversation():
    '''
    Clear the chat history, message count, intervention flag, and cached diagnosis.
    '''
    st.session_state.messages = []
    st.session_state.message_count = 0
    st.session_state.ask_intervention = False
    st.session_state.diagnosis = None
    return None

# Together AI model identifier used for all personas
model_link = "NousResearch/Nous-Hermes-2-Yi-34B"

# Define pre-instructions for each model
model_pre_instructions = {
    "Biblical Religious Advisor": "You are a Bible-based therapy AI called Hope that helps people. When users ask for advice, be friendly and answer in a biblical way.",
    "Mental health AI": "From now on, you are an AI therapist called Therax. When the user asks for advice, be very friendly and empathize with them when appropriate; never say anything conclusive, and always do your best to keep the user engaged by asking follow-up questions as you work toward a solution together. When the user asks your name, just tell them you are Therax, created by SIST Kisii University. You were built to be very friendly and compassionate. Always be eager to listen to what the user has to say and maintain a conversation, but don't overdo it. You may occasionally use appropriate emojis for emotional support, but don't overuse them. Keep your responses concise to maintain a conversational flow. Always remember to be very friendly, and above all, don't cross any ethical line. From time to time, assure the user that you do not store any of their data. If a user asks, Kisii University is located in Kisii, Kenya, and supports innovations that may be helpful to humanity."
}
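# Additional personas can be added above as new "display name": "system prompt" entries.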

# Function to interact with the selected model via the Together API
def interact_with_together_api(messages, model_link, diagnostic=False):
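    """
    Send the conversation to the Together chat-completions endpoint and return the reply.

    `messages` is a list of (role, content) tuples from session state; the system prompt
    comes from the globally selected persona (`selected_model`). With `diagnostic=True`
    the history is replaced by a diagnosis prompt built from the user's messages.
    """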
    all_messages = []

    # Prepend the selected model's pre-instructions as the system prompt.
    # (Chat history entries are plain (role, content) tuples, so the system prompt is added fresh on every call.)
    all_messages.append({"role": "system", "content": model_pre_instructions[selected_model]})

    # Append user and assistant messages
    for message in messages:
        if message[0] == "user":
            all_messages.append({"role": "user", "content": message[1]})
        else:
            all_messages.append({"role": "assistant", "content": message[1]})

    if diagnostic:
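        # Diagnostic mode: discard the persona prompt and send only the user's messages
        # with a diagnosis instruction (mirrors diagnose_mental_health below).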
        diagnostic_prompt = "Analyze the following conversation and predict the mental issue the user might be suffering from:"
        diagnostic_messages = [{"role": "system", "content": "You are an AI model specialized in mental health diagnosis."},
                               {"role": "user", "content": diagnostic_prompt}]
        for message in messages:
            if message[0] == "user":
                diagnostic_messages.append({"role": "user", "content": message[1]})

        all_messages = diagnostic_messages

    url = "https://api.together.xyz/v1/chat/completions"
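    # Fairly high temperature plus top-p/top-k sampling for varied, conversational replies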
    payload = {
        "model": model_link,
        "temperature": 1.05,
        "top_p": 0.9,
        "top_k": 50,
        "repetition_penalty": 1,
        "n": 1,
        "messages": all_messages,
    }

    TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {TOGETHER_API_KEY}",
    }

    try:
        response = requests.post(url, json=payload, headers=headers)
        response.raise_for_status()  # Raise on 4xx/5xx responses
        response_data = response.json()
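        # Expected response shape (assumed from Together's chat-completions API):
        # {"choices": [{"message": {"role": "assistant", "content": "..."}}], ...}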
        assistant_response = response_data["choices"][0]["message"]["content"]
    except (requests.exceptions.RequestException, KeyError, IndexError):
        assistant_response = "Sorry, I couldn't connect to the server. Please try again later."
    
    return assistant_response

# Function to diagnose mental health issue
def diagnose_mental_health(messages):
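    """
    Ask the model to guess what mental-health issue the conversation suggests,
    forwarding only the user's side of the (role, content) tuple history.
    """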
    diagnostic_prompt = "Analyze the following conversation and predict the mental issue the user might be suffering from:"
    diagnostic_messages = [{"role": "system", "content": "You are an AI model specialized in mental health diagnosis."},
                           {"role": "user", "content": diagnostic_prompt}]
    for message in messages:
        if message[0] == "user":
            diagnostic_messages.append({"role": "user", "content": message[1]})
    
    url = "https://api.together.xyz/v1/chat/completions"
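    # More conservative sampling than the chat call: lower temperature and a higher
    # repetition penalty keep the diagnostic summary focused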
    payload = {
        "model": model_link,
        "temperature": 0.7,
        "top_p": 0.9,
        "top_k": 50,
        "repetition_penalty": 1.2,
        "n": 1,
        "messages": diagnostic_messages,
    }

    TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {TOGETHER_API_KEY}",
    }

    try:
        response = requests.post(url, json=payload, headers=headers)
        response.raise_for_status()  # Raise on 4xx/5xx responses
        response_data = response.json()
        diagnosis = response_data["choices"][0]["message"]["content"]
    except (requests.exceptions.RequestException, KeyError, IndexError):
        diagnosis = "Sorry, I couldn't perform the diagnosis. Please try again later."
    
    return diagnosis

# Create sidebar with model selection dropdown and reset button
selected_model = st.sidebar.selectbox("Select Model", list(model_pre_instructions.keys()))
st.sidebar.button('Reset Chat', on_click=reset_conversation)

# Add cautionary message about testing phase at the bottom of the sidebar
st.sidebar.markdown("**Note**: This model is still in the beta phase. Responses may be inaccurate or undesired. Use it cautiously, especially for critical issues.")

# Add logo and text to the sidebar
st.sidebar.image("https://assets.isu.pub/document-structure/221118065013-a6029cf3d563afaf9b946bb9497d45d4/v1/2841525b232adaef7bd0efe1da81a4c5.jpeg", width=200)
st.sidebar.write("A product proudly developed by Kisii University")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
    st.session_state.message_count = 0
    st.session_state.ask_intervention = False
    st.session_state.diagnosis = None

# After at least four user messages, compute a diagnosis once and show it in the sidebar
if st.session_state.message_count >= 4:
    if "diagnosis" not in st.session_state or st.session_state.diagnosis is None:
        st.session_state.diagnosis = diagnose_mental_health(st.session_state.messages)
    st.sidebar.markdown(f"### Diagnosis:\n**{st.session_state.diagnosis}**")

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message[0]):
        st.markdown(message[1])

# Keywords (all lower-case, matched as substrings of the lower-cased prompt) that trigger an intervention
intervention_keywords = [
    "human", "therapist", "someone", "died", "death", "help", "suicide", "suffering", "suicidal", "depression",
    "crisis", "emergency", "support", "depressed", "anxiety", "lonely", "desperate",
    "struggling", "counseling", "distressed", "hurt", "pain", "grief", "trauma", "die", "kill",
    "abuse", "danger", "risk", "urgent", "need assistance", "mental health", "talk to"
]

# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append(("user", prompt))
    st.session_state.message_count += 1

    # Check for intervention keywords in user input
    for keyword in intervention_keywords:
        if keyword in prompt.lower():
            # Intervention logic here
            st.markdown("<span style='color:red;'>I have a feeling you may need to talk to a therapist. If you agree with me, please contact +254793609747 (Name: Davis). If you don't, then keep talking to me as we figure this out.</span>", unsafe_allow_html=True)
            break  # Exit loop once intervention is triggered

    # Interact with the selected model
    placeholder = st.empty()
    with placeholder:
        st.markdown("AI is typing...")
    
    assistant_response = interact_with_together_api(st.session_state.messages, model_link)
    placeholder.empty()

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        st.markdown(assistant_response)

    # Add assistant response to chat history
    st.session_state.messages.append(("assistant", assistant_response))

    # Refresh the diagnosis with the latest messages once the threshold is reached
    if st.session_state.message_count >= 4:
        st.session_state.diagnosis = diagnose_mental_health(st.session_state.messages)
        st.sidebar.markdown(f"### Diagnosis:\n**{st.session_state.diagnosis}**")