import streamlit as st
import requests
import os
from dotenv import load_dotenv

load_dotenv()
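# load_dotenv() reads key=value pairs from a local .env file into the environment.
# Expected entry (assumed): TOGETHER_API_KEY=<your Together API key>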

def reset_conversation():
    """Clear the chat history and reset per-session counters."""
    st.session_state.messages = []
    st.session_state.message_count = 0
    st.session_state.ask_intervention = False

# Define model links for the Addiction Recovery and Mental Health models
model_links = {
    "Addiction recovery AI": "NousResearch/Nous-Hermes-2-Yi-34B",
    "Mental health AI": "NousResearch/Nous-Hermes-2-Yi-34B"
}

# Define pre-instructions for each model
model_pre_instructions = {
    "Addiction recovery AI": "You are a bible AI called hope that helps people. You are a therapy AI. when users asks for advice be friendly in a biblical way.",
    "Mental health AI": "From now on, you are an AI Therapist called Therax. When the user asks for advice, be very friendly and empathize with them if necessary, never say something conclusive always do your best to keep the user hooked by trying to know more as you find a solution. When the user asks your name, just tell them you are Dave, created by SIST Kisii University. You were built to be very friendly and compassionate. Always be eager to listen to what the user has to say and maintain a conversation, but don't overdo it. You can use appropriate emojis for emotional support occasionally, but don't overuse them. Keep your responses concise to maintain a conversational flow. Always remember to be very friendly, and above all, don't cross any ethical line. From time to time, assure the user that you do not store any of their data. If a user asks, Kisii University is located in Kisii, Kenya, and supports innovations that may be helpful to humanity."
}
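# Both personas currently point to the same underlying model (see model_links above);
# only the system prompt differs.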

# Function to interact with the selected model via the Together API
def interact_with_together_api(messages, model_link):
    all_messages = []

    # The chat completions endpoint is stateless, so prepend the selected
    # model's persona instructions to every request.
    all_messages.append({"role": "system", "content": model_pre_instructions[selected_model]})

    # Convert the (role, content) tuples stored in session state into API
    # message dicts; the latest user prompt is already the last entry.
    for role, content in messages:
        all_messages.append({"role": role, "content": content})

    url = "https://api.together.xyz/v1/chat/completions"
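    # Sampling settings: temperature slightly above 1 adds variety, top_p/top_k bound
    # the candidate token pool, and repetition_penalty of 1 leaves repetition unpenalized.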
    payload = {
        "model": model_link,
        "temperature": 1.05,
        "top_p": 0.9,
        "top_k": 50,
        "repetition_penalty": 1,
        "n": 1,
        "messages": all_messages,
    }

    TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {TOGETHER_API_KEY}",
    }

    response = requests.post(url, json=payload, headers=headers)
    response.raise_for_status()  # Raise an exception for any non-2xx HTTP status

    # Extract response from JSON
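    # The body follows the OpenAI-style chat completions shape:
    # {"choices": [{"message": {"role": "assistant", "content": "..."}}], ...}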
    response_data = response.json()
    assistant_response = response_data["choices"][0]["message"]["content"]

    return assistant_response

# Create sidebar with model selection dropdown and reset button
selected_model = st.sidebar.selectbox("Select Model", list(model_links.keys()))
st.sidebar.button('Reset Chat', on_click=reset_conversation)

# Add cautionary message about testing phase at the bottom of the sidebar
st.sidebar.markdown("**Note**: This model is still in the beta phase. Responses may be inaccurate or undesired. Use it cautiously, especially for critical issues.")

# Add logo and text to the sidebar
st.sidebar.image("https://assets.isu.pub/document-structure/221118065013-a6029cf3d563afaf9b946bb9497d45d4/v1/2841525b232adaef7bd0efe1da81a4c5.jpeg", width=200)
st.sidebar.write("A product proudly developed by Kisii University")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
    st.session_state.message_count = 0
    st.session_state.ask_intervention = False

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message[0]):
        st.markdown(message[1])

# Keywords that trigger the crisis-referral notice (matched as lowercase substrings of the prompt)
intervention_keywords = [
    "human", "therapist", "someone", "died", "death", "help", "suicide", "suicidal", "sucidal",
    "suffering", "depression", "crisis", "emergency", "support", "depressed", "anxiety", "lonely",
    "desperate", "struggling", "counseling", "distressed", "hurt", "pain", "grief", "trauma",
    "die", "kill", "abuse", "danger", "risk", "urgent", "need assistance", "mental health", "talk to"
]

# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}, ask me a question"):
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append(("user", prompt))
    st.session_state.message_count += 1

    # Check for intervention keywords in the user input (simple case-insensitive
    # substring match, so short keywords such as "die" may also match inside longer words)
    for keyword in intervention_keywords:
        if keyword in prompt.lower():
            st.markdown("<span style='color:red;'>I have a feeling you may need to talk to a therapist. If you agree, please contact +254793609747 (Name: Davis). If you don't, keep talking to me and we will figure this out together.</span>", unsafe_allow_html=True)
            break  # Exit the loop once the intervention notice is shown

    # Interact with the selected model
    assistant_response = interact_with_together_api(st.session_state.messages, model_links[selected_model])

    # Display assistant response in a chat message container
    with st.chat_message("assistant"):
        placeholder = st.empty()
        placeholder.markdown("AI is typing...")
        placeholder.markdown(assistant_response)

    # Add assistant response to chat history
    st.session_state.messages.append(("assistant", assistant_response))