import streamlit as st

# Set the page to wide mode; st.set_page_config must be the first Streamlit call
st.set_page_config(layout='wide')

from llm_generator import llm_generation
from llama_guard import moderate_chat
import time

# Streamlit chat tutorial: https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps
st.title('Therapist')

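# Assumed interfaces for the sibling modules (not shown in this file):
#   llm_generation(query: str) -> str
#       Returns the assistant's reply, e.g. from a conversational-retrieval
#       chain over the app's knowledge base.
#   moderate_chat(chat: list[dict]) -> str
#       Runs Llama Guard over the messages and returns its verdict string,
#       beginning with "safe" or "unsafe" (optionally followed by category codes).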

def response_generator(response):
    '''Yield the response word by word to mimic ChatGPT-style streaming.'''
    for word in response.split():
        yield word + " "
        time.sleep(0.1)  # small delay between words for the streaming effect
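# Usage sketch: st.write_stream consumes the generator and renders each chunk
# as it arrives, e.g. st.write_stream(response_generator("Hello there")).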

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Redisplay the conversation so far on each script rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if user_prompt := st.chat_input("Please tell me about your mental health concerns, and together we can explore advice that might help you."):
    st.session_state.messages.append({"role": "user", "content": user_prompt})
    with st.chat_message("user"):
        st.markdown(user_prompt)

    with st.chat_message("assistant"):
        chat = [
        {"role": "user", "content": user_prompt},
        {"role": "assistant", "content": ""}]
        response = ""
        guard_status = moderate_chat(chat)
        if 'unsafe' in guard_status:
            response = 'Un safe input, Please repharse your question'
        else:
            response = llm_generation(user_prompt) # User input is passed to LLM as query - Would return a response based on ConversationRetrive
        st.write_stream(response_generator(response))
        time.sleep(1)
    st.session_state.messages.append({"role": "assistant", "content": response})
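
# Launch locally with `streamlit run app.py` (the filename is an assumption;
# substitute whatever this file is actually named).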