import os

from dotenv import load_dotenv, find_dotenv
from langchain.chains import ConversationChain, LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_community.llms import HuggingFaceEndpoint


# Ensure a .env file is available with 'HUGGINGFACEHUB_API_TOKEN' set.
load_dotenv(find_dotenv())
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
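# Example .env contents (placeholder value, not a real token):
#   HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxxxxxxxxxxxxxx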

repo_id = "mistralai/Mistral-7B-Instruct-v0.2"

def choose_model1(model):
    """Point the module at the requested Hugging Face repo before main() runs."""
    global repo_id
    if model == "mistral-7b-base-model":
        repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
    else:
        repo_id = "GRMenon/mental-health-mistral-7b-instructv0.2-finetuned-V2"
    print("model chosen from chat:", repo_id)

# Running transcript of everything the user has said this session.
query2 = ""
def main():
    # max_length is not a documented HuggingFaceEndpoint field; cap generation
    # with max_new_tokens and pass the token via its documented keyword instead.
    llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        max_new_tokens=512,
        temperature=0.5,
        huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
    )

    # template="""Act as a therapist, and conduct therapy sessions with the user. Your goal analyse their mental health 
    # problem, based following input:{query}. Do not show your thought process, only output a single question. 
    # Your output should contain consolation related to the query and a single question. Only ask one question at a time."""

    # def ConvoLLM(query: str): 
    #     prompt_template=PromptTemplate(input_variables=['query'],template= template)
    #     prompt_template.format(query= query)
    #     chain=LLMChain(llm=llm,prompt=prompt_template)
    #     response = chain.run(query) 
    #     return response

    # ------------------------------------------------------------------
    # Earlier memory experiments, kept for reference:
    # memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=10)
    # memory.save_context({"input": "hi"}, {"output": "whats up"})
    #
    # def ConvoLLM(query: str):
    #     return conversation.predict(input=query)
    #
    # print(conversation.predict(input="I am feeling low"))
    # print(conversation.predict(input="I am alone at home"))
    # print(conversation.memory.buffer)
    # ------------------------------------------------------------------
    global conversation, memory
    template = """Act as an expert mental health therapist and conduct a therapy session with the user. You are an expert mental health therapist who asks the user questions to learn what professional mental health well-being advice could help them.

        Your goal is to analyse their mental health problem based on the following input: {input}. Always ask questions that get the user to explain more about whatever mental health condition is ailing them.

        DO NOT give the user any mental health advice or medical advice; ONLY ask for more information about their symptoms.

        Do not show your thought process; output only a single question. Your output should contain consolation related to the query and a single question.

        Only ask one question at a time.

        Current conversation:
        {history}

        Human: {input}
        AI Assistant:"""

    PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)

    # ConversationBufferMemory stores the raw transcript verbatim and takes no
    # llm argument (only the summary memories need one).
    memory = ConversationBufferMemory()
    # Optional pre-seeding of the memory, kept for reference:
    # memory.save_context({"input": "hi"}, {"output": "whats up"})
    # memory.save_context({"input": "not much you"}, {"output": "not much"})
    # memory.save_context({"input": "feeling sad"}, {"output": "I am happy you feel that way"})

    conversation = ConversationChain(
        prompt=PROMPT,
        llm=llm,
        memory=memory,
        # verbose=True
    )
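    # On every call, ConversationChain fills {history} from this memory, so each
    # predict() sees the full transcript accumulated so far.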


def convo(query):
    """Run one turn of the conversation; returns (response, running transcript)."""
    global conversation, memory, query2
    response = conversation.predict(input=query)
    # Append this turn to the running transcript used as the session summary.
    query2 = f"{query2},{query}" if query2 else query
    print("\n query2----------", query2)
    print("\n chat_agent.py----------", memory.chat_memory)
    summary = query2
    return response, summary


def delete_all_variables():
    """Reset the running transcript and rebuild the chain with fresh memory."""
    global query2
    query2 = ""
    main()

if __name__ == "__main__":
    # Manual smoke test of the conversation flow.
    main()
    convo("I am feeling sad")
    convo("I am feeling Lonely")
    delete_all_variables()
    convo("I am feeling hungry")