File size: 2,029 Bytes
0356b85
 
 
 
 
 
 
 
 
d5d69d3
0356b85
 
 
 
7b1dede
 
0356b85
 
 
1406ce1
5e545e3
 
 
6530946
5e545e3
1406ce1
5e545e3
01e0929
6530946
5e545e3
 
 
 
0356b85
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51

import os
import gradio as gr
from langchain_community.llms import HuggingFaceEndpoint
from langchain.prompts import PromptTemplate

# --- Model setup ---------------------------------------------------------
# The HuggingFace API token is read from the environment; it must be set
# (e.g. as a Space secret) or the endpoint call will fail at request time.
HF_TOKEN = os.getenv("HF_TOKEN")

# Remote text-generation endpoint used as the chatbot backend.
# Sampling is kept conservative (low temperature, small top_k) so answers
# stay focused; repetition_penalty discourages the model from looping.
llm = HuggingFaceEndpoint(
    repo_id="google/gemma-1.1-7b-it",
    task="text-generation",
    max_new_tokens=512,          # cap on generated tokens per reply
    top_k=5,
    temperature=0.3,
    repetition_penalty=1.03,
    huggingfacehub_api_token=HF_TOKEN,
)
# System prompt for the chatbot. The {context} and {question} placeholders
# are filled in per turn by predict().
template = """
You are a Mental Health Chatbot, so act as a experienced psychologist and your purpose is to provide supportive and non-judgmental guidance to users who are struggling with their mental health. Your goal is to help users identify their concerns, offer resources and coping strategies, and encourage them to seek professional help when needed.

User Context: {context}

Question: {question}

Please respond with a helpful and compassionate answer that addresses the user's concern. If necessary, ask follow-up questions to gather more information and provide a more accurate response, motivate the individual, ask about their age and respond accordingly.


Remember to prioritize the user's well-being and safety above all else. If the user expresses suicidal thoughts or intentions, please respond with immediate support and resources, such as the National Suicide Prevention Lifeline ( 91529 87821-TALK) in India, or other similar resources in your region. 

Helpful Answer: """

# Build the prompt; from_template infers the {context}/{question} variables
# directly from the template text.
QA_CHAIN_PROMPT = PromptTemplate.from_template(template)

def predict(message, history):
    """Generate one chatbot reply for a Gradio ChatInterface turn.

    Args:
        message: The user's latest message (str).
        history: Prior conversation turns as supplied by Gradio; it is
            interpolated verbatim into the prompt as {context}.
            # NOTE(review): Gradio passes a list of (user, bot) pairs here —
            # it is formatted via str(); consider pretty-printing it.

    Returns:
        The model's reply text, or a fallback apology when the endpoint
        produced no generations.
    """
    input_prompt = QA_CHAIN_PROMPT.format(question=message, context=history)
    result = llm.generate([input_prompt])

    # generate() returns an LLMResult; guard both the outer and inner
    # generation lists so an empty response cannot raise IndexError.
    if result.generations and result.generations[0]:
        ai_msg = result.generations[0][0].text
    else:
        ai_msg = "I'm sorry, I couldn't generate a response for that input."

    return ai_msg


gr.ChatInterface(predict).launch()