kishoregajjala committed
Commit 577f02c
1 Parent(s): 18a46f3

final demo code

Files changed (4)
  1. app.py +3 -2
  2. chat_agent.py +3 -3
  3. nlp_models.py +43 -0
  4. recommendation_agent.py +56 -33
app.py CHANGED
@@ -3,7 +3,7 @@ from llama_guard import moderate_chat, get_category_name
  import time
  from chat_agent import convo, main
  from chat_agent import choose_model1, delete_all_variables
- from recommendation_agent import recommend2, choose_model2, is_depressed
+ from recommendation_agent import recommend2, choose_model2, is_depressed, start_recommend
  from functools import cached_property
  from streamlit_js_eval import streamlit_js_eval

@@ -19,11 +19,12 @@ st.title('BrighterDays Mentor')
  # Adjust sidebar width to take half the screen
  col1, col2 = st.columns([2, 3])

- model = st.sidebar.selectbox(label="Choose the LLM model", options=["Venilla Model", "Fine Tuned Model"])
+ model = st.sidebar.selectbox(label="Choose the LLM model", options=["mistral-7b-base-model", "mental-health-mistral-7b-finetuned-model"])
  print("\n\nSelected LLM model from Dropdown",model)
  choose_model1(model)
  choose_model2(model)
  main()
+ start_recommend()
  # Function to update recommendations in col1
  def update_recommendations(sum):
  # with col1:
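For reference, a minimal sketch (not part of the commit) of the call order app.py now follows once a model is picked from the sidebar:

from chat_agent import choose_model1, main
from recommendation_agent import choose_model2, start_recommend

selected = "mental-health-mistral-7b-finetuned-model"  # value returned by st.sidebar.selectbox
choose_model1(selected)   # sets the chat agent's repo_id
choose_model2(selected)   # sets the recommendation agent's repo_id and builds its HuggingFaceEndpoint
main()                    # renders the chat UI
start_recommend()         # builds the Chroma retriever and ConversationalRetrievalChain once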
chat_agent.py CHANGED
@@ -23,12 +23,12 @@ HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
  repo_id ="mistralai/Mistral-7B-Instruct-v0.2"
  def choose_model1(model):
      global repo_id
-     if model == "Venilla Model":
+     if model == "mistral-7b-base-model":
          repo_id="mistralai/Mistral-7B-Instruct-v0.2"
-         print("model chooosed from chat",repo_id)
+         print("model chosen from chat",repo_id)
      else:
          repo_id="GRMenon/mental-health-mistral-7b-instructv0.2-finetuned-V2"
-         print("model chooosed from chat",repo_id)
+         print("model chosen from chat",repo_id)

  query2 = " "
  def main():
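A tiny illustration (not from the commit) of how the renamed dropdown values map to repos in choose_model1; any value other than the base-model string falls through to the fine-tuned repo:

from chat_agent import choose_model1

choose_model1("mistral-7b-base-model")                     # -> mistralai/Mistral-7B-Instruct-v0.2
choose_model1("mental-health-mistral-7b-finetuned-model")  # -> GRMenon/mental-health-mistral-7b-instructv0.2-finetuned-V2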
nlp_models.py ADDED
@@ -0,0 +1,43 @@
+ import torch
+ from transformers import DistilBertForSequenceClassification
+ import os
+ # # Get the directory path of the current script
+ # script_dir = os.path.dirname(os.path.abspath(__file__))
+ # model = DistilBertForSequenceClassification.from_pretrained("model.safetensors")
+
+ # Load model directly
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ tokenizer = AutoTokenizer.from_pretrained("lxs1/DistilBertForSequenceClassification_6h_768dim")
+ model = AutoModelForSequenceClassification.from_pretrained("lxs1/DistilBertForSequenceClassification_6h_768dim")
+
+
+ # from transformers import DistilBertTokenizerFast
+ # tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
+
+ # Move the model to the GPU if available
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ model.to(device)
+
+ def sentiment_class(summarized_text):
+     '''
+     # 1 = non-depressed
+     # 0 = depressed
+     returns: example:- array([[0.00493283, 0.9950671 ]], dtype=float32)
+     '''
+     #inputs = tokenizer(summarized_text, padding = True, truncation = True, return_tensors='pt').to('cuda')
+     inputs = tokenizer(summarized_text, padding = True, truncation = True, return_tensors='pt').to(device)
+
+     outputs = model(**inputs)
+
+     predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
+     predictions = predictions.cpu().detach().numpy()
+     return predictions
+
+ def pattern_classification():
+     result=""
+     return result
+
+ def corelation_analysis():
+     result=""
+     return result
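A hedged usage example for sentiment_class (the sample sentence is made up; the index-to-label mapping follows the docstring above, index 0 = depressed, index 1 = non-depressed):

from nlp_models import sentiment_class

probs = sentiment_class("I have been feeling hopeless and exhausted for weeks.")  # shape (1, 2)
prob_depressed, prob_not_depressed = probs[0]
print(f"depressed={prob_depressed:.3f}, non-depressed={prob_not_depressed:.3f}")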
recommendation_agent.py CHANGED
@@ -22,47 +22,53 @@ load_dotenv(find_dotenv())
  HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]

  repo_id ="mistralai/Mistral-7B-Instruct-v0.2"
+ llm = None
+
  def choose_model2(model):
      global repo_id
-     if model == "Venilla Model":
+     global llm
+
+     if model == "mistral-7b-base-model":
          repo_id="mistralai/Mistral-7B-Instruct-v0.2"
          print("model chooosed from recomm",repo_id)
      else:
          repo_id="GRMenon/mental-health-mistral-7b-instructv0.2-finetuned-V2"
          print("model chooosed from recomm",repo_id)

- llm = HuggingFaceEndpoint(
-     repo_id=repo_id, max_length=512, temperature=0.5, token=HUGGINGFACEHUB_API_TOKEN
- )
+     llm = HuggingFaceEndpoint(
+         repo_id=repo_id, max_length=512, temperature=0.5, token=HUGGINGFACEHUB_API_TOKEN
+     )

- persist_directory="Data/chroma"
- #chroma_client = chromadb.PersistentClient(persist_directory=persist_directory)
- embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
- vectors = Chroma(persist_directory = persist_directory, embedding_function = embedding_function, collection_name="split_parents")
- retriever = vectors.as_retriever() #(k=6)
-
- #prompt="you are a mental health therapist, talking to a person with who is facing some mental health issues. Following is the user feeling {question}"
-
- prompt = """You're a Mental Health Specialist. Support those with Depressive Disorder. Your task is to provide mental health advice.
- Listen compassionately, respond helpfully. For casual talk, be friendly. For facts, use context.
- Following is the user feeling {question}.
- If unsure, say, 'Out of my knowledge.' Always stay direct.
- If you cannot find the answer from the pieces of context, just say that you don't know, don't try to make up an answer.
- PLEASE GIVE THE RESPONSE IN THE FORM OF BULLET POINTS.
- ----------------
- {context}"""
-
- prompt = PromptTemplate(input_variables=['question'],template=prompt)
-
- chain1 = LLMChain(llm=llm, prompt=prompt, verbose=True)
- doc_chain = load_qa_chain(llm, chain_type="stuff")
-
- chain = ConversationalRetrievalChain(
-     retriever=retriever,
-     question_generator=chain1,
-     combine_docs_chain=doc_chain,
-     verbose=True,
- )
+ def start_recommend():
+     persist_directory="Data/chroma"
+     #chroma_client = chromadb.PersistentClient(persist_directory=persist_directory)
+     embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
+     vectors = Chroma(persist_directory = persist_directory, embedding_function = embedding_function, collection_name="split_parents")
+     retriever = vectors.as_retriever() #(k=6)
+
+     #prompt="you are a mental health therapist, talking to a person with who is facing some mental health issues. Following is the user feeling {question}"
+
+     prompt = """You're a Mental Health Specialist. Support those with Depressive Disorder. Your task is to provide brief mental health advice. PLEASE KEEP IT BRIEF.
+     Listen compassionately, respond helpfully. For casual talk, be friendly. For facts, use context.
+     Following is the user feeling {question}.
+     If unsure, say, 'Out of my knowledge.' Always stay direct.
+     If you cannot find the answer from the pieces of context, just say that you don't know, don't try to make up an answer.
+     PLEASE GIVE THE BRIEF RESPONSE IN THE FORM OF BULLET POINTS.
+     ----------------
+     {context}"""
+
+     global chain
+     prompt = PromptTemplate(input_variables=['question'],template=prompt)
+
+     chain1 = LLMChain(llm=llm, prompt=prompt, verbose=True)
+     doc_chain = load_qa_chain(llm, chain_type="stuff")
+
+     chain = ConversationalRetrievalChain(
+         retriever=retriever,
+         question_generator=chain1,
+         combine_docs_chain=doc_chain,
+         verbose=True,
+     )


  def recommend2(query):

@@ -88,4 +94,21 @@ def is_depressed(human_inputs):
      # return status[0]["label"]


- # print(recommend2("i am feeling sad"))
+ def is_depressed_from_nlp_model(human_inputs):
+     ''''
+     returns wether according to human inputs the person is depressed or not
+     '''
+     # Implement Classification
+     # all_user_inputs = ''.join(human_inputs)
+     from nlp_models import sentiment_class
+     predictions = sentiment_class(human_inputs)
+     # Assuming the predictions are shaped (1, 2) as in the example
+     probability_depressed, probability_not_depressed = predictions[0]
+
+     if probability_depressed > probability_not_depressed:
+         return "Depressed", probability_depressed
+     else:
+         return "Not depressed", probability_not_depressed
+     #return 'Not so depressed' if status[0][1] > 0.5 else 'is_depressed'
+     #return 'Is depressed' if status[0]["label"] == "NEGATIVE" else 'Not Depressed'
+     # return status[0]["label"]
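Assuming the pieces above are wired the way app.py now calls them, an illustrative smoke test could look like this (the input strings are made up and not part of the commit):

from recommendation_agent import choose_model2, start_recommend, recommend2, is_depressed_from_nlp_model

choose_model2("mistral-7b-base-model")   # picks the repo and builds the HuggingFaceEndpoint llm
start_recommend()                        # builds the retriever and the global conversational chain

label, prob = is_depressed_from_nlp_model("I feel sad and can't sleep.")
print(label, prob)
# print(recommend2("i am feeling sad"))  # ask the retrieval chain for brief, bullet-point advice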