vsrinivas committed (verified)
Commit 800ec67 · Parent: 901a643

Update funcs.py

Files changed (1): funcs.py (+113 -43)
funcs.py CHANGED
@@ -96,74 +96,144 @@ def get_doc_response_emotions(user_message, therapy_session_conversation):
    print(f"therapy_session_conversation: {therapy_session_conversation}")
    return '', therapy_session_conversation, emotions_msg

- from transformers import AutoTokenizer, AutoModelForCausalLM
- import torch
-
- # Load model once globally for reuse
- tokenizer = AutoTokenizer.from_pretrained("tiiuae/Falcon-H1-1.5B-Deep-Instruct")
- use_cuda = torch.cuda.is_available()
- model = AutoModelForCausalLM.from_pretrained(
-     "tiiuae/Falcon-H1-1.5B-Deep-Instruct",
-     torch_dtype=torch.float16 if use_cuda else torch.float32,
-     device_map="auto" if use_cuda else None
- )
- def generate_falcon_response(prompt, max_new_tokens=300):
-     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1800).to(model.device)
-     outputs = model.generate(
-         **inputs,
-         max_new_tokens=max_new_tokens,
-         do_sample=True,
-         temperature=0.7,
-         top_p=0.9
-     )
-     decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return decoded_output[len(prompt):].strip()


- def summarize_and_recommend(therapy_session_conversation):

-     session_time = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
-     session_conversation = [item[0] for item in therapy_session_conversation]
-     session_conversation = [x for x in session_conversation if x is not None]
    session_conversation.insert(0, "Session_time: " + session_time)
-
    session_conversation_processed = '\n'.join(session_conversation)
    print("session_conversation_processed:", session_conversation_processed)

-     # Summarization prompt
-     summary_prompt = f"""You are an Expert Cognitive Behavioural Therapist and Precis writer.
-     Summarize STRICTLY the following session into concise, ethical, and clinically meaningful content.
-
-     Session:
-     {session_conversation_processed}

-     Format your response as:
-     Session Time:
    Summary of the patient messages:
-     Summary of therapist messages:
    Summary of the whole session:
-     Ensure the entire summary is less than 300 tokens."""

-     full_summary = generate_falcon_response(summary_prompt, max_new_tokens=300)
    print("\nFull summary:", full_summary)

-     # Recommendation prompt
-     recommendation_prompt = f"""You are an expert Cognitive Behavioural Therapist.
-     Based STRICTLY on the following summary, provide a clinically valid action plan for the patient.

    Summary:
    {full_summary}

-     Use this format:
    - The patient is referred to...
    - The patient is advised to...
    - The patient is refrained from...
    - It is suggested that the patient...
    - Scheduled a follow-up session with the patient...

-     Ensure the list contains NOT MORE THAN 7 points and is in passive voice with proper tense."""

-     full_recommendations = generate_falcon_response(recommendation_prompt, max_new_tokens=400)
    print("\nFull recommendations:", full_recommendations)

    chatbox = []
 
    print(f"therapy_session_conversation: {therapy_session_conversation}")
    return '', therapy_session_conversation, emotions_msg

+ # from transformers import AutoTokenizer, AutoModelForCausalLM
+ # import torch
+
+ # # Load model once globally for reuse
+ # tokenizer = AutoTokenizer.from_pretrained("tiiuae/Falcon-H1-1.5B-Deep-Instruct")
+ # use_cuda = torch.cuda.is_available()
+ # model = AutoModelForCausalLM.from_pretrained(
+ #     "tiiuae/Falcon-H1-1.5B-Deep-Instruct",
+ #     torch_dtype=torch.float16 if use_cuda else torch.float32,
+ #     device_map="auto" if use_cuda else None
+ # )
+ # def generate_falcon_response(prompt, max_new_tokens=300):
+ #     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1800).to(model.device)
+ #     outputs = model.generate(
+ #         **inputs,
+ #         max_new_tokens=max_new_tokens,
+ #         do_sample=True,
+ #         temperature=0.7,
+ #         top_p=0.9
+ #     )
+ #     decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
+ #     return decoded_output[len(prompt):].strip()
+
+
+ # def summarize_and_recommend(therapy_session_conversation):
+
+ #     session_time = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+ #     session_conversation = [item[0] for item in therapy_session_conversation]
+ #     session_conversation = [x for x in session_conversation if x is not None]
+ #     session_conversation.insert(0, "Session_time: " + session_time)
+
+ #     session_conversation_processed = '\n'.join(session_conversation)
+ #     print("session_conversation_processed:", session_conversation_processed)

+ #     # Summarization prompt
+ #     summary_prompt = f"""You are an Expert Cognitive Behavioural Therapist and Precis writer.
+ #     Summarize STRICTLY the following session into concise, ethical, and clinically meaningful content.
+
+ #     Session:
+ #     {session_conversation_processed}
+
+ #     Format your response as:
+ #     Session Time:
+ #     Summary of the patient messages:
+ #     Summary of therapist messages:
+ #     Summary of the whole session:
+ #     Ensure the entire summary is less than 300 tokens."""
+
+ #     full_summary = generate_falcon_response(summary_prompt, max_new_tokens=300)
+ #     print("\nFull summary:", full_summary)
+
+ #     # Recommendation prompt
+ #     recommendation_prompt = f"""You are an expert Cognitive Behavioural Therapist.
+ #     Based STRICTLY on the following summary, provide a clinically valid action plan for the patient.
+
+ #     Summary:
+ #     {full_summary}
+
+ #     Use this format:
+ #     - The patient is referred to...
+ #     - The patient is advised to...
+ #     - The patient is refrained from...
+ #     - It is suggested that the patient...
+ #     - Scheduled a follow-up session with the patient...
+
+ #     Ensure the list contains NOT MORE THAN 7 points and is in passive voice with proper tense."""
+
+ #     full_recommendations = generate_falcon_response(recommendation_prompt, max_new_tokens=400)
+ #     print("\nFull recommendations:", full_recommendations)
+
+ #     chatbox = []
+ #     return full_summary, full_recommendations, chatbox


+ import requests
+ from datetime import datetime
+ import os
+
+ HF_API_TOKEN = os.getenv('hf_token')  # 🔁 Replace with your HF token
+ API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
+ HEADERS = {"Authorization": f"Bearer {HF_API_TOKEN}"}
+
+ def query_huggingface(prompt):
+     payload = {
+         "inputs": prompt,
+         "parameters": {
+             "max_new_tokens": 400,
+             "temperature": 0.7,
+             "top_p": 0.9,
+             "return_full_text": False
+         }
+     }
+     response = requests.post(API_URL, headers=HEADERS, json=payload)
+     response.raise_for_status()
+     return response.json()[0]['generated_text'].strip()
+
+ def summarize_and_recommend(therapy_session_conversation):
+     session_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+     session_conversation = [item[0] for item in therapy_session_conversation if item[0]]
    session_conversation.insert(0, "Session_time: " + session_time)
    session_conversation_processed = '\n'.join(session_conversation)
    print("session_conversation_processed:", session_conversation_processed)

+     summary_prompt = f"""
+     You are an Expert Cognitive Behavioural Therapist and Precis writer.
+     Summarize STRICTLY the below session content ONLY into useful, ethical, relevant and realistic phrases in this format:

+     Session Time: {session_time}
    Summary of the patient messages:
+     Summary of therapist messages:
    Summary of the whole session:

+     Conversation:
+     {session_conversation_processed}
+     """
+
+     full_summary = query_huggingface(summary_prompt)
    print("\nFull summary:", full_summary)

+     recommendation_prompt = f"""
+     You are an expert Cognitive Behavioural Therapist.
+     Based STRICTLY on the following summary ONLY, provide a clinically valid and appropriate action plan for the patient as a bullet list.
+     Include both medical and non-medical suggestions in passive voice.

    Summary:
    {full_summary}

+     Format:
    - The patient is referred to...
    - The patient is advised to...
    - The patient is refrained from...
    - It is suggested that the patient...
    - Scheduled a follow-up session with the patient...

+     (Ensure not more than 7 bullet points)
+     """

+     full_recommendations = query_huggingface(recommendation_prompt)
    print("\nFull recommendations:", full_recommendations)

    chatbox = []
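
Below is a minimal, hypothetical smoke test for the new Inference API path; it is not part of this commit. It assumes funcs.py is importable from the working directory, that the Space's hf_token secret is exported as an environment variable (query_huggingface reads it via os.getenv('hf_token')), that therapy_session_conversation is a Gradio-style list of [user_message, therapist_reply] pairs (as implied by the item[0] indexing above), and that summarize_and_recommend still ends with the return full_summary, full_recommendations, chatbox line visible in the commented-out version.

# Hypothetical local check for the new Inference API path (not part of this commit).
import os

from funcs import summarize_and_recommend  # assumes funcs.py is on the import path

if __name__ == "__main__":
    # query_huggingface() reads the token via os.getenv('hf_token'), so fail fast if it is missing.
    if not os.getenv("hf_token"):
        raise SystemExit("Set the hf_token environment variable to a valid Hugging Face token first.")

    # Gradio-style chat history: each item is [user_message, therapist_reply].
    sample_conversation = [
        ["I have been feeling anxious before work meetings.",
         "Thank you for sharing. What happens just before a meeting starts?"],
        ["My heart races and I sometimes cancel at the last minute.",
         "Avoiding the meeting may relieve anxiety briefly but can reinforce it over time."],
    ]

    # Assumed return shape: (full_summary, full_recommendations, chatbox).
    summary, recommendations, chatbox = summarize_and_recommend(sample_conversation)
    print("Summary:\n", summary)
    print("Recommendations:\n", recommendations)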