vsrinivas committed
Commit 408baf5 · verified · 1 Parent(s): 095f8cd

Update funcs.py

Files changed (1)
  1. funcs.py +61 -108
funcs.py CHANGED
@@ -93,121 +93,74 @@ def get_doc_response_emotions(user_message, therapy_session_conversation):
     print(f"User's message: {user_message}")
     print(f"RAG Matching message: {dials_embeddings.iloc[top_match_index]['Patient']}")
     print(f"Therapist's response: {dials_embeddings.iloc[top_match_index]['Doctor']}\n\n")
-
+    print(f"therapy_session_conversation: {therapy_session_conversation}")
     return '', therapy_session_conversation, emotions_msg
 
-def summarize_and_recommend(therapy_session_conversation):
+from datetime import datetime
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
 
-    session_time = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
-    session_conversation = [item[0] for item in therapy_session_conversation]
-    session_conversation = [x for x in session_conversation if x is not None]
-
-    session_conversation.insert(0, "Session_time: "+session_time)
-
-    session_conversation_processed ='\n'.join(session_conversation)
-    print("session_conversation_processed:", session_conversation_processed)
+# Load model once globally for reuse
+tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct")
+model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-7b-instruct", torch_dtype=torch.float16, device_map="auto")
 
-    full_summary = ""
-    # for chunk in AI71(AI71_API_KEY).chat.completions.create(
-    #     model="tiiuae/falcon-180b-chat",
-    #     messages=[
-    #         {"role": "system", "content": """You are an Expert Cognitive Behavioural Therapist and Precis writer.
-    #         Summarize 'STRICTLY' the below user content <<<session_conversation_processed>>> 'ONLY' into useful, ethical, relevant and realistic phrases with a format
-    #         Session Time:
-    #         Summary of the patient messages: #in two to four sentences
-    #         Summary of therapist messages: #in two to three sentences:
-    #         Summary of the whole session: # in two to three sentences. Ensure the entire session summary strictly does not exceed 100 tokens."""},
-    #         {"role": "user", "content": session_conversation_processed},
-    #     ],
-    #     stream=True,
-    # ):
-    #     if chunk.choices[0].delta.content:
-    #         summary = chunk.choices[0].delta.content
-    #         full_summary += summary
-
-    from huggingface_hub import login
-    huggingface_token = os.getenv('hf_token')
-    print("hf-token:",huggingface_token)
-    login(token = huggingface_token)
-    from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
-    import torch
-
-    # Falcon-180B model name
-    model_name = "tiiuae/falcon-7b-instruct"
-
-    # Load tokenizer and model
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(
-        model_name,
-        device_map="auto",  # Automatically maps layers to available GPUs
-        torch_dtype=torch.bfloat16,  # Use bf16 if supported
-        trust_remote_code=True,  # Falcon uses custom model code
-        # token=huggingface_token
-    )
-
-    # Create conversation prompt (chat-style)
-    messages = [
-        {"role": "system", "content": """You are an Expert Cognitive Behavioural Therapist and Precis writer.
-        Summarize 'STRICTLY' the below user content <<<session_conversation_processed>>> 'ONLY' into useful, ethical, relevant and realistic phrases with a format
-        Session Time:
-        Summary of the patient messages: #in two to four sentences
-        Summary of therapist messages: #in two to three sentences:
-        Summary of the whole session: # in two to three sentences. Ensure the entire session summary strictly does not exceed 100 tokens."""},
-        {"role": "user", "content": session_conversation_processed}
-    ]
-
-    # Convert to prompt string
-    prompt = ""
-    for msg in messages:
-        prompt += f"{msg['role'].capitalize()}: {msg['content']}\n"
-    prompt += "Assistant:"
-
-    # Tokenize input
+def generate_falcon_response(prompt, max_new_tokens=300):
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-
-    # Optional: live printing during generation
-    streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
-
-    # Generate output
-    output = model.generate(
+    outputs = model.generate(
         **inputs,
-        max_new_tokens=200,
+        max_new_tokens=max_new_tokens,
         do_sample=True,
         temperature=0.7,
-        top_p=0.9,
-        streamer=streamer  # comment this line to just capture output
+        top_p=0.9
     )
+    decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return decoded_output[len(prompt):].strip()
+
+def summarize_and_recommend(therapy_session_conversation):
+
+    session_time = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+    session_conversation = [item[0] for item in therapy_session_conversation]
+    session_conversation = [x for x in session_conversation if x is not None]
+    session_conversation.insert(0, "Session_time: " + session_time)
 
-    # To print final output (if not using streamer)
-    print(tokenizer.decode(output[0], skip_special_tokens=True))
-
-
-    # full_summary = full_summary.replace('User:', '').strip()
-    # print("\n")
-    # print("Full summary:", full_summary)
-
-    full_recommendations = ""
-    for chunk in AI71(AI71_API_KEY).chat.completions.create(
-        model="tiiuae/falcon-180b-chat",
-        messages=[
-            {"role": "system", "content": """You are an expert Cognitive Behavioural Therapist.
-            Based on 'STRICTLY' the full summary <<<full_summary>>> 'ONLY' provide clinically valid, useful, appropriate action plan for the Patient as a bullted list.
-            The list shall contain both medical and non medical prescriptions, dos and donts. The format of response shall be in passive voice with proper tense.
-            - The patient is referred to........ #in one sentence
-            - The patient is advised to ........ #in one sentence
-            - The patient is refrained from........ #in one sentence
-            - It is suggested that tha patient ........ #in one sentence
-            - Scheduled a follow-up session with the patient........#in one sentence
-            *Ensure the list contains NOT MORE THAN 7 points"""},
-            {"role": "user", "content": full_summary},
-        ],
-        stream=True,
-    ):
-        if chunk.choices[0].delta.content:
-            rec = chunk.choices[0].delta.content
-            full_recommendations += rec
-    full_recommendations = full_recommendations.replace('User:', '').strip()
-    print("\n")
-    print("Full recommendations:", full_recommendations)
-    chatbox=[]
-    return full_summary, full_recommendations, chatbox
+    session_conversation_processed = '\n'.join(session_conversation)
+    print("session_conversation_processed:", session_conversation_processed)
+
+    # Summarization prompt
+    summary_prompt = f"""You are an Expert Cognitive Behavioural Therapist and Precis writer.
+    Summarize STRICTLY the following session into concise, ethical, and clinically meaningful content.
+
+    Session:
+    {session_conversation_processed}
+
+    Format your response as:
+    Session Time:
+    Summary of the patient messages:
+    Summary of therapist messages:
+    Summary of the whole session:
+    Ensure the entire summary is less than 300 tokens."""
+
+    full_summary = generate_falcon_response(summary_prompt, max_new_tokens=300)
+    print("\nFull summary:", full_summary)
+
+    # Recommendation prompt
+    recommendation_prompt = f"""You are an expert Cognitive Behavioural Therapist.
+    Based STRICTLY on the following summary, provide a clinically valid action plan for the patient.
+
+    Summary:
+    {full_summary}
+
+    Use this format:
+    - The patient is referred to...
+    - The patient is advised to...
+    - The patient is refrained from...
+    - It is suggested that the patient...
+    - Scheduled a follow-up session with the patient...
+
+    Ensure the list contains NOT MORE THAN 7 points and is in passive voice with proper tense."""
+
+    full_recommendations = generate_falcon_response(recommendation_prompt, max_new_tokens=400)
+    print("\nFull recommendations:", full_recommendations)
+
+    chatbox = []
+    return full_summary, full_recommendations, chatbox
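
For reference, a minimal, hypothetical smoke test of the updated functions (not part of this commit). It assumes funcs.py is importable as a module, that importing it can load tiiuae/falcon-7b-instruct on the available hardware, and that the conversation history uses [user_message, bot_message] pairs in the Gradio chatbot style implied by item[0] above; the sample messages are illustrative only.

# Hypothetical usage sketch; everything except funcs.summarize_and_recommend is illustrative.
from funcs import summarize_and_recommend  # loads the Falcon model at import time

# Gradio-style chat history: each entry is a [user_message, bot_message] pair.
therapy_session_conversation = [
    ["I feel anxious before every meeting at work.",
     "It sounds like meetings trigger your anxiety. What thoughts come up just before they start?"],
    ["I keep thinking I will say something wrong and be judged.",
     "That is a common anticipatory thought; we can work on reframing it."],
]

summary, recommendations, chatbox = summarize_and_recommend(therapy_session_conversation)
print(summary)          # "Session Time: ..." summary text
print(recommendations)  # bulleted, passive-voice action plan
print(chatbox)          # empty list, presumably returned so the UI can reset the chat window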