mgbam committed on
Commit caa98a8 · verified · 1 Parent(s): 5fb0df4

Update agent.py

Files changed (1)
  1. agent.py +443 -198
agent.py CHANGED
@@ -25,239 +25,484 @@ GROQ_API_KEY = os.getenv("GROQ_API_KEY")
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

  if not all([UMLS_API_KEY, GROQ_API_KEY, TAVILY_API_KEY]):
-     logger.error("Missing required API keys")
-     raise RuntimeError("Missing API keys")

  # ── Agent Configuration ──────────────────────────────────────────────
  class ClinicalPrompts:
      SYSTEM_PROMPT = """
      You are SynapseAI, an expert AI clinical assistant engaged in an interactive consultation...
      [SYSTEM PROMPT CONTENT HERE]
      """

- MAX_ITERATIONS = 4
- AGENT_MODEL_NAME = "llama3-70b-8192"
- AGENT_TEMPERATURE = 0.1

- # ── State Definition ─────────────────────────────────────────────────
  class AgentState(TypedDict):
      messages: List[Any]
      patient_data: Optional[Dict[str, Any]]
      summary: Optional[str]
      interaction_warnings: Optional[List[str]]
-     done: bool
-     iterations: int

  def propagate_state(new: Dict[str, Any], old: Dict[str, Any]) -> Dict[str, Any]:
-     """Merge new state changes with existing state"""
-     return {**old, **new}

- # ── Core Agent Node ──────────────────────────────────────────────────
  def agent_node(state: AgentState) -> Dict[str, Any]:
-     """Main agent node with iteration tracking"""
-     state = dict(state) # Create mutable copy
-
-     # Check termination conditions
      if state.get("done", False):
          return state
-
-     # Update iteration count
-     iterations = state.get("iterations", 0) + 1
-     state["iterations"] = iterations
-
-     # Enforce iteration limit
-     if iterations >= MAX_ITERATIONS:
-         return {
-             "messages": [AIMessage(content="Consultation concluded. Maximum iterations reached.")],
-             "done": True,
-             **state
-         }
-
-     # Prepare message history
-     messages = state.get("messages", [])
-     if not messages or not isinstance(messages[0], SystemMessage):
-         messages = [SystemMessage(content=ClinicalPrompts.SYSTEM_PROMPT)] + messages
-
      try:
-         # Generate response
-         llm_response = ChatGroq(
-             temperature=AGENT_TEMPERATURE,
-             model=AGENT_MODEL_NAME
-         ).invoke(messages)
-
-         return propagate_state({
-             "messages": [llm_response],
-             "done": "consultation complete" in llm_response.content.lower()
-         }, state)
-
      except Exception as e:
-         logger.error(f"Agent error: {str(e)}")
-         return propagate_state({
-             "messages": [AIMessage(content=f"System Error: {str(e)}")],
-             "done": True
-         }, state)
-
- # ── Tool Handling Nodes ──────────────────────────────────────────────
- tool_executor = ToolExecutor([
-     TavilySearchResults(max_results=3),
-     # Include other tools here...
- ])

  def tool_node(state: AgentState) -> Dict[str, Any]:
-     """Execute tool calls from last agent message"""
-     state = dict(state)
-     messages = state["messages"]
-     last_message = messages[-1]
-
-     if not isinstance(last_message, AIMessage) or not last_message.tool_calls:
          return state
-
-     tool_calls = last_message.tool_calls
-     outputs = []
-
-     for tool_call in tool_calls:
-         try:
-             output = tool_executor.invoke(tool_call)
-             outputs.append(
-                 ToolMessage(
-                     content=json.dumps(output),
-                     tool_call_id=tool_call["id"],
-                     name=tool_call["name"]
-                 )
-             )
-         except Exception as e:
-             logger.error(f"Tool error: {str(e)}")
-             outputs.append(
-                 ToolMessage(
-                     content=json.dumps({"error": str(e)}),
-                     tool_call_id=tool_call["id"],
-                     name=tool_call["name"]
-                 )
-             )
-
-     return propagate_state({
-         "messages": outputs,
-         "interaction_warnings": detect_interaction_warnings(outputs)
-     }, state)
-
- def detect_interaction_warnings(tool_messages: List[ToolMessage]) -> List[str]:
-     """Parse tool outputs for interaction warnings"""
-     warnings = []
-     for msg in tool_messages:
-         try:
-             content = json.loads(msg.content)
-             if content.get("status") == "warning":
-                 warnings.extend(content.get("warnings", []))
-         except json.JSONDecodeError:
-             continue
-     return warnings
-
- # ── Safety Reflection Node ───────────────────────────────────────────
  def reflection_node(state: AgentState) -> Dict[str, Any]:
-     """Analyze potential safety issues"""
-     warnings = state.get("interaction_warnings", [])
-     if not warnings:
          return state
-
-     prompt = f"""Analyze these clinical warnings:
-     {chr(10).join(warnings)}
-
-     Provide concise safety recommendations:"""
-
      try:
-         reflection = ChatGroq(
-             temperature=0.0, # Strict safety mode
-             model=AGENT_MODEL_NAME
-         ).invoke([HumanMessage(content=prompt)])
-
-         return propagate_state({
-             "messages": [reflection],
-             "summary": f"Safety Review:\n{reflection.content}"
-         }, state)
-
      except Exception as e:
-         logger.error(f"Reflection error: {str(e)}")
-         return propagate_state({
-             "messages": [AIMessage(content=f"Safety review unavailable: {str(e)}")],
-             "summary": "Failed safety review"
-         }, state)
-
- # ── State Routing Logic ──────────────────────────────────────────────
- def route_state(state: AgentState) -> str:
-     """Determine next node in workflow"""
-     if state.get("done", False):
-         return "end"
-
-     messages = state.get("messages", [])
-
-     # Prioritize safety reflection
      if state.get("interaction_warnings"):
          return "reflection"
-
-     # Check for tool calls
-     if messages and isinstance(messages[-1], AIMessage):
-         if messages[-1].tool_calls:
-             return "tools"
-
-     return "agent"

- # ── Workflow Construction ────────────────────────────────────────────
  class ClinicalAgent:
      def __init__(self):
-         self.workflow = StateGraph(AgentState)
-
-         # Define nodes
-         self.workflow.add_node("agent", agent_node)
-         self.workflow.add_node("tools", tool_node)
-         self.workflow.add_node("reflection", reflection_node)
-
-         # Configure edges
-         self.workflow.set_entry_point("agent")
-
-         self.workflow.add_conditional_edges(
-             "agent",
-             lambda state: "tools" if state.get("messages")[-1].tool_calls else "end",
-             {"tools": "tools", "end": END}
-         )
-
-         self.workflow.add_conditional_edges(
-             "tools",
-             lambda state: "reflection" if state.get("interaction_warnings") else "agent",
-             {"reflection": "reflection", "agent": "agent"}
-         )
-
-         self.workflow.add_edge("reflection", "agent")
-
-         self.app = self.workflow.compile()
-
-     def consult(self, initial_state: Dict) -> Dict:
-         """Execute full consultation workflow"""
          try:
-             return self.app.invoke(
-                 initial_state,
-                 {"recursion_limit": MAX_ITERATIONS + 2}
-             )
          except Exception as e:
-             logger.error(f"Consultation failed: {str(e)}")
              return {
-                 "error": str(e),
-                 "trace": traceback.format_exc(),
-                 "done": True
              }
-
- # ── Example Usage ────────────────────────────────────────────────────
- if __name__ == "__main__":
-     agent = ClinicalAgent()
-
-     initial_state = {
-         "messages": [HumanMessage(content="Patient presents with chest pain")],
-         "patient_data": {
-             "age": 45,
-             "vitals": {"bp": "150/95", "hr": 110}
-         },
-         "done": False,
-         "iterations": 0
-     }
-
-     result = agent.consult(initial_state)
-     print("Final State:", json.dumps(result, indent=2))
 
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")

  if not all([UMLS_API_KEY, GROQ_API_KEY, TAVILY_API_KEY]):
+     logger.error("Missing one or more required API keys: UMLS_API_KEY, GROQ_API_KEY, TAVILY_API_KEY")
+     raise RuntimeError("Missing required API keys")

  # ── Agent Configuration ──────────────────────────────────────────────
+ AGENT_MODEL_NAME = "llama3-70b-8192"
+ AGENT_TEMPERATURE = 0.1
+ MAX_SEARCH_RESULTS = 3
+
  class ClinicalPrompts:
      SYSTEM_PROMPT = """
      You are SynapseAI, an expert AI clinical assistant engaged in an interactive consultation...
      [SYSTEM PROMPT CONTENT HERE]
      """

+ # ── Message Normalization Helpers ─────────────────────────────────────────
+ def wrap_message(msg: Any) -> AIMessage:
+     """
+     Ensures the given message is an AIMessage.
+     If it is a dict, extracts the 'content' field (or serializes the dict).
+     Otherwise, converts the message to a string.
+     """
+     if isinstance(msg, AIMessage):
+         return msg
+     elif isinstance(msg, dict):
+         return AIMessage(content=msg.get("content", json.dumps(msg)))
+     else:
+         return AIMessage(content=str(msg))
+
+ def normalize_messages(state: Dict[str, Any]) -> Dict[str, Any]:
+     """
+     Normalizes all messages in the state to be AIMessage objects.
+     """
+     state["messages"] = [wrap_message(m) for m in state.get("messages", [])]
+     return state
+
+ # ── Helper Functions ─────────────────────────────────────────────────────
+ UMLS_AUTH_ENDPOINT = "https://utslogin.nlm.nih.gov/cas/v1/api-key"
+ RXNORM_API_BASE = "https://rxnav.nlm.nih.gov/REST"
+ OPENFDA_API_BASE = "https://api.fda.gov/drug/label.json"
+
+ @lru_cache(maxsize=256)
+ def get_rxcui(drug_name: str) -> Optional[str]:
+     """Lookup RxNorm CUI for a given drug name."""
+     drug_name = (drug_name or "").strip()
+     if not drug_name:
+         return None
+     logger.info(f"Looking up RxCUI for '{drug_name}'")
+     try:
+         params = {"name": drug_name, "search": 1}
+         r = requests.get(f"{RXNORM_API_BASE}/rxcui.json", params=params, timeout=10)
+         r.raise_for_status()
+         ids = r.json().get("idGroup", {}).get("rxnormId")
+         if ids:
+             logger.info(f"Found RxCUI {ids[0]} for '{drug_name}'")
+             return ids[0]
+         r = requests.get(f"{RXNORM_API_BASE}/drugs.json", params={"name": drug_name}, timeout=10)
+         r.raise_for_status()
+         for grp in r.json().get("drugGroup", {}).get("conceptGroup", []):
+             props = grp.get("conceptProperties")
+             if props:
+                 logger.info(f"Found RxCUI {props[0]['rxcui']} via /drugs for '{drug_name}'")
+                 return props[0]["rxcui"]
+     except Exception:
+         logger.exception(f"Error fetching RxCUI for '{drug_name}'")
+     return None
+
+ @lru_cache(maxsize=128)
+ def get_openfda_label(rxcui: Optional[str] = None, drug_name: Optional[str] = None) -> Optional[Dict[str, Any]]:
+     """Fetch the OpenFDA label for a drug by RxCUI or name."""
+     if not (rxcui or drug_name):
+         return None
+     terms = []
+     if rxcui:
+         terms.append(f'spl_rxnorm_code:"{rxcui}" OR openfda.rxcui:"{rxcui}"')
+     if drug_name:
+         dn = drug_name.lower()
+         terms.append(f'(openfda.brand_name:"{dn}" OR openfda.generic_name:"{dn}")')
+     query = " OR ".join(terms)
+     logger.info(f"Looking up OpenFDA label with query: {query}")
+     try:
+         r = requests.get(OPENFDA_API_BASE, params={"search": query, "limit": 1}, timeout=15)
+         r.raise_for_status()
+         results = r.json().get("results", [])
+         if results:
+             return results[0]
+     except Exception:
+         logger.exception("Error fetching OpenFDA label")
+     return None
+
+ def search_text_list(texts: List[str], terms: List[str]) -> List[str]:
+     """Return highlighted snippets from a list of texts containing any of the search terms."""
+     snippets = []
+     lowers = [t.lower() for t in terms if t]
+     for text in texts or []:
+         tl = text.lower()
+         for term in lowers:
+             if term in tl:
+                 i = tl.find(term)
+                 start = max(0, i - 50)
+                 end = min(len(text), i + len(term) + 100)
+                 snippet = text[start:end]
+                 snippet = re.sub(f"({re.escape(term)})", r"**\1**", snippet, flags=re.IGNORECASE)
+                 snippets.append(f"...{snippet}...")
+                 break
+     return snippets
+
+ def parse_bp(bp: str) -> Optional[tuple[int, int]]:
+     """Parse 'SYS/DIA' blood pressure string into a (sys, dia) tuple."""
+     if m := re.match(r"(\d{1,3})\s*/\s*(\d{1,3})", (bp or "").strip()):
+         return int(m.group(1)), int(m.group(2))
+     return None
+
+ def check_red_flags(patient_data: Dict[str, Any]) -> List[str]:
+     """Identify immediate red flags from patient_data."""
+     flags: List[str] = []
+     hpi = patient_data.get("hpi", {})
+     vitals = patient_data.get("vitals", {})
+     syms = [s.lower() for s in hpi.get("symptoms", []) if isinstance(s, str)]
+     mapping = {
+         "chest pain": "Chest pain reported",
+         "shortness of breath": "Shortness of breath reported",
+         "severe headache": "Severe headache reported",
+         "syncope": "Syncope reported",
+         "hemoptysis": "Hemoptysis reported"
+     }
+     for term, desc in mapping.items():
+         if term in syms:
+             flags.append(f"Red Flag: {desc}.")
+     temp = vitals.get("temp_c")
+     hr = vitals.get("hr_bpm")
+     rr = vitals.get("rr_rpm")
+     spo2 = vitals.get("spo2_percent")
+     bp = parse_bp(vitals.get("bp_mmhg", ""))
+     if temp is not None and temp >= 38.5:
+         flags.append(f"Red Flag: Fever ({temp}°C).")
+     if hr is not None:
+         if hr >= 120:
+             flags.append(f"Red Flag: Tachycardia ({hr} bpm).")
+         if hr <= 50:
+             flags.append(f"Red Flag: Bradycardia ({hr} bpm).")
+     if rr is not None and rr >= 24:
+         flags.append(f"Red Flag: Tachypnea ({rr} rpm).")
+     if spo2 is not None and spo2 <= 92:
+         flags.append(f"Red Flag: Hypoxia ({spo2}%).")
+     if bp:
+         sys, dia = bp
+         if sys >= 180 or dia >= 110:
+             flags.append(f"Red Flag: Hypertensive urgency/emergency ({sys}/{dia} mmHg).")
+         if sys <= 90 or dia <= 60:
+             flags.append(f"Red Flag: Hypotension ({sys}/{dia} mmHg).")
+     return list(dict.fromkeys(flags))
+
+ def format_patient_data_for_prompt(data: Dict[str, Any]) -> str:
+     """Format patient_data dict into a markdown-like prompt section."""
+     if not data:
+         return "No patient data provided."
+     lines: List[str] = []
+     for section, value in data.items():
+         title = section.replace("_", " ").title()
+         if isinstance(value, dict) and any(value.values()):
+             lines.append(f"**{title}:**")
+             for k, v in value.items():
+                 if v:
+                     lines.append(f"- {k.replace('_', ' ').title()}: {v}")
+         elif isinstance(value, list) and value:
+             lines.append(f"**{title}:** {', '.join(map(str, value))}")
+         elif value:
+             lines.append(f"**{title}:** {value}")
+     return "\n".join(lines)
+
+ # ── Tool Input Schemas ─────────────────────────────────────────────────────
+ class LabOrderInput(BaseModel):
+     test_name: str = Field(...)
+     reason: str = Field(...)
+     priority: str = Field("Routine")
+
+ class PrescriptionInput(BaseModel):
+     medication_name: str = Field(...)
+     dosage: str = Field(...)
+     route: str = Field(...)
+     frequency: str = Field(...)
+     duration: str = Field("As directed")
+     reason: str = Field(...)
+
+ class InteractionCheckInput(BaseModel):
+     potential_prescription: str
+     current_medications: Optional[List[str]] = Field(None)
+     allergies: Optional[List[str]] = Field(None)
+
+ class FlagRiskInput(BaseModel):
+     risk_description: str = Field(...)
+     urgency: str = Field("High")
+
+ # ── Tool Implementations ───────────────────────────────────────────────────
+ @tool("order_lab_test", args_schema=LabOrderInput)
+ def order_lab_test(test_name: str, reason: str, priority: str = "Routine") -> str:
+     """
+     Place an order for a laboratory test.
+     """
+     logger.info(f"Ordering lab test: {test_name}, reason: {reason}, priority: {priority}")
+     return json.dumps({
+         "status": "success",
+         "message": f"Lab Ordered: {test_name} ({priority})",
+         "details": f"Reason: {reason}"
+     })
+
+ @tool("prescribe_medication", args_schema=PrescriptionInput)
+ def prescribe_medication(
+     medication_name: str,
+     dosage: str,
+     route: str,
+     frequency: str,
+     duration: str,
+     reason: str
+ ) -> str:
+     """
+     Prepare a medication prescription.
+     """
+     logger.info(f"Preparing prescription: {medication_name} {dosage}, route: {route}, freq: {frequency}")
+     return json.dumps({
+         "status": "success",
+         "message": f"Prescription Prepared: {medication_name} {dosage} {route} {frequency}",
+         "details": f"Duration: {duration}. Reason: {reason}"
+     })
+
+ @tool("check_drug_interactions", args_schema=InteractionCheckInput)
+ def check_drug_interactions(
+     potential_prescription: str,
+     current_medications: Optional[List[str]] = None,
+     allergies: Optional[List[str]] = None
+ ) -> str:
+     """
+     Check for drug–drug interactions and allergy risks.
+     """
+     logger.info(f"Checking interactions for: {potential_prescription}")
+     warnings: List[str] = []
+     pm = [m.lower().strip() for m in (current_medications or []) if m]
+     al = [a.lower().strip() for a in (allergies or []) if a]
+     if potential_prescription.lower().strip() in al:
+         warnings.append(f"CRITICAL ALLERGY: Patient allergic to '{potential_prescription}'.")
+     rxcui = get_rxcui(potential_prescription)
+     label = get_openfda_label(rxcui=rxcui, drug_name=potential_prescription)
+     if not (rxcui or label):
+         warnings.append(f"INFO: Could not identify '{potential_prescription}'. Checks may be incomplete.")
+     for section in ("contraindications", "warnings_and_cautions", "warnings"):
+         items = label.get(section) if label else None
+         if isinstance(items, list):
+             snippets = search_text_list(items, al)
+             if snippets:
+                 warnings.append(f"ALLERGY RISK ({section}): {'; '.join(snippets)}")
+     for med in pm:
+         mrxcui = get_rxcui(med)
+         mlabel = get_openfda_label(rxcui=mrxcui, drug_name=med)
+         for sec in ("drug_interactions",):
+             for src_label, src_name in ((label, potential_prescription), (mlabel, med)):
+                 items = src_label.get(sec) if src_label else None
+                 if isinstance(items, list):
+                     snippets = search_text_list(items, [med if src_name == potential_prescription else potential_prescription])
+                     if snippets:
+                         warnings.append(f"Interaction ({src_name} label): {'; '.join(snippets)}")
+     status = "warning" if warnings else "clear"
+     message = (
+         f"{len(warnings)} issue(s) found for '{potential_prescription}'."
+         if warnings else
+         f"No major interactions or allergy issues identified for '{potential_prescription}'."
+     )
+     return json.dumps({"status": status, "message": message, "warnings": warnings})
+
+ @tool("flag_risk", args_schema=FlagRiskInput)
+ def flag_risk(risk_description: str, urgency: str = "High") -> str:
+     """
+     Flag a clinical risk with given urgency.
+     """
+     logger.info(f"Flagging risk: {risk_description} (urgency={urgency})")
+     return json.dumps({
+         "status": "flagged",
+         "message": f"Risk '{risk_description}' flagged with {urgency} urgency."
+     })

+ # ── Include Tavily search tool ─────────────────────────────────────────────
+ search_tool = TavilySearchResults(max_results=MAX_SEARCH_RESULTS, name="tavily_search_results")
+ all_tools = [order_lab_test, prescribe_medication, check_drug_interactions, flag_risk, search_tool]
+
+ # ── LLM & Tool Executor ───────────────────────────────────────────────────
+ llm = ChatGroq(temperature=AGENT_TEMPERATURE, model=AGENT_MODEL_NAME)
+ model_with_tools = llm.bind_tools(all_tools)
+ tool_executor = ToolExecutor(all_tools)
+
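The graph nodes that follow key off the JSON envelope these tools return (a "status" field plus a "message" and, for the interaction check, a "warnings" list). As a rough sketch of that contract, a tool can be invoked directly; this is illustrative only and assumes a langchain-core version in which @tool objects are Runnables (and, for check_drug_interactions, live RxNav/OpenFDA access):

    # Illustrative only; not part of this commit.
    import json

    # flag_risk needs no network access, so it is the simplest probe of the envelope.
    raw = flag_risk.invoke({"risk_description": "Possible ACS", "urgency": "Immediate"})
    print(json.loads(raw))  # dict with status='flagged' and a confirmation message

    # check_drug_interactions returns the same envelope plus a "warnings" list;
    # tool_node (below) promotes a "warning" status into state["interaction_warnings"].
    raw = check_drug_interactions.invoke({
        "potential_prescription": "ibuprofen",
        "current_medications": ["warfarin"],
        "allergies": [],
    })
    print(json.loads(raw)["status"])  # "warning" or "clear", depending on the label lookups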
+ # ── State Definition ─────────────────────────────────────────────────────
  class AgentState(TypedDict):
      messages: List[Any]
      patient_data: Optional[Dict[str, Any]]
      summary: Optional[str]
      interaction_warnings: Optional[List[str]]
+     done: Optional[bool]
+     iterations: Optional[int]

+ # Helper to propagate state fields between nodes
  def propagate_state(new: Dict[str, Any], old: Dict[str, Any]) -> Dict[str, Any]:
+     for key in ["iterations", "done", "patient_data", "summary", "interaction_warnings"]:
+         if key in old and key not in new:
+             new[key] = old[key]
+     return new

+ # ── Graph Nodes ─────────────────────────────────────────────────────────
  def agent_node(state: AgentState) -> Dict[str, Any]:
+     state = normalize_messages(state)
      if state.get("done", False):
          return state
+     msgs = state.get("messages", [])
+     if not msgs or not isinstance(msgs[0], SystemMessage):
+         msgs = [SystemMessage(content=ClinicalPrompts.SYSTEM_PROMPT)] + msgs
+     logger.info(f"Invoking LLM with {len(msgs)} messages")
      try:
+         response = model_with_tools.invoke(msgs)
+         response = wrap_message(response)
+         new_state = {"messages": [response]}
+         return propagate_state(new_state, state)
      except Exception as e:
+         logger.exception("Error in agent_node")
+         new_state = {"messages": [wrap_message(AIMessage(content=f"Error: {e}"))]}
+         return propagate_state(new_state, state)

  def tool_node(state: AgentState) -> Dict[str, Any]:
+     state = normalize_messages(state)
+     if state.get("done", False):
          return state
+     messages_list = state.get("messages", [])
+     if not messages_list:
+         logger.warning("tool_node invoked with no messages")
+         new_state = {"messages": []}
+         return propagate_state(new_state, state)
+     last = wrap_message(messages_list[-1])
+     # Safely retrieve pending tool_calls
+     tool_calls = last.__dict__.get("tool_calls")
+     if not (isinstance(last, AIMessage) and tool_calls):
+         logger.warning("tool_node invoked without pending tool_calls")
+         new_state = {"messages": []}
+         return propagate_state(new_state, state)
+     calls = tool_calls
+     blocked_ids = set()
+     for call in calls:
+         if call["name"] == "prescribe_medication":
+             med = call["args"].get("medication_name", "").lower()
+             if not any(
+                 c["name"] == "check_drug_interactions" and
+                 c["args"].get("potential_prescription", "").lower() == med
+                 for c in calls
+             ):
+                 logger.warning(f"Blocking prescribe_medication for '{med}' without interaction check")
+                 blocked_ids.add(call["id"])
+     to_execute = [c for c in calls if c["id"] not in blocked_ids]
+     pd = state.get("patient_data", {})
+     for call in to_execute:
+         if call["name"] == "check_drug_interactions":
+             call["args"].setdefault("current_medications", pd.get("medications", {}).get("current", []))
+             call["args"].setdefault("allergies", pd.get("allergies", []))
+     messages: List[ToolMessage] = []
+     warnings: List[str] = []
+     try:
+         responses = tool_executor.batch(to_execute, return_exceptions=True)
+         for call, resp in zip(to_execute, responses):
+             if isinstance(resp, Exception):
+                 logger.exception(f"Error executing tool {call['name']}")
+                 content = json.dumps({"status": "error", "message": str(resp)})
+             else:
+                 content = str(resp)
+             if call["name"] == "check_drug_interactions":
+                 data = json.loads(content)
+                 if data.get("status") == "warning":
+                     warnings.extend(data.get("warnings", []))
+             messages.append(ToolMessage(content=content, tool_call_id=call["id"], name=call["name"]))
+     except Exception as e:
+         logger.exception("Critical error in tool_node")
+         for call in to_execute:
+             messages.append(ToolMessage(
+                 content=json.dumps({"status": "error", "message": str(e)}),
+                 tool_call_id=call["id"],
+                 name=call["name"]
+             ))
+     new_state = {"messages": messages, "interaction_warnings": warnings or None}
+     return propagate_state(new_state, state)
+
  def reflection_node(state: AgentState) -> Dict[str, Any]:
+     state = normalize_messages(state)
+     if state.get("done", False):
          return state
+     warns = state.get("interaction_warnings")
+     if not warns:
+         logger.warning("reflection_node called without warnings")
+         new_state = {"messages": []}
+         return propagate_state(new_state, state)
+     triggering = None
+     for msg in reversed(state.get("messages", [])):
+         wrapped = wrap_message(msg)
+         if isinstance(wrapped, AIMessage) and wrapped.__dict__.get("tool_calls"):
+             triggering = wrapped
+             break
+     if not triggering:
+         new_state = {"messages": [AIMessage(content="Internal Error: reflection context missing.")]}
+         return propagate_state(new_state, state)
+     prompt = (
+         "You are SynapseAI, performing a focused safety review of the following plan:\n\n"
+         f"{triggering.content}\n\n"
+         "Highlight any issues based on these warnings:\n" +
+         "\n".join(f"- {w}" for w in warns)
+     )
      try:
+         resp = llm.invoke([SystemMessage(content="Safety reflection"), HumanMessage(content=prompt)])
+         new_state = {"messages": [wrap_message(resp)]}
+         return propagate_state(new_state, state)
      except Exception as e:
+         logger.exception("Error during reflection")
+         new_state = {"messages": [AIMessage(content=f"Error during reflection: {e}")]}
+         return propagate_state(new_state, state)
+
+ # ── Routing Functions ────────────────────────────────────────────────────
+ def should_continue(state: AgentState) -> str:
+     state = normalize_messages(state)
+     state.setdefault("iterations", 0)
+     state["iterations"] += 1
+     logger.info(f"Iteration count: {state['iterations']}")
+     if state["iterations"] >= 4:
+         state.setdefault("messages", []).append(AIMessage(content="Final output: consultation complete."))
+         state["done"] = True
+         return "end_conversation_turn"
+     if not state.get("messages"):
+         state["done"] = True
+         return "end_conversation_turn"
+     last = wrap_message(state["messages"][-1])
+     if not isinstance(last, AIMessage):
+         state["done"] = True
+         return "end_conversation_turn"
+     if last.__dict__.get("tool_calls"):
+         return "continue_tools"
+     if "consultation complete" in last.content.lower():
+         state["done"] = True
+         return "end_conversation_turn"
+     state["done"] = False
+     return "agent"
+
+ def after_tools_router(state: AgentState) -> str:
      if state.get("interaction_warnings"):
          return "reflection"
+     return "end_conversation_turn"

+ # ── ClinicalAgent ─────────────────────────────────────────────────────────
  class ClinicalAgent:
      def __init__(self):
+         logger.info("Building ClinicalAgent workflow")
+         wf = StateGraph(AgentState)
+         wf.add_node("agent", agent_node)
+         wf.add_node("tools", tool_node)
+         wf.add_node("reflection", reflection_node)
+         wf.set_entry_point("agent")
+         wf.add_conditional_edges("agent", should_continue, {
+             "continue_tools": "tools",
+             "end_conversation_turn": END
+         })
+         wf.add_conditional_edges("tools", after_tools_router, {
+             "reflection": "reflection",
+             "end_conversation_turn": END
+         })
+         # Removed edge from reflection back to agent.
+         self.graph_app = wf.compile()
+         logger.info("ClinicalAgent ready")
+
+     def invoke_turn(self, state: Dict[str, Any]) -> Dict[str, Any]:
          try:
+             result = self.graph_app.invoke(state, {"recursion_limit": 100})
+             result.setdefault("summary", state.get("summary"))
+             result.setdefault("interaction_warnings", None)
+             return result
          except Exception as e:
+             logger.exception("Error during graph invocation")
              return {
+                 "messages": state.get("messages", []) + [AIMessage(content=f"Error: {e}")],
+                 "patient_data": state.get("patient_data"),
+                 "summary": state.get("summary"),
+                 "interaction_warnings": None
              }
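For reference, one turn of the updated workflow can be driven roughly as sketched below. This is not part of the commit: it assumes agent.py is importable as a module, that the three API keys are set in the environment, and that HumanMessage comes from langchain_core; the patient_data keys mirror what check_red_flags expects.

    # Illustrative only; not part of this commit.
    from langchain_core.messages import HumanMessage
    from agent import ClinicalAgent, check_red_flags

    patient_data = {
        "hpi": {"symptoms": ["chest pain", "shortness of breath"]},
        "vitals": {"bp_mmhg": "150/95", "hr_bpm": 110, "spo2_percent": 97},
        "medications": {"current": ["lisinopril"]},
        "allergies": ["penicillin"],
    }

    # The red-flag helper is a plain function and can be exercised directly.
    print(check_red_flags(patient_data))

    state = {
        "messages": [HumanMessage(content="45-year-old presenting with chest pain.")],
        "patient_data": patient_data,
        "summary": None,
        "interaction_warnings": None,
    }

    agent = ClinicalAgent()
    result = agent.invoke_turn(state)      # one pass through the compiled graph
    print(result["messages"][-1].content)  # last AI/tool message of the turn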