import gradio as gr
from utils.meldrx import MeldRxAPI
import json
import os
import tempfile
from datetime import datetime
import traceback
import logging
from huggingface_hub import InferenceClient # Import InferenceClient
from urllib.parse import urlparse, parse_qs # Import URL parsing utilities
from utils.callbackmanager import CallbackManager
from utils.prompts import system_instructions
# Import PDF utilities
from utils.pdfutils import PDFGenerator, generate_discharge_summary
# Import necessary libraries for new file types and AI analysis functions
import pydicom # For DICOM
import hl7 # For HL7
from xml.etree import ElementTree # For XML and CCDA
from pypdf import PdfReader # For PDF
import csv # For CSV
import io # For IO operations
from PIL import Image # For image handling
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Initialize Inference Client - ensure the HF_TOKEN environment variable is set to your Hugging Face API token
HF_TOKEN = os.getenv("HF_TOKEN") # Or replace with your actual token string
if not HF_TOKEN:
raise ValueError(
"HF_TOKEN environment variable not set. Please set your Hugging Face API token."
)
client = InferenceClient(api_key=HF_TOKEN)
model_name = "meta-llama/Llama-3.3-70B-Instruct" # Specify the model to use
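# Note: the OpenAI-style client.chat.completions.create(...) calls below assume a recent
# huggingface_hub release that exposes the chat-completions interface; older releases only
# provide InferenceClient.chat_completion(...).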
def generate_pdf_from_form(
    first_name, last_name, middle_initial, dob, age, sex, address, city, state,
    zip_code, doctor_first_name, doctor_last_name, doctor_middle_initial,
    hospital_name, doctor_address, doctor_city, doctor_state, doctor_zip,
    admission_date, referral_source, admission_method, discharge_date,
    discharge_reason, date_of_death, diagnosis, procedures, medications,
    preparer_name, preparer_job_title,
):
"""Generate a PDF discharge form using the provided data"""
# Create PDF generator
pdf_gen = PDFGenerator()
# Format data for PDF generation
patient_info = {
"first_name": first_name,
"last_name": last_name,
"dob": dob,
"age": age,
"sex": sex,
"mobile": "", # Not collected in the form
"address": address,
"city": city,
"state": state,
"zip": zip_code,
}
discharge_info = {
"date_of_admission": admission_date,
"date_of_discharge": discharge_date,
"source_of_admission": referral_source,
"mode_of_admission": admission_method,
"discharge_against_advice": "Yes"
if discharge_reason == "Discharge Against Advice"
else "No",
}
diagnosis_info = {
"diagnosis": diagnosis,
"operation_procedure": procedures,
"treatment": "", # Not collected in the form
"follow_up": "", # Not collected in the form
}
medication_info = {
"medications": [medications] if medications else [],
"instructions": "", # Not collected in the form
}
prepared_by = {
"name": preparer_name,
"title": preparer_job_title,
"signature": "", # Not collected in the form
}
# Generate PDF
    pdf_buffer = pdf_gen.generate_discharge_form(
        patient_info, discharge_info, diagnosis_info, medication_info, prepared_by,
    )
# Create temporary file to save the PDF
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
temp_file.write(pdf_buffer.read())
temp_file_path = temp_file.name
temp_file.close()
return temp_file_path
def generate_pdf_from_meldrx(patient_data):
"""Generate a PDF using patient data from MeldRx"""
if isinstance(patient_data, str):
# If it's a string (error message or JSON string), try to parse it
try:
patient_data = json.loads(patient_data)
        except json.JSONDecodeError:
return None, "Invalid patient data format"
if not patient_data:
return None, "No patient data available"
try:
# For demonstration, we'll use the first patient in the list if it's a list
if isinstance(patient_data, list) and len(patient_data):
patient = patient_data[0]
else:
patient = patient_data
# Extract patient info
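        # Assumption: "name" is a single dict here; real FHIR Patient resources usually
        # carry name as a list of HumanName objects, so adapt this lookup if needed.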
patient_info = {
"name": f"{patient.get('name', {}).get('given', [''])[0]} {patient.get('name', {}).get('family', '')}",
"dob": patient.get("birthDate", "Unknown"),
"patient_id": patient.get("id", "Unknown"),
"admission_date": datetime.now().strftime("%Y-%m-%d"), # Mock data
"physician": "Dr. Provider", # Mock data
}
# Mock LLM-generated content - This part needs to be replaced with actual AI generation if desired for MeldRx PDF
llm_content = {
"diagnosis": "Diagnosis information would be generated by AI based on patient data from MeldRx.",
"treatment": "Treatment summary would be generated by AI based on patient data from MeldRx.",
"medications": "Medication list would be generated by AI based on patient data from MeldRx.",
"follow_up": "Follow-up instructions would be generated by AI based on patient data from MeldRx.",
"special_instructions": "Special instructions would be generated by AI based on patient data from MeldRx.",
}
# Create discharge summary - Using No-AI PDF generation for now, replace with AI-content generation later
output_dir = tempfile.mkdtemp()
pdf_path = generate_discharge_summary(
patient_info, llm_content, output_dir
) # Still using No-AI template
return pdf_path, "PDF generated successfully (No AI Content in PDF yet)" # Indicate No-AI content
except Exception as e:
return None, f"Error generating PDF: {str(e)}"
# CALLBACK_MANAGER = CallbackManager(
# redirect_uri="https://multitransformer-discharge-guard.hf.space/callback",
# client_secret=None,
# )
def generate_ai_discharge_content(patient_data):
"""Placeholder function to generate AI content for discharge summary.
Replace this with actual AI call using InferenceClient and patient_data."""
try:
patient_name = (
f"{patient_data['entry'][0]['resource']['name'][0]['given'][0]} {patient_data['entry'][0]['resource']['name'][0]['family']}"
if patient_data.get("entry")
else "Unknown Patient"
)
        prompt_text = f"""{system_instructions}\n\nGenerate discharge summary content (diagnosis, treatment, medications, follow-up instructions, special instructions) for patient: {patient_name}. Base the content on the available patient data (detailed data is not yet passed in this prompt). Focus on creating a clinically relevant and informative summary. Remember this is for informational purposes only and NOT medical advice."""
response = client.chat.completions.create(
model=model_name,
messages=[{"role": "user", "content": prompt_text}],
temperature=0.6, # Adjust temperature as needed for content generation
max_tokens=1024, # Adjust max_tokens as needed
top_p=0.9,
)
ai_content = response.choices[0].message.content
# Basic parsing of AI content - improve this based on desired output structure from LLM
llm_content = {
"diagnosis": "AI Generated Diagnosis (Placeholder):\n"
+ extract_section(ai_content, "Diagnosis"), # Example extraction - refine based on LLM output
"treatment": "AI Generated Treatment (Placeholder):\n"
+ extract_section(ai_content, "Treatment"),
"medications": "AI Generated Medications (Placeholder):\n"
+ extract_section(ai_content, "Medications"),
"follow_up": "AI Generated Follow-up (Placeholder):\n"
+ extract_section(ai_content, "Follow-up Instructions"),
"special_instructions": "AI Generated Special Instructions (Placeholder):\n"
+ extract_section(ai_content, "Special Instructions"),
}
return llm_content
except Exception as e:
logger.error(f"Error generating AI discharge content: {e}")
return None
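# The dict returned above uses the same keys as the mock llm_content in generate_pdf_from_meldrx,
# so it can be passed directly to generate_discharge_summary via generate_pdf_from_meldrx_with_ai_content.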
def extract_section(ai_content, section_title):
"""Simple placeholder function to extract section from AI content.
Improve this with more robust parsing based on LLM output format."""
start_marker = f"**{section_title}:**"
end_marker = "\n\n" # Adjust based on typical LLM output structure
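    # Assumes the model emits markdown-style bold headings such as "**Diagnosis:**",
    # with sections separated by blank lines.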
start_index = ai_content.find(start_marker)
if start_index != -1:
start_index += len(start_marker)
end_index = ai_content.find(end_marker, start_index)
if end_index != -1:
return ai_content[start_index:end_index].strip()
return "Not found in AI output."
def generate_pdf_from_meldrx_with_ai_content(patient_data, llm_content):
"""Generate a PDF using patient data from MeldRx and AI-generated content."""
if isinstance(patient_data, str):
try:
patient_data = json.loads(patient_data)
        except json.JSONDecodeError:
return None, "Invalid patient data format"
if not patient_data:
return None, "No patient data available"
try:
if isinstance(patient_data, list) and len(patient_data):
patient = patient_data[0]
else:
patient = patient_data
patient_info = {
"name": f"{patient.get('name', {}).get('given', [''])[0]} {patient.get('name', {}).get('family', '')}",
"dob": patient.get("birthDate", "Unknown"),
"patient_id": patient.get("id", "Unknown"),
"admission_date": datetime.now().strftime("%Y-%m-%d"), # Mock data
"physician": "Dr. AI Provider", # Mock data - Indicate AI generated
}
output_dir = tempfile.mkdtemp()
pdf_path = generate_discharge_summary(
patient_info, llm_content, output_dir
) # Using AI content now
return pdf_path, "PDF generated successfully with AI Content" # Indicate AI content
except Exception as e:
return None, f"Error generating PDF with AI content: {str(e)}"
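# Illustrative usage sketch (hypothetical data; not wired into the UI here). Note that the two
# functions expect slightly different shapes in this version of the code:
#   bundle = {"entry": [{"resource": {"name": [{"given": ["Jane"], "family": "Doe"}]}}]}
#   llm_content = generate_ai_discharge_content(bundle)  # expects a FHIR-style bundle
#   patient = {"name": {"given": ["Jane"], "family": "Doe"}, "id": "123", "birthDate": "1980-01-01"}
#   if llm_content:
#       pdf_path, status = generate_pdf_from_meldrx_with_ai_content(patient, llm_content)  # expects a flat patient dict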
def analyze_dicom_file_with_ai(dicom_file_path): # Modified to accept file path
"""Analyzes DICOM file metadata using Discharge Guard AI."""
try:
        dicom_file = pydicom.dcmread(dicom_file_path.name)  # Gradio file objects expose the temp path via .name
dicom_metadata_json = dicom_file.to_json_dict()
prediction_response, trace_data_dicom_ai = analyze_dicom_content_ai(
dicom_metadata_json
) # Call content analysis
if prediction_response:
            report = f"Discharge Guard AI Analysis of DICOM Metadata:\n\nDICOM Metadata Analysis Report:\n{prediction_response}\n\n**Disclaimer:** The Discharge Guard AI-generated analysis is for conceptual informational purposes only and is **NOT medical advice.** Analysis is based on DICOM *metadata* and not image interpretation."
return report
else:
error_message = f"AI Analysis from DICOM Metadata: No predictions generated or analysis encountered an issue."
if trace_data_dicom_ai and "error" in trace_data_dicom_ai:
error_message += f"\nAI Analysis Failed: {trace_data_dicom_ai['error']}"
return error_message
except Exception as e:
return f"Error during DICOM file processing in analyze_dicom_file_with_ai: {e}"
def analyze_dicom_content_ai(dicom_metadata_json): # Copied from your code
"""Analyzes DICOM metadata JSON content using Discharge Guard AI."""
prompt_text = f"""{system_instructions} \n\n Perform a **deep and comprehensive analysis** of the following DICOM metadata in JSON format to provide a **structured summary and identify potential clinically relevant information with deep insights**. Focus not just on summarizing fields, but on **interpreting their clinical significance, identifying subtle patterns, and drawing inferences about the study's implications**. Think like an experienced radiologist reviewing this metadata for crucial diagnostic clues. Remember this is metadata, not the image itself, so focus on what can be gleaned from the data itself. Provide a "**Deep DICOM Metadata Analysis Report**". Important: Use the API Directories fhir endpoints FROM THIS LINK: https://app.meldrx.com/api/directories/fhir/endpoints.
**DICOM Metadata (JSON):**
```json
{json.dumps(dicom_metadata_json, indent=2)}
```
* Remember, this deep analysis is for conceptual informational purposes only and **NOT medical advice.** Focus on deep summarization and structuring the extracted metadata in a highly clinically relevant way.
"""
trace_data_detail_dicom_analysis = {
"prompt": "DICOM Metadata Analysis Request",
"language": "English",
"response_length": "Comprehensive",
"model_name": "Discharge Guard v1.0",
"generated_text": "N/A",
"input_file_types": ["DICOM Metadata JSON"],
"mode": "DICOM Metadata Analysis",
"candidates": [],
"usage_metadata": {},
"prompt_feedback": "N/A",
}
try:
response = client.chat.completions.create(
model=model_name,
messages=[{"role": "user", "content": prompt_text}],
temperature=0.4,
max_tokens=1024, # Adjust as needed
top_p=0.9,
)
the_response = response.choices[0].message.content
return the_response, trace_data_detail_dicom_analysis
except Exception as e:
error_message = f"AI Analysis Error in analyze_dicom_content_ai (DICOM Metadata): {e}"
trace_data_detail_dicom_analysis["error"] = f"AI Analysis Error: {e}"
return error_message, trace_data_detail_dicom_analysis
# Additional AI analysis helpers (analyze_hl7_file_with_ai, analyze_cda_xml_file_with_ai, analyze_pdf_file_with_ai, analyze_csv_file_with_ai) follow; each reads the uploaded file via its Gradio file path before calling the model.
def analyze_hl7_file_with_ai(hl7_file_path):
"""Analyzes HL7 file content using Discharge Guard AI."""
try:
with open(hl7_file_path.name, "r") as f: # Open file using path, access file through .name for Gradio UploadedFile
hl7_message_raw = f.read()
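        # The raw HL7 text is sent to the model as-is; the imported hl7 package is not used for parsing here.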
prediction_response, trace_data_hl7_ai = analyze_hl7_content_ai(
hl7_message_raw
)
if prediction_response:
            report = f"Discharge Guard AI Analysis of HL7 Message:\n\nHL7 Message Analysis Report:\n{prediction_response}\n\n**Disclaimer:** The Discharge Guard AI-generated analysis is for conceptual informational purposes only and is **NOT medical advice.** Analysis is based on HL7 message content."
return report
else:
error_message = f"AI Analysis from HL7 Message: No predictions generated or analysis encountered an issue."
if trace_data_hl7_ai and "error" in trace_data_hl7_ai:
error_message += f"AI Analysis Failed: {trace_data_hl7_ai['error']}"
return error_message
except Exception as e:
return f"Error during HL7 file processing in analyze_hl7_file_with_ai: {e}"
def analyze_hl7_content_ai(hl7_message_string): # Copied from your code
"""Analyzes HL7 message content using Discharge Guard AI."""
prompt_text = f"""{system_instructions} \n\n Conduct a **deep and thorough analysis** of the following HL7 message content to provide a **structured summary and identify key clinical information with deep understanding**. Go beyond basic parsing; aim to **interpret the clinical narrative** embedded within the HL7 message. **Engage in deep search to contextualize medical codes and terminology**. Provide a "**Comprehensive HL7 Message Analysis Report**".
**HL7 Message Content:**
```hl7
{hl7_message_string}
```
* Remember, this deep analysis is for conceptual informational purposes only and **NOT medical advice.** Focus on deep summarization and structuring the extracted data in a highly clinically relevant way based on the HL7 content.
"""
trace_data_detail_hl7_analysis = {
"prompt": "HL7 Message Analysis Request",
"language": "English",
"response_length": "Comprehensive",
"model_name": "Discharge Guard v1.0",
"generated_text": "N/A",
"input_file_types": ["HL7 Message"],
"mode": "HL7 Message Analysis",
"candidates": [],
"usage_metadata": {},
"prompt_feedback": "N/A",
}
try:
response = client.chat.completions.create(
model=model_name,
messages=[{"role": "user", "content": prompt_text}],
temperature=0.4,
max_tokens=1024, # Adjust as needed
top_p=0.9,
)
the_response = response.choices[0].message.content
return the_response, trace_data_detail_hl7_analysis
except Exception as e:
error_message = f"AI Analysis Error in analyze_hl7_content_ai (HL7 Message): {e}"
trace_data_detail_hl7_analysis["error"] = f"AI Analysis Error: {e}"
return error_message, trace_data_detail_hl7_analysis
def analyze_cda_xml_file_with_ai(cda_xml_file_path): # Modified to accept file path
"""Analyzes generic CDA or XML file content using Discharge Guard AI (more generalized version) Important: Use the API Directories fhir endpoints FROM THIS LINK: https://app.meldrx.com/api/directories/fhir/endpoints."""
try:
        with open(cda_xml_file_path.name, "r") as f:  # Gradio file objects expose the temp path via .name
            cda_xml_content = f.read()
prediction_response, trace_data_cda_xml_ai = analyze_cda_xml_content_ai(
cda_xml_content
)
if prediction_response:
            report = f"Discharge Guard AI Analysis of Medical XML/CDA Data:\n\nMedical Document Analysis Report:\n{prediction_response}\n\n**Disclaimer:** The Discharge Guard AI-generated analysis is for conceptual informational purposes only and is **NOT medical advice.** Analysis is based on XML/CDA content."
return report
else:
error_message = f"AI Analysis from XML/CDA Data: No predictions generated or analysis encountered an issue."
if trace_data_cda_xml_ai and "error" in trace_data_cda_xml_ai:
error_message += f"AI Analysis Failed: {trace_data_cda_xml_ai['error']}"
return error_message
except Exception as e:
return f"Error during XML/CDA file processing in analyze_cda_xml_file_with_ai: {e}"
def analyze_cda_xml_content_ai(cda_xml_content): # Copied from your code
"""Analyzes generic CDA or XML content using Discharge Guard AI (more generalized version)."""
prompt_text = f"""{system_instructions} \n\n Analyze the following medical XML/CDA content to provide a **structured and comprehensive patient data analysis**, similar to how a medical professional would review a patient's chart or a clinical document. You need to parse the XML structure yourself to extract the relevant information. Use bullet points, tables, or numbered steps for complex tasks. Provide a "Medical Document Analysis" report.
**Instructions for Discharge Guard AI:**
1. **Parse the XML content above.** Understand the XML structure to identify sections that are relevant to clinical information. For CDA specifically, look for sections like Problems, Medications, Allergies, Encounters, Results, and Vital Signs. For generic medical XML, adapt based on the tags present.
2. **Extract and Summarize Key Medical Information:** Focus on extracting the following information if present in the XML:
* **Patient Demographics Summary:** (If available, summarize demographic details)
... (rest of your prompt_text for CDA/XML analysis) ...
* Remember, this analysis is for conceptual informational purposes only and **NOT medical advice.** Focus on summarizing and structuring the extracted data in a clinically relevant way based on the XML/CDA content.
"""
trace_data_detail_cda_xml_analysis = {
"prompt": "Generic CDA/XML Analysis Request",
"language": "English",
"response_length": "Comprehensive",
"model_name": "Discharge Guard v1.0",
"generated_text": "N/A",
"input_file_types": ["CDA/XML"],
"mode": "Generic XML/CDA Analysis",
"candidates": [],
"usage_metadata": {},
"prompt_feedback": "N/A",
}
try:
response = client.chat.completions.create(
model=model_name,
messages=[{"role": "user", "content": prompt_text}],
temperature=0.4,
max_tokens=1024, # Adjust as needed
top_p=0.9,
)
the_response = response.choices[0].message.content
return the_response, trace_data_detail_cda_xml_analysis
except Exception as e:
error_message = f"AI Analysis Error in analyze_cda_xml_content_ai (Generic XML/CDA): {e}"
trace_data_detail_cda_xml_analysis["error"] = f"AI Analysis Error: {e}"
return error_message, trace_data_detail_cda_xml_analysis
def analyze_pdf_file_with_ai(pdf_file_path): # Modified to accept file path
"""Analyzes PDF file content using Discharge Guard AI."""
try:
        with open(pdf_file_path.name, "rb") as f:  # Open in binary mode for PdfReader; Gradio file objects expose the temp path via .name
            pdf_reader = PdfReader(f)
            text_content = ""
            for page in pdf_reader.pages:
                text_content += page.extract_text() or ""  # extract_text() can return None for image-only pages
prediction_response, trace_data_pdf_ai = analyze_pdf_content_ai(
text_content
)
if prediction_response:
            report = f"Discharge Guard AI Analysis of PDF Content:\n\nMedical Report Analysis Report:\n{prediction_response}\n\n**Disclaimer:** The Discharge Guard AI-generated analysis is for conceptual informational purposes only and is **NOT medical advice.** Analysis is based on PDF text content."
return report
else:
error_message = f"AI Analysis from PDF Content: No predictions generated or analysis encountered an issue."
if trace_data_pdf_ai and "error" in trace_data_pdf_ai:
error_message += f"AI Analysis Failed: {trace_data_pdf_ai['error']}"
return error_message
except Exception as e:
return f"Error during PDF file processing in analyze_pdf_file_with_ai: {e}"
def analyze_pdf_content_ai(pdf_text_content): # Copied from your code
"""Analyzes PDF text content using Discharge Guard AI."""
prompt_text = f"""{system_instructions} \n\n Analyze the following medical PDF text content to provide a **structured summary and identify key clinical information**. Focus on patient demographics, medical history, findings, diagnoses, medications, recommendations, and any important clinical details conveyed in the document. Provide a "Medical Report Analysis" report.
**Medical PDF Text Content:**
```text
{pdf_text_content}
```
* Remember, this analysis is for conceptual informational purposes only and **NOT medical advice.** Focus on summarizing and structuring the extracted data in a clinically relevant way based on the PDF content.
"""
trace_data_detail_pdf_analysis = {
"prompt": "PDF Text Analysis Request",
"language": "English",
"response_length": "Comprehensive",
"model_name": "Discharge Guard v1.0",
"generated_text": "N/A",
"input_file_types": ["PDF Text"],
"mode": "PDF Text Analysis",
"candidates": [],
"usage_metadata": {},
"prompt_feedback": "N/A",
}
try:
response = client.chat.completions.create(
model=model_name,
messages=[{"role": "user", "content": prompt_text}],
temperature=0.4,
max_tokens=1024, # Adjust as needed
top_p=0.9,
)
the_response = response.choices[0].message.content
return the_response, trace_data_detail_pdf_analysis
except Exception as e:
error_message = f"AI Analysis Error in analyze_pdf_content_ai (PDF Text): {e}"
trace_data_detail_pdf_analysis["error"] = f"AI Analysis Error: {e}"
return error_message, trace_data_detail_pdf_analysis
def analyze_csv_file_with_ai(csv_file_path): # Modified to accept file path
"""Analyzes CSV file content using Discharge Guard AI."""
try:
        # Read the uploaded CSV from its temp path, matching how the other analyzers access Gradio file objects.
        with open(csv_file_path.name, "r", encoding="utf-8") as f:
            csv_content = f.read()
prediction_response, trace_data_csv_ai = analyze_csv_content_ai(csv_content)
if prediction_response:
            report = f"Discharge Guard AI Analysis of CSV Data:\n\nData Analysis Report:\n{prediction_response}\n\n**Disclaimer:** The Discharge Guard AI-generated analysis is for conceptual informational purposes only and is **NOT medical advice.** Analysis is based on CSV data content."
return report
else:
error_message = f"AI Analysis from CSV Data: No predictions generated or analysis encountered an issue."
if trace_data_csv_ai and "error" in trace_data_csv_ai:
error_message += f"AI Analysis Failed: {trace_data_csv_ai['error']}"
return error_message
except Exception as e:
return f"Error during CSV file processing in analyze_csv_file_with_ai: {e}"
def analyze_csv_content_ai(csv_content_string): # Copied from your code
"""Analyzes CSV content (string) using Discharge Guard AI."""
prompt_text = f"""{system_instructions} \n\n Analyze the following medical CSV data to provide a **structured summary and identify potential clinical insights**. Assume the CSV represents patient-related medical data. Focus on understanding the columns, summarizing key data points, identifying trends or patterns, and noting any potential clinical significance of the data. Provide a "Data Analysis" report.
**Medical CSV Data:**
```csv
{csv_content_string}
```
* Remember, this analysis is for conceptual informational purposes only and **NOT medical advice.** Focus on summarizing and structuring the data in a clinically relevant way based on the CSV content.
"""
trace_data_detail_csv_analysis = {
"prompt": "CSV Data Analysis Request",
"language": "English",
"response_length": "Comprehensive",
"model_name": "Discharge Guard v1.0",
"generated_text": "N/A",
"input_file_types": ["CSV Data"],
"mode": "CSV Data Analysis",
"candidates": [],
"usage_metadata": {},
"prompt_feedback": "N/A",
}
try:
response = client.chat.completions.create(
model=model_name,
messages=[{"role": "user", "content": prompt_text}],
temperature=0.4,
max_tokens=1024, # Adjust as needed
top_p=0.9,
)
the_response = response.choices[0].message.content
return the_response, trace_data_detail_csv_analysis
except Exception as e:
error_message = f"AI Analysis Error in analyze_csv_content_ai (CSV Data): {e}"
trace_data_detail_csv_analysis["error"] = f"AI Analysis Error: {e}"
return error_message, trace_data_detail_csv_analysis