import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model directly from your Hugging Face repository
def load_model():
    model_repo = "jason-moore/deepseek-soap-full"  # Replace with your actual model repo
    print(f"Loading tokenizer from {model_repo}...")
    tokenizer = AutoTokenizer.from_pretrained(model_repo, trust_remote_code=True)
    print(f"Loading model from {model_repo}...")
    model = AutoModelForCausalLM.from_pretrained(
        model_repo,
        torch_dtype=torch.float16,
        device_map="auto",
        trust_remote_code=True,
    )
    return model, tokenizer
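
# If no GPU is available, device_map="auto" (which relies on the `accelerate`
# package) falls back to CPU, where float16 can be slow or unsupported.
# A plain float32 CPU load is one possible fallback (illustrative sketch only,
# not the configuration used by this app):
#
#     model = AutoModelForCausalLM.from_pretrained(
#         "jason-moore/deepseek-soap-full",
#         torch_dtype=torch.float32,
#         trust_remote_code=True,
#     )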

# Function to generate a SOAP note from a doctor-patient conversation
def generate_soap_note(doctor_patient_conversation):
    if not doctor_patient_conversation.strip():
        return "Please enter a doctor-patient conversation."

    # Format the prompt to match the style used during fine-tuning
    prompt = """Below is an instruction that describes a task, paired with an input that provides further context.
Write a response that appropriately completes the request. Pay special attention to the format of the response.
### Instruction:
You are a medical expert with advanced knowledge in clinical reasoning, diagnostics, and treatment planning.
Summarize the following medical conversation between Doctor and Patient into a SOAP note with the following structure:
SUBJECTIVE: This section focuses on the patient's perspective, including their chief complaint, symptoms, and any relevant personal or medical history.
OBJECTIVE: This section contains factual, measurable observations and data
collected during the encounter, such as vital signs, test results, and physical exam findings.
Only include information actually present in the conversation
ASSESSMENT: This section involves the healthcare provider's analysis and
interpretation of the subjective and objective data, leading to a diagnosis or a proposed problem.
PLAN: This section outlines the next steps in the patient's care, including treatment recommendations, follow-up plans, or referrals.
### Conversation:
{}
### Response:
{}"""
    formatted_prompt = prompt.format(doctor_patient_conversation, "")

    # Tokenize the prompt and generate the note
    inputs = tokenizer([formatted_prompt], return_tensors="pt").to(model.device)
    outputs = model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=1200,
        do_sample=True,  # temperature/top_p only take effect when sampling is enabled
        temperature=0.1,
        top_p=0.95,
    )

    # Decode and keep only the text after the "### Response:" marker
    response = tokenizer.batch_decode(outputs)[0]
    soap_note = response.split("### Response:")[1].strip() if "### Response:" in response else response
    soap_note = soap_note.replace("<|end▁of▁sentence|>", "").strip()  # drop DeepSeek's end-of-sequence token
    return soap_note

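# Quick local check (illustrative only; the module-level model and tokenizer are
# loaded just below, so any direct call must happen after that):
#
#     print(generate_soap_note("Doctor: How are you feeling? Patient: I have had a headache for two days."))
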
# Load model and tokenizer (this will run once when the app starts)
model, tokenizer = load_model()
# Sample conversation for the example
sample_conversation = """
Doctor: Good morning, how are you feeling today?
Patient: Not so great, doctor. I've had this persistent cough for about two weeks now.
Doctor: I'm sorry to hear that. Can you tell me more about the cough? Is it dry or are you coughing up anything?
Patient: It started as a dry cough, but for the past few days I've been coughing up some yellowish phlegm.
Doctor: Do you have any other symptoms like fever, chills, or shortness of breath?
Patient: I had a fever of 100.5°F two days ago. I've been feeling more tired than usual, and sometimes it's a bit hard to catch my breath after coughing a lot.
"""

# Create Gradio interface
demo = gr.Interface(
    fn=generate_soap_note,
    inputs=gr.Textbox(
        lines=15,
        placeholder="Enter doctor-patient conversation here...",
        label="Doctor-Patient Conversation",
        value=sample_conversation,
    ),
    outputs=gr.Textbox(
        label="Generated SOAP Note",
        lines=15,
    ),
    title="Medical SOAP Note Generator",
    description="Enter a doctor-patient conversation to generate a structured SOAP note using a fine-tuned DeepSeek-R1-Distill-Llama-8B model.",
    examples=[[sample_conversation]],
    allow_flagging="never",
)

# Launch the app
demo.launch()
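
# Optional: when running outside Hugging Face Spaces, demo.launch(share=True)
# creates a temporary public link; the plain launch() above is what Spaces expects.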