Update app.py
app.py CHANGED
@@ -44,7 +44,17 @@ def generate_doctor_response(history):
         return
 
     # Build prompt with context
-    prompt = """You are
+    prompt = """You are a highly knowledgeable and professional medical expert. Your role is to:
+1. Act as a doctor, nutritionist, and medical teacher simultaneously.
+2. Explain complex medical terms and conditions in simple, understandable language when asked.
+3. Provide accurate advice on lifestyle, diet, and general health when requested.
+4. Answer patient questions carefully, professionally, and concisely.
+5. Only ask follow-up question if required to gather relevant information before giving detailed recommendations.
+6. Be conversational, empathetic, and supportive, making the patient feel heard and guided.
+7. Provide disclaimers when needed: "⚕️ *This is AI-generated information and not a substitute for professional medical advice. Please consult a healthcare provider for proper diagnosis and treatment.*"
+
+Use this expertise to respond naturally to any patient message, balancing teaching, advice, and medical guidance.
+"""
     recent_history = history[-10:-1] if len(history) > 10 else history[:-1]
     for msg in recent_history:
         role = "Patient" if msg["role"] == "user" else "Doctor"
@@ -60,7 +70,7 @@ def generate_doctor_response(history):
         temperature=0.7,
         top_p=0.9,
         do_sample=True,
-        max_new_tokens=
+        max_new_tokens=500, # short answers
         pad_token_id=tokenizer.pad_token_id,
         eos_token_id=tokenizer.eos_token_id,
         repetition_penalty=1.2
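For context, here is a minimal sketch of how the two patched pieces fit together in generate_doctor_response. It assumes a standard Hugging Face transformers causal LM; the model name, the tokenize/decode steps, and the "Patient:/Doctor:" prompt formatting are illustrative assumptions and not taken from this commit — only the system prompt, the history window, and the generation arguments come from the diff above.

# Minimal sketch of the patched flow, not the app's verbatim code.
# MODEL_NAME and the prompt-formatting details are assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "gpt2"  # placeholder; the actual model is not shown in this diff
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
if tokenizer.pad_token_id is None:
    tokenizer.pad_token = tokenizer.eos_token  # make sure pad_token_id exists

def generate_doctor_response(history):
    if not history or history[-1]["role"] != "user":
        return ""

    # Build prompt with context: the system instructions added in this commit,
    # abbreviated here for space.
    prompt = """You are a highly knowledgeable and professional medical expert. ...
"""
    # Keep only the last few turns so the prompt stays short.
    recent_history = history[-10:-1] if len(history) > 10 else history[:-1]
    for msg in recent_history:
        role = "Patient" if msg["role"] == "user" else "Doctor"
        prompt += f"{role}: {msg['content']}\n"  # assumed formatting
    prompt += f"Patient: {history[-1]['content']}\nDoctor:"  # assumed formatting

    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            max_new_tokens=500,  # short answers, as in the patch
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
            repetition_penalty=1.2,
        )
    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

Because do_sample=True, both temperature and top_p shape the sampling distribution; repetition_penalty=1.2 discourages the model from echoing the long system prompt back verbatim, and max_new_tokens=500 simply caps the reply length.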