lukiod committed
Commit 4ef990b • 1 Parent(s): 0e63fa4

Create app.py

Files changed (1)
  1. app.py +259 -0
app.py ADDED
@@ -0,0 +1,259 @@
+import gradio as gr
+import pandas as pd
+from datetime import datetime
+from huggingface_hub import InferenceClient
+import json
+import os
+from typing import Optional
+
+# Initialize LLM client
+def init_llm():
+    try:
+        client = InferenceClient(
+            model="meta-llama/Llama-2-7b-chat-hf",
+            token=os.getenv("HF_TOKEN")
+        )
+        return client, True
+    except Exception as e:
+        print(f"LLM initialization failed: {str(e)}")
+        return None, False
+
+llm_client, has_llm = init_llm()
+
+# Global storage (in production, use a database)
+metrics_data = []
+medication_data = []
+chat_history = []
+
+# Background notes keyed by health topic; used to add context in get_health_advice
+HEALTH_KNOWLEDGE = {}
+
+def generate_prompt(instruction: str, context: str = "") -> str:
+    """Generate prompt for LLaMA format"""
+    system_prompt = """You are a helpful healthcare assistant. Provide accurate information while noting
+    you're not a replacement for professional medical advice. Always include relevant medical disclaimers."""
+
+    return f"""<s>[INST] <<SYS>>{system_prompt}<</SYS>>
+
+{context}
+
+{instruction} [/INST]"""
+
+def get_llm_response(prompt: str, temperature: float = 0.7) -> str:
+    """Get response from LLM"""
+    if not has_llm:
+        return "Service is running in fallback mode. Using basic response templates."
+
+    try:
+        formatted_prompt = generate_prompt(prompt)
+        response = llm_client.text_generation(
+            formatted_prompt,
+            max_new_tokens=512,
+            temperature=temperature,
+            repetition_penalty=1.1
+        )
+        return response
+    except Exception as e:
+        return f"Error accessing LLM: {str(e)}"
+
+def analyze_symptoms(symptoms: str) -> str:
+    """Analyze symptoms using LLM"""
+    if not symptoms:
+        return "Please describe your symptoms."
+
+    prompt = f"""Analyze these symptoms and provide a detailed assessment:
+Symptoms: {symptoms}
+
+Please provide:
+1. Risk Level (Low/Medium/High)
+2. Possible causes
+3. Recommendations
+4. Whether immediate medical attention is needed
+
+Format the response in a clear, structured way."""
+
+    response = get_llm_response(prompt, temperature=0.3)
+    return response if response else "Unable to analyze symptoms. Please try again."
+
+def get_health_advice(topic: str, question: str) -> str:
+    """Get health advice using LLM"""
+    if not question:
+        return "Please enter a question."
+
+    context = f"Topic: {topic}\nContext: {HEALTH_KNOWLEDGE.get(topic, '')}"
+    prompt = f"""Based on this health topic and context, answer the following question:
+Question: {question}
+
+Provide a clear, informative answer with relevant health recommendations."""
+
+    response = get_llm_response(prompt)
+    return response if response else "Unable to provide advice at the moment. Please try again."
+
+def chat_with_assistant(message: str, history: list) -> str:
+    """Chat with the health assistant"""
+    if not message:
+        return ""
+
+    # Format history for context
+    context = "\n".join([f"User: {h[0]}\nAssistant: {h[1]}" for h in history[-3:]])
+
+    prompt = f"""Previous conversation:
+{context}
+
+User's new message: {message}
+
+Provide a helpful response about their health question or concern."""
+
+    response = get_llm_response(prompt)
+    return response if response else "I apologize, but I'm unable to process your request at the moment."
+
+# Gradio Interface
+with gr.Blocks(title="Virtual Health Assistant", theme=gr.themes.Soft()) as demo:
+    gr.Markdown(
+        """
+        # 🏥 Virtual Health Assistant
+        Powered by AI to provide health information, track metrics, and manage medications.
+
+        ⚕️ This is an AI assistant and not a replacement for professional medical advice.
+        """
+    )
+
+    with gr.Tabs():
+        # Chat Interface Tab
+        with gr.Tab("💬 Health Chat"):
+            chatbot = gr.Chatbot(label="Chat History")
+            msg = gr.Textbox(label="Type your message", placeholder="Ask about health topics...")
+            clear = gr.Button("Clear Chat")
+
+            def respond(message, history):
+                bot_message = chat_with_assistant(message, history)
+                history.append((message, bot_message))
+                return "", history
+
+            msg.submit(respond, [msg, chatbot], [msg, chatbot])
+            clear.click(lambda: None, None, chatbot, queue=False)
+
+        # Symptom Checker Tab
+        with gr.Tab("🔍 Symptom Checker"):
+            with gr.Row():
+                with gr.Column():
+                    symptoms_input = gr.Textbox(
+                        label="Describe your symptoms",
+                        placeholder="Enter your symptoms here...",
+                        lines=3
+                    )
+                    symptoms_button = gr.Button("Analyze Symptoms")
+                    symptoms_output = gr.Markdown(label="Analysis")
+
+                with gr.Column():
+                    gr.Markdown("""
+                    ### How to use:
+                    1. Describe your symptoms in detail
+                    2. Include duration and severity
+                    3. Mention any relevant medical history
+
+                    ⚠️ For emergencies, call emergency services immediately
+                    """)
+
+            symptoms_button.click(
+                analyze_symptoms,
+                inputs=[symptoms_input],
+                outputs=[symptoms_output]
+            )
+
+        # Health Metrics Tab
+        with gr.Tab("📊 Health Metrics"):
+            with gr.Row():
+                with gr.Column():
+                    weight_input = gr.Number(label="Weight (kg)")
+                    steps_input = gr.Number(label="Steps")
+                    sleep_input = gr.Number(label="Hours Slept")
+                    metrics_button = gr.Button("Save Metrics")
+                    metrics_output = gr.Textbox(
+                        label="Status",
+                        interactive=False
+                    )
+
+                with gr.Column():
+                    view_metrics_button = gr.Button("View Metrics")
+                    metrics_plot = gr.Plot(label="Your Health Trends")
+
+            def save_metrics(weight, steps, sleep):
+                metrics_data.append({
+                    'date': datetime.now().strftime('%Y-%m-%d'),
+                    'weight': weight,
+                    'steps': steps,
+                    'sleep': sleep
+                })
+                return "✅ Metrics saved successfully!"
+
+            def view_metrics():
+                if not metrics_data:
+                    return None
+                df = pd.DataFrame(metrics_data)
+                # DataFrame.plot returns an Axes; gr.Plot expects the enclosing Figure
+                ax = df.plot(x='date', figsize=(10, 6), title="Health Metrics Over Time")
+                return ax.get_figure()
+
+            metrics_button.click(
+                save_metrics,
+                inputs=[weight_input, steps_input, sleep_input],
+                outputs=[metrics_output]
+            )
+            view_metrics_button.click(
+                view_metrics,
+                outputs=[metrics_plot]
+            )
+
+        # Medication Manager Tab
+        with gr.Tab("💊 Medication Manager"):
+            with gr.Row():
+                with gr.Column():
+                    med_name = gr.Textbox(label="Medication Name")
+                    med_dosage = gr.Textbox(label="Dosage")
+                    med_time = gr.Textbox(label="Time (e.g., 9:00 AM)")
+                    med_notes = gr.Textbox(label="Notes (optional)")
+                    med_button = gr.Button("Add Medication")
+                    med_output = gr.Textbox(
+                        label="Status",
+                        interactive=False
+                    )
+
+                with gr.Column():
+                    view_meds_button = gr.Button("View Medications")
+                    meds_table = gr.Dataframe(
+                        headers=["Medication", "Dosage", "Time", "Notes"],
+                        label="Your Medications"
+                    )
+
+            def add_med(name, dosage, time, notes):
+                if not all([name, dosage, time]):
+                    return "❌ Please fill in all required fields."
+                medication_data.append({
+                    'Medication': name,
+                    'Dosage': dosage,
+                    'Time': time,
+                    'Notes': notes
+                })
+                return f"✅ Added {name} to medications!"
+
+            def view_meds():
+                return pd.DataFrame(medication_data)
+
+            med_button.click(
+                add_med,
+                inputs=[med_name, med_dosage, med_time, med_notes],
+                outputs=[med_output]
+            )
+            view_meds_button.click(
+                view_meds,
+                outputs=[meds_table]
+            )
+
+    gr.Markdown(
+        """
+        ### ⚠️ Important Disclaimer
+        This Virtual Health Assistant uses AI to provide general health information.
+        It is not a substitute for professional medical advice, diagnosis, or treatment.
+        Always seek the advice of qualified healthcare providers with questions about medical conditions.
+        """
+    )
+
+# Launch the app
+demo.launch()
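
For a quick sanity check of the inference path outside the UI, below is a minimal sketch (not part of the commit) that exercises the same huggingface_hub call the app makes. It assumes huggingface_hub is installed, HF_TOKEN is set in the environment, and the token has access to the gated meta-llama/Llama-2-7b-chat-hf model; without a working token the app above simply falls back to its template responses. The question in the prompt is an arbitrary example.

import os
from huggingface_hub import InferenceClient

# Same client setup and generation settings app.py uses in init_llm() / get_llm_response()
client = InferenceClient(model="meta-llama/Llama-2-7b-chat-hf", token=os.getenv("HF_TOKEN"))
prompt = (
    "<s>[INST] <<SYS>>You are a helpful healthcare assistant.<</SYS>>\n\n"
    "What are common causes of mild dehydration? [/INST]"  # example question, not from the app
)
print(client.text_generation(prompt, max_new_tokens=512, temperature=0.7, repetition_penalty=1.1))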