Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -81,6 +81,47 @@ def build_table(title, rows):
|
|
| 81 |
html += '</tbody></table></div>'
|
| 82 |
return html
|
| 83 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
def analyze_face(image):
|
| 85 |
if image is None:
|
| 86 |
return "<div style='color:red;'>⚠️ Error: No image provided.</div>", None
|
|
@@ -106,7 +147,7 @@ def analyze_face(image):
|
|
| 106 |
brightness_std = np.std(gray) / 255
|
| 107 |
tone_index = np.mean(frame_rgb[100:150, 100:150]) / 255 if frame_rgb[100:150, 100:150].size else 0.5
|
| 108 |
hr_features = [brightness_std, green_std, tone_index]
|
| 109 |
-
heart_rate = hr_model.predict([hr_features])[0]
|
| 110 |
skin_patch = frame_rgb[100:150, 100:150]
|
| 111 |
skin_tone_index = np.mean(skin_patch) / 255 if skin_patch.size else 0.5
|
| 112 |
brightness_variation = np.std(cv2.cvtColor(frame_rgb, cv2.COLOR_RGB2GRAY)) / 255
|
|
@@ -144,17 +185,18 @@ def analyze_face(image):
|
|
| 144 |
|
| 145 |
with gr.Blocks() as demo:
|
| 146 |
gr.Markdown("""
|
| 147 |
-
# 🧠 Face-Based Lab Test AI Report
|
| 148 |
-
Upload a face
|
| 149 |
""")
|
| 150 |
with gr.Row():
|
| 151 |
with gr.Column():
|
| 152 |
-
|
| 153 |
submit_btn = gr.Button("🔍 Analyze")
|
| 154 |
with gr.Column():
|
| 155 |
result_html = gr.HTML(label="🧪 Health Report Table")
|
| 156 |
-
result_image = gr.Image(label="📷
|
| 157 |
-
submit_btn.click(fn=analyze_face, inputs=
|
| 158 |
-
gr.Markdown("
|
|
|
|
| 159 |
|
| 160 |
demo.launch()
|
|
|
|
| 81 |
html += '</tbody></table></div>'
|
| 82 |
return html
|
| 83 |
|
| 84 |
+
def analyze_video(video_path):
    """Estimate heart rate, SpO2 and respiratory rate from a face video.

    Reads every frame from ``video_path``, tracks mean frame brightness and
    mean green-channel intensity as a simulated rPPG signal, feeds summary
    statistics of that signal to the module-level ``hr_model`` and
    ``spo2_model`` regressors, and renders the brightness trace to a PNG.

    Returns:
        tuple: (html_report, key_frame_rgb) for a Gradio (HTML, Image)
        output pair. On an unreadable/empty video, returns an error
        message string and ``None`` instead of crashing.
    """
    import matplotlib
    matplotlib.use("Agg")  # headless backend: the server has no display
    import matplotlib.pyplot as plt

    cap = cv2.VideoCapture(video_path)
    brightness_vals = []
    green_vals = []
    frame_sample = None
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            if frame_sample is None:
                frame_sample = frame.copy()  # keep first frame as the snapshot
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            brightness_vals.append(np.mean(gray))
            green_vals.append(np.mean(frame[:, :, 1]))  # green channel (BGR)
    finally:
        cap.release()  # release even if a frame conversion raises

    # BUG FIX: an unreadable or empty video leaves frame_sample as None,
    # which previously crashed on frame_sample[100:150, 100:150].
    if frame_sample is None or not brightness_vals:
        return "<div style='color:red;'>⚠️ Error: Could not read any frames from the video.</div>", None

    # Simulate HR via the variability of the rPPG-like signals.
    brightness_std = np.std(brightness_vals) / 255
    green_std = np.std(green_vals) / 255
    patch = frame_sample[100:150, 100:150]  # fixed central-ish skin patch
    tone_index = np.mean(patch) / 255 if patch.size else 0.5
    hr_features = [brightness_std, green_std, tone_index]
    heart_rate = float(np.clip(hr_model.predict([hr_features])[0], 60, 100))

    # Same 50x50-patch statistic as tone_index; computed once, reused.
    skin_tone_index = tone_index
    brightness_variation = np.std(cv2.cvtColor(frame_sample, cv2.COLOR_BGR2GRAY)) / 255
    spo2_features = [heart_rate, brightness_variation, skin_tone_index]
    spo2 = spo2_model.predict([spo2_features])[0]
    rr = int(12 + abs(heart_rate % 5 - 2))  # heuristic respiratory rate, 10-15 bpm range

    plt.figure(figsize=(6, 2))
    plt.plot(brightness_vals, label='rPPG Signal')
    plt.title("Simulated rPPG Signal")
    plt.xlabel("Frame")
    plt.ylabel("Brightness")
    plt.legend()
    plt.tight_layout()
    plot_path = "/tmp/ppg_plot.png"
    plt.savefig(plot_path)
    plt.close()

    return f"<h4>❤️ HR: {heart_rate:.2f} bpm, SpO₂: {spo2:.2f}%, RR: {rr} bpm</h4><img src='file/{plot_path}' width='100%'>", cv2.cvtColor(frame_sample, cv2.COLOR_BGR2RGB)
|
| 125 |
def analyze_face(image):
|
| 126 |
if image is None:
|
| 127 |
return "<div style='color:red;'>⚠️ Error: No image provided.</div>", None
|
|
|
|
| 147 |
brightness_std = np.std(gray) / 255
|
| 148 |
tone_index = np.mean(frame_rgb[100:150, 100:150]) / 255 if frame_rgb[100:150, 100:150].size else 0.5
|
| 149 |
hr_features = [brightness_std, green_std, tone_index]
|
| 150 |
+
heart_rate = float(np.clip(hr_model.predict([hr_features])[0], 60, 100))
|
| 151 |
skin_patch = frame_rgb[100:150, 100:150]
|
| 152 |
skin_tone_index = np.mean(skin_patch) / 255 if skin_patch.size else 0.5
|
| 153 |
brightness_variation = np.std(cv2.cvtColor(frame_rgb, cv2.COLOR_RGB2GRAY)) / 255
|
|
|
|
| 185 |
|
| 186 |
# Gradio UI: video-in, HTML-report + key-frame-snapshot out.
with gr.Blocks() as demo:
    gr.Markdown("""
    # 🧠 Face-Based Lab Test AI Report (Video Mode)
    Upload a short face video (10–30s) to infer health diagnostics using rPPG analysis.
    """)
    with gr.Row():
        with gr.Column():
            # gr.Video always hands the handler a filepath; it takes no
            # `type=` kwarg (that is a gr.Image parameter) — passing one
            # raises TypeError at startup on Gradio 4.
            video_input = gr.Video(label="📽 Upload Face Video", sources=["upload", "webcam"])
            submit_btn = gr.Button("🔍 Analyze")
        with gr.Column():
            result_html = gr.HTML(label="🧪 Health Report Table")
            result_image = gr.Image(label="📷 Key Frame Snapshot")
    # BUG FIX: the video input must be routed to analyze_video (expects a
    # filepath), not the image handler analyze_face (expects an image array);
    # the original wiring left analyze_video dead code and broke Analyze.
    submit_btn.click(fn=analyze_video, inputs=video_input, outputs=[result_html, result_image])
    gr.Markdown("---\n✅ Table Format • AI Prediction • rPPG-based HR • Dynamic Summary • Multilingual Support • CTA")

demo.launch()
|