Spaces:
Sleeping
Sleeping
Upload 8 files
Browse files- .gitignore +10 -0
- Dockerfile +34 -0
- README.md +18 -10
- app.py +213 -0
- requirements.txt +19 -0
- static/css/style.css +162 -0
- static/js/main.js +333 -0
- templates/index.html +143 -0
.gitignore
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
venv/
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.pyc
|
| 4 |
+
.git
|
| 5 |
+
.gitignore
|
| 6 |
+
captured_images/
|
| 7 |
+
temp_audio/
|
| 8 |
+
wellbeing_logs.csv
|
| 9 |
+
*.docx
|
| 10 |
+
*.pdf
|
Dockerfile
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
FROM python:3.11-slim

# Install system dependencies including FFmpeg (needed for decoding the
# browser's webm audio uploads) and the shared libs OpenCV expects.
RUN apt-get update && apt-get install -y \
    ffmpeg \
    libsm6 \
    libxext6 \
    libxrender-dev \
    libgomp1 \
    && rm -rf /var/lib/apt/lists/*

# Set working directory
WORKDIR /app

# Create non-root user (required by Hugging Face Spaces).
# NOTE: WORKDIR created /app owned by root; without the chown the later
# `mkdir -p captured_images temp_audio` (run as `user`) and the runtime
# os.makedirs calls in app.py fail with "Permission denied".
RUN useradd -m -u 1000 user \
    && chown -R user:user /app
USER user
# Put user-level pip installs on PATH; unbuffered stdout so logs appear live.
ENV PATH="/home/user/.local/bin:$PATH" \
    PYTHONUNBUFFERED=1

# Copy requirements first so the dependency layer is cached across code edits
COPY --chown=user requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application
COPY --chown=user . /app

# Create necessary runtime directories
RUN mkdir -p captured_images temp_audio

# Expose port 7860 (required by Hugging Face)
EXPOSE 7860

# Run the application
CMD ["python", "app.py"]
README.md
CHANGED
|
@@ -1,10 +1,18 @@
|
|
| 1 |
-
---
|
| 2 |
-
title: Therapy Companion
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
-
sdk: docker
|
| 7 |
-
pinned: false
|
| 8 |
-
---
|
| 9 |
-
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Therapy Companion
|
| 3 |
+
emoji: 🧠
|
| 4 |
+
colorFrom: green
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# Therapy Companion - AI Wellbeing Application
|
| 11 |
+
|
| 12 |
+
This is an AI-powered mental health companion that analyzes emotions through facial expressions and voice.
|
| 13 |
+
|
| 14 |
+
## Features
|
| 15 |
+
- Face emotion detection
|
| 16 |
+
- Voice emotion recognition
|
| 17 |
+
- Stress assessment
|
| 18 |
+
- Personalized coping strategies
|
app.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import base64
import datetime
import io
import logging
import os
import threading
import uuid

import cv2
import librosa
import numpy as np
import pandas as pd
import torch
from deepface import DeepFace
from flask import Flask, request, jsonify, render_template
from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor

# --- App & Logger Setup ---
app = Flask(__name__)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

# --- Constants & Directory Setup ---
LOG_FILE = "wellbeing_logs.csv"          # append-only CSV of check-ins
CAPTURED_IMAGE_DIR = "captured_images"   # saved webcam frames
TEMP_AUDIO_DIR = "temp_audio"            # scratch space for uploaded audio

# Ensure the on-disk working directories exist before any request arrives.
for _dir in (CAPTURED_IMAGE_DIR, TEMP_AUDIO_DIR):
    os.makedirs(_dir, exist_ok=True)

# --- Caching the Model ---
# Lazily populated module-level cache for the wav2vec2 emotion model.
voice_model = None
voice_feature_extractor = None
# Guards the one-time model load below; Flask's default server is threaded,
# so without a lock two concurrent first requests could both start the slow,
# memory-hungry download/initialisation.
_voice_model_lock = threading.Lock()

def load_voice_emotion_model():
    """Lazily load and cache the wav2vec2 speech-emotion model.

    Returns:
        tuple: (model, feature_extractor) for "superb/wav2vec2-base-superb-er",
        loaded once and reused for every subsequent call.
    """
    global voice_model, voice_feature_extractor
    if voice_model is None:
        with _voice_model_lock:
            # Double-checked: another thread may have finished while we waited.
            if voice_model is None:
                logging.info("Loading voice emotion model for the first time...")
                model_name = "superb/wav2vec2-base-superb-er"
                # Load the extractor first and publish the model last, so the
                # unlocked fast path above never observes a half-built pair.
                voice_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_name)
                voice_model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name)
                logging.info("Voice emotion model loaded.")
    return voice_model, voice_feature_extractor
# --- Analysis Functions ---
def analyze_voice_emotion(audio_file_path):
    """Classify the emotion carried by an audio file.

    Resamples to 16 kHz mono, runs the cached wav2vec2 classifier, and
    returns the predicted label, or an "Error: ..." string on failure.
    """
    try:
        model, feature_extractor = load_voice_emotion_model()
        waveform, sample_rate = librosa.load(audio_file_path, sr=16000, mono=True)
        if waveform.shape[0] == 0:
            logging.warning(f"Audio file {audio_file_path} was empty.")
            return "Error: Invalid or empty audio"
        features = feature_extractor(waveform, sampling_rate=sample_rate, return_tensors="pt", padding=True)
        # Inference only -- no gradients needed.
        with torch.no_grad():
            scores = model(**features).logits
        best_class = torch.argmax(scores, dim=-1).item()
        return model.config.id2label[best_class]
    except Exception as e:
        logging.exception(f"Voice emotion analysis failed for file {audio_file_path}: {e}")
        return "Error: Voice analysis failed"
def analyze_emotion_from_data(image_bytes, detector_backend="retinaface"):
    """Detect the dominant facial emotion in raw encoded image bytes.

    Decodes the bytes with OpenCV, runs DeepFace with the requested detector
    (falling back to 'opencv' if that detector errors), and returns the
    emotion name, "No face detected", or an "Error: ..." string.
    """
    try:
        raw = np.frombuffer(image_bytes, np.uint8)
        frame = cv2.imdecode(raw, cv2.IMREAD_COLOR)
        if frame is None:
            return "Error: Could not decode image"

        # Use a fallback detector if the selected one fails
        try:
            analysis = DeepFace.analyze(
                img_path=frame, actions=['emotion'],
                detector_backend=detector_backend, enforce_detection=False
            )
        except Exception as detector_error:
            logging.warning(f"Detector '{detector_backend}' failed: {detector_error}. Falling back to 'opencv'.")
            analysis = DeepFace.analyze(
                img_path=frame, actions=['emotion'],
                detector_backend='opencv', enforce_detection=False
            )

        # DeepFace returns one dict per detected face.
        if not (isinstance(analysis, list) and len(analysis) > 0):
            return "No face detected"
        return analysis[0].get("dominant_emotion", "No face detected")
    except Exception as e:
        logging.exception(f"Face emotion analysis failed with backend {detector_backend}: {e}")
        return "Error: Face analysis failed"
def assess_stress_enhanced(face_emotion, sleep_hours, activity_level, voice_emotion):
    """Combine emotion, sleep and activity inputs into a stress estimate.

    Scores each factor 0-3 (higher = more stress), sums them, and builds a
    Markdown feedback report.

    Returns:
        tuple: (feedback_text, stress_score) where lower scores are better.
    """
    activity_map = {"Very Low": 3, "Low": 2, "Moderate": 1, "High": 0}
    emotion_map = { "angry": 2, "disgust": 2, "fear": 2, "sad": 2, "neutral": 1, "surprise": 1, "happy": 0 }

    # Unknown emotions score 1 (neutral).
    face_emotion_score = emotion_map.get(str(face_emotion).lower(), 1)
    voice_emotion_score = emotion_map.get(str(voice_emotion).lower(), 1)

    # Average in the voice reading only when one was actually taken.
    if voice_emotion != "N/A":
        emotion_score = round((face_emotion_score + voice_emotion_score) / 2)
    else:
        emotion_score = face_emotion_score

    activity_score = activity_map.get(str(activity_level), 1)

    try:
        sleep_hours = float(sleep_hours)
    except (ValueError, TypeError):
        # Unparseable input: assume the worst sleep and report 0 hours.
        sleep_score, sleep_hours = 2, 0
    else:
        sleep_score = 0 if sleep_hours >= 7 else (1 if sleep_hours >= 5 else 2)

    stress_score = emotion_score + activity_score + sleep_score

    parts = [
        f"**Your potential stress score is {stress_score} (lower is better).**\n\n**Breakdown:**\n",
        f"- Face Emotion: {face_emotion} (score: {face_emotion_score})\n",
        f"- Voice Emotion: {voice_emotion} (score: {voice_emotion_score})\n",
        f"- Sleep: {sleep_hours} hours (score: {sleep_score})\n",
        f"- Activity: {activity_level} (score: {activity_score})\n",
    ]
    if stress_score <= 2:
        parts.append("\nGreat job! You seem to be in a good space.")
    elif stress_score <= 4:
        parts.append("\nYou're doing okay, but remember to be mindful of your rest and mood.")
    else:
        parts.append("\nConsider taking some time for self-care. Improving sleep or gentle activity might help.")
    return "".join(parts), stress_score
# --- Flask Routes ---
@app.route('/')
def index():
    """Serve the single-page check-in UI."""
    return render_template('index.html')
@app.route('/analyze_face', methods=['POST'])
def analyze_face_endpoint():
    """Analyze a webcam frame sent as a base64 data URL.

    Expects JSON: {"image": "data:image/jpeg;base64,...", "detector": "..."}.
    Returns {'emotion': ..., 'image_path': ...}; the frame is saved to disk
    only when a face was successfully analyzed. Responds 400 on a missing or
    malformed payload (previously a KeyError/binascii.Error surfaced as an
    opaque 500).
    """
    data = request.get_json(silent=True) or {}
    detector = data.get('detector', 'retinaface')
    image_field = data.get('image')
    if not image_field or ',' not in image_field:
        return jsonify({'error': 'Missing or malformed image payload'}), 400
    try:
        # Strip the "data:image/...;base64," prefix before decoding.
        image_data = base64.b64decode(image_field.split(',', 1)[1])
    except (ValueError, TypeError):
        return jsonify({'error': 'Invalid base64 image data'}), 400
    emotion = analyze_emotion_from_data(image_data, detector_backend=detector)
    image_path = "N/A"
    if not emotion.startswith("Error:") and emotion != "No face detected":
        filename = f"face_{uuid.uuid4()}.jpg"
        image_path = os.path.join(CAPTURED_IMAGE_DIR, filename)
        with open(image_path, "wb") as f:
            f.write(image_data)
    return jsonify({'emotion': emotion, 'image_path': image_path})
@app.route('/analyze_voice', methods=['POST'])
def analyze_voice_endpoint():
    """Accept an uploaded audio clip and return its detected emotion."""
    audio_file = request.files.get('audio')
    if not audio_file:
        return jsonify({'error': 'No audio file provided'}), 400
    # Unique scratch filename so concurrent requests never collide.
    temp_filepath = os.path.join(TEMP_AUDIO_DIR, f"{uuid.uuid4()}.webm")
    try:
        audio_file.save(temp_filepath)
        emotion = analyze_voice_emotion(temp_filepath)
    finally:
        # Always remove the scratch file, even when analysis raised.
        if os.path.exists(temp_filepath):
            os.remove(temp_filepath)
    return jsonify({'voice_emotion': emotion})
@app.route('/log_checkin', methods=['POST'])
def log_checkin_endpoint():
    """Score the current check-in and append it to the CSV log."""
    data = request.json
    feedback, stress_score = assess_stress_enhanced(
        data['emotion'], data['sleep_hours'], data['activity_level'], data['voice_emotion']
    )
    # *** FIX: Format timestamp as a consistent string BEFORE saving ***
    entry = {
        "timestamp": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        "face_emotion": data['emotion'],
        "voice_emotion": data.get('voice_emotion', 'N/A'),
        "sleep_hours": data['sleep_hours'],
        "activity_level": data['activity_level'],
        "stress_score": stress_score,
        "detector_backend": data.get('detector', 'retinaface'),
        "image_path": data.get('image_path', 'N/A')
    }
    try:
        # Only write the header row when starting a fresh log file.
        write_header = not os.path.exists(LOG_FILE)
        pd.DataFrame([entry]).to_csv(LOG_FILE, mode='a', header=write_header, index=False)
        return jsonify({'feedback': feedback, 'stress_score': stress_score, 'status': 'success'})
    except Exception as e:
        logging.exception(f"Could not save log: {e}")
        return jsonify({'error': f'Could not save log: {e}'}), 500
@app.route('/get_logs', methods=['GET'])
def get_logs_endpoint():
    """Return all logged check-ins as {'data': [...], 'columns': [...]}."""
    if not os.path.exists(LOG_FILE):
        return jsonify({'data': [], 'columns': []})
    try:
        df = pd.read_csv(LOG_FILE)
        # Timestamps were stored as formatted strings, so no re-parsing needed.
        # Missing cells come back as NaN, which would be emitted as a bare
        # `NaN` token in the response body -- invalid JSON that breaks
        # JSON.parse on the client. Map NaN to None so it serializes as null.
        df = df.astype(object).where(df.notna(), None)
        return jsonify({
            'data': df.to_dict(orient='records'),
            'columns': df.columns.tolist()
        })
    except pd.errors.EmptyDataError:
        # A zero-byte log file is equivalent to no log at all.
        return jsonify({'data': [], 'columns': []})
    except Exception as e:
        logging.exception(f"Could not read logs: {e}")
        return jsonify({'error': 'Could not read logs'}), 500
@app.route('/clear_logs', methods=['POST'])
def clear_logs_endpoint():
    """Delete the CSV log plus all captured images and leftover temp audio."""
    try:
        if os.path.exists(LOG_FILE):
            os.remove(LOG_FILE)
        # Empty both working directories (but keep the directories themselves).
        for directory in [CAPTURED_IMAGE_DIR, TEMP_AUDIO_DIR]:
            if not os.path.exists(directory):
                continue
            for name in os.listdir(directory):
                os.remove(os.path.join(directory, name))
        return jsonify({'status': 'success', 'message': 'All logs and images cleared.'})
    except Exception as e:
        logging.exception(f"Error clearing logs: {e}")
        return jsonify({'status': 'error', 'message': str(e)}), 500
if __name__ == '__main__':
    # Warm the model cache up front so the first request isn't slow.
    load_voice_emotion_model()
    # Hugging Face requires port 7860
    listen_port = int(os.environ.get('PORT', 7860))
    app.run(debug=False, host='0.0.0.0', port=listen_port)
requirements.txt
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# requirements.txt
|
| 2 |
+
|
| 3 |
+
Flask
|
| 4 |
+
pandas
|
| 5 |
+
numpy
|
| 6 |
+
opencv-python
|
| 7 |
+
deepface
|
| 8 |
+
librosa
|
| 9 |
+
torch
|
| 10 |
+
transformers
|
| 11 |
+
soundfile
|
| 12 |
+
gunicorn
|
| 13 |
+
tf-keras
|
| 14 |
+
accelerate
|
| 15 |
+
safetensors
|
| 16 |
+
mediapipe
|
| 17 |
+
sentencepiece
|
| 18 |
+
scipy
|
| 19 |
+
requests
|
static/css/style.css
ADDED
|
@@ -0,0 +1,162 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* static/css/style.css */
|
| 2 |
+
:root {
|
| 3 |
+
--bg-color: #F4F7F6;
|
| 4 |
+
--card-bg: #FFFFFF;
|
| 5 |
+
--primary-color: #3A8DDE;
|
| 6 |
+
--primary-hover: #2C68A6;
|
| 7 |
+
--secondary-color: #5FAD56;
|
| 8 |
+
--danger-color: #D9534F;
|
| 9 |
+
--danger-hover: #C9302C;
|
| 10 |
+
--text-primary: #212529;
|
| 11 |
+
--text-secondary: #6C757D;
|
| 12 |
+
--border-color: #DEE2E6;
|
| 13 |
+
--font-heading: 'Poppins', sans-serif;
|
| 14 |
+
--font-body: 'Noto Sans', sans-serif;
|
| 15 |
+
}
|
| 16 |
+
|
| 17 |
+
* { box-sizing: border-box; margin: 0; padding: 0; }
|
| 18 |
+
body {
|
| 19 |
+
font-family: var(--font-body);
|
| 20 |
+
background-color: var(--bg-color);
|
| 21 |
+
background-image: url('https://images.unsplash.com/photo-1554034483-04fda0d3507b?q=80&w=2070&auto=format&fit=crop');
|
| 22 |
+
background-size: cover;
|
| 23 |
+
background-attachment: fixed;
|
| 24 |
+
color: var(--text-primary);
|
| 25 |
+
line-height: 1.6;
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
.container { display: flex; min-height: 100vh; background: rgba(255, 255, 255, 0.8); backdrop-filter: blur(12px); }
|
| 29 |
+
|
| 30 |
+
/* --- Sidebar --- */
|
| 31 |
+
.sidebar {
|
| 32 |
+
width: 380px; flex-shrink: 0;
|
| 33 |
+
background: rgba(255, 255, 255, 0.95);
|
| 34 |
+
padding: 30px; border-right: 1px solid var(--border-color);
|
| 35 |
+
overflow-y: auto;
|
| 36 |
+
}
|
| 37 |
+
.sidebar-header { text-align: center; margin-bottom: 30px; }
|
| 38 |
+
.logo { margin: 0 auto 10px; }
|
| 39 |
+
.sidebar-header h2 { font-family: var(--font-heading); font-weight: 600; color: var(--text-primary); }
|
| 40 |
+
|
| 41 |
+
.step { padding-bottom: 20px; border-bottom: 1px dashed var(--border-color); margin-bottom: 20px; }
|
| 42 |
+
.step:last-child { border-bottom: none; }
|
| 43 |
+
.step-header h3 { display: flex; align-items: center; gap: 10px; font-family: var(--font-heading); font-size: 1.1em; }
|
| 44 |
+
.step-header span {
|
| 45 |
+
display: flex; align-items: center; justify-content: center;
|
| 46 |
+
width: 28px; height: 28px;
|
| 47 |
+
background-color: var(--primary-color);
|
| 48 |
+
color: white; border-radius: 50%; font-size: 0.9em;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
.video-frame { padding: 5px; border: 1px solid var(--border-color); margin-top: 10px; border-radius: 8px; background: #fff; }
|
| 52 |
+
video { display: block; border-radius: 4px; }
|
| 53 |
+
|
| 54 |
+
/* --- Buttons & Forms --- */
|
| 55 |
+
button {
|
| 56 |
+
width: 100%; padding: 12px 15px; font-size: 1em;
|
| 57 |
+
font-family: var(--font-heading); font-weight: 600;
|
| 58 |
+
border: none; border-radius: 8px; cursor: pointer;
|
| 59 |
+
color: white; margin-top: 10px;
|
| 60 |
+
background-color: var(--text-secondary);
|
| 61 |
+
transition: all 0.2s ease;
|
| 62 |
+
}
|
| 63 |
+
button:hover:not(:disabled) { transform: translateY(-2px); box-shadow: 0 4px 10px rgba(0,0,0,0.1); }
|
| 64 |
+
button:disabled { background-color: #ccc; cursor: not-allowed; }
|
| 65 |
+
button.primary { background-color: var(--primary-color); }
|
| 66 |
+
button.primary:hover { background-color: var(--primary-hover); }
|
| 67 |
+
button.danger { background-color: var(--danger-color); }
|
| 68 |
+
button.danger:hover { background-color: var(--danger-hover); }
|
| 69 |
+
|
| 70 |
+
label { font-weight: 700; font-size: 0.9em; display: block; margin-bottom: 5px; }
|
| 71 |
+
select, input[type="range"] {
|
| 72 |
+
width: 100%; margin-top: 5px;
|
| 73 |
+
background-color: #F8F9FA; border: 1px solid var(--border-color);
|
| 74 |
+
color: var(--text-primary); padding: 10px; border-radius: 8px;
|
| 75 |
+
}
|
| 76 |
+
select { margin-bottom: 10px; }
|
| 77 |
+
|
| 78 |
+
/* --- Voice Visualizer --- */
|
| 79 |
+
#voice-visualizer {
|
| 80 |
+
display: block;
|
| 81 |
+
width: 100%;
|
| 82 |
+
height: 50px;
|
| 83 |
+
background-color: #f8f9fa;
|
| 84 |
+
border-radius: 8px;
|
| 85 |
+
margin-bottom: 10px;
|
| 86 |
+
border: 1px solid var(--border-color);
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
/* --- Main Content & Cards --- */
|
| 90 |
+
.main-content { flex-grow: 1; padding: 40px; overflow-y: auto; }
|
| 91 |
+
.main-header { margin-bottom: 30px; }
|
| 92 |
+
.main-header h1 { font-family: var(--font-heading); font-weight: 600; }
|
| 93 |
+
.main-header p { color: var(--text-secondary); font-size: 1.1em; max-width: 600px; }
|
| 94 |
+
|
| 95 |
+
.card {
|
| 96 |
+
background-color: var(--card-bg);
|
| 97 |
+
border-radius: 12px; box-shadow: 0 4px 25px rgba(0,0,0,0.08);
|
| 98 |
+
border: 1px solid var(--border-color); margin-bottom: 30px;
|
| 99 |
+
transition: transform 0.3s ease, box-shadow 0.3s ease;
|
| 100 |
+
}
|
| 101 |
+
.card:hover {
|
| 102 |
+
transform: translateY(-5px);
|
| 103 |
+
box-shadow: 0 8px 30px rgba(0,0,0,0.12);
|
| 104 |
+
}
|
| 105 |
+
@keyframes fadeIn { to { opacity: 1; transform: translateY(0); } }
|
| 106 |
+
|
| 107 |
+
.card-header { padding: 20px 25px; border-bottom: 1px solid var(--border-color); }
|
| 108 |
+
.card-header h2 { font-family: var(--font-heading); margin: 0; font-size: 1.2em; }
|
| 109 |
+
.card-body { padding: 25px; }
|
| 110 |
+
|
| 111 |
+
/* Tabs, Dashboard, & Logs */
|
| 112 |
+
.tabs { padding: 0 10px; }
|
| 113 |
+
.tab-link { background-color: transparent; border: none; border-bottom: 3px solid transparent; padding: 15px 20px; font-size: 1em; font-weight: 700; color: var(--text-secondary); }
|
| 114 |
+
.tab-link.active, .tab-link:hover { color: var(--primary-color); border-bottom-color: var(--primary-color); }
|
| 115 |
+
.tab-content { display: none; }
|
| 116 |
+
.section-description { color: var(--text-secondary); margin-bottom: 20px; padding: 0 25px; }
|
| 117 |
+
.dashboard-grid { display: grid; grid-template-columns: 2fr 1fr 1fr; gap: 25px; }
|
| 118 |
+
.chart-card { min-height: 300px; }
|
| 119 |
+
|
| 120 |
+
.table-container { max-height: 400px; overflow-y: auto; }
|
| 121 |
+
#log-table { width: 100%; border-collapse: collapse; }
|
| 122 |
+
#log-table th, #log-table td { padding: 12px 15px; text-align: left; border-bottom: 1px solid var(--border-color); }
|
| 123 |
+
#log-table th { font-family: var(--font-heading); background-color: #f8f9fa; }
|
| 124 |
+
#log-table td:last-child { font-weight: 700; }
|
| 125 |
+
#no-logs-message { padding: 40px 20px; text-align: center; color: var(--text-secondary); }
|
| 126 |
+
|
| 127 |
+
/* Results Area */
|
| 128 |
+
.results-area { display: grid; grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); gap: 30px; margin-bottom: 30px; }
|
| 129 |
+
#current-analysis-card, #feedback-report-display, #recommendations-card {
|
| 130 |
+
opacity: 0;
|
| 131 |
+
animation: fadeIn 0.6s ease-out forwards;
|
| 132 |
+
}
|
| 133 |
+
|
| 134 |
+
#captured-image-display { width: 100%; border-radius: 8px; margin-bottom: 15px; }
|
| 135 |
+
.result-box {
|
| 136 |
+
margin-top: 10px; padding: 12px; border-radius: 8px;
|
| 137 |
+
background-color: #F8F9FA; opacity: 0; transform: translateY(10px);
|
| 138 |
+
transition: opacity 0.5s ease, transform 0.5s ease;
|
| 139 |
+
}
|
| 140 |
+
.result-box.show { opacity: 1; transform: translateY(0); }
|
| 141 |
+
|
| 142 |
+
.metric { text-align: center; margin-bottom: 15px; }
|
| 143 |
+
.metric-value { font-size: 2.5em; font-weight: 700; color: var(--primary-color); }
|
| 144 |
+
#recommendations-content ul { list-style: none; padding-left: 0; }
|
| 145 |
+
#recommendations-content li { display: flex; align-items: flex-start; gap: 10px; margin-bottom: 12px; }
|
| 146 |
+
#recommendations-content li::before { content: '✓'; color: var(--secondary-color); font-weight: 700; }
|
| 147 |
+
|
| 148 |
+
.status-message {
|
| 149 |
+
margin-top: 15px; min-height: 20px;
|
| 150 |
+
text-align: center; font-weight: 700;
|
| 151 |
+
}
|
| 152 |
+
.status-message.success { color: var(--secondary-color); }
|
| 153 |
+
.status-message.error { color: var(--danger-color); }
|
| 154 |
+
.status-message.info { color: var(--text-secondary); }
|
| 155 |
+
|
| 156 |
+
footer { text-align: center; margin-top: 20px; font-size: 0.9em; color: var(--text-secondary); }
|
| 157 |
+
footer button { width: auto; padding: 8px 15px; font-size: 0.9em; margin-top: 15px; }
|
| 158 |
+
|
| 159 |
+
/* --- Responsiveness --- */
|
| 160 |
+
@media (max-width: 1300px) { .dashboard-grid { grid-template-columns: 1fr; } }
|
| 161 |
+
@media (max-width: 1024px) { .container { flex-direction: column; } .sidebar { width: 100%; border-right: none; border-bottom: 1px solid var(--border-color); } }
|
| 162 |
+
@media (max-width: 768px) { .main-content { padding: 20px; } .results-area { grid-template-columns: 1fr; } }
|
static/js/main.js
ADDED
|
@@ -0,0 +1,333 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
document.addEventListener('DOMContentLoaded', () => {
|
| 2 |
+
// --- STATE MANAGEMENT ---
|
| 3 |
+
let state = {
|
| 4 |
+
currentFaceEmotion: null,
|
| 5 |
+
currentVoiceEmotion: null,
|
| 6 |
+
currentImagePath: null,
|
| 7 |
+
currentDetector: 'mediapipe',
|
| 8 |
+
charts: {},
|
| 9 |
+
audioContext: null,
|
| 10 |
+
analyser: null,
|
| 11 |
+
recorder: null,
|
| 12 |
+
animationFrameId: null,
|
| 13 |
+
};
|
| 14 |
+
|
| 15 |
+
// --- ELEMENT SELECTORS ---
|
| 16 |
+
const elements = {
|
| 17 |
+
video: document.getElementById('video-feed'),
|
| 18 |
+
canvas: document.getElementById('canvas'),
|
| 19 |
+
captureBtn: document.getElementById('capture-btn'),
|
| 20 |
+
detectorSelect: document.getElementById('detector-selector'),
|
| 21 |
+
faceStatus: document.getElementById('face-status'),
|
| 22 |
+
recordBtn: document.getElementById('record-btn'),
|
| 23 |
+
stopBtn: document.getElementById('stop-btn'),
|
| 24 |
+
voiceStatus: document.getElementById('voice-status'),
|
| 25 |
+
voiceVisualizer: document.getElementById('voice-visualizer'),
|
| 26 |
+
sleepSlider: document.getElementById('sleep-slider'),
|
| 27 |
+
sleepValue: document.getElementById('sleep-value'),
|
| 28 |
+
activitySelect: document.getElementById('activity-selector'),
|
| 29 |
+
logCheckinBtn: document.getElementById('log-checkin-btn'),
|
| 30 |
+
clearLogsBtn: document.getElementById('clear-logs-btn'),
|
| 31 |
+
analysisCard: document.getElementById('current-analysis-card'),
|
| 32 |
+
capturedImageDisplay: document.getElementById('captured-image-display'),
|
| 33 |
+
emotionResultDisplay: document.getElementById('emotion-result-display'),
|
| 34 |
+
voiceEmotionResultDisplay: document.getElementById('voice-emotion-result-display'),
|
| 35 |
+
feedbackReportDisplay: document.getElementById('feedback-report-display'),
|
| 36 |
+
stressMetric: document.getElementById('stress-metric'),
|
| 37 |
+
feedbackText: document.getElementById('feedback-text'),
|
| 38 |
+
recommendationsCard: document.getElementById('recommendations-card'),
|
| 39 |
+
recommendationsContent: document.getElementById('recommendations-content'),
|
| 40 |
+
logTableBody: document.querySelector('#log-table tbody'),
|
| 41 |
+
noLogsMessage: document.getElementById('no-logs-message'),
|
| 42 |
+
};
|
| 43 |
+
|
| 44 |
+
// --- INITIALIZATION ---
|
| 45 |
+
setupWebcam();
|
| 46 |
+
updateDashboard();
|
| 47 |
+
|
| 48 |
+
// --- EVENT LISTENERS ---
|
| 49 |
+
elements.detectorSelect.addEventListener('change', (e) => state.currentDetector = e.target.value);
|
| 50 |
+
elements.captureBtn.addEventListener('click', handleFaceCapture);
|
| 51 |
+
elements.recordBtn.addEventListener('click', startRecording);
|
| 52 |
+
elements.stopBtn.addEventListener('click', stopRecording);
|
| 53 |
+
elements.sleepSlider.addEventListener('input', () => elements.sleepValue.textContent = elements.sleepSlider.value);
|
| 54 |
+
elements.logCheckinBtn.addEventListener('click', handleLogCheckin);
|
| 55 |
+
elements.clearLogsBtn.addEventListener('click', handleClearLogs);
|
| 56 |
+
|
| 57 |
+
function setupWebcam() {
|
| 58 |
+
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
|
| 59 |
+
navigator.mediaDevices.getUserMedia({ video: true })
|
| 60 |
+
.then(stream => { elements.video.srcObject = stream; })
|
| 61 |
+
.catch(err => {
|
| 62 |
+
console.error("Webcam Error:", err);
|
| 63 |
+
updateStatus(elements.faceStatus, "Webcam access denied.", 'error');
|
| 64 |
+
});
|
| 65 |
+
}
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
+
function handleFaceCapture() {
|
| 69 |
+
updateStatus(elements.faceStatus, 'Analyzing...', 'info');
|
| 70 |
+
elements.captureBtn.disabled = true;
|
| 71 |
+
const context = elements.canvas.getContext('2d');
|
| 72 |
+
elements.canvas.width = elements.video.videoWidth;
|
| 73 |
+
elements.canvas.height = elements.video.videoHeight;
|
| 74 |
+
context.drawImage(elements.video, 0, 0, elements.canvas.width, elements.canvas.height);
|
| 75 |
+
const imageDataUrl = elements.canvas.toDataURL('image/jpeg');
|
| 76 |
+
|
| 77 |
+
fetch('/analyze_face', {
|
| 78 |
+
method: 'POST', headers: { 'Content-Type': 'application/json' },
|
| 79 |
+
body: JSON.stringify({ image: imageDataUrl, detector: state.currentDetector }),
|
| 80 |
+
})
|
| 81 |
+
.then(response => response.json())
|
| 82 |
+
.then(data => {
|
| 83 |
+
if (data.error) throw new Error(data.error);
|
| 84 |
+
state.currentFaceEmotion = data.emotion;
|
| 85 |
+
state.currentImagePath = data.image_path;
|
| 86 |
+
elements.analysisCard.style.display = 'block';
|
| 87 |
+
elements.capturedImageDisplay.src = imageDataUrl;
|
| 88 |
+
elements.capturedImageDisplay.style.display = 'block';
|
| 89 |
+
updateStatus(elements.faceStatus, `Success!`, 'success');
|
| 90 |
+
updateResultDisplay(elements.emotionResultDisplay, `Facial Expression: <strong>${data.emotion}</strong>`);
|
| 91 |
+
})
|
| 92 |
+
.catch(error => {
|
| 93 |
+
state.currentFaceEmotion = null; state.currentImagePath = null;
|
| 94 |
+
updateStatus(elements.faceStatus, `Analysis failed.`, 'error');
|
| 95 |
+
updateResultDisplay(elements.emotionResultDisplay, `Analysis Failed`, true);
|
| 96 |
+
})
|
| 97 |
+
.finally(() => { elements.captureBtn.disabled = false; });
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
// --- VOICE RECORDING & VISUALIZATION ---
|
| 101 |
+
// Request microphone access, start a MediaRecorder, and begin the live
// frequency visualization. When the recorder stops, the collected chunks
// are sent for analysis and the microphone is released.
async function startRecording() {
    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
        updateStatus(elements.voiceStatus, 'Audio capture not supported.', 'error');
        return;
    }
    updateStatus(elements.voiceStatus, 'Starting...', 'info');
    elements.recordBtn.disabled = true;

    let stream;
    try {
        stream = await navigator.mediaDevices.getUserMedia({ audio: true });
    } catch (err) {
        // Bug fix: a denied permission prompt previously produced an
        // unhandled promise rejection and left the record button disabled
        // forever with a stale "Starting..." status.
        console.error("Microphone Error:", err);
        updateStatus(elements.voiceStatus, 'Microphone access denied.', 'error');
        elements.recordBtn.disabled = false;
        return;
    }

    state.recorder = new MediaRecorder(stream);
    const audioChunks = [];

    // Lazily create one shared AudioContext for the visualizer.
    if (!state.audioContext) {
        state.audioContext = new (window.AudioContext || window.webkitAudioContext)();
    }
    state.analyser = state.audioContext.createAnalyser();
    const source = state.audioContext.createMediaStreamSource(stream);
    source.connect(state.analyser);
    drawVoiceVisualizer();

    state.recorder.ondataavailable = event => audioChunks.push(event.data);
    state.recorder.onstop = () => {
        const audioBlob = new Blob(audioChunks, { type: 'audio/webm' });
        analyzeVoice(audioBlob);
        stream.getTracks().forEach(track => track.stop()); // Stop mic access
        cancelAnimationFrame(state.animationFrameId);
        clearVisualizer();
    };

    state.recorder.start();
    updateStatus(elements.voiceStatus, 'Recording...', 'info');
    elements.stopBtn.disabled = false;
}
|
| 135 |
+
|
| 136 |
+
// Stop an in-progress recording (no-op otherwise) and swap the
// record/stop button states back.
function stopRecording() {
    const rec = state.recorder;
    if (!rec || rec.state !== 'recording') return;

    rec.stop();
    elements.stopBtn.disabled = true;
    elements.recordBtn.disabled = false;
}
|
| 143 |
+
|
| 144 |
+
// Upload a recorded audio blob to /analyze_voice and display the detected
// vocal emotion, or an error state on failure.
function analyzeVoice(audioBlob) {
    const uploadForm = new FormData();
    uploadForm.append('audio', audioBlob, 'recording.webm');
    updateStatus(elements.voiceStatus, 'Analyzing...', 'info');

    const handleResult = data => {
        if (data.error) throw new Error(data.error);
        state.currentVoiceEmotion = data.voice_emotion;
        elements.analysisCard.style.display = 'block';
        updateStatus(elements.voiceStatus, `Success!`, 'success');
        updateResultDisplay(elements.voiceEmotionResultDisplay, `Vocal Tone: <strong>${data.voice_emotion}</strong>`);
    };

    const handleFailure = error => {
        state.currentVoiceEmotion = null;
        updateStatus(elements.voiceStatus, `Analysis failed.`, 'error');
        updateResultDisplay(elements.voiceEmotionResultDisplay, `Analysis Failed`, true);
    };

    fetch('/analyze_voice', { method: 'POST', body: uploadForm })
        .then(response => response.json())
        .then(handleResult)
        .catch(handleFailure);
}
|
| 164 |
+
|
| 165 |
+
// Animate a frequency-bar visualization of the live microphone input.
// Loops via requestAnimationFrame until state.animationFrameId is
// cancelled (done in the recorder's onstop handler).
function drawVoiceVisualizer() {
    const vizCanvas = elements.voiceVisualizer;
    const ctx2d = vizCanvas.getContext('2d');
    state.analyser.fftSize = 256;
    const binCount = state.analyser.frequencyBinCount;
    const freqData = new Uint8Array(binCount);

    function renderFrame() {
        state.animationFrameId = requestAnimationFrame(renderFrame);
        state.analyser.getByteFrequencyData(freqData);

        // Wipe the previous frame.
        ctx2d.fillStyle = '#f8f9fa';
        ctx2d.fillRect(0, 0, vizCanvas.width, vizCanvas.height);

        const barWidth = (vizCanvas.width / binCount) * 2.5;
        let xPos = 0;
        for (let bin = 0; bin < binCount; bin++) {
            const barHeight = freqData[bin] / 2;
            // Louder bins render both taller and more opaque.
            ctx2d.fillStyle = `rgba(58, 141, 222, ${barHeight / 100})`;
            ctx2d.fillRect(xPos, vizCanvas.height - barHeight, barWidth, barHeight);
            xPos += barWidth + 1;
        }
    }

    renderFrame();
}
|
| 192 |
+
|
| 193 |
+
// Reset the visualizer canvas to its idle background colour.
function clearVisualizer() {
    const { voiceVisualizer } = elements;
    const ctx2d = voiceVisualizer.getContext('2d');
    ctx2d.fillStyle = '#f8f9fa';
    ctx2d.fillRect(0, 0, voiceVisualizer.width, voiceVisualizer.height);
}
|
| 199 |
+
|
| 200 |
+
// --- LOGGING & DASHBOARD ---
|
| 201 |
+
// Validate that a face analysis succeeded, POST the full check-in payload
// to /log_checkin, then render feedback, recommendations, and a refreshed
// dashboard. The submit button is disabled during the request.
function handleLogCheckin() {
    const faceEmotion = state.currentFaceEmotion;
    if (!faceEmotion || faceEmotion.startsWith("Error")) {
        alert("Please complete a successful face analysis before logging.");
        return;
    }

    const checkinPayload = {
        emotion: faceEmotion,
        voice_emotion: state.currentVoiceEmotion || "N/A",
        sleep_hours: parseFloat(elements.sleepSlider.value),
        activity_level: elements.activitySelect.value,
        detector: state.currentDetector,
        image_path: state.currentImagePath || "N/A"
    };

    elements.logCheckinBtn.textContent = 'Logging...';
    elements.logCheckinBtn.disabled = true;

    fetch('/log_checkin', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(checkinPayload)
    })
        .then(response => response.json())
        .then(data => {
            if (data.error) throw new Error(data.error);
            displayFeedback(data.feedback, data.stress_score);
            displayRecommendations(data.stress_score);
            updateDashboard();
        })
        .catch(error => alert(`Error logging check-in: ${error.message}`))
        .finally(() => {
            elements.logCheckinBtn.textContent = 'Complete Check-in & Get Feedback';
            elements.logCheckinBtn.disabled = false;
        });
}
|
| 228 |
+
|
| 229 |
+
// Ask for confirmation, then request server-side deletion of all log
// data and refresh the (now empty) dashboard.
function handleClearLogs() {
    const confirmed = confirm("Are you sure you want to permanently delete all log data?");
    if (!confirmed) return;

    fetch('/clear_logs', { method: 'POST' })
        .then(response => response.json())
        .then(data => {
            if (data.status !== 'success') throw new Error(data.message);
            updateDashboard();
        })
        .catch(error => alert(`Error clearing logs: ${error.message}`));
}
|
| 240 |
+
|
| 241 |
+
// Fetch the full log history from /get_logs and rebuild the history
// table and all dashboard charts.
function updateDashboard() {
    fetch('/get_logs')
        .then(response => response.json())
        .then(logData => {
            // Chart.js instances must be destroyed before re-rendering
            // onto the same canvases.
            for (const chart of Object.values(state.charts)) {
                chart.destroy();
            }
            state.charts = {};

            const entries = logData.data;
            if (!entries || entries.length === 0) {
                elements.logTableBody.innerHTML = '';
                elements.noLogsMessage.style.display = 'block';
                return;
            }

            elements.noLogsMessage.style.display = 'none';
            populateLogTable(entries);
            processLogDataForCharts(entries);
        })
        .catch(error => console.error("Failed to fetch logs:", error));
}
|
| 259 |
+
|
| 260 |
+
// Rebuild the log-history table, newest entries first.
function populateLogTable(data) {
    elements.logTableBody.innerHTML = '';
    data.slice().reverse().forEach(log => {
        const row = document.createElement('tr');
        const cellValues = [
            new Date(log.timestamp).toLocaleString(),
            log.face_emotion,
            log.voice_emotion,
            `${log.sleep_hours}h`,
            log.activity_level,
            log.stress_score,
        ];
        // Fix: build cells with textContent rather than interpolating
        // server-supplied strings into innerHTML, so log values can never
        // be interpreted as HTML (defense against stored HTML injection).
        for (const value of cellValues) {
            const cell = document.createElement('td');
            cell.textContent = value;
            row.appendChild(cell);
        }
        elements.logTableBody.appendChild(row);
    });
}
|
| 268 |
+
|
| 269 |
+
// Transform raw log rows into the three dashboard charts: a trends line
// chart plus facial and vocal emotion-frequency doughnuts.
function processLogDataForCharts(data) {
    const dateLabels = data.map(entry => new Date(entry.timestamp).toLocaleDateString());

    renderChart('trends-chart', 'line', {
        labels: dateLabels,
        datasets: [
            { label: 'Stress Score', data: data.map(entry => entry.stress_score), borderColor: '#D9534F', backgroundColor: 'rgba(217, 83, 79, 0.1)', fill: true, tension: 0.4 },
            { label: 'Sleep Hours', data: data.map(entry => entry.sleep_hours), borderColor: '#3A8DDE', backgroundColor: 'rgba(58, 141, 222, 0.1)', fill: true, tension: 0.4 }
        ]
    }, 'Well-being Trends');

    // Skip placeholder/error values when counting emotion frequencies.
    const faceCounts = countOccurrences(
        data.map(entry => entry.face_emotion).filter(e => e && !e.startsWith('Error'))
    );
    renderChart('emotion-chart', 'doughnut',
        { labels: Object.keys(faceCounts), datasets: [{ data: Object.values(faceCounts) }] },
        'Facial Emotion Spectrum');

    const voiceCounts = countOccurrences(
        data.map(entry => entry.voice_emotion).filter(e => e && !e.startsWith('Error') && e !== 'N/A')
    );
    renderChart('voice-emotion-chart', 'doughnut',
        { labels: Object.keys(voiceCounts), datasets: [{ data: Object.values(voiceCounts) }] },
        'Vocal Emotion Spectrum');
}
|
| 285 |
+
|
| 286 |
+
// --- UTILITY & DISPLAY FUNCTIONS ---
|
| 287 |
+
// Set a status element's message and its colour class (info/success/error).
function updateStatus(element, message, type) {
    element.innerHTML = message;
    element.className = `status-message ${type}`;
}
|
| 288 |
+
// Reveal a result box with the given message, tinted red when flagged
// as an error.
function updateResultDisplay(element, message, isError = false) {
    element.innerHTML = message;
    element.style.color = isError ? 'var(--danger-color)' : 'var(--text-primary)';
    element.classList.add('show');
}
|
| 289 |
+
// Reveal the feedback card: show the stress metric and the feedback text
// with minimal markdown (**bold**, newlines) converted to HTML.
function displayFeedback(feedback, score) {
    elements.feedbackReportDisplay.style.display = 'block';
    elements.stressMetric.innerHTML = `<div class="metric-label">Potential Stress Score</div><div class="metric-value">${score}</div>`;

    const feedbackHtml = feedback
        .replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>')
        .replace(/\n/g, '<br>');
    elements.feedbackText.innerHTML = feedbackHtml;
}
|
| 294 |
+
// Pick a tier of self-care suggestions based on the stress score and
// render them as a bullet list in the recommendations card.
function displayRecommendations(score) {
    let tips;
    if (score <= 2) {
        tips = [
            'You seem to be in a great place! Keep up your healthy routines.',
            'Consider sharing your positivity with someone today.'
        ];
    } else if (score <= 4) {
        tips = [
            'You\'re managing well. A short walk or 5 minutes of quiet time could be beneficial.',
            'Ensure you are staying hydrated throughout the day.'
        ];
    } else {
        tips = [
            'Your stress score is elevated. Prioritize getting a full night\'s sleep.',
            'Consider a calming activity like listening to music or a guided meditation.',
            'Reaching out to a friend, family member, or professional can be very helpful.'
        ];
    }

    elements.recommendationsContent.innerHTML =
        '<ul>' + tips.map(tip => `<li>${tip}</li>`).join('') + '</ul>';
    elements.recommendationsCard.style.display = 'block';
}
|
| 303 |
+
// Create a Chart.js chart on the given canvas and register it in
// state.charts (keyed by canvas id) so updateDashboard can destroy it
// before re-rendering. Line charts get axis styling; doughnuts get the
// shared colour palette.
function renderChart(canvasId, type, chartData, title) {
    const ctx = document.getElementById(canvasId).getContext('2d');
    const chartColors = ['#3A8DDE', '#5FAD56', '#F0AD4E', '#D9534F', '#5BC0DE', '#8E7CC3', '#E56B6F'];
    let options = { responsive: true, maintainAspectRatio: false,
        plugins: {
            title: { display: true, text: title, font: { size: 16, family: "'Poppins', sans-serif" }, padding: { top: 10, bottom: 20 } },
            legend: { position: 'bottom', labels: { padding: 20, usePointStyle: true } },
            tooltip: { backgroundColor: 'rgba(0, 0, 0, 0.7)', padding: 10, cornerRadius: 4 }
        }
    };

    if (type === 'line') {
        options.scales = { y: { beginAtZero: true, grid: { color: 'rgba(0,0,0,0.05)' } }, x: { grid: { display: false } } };
        options.plugins.legend.labels.usePointStyle = true;
    } else if (type === 'doughnut') {
        chartData.datasets[0].backgroundColor = chartColors;
        // Bug fix: canvas drawing (and thus Chart.js) cannot resolve CSS
        // custom properties, so the literal string 'var(--card-bg)' was an
        // invalid colour. Resolve the variable to its computed value and
        // fall back to white if it is not defined.
        const cardBg = getComputedStyle(document.documentElement)
            .getPropertyValue('--card-bg').trim() || '#ffffff';
        chartData.datasets[0].borderColor = cardBg;
        options.cutout = '60%';
    }

    state.charts[canvasId] = new Chart(ctx, { type, data: chartData, options });
}
|
| 325 |
+
// Tally how many times each value appears in `arr`; returns a plain
// object mapping value -> count.
function countOccurrences(arr) {
    const counts = {};
    for (const item of arr) {
        counts[item] = (counts[item] || 0) + 1;
    }
    return counts;
}
|
| 326 |
+
});
|
| 327 |
+
|
| 328 |
+
// Tab switcher (wired via inline onclick in index.html): show the
// selected tab's content pane and highlight its button.
function openTab(evt, tabName) {
    for (const pane of document.querySelectorAll(".tab-content")) {
        pane.style.display = "none";
    }
    for (const link of document.querySelectorAll(".tab-link")) {
        link.classList.remove("active");
    }
    document.getElementById(tabName).style.display = "block";
    evt.currentTarget.classList.add("active");
}
|
templates/index.html
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Mindful Companion</title>
    <link rel="preconnect" href="https://fonts.googleapis.com">
    <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
    <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@400;600&family=Noto+Sans:wght@400;700&display=swap" rel="stylesheet">
    <link rel="stylesheet" href="{{ url_for('static', filename='css/style.css') }}">
    <!-- Chart.js is used by static/js/main.js to render the dashboard charts -->
    <script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
</head>
<body>
    <div class="container">
        <!-- Sidebar: the three-step daily check-in workflow (face, voice, lifestyle) -->
        <aside class="sidebar">
            <header class="sidebar-header">
                <div class="logo">
                    <svg width="40" height="40" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M12 2C6.48 2 2 6.48 2 12C2 17.52 6.48 22 12 22C17.52 22 22 17.52 22 12C22 6.48 17.52 2 12 2ZM16.68 15.26C16.42 15.74 15.86 16 15.32 16H8.68C8.14 16 7.58 15.74 7.32 15.26C7.06 14.78 7.14 14.18 7.5 13.76L10.5 10.16V8C10.5 7.17 11.17 6.5 12 6.5C12.83 6.5 13.5 7.17 13.5 8V10.16L16.5 13.76C16.86 14.18 16.94 14.78 16.68 15.26Z" fill="#5FAD56"/></svg>
                </div>
                <h2>Your Daily Check-in</h2>
            </header>

            <div class="stepper">
                <div class="step">
                    <div class="step-header"><h3><span>1</span> Facial Expression</h3></div>
                    <div class="step-content">
                        <label for="detector-selector">Analysis Model</label>
                        <!-- The selected value is sent as "detector" to /analyze_face by main.js;
                             presumably these map to face-detector backends in app.py — verify there. -->
                        <select id="detector-selector">
                            <option value="retinaface">RetinaFace (High Accuracy)</option>
                            <option value="mtcnn">MTCNN (Good Balance)</option>
                            <option value="opencv">OpenCV (Fastest, Basic)</option>
                            <option value="ssd">SSD (Lightweight)</option>
                            <option value="dlib">Dlib (CPU-Friendly)</option>
                            <option value="mediapipe" selected>MediaPipe (Real-time)</option>
                            <option value="yolov8">YOLOv8 (Modern, Accurate)</option>
                            <option value="yunet">YuNet (New, Accurate)</option>
                        </select>
                        <div class="video-frame"><video id="video-feed" width="100%" autoplay playsinline></video></div>
                        <!-- Hidden canvas used by main.js to snapshot a webcam frame -->
                        <canvas id="canvas" style="display:none;"></canvas>
                        <button id="capture-btn">Analyze Face</button>
                        <div id="face-status" class="status-message"></div>
                    </div>
                </div>

                <div class="step">
                    <div class="step-header"><h3><span>2</span> Vocal Tone</h3></div>
                    <div class="step-content">
                        <!-- NEW: Canvas for voice visualization -->
                        <canvas id="voice-visualizer" width="300" height="50"></canvas>
                        <div class="button-group">
                            <button id="record-btn">Record Voice</button>
                            <button id="stop-btn" disabled>Stop</button>
                        </div>
                        <div id="voice-status" class="status-message"></div>
                    </div>
                </div>

                <div class="step">
                    <div class="step-header"><h3><span>3</span> Lifestyle Factors</h3></div>
                    <div class="step-content">
                        <label for="sleep-slider">Hours of Sleep: <span id="sleep-value">7.5</span></label>
                        <input type="range" id="sleep-slider" min="0" max="12" value="7.5" step="0.5">
                        <label for="activity-selector">Activity Level</label>
                        <select id="activity-selector">
                            <option>Very Low</option><option>Low</option>
                            <option selected>Moderate</option><option>High</option>
                        </select>
                    </div>
                </div>

                <div class="step">
                    <button id="log-checkin-btn" class="primary">Complete Check-in &amp; Get Feedback</button>
                </div>
            </div>
        </aside>

        <!-- Main area: analysis results, feedback, and the history dashboard -->
        <main class="main-content">
            <header class="main-header">
                <h1>Mindful Companion</h1>
                <p>A private dashboard for your emotional well-being journey.</p>
            </header>

            <!-- Result cards start hidden; main.js reveals them as data arrives -->
            <div class="results-area">
                <div id="current-analysis-card" class="card" style="display:none;">
                    <div class="card-header"><h2>Today's Analysis</h2></div>
                    <div class="card-body" id="current-checkin-display">
                        <img id="captured-image-display" src="#" alt="Your captured image" style="display:none;">
                        <div id="emotion-result-display" class="result-box"></div>
                        <div id="voice-emotion-result-display" class="result-box"></div>
                    </div>
                </div>
                <div id="feedback-report-display" class="card" style="display: none;">
                    <div class="card-header"><h2>Your Feedback Report</h2></div>
                    <div class="card-body">
                        <div id="stress-metric" class="metric"></div>
                        <div id="feedback-text" class="feedback-text"></div>
                    </div>
                </div>
                <div id="recommendations-card" class="card" style="display: none;">
                    <div class="card-header"><h2>💡 Recommended Actions</h2></div>
                    <div id="recommendations-content" class="card-body"></div>
                </div>
            </div>

            <div class="card">
                <!-- Tab buttons call openTab() defined in main.js -->
                <div class="tabs">
                    <button class="tab-link active" onclick="openTab(event, 'Dashboard')">Dashboard</button>
                    <button class="tab-link" onclick="openTab(event, 'LogHistory')">Log History</button>
                </div>

                <div id="Dashboard" class="tab-content" style="display: block;">
                    <p class="section-description">Visualize your journey over time. Each data point represents a completed check-in.</p>
                    <div class="dashboard-grid">
                        <div class="chart-card"><canvas id="trends-chart"></canvas></div>
                        <div class="chart-card"><canvas id="emotion-chart"></canvas></div>
                        <div class="chart-card"><canvas id="voice-emotion-chart"></canvas></div>
                    </div>
                </div>

                <div id="LogHistory" class="tab-content">
                    <p class="section-description">A complete record of all your past check-ins.</p>
                    <div class="table-container">
                        <table id="log-table">
                            <thead><tr>
                                <th>Timestamp</th><th>Face Emotion</th><th>Voice Emotion</th>
                                <th>Sleep</th><th>Activity</th><th>Stress Score</th>
                            </tr></thead>
                            <!-- tbody rows are generated by populateLogTable() in main.js -->
                            <tbody></tbody>
                        </table>
                        <p id="no-logs-message" style="display:none;">No logs found. Complete a check-in to get started!</p>
                    </div>
                </div>
            </div>

            <footer>
                <p><em>Disclaimer: This tool is for informational purposes and is not a substitute for professional advice.</em></p>
                <button id="clear-logs-btn" class="danger">Clear All Log Data</button>
            </footer>
        </main>
    </div>
    <script src="{{ url_for('static', filename='js/main.js') }}"></script>
</body>
</html>
|