Arnel Gwen Nuqui committed on
Commit 44ed412 · 1 Parent(s): ee584d7

Initial commit for ProctorVision AI Docker backend

.gitignore ADDED
@@ -0,0 +1,7 @@
+ venv/
+ __pycache__/
+ *.pyc
+ *.pyo
+ *.log
+ model/*.h5
+ model/*.keras
Dockerfile ADDED
@@ -0,0 +1,38 @@
+ # ------------------------------------------------------------
+ # Hugging Face Spaces - Docker Flask + TensorFlow + MediaPipe
+ # ------------------------------------------------------------
+
+ # Base image (Python + system deps)
+ FROM python:3.10-slim
+
+ # Disable interactive prompts
+ ENV DEBIAN_FRONTEND=noninteractive
+ ENV PYTHONUNBUFFERED=1
+ ENV PYTHONDONTWRITEBYTECODE=1
+
+ # Install system packages
+ RUN apt-get update && apt-get install -y \
+     libgl1 \
+     libglib2.0-0 \
+     libopencv-core-dev \
+     libatlas-base-dev \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Set work directory
+ WORKDIR /app
+
+ # Copy dependency list
+ COPY requirements.txt .
+
+ # Upgrade pip and install dependencies
+ RUN pip install --no-cache-dir --upgrade pip \
+     && pip install --no-cache-dir -r requirements.txt
+
+ # Copy the full project
+ COPY . .
+
+ # Expose Space default port
+ EXPOSE 7860
+
+ # Command to run the Flask server
+ CMD ["python", "app.py"]
app.py ADDED
@@ -0,0 +1,66 @@
+ import os
+
+ # -------------------------------------------------------------
+ # Environment Configuration
+ # -------------------------------------------------------------
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # Suppress TensorFlow INFO/WARN
+ os.environ["GLOG_minloglevel"] = "2"      # Suppress MediaPipe logs
+
+ from flask import Flask, jsonify
+ from flask_cors import CORS
+
+ # -------------------------------------------------------------
+ # Flask Initialization
+ # -------------------------------------------------------------
+ app = Flask(__name__)
+
+ # CORS Configuration
+ CORS(
+     app,
+     resources={
+         r"/api/*": {
+             "origins": [
+                 "http://localhost:3000",
+                 "http://127.0.0.1:3000",
+                 "https://proctorvision-client.vercel.app",  # your frontend
+                 "https://<your-hf-space>.hf.space"          # if using Hugging Face
+             ]
+         }
+     },
+     supports_credentials=True,
+ )
+
+ # -------------------------------------------------------------
+ # Import Blueprints
+ # -------------------------------------------------------------
+ try:
+     from routes.classification_routes import classification_bp
+     from routes.webrtc_routes import webrtc_bp
+     app.register_blueprint(classification_bp, url_prefix="/api")
+     app.register_blueprint(webrtc_bp, url_prefix="/api")
+     print("✅ Blueprints registered successfully.")
+ except Exception as e:
+     print(f"⚠️ Warning: Failed to import one or more routes: {e}")
+
+ # -------------------------------------------------------------
+ # Root & Health Check Route
+ # -------------------------------------------------------------
+ @app.route("/")
+ def home():
+     return jsonify({
+         "status": "ok",
+         "message": "✅ ProctorVision AI Backend Running",
+         "available_routes": [
+             "/api/classify_multiple",
+             "/api/classify_behavior_logs",
+             "/api/webrtc"
+         ]
+     })
+
+ # -------------------------------------------------------------
+ # Main Entrypoint
+ # -------------------------------------------------------------
+ if __name__ == "__main__":
+     port = int(os.environ.get("PORT", 7860))  # Hugging Face / Railway default
+     debug = os.environ.get("DEBUG", "False").lower() == "true"
+     app.run(host="0.0.0.0", port=port, debug=debug)
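As a quick smoke test of the health-check route above, a minimal client sketch (assumes the `requests` package, which this commit does not install, and a server already running on the default port 7860):

import requests  # assumed to be installed separately; not in requirements.txt

# Query the root health-check route defined in app.py.
resp = requests.get("http://localhost:7860/")
payload = resp.json()
print(payload["status"])            # "ok" when the server is up
print(payload["available_routes"])  # the /api/* endpoints listed above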
database/__init__.py ADDED
File without changes
database/connection.py ADDED
@@ -0,0 +1,12 @@
+ import os
+ import mysql.connector
+
+ def get_db_connection():
+     connection = mysql.connector.connect(
+         host=os.getenv("DB_HOST", "localhost"),
+         port=os.getenv("DB_PORT", "3306"),
+         user=os.getenv("DB_USER", "root"),
+         password=os.getenv("DB_PASSWORD", ""),
+         database=os.getenv("DB_NAME", "proctorvision_db")
+     )
+     return connection
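connection.py reads all credentials from environment variables with local-development defaults, so a deployment only needs to export the variables before the app starts. A small sketch (host and database values are placeholders):

import os
from database.connection import get_db_connection

# Placeholder values; a real deployment would set these in the environment.
os.environ.setdefault("DB_HOST", "127.0.0.1")
os.environ.setdefault("DB_NAME", "proctorvision_db")

conn = get_db_connection()
cur = conn.cursor()
cur.execute("SELECT 1")
print(cur.fetchone())  # (1,)
cur.close()
conn.close()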
database/db_query.txt ADDED
@@ -0,0 +1,79 @@
+ CREATE DATABASE proctorvision_db;
+
+ USE proctorvision_db;
+
+ CREATE TABLE admin (
+     id INT AUTO_INCREMENT PRIMARY KEY,
+     admin_id VARCHAR(50),
+     name VARCHAR(100),
+     username VARCHAR(50),
+     email VARCHAR(100),
+     password VARCHAR(255)
+ );
+
+ CREATE TABLE users (
+     id INT AUTO_INCREMENT PRIMARY KEY,
+     user_id VARCHAR(50),
+     name VARCHAR(100),
+     username VARCHAR(50),
+     email VARCHAR(100),
+     password VARCHAR(255),
+     user_type VARCHAR(50)
+ );
+
+ INSERT INTO admin (admin_id, name, username, email, password)
+ VALUES
+ ('ADM001', 'Gwen Nuqui', 'gwenadmin', 'gwen@example.com', '$2b$12$XrXNhJK8Gv5y6gXXQFZxJeHuqA6Z/hOZ.Af2okyWd8BpRU7hZzS3C'); -- password123
+
+ SELECT * FROM admin;
+ SELECT * FROM users;
+
+ USE proctorvision_db;
+
+ SELECT * FROM admin;
+ SELECT * FROM users;
+ SELECT * FROM exams;
+ SELECT * FROM exam_students;
+ SELECT * FROM instructor_assignment;
+
+ CREATE TABLE suspicious_behavior_logs (
+     id INT AUTO_INCREMENT PRIMARY KEY,
+     user_id INT NOT NULL,
+     exam_id INT NOT NULL,
+     image_base64 LONGTEXT,
+     warning_type VARCHAR(255),
+     timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+
+     FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE,
+     FOREIGN KEY (exam_id) REFERENCES exams(id) ON DELETE CASCADE
+ );
+
+ SELECT * FROM suspicious_behavior_logs;
+
+
+ ALTER TABLE suspicious_behavior_logs
+ ADD COLUMN classification_label VARCHAR(50) DEFAULT NULL;
+
+ CREATE TABLE exam_submissions (
+     id INT AUTO_INCREMENT PRIMARY KEY,
+     user_id INT NOT NULL,
+     exam_id INT NOT NULL,
+     submitted_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+     UNIQUE KEY unique_submission (user_id, exam_id)
+ );
+
+ SELECT * FROM exam_submissions;
+
+ DELETE FROM suspicious_behavior_logs WHERE id = 1;
+
+ CREATE TABLE student_profiles (
+     user_id VARCHAR(50) PRIMARY KEY,
+     course VARCHAR(100),
+     section VARCHAR(100),
+     FOREIGN KEY (user_id) REFERENCES users(user_id) ON DELETE CASCADE
+ );
+
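Given the suspicious_behavior_logs table above, a short sketch of querying it through the project's own connection helper (the exam id is a placeholder):

from database.connection import get_db_connection

conn = get_db_connection()
cur = conn.cursor(dictionary=True)
cur.execute(
    "SELECT id, warning_type, classification_label, timestamp "
    "FROM suspicious_behavior_logs "
    "WHERE exam_id = %s ORDER BY timestamp DESC LIMIT 5",
    (1,),  # placeholder exam id
)
for row in cur.fetchall():
    print(row)
cur.close()
conn.close()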
database/manual_insert.py ADDED
@@ -0,0 +1,58 @@
+ import bcrypt
+ import mysql.connector
+
+ # Database connection
+ from connection import get_db_connection
+ conn = get_db_connection()
+ cursor = conn.cursor()
+
+ # List of (name, username) tuples
+ users = [
+     ("Melojean C. Marave", "melojean"),
+     ("Carl Angelo S. Pamploma", "carl"),
+     ("Geoffrey S. Sepillo", "geoffrey"),
+     ("Hansel S. Ada", "hansel"),
+     ("John Lenon E. Agatep", "john"),
+     ("Israel M. Cabasug", "israel"),
+     ("Niemea M. Galang", "niemea"),
+     ("Jason S. Arates", "jason"),
+     ("Fiel M. Dullas", "fiel"),
+     ("Darwin M. Morana", "darwin"),
+     ("Ronnel M. Mesia", "ronnel"),
+     ("May Ann A. Acera", "may"),
+     ("Joseph J. Juliano", "joseph"),
+     ("Daniel A. Bachillar", "daniel"),
+     ("Darly John Ragadio", "darly"),
+     ("Eufemia Sion", "eufemia"),
+     ("Marionne Joyce F. Tapado", "marionne"),
+     ("Rowela Gongora", "rowela"),
+     ("Joseph S. Cortez", "joseph2"),
+     ("King Myer Mantolino", "king"),
+     ("Jamil Tan Elamparo", "jamil"),
+     ("Rowena Orboc", "rowena"),
+     ("Hicel Mae Mas", "hicel"),
+     ("Karen Quintoriano", "karen"),
+     ("Ashley Rambuyong", "ashley"),
+     ("Kie Ann Josafat", "kie"),
+     ("Jio Erika Pelinio", "jio"),
+     ("Dane Nalicat", "dane"),
+     ("Radowena Payumo", "radowena"),
+     ("Michael G. Albino", "michael"),
+     ("Apple Escalante", "apple"),
+     ("Katherine Uy", "katherine"),
+ ]
+
+ # Insert each instructor with hashed password
+ for name, username in users:
+     email = f"{username}@email.com"
+     raw_password = f"{username}123"
+     hashed_password = bcrypt.hashpw(raw_password.encode('utf-8'), bcrypt.gensalt())
+
+     cursor.execute("""
+         INSERT INTO users (name, username, email, password, user_type)
+         VALUES (%s, %s, %s, %s, %s)
+     """, (name, username, email, hashed_password, "Instructor"))
+
+ conn.commit()
+ cursor.close()
+ conn.close()
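Since the script stores bcrypt hashes rather than plain text, a matching login check would compare the submitted password against the stored hash with bcrypt.checkpw. A minimal sketch (the password follows the username + "123" pattern used above):

import bcrypt

stored = bcrypt.hashpw(b"melojean123", bcrypt.gensalt())  # as inserted by the script
print(bcrypt.checkpw(b"melojean123", stored))  # True
print(bcrypt.checkpw(b"wrong-pass", stored))   # False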
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ Flask==3.0.3
+ Flask-Cors==4.0.0
+ numpy==1.26.4
+ opencv-python-headless==4.9.0.80
+ mediapipe==0.10.14
+ Pillow==10.3.0
+ gunicorn==21.2.0
+ tensorflow-cpu==2.15.0
routes/__init__.py ADDED
File without changes
routes/classification_routes.py ADDED
@@ -0,0 +1,171 @@
+ import os, io, base64
+ from pathlib import Path
+ from flask import Blueprint, request, jsonify
+ import tensorflow as tf
+ import numpy as np
+ import cv2
+ from PIL import Image
+
+ try:
+     # TF-bundled Keras (common with TensorFlow installs)
+     from tensorflow.keras.applications import mobilenet_v2 as _mv2  # type: ignore
+ except Exception:
+     # Standalone Keras 3
+     from keras.applications import mobilenet_v2 as _mv2  # type: ignore
+
+ preprocess_input = _mv2.preprocess_input
+
+ from database.connection import get_db_connection
+
+ classification_bp = Blueprint('classification_bp', __name__)
+
+ # ---------- Model & threshold loading ----------
+ BASE_DIR = Path(__file__).resolve().parent.parent  # -> server/
+ MODEL_DIR = BASE_DIR / "model"
+
+ CANDIDATES = [
+     "cheating_mobilenetv2_final.keras",  # your chosen deploy name
+     "mnv2_clean_best.keras",
+     "mnv2_continue.keras",
+     "mnv2_finetune_best.keras",
+ ]
+
+ model_path = next((MODEL_DIR / f for f in CANDIDATES if (MODEL_DIR / f).exists()), None)
+ if model_path and model_path.exists():
+     model = tf.keras.models.load_model(model_path, compile=False)
+     print(f"Model loaded: {model_path}")
+ else:
+     model = None
+     print(f"No model file found in {MODEL_DIR}. Put one of: {CANDIDATES}")
+
+ thr_file = MODEL_DIR / "best_threshold.npy"
+ THRESHOLD = float(np.load(thr_file)[0]) if thr_file.exists() else 0.555
+ print(f"Using decision threshold: {THRESHOLD:.3f}")
+
+ # Input size
+ if model is not None:
+     H, W = model.input_shape[1:3]
+ else:
+     H, W = 224, 224  # fallback
+
+ # Class mapping used in training:
+ # class 0 -> "cheating", class 1 -> "non-cheating"
+ LABELS = ["Cheating", "Not Cheating"]
+
+ def preprocess_pil(pil_img: Image.Image) -> np.ndarray:
+     """Convert PIL -> model-ready tensor (1, H, W, 3) using MobileNetV2 preprocessing."""
+     img = pil_img.convert("RGB")
+     if img.size != (W, H):
+         img = img.resize((W, H), Image.BILINEAR)
+     x = np.asarray(img, dtype=np.float32)
+     x = preprocess_input(x)  # <- IMPORTANT: same preprocessing as training
+     return np.expand_dims(x, 0)
+
+ def predict_batch(batch_np: np.ndarray) -> np.ndarray:
+     """batch_np: (N, H, W, 3) already preprocessed; returns probs of class 1 (non-cheating)."""
+     probs = model.predict(batch_np, verbose=0).ravel()
+     # If model outputs 2 logits (softmax), convert to class-1 prob
+     if probs.ndim == 0:  # single item edge case
+         probs = np.array([probs])
+     if len(probs) != batch_np.shape[0]:
+         # Handle (N,2) softmax output
+         raw = model.predict(batch_np, verbose=0)
+         if raw.ndim == 2 and raw.shape[1] == 2:
+             probs = raw[:, 1]  # prob for class index 1 => "Not Cheating"
+         else:
+             probs = raw.ravel()
+     return probs
+
+ def label_from_prob(prob_non_cheating: float) -> str:
+     """prob = P(class 1 = 'Not Cheating')"""
+     return LABELS[int(prob_non_cheating >= THRESHOLD)]
+ # ------------------------------------------------
+
+
+ # -------- Route 1: classify uploaded multiple files --------
+ @classification_bp.route('/classify_multiple', methods=['POST'])
+ def classify_multiple():
+     if model is None:
+         return jsonify({"error": "Model not loaded."}), 500
+
+     files = request.files.getlist('files') if 'files' in request.files else []
+     if not files:
+         return jsonify({"error": "No files uploaded"}), 400
+
+     # Preprocess all, predict in one batch (faster & avoids TF retracing spam)
+     batch = []
+     for f in files:
+         try:
+             pil = Image.open(io.BytesIO(f.read()))
+             batch.append(preprocess_pil(pil)[0])  # (H,W,3)
+         except Exception as e:
+             return jsonify({"error": f"Error reading an image: {str(e)}"}), 400
+
+     batch_np = np.stack(batch, axis=0)  # (N,H,W,3)
+     probs = predict_batch(batch_np)     # prob of class 1 (Not Cheating)
+     labels = [label_from_prob(p) for p in probs]
+
+     return jsonify({
+         "threshold": THRESHOLD,
+         "results": [{"label": lbl, "prob_non_cheating": float(p)} for lbl, p in zip(labels, probs)]
+     })
+
+
+ # -------- Route 2: auto-classify behavior logs --------
+ @classification_bp.route('/classify_behavior_logs', methods=['POST'])
+ def classify_behavior_logs():
+     if model is None:
+         return jsonify({"error": "Model not loaded."}), 500
+
+     data = request.get_json(silent=True) or {}
+     user_id = data.get('user_id')
+     exam_id = data.get('exam_id')
+     if not user_id or not exam_id:
+         return jsonify({"error": "Missing user_id or exam_id"}), 400
+
+     try:
+         conn = get_db_connection()
+         cursor = conn.cursor(dictionary=True)
+
+         cursor.execute("""
+             SELECT id, image_base64 FROM suspicious_behavior_logs
+             WHERE user_id = %s AND exam_id = %s AND image_base64 IS NOT NULL
+         """, (user_id, exam_id))
+         logs = cursor.fetchall()
+
+         # Vectorized predict in chunks
+         CHUNK = 64
+         for i in range(0, len(logs), CHUNK):
+             chunk = logs[i:i+CHUNK]
+             batch = []
+             ids = []
+             for log in chunk:
+                 try:
+                     img_data = base64.b64decode(log['image_base64'])
+                     pil = Image.open(io.BytesIO(img_data))
+                     batch.append(preprocess_pil(pil)[0])
+                     ids.append(log['id'])
+                 except Exception as e:
+                     print(f"Failed to read image ID {log['id']}: {e}")
+
+             if not batch:
+                 continue
+
+             batch_np = np.stack(batch, axis=0)
+             probs = predict_batch(batch_np)
+             labels = [label_from_prob(p) for p in probs]
+
+             # write back
+             cur2 = conn.cursor()
+             for _id, lbl in zip(ids, labels):
+                 cur2.execute(
+                     "UPDATE suspicious_behavior_logs SET classification_label=%s WHERE id=%s",
+                     (lbl, _id)
+                 )
+             conn.commit()
+
+         conn.close()
+         return jsonify({"message": "Classification complete.", "threshold": THRESHOLD}), 200
+
+     except Exception as e:
+         return jsonify({"error": str(e)}), 500
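A hedged client sketch for the /api/classify_multiple route above (assumes the `requests` package and two local JPEG files; the file names are placeholders):

import requests  # assumed; not part of this commit

# Send two local images as the multipart field 'files', matching
# request.files.getlist('files') in the route.
with open("frame1.jpg", "rb") as f1, open("frame2.jpg", "rb") as f2:
    resp = requests.post(
        "http://localhost:7860/api/classify_multiple",
        files=[("files", f1), ("files", f2)],
    )
print(resp.json())
# e.g. {"threshold": 0.555, "results": [{"label": "...", "prob_non_cheating": ...}, ...]}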
routes/webrtc_routes.py ADDED
@@ -0,0 +1,511 @@
+ # routes/webrtc_routes.py
+ from flask import Blueprint, request, jsonify
+ import asyncio, time, traceback, os, threading, base64
+ from collections import defaultdict, deque
+ from aiortc import RTCPeerConnection, RTCSessionDescription
+ from aiortc.contrib.media import MediaBlackhole
+ import cv2, numpy as np
+ import mediapipe as mp
+
+ from services.behavior_service import save_behavior_log_async  # save image to DB (async)
+ from services.instructor_services import increment_suspicious_for_student_async  # bump counter (async)
+
+ webrtc_bp = Blueprint("webrtc", __name__)
+
+ # -------- logging / config --------
+ SUMMARY_EVERY_S = float(os.getenv("PROCTOR_SUMMARY_EVERY_S", "1.0"))
+ RECV_TIMEOUT_S = float(os.getenv("PROCTOR_RECV_TIMEOUT_S", "5.0"))  # timeout for track.recv
+ HEARTBEAT_S = float(os.getenv("PROCTOR_HEARTBEAT_S", "10.0"))       # reader heartbeat log
+
+ def log(event, sid="-", eid="-", **kv):
+     tail = " ".join(f"{k}={v}" for k, v in kv.items())
+     print(f"[{event}] sid={sid} eid={eid} {tail}".strip(), flush=True)
+
+ # -------- persistent asyncio loop (aiortc needs this) --------
+ _loop = asyncio.new_event_loop()
+ threading.Thread(target=_loop.run_forever, daemon=True).start()
+ def run_coro(coro):
+     return asyncio.run_coroutine_threadsafe(coro, _loop).result()
+
+ # -------- global state --------
+ pcs = set()
+ last_warning = defaultdict(lambda: {"warning": "Looking Forward", "at": 0})
+ last_metrics = defaultdict(lambda: {"yaw": None, "pitch": None, "dx": None, "dy": None, "fps": None, "label": "n/a", "at": 0})
+ last_capture = defaultdict(lambda: {"label": None, "at": 0})
+
+ # -------- thresholds (env tunable) --------
+ # head pose
+ YAW_DEG_TRIG = float(os.getenv("PROCTOR_YAW_DEG", "12"))                # left/right
+ PITCH_DEG_TRIG_UP = float(os.getenv("PROCTOR_PITCH_UP_DEG", "10"))      # up
+ PITCH_DEG_TRIG_DOWN = float(os.getenv("PROCTOR_PITCH_DOWN_DEG", "16"))  # down (less sensitive than before)
+ DX_TRIG = float(os.getenv("PROCTOR_DX", "0.06"))
+ DY_TRIG_UP = float(os.getenv("PROCTOR_DY_UP", "0.08"))
+ DY_TRIG_DOWN = float(os.getenv("PROCTOR_DY_DOWN", "0.10"))  # down (less sensitive)
+ SMOOTH_N = int(os.getenv("PROCTOR_SMOOTH_N", "5"))
+
+ # capture policy
+ CAPTURE_MIN_MS = int(os.getenv("PROCTOR_CAPTURE_MIN_MS", "1200"))
+ HOLD_FRAMES_HEAD = int(os.getenv("PROCTOR_HOLD_FRAMES_HEAD", "3"))
+ HOLD_FRAMES_NOFACE = int(os.getenv("PROCTOR_HOLD_FRAMES_NOFACE", "3"))
+ HOLD_FRAMES_HAND = int(os.getenv("PROCTOR_HOLD_FRAMES_HAND", "5"))  # stricter for hands
+
+ # hand filters (reduce sensitivity)
+ HAND_MIN_BOX_FRAC = float(os.getenv("PROCTOR_HAND_MIN_BOX_FRAC", "0.025"))  # 2.5% of frame area
+ HAND_MAX_BOX_FRAC = float(os.getenv("PROCTOR_HAND_MAX_BOX_FRAC", "0.60"))   # ignore near-full-frame noise
+ HAND_REQUIRE_LANDMARK = int(os.getenv("PROCTOR_HAND_REQUIRE_LANDMARK", "7"))  # at least N landmarks in-box
+
+ # -------- MediaPipe --------
+ mp_face_mesh = mp.solutions.face_mesh
+ face_mesh = mp_face_mesh.FaceMesh(
+     static_image_mode=False, max_num_faces=1, refine_landmarks=True,
+     min_detection_confidence=0.6, min_tracking_confidence=0.6
+ )
+ mp_hands = mp.solutions.hands
+ hands = mp_hands.Hands(
+     static_image_mode=False, max_num_hands=2,
+     min_detection_confidence=0.6, min_tracking_confidence=0.6
+ )
+
+ # FaceMesh indices + 3D model
+ IDX_NOSE, IDX_CHIN = 1, 152
+ IDX_LE, IDX_RE = 263, 33
+ IDX_LM, IDX_RM = 291, 61
+ MODEL_3D = np.array([
+     [  0.0,   0.0,   0.0],
+     [  0.0, -63.6, -12.5],
+     [-43.3,  32.7, -26.0],
+     [ 43.3,  32.7, -26.0],
+     [-28.9, -28.9, -24.1],
+     [ 28.9, -28.9, -24.1],
+ ], dtype=np.float32)
+
+ def _landmarks_to_pts(lms, w, h):
+     ids = [IDX_NOSE, IDX_CHIN, IDX_LE, IDX_RE, IDX_LM, IDX_RM]
+     return np.array([[lms[i].x * w, lms[i].y * h] for i in ids], dtype=np.float32)
+
+ def _bbox_from_landmarks(lms, w, h, pad=0.03):
+     xs = [p.x for p in lms]; ys = [p.y for p in lms]
+     x1n, y1n = max(0.0, min(xs) - pad), max(0.0, min(ys) - pad)
+     x2n, y2n = min(1.0, max(xs) + pad), min(1.0, max(ys) + pad)
+     return (int(x1n*w), int(y1n*h), int(x2n*w), int(y2n*h))
+
+ # -------- detector --------
+ class ProctorDetector:
+     def __init__(self):
+         self.yaw_hist = deque(maxlen=SMOOTH_N)
+         self.pitch_hist = deque(maxlen=SMOOTH_N)
+         self.dx_hist = deque(maxlen=SMOOTH_N)
+         self.dy_hist = deque(maxlen=SMOOTH_N)
+         self.last_print = 0.0
+         self.base_yaw = None
+         self.base_pitch = None
+         self.last_capture_ms = 0
+
+         # streaks for noface / hand capture holds
+         self.noface_streak = 0
+         self.hand_streak = 0
+
+     def _update_baseline(self, yaw, pitch):
+         alpha = 0.10
+         if yaw is not None:
+             self.base_yaw = yaw if self.base_yaw is None else (1-alpha)*self.base_yaw + alpha*yaw
+         if pitch is not None:
+             self.base_pitch = pitch if self.base_pitch is None else (1-alpha)*self.base_pitch + alpha*pitch
+
+     def _pose_angles(self, lms, w, h):
+         try:
+             pts2d = _landmarks_to_pts(lms, w, h)
+             cam = np.array([[w, 0, w/2],
+                             [0, w, h/2],
+                             [0, 0, 1  ]], dtype=np.float32)
+             dist = np.zeros((4,1), dtype=np.float32)
+             ok, rvec, _ = cv2.solvePnP(MODEL_3D, pts2d, cam, dist, flags=cv2.SOLVEPNP_ITERATIVE)
+             if not ok: return None, None
+             R, _ = cv2.Rodrigues(rvec)
+             *_, euler = cv2.RQDecomp3x3(R)
+             pitch, yaw, _ = map(float, euler)
+             return yaw, pitch
+         except Exception:
+             return None, None
+
+     def detect(self, bgr, sid="-", eid="-"):
+         h, w = bgr.shape[:2]
+         rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
+         res = face_mesh.process(rgb)
+
+         if not res.multi_face_landmarks:
+             now = time.time()
+             if now - self.last_print >= SUMMARY_EVERY_S:
+                 log("FRAME", sid, eid, note="no_face")
+                 self.last_print = now
+             return "No Face", None, rgb, None, None, None, None
+
+         lms = res.multi_face_landmarks[0].landmark
+         box = _bbox_from_landmarks(lms, w, h, pad=0.03)
+
+         # pixel offsets
+         x1, y1, x2, y2 = box
+         fw, fh = max(1, x2-x1), max(1, y2-y1)
+         cx, cy = x1 + fw/2.0, y1 + fh/2.0
+         nose_x, nose_y = lms[IDX_NOSE].x * w, lms[IDX_NOSE].y * h
+         dx = (nose_x - cx) / fw  # + right
+         dy = (nose_y - cy) / fh  # + down
+         self.dx_hist.append(dx); self.dy_hist.append(dy)
+         dx_s = float(np.median(self.dx_hist))
+         dy_s = float(np.median(self.dy_hist))
+
+         # angles (may be None on some frames)
+         yaw, pitch = self._pose_angles(lms, w, h)
+         yaw_s = pitch_s = None
+         if yaw is not None:
+             self.yaw_hist.append(yaw); yaw_s = float(np.median(self.yaw_hist))
+         if pitch is not None:
+             self.pitch_hist.append(pitch); pitch_s = float(np.median(self.pitch_hist))
+
+         # triggers
+         yaw_trigger = (yaw_s is not None and abs(yaw_s) > YAW_DEG_TRIG) or (abs(dx_s) > DX_TRIG)
+
+         # For "Down", require BOTH pitch and DY to exceed higher thresholds
+         pitch_down_trigger = (
+             (pitch_s is not None and pitch_s > PITCH_DEG_TRIG_DOWN) and
+             (dy_s > DY_TRIG_DOWN)
+         )
+         # For "Up", keep original sensitivity (OR) unless you also want stricter:
+         pitch_up_trigger = (
+             (pitch_s is not None and -pitch_s > PITCH_DEG_TRIG_UP) or
+             (dy_s < -DY_TRIG_UP)
+         )
+
+         label = "Looking Forward"
+         if yaw_trigger and (abs(dx_s) >= abs(dy_s) or not (pitch_up_trigger or pitch_down_trigger)):
+             label = "Looking Right" if dx_s > 0 else "Looking Left"
+         elif pitch_down_trigger:
+             label = "Looking Down"
+         elif pitch_up_trigger:
+             label = "Looking Up"
+
+         if label == "Looking Forward":
+             self._update_baseline(yaw_s, pitch_s)
+
+         now = time.time()
+         if now - self.last_print >= SUMMARY_EVERY_S:
+             log("FRAME", sid, eid,
+                 yaw=f"{yaw_s:.1f}" if yaw_s is not None else "na",
+                 pitch=f"{pitch_s:.1f}" if pitch_s is not None else "na",
+                 dx=f"{dx_s:.3f}", dy=f"{dy_s:.3f}", choose=label)
+             self.last_print = now
+
+         return label, box, rgb, yaw_s, pitch_s, dx_s, dy_s
+
+     def detect_hands_anywhere(self, rgb):
+         """
+         Less sensitive hand detection:
+         - requires a minimum hand bounding box area vs frame (HAND_MIN_BOX_FRAC)
+         - ignores absurdly large boxes (HAND_MAX_BOX_FRAC)
+         - returns ('Hand Detected', count) only if at least one qualified hand is present
+         """
+         h, w = rgb.shape[:2]
+         frame_area = float(h * w)
+         res = hands.process(rgb)
+         if not res.multi_hand_landmarks:
+             return None, 0
+
+         qualified = 0
+         for hlms in res.multi_hand_landmarks:
+             xs = [p.x for p in hlms.landmark]; ys = [p.y for p in hlms.landmark]
+             x1 = max(0, int(min(xs) * w)); y1 = max(0, int(min(ys) * h))
+             x2 = min(w, int(max(xs) * w)); y2 = min(h, int(max(ys) * h))
+             bw, bh = max(1, x2 - x1), max(1, y2 - y1)
+             box_area_frac = (bw * bh) / frame_area
+
+             # crude landmark-count-in-box sanity (they all are, but keep option)
+             lm_in_box = sum(
+                 1 for p in hlms.landmark
+                 if (x1 <= int(p.x * w) <= x2 and y1 <= int(p.y * h) <= y2)
+             )
+
+             if HAND_MIN_BOX_FRAC <= box_area_frac <= HAND_MAX_BOX_FRAC and lm_in_box >= HAND_REQUIRE_LANDMARK:
+                 qualified += 1
+
+         if qualified > 0:
+             return "Hand Detected", qualified
+         return None, 0
+
+     # capture throttles (shared for all capture kinds)
+     def _throttle_ok(self):
+         return int(time.time()*1000) - self.last_capture_ms >= CAPTURE_MIN_MS
+     def _mark_captured(self):
+         self.last_capture_ms = int(time.time()*1000)
+
+     # head (left/right/up/down)
+     def should_capture_head(self, head_label, sid="-", eid="-"):
+         if head_label in ("Looking Forward", "No Face"):
+             return False
+         if not self._throttle_ok():
+             return False
+
+         # Pixel hold
+         pixel_hold_ok = False
+         if len(self.dx_hist) >= HOLD_FRAMES_HEAD and len(self.dy_hist) >= HOLD_FRAMES_HEAD:
+             dxs = list(self.dx_hist)[-HOLD_FRAMES_HEAD:]
+             dys = list(self.dy_hist)[-HOLD_FRAMES_HEAD:]
+             if head_label in ("Looking Left", "Looking Right"):
+                 pixel_hold_ok = all(abs(x) > DX_TRIG for x in dxs)
+             elif head_label in ("Looking Up", "Looking Down"):
+                 # For "Down", mirror the stricter rule: require DY hold above down threshold
+                 if head_label == "Looking Down":
+                     pixel_hold_ok = all(y > DY_TRIG_DOWN for y in dys)
+                 else:  # Looking Up
+                     pixel_hold_ok = all(-y > DY_TRIG_UP for y in dys)
+             else:
+                 pixel_hold_ok = (all(abs(x) > DX_TRIG for x in dxs) or
+                                  all(abs(y) > max(DY_TRIG_UP, DY_TRIG_DOWN) for y in dys))
+
+         # Angle hold
+         angle_hold_ok = False
+         if len(self.yaw_hist) >= HOLD_FRAMES_HEAD and len(self.pitch_hist) >= HOLD_FRAMES_HEAD:
+             by = self.base_yaw if self.base_yaw is not None else 0.0
+             bp = self.base_pitch if self.base_pitch is not None else 0.0
+             recent_yaw = list(self.yaw_hist)[-HOLD_FRAMES_HEAD:]
+             recent_pitch = list(self.pitch_hist)[-HOLD_FRAMES_HEAD:]
+             if head_label in ("Looking Left", "Looking Right"):
+                 angle_hold_ok = all(abs(y - by) > YAW_DEG_TRIG for y in recent_yaw)
+             elif head_label == "Looking Down":
+                 angle_hold_ok = all((p - bp) > PITCH_DEG_TRIG_DOWN for p in recent_pitch)
+             elif head_label == "Looking Up":
+                 angle_hold_ok = all((bp - p) > PITCH_DEG_TRIG_UP for p in recent_pitch)
+
+         ok = pixel_hold_ok or angle_hold_ok
+         if ok:
+             log("CAPTURE_DECISION_HEAD", sid, eid, label=head_label,
+                 pixel_hold=pixel_hold_ok, angle_hold=angle_hold_ok,
+                 hold_frames=HOLD_FRAMES_HEAD, ok=True)
+             self._mark_captured()
+         return ok
+
+     # no face
+     def should_capture_noface(self, sid="-", eid="-"):
+         if not self._throttle_ok():
+             return False
+         ok = self.noface_streak >= HOLD_FRAMES_NOFACE
+         if ok:
+             log("CAPTURE_DECISION_NOFACE", sid, eid,
+                 streak=self.noface_streak, hold_frames=HOLD_FRAMES_NOFACE, ok=True)
+             self._mark_captured()
+         return ok
+
+     # hands
+     def should_capture_hand(self, hand_label, sid="-", eid="-"):
+         if not hand_label:
+             return False
+         if not self._throttle_ok():
+             return False
+         ok = self.hand_streak >= HOLD_FRAMES_HAND
+         if ok:
+             log("CAPTURE_DECISION_HAND", sid, eid,
+                 streak=self.hand_streak, hold_frames=HOLD_FRAMES_HAND, ok=True)
+             self._mark_captured()
+         return ok
+
+ detectors = defaultdict(ProctorDetector)
+
+ # -------- capture helper --------
+ def _maybe_capture(student_id: str, exam_id: str, bgr, label: str):
+     ok, buf = cv2.imencode(".jpg", bgr)
+     if not ok:
+         log("CAPTURE_SKIP", student_id, exam_id, reason="encode_failed")
+         return
+
+     img_b64 = base64.b64encode(buf).decode("utf-8")
+     log("CAPTURE_ENQUEUE", student_id, exam_id, label=label, bytes=len(buf))
+
+     # Save image to DB (async)
+     save_behavior_log_async(
+         int(student_id), int(exam_id), img_b64, label,
+         on_error=lambda e: log("CAPTURE_ERR", student_id, exam_id, err=str(e))
+     )
+
+     # bump suspicious counter (async)
+     increment_suspicious_for_student_async(int(student_id))
+
+     # update in-memory cache for frontend beep
+     ts = int(time.time() * 1000)
+     last_capture[(student_id, exam_id)] = {"label": label, "at": ts}
+     log("LAST_CAPTURE_SET", student_id, exam_id, label=label, at=ts)
+
+ # -------- aiortc helpers --------
+ async def _wait_ice_complete(pc: RTCPeerConnection):
+     if pc.iceGatheringState == "complete":
+         return
+     done = asyncio.Event()
+     @pc.on("icegatheringstatechange")
+     def _(_ev=None):
+         if pc.iceGatheringState == "complete":
+             done.set()
+     await asyncio.wait_for(done.wait(), timeout=5.0)
+
+ async def handle_offer(data: dict):
+     student_id = str(data.get("student_id", "0"))
+     exam_id = str(data.get("exam_id", "0"))
+     key = (student_id, exam_id)
+     log("OFFER_HANDLE", student_id, exam_id)
+
+     offer = RTCSessionDescription(sdp=data["sdp"], type=data["type"])
+     pc = RTCPeerConnection()
+     pcs.add(pc)
+     log("PC_CREATED", student_id, exam_id, pc=id(pc))
+
+     @pc.on("connectionstatechange")
+     async def _():
+         log("PC_STATE", student_id, exam_id, state=pc.connectionState)
+         if pc.connectionState in ("failed", "closed", "disconnected"):
+             try:
+                 await pc.close()
+             finally:
+                 pcs.discard(pc)
+                 for d in (detectors, last_warning, last_metrics, last_capture):
+                     d.pop(key, None)
+                 log("PC_CLOSED", student_id, exam_id, pc=id(pc))
+
+     @pc.on("track")
+     def on_track(track):
+         log("TRACK", student_id, exam_id, kind=track.kind)
+         if track.kind == "video":
+             async def reader():
+                 det = detectors[key]
+                 t_fps = time.time(); frames = 0
+                 last_heartbeat = time.time()
+
+                 while True:
+                     # --- receive with timeout ---
+                     try:
+                         frame = await asyncio.wait_for(track.recv(), timeout=RECV_TIMEOUT_S)
+                     except asyncio.TimeoutError:
+                         # lightweight heartbeat on timeouts
+                         now = time.time()
+                         if now - last_heartbeat >= HEARTBEAT_S:
+                             log("TRACK_RECV_TIMEOUT", student_id, exam_id, timeout_s=RECV_TIMEOUT_S)
+                             last_heartbeat = now
+                         continue
+                     except Exception as e:
+                         log("TRACK_RECV_END", student_id, exam_id, err=str(e))
+                         break
+
+                     # fps window
+                     frames += 1
+                     now = time.time()
+                     if now - t_fps >= 2.0:
+                         fps = frames / max(1e-6, (now - t_fps))
+                         lm = last_metrics.get(key, {})
+                         lm["fps"] = fps
+                         last_metrics[key] = lm
+                         log("FPS", student_id, exam_id, fps=f"{fps:.1f}")
+                         t_fps = now; frames = 0
+
+                     # --- per-frame detection protected ---
+                     try:
+                         bgr = frame.to_ndarray(format="bgr24")
+                         head_label, face_box, rgb, yaw_s, pitch_s, dx_s, dy_s = det.detect(bgr, sid=student_id, eid=exam_id)
+                         hand_label, _ = det.detect_hands_anywhere(rgb)
+                     except Exception as e:
+                         log("DETECT_ERR", student_id, exam_id, err=str(e))
+                         continue
+
+                     # maintain streaks with stricter holds for hand/noface
+                     det.noface_streak = det.noface_streak + 1 if head_label == "No Face" else 0
+                     det.hand_streak = det.hand_streak + 1 if hand_label else 0
+
+                     # choose UI label
+                     if head_label == "No Face":
+                         warn = "No Face (Hand Detected)" if hand_label else "No Face"
+                     elif head_label != "Looking Forward":
+                         warn = head_label
+                     elif hand_label:
+                         warn = hand_label
+                     else:
+                         warn = "Looking Forward"
+
+                     # --- capture priority with holds/throttle ---
+                     if head_label == "No Face" and det.should_capture_noface(sid=student_id, eid=exam_id):
+                         _maybe_capture(student_id, exam_id, bgr, "No Face")
+                     elif head_label not in ("Looking Forward", "No Face") and det.should_capture_head(head_label, sid=student_id, eid=exam_id):
+                         _maybe_capture(student_id, exam_id, bgr, head_label)
+                     elif hand_label and det.should_capture_hand(hand_label, sid=student_id, eid=exam_id):
+                         _maybe_capture(student_id, exam_id, bgr, "Hand Detected")
+
+                     ts = int(time.time() * 1000)
+                     last_warning[key] = {"warning": warn, "at": ts}
+                     last_metrics[key] = {
+                         "yaw": yaw_s, "pitch": pitch_s, "dx": dx_s, "dy": dy_s,
+                         "fps": last_metrics.get(key, {}).get("fps"), "label": warn, "at": ts
+                     }
+
+                     # reader heartbeat (when frames flow)
+                     if now - last_heartbeat >= HEARTBEAT_S:
+                         log("READER_ALIVE", student_id, exam_id, label=warn)
+                         last_heartbeat = now
+
+             asyncio.ensure_future(reader(), loop=_loop)
+         else:
+             MediaBlackhole().addTrack(track)
+
+     await pc.setRemoteDescription(offer)
+     answer = await pc.createAnswer()
+     await pc.setLocalDescription(answer)
+     await _wait_ice_complete(pc)
+     log("ICE_DONE", student_id, exam_id)
+     return pc.localDescription
+
+ # -------- routes --------
+ @webrtc_bp.route("/webrtc/offer", methods=["POST"])
+ def webrtc_offer():
+     try:
+         data = request.get_json(force=True)
+     except Exception as e:
+         return jsonify({"error": f"invalid json: {e}"}), 400
+     sid = str(data.get("student_id", "0")); eid = str(data.get("exam_id", "0"))
+     log("OFFER_RX", sid, eid)
+     try:
+         desc = run_coro(handle_offer(data))
+     except Exception as e:
+         traceback.print_exc()
+         log("OFFER_ERR", sid, eid, err=str(e))
+         return jsonify({"error": str(e)}), 500
+     log("ANSWER_TX", sid, eid)
+     return jsonify({"sdp": desc.sdp, "type": desc.type})
+
+ @webrtc_bp.route("/webrtc/cleanup", methods=["POST"])
+ def webrtc_cleanup():
+     async def _close_all():
+         for pc in list(pcs):
+             try:
+                 await pc.close()
+             finally:
+                 pcs.discard(pc)
+     run_coro(_close_all())
+     print("[CLEANUP] closed all RTCPeerConnections", flush=True)
+     return jsonify({"ok": True})
+
+ @webrtc_bp.route("/proctor/last_warning")
+ def proctor_last_warning():
+     student_id = request.args.get("student_id")
+     exam_id = request.args.get("exam_id")
+     if not student_id or not exam_id:
+         return jsonify(error="missing student_id or exam_id"), 400
+     return jsonify(last_warning.get((student_id, exam_id), {"warning": "Looking Forward", "at": 0}))
+
+ @webrtc_bp.route("/proctor/last_capture")
+ def proctor_last_capture():
+     student_id = request.args.get("student_id")
+     exam_id = request.args.get("exam_id")
+     if not student_id or not exam_id:
+         return jsonify(error="missing student_id or exam_id"), 400
+     return jsonify(last_capture.get((student_id, exam_id), {"label": None, "at": 0}))
+
+ @webrtc_bp.route("/proctor/metrics")
+ def proctor_metrics():
+     student_id = request.args.get("student_id")
+     exam_id = request.args.get("exam_id")
+     if not student_id or not exam_id:
+         return jsonify(error="missing student_id or exam_id"), 400
+     return jsonify(last_metrics.get((student_id, exam_id), {}))
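The polling endpoints at the end of this file are plain GETs, so a frontend (or a test script) can watch a session like this; a sketch assuming the `requests` package and placeholder ids:

import time
import requests  # assumed; not part of this commit

base = "http://localhost:7860/api"
params = {"student_id": "1", "exam_id": "1"}  # placeholder ids

for _ in range(5):
    warn = requests.get(f"{base}/proctor/last_warning", params=params).json()
    cap = requests.get(f"{base}/proctor/last_capture", params=params).json()
    print(warn.get("warning"), cap.get("label"))
    time.sleep(1)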
runtime.txt ADDED
@@ -0,0 +1 @@
+ python-3.10.13
services/__init__.py ADDED
File without changes
services/behavior_service.py ADDED
@@ -0,0 +1,59 @@
+ # services/behavior_service.py
+ from database.connection import get_db_connection
+ import threading
+
+ def save_behavior_log(user_id: int, exam_id: int, image_base64: str, warning_type: str):
+     """Persist a behavior log. Returns inserted row id when available."""
+     conn = get_db_connection()
+     cur = None
+     try:
+         cur = conn.cursor()
+         row_id = None
+         try:
+             # Postgres path (supports RETURNING)
+             cur.execute(
+                 """
+                 INSERT INTO suspicious_behavior_logs (user_id, exam_id, image_base64, warning_type)
+                 VALUES (%s, %s, %s, %s)
+                 RETURNING id
+                 """,
+                 (user_id, exam_id, image_base64, warning_type),
+             )
+             row = cur.fetchone()
+             if row:
+                 row_id = row[0]
+         except Exception:
+             # Fallback for MySQL/MariaDB (no RETURNING)
+             cur.execute(
+                 """
+                 INSERT INTO suspicious_behavior_logs (user_id, exam_id, image_base64, warning_type)
+                 VALUES (%s, %s, %s, %s)
+                 """,
+                 (user_id, exam_id, image_base64, warning_type),
+             )
+             try:
+                 row_id = getattr(cur, "lastrowid", None)
+             except Exception:
+                 row_id = None
+
+         conn.commit()
+         return row_id
+     except Exception:
+         conn.rollback()
+         raise
+     finally:
+         try:
+             cur.close()
+         except Exception:
+             pass
+         conn.close()
+
+ def save_behavior_log_async(user_id: int, exam_id: int, image_base64: str, warning_type: str, on_error=None):
+     """Fire-and-forget background insert so the WebRTC loop never blocks."""
+     def _worker():
+         try:
+             save_behavior_log(user_id, exam_id, image_base64, warning_type)
+         except Exception as e:
+             if on_error:
+                 on_error(e)
+     threading.Thread(target=_worker, daemon=True).start()
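Usage mirrors the call in routes/webrtc_routes.py: the insert runs on a daemon thread so the frame loop never waits on the database. A sketch with placeholder values:

from services.behavior_service import save_behavior_log_async

save_behavior_log_async(
    user_id=1, exam_id=1,                   # placeholder ids
    image_base64="<jpeg-bytes-as-base64>",  # placeholder, not a real image
    warning_type="Looking Left",
    on_error=lambda e: print(f"insert failed: {e}"),
)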
services/instructor_services.py ADDED
@@ -0,0 +1,93 @@
+ # services/instructor_services.py: increment the suspicious-behavior counter on each capture
+ from database.connection import get_db_connection
+ import threading
+ import sys
+
+ def _find_instructor_id_for_student(conn, student_id: int):
+     """
+     Return an instructor_id for this student.
+     Adjust the query if your schema is different (e.g., join to exams).
+     """
+     cur = conn.cursor()
+     try:
+         # If you can have multiple assignments, pick the latest.
+         cur.execute(
+             """
+             SELECT instructor_id
+             FROM instructor_assignments
+             WHERE student_id = %s
+             ORDER BY id DESC
+             LIMIT 1
+             """,
+             (student_id,),
+         )
+         row = cur.fetchone()
+         return row[0] if row else None
+     finally:
+         cur.close()
+
+ def increment_suspicious(student_id: int, instructor_id: int) -> int:
+     """Direct bump: requires instructor_id; returns affected rows."""
+     conn = get_db_connection()
+     cur = None
+     try:
+         cur = conn.cursor()
+         cur.execute(
+             """
+             UPDATE instructor_assignments
+             SET suspicious_behavior_count = suspicious_behavior_count + 1
+             WHERE student_id = %s AND instructor_id = %s
+             """,
+             (student_id, instructor_id),
+         )
+         conn.commit()
+         return cur.rowcount
+     except Exception:
+         try: conn.rollback()
+         except Exception: pass
+         raise
+     finally:
+         try: cur.close()
+         except Exception: pass
+         conn.close()
+
+ def increment_suspicious_for_student(student_id: int) -> int:
+     """
+     Convenience: look up instructor_id for the student and bump the count.
+     Returns affected rows (0 if no assignment found).
+     """
+     conn = get_db_connection()
+     cur = None
+     try:
+         iid = _find_instructor_id_for_student(conn, student_id)
+         if iid is None:
+             return 0
+         cur = conn.cursor()
+         cur.execute(
+             """
+             UPDATE instructor_assignments
+             SET suspicious_behavior_count = suspicious_behavior_count + 1
+             WHERE student_id = %s AND instructor_id = %s
+             """,
+             (student_id, iid),
+         )
+         conn.commit()
+         return cur.rowcount
+     except Exception:
+         try: conn.rollback()
+         except Exception: pass
+         raise
+     finally:
+         try: cur.close()
+         except Exception: pass
+         conn.close()
+
+ def increment_suspicious_for_student_async(student_id: int, on_error=None):
+     """Fire-and-forget background increment."""
+     def _worker():
+         try:
+             increment_suspicious_for_student(student_id)
+         except Exception as e:
+             if on_error: on_error(e)
+             else: print(f"[INCR_SUSPICIOUS_ERR] {e}", file=sys.stderr, flush=True)
+     threading.Thread(target=_worker, daemon=True).start()