Bachstelze committed on
Commit
796297a
·
1 Parent(s): 6bb2631

test pose estimation

Browse files
Files changed (3) hide show
  1. A7/app.py +472 -0
  2. app.py +25 -466
  3. regression_app.py +472 -0
A7/app.py ADDED
@@ -0,0 +1,472 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import pandas as pd
3
+ import pickle
4
+ import os
5
+ import threading
6
+ import time
7
+ from A5.CorrelationFilter import CorrelationFilter
8
+
9
+
10
+ # Get directory where this script is located
11
+ SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
12
+
13
+ # Local paths - models loaded from A4/models/ directory
14
+ MODEL_PATH = os.path.join(
15
+ SCRIPT_DIR,
16
+ "A5/models/aimoscores_improved_A4.pkl"
17
+ )
18
+ CLASSIFICATION_MODEL_PATH = os.path.join(
19
+ SCRIPT_DIR,
20
+ "A5b/models/ensemble_classification_champion.pkl",
21
+ # new classifier without "classes" key "A5b/models/ensemble_classification_champion.pkl"
22
+ )
23
+ DATA_PATH = os.path.join(
24
+ SCRIPT_DIR,
25
+ "A3/A3_Data/train_dataset.csv"
26
+ )
27
+
28
+ model = None
29
+ FEATURE_NAMES = None
30
+ MODEL_METRICS = None
31
+
32
+ # Classification model
33
+ classification_model = None
34
+ CLASSIFICATION_FEATURE_NAMES = None
35
+ CLASSIFICATION_CLASSES = None
36
+ CLASSIFICATION_METRICS = None
37
+
38
+ # Loading state tracking
39
+ models_loaded = False
40
+ loading_error = None
41
+
42
+ BODY_REGION_RECOMMENDATIONS = {
43
+ 'Upper Body': (
44
+ "Focus on shoulder mobility, thoracic spine extension, "
45
+ "and keeping your head neutral."),
46
+ 'Lower Body': (
47
+ "Work on hip mobility, ankle dorsiflexion, "
48
+ "and knee tracking over toes.")
49
+ }
50
+
51
+
52
+ def load_champion_model():
53
+ global model, FEATURE_NAMES, MODEL_METRICS, loading_error
54
+
55
+ if os.path.exists(MODEL_PATH):
56
+ print(f"Loading champion model from {MODEL_PATH}")
57
+ start_time = time.perf_counter()
58
+ try:
59
+ with open(MODEL_PATH, "rb") as f:
60
+ artifact = pickle.load(f)
61
+
62
+ model = artifact["model"]
63
+ FEATURE_NAMES = artifact["feature_columns"]
64
+ MODEL_METRICS = artifact.get("test_metrics", {})
65
+
66
+ elapsed_time = time.perf_counter() - start_time
67
+ print(f"Model loaded: {len(FEATURE_NAMES)} features")
68
+ print(f"Test R2: {MODEL_METRICS.get('r2', 'N/A')}")
69
+ print(f"Model loading time: {elapsed_time:.2f} seconds")
70
+ return True
71
+ except Exception as e:
72
+ loading_error = f"Error loading champion model: {e}"
73
+ print(loading_error)
74
+ return False
75
+
76
+ loading_error = f"Champion model not found at {MODEL_PATH}"
77
+ print(loading_error)
78
+ return False
79
+
80
+
81
+ def load_classification_model():
82
+ global classification_model
83
+ global CLASSIFICATION_FEATURE_NAMES
84
+ global CLASSIFICATION_CLASSES
85
+ global CLASSIFICATION_METRICS
86
+ global loading_error
87
+
88
+ if os.path.exists(CLASSIFICATION_MODEL_PATH):
89
+ print(f"Loading classification model from {CLASSIFICATION_MODEL_PATH}")
90
+ start_time = time.perf_counter()
91
+ try:
92
+ with open(CLASSIFICATION_MODEL_PATH, "rb") as f:
93
+ artifact = pickle.load(f)
94
+
95
+ classification_model = artifact["model"]
96
+ CLASSIFICATION_FEATURE_NAMES = artifact["feature_columns"]
97
+ CLASSIFICATION_CLASSES = artifact["classes"]
98
+ CLASSIFICATION_METRICS = artifact.get("test_metrics", {})
99
+
100
+ len_features = len(CLASSIFICATION_FEATURE_NAMES)
101
+ elapsed_time = time.perf_counter() - start_time
102
+ print(
103
+ f"Classification model loaded: {len_features} features")
104
+ print(f"Classes: {CLASSIFICATION_CLASSES}")
105
+ print(f"Classification model loading time: {elapsed_time:.2f} seconds")
106
+ return True
107
+ except Exception as e:
108
+ loading_error = f"Error loading classification model: {e}"
109
+ print(loading_error)
110
+ return False
111
+
112
+ loading_error = f"Classification model not found at {CLASSIFICATION_MODEL_PATH}"
113
+ print(loading_error)
114
+ return False
115
+
116
+
117
+ def predict_score(*feature_values):
118
+ if model is None:
119
+ if loading_error:
120
+ return "Error", loading_error, ""
121
+ return "Error", "Model not loaded yet", ""
122
+
123
+ features_df = pd.DataFrame([feature_values], columns=FEATURE_NAMES)
124
+ raw_score = model.predict(features_df)[0]
125
+ score = max(0, min(1, raw_score)) * 100
126
+
127
+ if score >= 80:
128
+ interpretation = "Excellent, great squat form"
129
+ elif score >= 60:
130
+ interpretation = "Good, minor improvements needed"
131
+ elif score >= 40:
132
+ interpretation = "Average, a lot of areas to work on"
133
+ else:
134
+ interpretation = "Needs work, focus on proper form"
135
+
136
+ r2 = MODEL_METRICS.get('r2', 'N/A')
137
+ correlation = MODEL_METRICS.get('correlation', 'N/A')
138
+ r2_str = f"{r2:.4f}" if isinstance(r2, (int, float)) else str(r2)
139
+ corr_str = f"{correlation:.4f}" if isinstance(
140
+ correlation, (int, float)) else str(correlation)
141
+
142
+ details = f"""
143
+ ### Prediction Details
144
+ - **Raw Model Output:** {raw_score:.4f}
145
+ - **Normalized Score:** {score:.1f}%
146
+ - **Assessment:** {interpretation}
147
+
148
+ ### Model Performance
149
+ - **Test R-squared:** {r2_str}
150
+ - **Test Correlation:** {corr_str}
151
+
152
+ *Lower deviation values = better form*
153
+ """
154
+
155
+ return f"{score:.1f}%", interpretation, details
156
+
157
+
158
+ def predict_weakest_link(*feature_values):
159
+ if classification_model is None:
160
+ if loading_error:
161
+ return "Error", loading_error, ""
162
+ return "Error", "Classification model not loaded yet", ""
163
+
164
+ features_df = pd.DataFrame(
165
+ [feature_values], columns=CLASSIFICATION_FEATURE_NAMES)
166
+
167
+ prediction = classification_model.predict(features_df)[0]
168
+ probabilities = classification_model.predict_proba(features_df)[0]
169
+
170
+ class_probs = list(zip(CLASSIFICATION_CLASSES, probabilities))
171
+ class_probs.sort(key=lambda x: x[1], reverse=True)
172
+
173
+ confidence = max(probabilities) * 100
174
+ recommendation = BODY_REGION_RECOMMENDATIONS.get(
175
+ prediction, "Focus on exercises that strengthen this region.")
176
+
177
+ accuracy = CLASSIFICATION_METRICS.get('accuracy', 'N/A')
178
+ f1_weighted = CLASSIFICATION_METRICS.get('f1_weighted', 'N/A')
179
+ acc_str = f"{accuracy:.2%}" if isinstance(
180
+ accuracy, (int, float)) else str(accuracy)
181
+ f1_str = f"{f1_weighted:.2%}" if isinstance(
182
+ f1_weighted, (int, float)) else str(f1_weighted)
183
+
184
+ predictions_list = "\n".join(
185
+ [f"{i+1}. **{cp[0]}** - {cp[1]*100:.1f}%" for i, cp in enumerate(class_probs)]
186
+ )
187
+
188
+ details = f"""
189
+ ### Prediction Details
190
+ - **Predicted Body Region:** {prediction}
191
+ - **Confidence:** {confidence:.1f}%
192
+
193
+ ### Probability Distribution
194
+ {predictions_list}
195
+
196
+ ### Recommendation
197
+ {recommendation}
198
+
199
+ ### Model Performance
200
+ - **Test Accuracy:** {acc_str}
201
+ - **Test F1 (weighted):** {f1_str}
202
+ """
203
+
204
+ return prediction, f"Confidence: {confidence:.1f}%", details
205
+
206
+
207
+ def load_example():
208
+ if FEATURE_NAMES is None:
209
+ return [0.5] * 35
210
+
211
+ try:
212
+ df = pd.read_csv(DATA_PATH, sep=';', decimal=',')
213
+ sample_row = df.sample(1)
214
+ # Return value for each feature
215
+ result = []
216
+ for f in FEATURE_NAMES:
217
+ if f in df.columns:
218
+ val = float(sample_row[f].values[0])
219
+ # Clamp to valid slider range [0, 1]
220
+ val = max(0.0, min(1.0, val))
221
+ result.append(val)
222
+ # using 0.5 as default if feature not in dataset
223
+ else:
224
+ result.append(0.5)
225
+ return result
226
+ except Exception as e:
227
+ print(f"Error loading example: {e}")
228
+ return [0.5] * len(FEATURE_NAMES)
229
+
230
+
231
+ def load_classification_example():
232
+ if CLASSIFICATION_FEATURE_NAMES is None:
233
+ return [0.5] * 40
234
+
235
+ try:
236
+ df = pd.read_csv(DATA_PATH, sep=';', decimal=',')
237
+ sample_row = df.sample(1)
238
+ # Return value for each feature
239
+ result = []
240
+ for f in CLASSIFICATION_FEATURE_NAMES:
241
+ if f in df.columns:
242
+ val = float(sample_row[f].values[0])
243
+ # Clamp to valid slider range [0, 1]
244
+ val = max(0.0, min(1.0, val))
245
+ result.append(val)
246
+ # using 0.5 as default if feature not in dataset
247
+ else:
248
+ result.append(0.5)
249
+ return result
250
+ except Exception as e:
251
+ print(f"Error loading classification example: {e}")
252
+ return [0.5] * len(CLASSIFICATION_FEATURE_NAMES)
253
+
254
+
255
+ def create_interface():
256
+
257
+ if FEATURE_NAMES is None:
258
+ error_message = loading_error if loading_error else "Model not loaded"
259
+ return gr.Interface(
260
+ fn=lambda: error_message,
261
+ inputs=[],
262
+ outputs="text",
263
+ title="Error: Model not loaded"
264
+ )
265
+
266
+ inputs = []
267
+ for name in FEATURE_NAMES:
268
+ slider = gr.Slider(minimum=0, maximum=1, value=0.5,
269
+ step=0.01, label=name.replace("_", " "))
270
+ inputs.append(slider)
271
+
272
+ classification_inputs = []
273
+ if CLASSIFICATION_FEATURE_NAMES is not None:
274
+ for name in CLASSIFICATION_FEATURE_NAMES:
275
+ slider = gr.Slider(minimum=0, maximum=1, value=0.5,
276
+ step=0.01, label=name.replace("_", " "))
277
+ classification_inputs.append(slider)
278
+
279
+ description = """
280
+ ## Deep Squat Movement Assessment
281
+
282
+ **How to use:**
283
+ 1. Adjust the sliders to input deviation values
284
+ (0 = no deviation, 1 = maximum deviation)
285
+ 2. Click "Submit" to get your predicted score
286
+ 3. Or click "Load Sample" to test with real data
287
+
288
+ **Score Interpretation:**
289
+ - 80-100%: Excellent form
290
+ - 60-79%: Good form
291
+ - 40-59%: Average form
292
+ - 0-39%: Needs improvement
293
+ """
294
+
295
+ classification_description = """
296
+ ## Body Region Classification
297
+
298
+ **How to use:**
299
+ 1. Adjust the sliders to input deviation values (0 = no deviation, 1 = maximum deviation)
300
+ 2. Click "Predict Body Region" to identify where to focus improvements
301
+ 3. Or click "Load Sample" to test with real data
302
+
303
+ **Body Regions:** Upper Body, Lower Body
304
+ """
305
+
306
+ angle_features = [n for n in FEATURE_NAMES if "Angle" in n]
307
+ nasm_features = [n for n in FEATURE_NAMES if "NASM" in n]
308
+ time_features = [n for n in FEATURE_NAMES if "Time" in n]
309
+ other_features = [
310
+ n for n in FEATURE_NAMES if "Angle" not in n and "NASM" not in n and "Time" not in n]
311
+
312
+ angle_indices = [FEATURE_NAMES.index(f) for f in angle_features]
313
+ nasm_indices = [FEATURE_NAMES.index(f) for f in nasm_features]
314
+ time_indices = [FEATURE_NAMES.index(f) for f in time_features]
315
+ other_indices = [FEATURE_NAMES.index(f) for f in other_features]
316
+
317
+ if CLASSIFICATION_FEATURE_NAMES is not None:
318
+ class_angle_features = [
319
+ n for n in CLASSIFICATION_FEATURE_NAMES if "Angle" in n]
320
+ class_nasm_features = [
321
+ n for n in CLASSIFICATION_FEATURE_NAMES if "NASM" in n]
322
+ class_time_features = [
323
+ n for n in CLASSIFICATION_FEATURE_NAMES if "Time" in n]
324
+ class_other_features = [
325
+ n for n in CLASSIFICATION_FEATURE_NAMES if "Angle" not in n and "NASM" not in n and "Time" not in n]
326
+ class_angle_indices = [CLASSIFICATION_FEATURE_NAMES.index(
327
+ f) for f in class_angle_features]
328
+ class_nasm_indices = [CLASSIFICATION_FEATURE_NAMES.index(
329
+ f) for f in class_nasm_features]
330
+ class_time_indices = [CLASSIFICATION_FEATURE_NAMES.index(
331
+ f) for f in class_time_features]
332
+ class_other_indices = [CLASSIFICATION_FEATURE_NAMES.index(
333
+ f) for f in class_other_features]
334
+
335
+ with gr.Blocks(title="Deep Squat Assessment") as demo:
336
+ gr.Markdown("# Deep Squat Movement Assessment")
337
+
338
+ with gr.Tabs():
339
+ with gr.TabItem("Movement Scoring"):
340
+ gr.Markdown(description)
341
+
342
+ with gr.Row():
343
+ with gr.Column(scale=2):
344
+ gr.Markdown("### Input Features")
345
+ gr.Markdown(
346
+ f"*{len(FEATURE_NAMES)} features loaded from champion model*")
347
+ gr.Markdown(
348
+ "*Deviation values: 0 = perfect, 1 = maximum deviation*")
349
+
350
+ with gr.Tabs():
351
+ with gr.TabItem(f"Angle Deviations ({len(angle_indices)})"):
352
+ for idx in angle_indices:
353
+ inputs[idx].render()
354
+
355
+ with gr.TabItem(f"NASM Deviations ({len(nasm_indices)})"):
356
+ for idx in nasm_indices:
357
+ inputs[idx].render()
358
+
359
+ with gr.TabItem(f"Time Deviations ({len(time_indices)})"):
360
+ for idx in time_indices:
361
+ inputs[idx].render()
362
+
363
+ if other_indices:
364
+ with gr.TabItem(f"Other ({len(other_indices)})"):
365
+ for idx in other_indices:
366
+ inputs[idx].render()
367
+
368
+ with gr.Column(scale=1):
369
+ gr.Markdown("### Results")
370
+ score_output = gr.Textbox(label="Predicted Score")
371
+ interp_output = gr.Textbox(label="Assessment")
372
+ details_output = gr.Markdown(label="Details")
373
+
374
+ with gr.Row():
375
+ submit_btn = gr.Button("Submit", variant="primary")
376
+ example_btn = gr.Button("Load Sample")
377
+ clear_btn = gr.Button("Clear")
378
+
379
+ submit_btn.click(fn=predict_score, inputs=inputs, outputs=[
380
+ score_output, interp_output, details_output])
381
+ example_btn.click(fn=load_example, inputs=[], outputs=inputs)
382
+ clear_btn.click(
383
+ fn=lambda: [0.5] * len(FEATURE_NAMES) + ["", "", ""],
384
+ inputs=[],
385
+ outputs=inputs + [score_output,
386
+ interp_output, details_output],
387
+ )
388
+
389
+ if CLASSIFICATION_FEATURE_NAMES is not None:
390
+ with gr.TabItem("Body Region Classification"):
391
+ gr.Markdown(classification_description)
392
+
393
+ with gr.Row():
394
+ with gr.Column(scale=2):
395
+ gr.Markdown("### Input Features")
396
+ gr.Markdown(
397
+ f"*{len(CLASSIFICATION_FEATURE_NAMES)} features for classification*")
398
+ gr.Markdown(
399
+ "*Deviation values: 0 = perfect, 1 = maximum deviation*")
400
+
401
+ with gr.Tabs():
402
+ with gr.TabItem(f"Angle Deviations ({len(class_angle_indices)})"):
403
+ for idx in class_angle_indices:
404
+ classification_inputs[idx].render()
405
+
406
+ with gr.TabItem(f"NASM Deviations ({len(class_nasm_indices)})"):
407
+ for idx in class_nasm_indices:
408
+ classification_inputs[idx].render()
409
+
410
+ with gr.TabItem(f"Time Deviations ({len(class_time_indices)})"):
411
+ for idx in class_time_indices:
412
+ classification_inputs[idx].render()
413
+
414
+ if class_other_indices:
415
+ with gr.TabItem(f"Other ({len(class_other_indices)})"):
416
+ for idx in class_other_indices:
417
+ classification_inputs[idx].render()
418
+
419
+ with gr.Column(scale=1):
420
+ gr.Markdown("### Results")
421
+ class_output = gr.Textbox(
422
+ label="Predicted Body Region")
423
+ class_interp_output = gr.Textbox(
424
+ label="Confidence")
425
+ class_details_output = gr.Markdown(label="Details")
426
+
427
+ with gr.Row():
428
+ class_submit_btn = gr.Button(
429
+ "Predict Body Region", variant="primary")
430
+ class_example_btn = gr.Button("Load Sample")
431
+ class_clear_btn = gr.Button("Clear")
432
+
433
+ class_submit_btn.click(fn=predict_weakest_link, inputs=classification_inputs, outputs=[
434
+ class_output, class_interp_output, class_details_output])
435
+ class_example_btn.click(fn=load_classification_example, inputs=[
436
+ ], outputs=classification_inputs)
437
+ output_list = [class_output, class_interp_output, class_details_output]
438
+ class_clear_btn.click(
439
+ fn=lambda: [
440
+ 0.5] * len(CLASSIFICATION_FEATURE_NAMES) + ["", "", ""],
441
+ inputs=[],
442
+ outputs=classification_inputs + output_list,
443
+ )
444
+
445
+ return demo
446
+
447
+ def load_models_async():
448
+ # Load models asynchronously in background threads
449
+ global models_loaded
450
+ start_time = time.perf_counter()
451
+ print("Starting asynchronous model loading...")
452
+ load_champion_model()
453
+ load_classification_model()
454
+ models_loaded = True
455
+ elapsed_time = time.perf_counter() - start_time
456
+ print(f"Model loading complete (total time: {elapsed_time:.2f} seconds)")
457
+
458
if __name__ == "__main__":
    # Load both model artifacts synchronously before building the UI:
    # create_interface() needs FEATURE_NAMES, so this order is required.
    # (A commented-out daemon-thread variant was removed; it would have
    # raced the interface construction against the loaders.)
    load_models_async()

    # Build the Gradio UI from the loaded feature lists.
    demo = create_interface()

    # load_models_async() always sets models_loaded = True before
    # returning, so the old "loading in the background" status branch was
    # unreachable and has been removed.

    # Serve on all interfaces on the default Gradio port.
    demo.launch(share=False, server_name="0.0.0.0", server_port=7860)
app.py CHANGED
@@ -1,472 +1,31 @@
 
1
  import gradio as gr
2
- import pandas as pd
3
- import pickle
4
- import os
5
- import threading
6
- import time
7
- from A5.CorrelationFilter import CorrelationFilter
8
 
 
 
9
 
10
- # Get directory where this script is located
11
- SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
12
-
13
- # Local paths - models loaded from A4/models/ directory
14
- MODEL_PATH = os.path.join(
15
- SCRIPT_DIR,
16
- "A5/models/aimoscores_improved_A4.pkl"
17
- )
18
- CLASSIFICATION_MODEL_PATH = os.path.join(
19
- SCRIPT_DIR,
20
- "A5b/models/ensemble_classification_champion.pkl",
21
- # new classifier without "classes" key "A5b/models/ensemble_classification_champion.pkl"
22
- )
23
- DATA_PATH = os.path.join(
24
- SCRIPT_DIR,
25
- "A3/A3_Data/train_dataset.csv"
26
- )
27
-
28
- model = None
29
- FEATURE_NAMES = None
30
- MODEL_METRICS = None
31
-
32
- # Classification model
33
- classification_model = None
34
- CLASSIFICATION_FEATURE_NAMES = None
35
- CLASSIFICATION_CLASSES = None
36
- CLASSIFICATION_METRICS = None
37
-
38
- # Loading state tracking
39
- models_loaded = False
40
- loading_error = None
41
-
42
- BODY_REGION_RECOMMENDATIONS = {
43
- 'Upper Body': (
44
- "Focus on shoulder mobility, thoracic spine extension, "
45
- "and keeping your head neutral."),
46
- 'Lower Body': (
47
- "Work on hip mobility, ankle dorsiflexion, "
48
- "and knee tracking over toes.")
49
- }
50
-
51
-
52
- def load_champion_model():
53
- global model, FEATURE_NAMES, MODEL_METRICS, loading_error
54
-
55
- if os.path.exists(MODEL_PATH):
56
- print(f"Loading champion model from {MODEL_PATH}")
57
- start_time = time.perf_counter()
58
- try:
59
- with open(MODEL_PATH, "rb") as f:
60
- artifact = pickle.load(f)
61
-
62
- model = artifact["model"]
63
- FEATURE_NAMES = artifact["feature_columns"]
64
- MODEL_METRICS = artifact.get("test_metrics", {})
65
-
66
- elapsed_time = time.perf_counter() - start_time
67
- print(f"Model loaded: {len(FEATURE_NAMES)} features")
68
- print(f"Test R2: {MODEL_METRICS.get('r2', 'N/A')}")
69
- print(f"Model loading time: {elapsed_time:.2f} seconds")
70
- return True
71
- except Exception as e:
72
- loading_error = f"Error loading champion model: {e}"
73
- print(loading_error)
74
- return False
75
-
76
- loading_error = f"Champion model not found at {MODEL_PATH}"
77
- print(loading_error)
78
- return False
79
-
80
-
81
- def load_classification_model():
82
- global classification_model
83
- global CLASSIFICATION_FEATURE_NAMES
84
- global CLASSIFICATION_CLASSES
85
- global CLASSIFICATION_METRICS
86
- global loading_error
87
-
88
- if os.path.exists(CLASSIFICATION_MODEL_PATH):
89
- print(f"Loading classification model from {CLASSIFICATION_MODEL_PATH}")
90
- start_time = time.perf_counter()
91
- try:
92
- with open(CLASSIFICATION_MODEL_PATH, "rb") as f:
93
- artifact = pickle.load(f)
94
-
95
- classification_model = artifact["model"]
96
- CLASSIFICATION_FEATURE_NAMES = artifact["feature_columns"]
97
- CLASSIFICATION_CLASSES = artifact["classes"]
98
- CLASSIFICATION_METRICS = artifact.get("test_metrics", {})
99
-
100
- len_features = len(CLASSIFICATION_FEATURE_NAMES)
101
- elapsed_time = time.perf_counter() - start_time
102
- print(
103
- f"Classification model loaded: {len_features} features")
104
- print(f"Classes: {CLASSIFICATION_CLASSES}")
105
- print(f"Classification model loading time: {elapsed_time:.2f} seconds")
106
- return True
107
- except Exception as e:
108
- loading_error = f"Error loading classification model: {e}"
109
- print(loading_error)
110
- return False
111
-
112
- loading_error = f"Classification model not found at {CLASSIFICATION_MODEL_PATH}"
113
- print(loading_error)
114
- return False
115
-
116
-
117
- def predict_score(*feature_values):
118
- if model is None:
119
- if loading_error:
120
- return "Error", loading_error, ""
121
- return "Error", "Model not loaded yet", ""
122
-
123
- features_df = pd.DataFrame([feature_values], columns=FEATURE_NAMES)
124
- raw_score = model.predict(features_df)[0]
125
- score = max(0, min(1, raw_score)) * 100
126
-
127
- if score >= 80:
128
- interpretation = "Excellent, great squat form"
129
- elif score >= 60:
130
- interpretation = "Good, minor improvements needed"
131
- elif score >= 40:
132
- interpretation = "Average, a lot of areas to work on"
133
  else:
134
- interpretation = "Needs work, focus on proper form"
135
-
136
- r2 = MODEL_METRICS.get('r2', 'N/A')
137
- correlation = MODEL_METRICS.get('correlation', 'N/A')
138
- r2_str = f"{r2:.4f}" if isinstance(r2, (int, float)) else str(r2)
139
- corr_str = f"{correlation:.4f}" if isinstance(
140
- correlation, (int, float)) else str(correlation)
141
-
142
- details = f"""
143
- ### Prediction Details
144
- - **Raw Model Output:** {raw_score:.4f}
145
- - **Normalized Score:** {score:.1f}%
146
- - **Assessment:** {interpretation}
147
-
148
- ### Model Performance
149
- - **Test R-squared:** {r2_str}
150
- - **Test Correlation:** {corr_str}
151
-
152
- *Lower deviation values = better form*
153
- """
154
-
155
- return f"{score:.1f}%", interpretation, details
156
-
157
-
158
- def predict_weakest_link(*feature_values):
159
- if classification_model is None:
160
- if loading_error:
161
- return "Error", loading_error, ""
162
- return "Error", "Classification model not loaded yet", ""
163
-
164
- features_df = pd.DataFrame(
165
- [feature_values], columns=CLASSIFICATION_FEATURE_NAMES)
166
-
167
- prediction = classification_model.predict(features_df)[0]
168
- probabilities = classification_model.predict_proba(features_df)[0]
169
-
170
- class_probs = list(zip(CLASSIFICATION_CLASSES, probabilities))
171
- class_probs.sort(key=lambda x: x[1], reverse=True)
172
-
173
- confidence = max(probabilities) * 100
174
- recommendation = BODY_REGION_RECOMMENDATIONS.get(
175
- prediction, "Focus on exercises that strengthen this region.")
176
-
177
- accuracy = CLASSIFICATION_METRICS.get('accuracy', 'N/A')
178
- f1_weighted = CLASSIFICATION_METRICS.get('f1_weighted', 'N/A')
179
- acc_str = f"{accuracy:.2%}" if isinstance(
180
- accuracy, (int, float)) else str(accuracy)
181
- f1_str = f"{f1_weighted:.2%}" if isinstance(
182
- f1_weighted, (int, float)) else str(f1_weighted)
183
-
184
- predictions_list = "\n".join(
185
- [f"{i+1}. **{cp[0]}** - {cp[1]*100:.1f}%" for i, cp in enumerate(class_probs)]
186
- )
187
-
188
- details = f"""
189
- ### Prediction Details
190
- - **Predicted Body Region:** {prediction}
191
- - **Confidence:** {confidence:.1f}%
192
-
193
- ### Probability Distribution
194
- {predictions_list}
195
-
196
- ### Recommendation
197
- {recommendation}
198
-
199
- ### Model Performance
200
- - **Test Accuracy:** {acc_str}
201
- - **Test F1 (weighted):** {f1_str}
202
- """
203
-
204
- return prediction, f"Confidence: {confidence:.1f}%", details
205
-
206
-
207
- def load_example():
208
- if FEATURE_NAMES is None:
209
- return [0.5] * 35
210
-
211
- try:
212
- df = pd.read_csv(DATA_PATH, sep=';', decimal=',')
213
- sample_row = df.sample(1)
214
- # Return value for each feature
215
- result = []
216
- for f in FEATURE_NAMES:
217
- if f in df.columns:
218
- val = float(sample_row[f].values[0])
219
- # Clamp to valid slider range [0, 1]
220
- val = max(0.0, min(1.0, val))
221
- result.append(val)
222
- # using 0.5 as default if feature not in dataset
223
- else:
224
- result.append(0.5)
225
- return result
226
- except Exception as e:
227
- print(f"Error loading example: {e}")
228
- return [0.5] * len(FEATURE_NAMES)
229
-
230
-
231
- def load_classification_example():
232
- if CLASSIFICATION_FEATURE_NAMES is None:
233
- return [0.5] * 40
234
-
235
- try:
236
- df = pd.read_csv(DATA_PATH, sep=';', decimal=',')
237
- sample_row = df.sample(1)
238
- # Return value for each feature
239
- result = []
240
- for f in CLASSIFICATION_FEATURE_NAMES:
241
- if f in df.columns:
242
- val = float(sample_row[f].values[0])
243
- # Clamp to valid slider range [0, 1]
244
- val = max(0.0, min(1.0, val))
245
- result.append(val)
246
- # using 0.5 as default if feature not in dataset
247
- else:
248
- result.append(0.5)
249
- return result
250
- except Exception as e:
251
- print(f"Error loading classification example: {e}")
252
- return [0.5] * len(CLASSIFICATION_FEATURE_NAMES)
253
-
254
-
255
- def create_interface():
256
-
257
- if FEATURE_NAMES is None:
258
- error_message = loading_error if loading_error else "Model not loaded"
259
- return gr.Interface(
260
- fn=lambda: error_message,
261
- inputs=[],
262
- outputs="text",
263
- title="Error: Model not loaded"
264
- )
265
-
266
- inputs = []
267
- for name in FEATURE_NAMES:
268
- slider = gr.Slider(minimum=0, maximum=1, value=0.5,
269
- step=0.01, label=name.replace("_", " "))
270
- inputs.append(slider)
271
-
272
- classification_inputs = []
273
- if CLASSIFICATION_FEATURE_NAMES is not None:
274
- for name in CLASSIFICATION_FEATURE_NAMES:
275
- slider = gr.Slider(minimum=0, maximum=1, value=0.5,
276
- step=0.01, label=name.replace("_", " "))
277
- classification_inputs.append(slider)
278
-
279
- description = """
280
- ## Deep Squat Movement Assessment
281
-
282
- **How to use:**
283
- 1. Adjust the sliders to input deviation values
284
- (0 = no deviation, 1 = maximum deviation)
285
- 2. Click "Submit" to get your predicted score
286
- 3. Or click "Load Sample" to test with real data
287
-
288
- **Score Interpretation:**
289
- - 80-100%: Excellent form
290
- - 60-79%: Good form
291
- - 40-59%: Average form
292
- - 0-39%: Needs improvement
293
- """
294
-
295
- classification_description = """
296
- ## Body Region Classification
297
-
298
- **How to use:**
299
- 1. Adjust the sliders to input deviation values (0 = no deviation, 1 = maximum deviation)
300
- 2. Click "Predict Body Region" to identify where to focus improvements
301
- 3. Or click "Load Sample" to test with real data
302
-
303
- **Body Regions:** Upper Body, Lower Body
304
- """
305
-
306
- angle_features = [n for n in FEATURE_NAMES if "Angle" in n]
307
- nasm_features = [n for n in FEATURE_NAMES if "NASM" in n]
308
- time_features = [n for n in FEATURE_NAMES if "Time" in n]
309
- other_features = [
310
- n for n in FEATURE_NAMES if "Angle" not in n and "NASM" not in n and "Time" not in n]
311
-
312
- angle_indices = [FEATURE_NAMES.index(f) for f in angle_features]
313
- nasm_indices = [FEATURE_NAMES.index(f) for f in nasm_features]
314
- time_indices = [FEATURE_NAMES.index(f) for f in time_features]
315
- other_indices = [FEATURE_NAMES.index(f) for f in other_features]
316
-
317
- if CLASSIFICATION_FEATURE_NAMES is not None:
318
- class_angle_features = [
319
- n for n in CLASSIFICATION_FEATURE_NAMES if "Angle" in n]
320
- class_nasm_features = [
321
- n for n in CLASSIFICATION_FEATURE_NAMES if "NASM" in n]
322
- class_time_features = [
323
- n for n in CLASSIFICATION_FEATURE_NAMES if "Time" in n]
324
- class_other_features = [
325
- n for n in CLASSIFICATION_FEATURE_NAMES if "Angle" not in n and "NASM" not in n and "Time" not in n]
326
- class_angle_indices = [CLASSIFICATION_FEATURE_NAMES.index(
327
- f) for f in class_angle_features]
328
- class_nasm_indices = [CLASSIFICATION_FEATURE_NAMES.index(
329
- f) for f in class_nasm_features]
330
- class_time_indices = [CLASSIFICATION_FEATURE_NAMES.index(
331
- f) for f in class_time_features]
332
- class_other_indices = [CLASSIFICATION_FEATURE_NAMES.index(
333
- f) for f in class_other_features]
334
-
335
- with gr.Blocks(title="Deep Squat Assessment") as demo:
336
- gr.Markdown("# Deep Squat Movement Assessment")
337
-
338
- with gr.Tabs():
339
- with gr.TabItem("Movement Scoring"):
340
- gr.Markdown(description)
341
-
342
- with gr.Row():
343
- with gr.Column(scale=2):
344
- gr.Markdown("### Input Features")
345
- gr.Markdown(
346
- f"*{len(FEATURE_NAMES)} features loaded from champion model*")
347
- gr.Markdown(
348
- "*Deviation values: 0 = perfect, 1 = maximum deviation*")
349
-
350
- with gr.Tabs():
351
- with gr.TabItem(f"Angle Deviations ({len(angle_indices)})"):
352
- for idx in angle_indices:
353
- inputs[idx].render()
354
-
355
- with gr.TabItem(f"NASM Deviations ({len(nasm_indices)})"):
356
- for idx in nasm_indices:
357
- inputs[idx].render()
358
-
359
- with gr.TabItem(f"Time Deviations ({len(time_indices)})"):
360
- for idx in time_indices:
361
- inputs[idx].render()
362
-
363
- if other_indices:
364
- with gr.TabItem(f"Other ({len(other_indices)})"):
365
- for idx in other_indices:
366
- inputs[idx].render()
367
-
368
- with gr.Column(scale=1):
369
- gr.Markdown("### Results")
370
- score_output = gr.Textbox(label="Predicted Score")
371
- interp_output = gr.Textbox(label="Assessment")
372
- details_output = gr.Markdown(label="Details")
373
-
374
- with gr.Row():
375
- submit_btn = gr.Button("Submit", variant="primary")
376
- example_btn = gr.Button("Load Sample")
377
- clear_btn = gr.Button("Clear")
378
-
379
- submit_btn.click(fn=predict_score, inputs=inputs, outputs=[
380
- score_output, interp_output, details_output])
381
- example_btn.click(fn=load_example, inputs=[], outputs=inputs)
382
- clear_btn.click(
383
- fn=lambda: [0.5] * len(FEATURE_NAMES) + ["", "", ""],
384
- inputs=[],
385
- outputs=inputs + [score_output,
386
- interp_output, details_output],
387
- )
388
-
389
- if CLASSIFICATION_FEATURE_NAMES is not None:
390
- with gr.TabItem("Body Region Classification"):
391
- gr.Markdown(classification_description)
392
-
393
- with gr.Row():
394
- with gr.Column(scale=2):
395
- gr.Markdown("### Input Features")
396
- gr.Markdown(
397
- f"*{len(CLASSIFICATION_FEATURE_NAMES)} features for classification*")
398
- gr.Markdown(
399
- "*Deviation values: 0 = perfect, 1 = maximum deviation*")
400
-
401
- with gr.Tabs():
402
- with gr.TabItem(f"Angle Deviations ({len(class_angle_indices)})"):
403
- for idx in class_angle_indices:
404
- classification_inputs[idx].render()
405
-
406
- with gr.TabItem(f"NASM Deviations ({len(class_nasm_indices)})"):
407
- for idx in class_nasm_indices:
408
- classification_inputs[idx].render()
409
-
410
- with gr.TabItem(f"Time Deviations ({len(class_time_indices)})"):
411
- for idx in class_time_indices:
412
- classification_inputs[idx].render()
413
-
414
- if class_other_indices:
415
- with gr.TabItem(f"Other ({len(class_other_indices)})"):
416
- for idx in class_other_indices:
417
- classification_inputs[idx].render()
418
-
419
- with gr.Column(scale=1):
420
- gr.Markdown("### Results")
421
- class_output = gr.Textbox(
422
- label="Predicted Body Region")
423
- class_interp_output = gr.Textbox(
424
- label="Confidence")
425
- class_details_output = gr.Markdown(label="Details")
426
-
427
- with gr.Row():
428
- class_submit_btn = gr.Button(
429
- "Predict Body Region", variant="primary")
430
- class_example_btn = gr.Button("Load Sample")
431
- class_clear_btn = gr.Button("Clear")
432
-
433
- class_submit_btn.click(fn=predict_weakest_link, inputs=classification_inputs, outputs=[
434
- class_output, class_interp_output, class_details_output])
435
- class_example_btn.click(fn=load_classification_example, inputs=[
436
- ], outputs=classification_inputs)
437
- output_list = [class_output, class_interp_output, class_details_output]
438
- class_clear_btn.click(
439
- fn=lambda: [
440
- 0.5] * len(CLASSIFICATION_FEATURE_NAMES) + ["", "", ""],
441
- inputs=[],
442
- outputs=classification_inputs + output_list,
443
- )
444
-
445
- return demo
446
-
447
- def load_models_async():
448
- # Load models asynchronously in background threads
449
- global models_loaded
450
- start_time = time.perf_counter()
451
- print("Starting asynchronous model loading...")
452
- load_champion_model()
453
- load_classification_model()
454
- models_loaded = True
455
- elapsed_time = time.perf_counter() - start_time
456
- print(f"Model loading complete (total time: {elapsed_time:.2f} seconds)")
457
 
458
  if __name__ == "__main__":
459
- # Start model loading in background thread
460
- #loading_thread = threading.Thread(target=load_models_async, daemon=True)
461
- #loading_thread.start()
462
- load_models_async()
463
-
464
- # Create the interface immediately (models loading in background)
465
- demo = create_interface()
466
-
467
- # Add loading status to the interface
468
- if not models_loaded:
469
- print("Models are loading in the background...")
470
- print("You can use the interface while models load.")
471
-
472
- demo.launch(share=False, server_name="0.0.0.0", server_port=7860)
 
1
from PIL import Image

import gradio as gr
from controlnet_aux import OpenposeDetector

# Instantiate the OpenPose body/face/hand detector once at import time
# (downloads weights on first run) so all requests share a single model.
openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
7
 
8
def generate_pose(image, use_openpose=True):
    """Extract a pose rendering from an uploaded image.

    Args:
        image: PIL image from the Gradio upload widget; may be ``None``
            when the user submits without uploading anything.
        use_openpose: when True, run the OpenPose detector; when False,
            return the RGB-converted input unchanged (pass-through).

    Returns:
        A ``PIL.Image.Image`` with the pose rendering, or ``None`` when
        no image was provided.
    """
    # Gradio passes None for an empty upload; the original code crashed
    # here with AttributeError on image.convert.
    if image is None:
        return None

    img = image.convert("RGB")
    if use_openpose:
        # NOTE(review): the detector's return type varies across
        # controlnet_aux versions (PIL image vs numpy array) — it is
        # normalized below.
        result = openpose(img)
    else:
        result = img
    if not isinstance(result, Image.Image):
        result = Image.fromarray(result)
    return result
17
+
18
# Gradio front-end: one image upload plus an OpenPose toggle in,
# a single pose rendering out.
_image_input = gr.Image(type="pil", label="Upload Image")
_openpose_toggle = gr.Checkbox(value=True, label="Use OpenPose (default: true)")
_pose_output = gr.Image(type="pil", label="Pose Output")

demo = gr.Interface(
    fn=generate_pose,
    inputs=[_image_input, _openpose_toggle],
    outputs=_pose_output,
    title="OpenPose Pose Generator",
    description="Generate full body pose including face and hands.",
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
if __name__ == "__main__":
    # Bind on all interfaces so the app is reachable from outside a container.
    demo.launch(server_name="0.0.0.0", server_port=7860)
 
 
 
 
 
 
 
 
 
 
 
 
 
regression_app.py ADDED
@@ -0,0 +1,472 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import pickle
import threading
import time

import gradio as gr
import pandas as pd

# NOTE(review): presumably imported so pickle can resolve the
# CorrelationFilter class when loading the model artifact — confirm.
from A5.CorrelationFilter import CorrelationFilter


# Resolve all artifact paths relative to this file, not the CWD.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

# Champion regression model artifact (A5 pipeline).
MODEL_PATH = os.path.join(
    SCRIPT_DIR,
    "A5/models/aimoscores_improved_A4.pkl",
)
# Ensemble body-region classifier artifact (A5b pipeline).
# NOTE(review): newer classifier artifacts may omit the "classes" key.
CLASSIFICATION_MODEL_PATH = os.path.join(
    SCRIPT_DIR,
    "A5b/models/ensemble_classification_champion.pkl",
)
# Training data backing the "Load Sample" buttons.
DATA_PATH = os.path.join(
    SCRIPT_DIR,
    "A3/A3_Data/train_dataset.csv",
)

# Regression model state (populated by load_champion_model).
model = None
FEATURE_NAMES = None
MODEL_METRICS = None

# Classification model state (populated by load_classification_model).
classification_model = None
CLASSIFICATION_FEATURE_NAMES = None
CLASSIFICATION_CLASSES = None
CLASSIFICATION_METRICS = None

# Loading state tracking shared with the UI layer.
models_loaded = False
loading_error = None

# Coaching advice shown for each predicted body region.
BODY_REGION_RECOMMENDATIONS = {
    'Upper Body': (
        "Focus on shoulder mobility, thoracic spine extension, "
        "and keeping your head neutral."),
    'Lower Body': (
        "Work on hip mobility, ankle dorsiflexion, "
        "and knee tracking over toes.")
}
50
+
51
+
52
def load_champion_model():
    """Load the champion regression artifact into module globals.

    Populates ``model``, ``FEATURE_NAMES`` and ``MODEL_METRICS``.
    Returns True on success, False otherwise; failures are recorded
    in ``loading_error``.
    """
    global model, FEATURE_NAMES, MODEL_METRICS, loading_error

    # Guard clause: missing artifact on disk.
    if not os.path.exists(MODEL_PATH):
        loading_error = f"Champion model not found at {MODEL_PATH}"
        print(loading_error)
        return False

    print(f"Loading champion model from {MODEL_PATH}")
    start_time = time.perf_counter()
    try:
        with open(MODEL_PATH, "rb") as f:
            artifact = pickle.load(f)

        model = artifact["model"]
        FEATURE_NAMES = artifact["feature_columns"]
        # Metrics are optional in older artifacts.
        MODEL_METRICS = artifact.get("test_metrics", {})

        elapsed_time = time.perf_counter() - start_time
        print(f"Model loaded: {len(FEATURE_NAMES)} features")
        print(f"Test R2: {MODEL_METRICS.get('r2', 'N/A')}")
        print(f"Model loading time: {elapsed_time:.2f} seconds")
        return True
    except Exception as e:
        loading_error = f"Error loading champion model: {e}"
        print(loading_error)
        return False
79
+
80
+
81
def load_classification_model():
    """Load the body-region classifier artifact into module globals.

    Populates ``classification_model``, ``CLASSIFICATION_FEATURE_NAMES``,
    ``CLASSIFICATION_CLASSES`` and ``CLASSIFICATION_METRICS``.
    Returns True on success, False otherwise; failures are recorded in
    ``loading_error``.

    Newer artifacts may omit the "classes" key (see the comment next to
    CLASSIFICATION_MODEL_PATH); in that case the class labels are read
    from the fitted estimator's ``classes_`` attribute, falling back to
    None if neither source is available.
    """
    global classification_model
    global CLASSIFICATION_FEATURE_NAMES
    global CLASSIFICATION_CLASSES
    global CLASSIFICATION_METRICS
    global loading_error

    if os.path.exists(CLASSIFICATION_MODEL_PATH):
        print(f"Loading classification model from {CLASSIFICATION_MODEL_PATH}")
        start_time = time.perf_counter()
        try:
            with open(CLASSIFICATION_MODEL_PATH, "rb") as f:
                artifact = pickle.load(f)

            classification_model = artifact["model"]
            CLASSIFICATION_FEATURE_NAMES = artifact["feature_columns"]
            # Older artifacts store the labels explicitly; newer ones rely
            # on the estimator's classes_ attribute. The original code
            # raised KeyError here (caught below) and failed the load.
            CLASSIFICATION_CLASSES = artifact.get(
                "classes", getattr(classification_model, "classes_", None))
            CLASSIFICATION_METRICS = artifact.get("test_metrics", {})

            len_features = len(CLASSIFICATION_FEATURE_NAMES)
            elapsed_time = time.perf_counter() - start_time
            print(
                f"Classification model loaded: {len_features} features")
            print(f"Classes: {CLASSIFICATION_CLASSES}")
            print(f"Classification model loading time: {elapsed_time:.2f} seconds")
            return True
        except Exception as e:
            loading_error = f"Error loading classification model: {e}"
            print(loading_error)
            return False

    loading_error = f"Classification model not found at {CLASSIFICATION_MODEL_PATH}"
    print(loading_error)
    return False
115
+
116
+
117
def predict_score(*feature_values):
    """Predict a squat-quality score from the slider deviation values.

    Returns a (score_text, assessment, details_markdown) triple for the
    Gradio outputs; the first element is "Error" when no model is loaded.
    """
    if model is None:
        message = loading_error or "Model not loaded yet"
        return "Error", message, ""

    frame = pd.DataFrame([feature_values], columns=FEATURE_NAMES)
    raw_score = model.predict(frame)[0]
    # Clamp the raw regression output to [0, 1] before scaling to percent.
    score = min(max(raw_score, 0), 1) * 100

    # Map the score onto a human-readable assessment band.
    bands = (
        (80, "Excellent, great squat form"),
        (60, "Good, minor improvements needed"),
        (40, "Average, a lot of areas to work on"),
    )
    interpretation = next(
        (label for threshold, label in bands if score >= threshold),
        "Needs work, focus on proper form",
    )

    def _fmt(value):
        # Metrics may be absent ('N/A' string) in older artifacts.
        return f"{value:.4f}" if isinstance(value, (int, float)) else str(value)

    r2_str = _fmt(MODEL_METRICS.get('r2', 'N/A'))
    corr_str = _fmt(MODEL_METRICS.get('correlation', 'N/A'))

    details = f"""
### Prediction Details
- **Raw Model Output:** {raw_score:.4f}
- **Normalized Score:** {score:.1f}%
- **Assessment:** {interpretation}

### Model Performance
- **Test R-squared:** {r2_str}
- **Test Correlation:** {corr_str}

*Lower deviation values = better form*
"""

    return f"{score:.1f}%", interpretation, details
156
+
157
+
158
def predict_weakest_link(*feature_values):
    """Classify which body region is the weakest link for this squat.

    Returns a (region, confidence_text, details_markdown) triple for the
    Gradio outputs; the first element is "Error" when no classifier is
    loaded.
    """
    if classification_model is None:
        message = loading_error or "Classification model not loaded yet"
        return "Error", message, ""

    frame = pd.DataFrame(
        [feature_values], columns=CLASSIFICATION_FEATURE_NAMES)

    prediction = classification_model.predict(frame)[0]
    probabilities = classification_model.predict_proba(frame)[0]

    # Rank classes by descending probability for the details panel.
    ranked = sorted(
        zip(CLASSIFICATION_CLASSES, probabilities),
        key=lambda pair: pair[1],
        reverse=True,
    )

    confidence = max(probabilities) * 100
    recommendation = BODY_REGION_RECOMMENDATIONS.get(
        prediction, "Focus on exercises that strengthen this region.")

    def _pct(value):
        # Metrics may be absent ('N/A' string) in older artifacts.
        return f"{value:.2%}" if isinstance(value, (int, float)) else str(value)

    acc_str = _pct(CLASSIFICATION_METRICS.get('accuracy', 'N/A'))
    f1_str = _pct(CLASSIFICATION_METRICS.get('f1_weighted', 'N/A'))

    predictions_list = "\n".join(
        f"{rank}. **{label}** - {prob * 100:.1f}%"
        for rank, (label, prob) in enumerate(ranked, start=1)
    )

    details = f"""
### Prediction Details
- **Predicted Body Region:** {prediction}
- **Confidence:** {confidence:.1f}%

### Probability Distribution
{predictions_list}

### Recommendation
{recommendation}

### Model Performance
- **Test Accuracy:** {acc_str}
- **Test F1 (weighted):** {f1_str}
"""

    return prediction, f"Confidence: {confidence:.1f}%", details
205
+
206
+
207
def load_example():
    """Return slider values sampled from a random training-set row.

    Values are clamped to the slider range [0, 1]; features missing from
    the dataset, or NaN in the sampled row, fall back to the neutral 0.5.
    Returns a list aligned with FEATURE_NAMES (or 35 defaults when the
    model has not been loaded yet).
    """
    if FEATURE_NAMES is None:
        # Model not loaded yet; 35 is the expected champion feature count.
        return [0.5] * 35

    try:
        df = pd.read_csv(DATA_PATH, sep=';', decimal=',')
        sample_row = df.sample(1)
        result = []
        for feature in FEATURE_NAMES:
            if feature in df.columns:
                value = float(sample_row[feature].values[0])
                if value != value:
                    # NaN (value != value): the old clamp silently turned
                    # NaN into 1.0 ("maximum deviation"); use the neutral
                    # default instead.
                    value = 0.5
                else:
                    # Clamp to the valid slider range [0, 1].
                    value = max(0.0, min(1.0, value))
                result.append(value)
            else:
                # Feature absent from the dataset: neutral default.
                result.append(0.5)
        return result
    except Exception as e:
        print(f"Error loading example: {e}")
        return [0.5] * len(FEATURE_NAMES)
229
+
230
+
231
def load_classification_example():
    """Return slider values sampled from a random training-set row.

    Classification counterpart of load_example: values are clamped to
    [0, 1]; features missing from the dataset, or NaN in the sampled
    row, fall back to the neutral 0.5. Returns a list aligned with
    CLASSIFICATION_FEATURE_NAMES (or 40 defaults when the classifier has
    not been loaded yet).
    """
    if CLASSIFICATION_FEATURE_NAMES is None:
        # Classifier not loaded yet; 40 is the expected feature count.
        return [0.5] * 40

    try:
        df = pd.read_csv(DATA_PATH, sep=';', decimal=',')
        sample_row = df.sample(1)
        result = []
        for feature in CLASSIFICATION_FEATURE_NAMES:
            if feature in df.columns:
                value = float(sample_row[feature].values[0])
                if value != value:
                    # NaN (value != value): the old clamp silently turned
                    # NaN into 1.0 ("maximum deviation"); use the neutral
                    # default instead.
                    value = 0.5
                else:
                    # Clamp to the valid slider range [0, 1].
                    value = max(0.0, min(1.0, value))
                result.append(value)
            else:
                # Feature absent from the dataset: neutral default.
                result.append(0.5)
        return result
    except Exception as e:
        print(f"Error loading classification example: {e}")
        return [0.5] * len(CLASSIFICATION_FEATURE_NAMES)
253
+
254
+
255
def create_interface():
    """Build the full Gradio Blocks UI for scoring and classification.

    Returns a gr.Blocks demo with two tabs: "Movement Scoring" (always
    present once the regression model is loaded) and "Body Region
    Classification" (only when the classifier loaded successfully).
    Falls back to a minimal error interface when FEATURE_NAMES is unset.
    """

    # Regression model failed to load: show a stub interface with the error.
    if FEATURE_NAMES is None:
        error_message = loading_error if loading_error else "Model not loaded"
        return gr.Interface(
            fn=lambda: error_message,
            inputs=[],
            outputs="text",
            title="Error: Model not loaded"
        )

    # One [0, 1] slider per regression feature; rendered later inside tabs.
    inputs = []
    for name in FEATURE_NAMES:
        slider = gr.Slider(minimum=0, maximum=1, value=0.5,
                           step=0.01, label=name.replace("_", " "))
        inputs.append(slider)

    # One [0, 1] slider per classification feature (if the classifier loaded).
    classification_inputs = []
    if CLASSIFICATION_FEATURE_NAMES is not None:
        for name in CLASSIFICATION_FEATURE_NAMES:
            slider = gr.Slider(minimum=0, maximum=1, value=0.5,
                               step=0.01, label=name.replace("_", " "))
            classification_inputs.append(slider)

    # Markdown shown at the top of the scoring tab. Kept flush-left so the
    # markdown renderer does not treat indented lines as code blocks.
    description = """
## Deep Squat Movement Assessment

**How to use:**
1. Adjust the sliders to input deviation values
(0 = no deviation, 1 = maximum deviation)
2. Click "Submit" to get your predicted score
3. Or click "Load Sample" to test with real data

**Score Interpretation:**
- 80-100%: Excellent form
- 60-79%: Good form
- 40-59%: Average form
- 0-39%: Needs improvement
"""

    # Markdown shown at the top of the classification tab.
    classification_description = """
## Body Region Classification

**How to use:**
1. Adjust the sliders to input deviation values (0 = no deviation, 1 = maximum deviation)
2. Click "Predict Body Region" to identify where to focus improvements
3. Or click "Load Sample" to test with real data

**Body Regions:** Upper Body, Lower Body
"""

    # Group regression features by name substring for the nested tab layout.
    angle_features = [n for n in FEATURE_NAMES if "Angle" in n]
    nasm_features = [n for n in FEATURE_NAMES if "NASM" in n]
    time_features = [n for n in FEATURE_NAMES if "Time" in n]
    other_features = [
        n for n in FEATURE_NAMES if "Angle" not in n and "NASM" not in n and "Time" not in n]

    # Positions into `inputs` for each group (sliders are rendered by index).
    angle_indices = [FEATURE_NAMES.index(f) for f in angle_features]
    nasm_indices = [FEATURE_NAMES.index(f) for f in nasm_features]
    time_indices = [FEATURE_NAMES.index(f) for f in time_features]
    other_indices = [FEATURE_NAMES.index(f) for f in other_features]

    # Same grouping for the classification features.
    if CLASSIFICATION_FEATURE_NAMES is not None:
        class_angle_features = [
            n for n in CLASSIFICATION_FEATURE_NAMES if "Angle" in n]
        class_nasm_features = [
            n for n in CLASSIFICATION_FEATURE_NAMES if "NASM" in n]
        class_time_features = [
            n for n in CLASSIFICATION_FEATURE_NAMES if "Time" in n]
        class_other_features = [
            n for n in CLASSIFICATION_FEATURE_NAMES if "Angle" not in n and "NASM" not in n and "Time" not in n]
        class_angle_indices = [CLASSIFICATION_FEATURE_NAMES.index(
            f) for f in class_angle_features]
        class_nasm_indices = [CLASSIFICATION_FEATURE_NAMES.index(
            f) for f in class_nasm_features]
        class_time_indices = [CLASSIFICATION_FEATURE_NAMES.index(
            f) for f in class_time_features]
        class_other_indices = [CLASSIFICATION_FEATURE_NAMES.index(
            f) for f in class_other_features]

    with gr.Blocks(title="Deep Squat Assessment") as demo:
        gr.Markdown("# Deep Squat Movement Assessment")

        with gr.Tabs():
            # --- Tab 1: regression scoring -------------------------------
            with gr.TabItem("Movement Scoring"):
                gr.Markdown(description)

                with gr.Row():
                    with gr.Column(scale=2):
                        gr.Markdown("### Input Features")
                        gr.Markdown(
                            f"*{len(FEATURE_NAMES)} features loaded from champion model*")
                        gr.Markdown(
                            "*Deviation values: 0 = perfect, 1 = maximum deviation*")

                        # Sliders are pre-created above and rendered here
                        # grouped into sub-tabs by feature family.
                        with gr.Tabs():
                            with gr.TabItem(f"Angle Deviations ({len(angle_indices)})"):
                                for idx in angle_indices:
                                    inputs[idx].render()

                            with gr.TabItem(f"NASM Deviations ({len(nasm_indices)})"):
                                for idx in nasm_indices:
                                    inputs[idx].render()

                            with gr.TabItem(f"Time Deviations ({len(time_indices)})"):
                                for idx in time_indices:
                                    inputs[idx].render()

                            # Only shown when some features match no group.
                            if other_indices:
                                with gr.TabItem(f"Other ({len(other_indices)})"):
                                    for idx in other_indices:
                                        inputs[idx].render()

                    with gr.Column(scale=1):
                        gr.Markdown("### Results")
                        score_output = gr.Textbox(label="Predicted Score")
                        interp_output = gr.Textbox(label="Assessment")
                        details_output = gr.Markdown(label="Details")

                with gr.Row():
                    submit_btn = gr.Button("Submit", variant="primary")
                    example_btn = gr.Button("Load Sample")
                    clear_btn = gr.Button("Clear")

                # Wire buttons: predict, load a dataset sample, or reset
                # every slider to 0.5 and blank the three outputs.
                submit_btn.click(fn=predict_score, inputs=inputs, outputs=[
                    score_output, interp_output, details_output])
                example_btn.click(fn=load_example, inputs=[], outputs=inputs)
                clear_btn.click(
                    fn=lambda: [0.5] * len(FEATURE_NAMES) + ["", "", ""],
                    inputs=[],
                    outputs=inputs + [score_output,
                                      interp_output, details_output],
                )

            # --- Tab 2: body-region classification (optional) ------------
            if CLASSIFICATION_FEATURE_NAMES is not None:
                with gr.TabItem("Body Region Classification"):
                    gr.Markdown(classification_description)

                    with gr.Row():
                        with gr.Column(scale=2):
                            gr.Markdown("### Input Features")
                            gr.Markdown(
                                f"*{len(CLASSIFICATION_FEATURE_NAMES)} features for classification*")
                            gr.Markdown(
                                "*Deviation values: 0 = perfect, 1 = maximum deviation*")

                            with gr.Tabs():
                                with gr.TabItem(f"Angle Deviations ({len(class_angle_indices)})"):
                                    for idx in class_angle_indices:
                                        classification_inputs[idx].render()

                                with gr.TabItem(f"NASM Deviations ({len(class_nasm_indices)})"):
                                    for idx in class_nasm_indices:
                                        classification_inputs[idx].render()

                                with gr.TabItem(f"Time Deviations ({len(class_time_indices)})"):
                                    for idx in class_time_indices:
                                        classification_inputs[idx].render()

                                if class_other_indices:
                                    with gr.TabItem(f"Other ({len(class_other_indices)})"):
                                        for idx in class_other_indices:
                                            classification_inputs[idx].render()

                        with gr.Column(scale=1):
                            gr.Markdown("### Results")
                            class_output = gr.Textbox(
                                label="Predicted Body Region")
                            class_interp_output = gr.Textbox(
                                label="Confidence")
                            class_details_output = gr.Markdown(label="Details")

                    with gr.Row():
                        class_submit_btn = gr.Button(
                            "Predict Body Region", variant="primary")
                        class_example_btn = gr.Button("Load Sample")
                        class_clear_btn = gr.Button("Clear")

                    # Same wiring pattern as the scoring tab.
                    class_submit_btn.click(fn=predict_weakest_link, inputs=classification_inputs, outputs=[
                        class_output, class_interp_output, class_details_output])
                    class_example_btn.click(fn=load_classification_example, inputs=[
                    ], outputs=classification_inputs)
                    output_list = [class_output, class_interp_output, class_details_output]
                    class_clear_btn.click(
                        fn=lambda: [
                            0.5] * len(CLASSIFICATION_FEATURE_NAMES) + ["", "", ""],
                        inputs=[],
                        outputs=classification_inputs + output_list,
                    )

    return demo
446
+
447
def load_models_async():
    """Load both model artifacts and flip the `models_loaded` flag.

    Despite the name, this runs synchronously in the calling thread; it
    is merely suitable for being handed to a background thread.
    """
    global models_loaded
    t0 = time.perf_counter()
    print("Starting asynchronous model loading...")
    load_champion_model()
    load_classification_model()
    models_loaded = True
    elapsed = time.perf_counter() - t0
    print(f"Model loading complete (total time: {elapsed:.2f} seconds)")
457
+
458
if __name__ == "__main__":
    # Load both model artifacts up front (synchronous despite the name).
    load_models_async()

    # Build the UI once the models (and their feature lists) are available.
    demo = create_interface()

    if not models_loaded:
        print("Models are loading in the background...")
        print("You can use the interface while models load.")

    # Bind on all interfaces so the app is reachable inside a container.
    demo.launch(share=False, server_name="0.0.0.0", server_port=7860)