Doux Thibault committed
Commit 134c1cb · 2 Parent(s): 3e299e4 5692f7c

Merge branch 'main' of https://huggingface.co/spaces/EntrepreneurFirst/FitnessEquation

.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
  data/pose/squat.mp4 filter=lfs diff=lfs merge=lfs -text
+ data/pose/squat_inference.mp4 filter=lfs diff=lfs merge=lfs -text
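Reviewer note: the new `.gitattributes` line is the pattern that `git lfs track` writes, so the inference video is stored as an LFS pointer rather than committed raw. The equivalent command (illustrative):

git lfs track "data/pose/squat_inference.mp4"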
Modules/PoseEstimation/pose_estimation.ipynb ADDED
The diff for this file is too large to render.
 
Modules/PoseEstimation/pose_estimator.py ADDED
@@ -0,0 +1,116 @@
+ from ultralytics import YOLO
+ import numpy as np
+
+ id_joints_dict = {0: 'nose',
+                   1: 'left_eye',
+                   2: 'right_eye',
+                   3: 'left_ear',
+                   4: 'right_ear',
+                   5: 'left_shoulder',
+                   6: 'right_shoulder',
+                   7: 'left_elbow',
+                   8: 'right_elbow',
+                   9: 'left_wrist',
+                   10: 'right_wrist',
+                   11: 'left_hip',
+                   12: 'right_hip',
+                   13: 'left_knee',
+                   14: 'right_knee',
+                   15: 'left_ankle',
+                   16: 'right_ankle'}
+ joints_id_dict = {v: k for k, v in id_joints_dict.items()}
+
+ model = YOLO('yolov8n-pose.pt')
+
+ def get_keypoints_from_keypoints(model, video_path):
+
+     keypoints = []
+     results = model(video_path, save=True, show_conf=False, show_boxes=False)
+     for frame in results:
+         keypoints.append(frame.keypoints.xy)
+
+     return keypoints
+
+ keypoints = get_keypoints_from_keypoints(model, '../../data/pose/squat.mp4')
+
+ def calculate_angle(a, b, c):
+
+     """
+     Calculates the angle at joint b formed by the segments b->a and b->c.
+
+     Args:
+         a (tuple): coordinates of the first joint
+         b (tuple): coordinates of the second joint (vertex of the angle)
+         c (tuple): coordinates of the third joint
+
+     Returns:
+         angle (float): angle between the three joints, in degrees
+     """
+
+     ba = np.array(a) - np.array(b)
+     bc = np.array(c) - np.array(b)
+
+     cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
+     angle = np.arccos(cosine_angle)
+
+     return np.degrees(angle)
+
+ def compute_left_knee_angle(pose):
+
+     """
+     Computes the left knee angle (hip-knee-ankle).
+
+     Args:
+         pose (Tensor): keypoints for one frame, shape (num_persons, 17, 2); person 0 is used
+
+     Returns:
+         knee_angle (float): knee angle in degrees
+     """
+
+     left_hip = pose[0][joints_id_dict['left_hip']]
+     left_knee = pose[0][joints_id_dict['left_knee']]
+     left_ankle = pose[0][joints_id_dict['left_ankle']]
+
+     knee_angle = calculate_angle(left_hip, left_knee, left_ankle)
+
+     return knee_angle
+
+ def compute_right_knee_angle(pose):
+
+     """
+     Computes the right knee angle (hip-knee-ankle).
+
+     Args:
+         pose (Tensor): keypoints for one frame, shape (num_persons, 17, 2); person 0 is used
+
+     Returns:
+         knee_angle (float): knee angle in degrees
+     """
+
+     right_hip = pose[0][joints_id_dict['right_hip']]
+     right_knee = pose[0][joints_id_dict['right_knee']]
+     right_ankle = pose[0][joints_id_dict['right_ankle']]
+
+     knee_angle = calculate_angle(right_hip, right_knee, right_ankle)
+
+     return knee_angle
+
+ def moving_average(data, window_size):
+
+     """
+     Computes the moving average of a list.
+
+     Args:
+         data (list): list of values
+         window_size (int): size of the window
+
+     Returns:
+         avg (list): list of moving average values
+     """
+
+     avg = []
+     for i in range(len(data) - window_size + 1):
+         avg.append(sum(data[i:i + window_size]) / window_size)
+
+     return avg
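Reviewer note: a minimal sketch of how the new helpers compose end to end — run pose inference, extract a per-frame left-knee angle, then smooth detection jitter. The window size and the guard against empty detections are illustrative assumptions, not part of this commit (note that importing the module also triggers its top-level demo inference on squat.mp4):

from Modules.PoseEstimation.pose_estimator import (
    model, get_keypoints_from_keypoints, compute_left_knee_angle, moving_average
)

# One keypoint tensor per frame; each has shape (num_persons, 17, 2).
keypoints = get_keypoints_from_keypoints(model, 'data/pose/squat.mp4')

# Per-frame left knee angle, skipping frames with no detected person (assumption).
angles = [float(compute_left_knee_angle(kpts)) for kpts in keypoints if len(kpts) > 0]

# Smooth single-frame jitter; window_size=5 is an arbitrary choice.
smoothed = moving_average(angles, window_size=5)
print(f"Deepest squat position: {min(smoothed):.1f} degrees at the left knee")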
Modules/Speech2Text/transcribe.py CHANGED
@@ -3,4 +3,4 @@ import whisper
  def transcribe(audio_path):
      model = whisper.load_model("base")
      result = model.transcribe(audio_path)
-     return result.text
+     return result['text']
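The fix above is needed because whisper's `transcribe()` returns a plain dict (with "text", "segments", and "language" keys), so attribute access raises `AttributeError`. A quick check against the sample clip added in this commit:

import whisper

model = whisper.load_model("base")
result = model.transcribe("data/temp_audio/example.wav")

assert isinstance(result, dict)  # a dict, not an object with a .text attribute
print(result['text'])            # the transcription string
print(result['language'])        # detected language code, e.g. 'en'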
app.py CHANGED
@@ -1,32 +1,62 @@
  import streamlit as st
- from moviepy.editor import AudioClip
- import tempfile
- import os
  from st_audiorec import st_audiorec
+ from Modules.Speech2Text.transcribe import transcribe
+ import base64

  st.set_page_config(layout="wide", initial_sidebar_state="collapsed")
  # Create two columns
  col1, col2 = st.columns(2)
+ video_uploaded = None

  # First column containers
  with col1:
      st.subheader("Audio Recorder")
+     recorded = False
+     temp_path = 'data/temp_audio/audio_file.wav'
      wav_audio_data = st_audiorec()
+     if wav_audio_data is not None:
+         with open(temp_path, 'wb') as f:
+             # Write the audio data to the file
+             f.write(wav_audio_data)
+         instruction = transcribe(temp_path)
+         print(instruction)
+         recorded = True

      st.subheader("LLM answering")
-     # Add your content here
+     if recorded:
+         if "messages" not in st.session_state:
+             st.session_state.messages = []
+         for message in st.session_state.messages:
+             with st.chat_message(message["role"]):
+                 st.markdown(message["content"])
+
+         st.session_state.messages.append({"role": "user", "content": instruction})
+         with st.chat_message("user"):
+             st.markdown(instruction)
+
+         with st.chat_message("assistant"):
+             # Build answer from LLM
+             response = " to be DEFINED "  # TO DO
+             st.session_state.messages.append({"role": "assistant", "content": response})

      st.subheader("Movement Analysis")
-     # Add your content here
-
+     # TO DO
  # Second column containers
  with col2:
      st.subheader("Sports Agenda")
-     # Add your content here
+     # TO DO

      st.subheader("Video Analysis")
-     video_path = "data/video/temp.mp4"
-     st.video(video_path)
+     ask_video = st.empty()
+     if video_uploaded is None:
+         video_uploaded = ask_video.file_uploader("Choose a video file", type=["mp4", "ogg", "webm"])
+     if video_uploaded:
+         ask_video.empty()
+         with st.spinner("Processing video"):
+             pass  # TO DO
+         _left, mid, _right = st.columns(3)
+         with mid:
+             st.video(video_uploaded)

      st.subheader("Graph Displayer")
-     # TO DO
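On the `# TO DO` under the video spinner: `st.file_uploader` returns an `UploadedFile` (a file-like object), while the YOLO model in `pose_estimator.py` is called with a path, so the wiring will likely need a temporary file. A sketch of one plausible shape, purely an assumption about where this is headed:

import tempfile
from Modules.PoseEstimation.pose_estimator import model, get_keypoints_from_keypoints

# Hypothetical body for `with st.spinner("Processing video"):` — not in this commit.
with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
    tmp.write(video_uploaded.read())  # UploadedFile supports read()
    video_path = tmp.name

keypoints = get_keypoints_from_keypoints(model, video_path)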
config.py ADDED
@@ -0,0 +1,3 @@
+ # Pose estimation
+
+ pose_mode_size = 'm'
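The new `pose_mode_size` constant matches the Ultralytics checkpoint size suffixes ('n', 's', 'm', 'l', 'x'); nothing consumes it yet in this commit. One way it could be wired (the f-string is an assumption):

from ultralytics import YOLO
from config import pose_mode_size

# 'm' selects the medium pose checkpoint, yolov8m-pose.pt
model = YOLO(f'yolov8{pose_mode_size}-pose.pt')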
data/pose/squat_inference.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eaf32617db792283036d8ee54e4a53e96e88efab820c4367ea28f9851ebb9811
+ size 1784680
data/temp_audio/example.wav ADDED
Binary file (393 kB).
 
pose_estimation.ipynb DELETED
File without changes