Doux Thibault committed
Commit 3930469
Parents (2): 9a30a8c e051030

Merge branch 'main' of https://huggingface.co/spaces/EntrepreneurFirst/FitnessEquation

Modules/PoseEstimation/__init__.py ADDED
File without changes
Modules/PoseEstimation/curl_agent.py ADDED
@@ -0,0 +1,147 @@
+ from Modules.PoseEstimation.pose_estimator import calculate_angle, joints_id_dict, model
+ from langchain.tools import tool
+ from langchain.agents import AgentExecutor, create_tool_calling_agent
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.messages import HumanMessage
+ from langchain_mistralai.chat_models import ChatMistralAI
+
+ from operator import itemgetter
+ from typing import Dict, List, Union
+
+ from langchain_core.messages import AIMessage
+ from langchain_core.runnables import (
+     Runnable,
+     RunnableLambda,
+     RunnableMap,
+     RunnablePassthrough,
+ )
+
+ import numpy as np
+
+ # If api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.
+ llm = ChatMistralAI(model='mistral-large-latest', api_key="i5jSJkCFNGKfgIztloxTMjfckiFbYBj4")
+
+ @tool
+ def shoulder_angle(pose: list) -> float:
+
+     """
+     Computes the shoulder angle.
+
+     Args:
+         pose (list): list of keypoints
+
+     Returns:
+         arm_angle (float): arm angle with chest
+     """
+     right_elbow = pose[joints_id_dict['right_elbow']]
+     right_shoulder = pose[joints_id_dict['right_shoulder']]
+     right_hip = pose[joints_id_dict['right_hip']]
+
+     left_elbow = pose[joints_id_dict['left_elbow']]
+     left_shoulder = pose[joints_id_dict['left_shoulder']]
+     left_hip = pose[joints_id_dict['left_hip']]
+
+     right_arm_angle = calculate_angle(right_elbow, right_shoulder, right_hip)
+     left_arm_angle = calculate_angle(left_elbow, left_shoulder, left_hip)
+
+     return right_arm_angle
+
+
+ @tool
+ def elbow_angle(pose):
+     """
+     Computes the elbow angle.
+
+     Args:
+         pose (list): list of keypoints
+
+     Returns:
+         elbow_angle (float): elbow angle with chest
+     """
+     right_elbow = pose[joints_id_dict['right_elbow']]
+     right_shoulder = pose[joints_id_dict['right_shoulder']]
+     right_wrist = pose[joints_id_dict['right_wrist']]
+
+     left_elbow = pose[joints_id_dict['left_elbow']]
+     left_shoulder = pose[joints_id_dict['left_shoulder']]
+     left_wrist = pose[joints_id_dict['left_wrist']]
+
+     right_elbow_angle = calculate_angle(right_shoulder, right_elbow, right_wrist)
+     left_elbow_angle = calculate_angle(left_shoulder, left_elbow, left_wrist)
+
+     return right_elbow_angle
+
+
+ tools = [shoulder_angle, elbow_angle]
+
+ llm_with_tools = llm.bind_tools(tools)
+ tool_map = {tool.name: tool for tool in tools}
+
+ # prompt = ChatPromptTemplate.from_messages(
+ #     [
+ #         (
+ #             "system",
+ #             "You are a helpful assistant. Make sure to use the compute_right_knee_angle tool for information.",
+ #         ),
+ #         ("placeholder", "{chat_history}"),
+ #         ("human", "{input}"),
+ #         ("placeholder", "{agent_scratchpad}"),
+ #     ]
+ # )
+
+ # Construct the Tools agent
+ # curl_agent = create_tool_calling_agent(llm, tools, prompt)
+
+
+ pose_sequence = [
+     # Pose 1
+     [
+         # Head
+         [50, 50],
+         # Shoulders
+         [40, 80], [60, 80],
+         # Elbows
+         [30, 110], [70, 110],
+         # Wrists
+         [25, 140], [75, 140],
+         # Hips
+         [45, 180], [55, 180],
+         # Knees
+         [40, 220], [60, 220],
+         # Ankles
+         [35, 250], [65, 250],
+     ],
+     # Pose 2
+     [
+         # Head
+         [60, 60],
+         # Shoulders
+         [50, 90], [70, 90],
+         # Elbows
+         [40, 120], [80, 120],
+         # Wrists
+         [35, 150], [85, 150],
+         # Hips
+         [55, 180], [65, 180],
+         # Knees
+         [50, 220], [70, 220],
+         # Ankles
+         [45, 250], [75, 250],
+ ]]
+
+ # Create an agent executor by passing in the agent and tools
+ # agent_executor = AgentExecutor(agent=curl_agent, tools=tools, verbose=True)
+ # agent_executor.invoke({"input": f"Compute shoulder and elbow angle and display them given the following pose estimation: {pose_sequence[0]}"})
+
+ def call_tools(msg: AIMessage) -> Runnable:
+     """Simple sequential tool calling helper."""
+     tool_map = {tool.name: tool for tool in tools}
+     tool_calls = msg.tool_calls.copy()
+     for tool_call in tool_calls:
+         tool_call["output"] = tool_map[tool_call["name"]].invoke(tool_call["args"])
+     return tool_calls
+
+
+ chain = llm_with_tools | call_tools
+
+ print(chain.invoke(f"What is the shoulder angle and elbow angle given the following pose estimation: {pose_sequence[0]}"))
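
The comment in curl_agent.py notes that, when no api_key is passed, ChatMistralAI falls back to the `MISTRAL_API_KEY` environment variable. A minimal sketch of relying on that fallback instead of the hard-coded key (assuming the variable is exported, as app.py already reads it with os.getenv):

    import os
    from langchain_mistralai.chat_models import ChatMistralAI

    # No api_key argument: the client picks up MISTRAL_API_KEY from the environment.
    assert os.getenv("MISTRAL_API_KEY"), "set MISTRAL_API_KEY before constructing the model"
    llm = ChatMistralAI(model='mistral-large-latest')
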
Modules/PoseEstimation/pose_agent.py ADDED
@@ -0,0 +1,74 @@
+ from Modules.PoseEstimation.pose_estimator import calculate_angle, joints_id_dict, model
+ from langchain.tools import tool
+ from langchain.agents import AgentExecutor, create_tool_calling_agent
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.messages import HumanMessage
+ from langchain_mistralai.chat_models import ChatMistralAI
+
+ # If api_key is not passed, default behavior is to use the `MISTRAL_API_KEY` environment variable.
+ llm = ChatMistralAI(model='mistral-large-latest', api_key="i5jSJkCFNGKfgIztloxTMjfckiFbYBj4")
+
+ @tool
+ def compute_right_knee_angle(pose: list) -> float:
+
+     """
+     Computes the knee angle.
+
+     Args:
+         pose (list): list of keypoints
+
+     Returns:
+         knee_angle (float): knee angle
+     """
+
+     right_hip = pose[joints_id_dict['right_hip']]
+     right_knee = pose[joints_id_dict['right_knee']]
+     right_ankle = pose[joints_id_dict['right_ankle']]
+
+     knee_angle = calculate_angle(right_hip, right_knee, right_ankle)
+
+     print(knee_angle)
+
+     return str(knee_angle)
+
+ @tool
+ def get_keypoints_from_path(video_path: str):
+     """
+     Get keypoints from a video.
+
+     Args:
+         video_path (str): path to the video
+         model (YOLO): model to use
+
+     Returns:
+         keypoints (list): list of keypoints
+     """
+
+     keypoints = []
+     results = model(video_path, save=True, show_conf=False, show_boxes=False)
+     for frame in results:
+         tensor = frame.keypoints.xy[0]
+         keypoints.append(tensor.tolist())
+
+     return keypoints
+
+
+
+ tools = [compute_right_knee_angle]
+
+ prompt = ChatPromptTemplate.from_messages(
+     [
+         (
+             "system",
+             "You are a helpful assistant. Make sure to use the compute_right_knee_angle tool for information.",
+         ),
+         ("placeholder", "{chat_history}"),
+         ("human", "{input}"),
+         ("placeholder", "{agent_scratchpad}"),
+     ]
+ )
+
+ # Construct the Tools agent
+ agent = create_tool_calling_agent(llm, tools, prompt)
+
+ agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
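
pose_agent.py constructs agent_executor but does not invoke it here; a usage sketch following the invocation pattern commented out in curl_agent.py (the pose list below is a hypothetical placeholder, e.g. a single frame returned by get_keypoints_from_path):

    # Hypothetical single-frame keypoints: one [x, y] pair per joint in joints_id_dict.
    pose = [[0.0, 0.0]] * 17
    result = agent_executor.invoke(
        {"input": f"What is the knee angle given the following pose estimation: {pose}"}
    )
    print(result["output"])
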
Modules/PoseEstimation/pose_estimation.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
Modules/PoseEstimation/pose_estimator.py CHANGED
@@ -31,8 +31,6 @@ def get_keypoints_from_keypoints(model, video_path):
  
      return keypoints
  
- keypoints = get_keypoints_from_keypoints(model, '../../data/pose/squat.mp4')
-
  def calculate_angle(a, b, c):
  
      """
@@ -112,5 +110,4 @@ def moving_average(data, window_size):
      for i in range(len(data) - window_size + 1):
          avg.append(sum(data[i:i + window_size]) / window_size)
  
-     return avg
-
+     return avg
app.py CHANGED
@@ -10,6 +10,8 @@ import os
10
  from Modules.rag import rag_chain
11
 
12
  mistral_api_key = os.getenv("MISTRAL_API_KEY")
 
 
13
 
14
  def format_messages(messages):
15
  formatted_messages = ""
@@ -72,12 +74,17 @@ with col2:
72
  if video_uploaded is None:
73
  video_uploaded = ask_video.file_uploader("Choose a video file", type=["mp4", "ogg", "webm"])
74
  if video_uploaded:
 
75
  ask_video.empty()
76
- with st.spin("Processing video"):
77
- pass # TO DO
78
  _left, mid, _right = st.columns(3)
79
  with mid:
80
  st.video(video_uploaded)
 
 
 
 
 
 
81
 
82
  st.subheader("Graph Displayer")
83
  # TO DO
 
10
  from Modules.rag import rag_chain
11
 
12
  mistral_api_key = os.getenv("MISTRAL_API_KEY")
13
+ from Modules.PoseEstimation import pose_estimator
14
+ from utils import save_uploaded_file
15
 
16
  def format_messages(messages):
17
  formatted_messages = ""
 
74
  if video_uploaded is None:
75
  video_uploaded = ask_video.file_uploader("Choose a video file", type=["mp4", "ogg", "webm"])
76
  if video_uploaded:
77
+ video_uploaded = save_uploaded_file(video_uploaded)
78
  ask_video.empty()
 
 
79
  _left, mid, _right = st.columns(3)
80
  with mid:
81
  st.video(video_uploaded)
82
+ apply_pose = st.button("Apply Pose Estimation")
83
+
84
+ if apply_pose:
85
+ with st.spinner("Processing video"):
86
+ keypoints = pose_estimator.get_keypoints_from_keypoints(pose_estimator.model, video_uploaded)
87
+
88
 
89
  st.subheader("Graph Displayer")
90
  # TO DO
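
The "Graph Displayer" block is still marked TO DO; one possible sketch for it, reusing calculate_angle, joints_id_dict and moving_average from pose_estimator on the keypoints computed above (this assumes each frame is a list of [x, y] pairs with a detected person, and that a knee-angle curve is the graph wanted here):

    right_knee_angles = [
        pose_estimator.calculate_angle(
            frame[pose_estimator.joints_id_dict['right_hip']],
            frame[pose_estimator.joints_id_dict['right_knee']],
            frame[pose_estimator.joints_id_dict['right_ankle']],
        )
        for frame in keypoints if frame  # skip frames with no detection
    ]
    # Smooth the curve before plotting it in the app.
    st.line_chart(pose_estimator.moving_average(right_knee_angles, 5))
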
utils.py ADDED
@@ -0,0 +1,12 @@
+ import streamlit as st
+ import os
+
+ def save_uploaded_file(uploaded_file):
+     try:
+         file_path = os.path.join('uploaded', uploaded_file.name)
+         with open(file_path, 'wb') as f:
+             f.write(uploaded_file.getvalue())
+         return file_path
+     except Exception as e:
+         st.error(f"Error: {e}")
+         return None
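
save_uploaded_file writes into an 'uploaded' directory; if that folder does not exist, the open() call fails and the function falls into the except branch and returns None. A small variation that creates the folder on first use:

    import os
    import streamlit as st

    def save_uploaded_file(uploaded_file):
        try:
            os.makedirs('uploaded', exist_ok=True)  # create the target folder if it is missing
            file_path = os.path.join('uploaded', uploaded_file.name)
            with open(file_path, 'wb') as f:
                f.write(uploaded_file.getvalue())
            return file_path
        except Exception as e:
            st.error(f"Error: {e}")
            return None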