Commit 6445ea5 · Dan Biagini
Parent(s): 38d30c3

added hockey breeds v2 page and try it functionality
Files changed:
- .gitattributes +1 -0
- README.md +9 -1
- requirements-cpu.txt +6 -0
- requirements.txt +6 -0
- src/Hockey_Breeds.py +1 -1
- src/Home.py +1 -1
- src/hockey_object_detection.py +87 -2
- src/images/artifacts/{confusion_matrix.png → confusion_matrix_v1.png} +0 -0
- src/images/artifacts/confusion_matrix_v2.png +0 -0
- src/images/samples/v2/v2-sample1-090124.png +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+src/images/samples/v2/v2-sample1-090124.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -17,7 +17,15 @@ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-
 ```source .venv/bin/activate```
 
 ## Update requirements.txt
-
+There are two requirements.txt files; requirements-cpu.txt can be used for smaller installations, but it will only use CPU-based
+PyTorch computation.
+
+Keeping them in sync is important. *To do so on a CPU-installed dev system*, do the following:
+
+1. ```pip freeze > requirements-cpu.txt```
+1. ```diff requirements-cpu.txt requirements.txt > req-patch.diff```
+1. Edit req-patch.diff to remove the torch* related diffs (the requirements.txt torch entries should not have +cpu)
+1. ```patch -R requirements.txt req-patch.diff```
 
 ## Manual Testing
 To run in google cloud shell:
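As a rough alternative to the patch-based steps in the README hunk above, here is a minimal Python sketch (a hypothetical helper, not part of this commit) that rewrites requirements.txt from requirements-cpu.txt. It assumes the only torch-related difference between the two files is the `+cpu` local-version suffix on the torch-family pins:

```python
# Hypothetical helper, not part of this repo: regenerate requirements.txt from a
# fresh CPU freeze, assuming the files only diverge in the "+cpu" suffix on the
# torch-family pins (the requirements.txt entries should not carry "+cpu").
from pathlib import Path

def sync_requirements(cpu_file: str = "requirements-cpu.txt",
                      full_file: str = "requirements.txt") -> None:
    lines = []
    for line in Path(cpu_file).read_text().splitlines():
        if line.startswith("torch") and "+cpu" in line:
            # e.g. "torch==2.4.0+cpu" -> "torch==2.4.0"
            line = line.split("+cpu")[0]
        lines.append(line)
    Path(full_file).write_text("\n".join(lines) + "\n")

if __name__ == "__main__":
    sync_requirements()
```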
requirements-cpu.txt CHANGED
@@ -41,11 +41,14 @@ murmurhash==1.0.10
 narwhals==1.6.0
 networkx==3.2.1
 numpy==1.26.4
+opencv-python==4.10.0.84
 packaging==24.1
 pandas==2.2.2
 pillow==10.4.0
 preshed==3.0.9
 protobuf==5.28.0
+psutil==6.0.0
+py-cpuinfo==9.0.0
 pyarrow==17.0.0
 pydantic==2.9.0
 pydantic_core==2.23.2
@@ -61,6 +64,7 @@ rich==13.8.0
 rpds-py==0.20.0
 scikit-learn==1.5.1
 scipy==1.14.1
+seaborn==0.13.2
 shellingham==1.5.4
 six==1.16.0
 smart-open==7.0.4
@@ -84,6 +88,8 @@ tqdm==4.66.5
 typer==0.12.5
 typing_extensions==4.12.2
 tzdata==2024.1
+ultralytics==8.2.90
+ultralytics-thop==2.0.6
 urllib3==2.2.2
 wasabi==1.1.3
 watchdog==4.0.2
requirements.txt CHANGED
@@ -41,11 +41,14 @@ murmurhash==1.0.10
 narwhals==1.6.0
 networkx==3.2.1
 numpy==1.26.4
+opencv-python==4.10.0.84
 packaging==24.1
 pandas==2.2.2
 pillow==10.4.0
 preshed==3.0.9
 protobuf==5.28.0
+psutil==6.0.0
+py-cpuinfo==9.0.0
 pyarrow==17.0.0
 pydantic==2.9.0
 pydantic_core==2.23.2
@@ -61,6 +64,7 @@ rich==13.8.0
 rpds-py==0.20.0
 scikit-learn==1.5.1
 scipy==1.14.1
+seaborn==0.13.2
 shellingham==1.5.4
 six==1.16.0
 smart-open==7.0.4
@@ -84,6 +88,8 @@ tqdm==4.66.5
 typer==0.12.5
 typing_extensions==4.12.2
 tzdata==2024.1
+ultralytics==8.2.90
+ultralytics-thop==2.0.6
 urllib3==2.2.2
 wasabi==1.1.3
 watchdog==4.0.2
src/Hockey_Breeds.py CHANGED
@@ -53,7 +53,7 @@ st.markdown(desc)
 st.image("src/images/samples/sampl_batch.png")
 st.subheader("Validation Results")
 st.markdown('Validation of the model\'s performance was done using 26 images not included in the training set. The model performed fairly well against the validation dataset, with only 1 misclassified image.')
-st.image("src/images/artifacts/
+st.image("src/images/artifacts/confusion_matrix_v1.png", caption="Confusion Matrix for Hockey Breeds")
 
 st.subheader("Try It Out")
 
src/Home.py CHANGED
@@ -10,7 +10,7 @@ st.title('Welcome To Top Shelf :goal_net:',
 st.subheader('Artificial Intelligence for Hockey Coaches and Players',
              help='Proof of concept application')
 
-overview = '''**Top Shelf** helps coaches and players analyze their gameplay, providing helpful suggestions on areas for improvement.
+overview = '''**Top Shelf** helps coaches and players analyze their gameplay, providing helpful suggestions & recommendations on areas for improvement.
 
 We're starting with a focus on ice hockey, however this same technology could apply to other "invasion" games and sports, for example lacrosse, basketball, soccer, etc.
 
src/hockey_object_detection.py CHANGED
@@ -1,15 +1,100 @@
 import streamlit as st
+from ultralytics import YOLO
+from huggingface_hub import hf_hub_download
+import cv2
+import numpy as np
+
+@st.cache_resource
+def get_model():
+    repo_id = "danbiagini/hockey_breeds_v2"
+    return hf_hub_download(repo_id=repo_id, filename="hockey_breeds-v2-101623.pt")
+
+def run_inference(img, model_path, thresh=0.5):
+    model = YOLO(model_path)
+    st.session_state.results = model(img)
+    return draw_hockey_boxes(img, st.session_state.results, thresh)
+
+def draw_hockey_boxes(frame, results, thresh=0.5):
+    colors = {0: (0, 255, 0), 1: (255, 0, 0), 2: (0, 0, 255), 3: (128, 0, 0), 4: (0, 128, 0), 5: (0, 0, 128), 6: (0, 64, 0), 7: (64, 0, 0), 8: (0, 0, 64)}
+    font_scale = frame.shape[0] / 500
+    objects = []
+
+    for result in results:
+        for box in result.boxes.data.tolist():
+            x1, y1, x2, y2, score, class_id = box
+            objects.append((result.names[int(class_id)], score))
+
+            if score > thresh:
+                cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), colors[int(class_id) % 9], 3)
+                cv2.putText(frame, f'{result.names[int(class_id)].upper()}: {score:.2f}', (int(x1), int(y1 - 10)),
+                            cv2.FONT_HERSHEY_SIMPLEX, font_scale, colors[int(class_id) % 9], 3, cv2.LINE_AA)
+            else:
+                print(f'Found an object under confidence threshold {thresh} type: {result.names[int(class_id)]}, score: {score}, x1, y2: {x1}, {y2}')
+    return objects
+
+
+if 'results' not in st.session_state:
+    st.session_state.results = []
 
 st.set_page_config(page_title='Hockey Breeds v2 - Objects', layout="wide",
                    page_icon=":frame_with_picture:")
 
 st.title('Hockey Breeds v2 - Objects')
-
+intro = '''The first version of Hockey Breeds was fun and educational, but not useful for analyzing hockey videos. The second version is a proof of concept
+with the ability to recognize individual "objects" within an image, which paves the way to ultimately tracking those objects through game play.'''
+
+st.markdown(intro)
 st.subheader('Object Detection')
 
 desc = '''Hockey Breed detector v2 uses a state of the art (circa 2023) computer vision approach.
 
 I used the same training images as the first version of the Hockey Breeds model, but changed the ML algorithm to use YOLO object detection (YOLO v8).
-The output will be a set of hockey objects (defined by "bounding boxes") with labels for any hockey image uploaded.'''
+The output will be a set of hockey objects (defined by "bounding boxes") with labels for any hockey image uploaded.
+
+**Object List**:
+1. net
+1. stick
+1. puck
+1. skater
+1. goalie
+1. referee
+'''
 
 st.markdown(desc)
+
+st.subheader("Sample")
+st.image('src/images/samples/v2/v2-sample1-090124.png', caption='Sample image with hockey objects detected')
+
+st.subheader("Validation Results")
+
+st.markdown('''Validation of the model\'s performance was done using 15 images not included in the training set. The model had many issues; it did poorly with detecting *pucks* and *sticks* vs. backgrounds, and even goalies and skaters. It did very well on detecting referees.''')
+st.image("src/images/artifacts/confusion_matrix_v2.png", caption="Confusion Matrix for Hockey Breeds v2")
+
+st.subheader("Try It Out")
+
+img = st.file_uploader("Upload an image for object detection", type=["jpg", "jpeg", "png"])
+
+if img is not None:
+    thresh = st.slider('Set the object confidence threshold', min_value=0.0, max_value=1.0, value=0.5, step=0.01)
+    with st.status("Detecting hockey objects..."):
+        st.write("Loading model...")
+        model_f = get_model()
+
+        st.write("Processing image...")
+        frame = cv2.imdecode(np.frombuffer(img.read(), np.uint8), 1)
+
+        st.write("Running inference on image...")
+        objects = run_inference(frame, model_f, thresh)
+    st.dataframe(objects, column_config={
+        "0": "Object",
+        "1": "Confidence"
+    })
+
+    # check if the results list is empty
+    if len(st.session_state.results) == 0:
+        st.image(img, caption='Uploaded Image')
+    else:
+        st.image(frame, caption='Uploaded Image')
+
+else:
+    st.session_state.results = []
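For quick local testing of the detection flow added above, a minimal standalone sketch outside Streamlit is shown below. The test image path is hypothetical; the repo id and weights filename are the ones used by `get_model()` in the diff above:

```python
# Standalone sketch of the detection flow, for a quick local test outside
# Streamlit. "sample_rink.jpg" is a hypothetical local test image.
import cv2
from huggingface_hub import hf_hub_download
from ultralytics import YOLO

weights = hf_hub_download(repo_id="danbiagini/hockey_breeds_v2",
                          filename="hockey_breeds-v2-101623.pt")
model = YOLO(weights)

frame = cv2.imread("sample_rink.jpg")  # hypothetical local test image
results = model(frame)

# Mirror what draw_hockey_boxes() does: collect (label, score) pairs and draw
# boxes for detections above a confidence threshold.
thresh = 0.5
for result in results:
    for x1, y1, x2, y2, score, class_id in result.boxes.data.tolist():
        label = result.names[int(class_id)]
        print(f"{label}: {score:.2f}")
        if score > thresh:
            cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 3)

cv2.imwrite("sample_rink_annotated.jpg", frame)
```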
src/images/artifacts/{confusion_matrix.png → confusion_matrix_v1.png} RENAMED (file without changes)
src/images/artifacts/confusion_matrix_v2.png ADDED
src/images/samples/v2/v2-sample1-090124.png ADDED (Git LFS)