keremberke committed on
Commit
19e6cbd
1 Parent(s): 450a620

dataset uploaded by roboflow2huggingface package

Browse files
README.dataset.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # NFL-competition > 2022-07-08 12:06am
2
+ https://universe.roboflow.com/home-mxzv1/nfl-competition
3
+
4
+ Provided by a Roboflow user
5
+ License: Public Domain
6
+
7
+ The NFL Competition dataset contains nearly 10,000 images of football players during game play. This dataset can be used to detect the number of players and their locations on the field in order to alert coaches of possible NFL formation penalties or to analyze formations post-game. It can also be used as a starting point for automated player safety checks, ensuring each player is wearing a helmet.
8
+
9
+ For inspiration on how computer vision can be applied in the world of sports, check out these posts from the Roboflow blog:
10
+
11
+ * https://blog.roboflow.com/ai-football-coach-playbook
12
+ * https://blog.roboflow.com/computer-vision-twilio-notifications/
13
+ * https://blog.roboflow.com/polygons-object-detection/
README.md ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ task_categories:
3
+ - object-detection
4
+ tags:
5
+ - roboflow
6
+ ---
7
+
8
+ ### Roboflow Dataset Page
9
+ https://universe.roboflow.com/home-mxzv1/nfl-competition/dataset/1
10
+
11
+ ### Dataset Labels
12
+
13
+ ```
14
+ ['helmet', 'helmet-blurred', 'helmet-difficult', 'helmet-partial', 'helmet-sideline']
15
+ ```
16
+
17
+ ### Citation
18
+
19
+ ```
20
+ @misc{ nfl-competition_dataset,
21
+ title = { NFL-competition Dataset },
22
+ type = { Open Source Dataset },
23
+ author = { home },
24
+ howpublished = { \url{ https://universe.roboflow.com/home-mxzv1/nfl-competition } },
25
+ url = { https://universe.roboflow.com/home-mxzv1/nfl-competition },
26
+ journal = { Roboflow Universe },
27
+ publisher = { Roboflow },
28
+ year = { 2022 },
29
+ month = { sep },
30
+ note = { visited on 2022-12-30 },
31
+ }
32
+ ```
33
+
34
+ ### License
35
+ Public Domain
36
+
37
+ ### Dataset Summary
38
+ This dataset was exported via roboflow.com on December 29, 2022 at 8:12 PM GMT
39
+
40
+ Roboflow is an end-to-end computer vision platform that helps you
41
+ * collaborate with your team on computer vision projects
42
+ * collect & organize images
43
+ * understand unstructured image data
44
+ * annotate, and create datasets
45
+ * export, train, and deploy computer vision models
46
+ * use active learning to improve your dataset over time
47
+
48
+ It includes 9947 images.
49
+ Helmets are annotated in COCO format.
50
+
51
+ The following pre-processing was applied to each image:
52
+ * Auto-orientation of pixel data (with EXIF-orientation stripping)
53
+ * Resize to 1280x720 (Stretch)
54
+
55
+ No image augmentation techniques were applied.
56
+
57
+
58
+
README.roboflow.txt ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ NFL-competition - v1 2022-07-08 12:06am
3
+ ==============================
4
+
5
+ This dataset was exported via roboflow.com on December 29, 2022 at 8:12 PM GMT
6
+
7
+ Roboflow is an end-to-end computer vision platform that helps you
8
+ * collaborate with your team on computer vision projects
9
+ * collect & organize images
10
+ * understand unstructured image data
11
+ * annotate, and create datasets
12
+ * export, train, and deploy computer vision models
13
+ * use active learning to improve your dataset over time
14
+
15
+ It includes 9947 images.
16
+ Helmets are annotated in COCO format.
17
+
18
+ The following pre-processing was applied to each image:
19
+ * Auto-orientation of pixel data (with EXIF-orientation stripping)
20
+ * Resize to 1280x720 (Stretch)
21
+
22
+ No image augmentation techniques were applied.
23
+
24
+
data/test.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7dce8a1c5ee951ab36a7980925a22a88485ea0c496f40af861183c6246724f6
3
+ size 121495574
data/train.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7a4fda18ebed706b9e82e4ff593f23e5d265de516ee9869d93769c2151ff204
3
+ size 859629963
data/valid.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ff4e9f5fedfb23ee1c8b0057b8ca7b87adf7a9d03749882e592d28754c6e3cf
3
+ size 244027422
nfl-object-detection.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import collections
import json
import os

import datasets


# Roboflow Universe page this dataset was exported from.
_HOMEPAGE = "https://universe.roboflow.com/home-mxzv1/nfl-competition/dataset/1"
_LICENSE = "Public Domain"
# BibTeX entry for citing the dataset (note: \\url escapes to \url in the
# rendered string).
_CITATION = """\
@misc{ nfl-competition_dataset,
    title = { NFL-competition Dataset },
    type = { Open Source Dataset },
    author = { home },
    howpublished = { \\url{ https://universe.roboflow.com/home-mxzv1/nfl-competition } },
    url = { https://universe.roboflow.com/home-mxzv1/nfl-competition },
    journal = { Roboflow Universe },
    publisher = { Roboflow },
    year = { 2022 },
    month = { sep },
    note = { visited on 2022-12-30 },
}
"""
# Per-split archives hosted on the Hugging Face Hub; keys are the split
# names used by the builder's _split_generators.
_URLS = {
    "train": "https://huggingface.co/datasets/keremberke/nfl-object-detection/resolve/main/data/train.zip",
    "validation": "https://huggingface.co/datasets/keremberke/nfl-object-detection/resolve/main/data/valid.zip",
    "test": "https://huggingface.co/datasets/keremberke/nfl-object-detection/resolve/main/data/test.zip",
}

# Detection class names; list order fixes the ClassLabel integer ids.
_CATEGORIES = ['helmet', 'helmet-blurred', 'helmet-difficult', 'helmet-partial', 'helmet-sideline']
# COCO annotation file expected inside each extracted split folder.
_ANNOTATION_FILENAME = "_annotations.coco.json"
34
class NFLOBJECTDETECTION(datasets.GeneratorBasedBuilder):
    """Loader for the Roboflow NFL-competition helmet-detection dataset.

    Each example is one image plus its COCO-style object annotations
    (bounding boxes over five helmet categories).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the feature schema and dataset-level metadata."""
        # Schema of a single annotated object within an image.
        object_features = {
            "id": datasets.Value("int64"),
            "area": datasets.Value("int64"),
            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
            "category": datasets.ClassLabel(names=_CATEGORIES),
        }
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image_id": datasets.Value("int64"),
                    "image": datasets.Image(),
                    "width": datasets.Value("int32"),
                    "height": datasets.Value("int32"),
                    "objects": datasets.Sequence(object_features),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download and extract every split archive, one SplitGenerator each."""
        extracted = dl_manager.download_and_extract(_URLS)
        # Insertion order matters: train, validation, test — same as _URLS.
        split_for_key = {
            "train": datasets.Split.TRAIN,
            "validation": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"folder_dir": extracted[key]},
            )
            for key, split in split_for_key.items()
        ]

    def _generate_examples(self, folder_dir):
        """Yield (index, example) pairs from one extracted split folder.

        Reads the split's COCO annotation file, then walks the folder and
        emits every file that has a matching entry in the "images" list.
        """

        def _to_object(raw, label_for_id):
            # Keep only the fields declared in the "objects" feature,
            # mapping the COCO category id to its class name.
            return {
                "id": raw["id"],
                "area": raw["area"],
                "bbox": raw["bbox"],
                "category": label_for_id[raw["category_id"]],
            }

        with open(os.path.join(folder_dir, _ANNOTATION_FILENAME), "r") as ann_file:
            coco = json.load(ann_file)

        label_for_id = {cat["id"]: cat["name"] for cat in coco["categories"]}
        annots_by_image_id = collections.defaultdict(list)
        for raw in coco["annotations"]:
            annots_by_image_id[raw["image_id"]].append(raw)
        # Keyed by file name so directory entries can be matched directly.
        images_by_filename = {img["file_name"]: img for img in coco["images"]}

        example_index = 0
        for entry in os.listdir(folder_dir):
            image_info = images_by_filename.get(entry)
            if image_info is None:
                # Skip the annotation file and anything not listed in "images".
                continue
            image_path = os.path.join(folder_dir, entry)
            with open(image_path, "rb") as img_file:
                raw_bytes = img_file.read()
            yield example_index, {
                "image_id": image_info["id"],
                "image": {"path": image_path, "bytes": raw_bytes},
                "width": image_info["width"],
                "height": image_info["height"],
                "objects": [
                    _to_object(raw, label_for_id)
                    for raw in annots_by_image_id[image_info["id"]]
                ],
            }
            example_index += 1