dcayton committed
Commit 9132e42
1 Parent(s): 5707f91

first draft of loading script

Files changed (1)
  1. nba_tracking_data_15_16.py +152 -104
nba_tracking_data_15_16.py CHANGED
@@ -12,92 +12,123 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # TODO: Address all TODOs and remove all explanatory comments
-"""TODO: Add a description here."""
+"""This is tracking data of the 2015-2016 NBA season."""


 import csv
 import json
 import os
+import py7zr  # the .7z game archives require py7zr for extraction

 import datasets
+import requests


-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
-@InProceedings{huggingface:dataset,
-title = {A great new dataset},
-author={huggingface, Inc.
-},
-year={2020}
-}
+@misc{Linou2016,
+  title={NBA-Player-Movements},
+  author={Kostya Linou},
+  publisher={SportVU},
+  year={2016}
+}
 """

-# TODO: Add description of the dataset here
-# You can copy an official description
 _DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+This dataset is designed to give easier access to the tracking data.
+By merging all .7z files into one large .json file, all information can be retrieved at once.
 """

-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
-
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
-
-# TODO: Add link to the official dataset URLs here
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {
-    "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
-    "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
-}
-
-
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class NewDataset(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
-        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
-    ]
-
-    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+_HOMEPAGE = "https://github.com/linouk23/NBA-Player-Movements/tree/master/"
+
+# The .7z game logs live in one directory of the repo; requesting that tree
+# page with an "Accept: application/json" header should make GitHub return the
+# directory listing as JSON, from which the archive names are collected.
+_URL = "https://github.com/linouk23/NBA-Player-Movements/raw/master/data/2016.NBA.Raw.SportVU.Game.Logs"
+_TREE = "https://github.com/linouk23/NBA-Player-Movements/tree/master/data/2016.NBA.Raw.SportVU.Game.Logs"
+
+res = requests.get(_TREE, headers={"Accept": "application/json"})
+items = res.json()["payload"]["tree"]["items"]
+
+_URLS = {}
+for game in items:
+    name = game["name"][:-3]  # strip the ".7z" suffix
+    _URLS[name] = _URL + "/" + name + ".7z"
+
+
+class NbaTracking(datasets.GeneratorBasedBuilder):
+    """Tracking data for all games of the 2015-2016 season, in the form of coordinates for the players and ball at each moment."""

     def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option1": datasets.Value("string"),
-                    "answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option2": datasets.Value("string"),
-                    "second_domain_answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
+        features = datasets.Features(
+            {
+                "gameid": datasets.Value("string"),
+                "gamedate": datasets.Value("string"),
+                # a list of dictionaries is declared as a Sequence over a plain dict of features
+                "events": datasets.Sequence(
+                    {
+                        "eventid": datasets.Value("string"),
+                        # "visitor": {
+                        #     "name": datasets.Value("string"),
+                        #     "teamid": datasets.Value("int64"),
+                        #     "abbreviation": datasets.Value("string"),
+                        #     "players": datasets.Sequence({
+                        #         "lastname": datasets.Value("string"),
+                        #         "firstname": datasets.Value("string"),
+                        #         "playerid": datasets.Value("int64"),
+                        #         "jersey": datasets.Value("string"),
+                        #         "position": datasets.Value("string")
+                        #     })
+                        # },
+                        # "home": {
+                        #     "name": datasets.Value("string"),
+                        #     "teamid": datasets.Value("int64"),
+                        #     "abbreviation": datasets.Value("string"),
+                        #     "players": datasets.Sequence({
+                        #         "lastname": datasets.Value("string"),
+                        #         "firstname": datasets.Value("string"),
+                        #         "playerid": datasets.Value("int64"),
+                        #         "jersey": datasets.Value("string"),
+                        #         "position": datasets.Value("string")
+                        #     })
+                        # },
+                        "moments": datasets.Sequence(
+                            {
+                                "quarter": datasets.Value("int64"),
+                                "game_clock": datasets.Value("float32"),
+                                "shot_clock": datasets.Value("float32"),
+                                # a fixed-length list of floats: (x, y, z) of the ball
+                                "ball_coordinates": datasets.Sequence(datasets.Value("float32"), length=3),
+                                "player_coordinates": datasets.Sequence(
+                                    {
+                                        "teamid": datasets.Value("int64"),
+                                        "playerid": datasets.Value("int64"),
+                                        "x": datasets.Value("float32"),
+                                        "y": datasets.Value("float32"),
+                                        "z": datasets.Value("float32")
+                                    }
+                                )
+                            }
+                        )
+                    }
+                )
+            }
+        )
+
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
@@ -108,65 +139,82 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             # supervised_keys=("sentence", "label"),
             # Homepage of the dataset for documentation
             homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
             # Citation for the dataset
             citation=_CITATION,
         )

     def _split_generators(self, dl_manager):
         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
+        # no configurations are defined, so every game archive is downloaded
+        urls = _URLS
         data_dir = dl_manager.download_and_extract(urls)
+
+        all_file_paths = {}
+        for key, directory_path in data_dir.items():
+            # each extracted archive is a folder holding a single .json game log
+            all_file_paths[key] = os.path.join(directory_path, os.listdir(directory_path)[0])
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.jsonl"),
+                    "filepaths": all_file_paths,
                     "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.jsonl"),
-                    "split": "dev",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "test.jsonl"),
-                    "split": "test"
-                },
-            ),
+                }
+            )
         ]

     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepaths, split):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "first_domain":
-                    # Yields examples as (key, example) tuples
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option1": data["option1"],
-                        "answer": "" if split == "test" else data["answer"],
-                    }
-                else:
-                    yield key, {
-                        "sentence": data["sentence"],
-                        "option2": data["option2"],
-                        "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
-                    }
+        for path in filepaths.values():
+            with open(path, encoding="utf-8") as fp:
+                # read each game log once; repeated json.load(fp) calls on the
+                # same handle would fail after the first
+                data = json.load(fp)
+            game_id = data["gameid"]
+            game_date = data["gamedate"]
+            events = []
+            for event in data["events"]:
+                moments = []
+                for moment in event["moments"]:
+                    # a moment is [quarter, unix_time, game_clock, shot_clock, None, positions],
+                    # where positions[0] is the ball and the rest are the ten players,
+                    # each as [teamid, playerid, x, y, z]
+                    positions = moment[5]
+                    moments.append({
+                        "quarter": moment[0],
+                        "game_clock": moment[2],
+                        "shot_clock": moment[3],
+                        "ball_coordinates": positions[0][2:],
+                        "player_coordinates": [
+                            {
+                                "teamid": position[0],
+                                "playerid": position[1],
+                                "x": position[2],
+                                "y": position[3],
+                                "z": position[4]
+                            }
+                            for position in positions[1:]
+                        ]
+                    })
+                events.append({"eventid": event["eventId"], "moments": moments})
+            # one example per game, keyed by the unique game id
+            yield game_id, {
+                "gameid": game_id,
+                "gamedate": game_date,
+                "events": events
+            }
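
For a quick smoke test of the loading script, something like the following should work (a sketch: the local script path is an assumption, and note that `datasets` returns a `Sequence` of dicts transposed into a dict of lists):

from datasets import load_dataset

# Point load_dataset at the local copy of the loading script; the first run
# downloads and extracts every game archive, so it takes a while.
ds = load_dataset("./nba_tracking_data_15_16.py", split="train")

game = ds[0]
print(game["gameid"], game["gamedate"])

# A Sequence of dicts comes back transposed as a dict of lists:
events = game["events"]
print(events["eventid"][0])           # id of the first event
first_moments = events["moments"][0]  # all moments of the first event
print(first_moments["quarter"][0], first_moments["ball_coordinates"][0])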
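
To sanity-check the moment layout that `_generate_examples` assumes, a single game archive can also be fetched and unpacked directly with `requests` and `py7zr` (a sketch; the game name is a hypothetical example of the repo's `MM.DD.YYYY.AWAY.at.HOME.7z` naming pattern):

import json
import os
import tempfile

import py7zr
import requests

BASE = "https://github.com/linouk23/NBA-Player-Movements/raw/master/data/2016.NBA.Raw.SportVU.Game.Logs"
NAME = "01.01.2016.GSW.at.DEN"  # hypothetical example game

with tempfile.TemporaryDirectory() as tmp:
    archive_path = os.path.join(tmp, NAME + ".7z")
    with open(archive_path, "wb") as f:
        f.write(requests.get(BASE + "/" + NAME + ".7z").content)
    with py7zr.SevenZipFile(archive_path, mode="r") as archive:
        archive.extractall(path=tmp)
    json_name = [n for n in os.listdir(tmp) if n.endswith(".json")][0]
    with open(os.path.join(tmp, json_name), encoding="utf-8") as fp:
        game = json.load(fp)

# One raw moment, indexed the way _generate_examples reads it:
moment = game["events"][0]["moments"][0]
print(moment[0], moment[2], moment[3])  # quarter, game clock, shot clock
print(moment[5][0][2:])                 # ball coordinates (x, y, z)
print(moment[5][1])                     # one player: [teamid, playerid, x, y, z]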