alexandros-petkos committed
Commit 386005a · 1 Parent(s): 09222e7

first commit
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ dataset/* filter=lfs diff=lfs merge=lfs -text
+ dataset/** filter=lfs diff=lfs merge=lfs -text
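Note (not part of the commit): the two added patterns route everything under `dataset/` through Git LFS, so a plain clone only materializes the sequence files when `git-lfs` is available (otherwise pointer files are checked out). With git-lfs installed, something like `git lfs install` followed by `git lfs pull --include="dataset/**"` fetches just the dataset payload; the `--include` filter is standard `git lfs pull` usage, not something defined by this repo.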
.gitignore ADDED
@@ -0,0 +1,216 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ # Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ # poetry.lock
+ # poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ # pdm.lock
+ # pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ # pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # Redis
+ *.rdb
+ *.aof
+ *.pid
+
+ # RabbitMQ
+ mnesia/
+ rabbitmq/
+ rabbitmq-data/
+
+ # ActiveMQ
+ activemq-data/
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ # .idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
+
+ # Streamlit
+ .streamlit/secrets.toml
README.md CHANGED
@@ -1,3 +1,35 @@
- ---
- license: mit
- ---
+ # Spatial Everyday Activities
+ [[Website]](https://spatial-ai.com/) [[Data Explorer]](https://spatial-ai.com/explorer)
+
+ Spatial Everyday Activities (SEA) is an egocentric dataset designed for training robotic foundation models. It comprises approximately 10,000 hours of egocentric data collected by computer vision experts across a diverse range of locations in the US and EU.
+
+ SEA-small is a 100GB open-source subset of the full SEA dataset.
+
+ Contact: info@spatial-ai.com
+
+ ![image](assets/teaser.gif)
+
+ ## Run the code
+
+ Set up an isolated environment
+ ```bash
+ conda create -n sea python=3.12
+ conda activate sea
+ ```
+
+ Install dependencies and download SEA-small
+ ```bash
+ pip install rerun-sdk
+ git clone https://huggingface.co/datasets/spatial-ai/sea-small
+ cd sea-small
+ ```
+
+ Install the project requirements
+ ```bash
+ pip install -e .
+ ```
+
+ Run the example viewer
+ ```bash
+ python -m sea_scenes
+ ```
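Note (editorial, not part of the committed README): `sea_scenes/__main__.py` below also accepts a `--sequence <sequence-id>` flag, whose choices are the directories under `dataset/` and whose default is `0aeb0c00-ef9c-4325-b005-53ace076b641`, plus the standard Rerun script flags registered by `rr.script_add_args`. So `python -m sea_scenes --sequence <sequence-id>` selects a specific recording.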
assets/teaser.gif ADDED

Git LFS Details

  • SHA256: 9480191c5121e351171af37bd34603543550f5936ac43be59070038f2be68918
  • Pointer size: 133 Bytes
  • Size of remote file: 24.8 MB
pyproject.toml ADDED
@@ -0,0 +1,17 @@
+ [project]
+ name = "sea_scenes"
+ version = "0.1.0"
+ # requires-python = "<3.13"
+ readme = "README.md"
+ dependencies = [
+     "numpy",
+     "opencv-python",
+     "rerun-sdk",
+ ]
+
+ [project.scripts]
+ sea_scenes = "sea_scenes.__main__:main"
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
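Note (editorial): because `[project.scripts]` maps `sea_scenes` to `sea_scenes.__main__:main`, the `pip install -e .` step from the README should also install a `sea_scenes` console command equivalent to `python -m sea_scenes`.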
sea_scenes/__init__.py ADDED
File without changes
sea_scenes/__main__.py ADDED
@@ -0,0 +1,267 @@
+ #!/usr/bin/env python3
+ from __future__ import annotations
+
+ import argparse
+ from pathlib import Path
+ from typing import Any
+
+ import numpy as np
+ import rerun as rr
+ import rerun.blueprint as rrb
+
+ from rerun.blueprint import archetypes as rrba
+ from rerun.blueprint.components import BackgroundKind
+
+ from .loaders import load_frames, load_depth, load_trajectory, load_body_data, load_hand_data
+
+ DESCRIPTION = """
+ # SEA Scenes
+ This example visualizes the [SEA dataset](https://huggingface.co/datasets/spatial-ai/sea-small) using Rerun.
+
+ Spatial Everyday Activities (SEA) is an egocentric dataset designed for training robotic foundation models.
+ It comprises approximately 10,000 hours of egocentric data collected by computer vision experts across a diverse range of locations in the US and EU.
+ """.strip()
+
+ Color = tuple[float, float, float, float]
+
+ SEQUENCE_ROOT = Path("./dataset")
+ AVAILABLE_SEQUENCES = [s.name for s in SEQUENCE_ROOT.iterdir() if s.is_dir()]
+
+ CAMERA_LEFT_ENTITY_PATH = "world/camera_left"
+ CAMERA_RIGHT_ENTITY_PATH = "world/camera_right"
+ TRAJECTORY_LEFT_ENTITY_PATH = "world/trajectory_left"
+ TRAJECTORY_RIGHT_ENTITY_PATH = "world/trajectory_right"
+ BODY_ENTITY_PATH = "world/body"
+ HAND_LEFT_ENTITY_PATH = "world/hand_left"
+ HAND_RIGHT_ENTITY_PATH = "world/hand_right"
+
+ BODY_CONNECTIONS = [
+     (6, 14), (14, 15), (15, 16), (16, 17),  # Left arm
+     (6, 9), (9, 10), (10, 11), (11, 12),  # Right arm
+     (2, 3), (3, 4), (4, 5), (5, 6), (6, 7),  # Spine
+ ]
+
+ HAND_CONNECTIONS = [
+     (1, 2), (2, 3), (3, 4), (4, 5),  # Thumb
+     (1, 6), (6, 7), (7, 8), (8, 9), (9, 10),  # Index
+     (1, 11), (11, 12), (12, 13), (13, 14), (14, 15),  # Middle finger
+     (1, 16), (16, 17), (17, 18), (18, 19), (19, 20),  # Ring finger
+     (1, 21), (21, 22), (22, 23), (23, 24), (24, 25),  # Little finger
+ ]
+
+ def log_camera(
+     intrinsics: np.ndarray,
+     translation: np.ndarray,
+     rotation_xyzw: np.ndarray,
+     entity_id: str,
+ ) -> None:
+     """Log a pinhole camera and its transform."""
+     w, h, fx, fy, cx, cy = intrinsics
+     intrinsic = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]], dtype=np.float32)
+
+     rr.log(
+         entity_id,
+         rr.Transform3D(
+             translation=translation,
+             rotation=rr.Quaternion(xyzw=rotation_xyzw),
+         ),
+     )
+     rr.log(
+         entity_id,
+         rr.Pinhole(
+             image_from_camera=intrinsic,
+             resolution=[int(w), int(h)],
+             camera_xyz=rr.ViewCoordinates.LEFT_HAND_Y_UP,
+             image_plane_distance=2e-1,
+         ),
+     )
+
+
+ def log_trajectory(
+     positions: list[list[float]],
+     entity_id: str,
+     color: Color = (1.0, 1.0, 1.0, 1.0),
+     radii: float = 0.0025,
+ ) -> None:
+     """Log a simple 3D trajectory as a line strip."""
+     strips = np.array(positions, dtype=np.float32)
+
+     rr.log(
+         entity_id,
+         rr.LineStrips3D(
+             strips=[strips],
+             colors=[color],
+             radii=[radii],
+         ),
+     )
+
+
+ def log_keypoints(
+     keypoints: list[Any],
+     connections: list[tuple[int, int]],
+     entity_id: str,
+     color: Color = (1.0, 1.0, 1.0, 1.0),
+     radii: float = 0.0075,
+ ) -> None:
+     """Log a set of 3D keypoints as point primitives and connections."""
+     if not keypoints:
+         return
+
+     positions = np.array([[keypoint.position.x, keypoint.position.y, keypoint.position.z]
+                           for keypoint in keypoints], dtype=np.float32)
+
+     rr.log(
+         f"{entity_id}/keypoints",
+         rr.Points3D(
+             positions=positions,
+             colors=[color],
+             radii=radii,
+         ),
+     )
+
+     strips = np.array([[positions[connection[0]], positions[connection[1]]]
+                        for connection in connections], dtype=np.float32)
+
+     rr.log(
+         f"{entity_id}/connections",
+         rr.LineStrips3D(
+             strips=strips,
+             colors=[color],
+             radii=[radii * 0.25],
+         ),
+     )
+
+
+ def log_sea(sequence_path: Path) -> None:
+     """
+     Logs SEA sequence data using Rerun.
+
+     Parameters
+     ----------
+     sequence_path : Path
+         The path to the SEA recording.
+
+     Returns
+     -------
+     None
+
+     """
+
+     left_frames_path = sequence_path / "stereo" / "left_frames.dat"
+     right_frames_path = sequence_path / "stereo" / "right_frames.dat"
+     depth_path = sequence_path / "depth"
+     left_intrinsics_path = sequence_path / "stereo" / "left_intrinsics.txt"
+     right_intrinsics_path = sequence_path / "stereo" / "right_intrinsics.txt"
+     left_trajectory_path = sequence_path / "stereo" / "left_trajectory.bin"
+     right_trajectory_path = sequence_path / "stereo" / "right_trajectory.bin"
+     body_data_path = sequence_path / "body_data.bin"
+     hand_data_path = sequence_path / "hand_data.bin"
+
+     # Load frames
+     left_frames = load_frames(left_frames_path)
+     right_frames = load_frames(right_frames_path)
+
+     # Load depth
+     depth_frames = load_depth(depth_path)
+
+     # Load intrinsics
+     left_intrinsics = np.loadtxt(left_intrinsics_path)
+     right_intrinsics = np.loadtxt(right_intrinsics_path)
+
+     # Load trajectories
+     left_trajectory = load_trajectory(left_trajectory_path)
+     right_trajectory = load_trajectory(right_trajectory_path)
+
+     # Load body and hand data
+     body_data = load_body_data(body_data_path)
+     hand_data = load_hand_data(hand_data_path)
+
+     # World coordinate system
+     rr.log("world", rr.ViewCoordinates.LEFT_HAND_Y_UP, static=True)
+
+     # Log left and right images
+     for timestamp, image in left_frames:
+         rr.set_time("time", timestamp=timestamp * 1e-3)
+         rr.log(f"{CAMERA_LEFT_ENTITY_PATH}/bgr", rr.Image(image, color_model="BGR"))
+
+     for timestamp, image in right_frames:
+         rr.set_time("time", timestamp=timestamp * 1e-3)
+         rr.log(f"{CAMERA_RIGHT_ENTITY_PATH}/bgr", rr.Image(image, color_model="BGR"))
+
+     # Log depth
+     for timestamp, depth in depth_frames:
+         rr.set_time("time", timestamp=timestamp * 1e-3)
+         rr.log(f"{CAMERA_LEFT_ENTITY_PATH}/depth",
+             rr.DepthImage(depth, meter=1.0, colormap="viridis", depth_range=(0.0, 1.0)),
+         )
+
+     # Log left camera poses and trajectory
+     cumulative_xyz: list[list[float]] = []
+     for timestamp, pos, quat in left_trajectory:
+         rr.set_time("time", timestamp=timestamp * 1e-3)
+         log_camera(left_intrinsics, pos, quat, CAMERA_LEFT_ENTITY_PATH)
+
+         cumulative_xyz.append(pos.tolist())
+         log_trajectory(cumulative_xyz, TRAJECTORY_LEFT_ENTITY_PATH, color=(1.0, 1.0, 0.0, 1.0))
+
+     for timestamp, keypoints in body_data:
+         rr.set_time("time", timestamp=timestamp * 1e-3)
+         log_keypoints(keypoints, BODY_CONNECTIONS, BODY_ENTITY_PATH, color=(0.8, 0.0, 1.0, 1.0))
+
+     for timestamp, left_keypoints, right_keypoints in hand_data:
+         rr.set_time("time", timestamp=timestamp * 1e-3)
+         log_keypoints(left_keypoints, HAND_CONNECTIONS, HAND_LEFT_ENTITY_PATH, color=(0.3, 0.0, 0.4, 1.0))
+         log_keypoints(right_keypoints, HAND_CONNECTIONS, HAND_RIGHT_ENTITY_PATH, color=(0.3, 0.0, 0.4, 1.0))
+
+
+ def main() -> None:
+     parser = argparse.ArgumentParser(description=DESCRIPTION)
+     parser.add_argument(
+         "--sequence",
+         type=str,
+         choices=AVAILABLE_SEQUENCES,
+         default="0aeb0c00-ef9c-4325-b005-53ace076b641",
+         help="Sequence ID of the SEA dataset",
+     )
+     rr.script_add_args(parser)
+     args = parser.parse_args()
+
+     blueprint = rrb.Horizontal(
+         rrb.Spatial3DView(
+             name="3D",
+             origin="world",
+             background=rrba.Background(
+                 kind=BackgroundKind.SolidColor,
+                 color=[0, 0, 0, 255],
+             )
+         ),
+         rrb.Vertical(
+             rrb.Spatial2DView(
+                 name="Left",
+                 origin=CAMERA_LEFT_ENTITY_PATH,
+                 contents=["$origin/bgr"],
+             ),
+             rrb.Spatial2DView(
+                 name="Right",
+                 origin=CAMERA_RIGHT_ENTITY_PATH,
+                 contents=["$origin/bgr"],
+             ),
+             rrb.Spatial2DView(
+                 name="Depth",
+                 origin=CAMERA_LEFT_ENTITY_PATH,
+                 contents=["$origin/depth"],
+             ),
+             name="2D",
+         ),
+     )
+
+     rr.script_setup(args, "sea_scenes")
+     rr.send_blueprint(blueprint)
+
+     log_sea(SEQUENCE_ROOT / args.sequence)
+
+     rr.script_teardown(args)
+
+
+ if __name__ == "__main__":
+     main()
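The per-sequence directory layout is implied by the paths in `log_sea()` above. A minimal stand-alone sketch (not part of the commit; file names are taken from `log_sea()`, everything else is hypothetical) that reports which expected files are missing from a sequence directory before launching the viewer:

```python
from pathlib import Path

# Relative paths that log_sea() in sea_scenes/__main__.py reads for each sequence.
EXPECTED_FILES = [
    Path("stereo") / "left_frames.dat",
    Path("stereo") / "right_frames.dat",
    Path("stereo") / "left_intrinsics.txt",
    Path("stereo") / "right_intrinsics.txt",
    Path("stereo") / "left_trajectory.bin",
    Path("stereo") / "right_trajectory.bin",
    Path("body_data.bin"),
    Path("hand_data.bin"),
]


def missing_files(sequence_path: Path) -> list[Path]:
    """Return the expected files (and the depth directory) missing from a sequence."""
    missing = [rel for rel in EXPECTED_FILES if not (sequence_path / rel).exists()]
    if not (sequence_path / "depth").is_dir():
        missing.append(Path("depth"))
    return missing


if __name__ == "__main__":
    # Default sequence ID from sea_scenes/__main__.py; adjust to any directory under dataset/.
    print(missing_files(Path("dataset") / "0aeb0c00-ef9c-4325-b005-53ace076b641"))
```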
sea_scenes/loaders.py ADDED
@@ -0,0 +1,167 @@
+ from pathlib import Path
+ from typing import Any, List, Tuple
+ import struct
+
+ import numpy as np
+ import cv2
+
+ from .types import Vector3, Quaternion, Keypoint
+
+
+ def load_frames(frames_path: Path) -> List[Tuple[int, np.ndarray]]:
+     """Load encoded frames from a SEA stereo .dat file."""
+     frames: List[Tuple[int, np.ndarray]] = []
+
+     header_size = 8 + 8 + 4
+
+     with open(frames_path, "rb") as f:
+         while True:
+             header = f.read(header_size)
+             if len(header) < header_size:
+                 break
+
+             frame_timestamp, frame_index, frame_size = struct.unpack(">qqi", header)
+             frame_bytes = f.read(frame_size)
+
+             if len(frame_bytes) != frame_size:
+                 raise EOFError("Unexpected EOF while reading frame data")
+
+             frame_array = np.frombuffer(frame_bytes, dtype=np.uint8)
+             frame = cv2.imdecode(frame_array, cv2.IMREAD_COLOR)
+             if frame is not None:
+                 frames.append((frame_timestamp, frame))
+
+     return frames
+
+
+ def load_depth(depth_dir: Path) -> list[tuple[int, np.ndarray]]:
+     """Load depth from .npy files: <timestamp>_depth_meter.npy."""
+     depth_frames: list[tuple[int, np.ndarray]] = []
+
+     for path in sorted(depth_dir.glob("*_depth_meter.npy")):
+         timestamp = int(path.stem.split("_")[0])
+
+         depth = np.load(path).astype(np.float32)
+         depth_frames.append((timestamp, depth))
+
+     return depth_frames
+
+
+ def load_trajectory(trajectory_path: Path) -> List[Tuple[int, np.ndarray, np.ndarray]]:
+     """Load camera trajectory from SEA binary trajectory file."""
+     trajectory: List[Tuple[int, np.ndarray, np.ndarray]] = []
+
+     trajectory_buffer_size = 8 + 7 * 4
+
+     with open(trajectory_path, "rb") as f:
+         while True:
+             trajectory_buffer = f.read(trajectory_buffer_size)
+             if len(trajectory_buffer) < trajectory_buffer_size:
+                 break
+
+             timestamp = struct.unpack("<q", trajectory_buffer[:8])[0]
+             pos_x, pos_y, pos_z, quat_x, quat_y, quat_z, quat_w = struct.unpack("<7f", trajectory_buffer[8:])
+
+             pos = np.array([pos_x, pos_y, pos_z], dtype=np.float32)
+             quat = np.array([quat_x, quat_y, quat_z, quat_w], dtype=np.float32)
+
+             trajectory.append((timestamp, pos, quat))
+
+     return trajectory
+
+
+ def load_body_data(body_data_path: Path) -> List[Any]:
+     """Load body tracking data from SEA binary body_data file."""
+     body_frames: List[Any] = []
+
+     header_size = 8 + 4
+
+     with open(body_data_path, "rb") as f:
+         while True:
+             header = f.read(header_size)
+             if len(header) < header_size:
+                 break
+
+             timestamp, keypoint_count = struct.unpack("<qi", header)
+             keypoints = _read_keypoints(f, keypoint_count)[:70]
+
+             body_frames.append(
+                 (
+                     timestamp,
+                     keypoints
+                 )
+             )
+
+     return body_frames
+
+
+ def load_hand_data(hand_data_path: Path) -> List[Any]:
+     """Load hand tracking data from SEA binary hand_data file."""
+     hand_frames: List[Any] = []
+
+     timestamp_buffer_size = 8
+     left_count_buffer_size = 4
+     right_count_buffer_size = 4
+
+     with open(hand_data_path, "rb") as f:
+         while True:
+             timestamp_buffer = f.read(timestamp_buffer_size)
+             if len(timestamp_buffer) < timestamp_buffer_size:
+                 break
+             timestamp = struct.unpack("<q", timestamp_buffer)[0]
+
+             left_count_buffer = f.read(left_count_buffer_size)
+             if len(left_count_buffer) < left_count_buffer_size:
+                 break
+             left_count = struct.unpack("<i", left_count_buffer)[0]
+             left_keypoints = _read_keypoints(f, left_count)
+
+             right_count_buffer = f.read(right_count_buffer_size)
+             if len(right_count_buffer) < right_count_buffer_size:
+                 break
+             right_count = struct.unpack("<i", right_count_buffer)[0]
+             right_keypoints = _read_keypoints(f, right_count)
+
+             hand_frames.append(
+                 (
+                     timestamp,
+                     left_keypoints if left_keypoints else None,
+                     right_keypoints if right_keypoints else None,
+                 )
+             )
+
+     return hand_frames
+
+
+ def _read_keypoints(f: Any, keypoint_count: int) -> List[Keypoint]:
+     if keypoint_count <= 0:
+         return []
+
+     floats_per_keypoint = 7
+     total_float_count = keypoint_count * floats_per_keypoint
+     bytes_per_float = 4
+     expected_byte_count = total_float_count * bytes_per_float
+
+     raw_bytes = f.read(expected_byte_count)
+
+     if len(raw_bytes) != expected_byte_count:
+         raise EOFError("Unexpected EOF while reading keypoint data")
+
+     float_values = struct.unpack("<" + "f" * total_float_count, raw_bytes)
+
+     keypoints: List[Keypoint] = []
+
+     for keypoint_index in range(keypoint_count):
+         base_index = keypoint_index * floats_per_keypoint
+         pos_x, pos_y, pos_z, quat_x, quat_y, quat_z, quat_w = float_values[
+             base_index : base_index + floats_per_keypoint
+         ]
+
+         keypoints.append(
+             Keypoint(
+                 position=Vector3(pos_x, pos_y, pos_z),
+                 rotation=Quaternion(quat_x, quat_y, quat_z, quat_w),
+             )
+         )
+
+     return keypoints
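`load_trajectory()` fixes the trajectory record format: a little-endian `int64` timestamp followed by seven `float32` values (x, y, z position, then an x, y, z, w quaternion). A round-trip sketch (not part of the commit) that writes a tiny synthetic file in that layout and reads it back with the loader; interpreting the timestamp as milliseconds is an assumption based on the `timestamp * 1e-3` conversion in `__main__.py`:

```python
import struct
from pathlib import Path

from sea_scenes.loaders import load_trajectory  # requires `pip install -e .`


def write_trajectory(path: Path, records: list[tuple[int, tuple, tuple]]) -> None:
    """Write (timestamp_ms, (x, y, z), (qx, qy, qz, qw)) records in load_trajectory's layout."""
    with open(path, "wb") as f:
        for timestamp_ms, (px, py, pz), (qx, qy, qz, qw) in records:
            f.write(struct.pack("<q", timestamp_ms))          # int64 timestamp
            f.write(struct.pack("<7f", px, py, pz, qx, qy, qz, qw))  # 7 x float32


if __name__ == "__main__":
    path = Path("synthetic_trajectory.bin")
    write_trajectory(path, [
        (0, (0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)),
        (33, (0.01, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)),
    ])
    for timestamp, pos, quat in load_trajectory(path):
        print(timestamp, pos, quat)
```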
sea_scenes/types.py ADDED
@@ -0,0 +1,22 @@
+ from dataclasses import dataclass
+
+
+ @dataclass
+ class Vector3:
+     x: float
+     y: float
+     z: float
+
+
+ @dataclass
+ class Quaternion:
+     x: float
+     y: float
+     z: float
+     w: float
+
+
+ @dataclass
+ class Keypoint:
+     position: Vector3
+     rotation: Quaternion
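These are plain data containers. For reference, a keypoint as produced by `loaders._read_keypoints()` can be constructed directly (illustrative values only, not from the dataset):

```python
from sea_scenes.types import Keypoint, Quaternion, Vector3

# An identity-orientation keypoint at the origin.
keypoint = Keypoint(position=Vector3(0.0, 0.0, 0.0), rotation=Quaternion(0.0, 0.0, 0.0, 1.0))
print(keypoint.position.x, keypoint.rotation.w)
```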