02alexander committed
Commit • 586d2b6
1 Parent(s): 96fda9e
handle logging of path+timestamp
Files changed:
- app.py +13 -10
- dataset_conversion.py +93 -4
- pyproject.toml +0 -1
- requirements.txt +1 -0
app.py
CHANGED
@@ -13,14 +13,14 @@ import urllib
 from pathlib import Path
 
 import gradio as gr
-import lerobot.common.datasets.video_utils
 import rerun as rr
 from datasets import load_dataset
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
+from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
 
-from dataset_conversion import log_dataset_to_rerun
+from dataset_conversion import log_dataset_to_rerun, log_lerobot_dataset_to_rerun
 
 CUSTOM_PATH = "/"
 
@@ -51,14 +51,17 @@ def show_dataset(dataset_id: str, episode_index: int) -> str:
 
     rr.save(filename.as_posix())
 
-    dataset = load_dataset(dataset_id, split="train", streaming=True)
-
-    # This is for LeRobot datasets (https://huggingface.co/lerobot):
-    ds_subset = dataset.filter(
-        lambda frame: "episode_index" not in frame or frame["episode_index"] == episode_index
-    )
-
-    log_dataset_to_rerun(ds_subset)
+    if "/" in dataset_id and dataset_id.split("/")[0] == "lerobot":
+        dataset = LeRobotDataset(dataset_id)
+        log_lerobot_dataset_to_rerun(dataset, episode_index)
+    else:
+        dataset = load_dataset(dataset_id, split="train", streaming=True)
+
+        # This is for LeRobot datasets (https://huggingface.co/lerobot):
+        ds_subset = dataset.filter(
+            lambda frame: "episode_index" not in frame or frame["episode_index"] == episode_index
+        )
+        log_dataset_to_rerun(ds_subset)
 
     return filename.as_posix()
 
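For context, a minimal sketch of how the new LeRobot branch could be exercised outside the Gradio app, assuming a public LeRobot dataset id such as `lerobot/pusht`; the dataset id, recording id, and output path are illustrative and not part of this commit:

```python
from pathlib import Path

import rerun as rr
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

from dataset_conversion import log_lerobot_dataset_to_rerun

# Hypothetical dataset id and output file, for illustration only.
dataset_id = "lerobot/pusht"
episode_index = 0
filename = Path("episode_0.rrd")

rr.init("dataset_viewer")             # start a recording
rr.save(filename.as_posix())          # stream everything that follows into the .rrd file
dataset = LeRobotDataset(dataset_id)  # same branch show_dataset() takes for "lerobot/..." ids
log_lerobot_dataset_to_rerun(dataset, episode_index)
```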
dataset_conversion.py
CHANGED
@@ -1,17 +1,56 @@
 from __future__ import annotations
 
 import logging
+from pathlib import PosixPath
 from typing import Any
 
+import cv2
 import numpy as np
 import rerun as rr
+import torch
+from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
 from PIL import Image
 from tqdm import tqdm
 
 logger = logging.getLogger(__name__)
 
 
-def to_rerun(column_name: str, value: Any) -> Any:
+def get_frame(
+    video_path: PosixPath, timestamp: float, video_cache: dict[PosixPath, tuple[np.ndarray, float]] | None = None
+) -> np.ndarray:
+    """
+    Extracts a specific frame from a video.
+
+    `video_path`: path to the video.
+    `timestamp`: timestamp of the wanted frame.
+    `video_cache`: cache to prevent reading the same video file twice.
+    """
+
+    if video_cache is None:
+        video_cache = {}
+    if video_path not in video_cache:
+        cap = cv2.VideoCapture(str(video_path))
+        print("new video!")
+        frames = []
+        while cap.isOpened():
+            success, frame = cap.read()
+            if success:
+                frames.append(frame)
+            else:
+                break
+        frame_rate = cap.get(cv2.CAP_PROP_FPS)
+        video_cache[video_path] = (frames, frame_rate)
+
+    frames, frame_rate = video_cache[video_path]
+    return frames[int(timestamp * frame_rate)]
+
+
+def to_rerun(
+    column_name: str,
+    value: Any,
+    video_cache: dict[PosixPath, tuple[np.ndarray, float]] | None = None,
+    videos_dir: PosixPath | None = None,
+) -> Any:
     """Do our best to interpret the value and convert it to a Rerun-compatible archetype."""
     if isinstance(value, Image.Image):
         if "depth" in column_name:
@@ -27,22 +66,47 @@ def to_rerun(column_name: str, value: Any) -> Any:
             return rr.TextDocument(str(value))  # Fallback to text
     elif isinstance(value, float) or isinstance(value, int):
         return rr.Scalar(value)
+    elif isinstance(value, torch.Tensor):
+        if value.dim() == 0:
+            return rr.Scalar(value.item())
+        elif value.dim() == 1:
+            return rr.BarChart(value)
+        elif value.dim() == 2 and "depth" in column_name:
+            return rr.DepthImage(value)
+        elif value.dim() == 2:
+            return rr.Image(value)
+        elif value.dim() == 3 and (value.shape[2] == 3 or value.shape[2] == 4):
+            return rr.Image(value)  # Treat it as a RGB or RGBA image
+        else:
+            return rr.Tensor(value)
+    elif isinstance(value, dict) and "path" in value and "timestamp" in value:
+        path = (videos_dir or PosixPath("./")) / PosixPath(value["path"])
+        timestamp = value["timestamp"]
+        return rr.Image(get_frame(path, timestamp, video_cache=video_cache))
     else:
         return rr.TextDocument(str(value))  # Fallback to text
 
 
-def log_dataset_to_rerun(dataset: Any) -> None:
-    # Special time-like columns for LeRobot datasets (https://huggingface.co/lerobot):
+def log_lerobot_dataset_to_rerun(dataset: LeRobotDataset, episode_index: int) -> None:
+    # Special time-like columns for LeRobot datasets (https://huggingface.co/lerobot/):
     TIME_LIKE = {"index", "frame_id", "timestamp"}
 
     # Ignore these columns (again, LeRobot-specific):
     IGNORE = {"episode_data_index_from", "episode_data_index_to", "episode_id"}
 
-    for row in tqdm(dataset):
+    hf_ds_subset = dataset.hf_dataset.filter(
+        lambda frame: "episode_index" not in frame or frame["episode_index"] == episode_index
+    )
+
+    video_cache: dict[PosixPath, tuple[np.ndarray, float]] = {}
+
+    for row in tqdm(hf_ds_subset):
         # Handle time-like columns first, since they set a state (time is an index in Rerun):
         for column_name in TIME_LIKE:
             if column_name in row:
                 cell = row[column_name]
+                if isinstance(cell, torch.Tensor) and cell.dim() == 0:
+                    cell = cell.item()
                 if isinstance(cell, int):
                     rr.set_time_sequence(column_name, cell)
                 elif isinstance(cell, float):
@@ -54,5 +118,30 @@ def log_dataset_to_rerun(dataset: Any) -> None:
         for column_name, cell in row.items():
            if column_name in TIME_LIKE or column_name in IGNORE:
                 continue
+            else:
+                rr.log(
+                    column_name,
+                    to_rerun(column_name, cell, video_cache=video_cache, videos_dir=dataset.videos_dir.parent),
+                )
+
+
+def log_dataset_to_rerun(dataset: Any) -> None:
+    TIME_LIKE = {"index", "frame_id", "timestamp"}
+
+    for row in tqdm(dataset):
+        # Handle time-like columns first, since they set a state (time is an index in Rerun):
+        for column_name in TIME_LIKE:
+            if column_name in row:
+                cell = row[column_name]
+                if isinstance(cell, int):
+                    rr.set_time_sequence(column_name, cell)
+                elif isinstance(cell, float):
+                    rr.set_time_seconds(column_name, cell)  # assume seconds
+                else:
+                    print(f"Unknown time-like column {column_name} with value {cell}")
 
+        # Now log actual data columns:
+        for column_name, cell in row.items():
+            if column_name in TIME_LIKE:
+                continue
             rr.log(column_name, to_rerun(column_name, cell))
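To illustrate the path+timestamp handling this commit is named after, here is a minimal sketch of how `to_rerun` resolves a video-backed cell into an image frame; the file name, timestamps, entity path, and `videos_dir` layout are hypothetical, and the shared `video_cache` is what keeps repeated lookups from re-decoding the same file:

```python
from pathlib import PosixPath

import rerun as rr

from dataset_conversion import get_frame, to_rerun

rr.init("path_timestamp_demo")  # hypothetical recording id, for illustration only

# A video-backed cell of the kind this commit handles: a dict of path + timestamp.
cell = {"path": "observation.image_episode_000000.mp4", "timestamp": 1.5}  # made-up file name

video_cache = {}                  # shared cache: video path -> (decoded frames, fps)
videos_dir = PosixPath("videos")  # assumed location of the dataset's video files

# The first call decodes the whole video once, caches its frames and frame rate,
# then wraps the frame at index int(timestamp * fps) in rr.Image and logs it.
rr.log("observation.image", to_rerun("observation.image", cell, video_cache=video_cache, videos_dir=videos_dir))

# Later timestamps for the same file are served from the cache by get_frame().
later_frame = get_frame(videos_dir / PosixPath(cell["path"]), 2.0, video_cache=video_cache)
```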
pyproject.toml
CHANGED
@@ -65,7 +65,6 @@ lint.unfixable = [
 
 [tool.ruff.lint.per-file-ignores]
 "__init__.py" = ["F401", "F403"]
-"app.py" = ["F401"]
 
 [tool.ruff.lint.isort]
 required-imports = ["from __future__ import annotations"]
requirements.txt
CHANGED
@@ -5,5 +5,6 @@ gradio_huggingfacehub_search
 pillow
 rerun-sdk>=0.15.0,<0.16.0
 tqdm
+cv2
 webdataset
 git+https://github.com/huggingface/lerobot@7bb5b15f4c0393ba16b73f6482611892301401d7#egg=lerobot