upload code

- code/.gitignore +4 -0
- code/LICENSE +21 -0
- code/README.md +31 -0
- code/environment_ubuntu.yml +126 -0
- code/modify_rlds_dataset.py +68 -0
- code/prepare_open_x.sh +54 -0
- code/rlds_dataset_mod/mod_functions.py +173 -0
- code/rlds_dataset_mod/multithreaded_adhoc_tfds_builder.py +243 -0
- code/setup.py +3 -0
- code/upload_hf.py +25 -0
code/.gitignore
ADDED
@@ -0,0 +1,4 @@
+wandb
+__pycache__
+.idea
+.DS_Store
code/LICENSE
ADDED
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Karl Pertsch
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
code/README.md
ADDED
@@ -0,0 +1,31 @@
+# RLDS datasets for VLA training
+The code is modified from [rlds_dataset_mod](https://github.com/moojink/rlds_dataset_mod/blob/main/README.md).
+
+We upload the processed datasets in this repository ❤
+
+Below is the code used for processing ⚙
+
+
+### Prepare gsutil
+
+```shell
+# https://cloud.google.com/sdk/docs/install-sdk?hl=zh-cn#linux
+curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-cli-linux-x86_64.tar.gz
+tar -xf google-cloud-cli-linux-x86_64.tar.gz
+./google-cloud-sdk/install.sh
+./google-cloud-sdk/bin/gcloud init --console-only
+./google-cloud-sdk/bin/gcloud components install gsutil
+# check gsutil
+export PATH=$PATH:/path/to/google-cloud-sdk/bin
+ls ./google-cloud-sdk/bin/gsutil
+```
+### Prepare environment
+
+```shell
+conda env create -f environment_ubuntu.yml
+conda activate rlds
+mkdir data_download
+mkdir data_tmp
+```
+
+All rights reserved.
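For reference, the end-to-end conversion that `prepare_open_x.sh` automates can be run for a single dataset roughly as sketched below. This is an illustrative walkthrough, not part of the uploaded files; `taco_play` and the two directories are placeholders to adjust for your setup.

```shell
# Hedged sketch: one dataset end to end, mirroring prepare_open_x.sh.
DOWNLOAD_DIR=./data_download     # raw copy from the gs://gresearch/robotics bucket
CONVERSION_DIR=./data_tmp        # resized / re-encoded output

mkdir -p ${DOWNLOAD_DIR}/taco_play
./google-cloud-sdk/bin/gsutil -m cp -r \
    gs://gresearch/robotics/taco_play/0.1.0 ${DOWNLOAD_DIR}/taco_play

python3 modify_rlds_dataset.py \
    --dataset=taco_play \
    --data_dir=${DOWNLOAD_DIR} \
    --target_dir=${CONVERSION_DIR} \
    --mods=resize_and_jpeg_encode \
    --n_workers=10 \
    --max_episodes_in_memory=100
```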
code/environment_ubuntu.yml
ADDED
@@ -0,0 +1,126 @@
+name: rlds
+channels:
+  - conda-forge
+dependencies:
+  - _libgcc_mutex=0.1=conda_forge
+  - _openmp_mutex=4.5=2_gnu
+  - ca-certificates=2023.7.22=hbcca054_0
+  - ld_impl_linux-64=2.40=h41732ed_0
+  - libffi=3.3=h58526e2_2
+  - libgcc-ng=13.1.0=he5830b7_0
+  - libgomp=13.1.0=he5830b7_0
+  - libsqlite=3.42.0=h2797004_0
+  - libstdcxx-ng=13.1.0=hfd8a6a1_0
+  - libzlib=1.2.13=hd590300_5
+  - ncurses=6.4=hcb278e6_0
+  - openssl=1.1.1u=hd590300_0
+  - pip=23.2.1=pyhd8ed1ab_0
+  - python=3.9.0=hffdb5ce_5_cpython
+  - readline=8.2=h8228510_1
+  - setuptools=68.0.0=pyhd8ed1ab_0
+  - sqlite=3.42.0=h2c6b66d_0
+  - tk=8.6.12=h27826a3_0
+  - tzdata=2023c=h71feb2d_0
+  - wheel=0.41.0=pyhd8ed1ab_0
+  - xz=5.2.6=h166bdaf_0
+  - zlib=1.2.13=hd590300_5
+  - pip:
+    - absl-py==1.4.0
+    - anyio==3.7.1
+    - apache-beam==2.49.0
+    - appdirs==1.4.4
+    - array-record==0.4.0
+    - astunparse==1.6.3
+    - cachetools==5.3.1
+    - certifi==2023.7.22
+    - charset-normalizer==3.2.0
+    - click==8.1.6
+    - cloudpickle==2.2.1
+    - contourpy==1.1.0
+    - crcmod==1.7
+    - cycler==0.11.0
+    - dill==0.3.1.1
+    - dm-tree==0.1.8
+    - dnspython==2.4.0
+    - docker-pycreds==0.4.0
+    - docopt==0.6.2
+    - etils==1.3.0
+    - exceptiongroup==1.1.2
+    - fastavro==1.8.2
+    - fasteners==0.18
+    - flatbuffers==23.5.26
+    - fonttools==4.41.1
+    - gast==0.4.0
+    - gitdb==4.0.10
+    - gitpython==3.1.32
+    - google-auth==2.22.0
+    - google-auth-oauthlib==1.0.0
+    - google-pasta==0.2.0
+    - googleapis-common-protos==1.59.1
+    - grpcio==1.56.2
+    - h11==0.14.0
+    - h5py==3.9.0
+    - hdfs==2.7.0
+    - httpcore==0.17.3
+    - httplib2==0.22.0
+    - idna==3.4
+    - importlib-metadata==6.8.0
+    - importlib-resources==6.0.0
+    - keras==2.13.1
+    - kiwisolver==1.4.4
+    - libclang==16.0.6
+    - markdown==3.4.3
+    - markupsafe==2.1.3
+    - matplotlib==3.7.2
+    - numpy==1.24.3
+    - oauthlib==3.2.2
+    - objsize==0.6.1
+    - opt-einsum==3.3.0
+    - orjson==3.9.2
+    - packaging==23.1
+    - pathtools==0.1.2
+    - pillow==10.0.0
+    - plotly==5.15.0
+    - promise==2.3
+    - proto-plus==1.22.3
+    - protobuf==4.23.4
+    - psutil==5.9.5
+    - pyarrow==11.0.0
+    - pyasn1==0.5.0
+    - pyasn1-modules==0.3.0
+    - pydot==1.4.2
+    - pymongo==4.4.1
+    - pyparsing==3.0.9
+    - python-dateutil==2.8.2
+    - pytz==2023.3
+    - pyyaml==6.0.1
+    - regex==2023.6.3
+    - requests==2.31.0
+    - requests-oauthlib==1.3.1
+    - rsa==4.9
+    - sentry-sdk==1.28.1
+    - setproctitle==1.3.2
+    - six==1.16.0
+    - smmap==5.0.0
+    - sniffio==1.3.0
+    - tenacity==8.2.2
+    - tensorboard==2.13.0
+    - tensorboard-data-server==0.7.1
+    - tensorflow==2.13.0
+    - tensorflow-datasets==4.9.2
+    - tensorflow-estimator==2.13.0
+    - tensorflow-hub==0.14.0
+    - tensorflow-io-gcs-filesystem==0.32.0
+    - tensorflow-metadata==1.13.1
+    - termcolor==2.3.0
+    - toml==0.10.2
+    - tqdm==4.65.0
+    - typing-extensions==4.5.0
+    - urllib3==1.26.16
+    - wandb==0.15.6
+    - werkzeug==2.3.6
+    - wrapt==1.15.0
+    - zipp==3.16.2
+    - zstandard==0.21.0
+    - dlimp @ git+https://github.com/kvablack/dlimp@fba663b10858793d35f9a0fdbe8f0d51906c8c90
+prefix: /scr/kpertsch/miniconda3/envs/rlds_env
code/modify_rlds_dataset.py
ADDED
@@ -0,0 +1,68 @@
+"""Modifies TFDS dataset with a map function, updates the feature definition and stores new dataset."""
+from functools import partial
+
+from absl import app, flags
+import tensorflow as tf
+import tensorflow_datasets as tfds
+
+from rlds_dataset_mod.mod_functions import TFDS_MOD_FUNCTIONS
+from rlds_dataset_mod.multithreaded_adhoc_tfds_builder import (
+    MultiThreadedAdhocDatasetBuilder,
+)
+
+FLAGS = flags.FLAGS
+
+flags.DEFINE_string("dataset", None, "Dataset name.")
+flags.DEFINE_string("data_dir", None, "Directory where source data is stored.")
+flags.DEFINE_string("target_dir", None, "Directory where modified data is stored.")
+flags.DEFINE_list("mods", None, "List of modification functions, applied in order.")
+flags.DEFINE_integer("n_workers", 10, "Number of parallel workers for data conversion.")
+flags.DEFINE_integer(
+    "max_episodes_in_memory",
+    100,
+    "Number of episodes converted & stored in memory before writing to disk.",
+)
+
+
+def mod_features(features):
+    """Modifies feature dict."""
+    for mod in FLAGS.mods:
+        features = TFDS_MOD_FUNCTIONS[mod].mod_features(features)
+    return features
+
+
+def mod_dataset_generator(builder, split, mods):
+    """Modifies dataset features."""
+    ds = builder.as_dataset(split=split)
+    for mod in mods:
+        ds = TFDS_MOD_FUNCTIONS[mod].mod_dataset(ds)
+    for episode in tfds.core.dataset_utils.as_numpy(ds):
+        yield episode
+
+
+def main(_):
+    builder = tfds.builder(FLAGS.dataset, data_dir=FLAGS.data_dir)
+
+    features = mod_features(builder.info.features)
+    print("############# Target features: ###############")
+    print(features)
+    print("##############################################")
+    assert FLAGS.data_dir != FLAGS.target_dir  # prevent overwriting original dataset
+
+    mod_dataset_builder = MultiThreadedAdhocDatasetBuilder(
+        name=FLAGS.dataset,
+        version=builder.version,
+        features=features,
+        split_datasets={split: builder.info.splits[split] for split in builder.info.splits},
+        config=builder.builder_config,
+        data_dir=FLAGS.target_dir,
+        description=builder.info.description,
+        generator_fcn=partial(mod_dataset_generator, builder=builder, mods=FLAGS.mods),
+        n_workers=FLAGS.n_workers,
+        max_episodes_in_memory=FLAGS.max_episodes_in_memory,
+    )
+    mod_dataset_builder.download_and_prepare()
+
+
+if __name__ == "__main__":
+    app.run(main)
code/prepare_open_x.sh
ADDED
@@ -0,0 +1,54 @@
+DOWNLOAD_DIR="<your path>" # todo:
+CONVERSION_DIR="<your path>" # todo:
+N_WORKERS=10                # number of workers used for parallel conversion --> adjust based on available RAM
+MAX_EPISODES_IN_MEMORY=100  # number of episodes converted in parallel --> adjust based on available RAM
+
+# increase limit on number of files opened in parallel to 20k --> conversion opens up to 1k temporary files
+# in /tmp to store dataset during conversion
+ulimit -n 20000
+
+# format: [dataset_name, dataset_version, transforms]
+DATASET_TRANSFORMS=(
+    # Datasets used for OpenVLA: https://openvla.github.io/
+    "fractal20220817_data 0.1.0 resize_and_jpeg_encode"
+    "kuka 0.1.0 resize_and_jpeg_encode,filter_success"
+    "taco_play 0.1.0 resize_and_jpeg_encode"
+    "jaco_play 0.1.0 resize_and_jpeg_encode"
+    "berkeley_cable_routing 0.1.0 resize_and_jpeg_encode"
+    "roboturk 0.1.0 resize_and_jpeg_encode"
+    "viola 0.1.0 resize_and_jpeg_encode"
+    "berkeley_autolab_ur5 0.1.0 resize_and_jpeg_encode,flip_wrist_image_channels"
+    "toto 0.1.0 resize_and_jpeg_encode"
+    "language_table 0.1.0 resize_and_jpeg_encode"
+    "stanford_hydra_dataset_converted_externally_to_rlds 0.1.0 resize_and_jpeg_encode,flip_wrist_image_channels,flip_image_channels"
+    "austin_buds_dataset_converted_externally_to_rlds 0.1.0 resize_and_jpeg_encode"
+    "nyu_franka_play_dataset_converted_externally_to_rlds 0.1.0 resize_and_jpeg_encode"
+    "furniture_bench_dataset_converted_externally_to_rlds 0.1.0 resize_and_jpeg_encode"
+    "ucsd_kitchen_dataset_converted_externally_to_rlds 0.1.0 resize_and_jpeg_encode"
+    "austin_sailor_dataset_converted_externally_to_rlds 0.1.0 resize_and_jpeg_encode"
+    "austin_sirius_dataset_converted_externally_to_rlds 0.1.0 resize_and_jpeg_encode"
+    "bc_z 0.1.0 resize_and_jpeg_encode"
+    "dlr_edan_shared_control_converted_externally_to_rlds 0.1.0 resize_and_jpeg_encode"
+    "iamlab_cmu_pickup_insert_converted_externally_to_rlds 0.1.0 resize_and_jpeg_encode"
+    "utaustin_mutex 0.1.0 resize_and_jpeg_encode,flip_wrist_image_channels,flip_image_channels"
+    "berkeley_fanuc_manipulation 0.1.0 resize_and_jpeg_encode,flip_wrist_image_channels,flip_image_channels"
+    "cmu_stretch 0.1.0 resize_and_jpeg_encode"
+    "dobbe 0.0.1 resize_and_jpeg_encode"
+    "fmb 0.0.1 resize_and_jpeg_encode"
+    "droid 1.0.0 resize_and_jpeg_encode"
+)
+
+for tuple in "${DATASET_TRANSFORMS[@]}"; do
+    # Extract strings from the tuple
+    strings=($tuple)
+    DATASET=${strings[0]}
+    VERSION=${strings[1]}
+    TRANSFORM=${strings[2]}
+    mkdir ${DOWNLOAD_DIR}/${DATASET}
+    ./google-cloud-sdk/bin/gsutil -m cp -r gs://gresearch/robotics/${DATASET}/${VERSION} ${DOWNLOAD_DIR}/${DATASET}
+    python3 modify_rlds_dataset.py --dataset=$DATASET --data_dir=$DOWNLOAD_DIR --target_dir=$CONVERSION_DIR --mods=$TRANSFORM --n_workers=$N_WORKERS --max_episodes_in_memory=$MAX_EPISODES_IN_MEMORY
+    rm -rf ${DOWNLOAD_DIR}/${DATASET}
+    mv ${CONVERSION_DIR}/${DATASET} ${DOWNLOAD_DIR}
+    # python3 upload_hf.py $DATASET $VERSION
+    rm -rf ${DOWNLOAD_DIR}/${DATASET}
+done
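One caveat when tuning the two knobs at the top of this script: `MultiThreadedAdhocDatasetBuilder._download_and_prepare` asserts that `max_episodes_in_memory` is divisible by `n_workers`, so keep the pair divisible. For example, a lower-RAM setting could look like the following (the values are only an illustration):

```shell
N_WORKERS=5                 # must evenly divide MAX_EPISODES_IN_MEMORY
MAX_EPISODES_IN_MEMORY=50   # smaller chunks -> lower peak RAM, more write rounds
```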
code/rlds_dataset_mod/mod_functions.py
ADDED
@@ -0,0 +1,173 @@
+from abc import ABC, abstractmethod
+
+import dlimp as dl
+import tensorflow as tf
+import tensorflow_datasets as tfds
+
+
+class TfdsModFunction(ABC):
+    @classmethod
+    @abstractmethod
+    def mod_features(
+        cls,
+        features: tfds.features.FeaturesDict,
+    ) -> tfds.features.FeaturesDict:
+        """
+        Modifies the data builder feature dict to reflect feature changes of ModFunction.
+        """
+        ...
+
+    @classmethod
+    @abstractmethod
+    def mod_dataset(cls, ds: tf.data.Dataset) -> tf.data.Dataset:
+        """
+        Perform arbitrary modifications on the dataset that comply with the modified feature definition.
+        """
+        ...
+
+
+def mod_obs_features(features, obs_feature_mod_function):
+    """Utility function to only modify keys in observation dict."""
+    return tfds.features.FeaturesDict(
+        {
+            "steps": tfds.features.Dataset(
+                {
+                    "observation": tfds.features.FeaturesDict(
+                        {
+                            key: obs_feature_mod_function(
+                                key, features["steps"]["observation"][key]
+                            )
+                            for key in features["steps"]["observation"].keys()
+                        }
+                    ),
+                    **{
+                        key: features["steps"][key]
+                        for key in features["steps"].keys()
+                        if key not in ("observation",)
+                    },
+                }
+            ),
+            **{key: features[key] for key in features.keys() if key not in ("steps",)},
+        }
+    )
+
+
+class ResizeAndJpegEncode(TfdsModFunction):
+    MAX_RES: int = 256
+
+    @classmethod
+    def mod_features(
+        cls,
+        features: tfds.features.FeaturesDict,
+    ) -> tfds.features.FeaturesDict:
+        def downsize_and_jpeg(key, feat):
+            """Downsizes image features, encodes as jpeg."""
+            if len(feat.shape) >= 2 and feat.shape[0] >= 64 and feat.shape[1] >= 64:  # is image / depth feature
+                should_jpeg_encode = (
+                    isinstance(feat, tfds.features.Image) and "depth" not in key
+                )
+                if len(feat.shape) > 2:
+                    new_shape = (ResizeAndJpegEncode.MAX_RES, ResizeAndJpegEncode.MAX_RES, feat.shape[2])
+                else:
+                    new_shape = (ResizeAndJpegEncode.MAX_RES, ResizeAndJpegEncode.MAX_RES)
+
+                if isinstance(feat, tfds.features.Image):
+                    return tfds.features.Image(
+                        shape=new_shape,
+                        dtype=feat.dtype,
+                        encoding_format="jpeg" if should_jpeg_encode else "png",
+                        doc=feat.doc,
+                    )
+                else:
+                    return tfds.features.Tensor(
+                        shape=new_shape,
+                        dtype=feat.dtype,
+                        doc=feat.doc,
+                    )
+
+            return feat
+
+        return mod_obs_features(features, downsize_and_jpeg)
+
+    @classmethod
+    def mod_dataset(cls, ds: tf.data.Dataset) -> tf.data.Dataset:
+        def resize_image_fn(step):
+            # resize images
+            for key in step["observation"]:
+                if len(step["observation"][key].shape) >= 2 and (
+                    step["observation"][key].shape[0] >= 64
+                    or step["observation"][key].shape[1] >= 64
+                ):
+                    size = (ResizeAndJpegEncode.MAX_RES,
+                            ResizeAndJpegEncode.MAX_RES)
+                    if "depth" in key:
+                        step["observation"][key] = tf.cast(
+                            dl.utils.resize_depth_image(
+                                tf.cast(step["observation"][key], tf.float32), size
+                            ),
+                            step["observation"][key].dtype,
+                        )
+                    else:
+                        step["observation"][key] = tf.cast(
+                            dl.utils.resize_image(step["observation"][key], size),
+                            tf.uint8,
+                        )
+            return step
+
+        def episode_map_fn(episode):
+            episode["steps"] = episode["steps"].map(resize_image_fn)
+            return episode
+
+        return ds.map(episode_map_fn)
+
+
+class FilterSuccess(TfdsModFunction):
+    @classmethod
+    def mod_features(
+        cls,
+        features: tfds.features.FeaturesDict,
+    ) -> tfds.features.FeaturesDict:
+        return features  # no feature changes
+
+    @classmethod
+    def mod_dataset(cls, ds: tf.data.Dataset) -> tf.data.Dataset:
+        return ds.filter(lambda e: e["success"])
+
+
+class FlipImgChannels(TfdsModFunction):
+    FLIP_KEYS = ["image"]
+
+    @classmethod
+    def mod_features(
+        cls,
+        features: tfds.features.FeaturesDict,
+    ) -> tfds.features.FeaturesDict:
+        return features  # no feature changes
+
+    @classmethod
+    def mod_dataset(cls, ds: tf.data.Dataset) -> tf.data.Dataset:
+        def flip(step):
+            for key in cls.FLIP_KEYS:
+                if key in step["observation"]:
+                    step["observation"][key] = step["observation"][key][..., ::-1]
+            return step
+
+        def episode_map_fn(episode):
+            episode["steps"] = episode["steps"].map(flip)
+            return episode
+
+        return ds.map(episode_map_fn)
+
+
+class FlipWristImgChannels(FlipImgChannels):
+    FLIP_KEYS = ["wrist_image", "hand_image"]
+
+
+TFDS_MOD_FUNCTIONS = {
+    "resize_and_jpeg_encode": ResizeAndJpegEncode,
+    "filter_success": FilterSuccess,
+    "flip_image_channels": FlipImgChannels,
+    "flip_wrist_image_channels": FlipWristImgChannels,
+}
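New transforms are added by subclassing `TfdsModFunction` and registering the class in `TFDS_MOD_FUNCTIONS`, which makes them selectable from `modify_rlds_dataset.py` via `--mods`. Below is a hypothetical sketch, not part of the uploaded code, that truncates long episodes; it assumes the usual RLDS layout seen above, where `episode["steps"]` is a nested `tf.data.Dataset`.

```python
import tensorflow as tf
import tensorflow_datasets as tfds

from rlds_dataset_mod.mod_functions import TFDS_MOD_FUNCTIONS, TfdsModFunction


class TruncateEpisodes(TfdsModFunction):
    """Hypothetical transform: keep at most MAX_STEPS steps per episode."""

    MAX_STEPS: int = 100

    @classmethod
    def mod_features(
        cls,
        features: tfds.features.FeaturesDict,
    ) -> tfds.features.FeaturesDict:
        return features  # step features are unchanged, only the episode length shrinks

    @classmethod
    def mod_dataset(cls, ds: tf.data.Dataset) -> tf.data.Dataset:
        def episode_map_fn(episode):
            # `steps` is itself a tf.data.Dataset, so `take` truncates the episode
            episode["steps"] = episode["steps"].take(cls.MAX_STEPS)
            return episode

        return ds.map(episode_map_fn)


# Register under the name that would be passed as --mods=truncate_episodes.
TFDS_MOD_FUNCTIONS["truncate_episodes"] = TruncateEpisodes
```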
code/rlds_dataset_mod/multithreaded_adhoc_tfds_builder.py
ADDED
@@ -0,0 +1,243 @@
+from functools import partial
+import itertools
+from multiprocessing import Pool
+from typing import Any, Callable, Dict, Iterable, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+import tensorflow_datasets as tfds
+from tensorflow_datasets.core import (
+    dataset_builder,
+    download,
+    example_serializer,
+    file_adapters,
+    naming,
+)
+from tensorflow_datasets.core import split_builder as split_builder_lib
+from tensorflow_datasets.core import splits as splits_lib
+from tensorflow_datasets.core import utils
+from tensorflow_datasets.core import writer as writer_lib
+
+Key = Union[str, int]
+# The nested example dict passed to `features.encode_example`
+Example = Dict[str, Any]
+KeyExample = Tuple[Key, Example]
+
+
+class MultiThreadedAdhocDatasetBuilder(tfds.core.dataset_builders.AdhocBuilder):
+    """Multithreaded adhoc dataset builder."""
+
+    def __init__(
+        self, *args, generator_fcn, n_workers, max_episodes_in_memory, **kwargs
+    ):
+        super().__init__(*args, **kwargs)
+        self._generator_fcn = generator_fcn
+        self._n_workers = n_workers
+        self._max_episodes_in_memory = max_episodes_in_memory
+
+    def _download_and_prepare(  # pytype: disable=signature-mismatch  # overriding-parameter-type-checks
+        self,
+        dl_manager: download.DownloadManager,
+        download_config: download.DownloadConfig,
+    ) -> None:
+        """Generate all splits and returns the computed split infos."""
+        assert (
+            self._max_episodes_in_memory % self._n_workers == 0
+        )  # need to divide max_episodes by workers
+        split_builder = ParallelSplitBuilder(
+            split_dict=self._split_datasets,
+            features=self.info.features,
+            dataset_size=self.info.dataset_size,
+            max_examples_per_split=download_config.max_examples_per_split,
+            beam_options=download_config.beam_options,
+            beam_runner=download_config.beam_runner,
+            file_format=self.info.file_format,
+            shard_config=download_config.get_shard_config(),
+            generator_fcn=self._generator_fcn,
+            n_workers=self._n_workers,
+            max_episodes_in_memory=self._max_episodes_in_memory,
+        )
+        split_generators = self._split_generators(dl_manager)
+        split_generators = split_builder.normalize_legacy_split_generators(
+            split_generators=split_generators,
+            generator_fn=self._generate_examples,
+            is_beam=False,
+        )
+        dataset_builder._check_split_names(split_generators.keys())
+
+        # Start generating data for all splits
+        path_suffix = file_adapters.ADAPTER_FOR_FORMAT[
+            self.info.file_format
+        ].FILE_SUFFIX
+
+        split_info_futures = []
+        for split_name, generator in utils.tqdm(
+            split_generators.items(),
+            desc="Generating splits...",
+            unit=" splits",
+            leave=False,
+        ):
+            filename_template = naming.ShardedFileTemplate(
+                split=split_name,
+                dataset_name=self.name,
+                data_dir=self.data_path,
+                filetype_suffix=path_suffix,
+            )
+            future = split_builder.submit_split_generation(
+                split_name=split_name,
+                generator=generator,
+                filename_template=filename_template,
+                disable_shuffling=self.info.disable_shuffling,
+            )
+            split_info_futures.append(future)
+
+        # Finalize the splits (after apache beam completed, if it was used)
+        split_infos = [future.result() for future in split_info_futures]
+
+        # Update the info object with the splits.
+        split_dict = splits_lib.SplitDict(split_infos)
+        self.info.set_splits(split_dict)
+
+    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
+        """Define dummy split generators."""
+
+        def dummy_generator():
+            yield None
+
+        return {split: dummy_generator() for split in self._split_datasets}
+
+
+class _SplitInfoFuture:
+    """Future containing the `tfds.core.SplitInfo` result."""
+
+    def __init__(self, callback: Callable[[], splits_lib.SplitInfo]):
+        self._callback = callback
+
+    def result(self) -> splits_lib.SplitInfo:
+        return self._callback()
+
+
+def parse_examples_from_generator(
+    episodes, max_episodes, fcn, split_name, total_num_examples, features, serializer
+):
+    upper = episodes[-1] + 1
+    upper_str = f'{upper}' if upper < max_episodes else ''
+    generator = fcn(split=split_name + f"[{episodes[0]}:{upper_str}]")
+    outputs = []
+    for key, sample in utils.tqdm(
+        zip(episodes, generator),
+        desc=f"Generating {split_name} examples...",
+        unit=" examples",
+        total=total_num_examples,
+        leave=False,
+        mininterval=1.0,
+    ):
+        if sample is None:
+            continue
+        try:
+            sample = features.encode_example(sample)
+        except Exception as e:  # pylint: disable=broad-except
+            utils.reraise(e, prefix=f"Failed to encode example:\n{sample}\n")
+        outputs.append((str(key), serializer.serialize_example(sample)))
+    return outputs
+
+
+class ParallelSplitBuilder(split_builder_lib.SplitBuilder):
+    def __init__(
+        self, *args, generator_fcn, n_workers, max_episodes_in_memory, **kwargs
+    ):
+        super().__init__(*args, **kwargs)
+        self._generator_fcn = generator_fcn
+        self._n_workers = n_workers
+        self._max_episodes_in_memory = max_episodes_in_memory
+
+    def _build_from_generator(
+        self,
+        split_name: str,
+        generator: Iterable[KeyExample],
+        filename_template: naming.ShardedFileTemplate,
+        disable_shuffling: bool,
+    ) -> _SplitInfoFuture:
+        """Split generator for example generators.
+
+        Args:
+          split_name: str,
+          generator: Iterable[KeyExample],
+          filename_template: Template to format the filename for a shard.
+          disable_shuffling: Specifies whether to shuffle the examples,
+
+        Returns:
+          future: The future containing the `tfds.core.SplitInfo`.
+        """
+        total_num_examples = None
+        serialized_info = self._features.get_serialized_info()
+        writer = writer_lib.Writer(
+            serializer=example_serializer.ExampleSerializer(serialized_info),
+            filename_template=filename_template,
+            hash_salt=split_name,
+            disable_shuffling=disable_shuffling,
+            file_format=self._file_format,
+            shard_config=self._shard_config,
+        )
+
+        del generator  # use parallel generators instead
+        episode_lists = chunk_max(
+            list(np.arange(self._split_dict[split_name].num_examples)),
+            self._n_workers,
+            self._max_episodes_in_memory,
+        )  # generate N episode lists
+        print(f"Generating with {self._n_workers} workers!")
+        pool = Pool(processes=self._n_workers)
+        for i, episodes in enumerate(episode_lists):
+            print(f"Processing chunk {i + 1} of {len(episode_lists)}.")
+            results = pool.map(
+                partial(
+                    parse_examples_from_generator,
+                    fcn=self._generator_fcn,
+                    split_name=split_name,
+                    total_num_examples=total_num_examples,
+                    serializer=writer._serializer,
+                    features=self._features,
+                    max_episodes=self._split_dict[split_name].num_examples,
+                ),
+                episodes,
+            )
+            # write results to shuffler --> this will automatically offload to disk if necessary
+            print("Writing conversion results...")
+            for result in itertools.chain(*results):
+                key, serialized_example = result
+                writer._shuffler.add(key, serialized_example)
+                writer._num_examples += 1
+        pool.close()
+
+        print("Finishing split conversion...")
+        shard_lengths, total_size = writer.finalize()
+
+        split_info = splits_lib.SplitInfo(
+            name=split_name,
+            shard_lengths=shard_lengths,
+            num_bytes=total_size,
+            filename_template=filename_template,
+        )
+        return _SplitInfoFuture(lambda: split_info)
+
+
+def dictlist2listdict(DL):
+    "Converts a dict of lists to a list of dicts"
+    return [dict(zip(DL, t)) for t in zip(*DL.values())]
+
+
+def chunks(l, n):
+    """Yield n number of sequential chunks from l."""
+    d, r = divmod(len(l), n)
+    for i in range(n):
+        si = (d + 1) * (i if i < r else r) + d * (0 if i < r else i - r)
+        yield l[si : si + (d + 1 if i < r else d)]
+
+
+def chunk_max(l, n, max_chunk_sum):
+    out = []
+    for _ in range(int(np.ceil(len(l) / max_chunk_sum))):
+        out.append([c for c in chunks(l[:max_chunk_sum], n) if c])
+        l = l[max_chunk_sum:]
+    return out
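The `chunks`/`chunk_max` helpers at the bottom decide how episode indices are distributed: each outer chunk holds at most `max_episodes_in_memory` episodes and is split into `n_workers` sub-lists that `Pool.map` hands to `parse_examples_from_generator`. A small illustration (the 10/2/4 values are arbitrary):

```python
import numpy as np

from rlds_dataset_mod.multithreaded_adhoc_tfds_builder import chunk_max

# 10 episodes, 2 workers, at most 4 episodes held in memory per round
episode_lists = chunk_max(list(np.arange(10)), 2, 4)
print(episode_lists)
# roughly: [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], [9]]]
```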
code/setup.py
ADDED
@@ -0,0 +1,3 @@
+from setuptools import setup
+
+setup(name="rlds_dataset_mod", packages=["rlds_dataset_mod"])
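setup.py only declares the `rlds_dataset_mod` helper package. The README does not spell out an install step, so presumably the package is either installed in editable mode or the scripts are simply run from the repository root where the package directory is importable:

```shell
# Assumption: editable install so `import rlds_dataset_mod` resolves anywhere.
pip install -e .
```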
code/upload_hf.py
ADDED
@@ -0,0 +1,25 @@
+from huggingface_hub import HfApi, HfFolder
+import sys
+import os
+
+dataset_name = sys.argv[1]
+version = sys.argv[2]
+
+
+api = HfApi()
+repo_id = ""  # TODO:
+token = ""  # TODO:
+folder_path = f"data_download/{dataset_name}/{version}"
+
+def upload_folder_to_hf(folder_path, repo_id, token):
+    # Upload the entire folder via the Hugging Face API
+    api.upload_folder(
+        folder_path=folder_path,
+        path_in_repo=f"{dataset_name}/{version}",
+        repo_id=repo_id,
+        token=token,
+        repo_type="dataset"
+    )
+    print(f"Uploaded the entire folder {folder_path} to {repo_id}")
+
+upload_folder_to_hf(folder_path, repo_id, token)
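Once `repo_id` and `token` are filled in, the script takes the dataset name and version as positional arguments, matching the commented-out call in prepare_open_x.sh; for example:

```shell
# Example only; requires repo_id/token to be set inside upload_hf.py first.
python3 upload_hf.py taco_play 0.1.0
```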