Datasets:

ArXiv:
NickKuijpers commited on
Commit
cdb5c9a
·
verified ·
1 Parent(s): 04dd67e

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .dockerignore +160 -0
  2. .gitattributes +29 -57
  3. .github/ISSUE_TEMPLATE/bug-report.yml +68 -0
  4. .github/PULL_REQUEST_TEMPLATE.md +34 -0
  5. .github/workflows/build-docker-images.yml +135 -0
  6. .github/workflows/build_documentation.yml +23 -0
  7. .github/workflows/build_pr_documentation.yml +19 -0
  8. .github/workflows/nightly-tests.yml +93 -0
  9. .github/workflows/quality.yml +72 -0
  10. .github/workflows/test-docker-build.yml +82 -0
  11. .github/workflows/test.yml +150 -0
  12. .github/workflows/trufflehog.yml +35 -0
  13. .github/workflows/upload_pr_documentation.yml +16 -0
  14. .gitignore +174 -0
  15. .pre-commit-config.yaml +74 -0
  16. CODE_OF_CONDUCT.md +133 -0
  17. CONTRIBUTING.md +305 -0
  18. LICENSE +507 -0
  19. Makefile +142 -0
  20. README.md +423 -0
  21. benchmarks/video/README.md +271 -0
  22. benchmarks/video/capture_camera_feed.py +102 -0
  23. benchmarks/video/run_video_benchmark.py +490 -0
  24. docker/lerobot-cpu/Dockerfile +29 -0
  25. docker/lerobot-gpu-dev/Dockerfile +68 -0
  26. docker/lerobot-gpu/Dockerfile +24 -0
  27. docs/README.md +137 -0
  28. docs/source/_toctree.yml +28 -0
  29. docs/source/backwardcomp.mdx +82 -0
  30. docs/source/cameras.mdx +173 -0
  31. docs/source/contributing.md +1 -0
  32. docs/source/getting_started_real_world_robot.mdx +311 -0
  33. docs/source/index.mdx +19 -0
  34. docs/source/installation.mdx +70 -0
  35. docs/source/koch.mdx +1 -0
  36. docs/source/lekiwi.mdx +1 -0
  37. docs/source/so100.mdx +1 -0
  38. docs/source/so101.mdx +1 -0
  39. examples/1_load_lerobot_dataset.py +148 -0
  40. examples/2_evaluate_pretrained_policy.py +139 -0
  41. examples/3_train_policy.py +120 -0
  42. examples/4_train_policy_with_script.md +274 -0
  43. examples/advanced/1_add_image_transforms.py +67 -0
  44. examples/advanced/2_calculate_validation_loss.py +104 -0
  45. examples/backward_compatibility/replay.py +105 -0
  46. examples/lekiwi/evaluate.py +32 -0
  47. examples/lekiwi/record.py +67 -0
  48. examples/lekiwi/replay.py +25 -0
  49. examples/lekiwi/teleoperate.py +32 -0
  50. lerobot/__init__.py +212 -0
.dockerignore ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Misc
16
+ .git
17
+ tmp
18
+ wandb
19
+ data
20
+ outputs
21
+ .vscode
22
+ rl
23
+ media
24
+
25
+
26
+ # Logging
27
+ logs
28
+
29
+ # HPC
30
+ nautilus/*.yaml
31
+ *.key
32
+
33
+ # Slurm
34
+ sbatch*.sh
35
+
36
+ # Byte-compiled / optimized / DLL files
37
+ __pycache__/
38
+ *.py[cod]
39
+ *$py.class
40
+
41
+ # C extensions
42
+ *.so
43
+
44
+ # Distribution / packaging
45
+ .Python
46
+ build/
47
+ develop-eggs/
48
+ dist/
49
+ downloads/
50
+ eggs/
51
+ .eggs/
52
+ lib/
53
+ lib64/
54
+ parts/
55
+ sdist/
56
+ var/
57
+ wheels/
58
+ pip-wheel-metadata/
59
+ share/python-wheels/
60
+ *.egg-info/
61
+ .installed.cfg
62
+ *.egg
63
+ MANIFEST
64
+
65
+ # PyInstaller
66
+ # Usually these files are written by a python script from a template
67
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
68
+ *.manifest
69
+ *.spec
70
+
71
+ # Installer logs
72
+ pip-log.txt
73
+ pip-delete-this-directory.txt
74
+
75
+ # Unit test / coverage reports
76
+ !tests/artifacts
77
+ htmlcov/
78
+ .tox/
79
+ .nox/
80
+ .coverage
81
+ .coverage.*
82
+ nosetests.xml
83
+ coverage.xml
84
+ *.cover
85
+ *.py,cover
86
+ .hypothesis/
87
+ .pytest_cache/
88
+
89
+ # Ignore .cache except calibration
90
+ .cache/*
91
+ !.cache/calibration/
92
+ !.cache/calibration/**
93
+
94
+ # Translations
95
+ *.mo
96
+ *.pot
97
+
98
+ # Django stuff:
99
+ *.log
100
+ local_settings.py
101
+ db.sqlite3
102
+ db.sqlite3-journal
103
+
104
+ # Flask stuff:
105
+ instance/
106
+ .webassets-cache
107
+
108
+ # Scrapy stuff:
109
+ .scrapy
110
+
111
+ # Sphinx documentation
112
+ docs/_build/
113
+
114
+ # PyBuilder
115
+ target/
116
+
117
+ # Jupyter Notebook
118
+ .ipynb_checkpoints
119
+
120
+ # IPython
121
+ profile_default/
122
+ ipython_config.py
123
+
124
+ # pyenv
125
+ .python-version
126
+
127
+ # pipenv
128
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
129
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
130
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
131
+ # install all needed dependencies.
132
+ #Pipfile.lock
133
+
134
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
135
+ __pypackages__/
136
+
137
+ # Celery stuff
138
+ celerybeat-schedule
139
+ celerybeat.pid
140
+
141
+ # SageMath parsed files
142
+ *.sage.py
143
+
144
+ # Spyder project settings
145
+ .spyderproject
146
+ .spyproject
147
+
148
+ # Rope project settings
149
+ .ropeproject
150
+
151
+ # mkdocs documentation
152
+ /site
153
+
154
+ # mypy
155
+ .mypy_cache/
156
+ .dmypy.json
157
+ dmypy.json
158
+
159
+ # Pyre type checker
160
+ .pyre/
.gitattributes CHANGED
@@ -1,59 +1,31 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mds filter=lfs diff=lfs merge=lfs -text
13
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
- *.model filter=lfs diff=lfs merge=lfs -text
15
- *.msgpack filter=lfs diff=lfs merge=lfs -text
16
- *.npy filter=lfs diff=lfs merge=lfs -text
17
- *.npz filter=lfs diff=lfs merge=lfs -text
18
- *.onnx filter=lfs diff=lfs merge=lfs -text
19
- *.ot filter=lfs diff=lfs merge=lfs -text
20
- *.parquet filter=lfs diff=lfs merge=lfs -text
21
- *.pb filter=lfs diff=lfs merge=lfs -text
22
- *.pickle filter=lfs diff=lfs merge=lfs -text
23
- *.pkl filter=lfs diff=lfs merge=lfs -text
24
- *.pt filter=lfs diff=lfs merge=lfs -text
25
- *.pth filter=lfs diff=lfs merge=lfs -text
26
- *.rar filter=lfs diff=lfs merge=lfs -text
27
  *.safetensors filter=lfs diff=lfs merge=lfs -text
28
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
- *.tar.* filter=lfs diff=lfs merge=lfs -text
30
- *.tar filter=lfs diff=lfs merge=lfs -text
31
- *.tflite filter=lfs diff=lfs merge=lfs -text
32
- *.tgz filter=lfs diff=lfs merge=lfs -text
33
- *.wasm filter=lfs diff=lfs merge=lfs -text
34
- *.xz filter=lfs diff=lfs merge=lfs -text
35
- *.zip filter=lfs diff=lfs merge=lfs -text
36
- *.zst filter=lfs diff=lfs merge=lfs -text
37
- *tfevents* filter=lfs diff=lfs merge=lfs -text
38
- # Audio files - uncompressed
39
- *.pcm filter=lfs diff=lfs merge=lfs -text
40
- *.sam filter=lfs diff=lfs merge=lfs -text
41
- *.raw filter=lfs diff=lfs merge=lfs -text
42
- # Audio files - compressed
43
- *.aac filter=lfs diff=lfs merge=lfs -text
44
- *.flac filter=lfs diff=lfs merge=lfs -text
45
- *.mp3 filter=lfs diff=lfs merge=lfs -text
46
- *.ogg filter=lfs diff=lfs merge=lfs -text
47
- *.wav filter=lfs diff=lfs merge=lfs -text
48
- # Image files - uncompressed
49
- *.bmp filter=lfs diff=lfs merge=lfs -text
50
- *.gif filter=lfs diff=lfs merge=lfs -text
51
- *.png filter=lfs diff=lfs merge=lfs -text
52
- *.tiff filter=lfs diff=lfs merge=lfs -text
53
- # Image files - compressed
54
- *.jpg filter=lfs diff=lfs merge=lfs -text
55
- *.jpeg filter=lfs diff=lfs merge=lfs -text
56
- *.webp filter=lfs diff=lfs merge=lfs -text
57
- # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
- *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ *.memmap filter=lfs diff=lfs merge=lfs -text
15
+ *.stl filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
16
  *.safetensors filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  *.mp4 filter=lfs diff=lfs merge=lfs -text
18
+ *.arrow filter=lfs diff=lfs merge=lfs -text
19
+ *.json !text !filter !merge !diff
20
+ tests/artifacts/cameras/*.png filter=lfs diff=lfs merge=lfs -text
21
+ *.bag filter=lfs diff=lfs merge=lfs -text
22
+ media/gym/aloha_act.gif filter=lfs diff=lfs merge=lfs -text
23
+ media/gym/pusht_diffusion.gif filter=lfs diff=lfs merge=lfs -text
24
+ media/gym/simxarm_tdmpc.gif filter=lfs diff=lfs merge=lfs -text
25
+ media/lekiwi/kiwi.webp filter=lfs diff=lfs merge=lfs -text
26
+ media/lerobot-logo-light.png filter=lfs diff=lfs merge=lfs -text
27
+ media/lerobot-logo-thumbnail.png filter=lfs diff=lfs merge=lfs -text
28
+ media/so100/leader_follower.webp filter=lfs diff=lfs merge=lfs -text
29
+ media/so101/so101-leader.webp filter=lfs diff=lfs merge=lfs -text
30
+ media/so101/so101.webp filter=lfs diff=lfs merge=lfs -text
31
+ media/wandb.png filter=lfs diff=lfs merge=lfs -text
.github/ISSUE_TEMPLATE/bug-report.yml ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ name: "\U0001F41B Bug Report"
16
+ description: Submit a bug report to help us improve LeRobot
17
+ body:
18
+ - type: markdown
19
+ attributes:
20
+ value: |
21
+ Thanks for taking the time to submit a bug report! 🐛
22
+ If this is not a bug related to the LeRobot library directly, but instead a general question about your code or the library specifically please use our [discord](https://discord.gg/s3KuuzsPFb).
23
+
24
+ - type: textarea
25
+ id: system-info
26
+ attributes:
27
+ label: System Info
28
+ description: If needed, you can share your lerobot configuration with us by running `python -m lerobot.scripts.display_sys_info` and copy-pasting its outputs below
29
+ render: Shell
30
+ placeholder: lerobot version, OS, python version, numpy version, torch version, and lerobot's configuration
31
+ validations:
32
+ required: true
33
+
34
+ - type: checkboxes
35
+ id: information-scripts-examples
36
+ attributes:
37
+ label: Information
38
+ description: 'The problem arises when using:'
39
+ options:
40
+ - label: "One of the scripts in the examples/ folder of LeRobot"
41
+ - label: "My own task or dataset (give details below)"
42
+
43
+ - type: textarea
44
+ id: reproduction
45
+ validations:
46
+ required: true
47
+ attributes:
48
+ label: Reproduction
49
+ description: |
50
+ If needed, provide a simple code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.
51
+ Sharing error messages or stack traces could be useful as well!
52
+ Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
53
+ Try to avoid screenshots, as they are hard to read and don't allow copy-and-pasting.
54
+
55
+ placeholder: |
56
+ Steps to reproduce the behavior:
57
+
58
+ 1.
59
+ 2.
60
+ 3.
61
+
62
+ - type: textarea
63
+ id: expected-behavior
64
+ validations:
65
+ required: true
66
+ attributes:
67
+ label: Expected behavior
68
+ description: "A clear and concise description of what you would expect to happen."
.github/PULL_REQUEST_TEMPLATE.md ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## What this does
2
+ Explain what this PR does. Feel free to tag your PR with the appropriate label(s).
3
+
4
+ Examples:
5
+ | Title | Label |
6
+ |----------------------|-----------------|
7
+ | Fixes #[issue] | (🐛 Bug) |
8
+ | Adds new dataset | (🗃️ Dataset) |
9
+ | Optimizes something | (⚡️ Performance) |
10
+
11
+ ## How it was tested
12
+ Explain/show how you tested your changes.
13
+
14
+ Examples:
15
+ - Added `test_something` in `tests/test_stuff.py`.
16
+ - Added `new_feature` and checked that training converges with policy X on dataset/environment Y.
17
+ - Optimized `some_function`, it now runs X times faster than previously.
18
+
19
+ ## How to checkout & try? (for the reviewer)
20
+ Provide a simple way for the reviewer to try out your changes.
21
+
22
+ Examples:
23
+ ```bash
24
+ pytest -sx tests/test_stuff.py::test_something
25
+ ```
26
+ ```bash
27
+ python lerobot/scripts/train.py --some.option=true
28
+ ```
29
+
30
+ ## SECTION TO REMOVE BEFORE SUBMITTING YOUR PR
31
+ **Note**: Anyone in the community is free to review the PR once the tests have passed. Feel free to tag
32
+ members/contributors who may be interested in your PR. Try to avoid tagging more than 3 people.
33
+
34
+ **Note**: Before submitting this PR, please read the [contributor guideline](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md#submitting-a-pull-request-pr).
.github/workflows/build-docker-images.yml ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Inspired by
16
+ # https://github.com/huggingface/peft/blob/main/.github/workflows/build_docker_images.yml
17
+ name: Builds
18
+
19
+ on:
20
+ workflow_dispatch:
21
+ workflow_call:
22
+ schedule:
23
+ - cron: "0 1 * * *"
24
+
25
+ permissions: {}
26
+
27
+ env:
28
+ PYTHON_VERSION: "3.10"
29
+
30
+ jobs:
31
+ latest-cpu:
32
+ name: CPU
33
+ runs-on:
34
+ group: aws-general-8-plus
35
+ steps:
36
+ - name: Install Git LFS
37
+ run: |
38
+ sudo apt-get update
39
+ sudo apt-get install git-lfs
40
+ git lfs install
41
+
42
+ - name: Set up Docker Buildx
43
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
44
+ with:
45
+ cache-binary: false
46
+
47
+ - name: Check out code
48
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
49
+ with:
50
+ lfs: true
51
+ persist-credentials: false
52
+
53
+ - name: Login to DockerHub
54
+ uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
55
+ with:
56
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
57
+ password: ${{ secrets.DOCKERHUB_PASSWORD }}
58
+
59
+ - name: Build and Push CPU
60
+ uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
61
+ with:
62
+ context: .
63
+ file: ./docker/lerobot-cpu/Dockerfile
64
+ push: true
65
+ tags: huggingface/lerobot-cpu
66
+ build-args: PYTHON_VERSION=${{ env.PYTHON_VERSION }}
67
+
68
+
69
+ latest-cuda:
70
+ name: GPU
71
+ runs-on:
72
+ group: aws-general-8-plus
73
+ steps:
74
+ - name: Install Git LFS
75
+ run: |
76
+ sudo apt-get update
77
+ sudo apt-get install git-lfs
78
+ git lfs install
79
+
80
+ - name: Set up Docker Buildx
81
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
82
+ with:
83
+ cache-binary: false
84
+
85
+ - name: Check out code
86
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
87
+ with:
88
+ lfs: true
89
+ persist-credentials: false
90
+
91
+ - name: Login to DockerHub
92
+ uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
93
+ with:
94
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
95
+ password: ${{ secrets.DOCKERHUB_PASSWORD }}
96
+
97
+ - name: Build and Push GPU
98
+ uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
99
+ with:
100
+ context: .
101
+ file: ./docker/lerobot-gpu/Dockerfile
102
+ push: true
103
+ tags: huggingface/lerobot-gpu
104
+ build-args: PYTHON_VERSION=${{ env.PYTHON_VERSION }}
105
+
106
+
107
+ latest-cuda-dev:
108
+ name: GPU Dev
109
+ runs-on:
110
+ group: aws-general-8-plus
111
+ steps:
112
+ - name: Set up Docker Buildx
113
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
114
+ with:
115
+ cache-binary: false
116
+
117
+ - name: Check out code
118
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
119
+ with:
120
+ persist-credentials: false
121
+
122
+ - name: Login to DockerHub
123
+ uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
124
+ with:
125
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
126
+ password: ${{ secrets.DOCKERHUB_PASSWORD }}
127
+
128
+ - name: Build and Push GPU dev
129
+ uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
130
+ with:
131
+ context: .
132
+ file: ./docker/lerobot-gpu-dev/Dockerfile
133
+ push: true
134
+ tags: huggingface/lerobot-gpu:dev
135
+ build-args: PYTHON_VERSION=${{ env.PYTHON_VERSION }}
.github/workflows/build_documentation.yml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Build documentation
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ push:
6
+ paths:
7
+ - "docs/**"
8
+ branches:
9
+ - main
10
+ - doc-builder*
11
+ - v*-release
12
+
13
+
14
+ jobs:
15
+ build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
16
+ uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
17
+ with:
18
+ commit_sha: ${{ github.sha }}
19
+ package: lerobot
20
+ additional_args: --not_python_module
21
+ secrets:
22
+ token: ${{ secrets.HUGGINGFACE_PUSH }}
23
+ hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
.github/workflows/build_pr_documentation.yml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Build PR Documentation
2
+
3
+ on:
4
+ pull_request:
5
+ paths:
6
+ - "docs/**"
7
+
8
+ concurrency:
9
+ group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
10
+ cancel-in-progress: true
11
+
12
+ jobs:
13
+ build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
14
+ uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
15
+ with:
16
+ commit_sha: ${{ github.event.pull_request.head.sha }}
17
+ pr_number: ${{ github.event.number }}
18
+ package: lerobot
19
+ additional_args: --not_python_module
.github/workflows/nightly-tests.yml ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Inspired by
16
+ # https://github.com/huggingface/peft/blob/main/.github/workflows/nightly.yml
17
+ name: Nightly
18
+
19
+ on:
20
+ workflow_dispatch:
21
+ schedule:
22
+ - cron: "0 2 * * *"
23
+
24
+ permissions: {}
25
+
26
+ # env:
27
+ # SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
28
+ jobs:
29
+ run_all_tests_cpu:
30
+ name: CPU
31
+ strategy:
32
+ fail-fast: false
33
+ runs-on:
34
+ group: aws-general-8-plus
35
+ container:
36
+ image: huggingface/lerobot-cpu:latest # zizmor: ignore[unpinned-images]
37
+ options: --shm-size "16gb"
38
+ credentials:
39
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
40
+ password: ${{ secrets.DOCKERHUB_PASSWORD }}
41
+ defaults:
42
+ run:
43
+ shell: bash
44
+ working-directory: /lerobot
45
+ steps:
46
+ - name: Tests
47
+ run: pytest -v --cov=./lerobot --disable-warnings tests
48
+
49
+ - name: Tests end-to-end
50
+ run: make test-end-to-end
51
+
52
+
53
+ run_all_tests_single_gpu:
54
+ name: GPU
55
+ strategy:
56
+ fail-fast: false
57
+ runs-on:
58
+ group: aws-g6-4xlarge-plus
59
+ env:
60
+ CUDA_VISIBLE_DEVICES: "0"
61
+ TEST_TYPE: "single_gpu"
62
+ container:
63
+ image: huggingface/lerobot-gpu:latest # zizmor: ignore[unpinned-images]
64
+ options: --gpus all --shm-size "16gb"
65
+ credentials:
66
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
67
+ password: ${{ secrets.DOCKERHUB_PASSWORD }}
68
+ defaults:
69
+ run:
70
+ shell: bash
71
+ working-directory: /lerobot
72
+ steps:
73
+ - name: Nvidia-smi
74
+ run: nvidia-smi
75
+
76
+ - name: Test
77
+ run: pytest -v --cov=./lerobot --cov-report=xml --disable-warnings tests
78
+ # TODO(aliberts): Link with HF Codecov account
79
+ # - name: Upload coverage reports to Codecov with GitHub Action
80
+ # uses: codecov/codecov-action@v4
81
+ # with:
82
+ # files: ./coverage.xml
83
+ # verbose: true
84
+ - name: Tests end-to-end
85
+ env:
86
+ DEVICE: cuda
87
+ run: make test-end-to-end
88
+
89
+ # - name: Generate Report
90
+ # if: always()
91
+ # run: |
92
+ # pip install slack_sdk tabulate
93
+ # python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
.github/workflows/quality.yml ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ name: Quality
16
+
17
+ on:
18
+ workflow_dispatch:
19
+ workflow_call:
20
+ pull_request:
21
+ push:
22
+ branches:
23
+ - main
24
+
25
+ permissions: {}
26
+
27
+ env:
28
+ PYTHON_VERSION: "3.10"
29
+
30
+ jobs:
31
+ style:
32
+ name: Style
33
+ runs-on: ubuntu-latest
34
+ steps:
35
+ - name: Checkout Repository
36
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
37
+ with:
38
+ persist-credentials: false
39
+
40
+ - name: Set up Python
41
+ uses: actions/setup-python@7f4fc3e22c37d6ff65e88745f38bd3157c663f7c # v4.9.1
42
+ with:
43
+ python-version: ${{ env.PYTHON_VERSION }}
44
+
45
+ - name: Get Ruff Version from pre-commit-config.yaml
46
+ id: get-ruff-version
47
+ run: |
48
+ RUFF_VERSION=$(awk '/repo: https:\/\/github.com\/astral-sh\/ruff-pre-commit/{flag=1;next}/rev:/{if(flag){print $2;exit}}' .pre-commit-config.yaml)
49
+ echo "ruff_version=${RUFF_VERSION}" >> $GITHUB_OUTPUT
50
+
51
+ - name: Install Ruff
52
+ env:
53
+ RUFF_VERSION: ${{ steps.get-ruff-version.outputs.ruff_version }}
54
+ run: python -m pip install "ruff==${RUFF_VERSION}"
55
+
56
+ - name: Ruff check
57
+ run: ruff check --output-format=github
58
+
59
+ - name: Ruff format
60
+ run: ruff format --diff
61
+
62
+ typos:
63
+ name: Typos
64
+ runs-on: ubuntu-latest
65
+ steps:
66
+ - name: Checkout Repository
67
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
68
+ with:
69
+ persist-credentials: false
70
+
71
+ - name: typos-action
72
+ uses: crate-ci/typos@db35ee91e80fbb447f33b0e5fbddb24d2a1a884f # v1.29.10
.github/workflows/test-docker-build.yml ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Inspired by
16
+ # https://github.com/huggingface/peft/blob/main/.github/workflows/test-docker-build.yml
17
+ name: Test Dockerfiles
18
+
19
+ on:
20
+ pull_request:
21
+ paths:
22
+ # Run only when DockerFile files are modified
23
+ - "docker/**"
24
+
25
+ permissions: {}
26
+
27
+ env:
28
+ PYTHON_VERSION: "3.10"
29
+
30
+ jobs:
31
+ get_changed_files:
32
+ name: Detect modified Dockerfiles
33
+ runs-on: ubuntu-latest
34
+ outputs:
35
+ matrix: ${{ steps.set-matrix.outputs.matrix }}
36
+ steps:
37
+ - name: Check out code
38
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
39
+ with:
40
+ persist-credentials: false
41
+
42
+ - name: Get changed files
43
+ id: changed-files
44
+ uses: tj-actions/changed-files@3f54ebb830831fc121d3263c1857cfbdc310cdb9 #v42
45
+ with:
46
+ files: docker/**
47
+ json: "true"
48
+
49
+ - name: Run step if only the files listed above change # zizmor: ignore[template-injection]
50
+ if: steps.changed-files.outputs.any_changed == 'true'
51
+ id: set-matrix
52
+ run: |
53
+ echo "matrix=${{ steps.changed-files.outputs.all_changed_files}}" >> $GITHUB_OUTPUT
54
+
55
+ build_modified_dockerfiles:
56
+ name: Build modified Docker images
57
+ needs: get_changed_files
58
+ runs-on:
59
+ group: aws-general-8-plus
60
+ if: needs.get_changed_files.outputs.matrix != ''
61
+ strategy:
62
+ fail-fast: false
63
+ matrix:
64
+ docker-file: ${{ fromJson(needs.get_changed_files.outputs.matrix) }}
65
+ steps:
66
+ - name: Set up Docker Buildx
67
+ uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
68
+ with:
69
+ cache-binary: false
70
+
71
+ - name: Check out code
72
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
73
+ with:
74
+ persist-credentials: false
75
+
76
+ - name: Build Docker image
77
+ uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5.4.0
78
+ with:
79
+ file: ${{ matrix.docker-file }}
80
+ context: .
81
+ push: False
82
+ build-args: PYTHON_VERSION=${{ env.PYTHON_VERSION }}
.github/workflows/test.yml ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ name: Tests
16
+
17
+ on:
18
+ pull_request:
19
+ paths:
20
+ - "lerobot/**"
21
+ - "tests/**"
22
+ - "examples/**"
23
+ - ".github/**"
24
+ - "pyproject.toml"
25
+ - ".pre-commit-config.yaml"
26
+ - "Makefile"
27
+ - ".cache/**"
28
+ push:
29
+ branches:
30
+ - main
31
+ paths:
32
+ - "lerobot/**"
33
+ - "tests/**"
34
+ - "examples/**"
35
+ - ".github/**"
36
+ - "pyproject.toml"
37
+ - ".pre-commit-config.yaml"
38
+ - "Makefile"
39
+ - ".cache/**"
40
+
41
+ permissions: {}
42
+
43
+ env:
44
+ UV_VERSION: "0.6.0"
45
+
46
+ jobs:
47
+ pytest:
48
+ name: Pytest
49
+ runs-on: ubuntu-latest
50
+ env:
51
+ MUJOCO_GL: egl
52
+ steps:
53
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
54
+ with:
55
+ lfs: true # Ensure LFS files are pulled
56
+ persist-credentials: false
57
+
58
+ - name: Install apt dependencies
59
+ # portaudio19-dev is needed to install pyaudio
60
+ run: |
61
+ sudo apt-get update && \
62
+ sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev
63
+
64
+ - name: Install uv and python
65
+ uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
66
+ with:
67
+ enable-cache: true
68
+ version: ${{ env.UV_VERSION }}
69
+ python-version: "3.10"
70
+
71
+ - name: Install lerobot (all extras)
72
+ run: uv sync --all-extras
73
+
74
+ - name: Test with pytest
75
+ run: |
76
+ uv run pytest tests -v --cov=./lerobot --durations=0 \
77
+ -W ignore::DeprecationWarning:imageio_ffmpeg._utils:7 \
78
+ -W ignore::UserWarning:torch.utils.data.dataloader:558 \
79
+ -W ignore::UserWarning:gymnasium.utils.env_checker:247 \
80
+ && rm -rf tests/outputs outputs
81
+
82
+ pytest-minimal:
83
+ name: Pytest (minimal install)
84
+ runs-on: ubuntu-latest
85
+ env:
86
+ MUJOCO_GL: egl
87
+ steps:
88
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
89
+ with:
90
+ lfs: true # Ensure LFS files are pulled
91
+ persist-credentials: false
92
+
93
+ - name: Install apt dependencies
94
+ run: sudo apt-get update && sudo apt-get install -y ffmpeg
95
+
96
+ - name: Install uv and python
97
+ uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
98
+ with:
99
+ enable-cache: true
100
+ version: ${{ env.UV_VERSION }}
101
+ python-version: "3.10"
102
+
103
+ - name: Install lerobot
104
+ run: uv sync --extra "test"
105
+
106
+ - name: Test with pytest
107
+ run: |
108
+ uv run pytest tests -v --cov=./lerobot --durations=0 \
109
+ -W ignore::DeprecationWarning:imageio_ffmpeg._utils:7 \
110
+ -W ignore::UserWarning:torch.utils.data.dataloader:558 \
111
+ -W ignore::UserWarning:gymnasium.utils.env_checker:247 \
112
+ && rm -rf tests/outputs outputs
113
+
114
+ end-to-end:
115
+ name: End-to-end
116
+ runs-on: ubuntu-latest
117
+ env:
118
+ MUJOCO_GL: egl
119
+ steps:
120
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
121
+ with:
122
+ lfs: true # Ensure LFS files are pulled
123
+ persist-credentials: false
124
+
125
+ - name: Install apt dependencies
126
+ # portaudio19-dev is needed to install pyaudio
127
+ run: |
128
+ sudo apt-get update && \
129
+ sudo apt-get install -y libegl1-mesa-dev ffmpeg portaudio19-dev
130
+
131
+ - name: Install uv and python
132
+ uses: astral-sh/setup-uv@d4b2f3b6ecc6e67c4457f6d3e41ec42d3d0fcb86 # v5.4.2
133
+ with:
134
+ enable-cache: true
135
+ version: ${{ env.UV_VERSION }}
136
+ python-version: "3.10"
137
+
138
+ - name: Install lerobot (all extras)
139
+ run: |
140
+ uv venv
141
+ uv sync --all-extras
142
+
143
+ - name: venv
144
+ run: |
145
+ echo "PYTHON_PATH=${{ github.workspace }}/.venv/bin/python" >> $GITHUB_ENV
146
+
147
+ - name: Test end-to-end
148
+ run: |
149
+ make test-end-to-end \
150
+ && rm -rf outputs
.github/workflows/trufflehog.yml ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ on:
16
+ push:
17
+
18
+ name: Secret Leaks
19
+
20
+ permissions: {}
21
+
22
+ jobs:
23
+ trufflehog:
24
+ runs-on: ubuntu-latest
25
+ steps:
26
+ - name: Checkout code
27
+ uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
28
+ with:
29
+ fetch-depth: 0
30
+ persist-credentials: false
31
+
32
+ - name: Secret Scanning
33
+ uses: trufflesecurity/trufflehog@90694bf9af66e7536abc5824e7a87246dbf933cb # v3.88.35
34
+ with:
35
+ extra_args: --only-verified
.github/workflows/upload_pr_documentation.yml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Upload PR Documentation
2
+
3
+ on: # zizmor: ignore[dangerous-triggers] We follow the same pattern as in Transformers
4
+ workflow_run:
5
+ workflows: [ "Build PR Documentation" ]
6
+ types:
7
+ - completed
8
+
9
+ jobs:
10
+ build: # zizmor: ignore[excessive-permissions] We follow the same pattern as in Transformers
11
+ uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
12
+ with:
13
+ package_name: lerobot
14
+ secrets:
15
+ hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
16
+ comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
.gitignore ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Dev scripts
16
+ .dev
17
+
18
+ # Logging
19
+ logs
20
+ tmp
21
+ wandb
22
+
23
+ # Data
24
+ data
25
+ outputs
26
+
27
+ # Apple
28
+ .DS_Store
29
+
30
+ # VS Code
31
+ .vscode
32
+
33
+ # HPC
34
+ nautilus/*.yaml
35
+ *.key
36
+
37
+ # Slurm
38
+ sbatch*.sh
39
+
40
+ # Byte-compiled / optimized / DLL files
41
+ __pycache__/
42
+ *.py[cod]
43
+ *$py.class
44
+
45
+ # C extensions
46
+ *.so
47
+
48
+ # Distribution / packaging
49
+ .Python
50
+ build/
51
+ develop-eggs/
52
+ dist/
53
+ downloads/
54
+ eggs/
55
+ .eggs/
56
+ lib/
57
+ lib64/
58
+ parts/
59
+ sdist/
60
+ var/
61
+ wheels/
62
+ pip-wheel-metadata/
63
+ share/python-wheels/
64
+ *.egg-info/
65
+ .installed.cfg
66
+ *.egg
67
+ MANIFEST
68
+
69
+ # uv/poetry lock files
70
+ poetry.lock
71
+ uv.lock
72
+
73
+ # PyInstaller
74
+ # Usually these files are written by a python script from a template
75
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
76
+ *.manifest
77
+ *.spec
78
+
79
+ # Installer logs
80
+ pip-log.txt
81
+ pip-delete-this-directory.txt
82
+
83
+ # Unit test / coverage reports
84
+ !tests/artifacts
85
+ htmlcov/
86
+ .tox/
87
+ .nox/
88
+ .coverage
89
+ .coverage.*
90
+ nosetests.xml
91
+ coverage.xml
92
+ *.cover
93
+ *.py,cover
94
+ .hypothesis/
95
+ .pytest_cache/
96
+
97
+ # Ignore .cache
98
+ .cache/*
99
+
100
+ # Translations
101
+ *.mo
102
+ *.pot
103
+
104
+ # Django stuff:
105
+ *.log
106
+ local_settings.py
107
+ db.sqlite3
108
+ db.sqlite3-journal
109
+
110
+ # Flask stuff:
111
+ instance/
112
+ .webassets-cache
113
+
114
+ # Scrapy stuff:
115
+ .scrapy
116
+
117
+ # Sphinx documentation
118
+ docs/_build/
119
+
120
+ # PyBuilder
121
+ .pybuilder/
122
+ target/
123
+
124
+ # Jupyter Notebook
125
+ .ipynb_checkpoints
126
+
127
+ # IPython
128
+ profile_default/
129
+ ipython_config.py
130
+
131
+ # pyenv
132
+ .python-version
133
+
134
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
135
+ __pypackages__/
136
+
137
+ # Celery stuff
138
+ celerybeat-schedule
139
+ celerybeat.pid
140
+
141
+ # SageMath parsed files
142
+ *.sage.py
143
+
144
+ # Environments
145
+ .env
146
+ .venv
147
+ env/
148
+ venv/
149
+ env.bak/
150
+ venv.bak/
151
+
152
+ # Spyder project settings
153
+ .spyderproject
154
+ .spyproject
155
+
156
+ # Rope project settings
157
+ .ropeproject
158
+
159
+ # mkdocs documentation
160
+ /site
161
+
162
+ # mypy
163
+ .mypy_cache/
164
+ .dmypy.json
165
+ dmypy.json
166
+
167
+ # Pyre type checker
168
+ .pyre/
169
+
170
+ # pytype static type analyzer
171
+ .pytype/
172
+
173
+ # Cython debug symbols
174
+ cython_debug/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ exclude: "tests/artifacts/.*\\.safetensors$"
16
+ default_language_version:
17
+ python: python3.10
18
+ repos:
19
+ ##### Meta #####
20
+ - repo: meta
21
+ hooks:
22
+ - id: check-useless-excludes
23
+ - id: check-hooks-apply
24
+
25
+
26
+ ##### Style / Misc. #####
27
+ - repo: https://github.com/pre-commit/pre-commit-hooks
28
+ rev: v5.0.0
29
+ hooks:
30
+ - id: check-added-large-files
31
+ - id: debug-statements
32
+ - id: check-merge-conflict
33
+ - id: check-case-conflict
34
+ - id: check-yaml
35
+ - id: check-toml
36
+ - id: end-of-file-fixer
37
+ - id: trailing-whitespace
38
+
39
+ - repo: https://github.com/adhtruong/mirrors-typos
40
+ rev: v1.33.1
41
+ hooks:
42
+ - id: typos
43
+ args: [--force-exclude]
44
+
45
+ - repo: https://github.com/asottile/pyupgrade
46
+ rev: v3.20.0
47
+ hooks:
48
+ - id: pyupgrade
49
+
50
+ - repo: https://github.com/astral-sh/ruff-pre-commit
51
+ rev: v0.11.13
52
+ hooks:
53
+ - id: ruff
54
+ args: [--fix]
55
+ - id: ruff-format
56
+
57
+
58
+ ##### Security #####
59
+ - repo: https://github.com/gitleaks/gitleaks
60
+ rev: v8.27.2
61
+ hooks:
62
+ - id: gitleaks
63
+
64
+ - repo: https://github.com/woodruffw/zizmor-pre-commit
65
+ rev: v1.9.0
66
+ hooks:
67
+ - id: zizmor
68
+
69
+ - repo: https://github.com/PyCQA/bandit
70
+ rev: 1.8.3
71
+ hooks:
72
+ - id: bandit
73
+ args: ["-c", "pyproject.toml"]
74
+ additional_dependencies: ["bandit[toml]"]
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # Contributor Covenant Code of Conduct
3
+
4
+ ## Our Pledge
5
+
6
+ We as members, contributors, and leaders pledge to make participation in our
7
+ community a harassment-free experience for everyone, regardless of age, body
8
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
9
+ identity and expression, level of experience, education, socio-economic status,
10
+ nationality, personal appearance, race, caste, color, religion, or sexual
11
+ identity and orientation.
12
+
13
+ We pledge to act and interact in ways that contribute to an open, welcoming,
14
+ diverse, inclusive, and healthy community.
15
+
16
+ ## Our Standards
17
+
18
+ Examples of behavior that contributes to a positive environment for our
19
+ community include:
20
+
21
+ * Demonstrating empathy and kindness toward other people
22
+ * Being respectful of differing opinions, viewpoints, and experiences
23
+ * Giving and gracefully accepting constructive feedback
24
+ * Accepting responsibility and apologizing to those affected by our mistakes,
25
+ and learning from the experience
26
+ * Focusing on what is best not just for us as individuals, but for the overall
27
+ community
28
+
29
+ Examples of unacceptable behavior include:
30
+
31
+ * The use of sexualized language or imagery, and sexual attention or advances of
32
+ any kind
33
+ * Trolling, insulting or derogatory comments, and personal or political attacks
34
+ * Public or private harassment
35
+ * Publishing others' private information, such as a physical or email address,
36
+ without their explicit permission
37
+ * Other conduct which could reasonably be considered inappropriate in a
38
+ professional setting
39
+
40
+ ## Enforcement Responsibilities
41
+
42
+ Community leaders are responsible for clarifying and enforcing our standards of
43
+ acceptable behavior and will take appropriate and fair corrective action in
44
+ response to any behavior that they deem inappropriate, threatening, offensive,
45
+ or harmful.
46
+
47
+ Community leaders have the right and responsibility to remove, edit, or reject
48
+ comments, commits, code, wiki edits, issues, and other contributions that are
49
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
50
+ decisions when appropriate.
51
+
52
+ ## Scope
53
+
54
+ This Code of Conduct applies within all community spaces, and also applies when
55
+ an individual is officially representing the community in public spaces.
56
+ Examples of representing our community include using an official email address,
57
+ posting via an official social media account, or acting as an appointed
58
+ representative at an online or offline event.
59
+
60
+ ## Enforcement
61
+
62
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
63
+ reported to the community leaders responsible for enforcement at
64
+ [feedback@huggingface.co](mailto:feedback@huggingface.co).
65
+ All complaints will be reviewed and investigated promptly and fairly.
66
+
67
+ All community leaders are obligated to respect the privacy and security of the
68
+ reporter of any incident.
69
+
70
+ ## Enforcement Guidelines
71
+
72
+ Community leaders will follow these Community Impact Guidelines in determining
73
+ the consequences for any action they deem in violation of this Code of Conduct:
74
+
75
+ ### 1. Correction
76
+
77
+ **Community Impact**: Use of inappropriate language or other behavior deemed
78
+ unprofessional or unwelcome in the community.
79
+
80
+ **Consequence**: A private, written warning from community leaders, providing
81
+ clarity around the nature of the violation and an explanation of why the
82
+ behavior was inappropriate. A public apology may be requested.
83
+
84
+ ### 2. Warning
85
+
86
+ **Community Impact**: A violation through a single incident or series of
87
+ actions.
88
+
89
+ **Consequence**: A warning with consequences for continued behavior. No
90
+ interaction with the people involved, including unsolicited interaction with
91
+ those enforcing the Code of Conduct, for a specified period of time. This
92
+ includes avoiding interactions in community spaces as well as external channels
93
+ like social media. Violating these terms may lead to a temporary or permanent
94
+ ban.
95
+
96
+ ### 3. Temporary Ban
97
+
98
+ **Community Impact**: A serious violation of community standards, including
99
+ sustained inappropriate behavior.
100
+
101
+ **Consequence**: A temporary ban from any sort of interaction or public
102
+ communication with the community for a specified period of time. No public or
103
+ private interaction with the people involved, including unsolicited interaction
104
+ with those enforcing the Code of Conduct, is allowed during this period.
105
+ Violating these terms may lead to a permanent ban.
106
+
107
+ ### 4. Permanent Ban
108
+
109
+ **Community Impact**: Demonstrating a pattern of violation of community
110
+ standards, including sustained inappropriate behavior, harassment of an
111
+ individual, or aggression toward or disparagement of classes of individuals.
112
+
113
+ **Consequence**: A permanent ban from any sort of public interaction within the
114
+ community.
115
+
116
+ ## Attribution
117
+
118
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
119
+ version 2.1, available at
120
+ [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
121
+
122
+ Community Impact Guidelines were inspired by
123
+ [Mozilla's code of conduct enforcement ladder][Mozilla CoC].
124
+
125
+ For answers to common questions about this code of conduct, see the FAQ at
126
+ [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
127
+ [https://www.contributor-covenant.org/translations][translations].
128
+
129
+ [homepage]: https://www.contributor-covenant.org
130
+ [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
131
+ [Mozilla CoC]: https://github.com/mozilla/diversity
132
+ [FAQ]: https://www.contributor-covenant.org/faq
133
+ [translations]: https://www.contributor-covenant.org/translations
CONTRIBUTING.md ADDED
@@ -0,0 +1,305 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # How to contribute to 🤗 LeRobot?
2
+
3
+ Everyone is welcome to contribute, and we value everybody's contribution. Code
4
+ is thus not the only way to help the community. Answering questions, helping
5
+ others, reaching out and improving the documentations are immensely valuable to
6
+ the community.
7
+
8
+ It also helps us if you spread the word: reference the library from blog posts
9
+ on the awesome projects it made possible, shout out on Twitter when it has
10
+ helped you, or simply ⭐️ the repo to say "thank you".
11
+
12
+ Whichever way you choose to contribute, please be mindful to respect our
13
+ [code of conduct](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md).
14
+
15
+ ## You can contribute in so many ways!
16
+
17
+ Some of the ways you can contribute to 🤗 LeRobot:
18
+ * Fixing outstanding issues with the existing code.
19
+ * Implementing new models, datasets or simulation environments.
20
+ * Contributing to the examples or to the documentation.
21
+ * Submitting issues related to bugs or desired new features.
22
+
23
+ Following the guides below, feel free to open issues and PRs and to coordinate your efforts with the community on our [Discord Channel](https://discord.gg/VjFz58wn3R). For specific inquiries, reach out to [Remi Cadene](mailto:remi.cadene@huggingface.co).
24
+
25
+ If you are not sure how to contribute or want to know the next features we working on, look on this project page: [LeRobot TODO](https://github.com/orgs/huggingface/projects/46)
26
+
27
+ ## Submitting a new issue or feature request
28
+
29
+ Do your best to follow these guidelines when submitting an issue or a feature
30
+ request. It will make it easier for us to come back to you quickly and with good
31
+ feedback.
32
+
33
+ ### Did you find a bug?
34
+
35
+ The 🤗 LeRobot library is robust and reliable thanks to the users who notify us of
36
+ the problems they encounter. So thank you for reporting an issue.
37
+
38
+ First, we would really appreciate it if you could **make sure the bug was not
39
+ already reported** (use the search bar on Github under Issues).
40
+
41
+ Did not find it? :( So we can act quickly on it, please follow these steps:
42
+
43
+ * Include your **OS type and version**, the versions of **Python** and **PyTorch**.
44
+ * A short, self-contained, code snippet that allows us to reproduce the bug in
45
+ less than 30s.
46
+ * The full traceback if an exception is raised.
47
+ * Attach any other additional information, like screenshots, you think may help.
48
+
49
+ ### Do you want a new feature?
50
+
51
+ A good feature request addresses the following points:
52
+
53
+ 1. Motivation first:
54
+ * Is it related to a problem/frustration with the library? If so, please explain
55
+ why. Providing a code snippet that demonstrates the problem is best.
56
+ * Is it related to something you would need for a project? We'd love to hear
57
+ about it!
58
+ * Is it something you worked on and think could benefit the community?
59
+ Awesome! Tell us what problem it solved for you.
60
+ 2. Write a *paragraph* describing the feature.
61
+ 3. Provide a **code snippet** that demonstrates its future use.
62
+ 4. In case this is related to a paper, please attach a link.
63
+ 5. Attach any additional information (drawings, screenshots, etc.) you think may help.
64
+
65
+ If your issue is well written we're already 80% of the way there by the time you
66
+ post it.
67
+
68
+ ## Adding new policies, datasets or environments
69
+
70
+ Look at our implementations for [datasets](./lerobot/common/datasets/), [policies](./lerobot/common/policies/),
71
+ environments ([aloha](https://github.com/huggingface/gym-aloha),
72
+ [xarm](https://github.com/huggingface/gym-xarm),
73
+ [pusht](https://github.com/huggingface/gym-pusht))
74
+ and follow the same api design.
75
+
76
+ When implementing a new dataset loadable with LeRobotDataset follow these steps:
77
+ - Update `available_datasets_per_env` in `lerobot/__init__.py`
78
+
79
+ When implementing a new environment (e.g. `gym_aloha`), follow these steps:
80
+ - Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py`
81
+
82
+ When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps:
83
+ - Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py`
84
+ - Set the required `name` class attribute.
85
+ - Update variables in `tests/test_available.py` by importing your new Policy class
86
+
87
+ ## Submitting a pull request (PR)
88
+
89
+ Before writing code, we strongly advise you to search through the existing PRs or
90
+ issues to make sure that nobody is already working on the same thing. If you are
91
+ unsure, it is always a good idea to open an issue to get some feedback.
92
+
93
+ You will need basic `git` proficiency to be able to contribute to
94
+ 🤗 LeRobot. `git` is not the easiest tool to use but it has the greatest
95
+ manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro
96
+ Git](https://git-scm.com/book/en/v2) is a very good reference.
97
+
98
+ Follow these steps to start contributing:
99
+
100
+ 1. Fork the [repository](https://github.com/huggingface/lerobot) by
101
+ clicking on the 'Fork' button on the repository's page. This creates a copy of the code
102
+ under your GitHub user account.
103
+
104
+ 2. Clone your fork to your local disk, and add the base repository as a remote. The following command
105
+ assumes you have your public SSH key uploaded to GitHub. See the following guide for more
106
+ [information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).
107
+
108
+ ```bash
109
+ git clone git@github.com:<your Github handle>/lerobot.git
110
+ cd lerobot
111
+ git remote add upstream https://github.com/huggingface/lerobot.git
112
+ ```
113
+
114
+ 3. Create a new branch to hold your development changes, and do this for every new PR you work on.
115
+
116
+ Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):
117
+
118
+ ```bash
119
+ git checkout main
120
+ git fetch upstream
121
+ git rebase upstream/main
122
+ ```
123
+
124
+ Once your `main` branch is synchronized, create a new branch from it:
125
+
126
+ ```bash
127
+ git checkout -b a-descriptive-name-for-my-changes
128
+ ```
129
+
130
+ 🚨 **Do not** work on the `main` branch.
131
+
132
+ 4. for development, we advise to use a tool like `poetry` or `uv` instead of just `pip` to easily track our dependencies.
133
+ Follow the instructions to [install poetry](https://python-poetry.org/docs/#installation) (use a version >=2.1.0) or to [install uv](https://docs.astral.sh/uv/getting-started/installation/#installation-methods) if you don't have one of them already.
134
+
135
+ Set up a development environment with conda or miniconda:
136
+ ```bash
137
+ conda create -y -n lerobot-dev python=3.10 && conda activate lerobot-dev
138
+ ```
139
+
140
+ If you're using `uv`, it can manage python versions so you can instead do:
141
+ ```bash
142
+ uv venv --python 3.10 && source .venv/bin/activate
143
+ ```
144
+
145
+ To develop on 🤗 LeRobot, you will at least need to install the `dev` and `test` extras dependencies along with the core library:
146
+
147
+ using `poetry`
148
+ ```bash
149
+ poetry sync --extras "dev test"
150
+ ```
151
+
152
+ using `uv`
153
+ ```bash
154
+ uv sync --extra dev --extra test
155
+ ```
156
+
157
+ You can also install the project with all its dependencies (including environments):
158
+
159
+ using `poetry`
160
+ ```bash
161
+ poetry sync --all-extras
162
+ ```
163
+
164
+ using `uv`
165
+ ```bash
166
+ uv sync --all-extras
167
+ ```
168
+
169
+ > **Note:** If you don't install simulation environments with `--all-extras`, the tests that require them will be skipped when running the pytest suite locally. However, they *will* be tested in the CI. In general, we advise you to install everything and test locally before pushing.
170
+
171
+ Whichever command you chose to install the project (e.g. `poetry sync --all-extras`), run it again when pulling code with an updated `pyproject.toml` or lock file, in order to synchronize your virtual environment with the new dependencies.
172
+
173
+ The equivalent of `pip install some-package` would be:
174
+
175
+ using `poetry`
176
+ ```bash
177
+ poetry add some-package
178
+ ```
179
+
180
+ using `uv`
181
+ ```bash
182
+ uv add some-package
183
+ ```
184
+
185
+ When making changes to the dependency sections of `pyproject.toml`, run the following command to update the lock file.
186
+ using `poetry`
187
+ ```bash
188
+ poetry lock
189
+ ```
190
+
191
+ using `uv`
192
+ ```bash
193
+ uv lock
194
+ ```
195
+
196
+
197
+ 5. Develop the features on your branch.
198
+
199
+ As you work on the features, you should make sure that the test suite
200
+ still passes. You can run the tests impacted by your changes like
201
+ this:
202
+
203
+ ```bash
204
+ pytest tests/<TEST_TO_RUN>.py
205
+ ```
206
+
207
+ 6. Follow our style.
208
+
209
+ `lerobot` relies on `ruff` to format its source code
210
+ consistently. Set up [`pre-commit`](https://pre-commit.com/) to run these checks
211
+ automatically as Git commit hooks.
212
+
213
+ Install `pre-commit` hooks:
214
+ ```bash
215
+ pre-commit install
216
+ ```
217
+
218
+ You can run these hooks manually on staged files whenever you need with:
219
+ ```bash
220
+ pre-commit
221
+ ```
222
+
223
+ Once you're happy with your changes, add changed files using `git add` and
224
+ make a commit with `git commit` to record your changes locally:
225
+
226
+ ```bash
227
+ git add modified_file.py
228
+ git commit
229
+ ```
230
+
231
+ Note: if you have already committed changes with incorrect formatting, you can fix them with:
232
+ ```bash
233
+ pre-commit run --all-files
234
+ ```
235
+
236
+ Please write [good commit messages](https://chris.beams.io/posts/git-commit/).
237
+
238
+ It is a good idea to sync your copy of the code with the original
239
+ repository regularly. This way you can quickly account for changes:
240
+
241
+ ```bash
242
+ git fetch upstream
243
+ git rebase upstream/main
244
+ ```
245
+
246
+ Push the changes to your account using:
247
+
248
+ ```bash
249
+ git push -u origin a-descriptive-name-for-my-changes
250
+ ```
251
+
252
+ 7. Once you are satisfied (**and the checklist below is happy too**), go to the
253
+ webpage of your fork on GitHub. Click on 'Pull request' to send your changes
254
+ to the project maintainers for review.
255
+
256
+ 8. It's ok if maintainers ask you for changes. It happens to core contributors
257
+ too! So that everyone can see the changes in the pull request, work in your local
258
+ branch and push the changes to your fork. They will automatically appear in
259
+ the pull request.
260
+
261
+
262
+ ### Checklist
263
+
264
+ 1. The title of your pull request should be a summary of its contribution;
265
+ 2. If your pull request addresses an issue, please mention the issue number in
266
+ the pull request description to make sure they are linked (and people
267
+ consulting the issue know you are working on it);
268
+ 3. To indicate a work in progress, please prefix the title with `[WIP]`, or preferably mark
269
+ the PR as a draft. This helps avoid duplicated work and differentiates it
270
+ from PRs that are ready to be merged;
271
+ 4. Make sure existing tests pass;
272
+
273
+ ### Tests
274
+
275
+ An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/lerobot/tree/main/tests).
276
+
277
+ Install [git lfs](https://git-lfs.com/) to retrieve test artifacts (if you don't have it already).
278
+
279
+ On Mac:
280
+ ```bash
281
+ brew install git-lfs
282
+ git lfs install
283
+ ```
284
+
285
+ On Ubuntu:
286
+ ```bash
287
+ sudo apt-get install git-lfs
288
+ git lfs install
289
+ ```
290
+
291
+ Pull the artifacts if they're not already in [tests/artifacts](tests/artifacts):
292
+ ```bash
293
+ git lfs pull
294
+ ```
295
+
296
+ We use `pytest` in order to run the tests. From the root of the
297
+ repository, here's how to run tests with `pytest` for the library:
298
+
299
+ ```bash
300
+ python -m pytest -sv ./tests
301
+ ```
302
+
303
+
304
+ You can specify a smaller set of tests in order to test only the feature
305
+ you're working on.
LICENSE ADDED
@@ -0,0 +1,507 @@
1
+ Copyright 2024 The Hugging Face team. All rights reserved.
2
+
3
+ Apache License
4
+ Version 2.0, January 2004
5
+ http://www.apache.org/licenses/
6
+
7
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
8
+
9
+ 1. Definitions.
10
+
11
+ "License" shall mean the terms and conditions for use, reproduction,
12
+ and distribution as defined by Sections 1 through 9 of this document.
13
+
14
+ "Licensor" shall mean the copyright owner or entity authorized by
15
+ the copyright owner that is granting the License.
16
+
17
+ "Legal Entity" shall mean the union of the acting entity and all
18
+ other entities that control, are controlled by, or are under common
19
+ control with that entity. For the purposes of this definition,
20
+ "control" means (i) the power, direct or indirect, to cause the
21
+ direction or management of such entity, whether by contract or
22
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
23
+ outstanding shares, or (iii) beneficial ownership of such entity.
24
+
25
+ "You" (or "Your") shall mean an individual or Legal Entity
26
+ exercising permissions granted by this License.
27
+
28
+ "Source" form shall mean the preferred form for making modifications,
29
+ including but not limited to software source code, documentation
30
+ source, and configuration files.
31
+
32
+ "Object" form shall mean any form resulting from mechanical
33
+ transformation or translation of a Source form, including but
34
+ not limited to compiled object code, generated documentation,
35
+ and conversions to other media types.
36
+
37
+ "Work" shall mean the work of authorship, whether in Source or
38
+ Object form, made available under the License, as indicated by a
39
+ copyright notice that is included in or attached to the work
40
+ (an example is provided in the Appendix below).
41
+
42
+ "Derivative Works" shall mean any work, whether in Source or Object
43
+ form, that is based on (or derived from) the Work and for which the
44
+ editorial revisions, annotations, elaborations, or other modifications
45
+ represent, as a whole, an original work of authorship. For the purposes
46
+ of this License, Derivative Works shall not include works that remain
47
+ separable from, or merely link (or bind by name) to the interfaces of,
48
+ the Work and Derivative Works thereof.
49
+
50
+ "Contribution" shall mean any work of authorship, including
51
+ the original version of the Work and any modifications or additions
52
+ to that Work or Derivative Works thereof, that is intentionally
53
+ submitted to Licensor for inclusion in the Work by the copyright owner
54
+ or by an individual or Legal Entity authorized to submit on behalf of
55
+ the copyright owner. For the purposes of this definition, "submitted"
56
+ means any form of electronic, verbal, or written communication sent
57
+ to the Licensor or its representatives, including but not limited to
58
+ communication on electronic mailing lists, source code control systems,
59
+ and issue tracking systems that are managed by, or on behalf of, the
60
+ Licensor for the purpose of discussing and improving the Work, but
61
+ excluding communication that is conspicuously marked or otherwise
62
+ designated in writing by the copyright owner as "Not a Contribution."
63
+
64
+ "Contributor" shall mean Licensor and any individual or Legal Entity
65
+ on behalf of whom a Contribution has been received by Licensor and
66
+ subsequently incorporated within the Work.
67
+
68
+ 2. Grant of Copyright License. Subject to the terms and conditions of
69
+ this License, each Contributor hereby grants to You a perpetual,
70
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
71
+ copyright license to reproduce, prepare Derivative Works of,
72
+ publicly display, publicly perform, sublicense, and distribute the
73
+ Work and such Derivative Works in Source or Object form.
74
+
75
+ 3. Grant of Patent License. Subject to the terms and conditions of
76
+ this License, each Contributor hereby grants to You a perpetual,
77
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
78
+ (except as stated in this section) patent license to make, have made,
79
+ use, offer to sell, sell, import, and otherwise transfer the Work,
80
+ where such license applies only to those patent claims licensable
81
+ by such Contributor that are necessarily infringed by their
82
+ Contribution(s) alone or by combination of their Contribution(s)
83
+ with the Work to which such Contribution(s) was submitted. If You
84
+ institute patent litigation against any entity (including a
85
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
86
+ or a Contribution incorporated within the Work constitutes direct
87
+ or contributory patent infringement, then any patent licenses
88
+ granted to You under this License for that Work shall terminate
89
+ as of the date such litigation is filed.
90
+
91
+ 4. Redistribution. You may reproduce and distribute copies of the
92
+ Work or Derivative Works thereof in any medium, with or without
93
+ modifications, and in Source or Object form, provided that You
94
+ meet the following conditions:
95
+
96
+ (a) You must give any other recipients of the Work or
97
+ Derivative Works a copy of this License; and
98
+
99
+ (b) You must cause any modified files to carry prominent notices
100
+ stating that You changed the files; and
101
+
102
+ (c) You must retain, in the Source form of any Derivative Works
103
+ that You distribute, all copyright, patent, trademark, and
104
+ attribution notices from the Source form of the Work,
105
+ excluding those notices that do not pertain to any part of
106
+ the Derivative Works; and
107
+
108
+ (d) If the Work includes a "NOTICE" text file as part of its
109
+ distribution, then any Derivative Works that You distribute must
110
+ include a readable copy of the attribution notices contained
111
+ within such NOTICE file, excluding those notices that do not
112
+ pertain to any part of the Derivative Works, in at least one
113
+ of the following places: within a NOTICE text file distributed
114
+ as part of the Derivative Works; within the Source form or
115
+ documentation, if provided along with the Derivative Works; or,
116
+ within a display generated by the Derivative Works, if and
117
+ wherever such third-party notices normally appear. The contents
118
+ of the NOTICE file are for informational purposes only and
119
+ do not modify the License. You may add Your own attribution
120
+ notices within Derivative Works that You distribute, alongside
121
+ or as an addendum to the NOTICE text from the Work, provided
122
+ that such additional attribution notices cannot be construed
123
+ as modifying the License.
124
+
125
+ You may add Your own copyright statement to Your modifications and
126
+ may provide additional or different license terms and conditions
127
+ for use, reproduction, or distribution of Your modifications, or
128
+ for any such Derivative Works as a whole, provided Your use,
129
+ reproduction, and distribution of the Work otherwise complies with
130
+ the conditions stated in this License.
131
+
132
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
133
+ any Contribution intentionally submitted for inclusion in the Work
134
+ by You to the Licensor shall be under the terms and conditions of
135
+ this License, without any additional terms or conditions.
136
+ Notwithstanding the above, nothing herein shall supersede or modify
137
+ the terms of any separate license agreement you may have executed
138
+ with Licensor regarding such Contributions.
139
+
140
+ 6. Trademarks. This License does not grant permission to use the trade
141
+ names, trademarks, service marks, or product names of the Licensor,
142
+ except as required for reasonable and customary use in describing the
143
+ origin of the Work and reproducing the content of the NOTICE file.
144
+
145
+ 7. Disclaimer of Warranty. Unless required by applicable law or
146
+ agreed to in writing, Licensor provides the Work (and each
147
+ Contributor provides its Contributions) on an "AS IS" BASIS,
148
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
149
+ implied, including, without limitation, any warranties or conditions
150
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
151
+ PARTICULAR PURPOSE. You are solely responsible for determining the
152
+ appropriateness of using or redistributing the Work and assume any
153
+ risks associated with Your exercise of permissions under this License.
154
+
155
+ 8. Limitation of Liability. In no event and under no legal theory,
156
+ whether in tort (including negligence), contract, or otherwise,
157
+ unless required by applicable law (such as deliberate and grossly
158
+ negligent acts) or agreed to in writing, shall any Contributor be
159
+ liable to You for damages, including any direct, indirect, special,
160
+ incidental, or consequential damages of any character arising as a
161
+ result of this License or out of the use or inability to use the
162
+ Work (including but not limited to damages for loss of goodwill,
163
+ work stoppage, computer failure or malfunction, or any and all
164
+ other commercial damages or losses), even if such Contributor
165
+ has been advised of the possibility of such damages.
166
+
167
+ 9. Accepting Warranty or Additional Liability. While redistributing
168
+ the Work or Derivative Works thereof, You may choose to offer,
169
+ and charge a fee for, acceptance of support, warranty, indemnity,
170
+ or other liability obligations and/or rights consistent with this
171
+ License. However, in accepting such obligations, You may act only
172
+ on Your own behalf and on Your sole responsibility, not on behalf
173
+ of any other Contributor, and only if You agree to indemnify,
174
+ defend, and hold each Contributor harmless for any liability
175
+ incurred by, or claims asserted against, such Contributor by reason
176
+ of your accepting any such warranty or additional liability.
177
+
178
+ END OF TERMS AND CONDITIONS
179
+
180
+ APPENDIX: How to apply the Apache License to your work.
181
+
182
+ To apply the Apache License to your work, attach the following
183
+ boilerplate notice, with the fields enclosed by brackets "[]"
184
+ replaced with your own identifying information. (Don't include
185
+ the brackets!) The text should be enclosed in the appropriate
186
+ comment syntax for the file format. We also recommend that a
187
+ file or class name and description of purpose be included on the
188
+ same "printed page" as the copyright notice for easier
189
+ identification within third-party archives.
190
+
191
+ Copyright [yyyy] [name of copyright owner]
192
+
193
+ Licensed under the Apache License, Version 2.0 (the "License");
194
+ you may not use this file except in compliance with the License.
195
+ You may obtain a copy of the License at
196
+
197
+ http://www.apache.org/licenses/LICENSE-2.0
198
+
199
+ Unless required by applicable law or agreed to in writing, software
200
+ distributed under the License is distributed on an "AS IS" BASIS,
201
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
202
+ See the License for the specific language governing permissions and
203
+ limitations under the License.
204
+
205
+
206
+ ## Some of lerobot's code is derived from Diffusion Policy, which is subject to the following copyright notice:
207
+
208
+ MIT License
209
+
210
+ Copyright (c) 2023 Columbia Artificial Intelligence and Robotics Lab
211
+
212
+ Permission is hereby granted, free of charge, to any person obtaining a copy
213
+ of this software and associated documentation files (the "Software"), to deal
214
+ in the Software without restriction, including without limitation the rights
215
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
216
+ copies of the Software, and to permit persons to whom the Software is
217
+ furnished to do so, subject to the following conditions:
218
+
219
+ The above copyright notice and this permission notice shall be included in all
220
+ copies or substantial portions of the Software.
221
+
222
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
223
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
224
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
225
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
226
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
227
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
228
+ SOFTWARE.
229
+
230
+
231
+ ## Some of lerobot's code is derived from FOWM, which is subject to the following copyright notice:
232
+
233
+ MIT License
234
+
235
+ Copyright (c) 2023 Yunhai Feng
236
+
237
+ Permission is hereby granted, free of charge, to any person obtaining a copy
238
+ of this software and associated documentation files (the "Software"), to deal
239
+ in the Software without restriction, including without limitation the rights
240
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
241
+ copies of the Software, and to permit persons to whom the Software is
242
+ furnished to do so, subject to the following conditions:
243
+
244
+ The above copyright notice and this permission notice shall be included in all
245
+ copies or substantial portions of the Software.
246
+
247
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
248
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
249
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
250
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
251
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
252
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
253
+ SOFTWARE.
254
+
255
+
256
+ ## Some of lerobot's code is derived from simxarm, which is subject to the following copyright notice:
257
+
258
+ MIT License
259
+
260
+ Copyright (c) 2023 Nicklas Hansen & Yanjie Ze
261
+
262
+ Permission is hereby granted, free of charge, to any person obtaining a copy
263
+ of this software and associated documentation files (the "Software"), to deal
264
+ in the Software without restriction, including without limitation the rights
265
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
266
+ copies of the Software, and to permit persons to whom the Software is
267
+ furnished to do so, subject to the following conditions:
268
+
269
+ The above copyright notice and this permission notice shall be included in all
270
+ copies or substantial portions of the Software.
271
+
272
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
273
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
274
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
275
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
276
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
277
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
278
+ SOFTWARE.
279
+
280
+
281
+ ## Some of lerobot's code is derived from ALOHA, which is subject to the following copyright notice:
282
+
283
+ MIT License
284
+
285
+ Copyright (c) 2023 Tony Z. Zhao
286
+
287
+ Permission is hereby granted, free of charge, to any person obtaining a copy
288
+ of this software and associated documentation files (the "Software"), to deal
289
+ in the Software without restriction, including without limitation the rights
290
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
291
+ copies of the Software, and to permit persons to whom the Software is
292
+ furnished to do so, subject to the following conditions:
293
+
294
+ The above copyright notice and this permission notice shall be included in all
295
+ copies or substantial portions of the Software.
296
+
297
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
298
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
299
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
300
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
301
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
302
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
303
+ SOFTWARE.
304
+
305
+ ## Some of lerobot's code is derived from DETR, which is subject to the following copyright notice:
306
+
307
+ Apache License
308
+ Version 2.0, January 2004
309
+ http://www.apache.org/licenses/
310
+
311
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
312
+
313
+ 1. Definitions.
314
+
315
+ "License" shall mean the terms and conditions for use, reproduction,
316
+ and distribution as defined by Sections 1 through 9 of this document.
317
+
318
+ "Licensor" shall mean the copyright owner or entity authorized by
319
+ the copyright owner that is granting the License.
320
+
321
+ "Legal Entity" shall mean the union of the acting entity and all
322
+ other entities that control, are controlled by, or are under common
323
+ control with that entity. For the purposes of this definition,
324
+ "control" means (i) the power, direct or indirect, to cause the
325
+ direction or management of such entity, whether by contract or
326
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
327
+ outstanding shares, or (iii) beneficial ownership of such entity.
328
+
329
+ "You" (or "Your") shall mean an individual or Legal Entity
330
+ exercising permissions granted by this License.
331
+
332
+ "Source" form shall mean the preferred form for making modifications,
333
+ including but not limited to software source code, documentation
334
+ source, and configuration files.
335
+
336
+ "Object" form shall mean any form resulting from mechanical
337
+ transformation or translation of a Source form, including but
338
+ not limited to compiled object code, generated documentation,
339
+ and conversions to other media types.
340
+
341
+ "Work" shall mean the work of authorship, whether in Source or
342
+ Object form, made available under the License, as indicated by a
343
+ copyright notice that is included in or attached to the work
344
+ (an example is provided in the Appendix below).
345
+
346
+ "Derivative Works" shall mean any work, whether in Source or Object
347
+ form, that is based on (or derived from) the Work and for which the
348
+ editorial revisions, annotations, elaborations, or other modifications
349
+ represent, as a whole, an original work of authorship. For the purposes
350
+ of this License, Derivative Works shall not include works that remain
351
+ separable from, or merely link (or bind by name) to the interfaces of,
352
+ the Work and Derivative Works thereof.
353
+
354
+ "Contribution" shall mean any work of authorship, including
355
+ the original version of the Work and any modifications or additions
356
+ to that Work or Derivative Works thereof, that is intentionally
357
+ submitted to Licensor for inclusion in the Work by the copyright owner
358
+ or by an individual or Legal Entity authorized to submit on behalf of
359
+ the copyright owner. For the purposes of this definition, "submitted"
360
+ means any form of electronic, verbal, or written communication sent
361
+ to the Licensor or its representatives, including but not limited to
362
+ communication on electronic mailing lists, source code control systems,
363
+ and issue tracking systems that are managed by, or on behalf of, the
364
+ Licensor for the purpose of discussing and improving the Work, but
365
+ excluding communication that is conspicuously marked or otherwise
366
+ designated in writing by the copyright owner as "Not a Contribution."
367
+
368
+ "Contributor" shall mean Licensor and any individual or Legal Entity
369
+ on behalf of whom a Contribution has been received by Licensor and
370
+ subsequently incorporated within the Work.
371
+
372
+ 2. Grant of Copyright License. Subject to the terms and conditions of
373
+ this License, each Contributor hereby grants to You a perpetual,
374
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
375
+ copyright license to reproduce, prepare Derivative Works of,
376
+ publicly display, publicly perform, sublicense, and distribute the
377
+ Work and such Derivative Works in Source or Object form.
378
+
379
+ 3. Grant of Patent License. Subject to the terms and conditions of
380
+ this License, each Contributor hereby grants to You a perpetual,
381
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
382
+ (except as stated in this section) patent license to make, have made,
383
+ use, offer to sell, sell, import, and otherwise transfer the Work,
384
+ where such license applies only to those patent claims licensable
385
+ by such Contributor that are necessarily infringed by their
386
+ Contribution(s) alone or by combination of their Contribution(s)
387
+ with the Work to which such Contribution(s) was submitted. If You
388
+ institute patent litigation against any entity (including a
389
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
390
+ or a Contribution incorporated within the Work constitutes direct
391
+ or contributory patent infringement, then any patent licenses
392
+ granted to You under this License for that Work shall terminate
393
+ as of the date such litigation is filed.
394
+
395
+ 4. Redistribution. You may reproduce and distribute copies of the
396
+ Work or Derivative Works thereof in any medium, with or without
397
+ modifications, and in Source or Object form, provided that You
398
+ meet the following conditions:
399
+
400
+ (a) You must give any other recipients of the Work or
401
+ Derivative Works a copy of this License; and
402
+
403
+ (b) You must cause any modified files to carry prominent notices
404
+ stating that You changed the files; and
405
+
406
+ (c) You must retain, in the Source form of any Derivative Works
407
+ that You distribute, all copyright, patent, trademark, and
408
+ attribution notices from the Source form of the Work,
409
+ excluding those notices that do not pertain to any part of
410
+ the Derivative Works; and
411
+
412
+ (d) If the Work includes a "NOTICE" text file as part of its
413
+ distribution, then any Derivative Works that You distribute must
414
+ include a readable copy of the attribution notices contained
415
+ within such NOTICE file, excluding those notices that do not
416
+ pertain to any part of the Derivative Works, in at least one
417
+ of the following places: within a NOTICE text file distributed
418
+ as part of the Derivative Works; within the Source form or
419
+ documentation, if provided along with the Derivative Works; or,
420
+ within a display generated by the Derivative Works, if and
421
+ wherever such third-party notices normally appear. The contents
422
+ of the NOTICE file are for informational purposes only and
423
+ do not modify the License. You may add Your own attribution
424
+ notices within Derivative Works that You distribute, alongside
425
+ or as an addendum to the NOTICE text from the Work, provided
426
+ that such additional attribution notices cannot be construed
427
+ as modifying the License.
428
+
429
+ You may add Your own copyright statement to Your modifications and
430
+ may provide additional or different license terms and conditions
431
+ for use, reproduction, or distribution of Your modifications, or
432
+ for any such Derivative Works as a whole, provided Your use,
433
+ reproduction, and distribution of the Work otherwise complies with
434
+ the conditions stated in this License.
435
+
436
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
437
+ any Contribution intentionally submitted for inclusion in the Work
438
+ by You to the Licensor shall be under the terms and conditions of
439
+ this License, without any additional terms or conditions.
440
+ Notwithstanding the above, nothing herein shall supersede or modify
441
+ the terms of any separate license agreement you may have executed
442
+ with Licensor regarding such Contributions.
443
+
444
+ 6. Trademarks. This License does not grant permission to use the trade
445
+ names, trademarks, service marks, or product names of the Licensor,
446
+ except as required for reasonable and customary use in describing the
447
+ origin of the Work and reproducing the content of the NOTICE file.
448
+
449
+ 7. Disclaimer of Warranty. Unless required by applicable law or
450
+ agreed to in writing, Licensor provides the Work (and each
451
+ Contributor provides its Contributions) on an "AS IS" BASIS,
452
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
453
+ implied, including, without limitation, any warranties or conditions
454
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
455
+ PARTICULAR PURPOSE. You are solely responsible for determining the
456
+ appropriateness of using or redistributing the Work and assume any
457
+ risks associated with Your exercise of permissions under this License.
458
+
459
+ 8. Limitation of Liability. In no event and under no legal theory,
460
+ whether in tort (including negligence), contract, or otherwise,
461
+ unless required by applicable law (such as deliberate and grossly
462
+ negligent acts) or agreed to in writing, shall any Contributor be
463
+ liable to You for damages, including any direct, indirect, special,
464
+ incidental, or consequential damages of any character arising as a
465
+ result of this License or out of the use or inability to use the
466
+ Work (including but not limited to damages for loss of goodwill,
467
+ work stoppage, computer failure or malfunction, or any and all
468
+ other commercial damages or losses), even if such Contributor
469
+ has been advised of the possibility of such damages.
470
+
471
+ 9. Accepting Warranty or Additional Liability. While redistributing
472
+ the Work or Derivative Works thereof, You may choose to offer,
473
+ and charge a fee for, acceptance of support, warranty, indemnity,
474
+ or other liability obligations and/or rights consistent with this
475
+ License. However, in accepting such obligations, You may act only
476
+ on Your own behalf and on Your sole responsibility, not on behalf
477
+ of any other Contributor, and only if You agree to indemnify,
478
+ defend, and hold each Contributor harmless for any liability
479
+ incurred by, or claims asserted against, such Contributor by reason
480
+ of your accepting any such warranty or additional liability.
481
+
482
+ END OF TERMS AND CONDITIONS
483
+
484
+ APPENDIX: How to apply the Apache License to your work.
485
+
486
+ To apply the Apache License to your work, attach the following
487
+ boilerplate notice, with the fields enclosed by brackets "[]"
488
+ replaced with your own identifying information. (Don't include
489
+ the brackets!) The text should be enclosed in the appropriate
490
+ comment syntax for the file format. We also recommend that a
491
+ file or class name and description of purpose be included on the
492
+ same "printed page" as the copyright notice for easier
493
+ identification within third-party archives.
494
+
495
+ Copyright 2020 - present, Facebook, Inc
496
+
497
+ Licensed under the Apache License, Version 2.0 (the "License");
498
+ you may not use this file except in compliance with the License.
499
+ You may obtain a copy of the License at
500
+
501
+ http://www.apache.org/licenses/LICENSE-2.0
502
+
503
+ Unless required by applicable law or agreed to in writing, software
504
+ distributed under the License is distributed on an "AS IS" BASIS,
505
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
506
+ See the License for the specific language governing permissions and
507
+ limitations under the License.
Makefile ADDED
@@ -0,0 +1,142 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ .PHONY: tests
16
+
17
+ PYTHON_PATH := $(shell which python)
18
+
19
+ # If uv is installed and a virtual environment exists, use it
20
+ UV_CHECK := $(shell command -v uv)
21
+ ifneq ($(UV_CHECK),)
22
+ PYTHON_PATH := $(shell .venv/bin/python)
23
+ endif
24
+
25
+ export PATH := $(dir $(PYTHON_PATH)):$(PATH)
26
+
27
+ DEVICE ?= cpu
28
+
29
+ build-cpu:
30
+ docker build -t lerobot:latest -f docker/lerobot-cpu/Dockerfile .
31
+
32
+ build-gpu:
33
+ docker build -t lerobot:latest -f docker/lerobot-gpu/Dockerfile .
34
+
35
+ test-end-to-end:
36
+ ${MAKE} DEVICE=$(DEVICE) test-act-ete-train
37
+ ${MAKE} DEVICE=$(DEVICE) test-act-ete-train-resume
38
+ ${MAKE} DEVICE=$(DEVICE) test-act-ete-eval
39
+ ${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-train
40
+ ${MAKE} DEVICE=$(DEVICE) test-diffusion-ete-eval
41
+ ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-train
42
+ ${MAKE} DEVICE=$(DEVICE) test-tdmpc-ete-eval
43
+
44
+ test-act-ete-train:
45
+ python lerobot/scripts/train.py \
46
+ --policy.type=act \
47
+ --policy.dim_model=64 \
48
+ --policy.n_action_steps=20 \
49
+ --policy.chunk_size=20 \
50
+ --policy.device=$(DEVICE) \
51
+ --env.type=aloha \
52
+ --env.episode_length=5 \
53
+ --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
54
+ --dataset.image_transforms.enable=true \
55
+ --dataset.episodes="[0]" \
56
+ --batch_size=2 \
57
+ --steps=4 \
58
+ --eval_freq=2 \
59
+ --eval.n_episodes=1 \
60
+ --eval.batch_size=1 \
61
+ --save_freq=2 \
62
+ --save_checkpoint=true \
63
+ --log_freq=1 \
64
+ --wandb.enable=false \
65
+ --output_dir=tests/outputs/act/
66
+
67
+ test-act-ete-train-resume:
68
+ python lerobot/scripts/train.py \
69
+ --config_path=tests/outputs/act/checkpoints/000002/pretrained_model/train_config.json \
70
+ --resume=true
71
+
72
+ test-act-ete-eval:
73
+ python lerobot/scripts/eval.py \
74
+ --policy.path=tests/outputs/act/checkpoints/000004/pretrained_model \
75
+ --policy.device=$(DEVICE) \
76
+ --env.type=aloha \
77
+ --env.episode_length=5 \
78
+ --eval.n_episodes=1 \
79
+ --eval.batch_size=1
80
+
81
+ test-diffusion-ete-train:
82
+ python lerobot/scripts/train.py \
83
+ --policy.type=diffusion \
84
+ --policy.down_dims='[64,128,256]' \
85
+ --policy.diffusion_step_embed_dim=32 \
86
+ --policy.num_inference_steps=10 \
87
+ --policy.device=$(DEVICE) \
88
+ --env.type=pusht \
89
+ --env.episode_length=5 \
90
+ --dataset.repo_id=lerobot/pusht \
91
+ --dataset.image_transforms.enable=true \
92
+ --dataset.episodes="[0]" \
93
+ --batch_size=2 \
94
+ --steps=2 \
95
+ --eval_freq=2 \
96
+ --eval.n_episodes=1 \
97
+ --eval.batch_size=1 \
98
+ --save_checkpoint=true \
99
+ --save_freq=2 \
100
+ --log_freq=1 \
101
+ --wandb.enable=false \
102
+ --output_dir=tests/outputs/diffusion/
103
+
104
+ test-diffusion-ete-eval:
105
+ python lerobot/scripts/eval.py \
106
+ --policy.path=tests/outputs/diffusion/checkpoints/000002/pretrained_model \
107
+ --policy.device=$(DEVICE) \
108
+ --env.type=pusht \
109
+ --env.episode_length=5 \
110
+ --eval.n_episodes=1 \
111
+ --eval.batch_size=1
112
+
113
+ test-tdmpc-ete-train:
114
+ python lerobot/scripts/train.py \
115
+ --policy.type=tdmpc \
116
+ --policy.device=$(DEVICE) \
117
+ --env.type=xarm \
118
+ --env.task=XarmLift-v0 \
119
+ --env.episode_length=5 \
120
+ --dataset.repo_id=lerobot/xarm_lift_medium \
121
+ --dataset.image_transforms.enable=true \
122
+ --dataset.episodes="[0]" \
123
+ --batch_size=2 \
124
+ --steps=2 \
125
+ --eval_freq=2 \
126
+ --eval.n_episodes=1 \
127
+ --eval.batch_size=1 \
128
+ --save_checkpoint=true \
129
+ --save_freq=2 \
130
+ --log_freq=1 \
131
+ --wandb.enable=false \
132
+ --output_dir=tests/outputs/tdmpc/
133
+
134
+ test-tdmpc-ete-eval:
135
+ python lerobot/scripts/eval.py \
136
+ --policy.path=tests/outputs/tdmpc/checkpoints/000002/pretrained_model \
137
+ --policy.device=$(DEVICE) \
138
+ --env.type=xarm \
139
+ --env.episode_length=5 \
140
+ --env.task=XarmLift-v0 \
141
+ --eval.n_episodes=1 \
142
+ --eval.batch_size=1
README.md ADDED
@@ -0,0 +1,423 @@
1
+ <p align="center">
2
+ <picture>
3
+ <source media="(prefers-color-scheme: dark)" srcset="media/lerobot-logo-thumbnail.png">
4
+ <source media="(prefers-color-scheme: light)" srcset="media/lerobot-logo-thumbnail.png">
5
+ <img alt="LeRobot, Hugging Face Robotics Library" src="media/lerobot-logo-thumbnail.png" style="max-width: 100%;">
6
+ </picture>
7
+ <br/>
8
+ <br/>
9
+ </p>
10
+
11
+ <div align="center">
12
+
13
+ [![Tests](https://github.com/huggingface/lerobot/actions/workflows/nightly-tests.yml/badge.svg?branch=main)](https://github.com/huggingface/lerobot/actions/workflows/nightly-tests.yml?query=branch%3Amain)
14
+ [![Coverage](https://codecov.io/gh/huggingface/lerobot/branch/main/graph/badge.svg?token=TODO)](https://codecov.io/gh/huggingface/lerobot)
15
+ [![Python versions](https://img.shields.io/pypi/pyversions/lerobot)](https://www.python.org/downloads/)
16
+ [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/huggingface/lerobot/blob/main/LICENSE)
17
+ [![Status](https://img.shields.io/pypi/status/lerobot)](https://pypi.org/project/lerobot/)
18
+ [![Version](https://img.shields.io/pypi/v/lerobot)](https://pypi.org/project/lerobot/)
19
+ [![Examples](https://img.shields.io/badge/Examples-green.svg)](https://github.com/huggingface/lerobot/tree/main/examples)
20
+ [![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.1%20adopted-ff69b4.svg)](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md)
21
+ [![Discord](https://dcbadge.vercel.app/api/server/C5P34WJ68S?style=flat)](https://discord.gg/s3KuuzsPFb)
22
+
23
+ </div>
24
+
25
+ <h2 align="center">
26
+ <p><a href="https://huggingface.co/docs/lerobot/so101">
27
+ Build Your Own SO-101 Robot!</a></p>
28
+ </h2>
29
+
30
+ <div align="center">
31
+ <div style="display: flex; gap: 1rem; justify-content: center; align-items: center;" >
32
+ <img
33
+ src="media/so101/so101.webp?raw=true"
34
+ alt="SO-101 follower arm"
35
+ title="SO-101 follower arm"
36
+ style="width: 40%;"
37
+ />
38
+ <img
39
+ src="media/so101/so101-leader.webp?raw=true"
40
+ alt="SO-101 leader arm"
41
+ title="SO-101 leader arm"
42
+ style="width: 40%;"
43
+ />
44
+ </div>
45
+
46
+
47
+ <p><strong>Meet the SO-101, the updated SO-100 – just €114 per arm!</strong></p>
48
+ <p>Train it in minutes with a few simple moves on your laptop.</p>
49
+ <p>Then sit back and watch your creation act autonomously! 🤯</p>
50
+
51
+ <p><a href="https://huggingface.co/docs/lerobot/so101">
52
+ See the full SO-101 tutorial here.</a></p>
53
+
54
+ <p>Want to take it to the next level? Make your SO-101 mobile by building LeKiwi!</p>
55
+ <p>Check out the <a href="https://huggingface.co/docs/lerobot/lekiwi">LeKiwi tutorial</a> and bring your robot to life on wheels.</p>
56
+
57
+ <img src="media/lekiwi/kiwi.webp?raw=true" alt="LeKiwi mobile robot" title="LeKiwi mobile robot" width="50%">
58
+ </div>
59
+
60
+ <br/>
61
+
62
+ <h3 align="center">
63
+ <p>LeRobot: State-of-the-art AI for real-world robotics</p>
64
+ </h3>
65
+
66
+ ---
67
+
68
+ 🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier to entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.
69
+
70
+ 🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real world, with a focus on imitation learning and reinforcement learning.
71
+
72
+ 🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulation environments to get started without assembling a robot. In the coming weeks, the plan is to add more and more support for real-world robotics on the most affordable and capable robots out there.
73
+
74
+ 🤗 LeRobot hosts pretrained models and datasets on this Hugging Face community page: [huggingface.co/lerobot](https://huggingface.co/lerobot)
75
+
76
+ #### Examples of pretrained models on simulation environments
77
+
78
+ <table>
79
+ <tr>
80
+ <td><img src="media/gym/aloha_act.gif" width="100%" alt="ACT policy on ALOHA env"/></td>
81
+ <td><img src="media/gym/simxarm_tdmpc.gif" width="100%" alt="TDMPC policy on SimXArm env"/></td>
82
+ <td><img src="media/gym/pusht_diffusion.gif" width="100%" alt="Diffusion policy on PushT env"/></td>
83
+ </tr>
84
+ <tr>
85
+ <td align="center">ACT policy on ALOHA env</td>
86
+ <td align="center">TDMPC policy on SimXArm env</td>
87
+ <td align="center">Diffusion policy on PushT env</td>
88
+ </tr>
89
+ </table>
90
+
91
+ ### Acknowledgment
92
+
93
+ - The LeRobot team 🤗 for building SmolVLA [Paper](https://arxiv.org/abs/2506.01844), [Blog](https://huggingface.co/blog/smolvla).
94
+ - Thanks to Tony Zhao, Zipeng Fu and colleagues for open sourcing ACT policy, ALOHA environments and datasets. Ours are adapted from [ALOHA](https://tonyzhaozh.github.io/aloha) and [Mobile ALOHA](https://mobile-aloha.github.io).
95
+ - Thanks to Cheng Chi, Zhenjia Xu and colleagues for open sourcing Diffusion policy, Pusht environment and datasets, as well as UMI datasets. Ours are adapted from [Diffusion Policy](https://diffusion-policy.cs.columbia.edu) and [UMI Gripper](https://umi-gripper.github.io).
96
+ - Thanks to Nicklas Hansen, Yunhai Feng and colleagues for open sourcing TDMPC policy, Simxarm environments and datasets. Ours are adapted from [TDMPC](https://github.com/nicklashansen/tdmpc) and [FOWM](https://www.yunhaifeng.com/FOWM).
97
+ - Thanks to Antonio Loquercio and Ashish Kumar for their early support.
98
+ - Thanks to [Seungjae (Jay) Lee](https://sjlee.cc/), [Mahi Shafiullah](https://mahis.life/) and colleagues for open sourcing [VQ-BeT](https://sjlee.cc/vq-bet/) policy and helping us adapt the codebase to our repository. The policy is adapted from [VQ-BeT repo](https://github.com/jayLEE0301/vq_bet_official).
99
+
100
+
101
+ ## Installation
102
+
103
+ Download our source code:
104
+ ```bash
105
+ git clone https://github.com/huggingface/lerobot.git
106
+ cd lerobot
107
+ ```
108
+
109
+ Create a virtual environment with Python 3.10 and activate it, e.g. with [`miniconda`](https://docs.anaconda.com/free/miniconda/index.html):
110
+ ```bash
111
+ conda create -y -n lerobot python=3.10
112
+ conda activate lerobot
113
+ ```
114
+
115
+ When using `miniconda`, install `ffmpeg` in your environment:
116
+ ```bash
117
+ conda install ffmpeg -c conda-forge
118
+ ```
119
+
120
+ > **NOTE:** This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can:
121
+ > - _[On any platform]_ Explicitly install `ffmpeg 7.X` using:
122
+ > ```bash
123
+ > conda install ffmpeg=7.1.1 -c conda-forge
124
+ > ```
125
+ > - _[On Linux only]_ Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.
126
+
127
+ Install 🤗 LeRobot:
128
+ ```bash
129
+ pip install -e .
130
+ ```
131
+
132
+ > **NOTE:** If you encounter build errors, you may need to install additional dependencies (`cmake`, `build-essential`, and `ffmpeg libs`). On Linux, run:
133
+ `sudo apt-get install cmake build-essential python3-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev`. For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)
134
+
135
+ For simulations, 🤗 LeRobot comes with gymnasium environments that can be installed as extras:
136
+ - [aloha](https://github.com/huggingface/gym-aloha)
137
+ - [xarm](https://github.com/huggingface/gym-xarm)
138
+ - [pusht](https://github.com/huggingface/gym-pusht)
139
+
140
+ For instance, to install 🤗 LeRobot with aloha and pusht, use:
141
+ ```bash
142
+ pip install -e ".[aloha, pusht]"
143
+ ```
144
+
145
+ To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with
146
+ ```bash
147
+ wandb login
148
+ ```
149
+
150
+ (note: you will also need to enable WandB in the configuration. See below.)
151
+
152
+ ## Walkthrough
153
+
154
+ ```
155
+ .
156
+ ├── examples # contains demonstration examples, start here to learn about LeRobot
157
+ | └── advanced # contains even more examples for those who have mastered the basics
158
+ ├── lerobot
159
+ | ├── configs # contains config classes with all options that you can override in the command line
160
+ | ├── common # contains classes and utilities
161
+ | | ├── datasets # various datasets of human demonstrations: aloha, pusht, xarm
162
+ | | ├── envs # various sim environments: aloha, pusht, xarm
163
+ | | ├── policies # various policies: act, diffusion, tdmpc
164
+ | | ├── robot_devices # various real devices: dynamixel motors, opencv cameras, koch robots
165
+ | | └── utils # various utilities
166
+ | └── scripts # contains functions to execute via command line
167
+ | ├── eval.py # load policy and evaluate it on an environment
168
+ | ├── train.py # train a policy via imitation learning and/or reinforcement learning
169
+ | ├── control_robot.py # teleoperate a real robot, record data, run a policy
170
+ | ├── push_dataset_to_hub.py # convert your dataset into LeRobot dataset format and upload it to the Hugging Face hub
171
+ | └── visualize_dataset.py # load a dataset and render its demonstrations
172
+ ├── outputs # contains results of scripts execution: logs, videos, model checkpoints
173
+ └── tests # contains pytest utilities for continuous integration
174
+ ```
175
+
176
+ ### Visualize datasets
177
+
178
+ Check out [example 1](./examples/1_load_lerobot_dataset.py), which illustrates how to use our dataset class that automatically downloads data from the Hugging Face hub.
179
+
180
+ You can also locally visualize episodes from a dataset on the hub by executing our script from the command line:
181
+ ```bash
182
+ python lerobot/scripts/visualize_dataset.py \
183
+ --repo-id lerobot/pusht \
184
+ --episode-index 0
185
+ ```
186
+
187
+ or from a dataset in a local folder with the `--root` option and the `--local-files-only` flag (in the following case the dataset will be searched for in `./my_local_data_dir/lerobot/pusht`):
188
+ ```bash
189
+ python lerobot/scripts/visualize_dataset.py \
190
+ --repo-id lerobot/pusht \
191
+ --root ./my_local_data_dir \
192
+ --local-files-only 1 \
193
+ --episode-index 0
194
+ ```
195
+
196
+
197
+ It will open `rerun.io` and display the camera streams, robot states and actions, like this:
198
+
199
+ https://github-production-user-asset-6210df.s3.amazonaws.com/4681518/328035972-fd46b787-b532-47e2-bb6f-fd536a55a7ed.mov?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVCODYLSA53PQK4ZA%2F20240505%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240505T172924Z&X-Amz-Expires=300&X-Amz-Signature=d680b26c532eeaf80740f08af3320d22ad0b8a4e4da1bcc4f33142c15b509eda&X-Amz-SignedHeaders=host&actor_id=24889239&key_id=0&repo_id=748713144
200
+
201
+
202
+ Our script can also visualize datasets stored on a remote server. See `python lerobot/scripts/visualize_dataset.py --help` for more instructions.
203
+
204
+ ### The `LeRobotDataset` format
205
+
206
+ A dataset in `LeRobotDataset` format is very simple to use. It can be loaded from a repository on the Hugging Face hub or from a local folder simply with e.g. `dataset = LeRobotDataset("lerobot/aloha_static_coffee")` and can be indexed into like any Hugging Face or PyTorch dataset. For instance, `dataset[0]` will retrieve a single temporal frame from the dataset containing observation(s) and an action as PyTorch tensors ready to be fed to a model.
207
+
208
+ A specificity of `LeRobotDataset` is that, rather than retrieving a single frame by its index, we can retrieve several frames based on their temporal relationship with the indexed frame, by setting `delta_timestamps` to a list of relative times with respect to the indexed frame. For example, with `delta_timestamps = {"observation.image": [-1, -0.5, -0.2, 0]}` one can retrieve, for a given index, 4 frames: 3 "previous" frames 1 second, 0.5 seconds, and 0.2 seconds before the indexed frame, and the indexed frame itself (corresponding to the 0 entry). See example [1_load_lerobot_dataset.py](examples/1_load_lerobot_dataset.py) for more details on `delta_timestamps`.
209
+
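+ As a concrete sketch of the above (the import path follows example 1; `lerobot/pusht` and its `observation.image` key are just one possible choice, and key names differ across datasets):
+
+ ```python
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+
+ # Request 3 past frames plus the current one, relative to each indexed frame.
+ delta_timestamps = {"observation.image": [-1.0, -0.5, -0.2, 0.0]}
+ dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)
+
+ item = dataset[0]
+ # The image entry now stacks the 4 requested frames along a leading temporal dimension.
+ print(item["observation.image"].shape)  # e.g. torch.Size([4, c, h, w])
+ ```
+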
210
+ Under the hood, the `LeRobotDataset` format makes use of several ways to serialize data, which can be useful to understand if you plan to work more closely with this format. We tried to make a flexible yet simple dataset format that covers most types of features and specificities present in reinforcement learning and robotics, in simulation and in the real world, with a focus on cameras and robot states, while remaining easily extendable to other types of sensory inputs as long as they can be represented by a tensor.
211
+
212
+ Here are the important details and internal structure organization of a typical `LeRobotDataset` instantiated with `dataset = LeRobotDataset("lerobot/aloha_static_coffee")`. The exact features will change from dataset to dataset but not the main aspects:
213
+
214
+ ```
215
+ dataset attributes:
216
+ ├ hf_dataset: a Hugging Face dataset (backed by Arrow/parquet). Typical features example:
217
+ │ ├ observation.images.cam_high (VideoFrame):
218
+ │ │ VideoFrame = {'path': path to a mp4 video, 'timestamp' (float32): timestamp in the video}
219
+ │ ├ observation.state (list of float32): position of the arm's joints (for instance)
220
+ │ ... (more observations)
221
+ │ ├ action (list of float32): goal position of the arm's joints (for instance)
222
+ │ ├ episode_index (int64): index of the episode for this sample
223
+ │ ├ frame_index (int64): index of the frame for this sample in the episode; starts at 0 for each episode
224
+ │ ├ timestamp (float32): timestamp in the episode
225
+ │ ├ next.done (bool): indicates the end of an episode; True for the last frame in each episode
226
+ │ └ index (int64): general index in the whole dataset
227
+ ├ episode_data_index: contains 2 tensors with the start and end indices of each episode
228
+ │ ├ from (1D int64 tensor): first frame index for each episode — shape (num episodes,) starts with 0
229
+ │ └ to: (1D int64 tensor): last frame index for each episode — shape (num episodes,)
230
+ ├ stats: a dictionary of statistics (max, mean, min, std) for each feature in the dataset, for instance
231
+ │ ├ observation.images.cam_high: {'max': tensor with same number of dimensions (e.g. `(c, 1, 1)` for images, `(c,)` for states), etc.}
232
+ │ ...
233
+ ├ info: a dictionary of metadata on the dataset
234
+ │ ├ codebase_version (str): this is to keep track of the codebase version the dataset was created with
235
+ │ ├ fps (float): frame per second the dataset is recorded/synchronized to
236
+ │ ├ video (bool): indicates if frames are encoded in mp4 video files to save space or stored as png files
237
+ │ └ encoding (dict): if video, this documents the main options that were used with ffmpeg to encode the videos
238
+ ├ videos_dir (Path): where the mp4 videos or png images are stored/accessed
239
+ └ camera_keys (list of string): the keys to access camera features in the item returned by the dataset (e.g. `["observation.images.cam_high", ...]`)
240
+ ```
241
+
242
+ A `LeRobotDataset` is serialized using several widespread file formats for each of its parts, namely:
243
+ - hf_dataset stored using Hugging Face datasets library serialization to parquet
244
+ - videos are stored in mp4 format to save space
245
+ - metadata are stored in plain json/jsonl files
246
+
247
+ Datasets can be uploaded/downloaded to and from the Hugging Face hub seamlessly. To work with a local dataset, you can specify its location with the `root` argument if it's not in the default `~/.cache/huggingface/lerobot` location.
248
+
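+ For instance, a minimal sketch of loading a dataset from a non-default local location (the path below is a placeholder; adjust it to wherever your copy of the dataset lives):
+
+ ```python
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+
+ # Point `root` at the local copy of the dataset instead of the default
+ # `~/.cache/huggingface/lerobot` cache location.
+ dataset = LeRobotDataset("lerobot/pusht", root="path/to/local/dataset")
+
+ print(len(dataset))       # total number of frames
+ print(dataset[0].keys())  # feature keys of the first frame (observations, action, ...)
+ ```
+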
249
+ ### Evaluate a pretrained policy
250
+
251
+ Check out [example 2](./examples/2_evaluate_pretrained_policy.py), which illustrates how to download a pretrained policy from the Hugging Face hub and run an evaluation in its corresponding environment (a minimal sketch follows below).
252
+
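+ As a quick sketch of what example 2 does in Python (the import path mirrors the example script; see the example for the full environment rollout loop):
+
+ ```python
+ from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
+
+ # Downloads the pretrained weights from the Hub on first use.
+ policy = DiffusionPolicy.from_pretrained("lerobot/diffusion_pusht")
+ policy.eval()  # inference mode; example 2 then rolls the policy out in gym-pusht
+ ```
+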
253
+ We also provide a more capable script to parallelize the evaluation over multiple environments during the same rollout. Here is an example with a pretrained model hosted on [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht):
254
+ ```bash
255
+ python lerobot/scripts/eval.py \
256
+ --policy.path=lerobot/diffusion_pusht \
257
+ --env.type=pusht \
258
+ --eval.batch_size=10 \
259
+ --eval.n_episodes=10 \
260
+ --policy.use_amp=false \
261
+ --policy.device=cuda
262
+ ```
263
+
264
+ Note: After training your own policy, you can re-evaluate the checkpoints with:
265
+
266
+ ```bash
267
+ python lerobot/scripts/eval.py --policy.path={OUTPUT_DIR}/checkpoints/last/pretrained_model
268
+ ```
269
+
270
+ See `python lerobot/scripts/eval.py --help` for more instructions.
271
+
272
+ ### Train your own policy
273
+
274
+ Check out [example 3](./examples/3_train_policy.py), which illustrates how to train a model using our core library in Python, and [example 4](./examples/4_train_policy_with_script.md), which shows how to use our training script from the command line.
275
+
276
+ To use wandb for logging training and evaluation curves, make sure you've run `wandb login` as a one-time setup step. Then, when running the training command above, enable WandB in the configuration by adding `--wandb.enable=true`.
277
+
278
+ A link to the wandb logs for the run will also show up in yellow in your terminal. Here is an example of what they look like in your browser. Please also check [here](./examples/4_train_policy_with_script.md#typical-logs-and-metrics) for the explanation of some commonly used metrics in logs.
279
+
280
+ ![](media/wandb.png)
281
+
282
+ Note: For efficiency, during training every checkpoint is evaluated on a low number of episodes. You may use `--eval.n_episodes=500` to evaluate on more episodes than the default. Or, after training, you may want to re-evaluate your best checkpoints on more episodes or change the evaluation settings. See `python lerobot/scripts/eval.py --help` for more instructions.
283
+
284
+ #### Reproduce state-of-the-art (SOTA)
285
+
286
+ We provide some pretrained policies on our [hub page](https://huggingface.co/lerobot) that can achieve state-of-the-art performance.
287
+ You can reproduce their training by loading the config from their run. Simply running:
288
+ ```bash
289
+ python lerobot/scripts/train.py --config_path=lerobot/diffusion_pusht
290
+ ```
291
+ reproduces SOTA results for Diffusion Policy on the PushT task.
292
+
293
+ ## Contribute
294
+
295
+ If you would like to contribute to 🤗 LeRobot, please check out our [contribution guide](https://github.com/huggingface/lerobot/blob/main/CONTRIBUTING.md).
296
+
297
+ <!-- ### Add a new dataset
298
+
299
+ To add a dataset to the hub, you need to login using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
300
+ ```bash
301
+ huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
302
+ ```
303
+
304
+ Then point to your raw dataset folder (e.g. `data/aloha_static_pingpong_test_raw`), and push your dataset to the hub with:
305
+ ```bash
306
+ python lerobot/scripts/push_dataset_to_hub.py \
307
+ --raw-dir data/aloha_static_pingpong_test_raw \
308
+ --out-dir data \
309
+ --repo-id lerobot/aloha_static_pingpong_test \
310
+ --raw-format aloha_hdf5
311
+ ```
312
+
313
+ See `python lerobot/scripts/push_dataset_to_hub.py --help` for more instructions.
314
+
315
+ If your dataset format is not supported, implement your own in `lerobot/common/datasets/push_dataset_to_hub/${raw_format}_format.py` by copying examples like [pusht_zarr](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/pusht_zarr_format.py), [umi_zarr](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/umi_zarr_format.py), [aloha_hdf5](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py), or [xarm_pkl](https://github.com/huggingface/lerobot/blob/main/lerobot/common/datasets/push_dataset_to_hub/xarm_pkl_format.py). -->
316
+
317
+
318
+ ### Add a pretrained policy
319
+
320
+ Once you have trained a policy you may upload it to the Hugging Face hub using a hub id that looks like `${hf_user}/${repo_name}` (e.g. [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)).
321
+
322
+ You first need to find the checkpoint folder located inside your experiment directory (e.g. `outputs/train/2024-05-05/20-21-12_aloha_act_default/checkpoints/002500`). Within that there is a `pretrained_model` directory which should contain:
323
+ - `config.json`: A serialized version of the policy configuration (following the policy's dataclass config).
324
+ - `model.safetensors`: A set of `torch.nn.Module` parameters, saved in [Hugging Face Safetensors](https://huggingface.co/docs/safetensors/index) format.
325
+ - `train_config.json`: A consolidated configuration containing all parameters used for training. The policy configuration should match `config.json` exactly. This is useful for anyone who wants to evaluate your policy or for reproducibility.
326
+
327
+ To upload these to the hub, run the following:
328
+ ```bash
329
+ huggingface-cli upload ${hf_user}/${repo_name} path/to/pretrained_model
330
+ ```
331
+
332
+ See [eval.py](https://github.com/huggingface/lerobot/blob/main/lerobot/scripts/eval.py) for an example of how other people may use your policy.
333
+
334
+
335
+ ### Improve your code with profiling
336
+
337
+ An example of a code snippet to profile the evaluation of a policy:
338
+ ```python
339
+ import torch
+ from torch.profiler import profile, record_function, ProfilerActivity
+
+ num_episodes = 10  # placeholder: number of evaluation rollouts to profile
340
+
341
+ def trace_handler(prof):
342
+ prof.export_chrome_trace(f"tmp/trace_schedule_{prof.step_num}.json")
343
+
344
+ with profile(
345
+ activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
346
+ schedule=torch.profiler.schedule(
347
+ wait=2,
348
+ warmup=2,
349
+ active=3,
350
+ ),
351
+ on_trace_ready=trace_handler
352
+ ) as prof:
353
+ with record_function("eval_policy"):
354
+ for i in range(num_episodes):
355
+ prof.step()
356
+ # insert code to profile, potentially whole body of eval_policy function
357
+ ```
358
+
359
+ ## Citation
360
+
361
+ If you want, you can cite this work with:
362
+ ```bibtex
363
+ @misc{cadene2024lerobot,
364
+ author = {Cadene, Remi and Alibert, Simon and Soare, Alexander and Gallouedec, Quentin and Zouitine, Adil and Palma, Steven and Kooijmans, Pepijn and Aractingi, Michel and Shukor, Mustafa and Aubakirova, Dana and Russi, Martino and Capuano, Francesco and Pascale, Caroline and Choghari, Jade and Moss, Jess and Wolf, Thomas},
365
+ title = {LeRobot: State-of-the-art Machine Learning for Real-World Robotics in Pytorch},
366
+ howpublished = "\url{https://github.com/huggingface/lerobot}",
367
+ year = {2024}
368
+ }
369
+ ```
370
+
371
+ Additionally, if you use any of the particular policy architectures, pretrained models, or datasets, please cite the original authors of the work as they appear below:
372
+ - [SmolVLA](https://arxiv.org/abs/2506.01844)
373
+ ```bibtex
374
+ @article{shukor2025smolvla,
375
+ title={SmolVLA: A Vision-Language-Action Model for Affordable and Efficient Robotics},
376
+ author={Shukor, Mustafa and Aubakirova, Dana and Capuano, Francesco and Kooijmans, Pepijn and Palma, Steven and Zouitine, Adil and Aractingi, Michel and Pascal, Caroline and Russi, Martino and Marafioti, Andres and Alibert, Simon and Cord, Matthieu and Wolf, Thomas and Cadene, Remi},
377
+ journal={arXiv preprint arXiv:2506.01844},
378
+ year={2025}
379
+ }
380
+ ```
381
+
382
+ - [Diffusion Policy](https://diffusion-policy.cs.columbia.edu)
383
+ ```bibtex
384
+ @article{chi2024diffusionpolicy,
385
+ author = {Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song},
386
+ title ={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion},
387
+ journal = {The International Journal of Robotics Research},
388
+ year = {2024},
389
+ }
390
+ ```
391
+ - [ACT or ALOHA](https://tonyzhaozh.github.io/aloha)
392
+ ```bibtex
393
+ @article{zhao2023learning,
394
+ title={Learning fine-grained bimanual manipulation with low-cost hardware},
395
+ author={Zhao, Tony Z and Kumar, Vikash and Levine, Sergey and Finn, Chelsea},
396
+ journal={arXiv preprint arXiv:2304.13705},
397
+ year={2023}
398
+ }
399
+ ```
400
+
401
+ - [TDMPC](https://www.nicklashansen.com/td-mpc/)
402
+
403
+ ```bibtex
404
+ @inproceedings{Hansen2022tdmpc,
405
+ title={Temporal Difference Learning for Model Predictive Control},
406
+ author={Nicklas Hansen and Xiaolong Wang and Hao Su},
407
+ booktitle={ICML},
408
+ year={2022}
409
+ }
410
+ ```
411
+
412
+ - [VQ-BeT](https://sjlee.cc/vq-bet/)
413
+ ```bibtex
414
+ @article{lee2024behavior,
415
+ title={Behavior generation with latent actions},
416
+ author={Lee, Seungjae and Wang, Yibin and Etukuru, Haritheja and Kim, H Jin and Shafiullah, Nur Muhammad Mahi and Pinto, Lerrel},
417
+ journal={arXiv preprint arXiv:2403.03181},
418
+ year={2024}
419
+ }
420
+ ```
421
+ ## Star History
422
+
423
+ [![Star History Chart](https://api.star-history.com/svg?repos=huggingface/lerobot&type=Timeline)](https://star-history.com/#huggingface/lerobot&Timeline)
benchmarks/video/README.md ADDED
@@ -0,0 +1,271 @@
1
+ # Video benchmark
2
+
3
+
4
+ ## Questions
5
+ What is the optimal trade-off between:
6
+ - minimizing loading time with random access,
7
+ - minimizing memory space on disk,
8
+ - maximizing success rate of policies,
9
+ - compatibility across devices/platforms for decoding videos (e.g. video players, web browsers).
10
+
11
+ How to encode videos?
12
+ - Which video codec (`-vcodec`) to use? h264, h265, AV1?
13
+ - What pixel format to use (`-pix_fmt`)? `yuv444p` or `yuv420p`?
14
+ - How much compression (`-crf`)? No compression with `0`, intermediate compression with `25` or extreme with `50+`?
15
+ - Which frequency to choose for key frames (`-g`)? A key frame every `10` frames?
16
+
17
+ How to decode videos?
18
+ - Which `decoder`? `torchvision`, `torchaudio`, `ffmpegio`, `decord`, or `nvc`?
19
+ - Which scenarios to use for requesting timestamps during the benchmark (`timestamps_mode`)?
20
+
21
+
22
+ ## Variables
23
+ **Image content & size**
24
+ We don't expect the same optimal settings for a dataset of images from a simulation as for images from the real world in an apartment, in a factory, outdoors, or with lots of moving objects in the scene, etc. Similarly, loading times might not vary linearly with the image size (resolution).
25
+ For these reasons, we run this benchmark on four representative datasets:
26
+ - `lerobot/pusht_image`: (96 x 96 pixels) simulation with simple geometric shapes, fixed camera.
27
+ - `aliberts/aloha_mobile_shrimp_image`: (480 x 640 pixels) real-world indoor, moving camera.
28
+ - `aliberts/paris_street`: (720 x 1280 pixels) real-world outdoor, moving camera.
29
+ - `aliberts/kitchen`: (1080 x 1920 pixels) real-world indoor, fixed camera.
30
+
31
+ Note: The datasets used for this benchmark need to be image datasets, not video datasets.
32
+
33
+ **Data augmentations**
34
+ We might revisit this benchmark and find better settings if we train our policies with various data augmentations to make them more robust (e.g. robust to color changes, compression, etc.).
35
+
36
+ ### Encoding parameters
37
+ | parameter | values |
38
+ |-------------|--------------------------------------------------------------|
39
+ | **vcodec** | `libx264`, `libx265`, `libsvtav1` |
40
+ | **pix_fmt** | `yuv444p`, `yuv420p` |
41
+ | **g** | `1`, `2`, `3`, `4`, `5`, `6`, `10`, `15`, `20`, `40`, `None` |
42
+ | **crf** | `0`, `5`, `10`, `15`, `20`, `25`, `30`, `40`, `50`, `None` |
43
+
44
+ Note that the `crf` value might be interpreted differently by different video codecs: the same value used with one codec doesn't necessarily translate into the same compression level with another. In fact, the default value (`None`) isn't the same across video codecs. The same applies to many other ffmpeg arguments, like `g`, which specifies the frequency of the key frames.
45
+
46
+ For a comprehensive list and documentation of these parameters, see the ffmpeg documentation depending on the video codec used:
47
+ - h264: https://trac.ffmpeg.org/wiki/Encode/H.264
48
+ - h265: https://trac.ffmpeg.org/wiki/Encode/H.265
49
+ - AV1: https://trac.ffmpeg.org/wiki/Encode/AV1
50
+
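+ To make these options concrete, here is a hedged sketch (not part of the benchmark code) of how an image sequence could be encoded with these flags by calling ffmpeg from Python; the frame pattern, fps, and output path are placeholders:
+ ```python
+ import subprocess
+
+ # Placeholder input pattern and output path; the benchmark itself drives ffmpeg
+ # through lerobot's video utilities rather than calling it directly like this.
+ cmd = [
+     "ffmpeg",
+     "-f", "image2",
+     "-framerate", "30",             # fps of the original image sequence
+     "-i", "frames/frame_%06d.png",  # input frames
+     "-vcodec", "libx264",           # video codec under test
+     "-pix_fmt", "yuv444p",          # chroma subsampling
+     "-g", "2",                      # a key frame every 2 frames
+     "-crf", "30",                   # compression level
+     "-y", "episode_0.mp4",
+ ]
+ subprocess.run(cmd, check=True)
+ ```
+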
51
+ ### Decoding parameters
52
+ **Decoder**
53
+ We tested two video decoding backends from torchvision:
54
+ - `pyav`
55
+ - `video_reader` (requires building torchvision from source)
56
+
57
+ **Requested timestamps**
58
+ Given the way video decoding works, once a keyframe has been loaded, the decoding of subsequent frames is fast.
59
+ This is of course affected by the `-g` parameter used during encoding, which specifies the frequency of the keyframes. Given our typical use cases in robotics, where policies might request a few timestamps at different random places, we want to replicate these use cases with the following scenarios:
60
+ - `1_frame`: 1 frame,
61
+ - `2_frames`: 2 consecutive frames (e.g. `[t, t + 1 / fps]`),
62
+ - `6_frames`: 6 consecutive frames (e.g. `[t + i / fps for i in range(6)]`)
63
+
64
+ Note that this differs significantly from a typical use case like watching a movie, in which every frame is loaded sequentially from the beginning to the end and it's acceptable to have big values for `-g`.
65
+
66
+ Additionally, because some policies might request single timestamps that are a few frames apart, we also have the following scenario:
67
+ - `2_frames_4_space`: 2 frames with 4 consecutive frames of spacing in between (e.g. `[t, t + 5 / fps]`).
68
+
69
+ However, due to how video decoding is implemented with `pyav`, we don't have access to an accurate seek, so in practice this scenario is essentially the same as `6_frames` since all 6 frames between `t` and `t + 5 / fps` will be decoded.
70
+
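+ For reference, here is a small sketch of how these scenarios translate into requested timestamps; it mirrors the `sample_timestamps` logic in `run_video_benchmark.py`, with the reference frame index and fps as inputs:
+ ```python
+ def scenario_timestamps(idx: int, fps: int, mode: str) -> list[float]:
+     # idx is the index of the last requested frame; it must be >= 5 so that
+     # 2_frames_4_space and 6_frames can look back far enough.
+     if mode == "1_frame":
+         frame_indexes = [idx]
+     elif mode == "2_frames":
+         frame_indexes = [idx - 1, idx]
+     elif mode == "2_frames_4_space":
+         frame_indexes = [idx - 5, idx]
+     elif mode == "6_frames":
+         frame_indexes = [idx - i for i in range(6)][::-1]
+     else:
+         raise ValueError(mode)
+     return [i / fps for i in frame_indexes]
+
+ print(scenario_timestamps(idx=10, fps=30, mode="2_frames_4_space"))  # [5/30, 10/30] ≈ [0.167, 0.333]
+ ```
+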
71
+
72
+ ## Metrics
73
+ **Data compression ratio (lower is better)**
74
+ `video_images_size_ratio` is the ratio of the memory space on disk taken by the encoded video over the memory space taken by the original images. For instance, `video_images_size_ratio=25%` means that the video takes 4 times less memory space on disk compared to the original images.
75
+
76
+ **Loading time ratio (lower is better)**
77
+ `video_images_load_time_ratio` is the ratio of the time it takes to decode frames from the video at given timestamps over the time it takes to load the exact same original images. Lower is better. For instance, `video_images_load_time_ratio=200%` means that decoding from video is 2 times slower than loading the original images.
78
+
79
+ **Average Mean Square Error (lower is better)**
80
+ `avg_mse` is the average mean squared error between each decoded frame and its corresponding original image over all requested timestamps, also divided by the number of pixels in the image so that it is comparable across image sizes.
81
+
82
+ **Average Peak Signal to Noise Ratio (higher is better)**
83
+ `avg_psnr` measures the ratio between the maximum possible power of a signal and the power of corrupting noise that affects the fidelity of its representation. Higher PSNR indicates better quality.
84
+
85
+ **Average Structural Similarity Index Measure (higher is better)**
86
+ `avg_ssim` evaluates the perceived quality of images by comparing luminance, contrast, and structure. SSIM values range from -1 to 1, where 1 indicates perfect similarity.
87
+
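+ These quality metrics are computed per decoded frame against its original image, as done in `run_video_benchmark.py`. Here is a minimal sketch with scikit-image, assuming both frames are float arrays in `[0, 1]` with the channel axis first (the random arrays below are only stand-ins for real frames):
+ ```python
+ import numpy as np
+ from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity
+
+ rng = np.random.default_rng(0)
+ original = rng.random((3, 480, 640), dtype=np.float32)  # stand-in for an original frame (c, h, w)
+ decoded = np.clip(original + rng.normal(0, 0.01, original.shape), 0, 1).astype(np.float32)
+
+ mse = mean_squared_error(original, decoded)
+ psnr = peak_signal_noise_ratio(original, decoded, data_range=1.0)
+ ssim = structural_similarity(original, decoded, data_range=1.0, channel_axis=0)
+ print(f"mse={mse:.2e} psnr={psnr:.2f} ssim={ssim:.2%}")
+ ```
+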
88
+ One aspect that can't be measured here with those metrics is the compatibility of the encoding across platforms, in particular on web browser, for visualization purposes.
89
+ h264, h265, and AV1 are all commonly used codecs and should not pose an issue. However, the chroma subsampling format (`pix_fmt`) might affect compatibility:
90
+ - `yuv420p` is more widely supported across various platforms, including web browsers.
91
+ - `yuv444p` offers higher color fidelity but might not be supported as broadly.
92
+
93
+
94
+ <!-- **Loss of a pretrained policy (higher is better)** (not available)
95
+ `loss_pretrained` is the result of evaluating with the selected encoding/decoding settings a policy pretrained on original images. It is easier to understand than `avg_l2_error`.
96
+
97
+ **Success rate after retraining (higher is better)** (not available)
98
+ `success_rate` is the result of training and evaluating a policy with the selected encoding/decoding settings. It is the most difficult metric to get but also the very best. -->
99
+
100
+
101
+ ## How the benchmark works
102
+ The benchmark evaluates both encoding and decoding of video frames on the first episode of each dataset.
103
+
104
+ **Encoding:** for each `vcodec` and `pix_fmt` pair, we use default values for `g` and `crf` and change a single one of them (either `g` or `crf`) to one of the specified values (we don't test every combination of those, as this would be computationally too heavy).
105
+ This gives a unique set of encoding parameters which is used to encode the episode.
106
+
107
+ **Decoding:** Then, for each of those unique encodings, we iterate through every combination of the decoding parameters `backend` and `timestamps_mode`. For each of them, we record the metrics of a number of samples (given by `--num-samples`). This is parallelized for efficiency and the number of processes can be controlled with `--num-workers`. Ideally, it's best to have a `--num-samples` that is divisible by `--num-workers`.
108
+
109
+ Intermediate results are saved for each `vcodec` and `pix_fmt` combination in CSV tables.
110
+ These are then all concatenated into a single table ready for analysis.
111
+
112
+ ## Caveats
113
+ We tried to measure the most impactful parameters for both encoding and decoding. However, for computational reasons we can't test out every combination.
114
+
115
+ Additional encoding parameters exist that are not included in this benchmark. In particular:
116
+ - `-preset`, which selects an encoding preset: a collection of options that trades encoding speed against compression ratio. When left unspecified, it defaults to `medium` for libx264 and libx265 and to `8` for libsvtav1.
117
+ - `-tune`, which optimizes the encoding for certain aspects (e.g. film quality, fast decoding, etc.).
118
+
119
+ See the documentation mentioned above for more detailed info on these settings and for a more comprehensive list of other parameters.
120
+
121
+ Similarly on the decoding side, other decoders exist but are not implemented in our current benchmark. To name a few:
122
+ - `torchaudio`
123
+ - `ffmpegio`
124
+ - `decord`
125
+ - `nvc`
126
+
127
+ Note as well that since we are mostly interested in the performance at decoding time (also because encoding is done only once before uploading a dataset), we did not measure encoding times nor have any metrics regarding encoding.
128
+ However, besides the need to build ffmpeg from source, encoding did not pose any issue and didn't take a significant amount of time during this benchmark.
129
+
130
+
131
+ ## Install
132
+ Building ffmpeg from source is required to include libx265 and libaom/libsvtav1 (av1) video codecs ([compilation guide](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu)).
133
+
134
+ **Note:** While you still need to build torchvision with a conda-installed `ffmpeg<4.3` to use the `video_reader` decoder (as described in [#220](https://github.com/huggingface/lerobot/pull/220)), you also need another version which is custom-built with all the video codecs for encoding. For the script to use that version, you can prefix the benchmark command with `PATH="$HOME/bin:$PATH"`, which is where ffmpeg should be built.
135
+
136
+
137
+ ## Adding a video decoder
138
+ Right now, we're only benchmarking the two video decoders available with torchvision: `pyav` and `video_reader`.
139
+ You can easily add a new decoder to benchmark by adding it to this function in the script:
140
+ ```diff
141
+ def decode_video_frames(
142
+ video_path: str,
143
+ timestamps: list[float],
144
+ tolerance_s: float,
145
+ backend: str,
146
+ ) -> torch.Tensor:
147
+ if backend in ["pyav", "video_reader"]:
148
+ return decode_video_frames_torchvision(
149
+ video_path, timestamps, tolerance_s, backend
150
+ )
151
+ + elif backend in ["your_decoder"]:
152
+ + return your_decoder_function(
153
+ + video_path, timestamps, tolerance_s, backend
154
+ + )
155
+ else:
156
+ raise NotImplementedError(backend)
157
+ ```
158
+
159
+
160
+ ## Example
161
+ For a quick run, you can try these parameters:
162
+ ```bash
163
+ python benchmarks/video/run_video_benchmark.py \
164
+ --output-dir outputs/video_benchmark \
165
+ --repo-ids \
166
+ lerobot/pusht_image \
167
+ aliberts/aloha_mobile_shrimp_image \
168
+ --vcodec libx264 libx265 \
169
+ --pix-fmt yuv444p yuv420p \
170
+ --g 2 20 None \
171
+ --crf 10 40 None \
172
+ --timestamps-modes 1_frame 2_frames \
173
+ --backends pyav video_reader \
174
+ --num-samples 5 \
175
+ --num-workers 5 \
176
+ --save-frames 0
177
+ ```
178
+
179
+
180
+ ## Results
181
+
182
+ ### Reproduce
183
+ We ran the benchmark with the following parameters:
184
+ ```bash
185
+ # h264 and h265 encodings
186
+ python benchmarks/video/run_video_benchmark.py \
187
+ --output-dir outputs/video_benchmark \
188
+ --repo-ids \
189
+ lerobot/pusht_image \
190
+ aliberts/aloha_mobile_shrimp_image \
191
+ aliberts/paris_street \
192
+ aliberts/kitchen \
193
+ --vcodec libx264 libx265 \
194
+ --pix-fmt yuv444p yuv420p \
195
+ --g 1 2 3 4 5 6 10 15 20 40 None \
196
+ --crf 0 5 10 15 20 25 30 40 50 None \
197
+ --timestamps-modes 1_frame 2_frames 6_frames \
198
+ --backends pyav video_reader \
199
+ --num-samples 50 \
200
+ --num-workers 5 \
201
+ --save-frames 1
202
+
203
+ # av1 encoding (only compatible with yuv420p and pyav decoder)
204
+ python benchmarks/video/run_video_benchmark.py \
205
+ --output-dir outputs/video_benchmark \
206
+ --repo-ids \
207
+ lerobot/pusht_image \
208
+ aliberts/aloha_mobile_shrimp_image \
209
+ aliberts/paris_street \
210
+ aliberts/kitchen \
211
+ --vcodec libsvtav1 \
212
+ --pix-fmt yuv420p \
213
+ --g 1 2 3 4 5 6 10 15 20 40 None \
214
+ --crf 0 5 10 15 20 25 30 40 50 None \
215
+ --timestamps-modes 1_frame 2_frames 6_frames \
216
+ --backends pyav \
217
+ --num-samples 50 \
218
+ --num-workers 5 \
219
+ --save-frames 1
220
+ ```
221
+
222
+ The full results are available [here](https://docs.google.com/spreadsheets/d/1OYJB43Qu8fC26k_OyoMFgGBBKfQRCi4BIuYitQnq3sw/edit?usp=sharing)
223
+
224
+
225
+ ### Parameters selected for LeRobotDataset
226
+ Considering these results, we chose what we think is the best set of encoding parameters:
227
+ - vcodec: `libsvtav1`
228
+ - pix-fmt: `yuv420p`
229
+ - g: `2`
230
+ - crf: `30`
231
+
232
+ Since we're using av1 encoding, we're choosing the `pyav` decoder as `video_reader` does not support it (and `pyav` doesn't require a custom build of `torchvision`).
233
+
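+ If you want to apply these settings to your own image sequences, they can be passed to lerobot's `encode_video_frames` helper (the same function used by this benchmark). A minimal sketch, where the image directory and output path are placeholders:
+ ```python
+ from pathlib import Path
+
+ from lerobot.common.datasets.video_utils import encode_video_frames
+
+ # Placeholder paths: a folder of frame_XXXXXX.png images and the target mp4 file.
+ encode_video_frames(
+     imgs_dir=Path("outputs/images/my_episode"),
+     video_path=Path("outputs/videos/my_episode.mp4"),
+     fps=30,
+     vcodec="libsvtav1",
+     pix_fmt="yuv420p",
+     g=2,
+     crf=30,
+     overwrite=True,
+ )
+ ```
+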
234
+ ### Summary
235
+
236
+ These tables show the results for `g=2` and `crf=30`, using `timestamps-modes=6_frames` and `backend=pyav`.
237
+
238
+ | video_images_size_ratio | vcodec | pix_fmt | | | |
239
+ |------------------------------------|------------|---------|-----------|-----------|-----------|
240
+ | | libx264 | | libx265 | | libsvtav1 |
241
+ | repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
242
+ | lerobot/pusht_image | **16.97%** | 17.58% | 18.57% | 18.86% | 22.06% |
243
+ | aliberts/aloha_mobile_shrimp_image | 2.14% | 2.11% | 1.38% | **1.37%** | 5.59% |
244
+ | aliberts/paris_street | 2.12% | 2.13% | **1.54%** | **1.54%** | 4.43% |
245
+ | aliberts/kitchen | 1.40% | 1.39% | **1.00%** | **1.00%** | 2.52% |
246
+
247
+ | video_images_load_time_ratio | vcodec | pix_fmt | | | |
248
+ |------------------------------------|---------|---------|----------|---------|-----------|
249
+ | | libx264 | | libx265 | | libsvtav1 |
250
+ | repo_id | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
251
+ | lerobot/pusht_image | 6.45 | 5.19 | **1.90** | 2.12 | 2.47 |
252
+ | aliberts/aloha_mobile_shrimp_image | 11.80 | 7.92 | 0.71 | 0.85 | **0.48** |
253
+ | aliberts/paris_street | 2.21 | 2.05 | 0.36 | 0.49 | **0.30** |
254
+ | aliberts/kitchen | 1.46 | 1.46 | 0.28 | 0.51 | **0.26** |
255
+
256
+ | | | vcodec | pix_fmt | | | |
257
+ |------------------------------------|----------|----------|--------------|----------|-----------|--------------|
258
+ | | | libx264 | | libx265 | | libsvtav1 |
259
+ | repo_id | metric | yuv420p | yuv444p | yuv420p | yuv444p | yuv420p |
260
+ | lerobot/pusht_image | avg_mse | 2.90E-04 | **2.03E-04** | 3.13E-04 | 2.29E-04 | 2.19E-04 |
261
+ | | avg_psnr | 35.44 | 37.07 | 35.49 | **37.30** | 37.20 |
262
+ | | avg_ssim | 98.28% | **98.85%** | 98.31% | 98.84% | 98.72% |
263
+ | aliberts/aloha_mobile_shrimp_image | avg_mse | 2.76E-04 | 2.59E-04 | 3.17E-04 | 3.06E-04 | **1.30E-04** |
264
+ | | avg_psnr | 35.91 | 36.21 | 35.88 | 36.09 | **40.17** |
265
+ | | avg_ssim | 95.19% | 95.18% | 95.00% | 95.05% | **97.73%** |
266
+ | aliberts/paris_street | avg_mse | 6.89E-04 | 6.70E-04 | 4.03E-03 | 4.02E-03 | **3.09E-04** |
267
+ | | avg_psnr | 33.48 | 33.68 | 32.05 | 32.15 | **35.40** |
268
+ | | avg_ssim | 93.76% | 93.75% | 89.46% | 89.46% | **95.46%** |
269
+ | aliberts/kitchen | avg_mse | 2.50E-04 | 2.24E-04 | 4.28E-04 | 4.18E-04 | **1.53E-04** |
270
+ | | avg_psnr | 36.73 | 37.33 | 36.56 | 36.75 | **39.12** |
271
+ | | avg_ssim | 95.47% | 95.58% | 95.52% | 95.53% | **96.82%** |
benchmarks/video/capture_camera_feed.py ADDED
@@ -0,0 +1,102 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Capture video feed from a camera as raw images."""
17
+
18
+ import argparse
19
+ import datetime as dt
20
+ import os
21
+ import time
22
+ from pathlib import Path
23
+
24
+ import cv2
25
+ import rerun as rr
26
+
27
+ # see https://rerun.io/docs/howto/visualization/limit-ram
28
+ RERUN_MEMORY_LIMIT = os.getenv("LEROBOT_RERUN_MEMORY_LIMIT", "5%")
29
+
30
+
31
+ def display_and_save_video_stream(output_dir: Path, fps: int, width: int, height: int, duration: int):
32
+ rr.init("lerobot_capture_camera_feed")
33
+ rr.spawn(memory_limit=RERUN_MEMORY_LIMIT)
34
+
35
+ now = dt.datetime.now()
36
+ capture_dir = output_dir / f"{now:%Y-%m-%d}" / f"{now:%H-%M-%S}"
37
+ if not capture_dir.exists():
38
+ capture_dir.mkdir(parents=True, exist_ok=True)
39
+
40
+ # Opens the default webcam
41
+ cap = cv2.VideoCapture(0)
42
+ if not cap.isOpened():
43
+ print("Error: Could not open video stream.")
44
+ return
45
+
46
+ cap.set(cv2.CAP_PROP_FPS, fps)
47
+ cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
48
+ cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
49
+
50
+ frame_index = 0
51
+ start_time = time.time()
52
+ while time.time() - start_time < duration:
53
+ ret, frame = cap.read()
54
+
55
+ if not ret:
56
+ print("Error: Could not read frame.")
57
+ break
58
+ # OpenCV returns BGR numpy arrays; convert to RGB before logging to Rerun.
+ rr.log("video/stream", rr.Image(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)), static=True)
59
+ cv2.imwrite(str(capture_dir / f"frame_{frame_index:06d}.png"), frame)
60
+ frame_index += 1
61
+
62
+ # Release the capture
63
+ cap.release()
64
+
65
+ # TODO(Steven): Add a graceful shutdown via a close() method for the Viewer context, though not currently supported in the Rerun API.
66
+
67
+
68
+ if __name__ == "__main__":
69
+ parser = argparse.ArgumentParser()
70
+
71
+ parser.add_argument(
72
+ "--output-dir",
73
+ type=Path,
74
+ default=Path("outputs/cam_capture/"),
75
+ help="Directory where the capture images are written. A subfolder named with the current date & time will be created inside it for each capture.",
76
+ )
77
+ parser.add_argument(
78
+ "--fps",
79
+ type=int,
80
+ default=30,
81
+ help="Frames Per Second of the capture.",
82
+ )
83
+ parser.add_argument(
84
+ "--width",
85
+ type=int,
86
+ default=1280,
87
+ help="Width of the captured images.",
88
+ )
89
+ parser.add_argument(
90
+ "--height",
91
+ type=int,
92
+ default=720,
93
+ help="Height of the captured images.",
94
+ )
95
+ parser.add_argument(
96
+ "--duration",
97
+ type=int,
98
+ default=20,
99
+ help="Duration in seconds for which the video stream should be captured.",
100
+ )
101
+ args = parser.parse_args()
102
+ display_and_save_video_stream(**vars(args))
benchmarks/video/run_video_benchmark.py ADDED
@@ -0,0 +1,490 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """Assess the performance of video decoding in various configurations.
17
+
18
+ This script will benchmark different video encoding and decoding parameters.
19
+ See the provided README.md or run `python benchmarks/video/run_video_benchmark.py --help` for usage info.
20
+ """
21
+
22
+ import argparse
23
+ import datetime as dt
24
+ import random
25
+ import shutil
26
+ from collections import OrderedDict
27
+ from concurrent.futures import ThreadPoolExecutor, as_completed
28
+ from pathlib import Path
29
+
30
+ import einops
31
+ import numpy as np
32
+ import pandas as pd
33
+ import PIL
34
+ import torch
35
+ from skimage.metrics import mean_squared_error, peak_signal_noise_ratio, structural_similarity
36
+ from tqdm import tqdm
37
+
38
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
39
+ from lerobot.common.datasets.video_utils import (
40
+ decode_video_frames_torchvision,
41
+ encode_video_frames,
42
+ )
43
+ from lerobot.common.utils.benchmark import TimeBenchmark
44
+
45
+ BASE_ENCODING = OrderedDict(
46
+ [
47
+ ("vcodec", "libx264"),
48
+ ("pix_fmt", "yuv444p"),
49
+ ("g", 2),
50
+ ("crf", None),
51
+ # TODO(aliberts): Add fastdecode
52
+ # ("fastdecode", 0),
53
+ ]
54
+ )
55
+
56
+
57
+ # TODO(rcadene, aliberts): move to `utils.py` folder when we want to refactor
58
+ def parse_int_or_none(value) -> int | None:
59
+ if value.lower() == "none":
60
+ return None
61
+ try:
62
+ return int(value)
63
+ except ValueError as e:
64
+ raise argparse.ArgumentTypeError(f"Invalid int or None: {value}") from e
65
+
66
+
67
+ def check_datasets_formats(repo_ids: list) -> None:
68
+ for repo_id in repo_ids:
69
+ dataset = LeRobotDataset(repo_id)
70
+ if len(dataset.meta.video_keys) > 0:
71
+ raise ValueError(
72
+ f"Use only image dataset for running this benchmark. Video dataset provided: {repo_id}"
73
+ )
74
+
75
+
76
+ def get_directory_size(directory: Path) -> int:
77
+ total_size = 0
78
+ for item in directory.rglob("*"):
79
+ if item.is_file():
80
+ total_size += item.stat().st_size
81
+ return total_size
82
+
83
+
84
+ def load_original_frames(imgs_dir: Path, timestamps: list[float], fps: int) -> torch.Tensor:
85
+ frames = []
86
+ for ts in timestamps:
87
+ idx = int(ts * fps)
88
+ frame = PIL.Image.open(imgs_dir / f"frame_{idx:06d}.png")
89
+ frame = torch.from_numpy(np.array(frame))
90
+ frame = frame.type(torch.float32) / 255
91
+ frame = einops.rearrange(frame, "h w c -> c h w")
92
+ frames.append(frame)
93
+ return torch.stack(frames)
94
+
95
+
96
+ def save_decoded_frames(
97
+ imgs_dir: Path, save_dir: Path, frames: torch.Tensor, timestamps: list[float], fps: int
98
+ ) -> None:
99
+ if save_dir.exists() and len(list(save_dir.glob("frame_*.png"))) == len(timestamps):
100
+ return
101
+
102
+ save_dir.mkdir(parents=True, exist_ok=True)
103
+ for i, ts in enumerate(timestamps):
104
+ idx = int(ts * fps)
105
+ frame_hwc = (frames[i].permute((1, 2, 0)) * 255).type(torch.uint8).cpu().numpy()
106
+ PIL.Image.fromarray(frame_hwc).save(save_dir / f"frame_{idx:06d}_decoded.png")
107
+ shutil.copyfile(imgs_dir / f"frame_{idx:06d}.png", save_dir / f"frame_{idx:06d}_original.png")
108
+
109
+
110
+ def save_first_episode(imgs_dir: Path, dataset: LeRobotDataset) -> None:
111
+ ep_num_images = dataset.episode_data_index["to"][0].item()
112
+ if imgs_dir.exists() and len(list(imgs_dir.glob("frame_*.png"))) == ep_num_images:
113
+ return
114
+
115
+ imgs_dir.mkdir(parents=True, exist_ok=True)
116
+ hf_dataset = dataset.hf_dataset.with_format(None)
117
+
118
+ # We only save images from the first camera
119
+ img_keys = [key for key in hf_dataset.features if key.startswith("observation.image")]
120
+ imgs_dataset = hf_dataset.select_columns(img_keys[0])
121
+
122
+ for i, item in enumerate(
123
+ tqdm(imgs_dataset, desc=f"saving {dataset.repo_id} first episode images", leave=False)
124
+ ):
125
+ img = item[img_keys[0]]
126
+ img.save(str(imgs_dir / f"frame_{i:06d}.png"), quality=100)
127
+
128
+ if i >= ep_num_images - 1:
129
+ break
130
+
131
+
132
+ def sample_timestamps(timestamps_mode: str, ep_num_images: int, fps: int) -> list[float]:
133
+ # Start at 5 to allow for 2_frames_4_space and 6_frames
134
+ idx = random.randint(5, ep_num_images - 1)
135
+ match timestamps_mode:
136
+ case "1_frame":
137
+ frame_indexes = [idx]
138
+ case "2_frames":
139
+ frame_indexes = [idx - 1, idx]
140
+ case "2_frames_4_space":
141
+ frame_indexes = [idx - 5, idx]
142
+ case "6_frames":
143
+ frame_indexes = [idx - i for i in range(6)][::-1]
144
+ case _:
145
+ raise ValueError(timestamps_mode)
146
+
147
+ return [idx / fps for idx in frame_indexes]
148
+
149
+
150
+ def decode_video_frames(
151
+ video_path: str,
152
+ timestamps: list[float],
153
+ tolerance_s: float,
154
+ backend: str,
155
+ ) -> torch.Tensor:
156
+ if backend in ["pyav", "video_reader"]:
157
+ return decode_video_frames_torchvision(video_path, timestamps, tolerance_s, backend)
158
+ else:
159
+ raise NotImplementedError(backend)
160
+
161
+
162
+ def benchmark_decoding(
163
+ imgs_dir: Path,
164
+ video_path: Path,
165
+ timestamps_mode: str,
166
+ backend: str,
167
+ ep_num_images: int,
168
+ fps: int,
169
+ num_samples: int = 50,
170
+ num_workers: int = 4,
171
+ save_frames: bool = False,
172
+ ) -> dict:
173
+ def process_sample(sample: int):
174
+ time_benchmark = TimeBenchmark()
175
+ timestamps = sample_timestamps(timestamps_mode, ep_num_images, fps)
176
+ num_frames = len(timestamps)
177
+ result = {
178
+ "psnr_values": [],
179
+ "ssim_values": [],
180
+ "mse_values": [],
181
+ }
182
+
183
+ with time_benchmark:
184
+ frames = decode_video_frames(video_path, timestamps=timestamps, tolerance_s=5e-1, backend=backend)
185
+ result["load_time_video_ms"] = time_benchmark.result_ms / num_frames
186
+
187
+ with time_benchmark:
188
+ original_frames = load_original_frames(imgs_dir, timestamps, fps)
189
+ result["load_time_images_ms"] = time_benchmark.result_ms / num_frames
190
+
191
+ frames_np, original_frames_np = frames.numpy(), original_frames.numpy()
192
+ for i in range(num_frames):
193
+ result["mse_values"].append(mean_squared_error(original_frames_np[i], frames_np[i]))
194
+ result["psnr_values"].append(
195
+ peak_signal_noise_ratio(original_frames_np[i], frames_np[i], data_range=1.0)
196
+ )
197
+ result["ssim_values"].append(
198
+ structural_similarity(original_frames_np[i], frames_np[i], data_range=1.0, channel_axis=0)
199
+ )
200
+
201
+ if save_frames and sample == 0:
202
+ save_dir = video_path.with_suffix("") / f"{timestamps_mode}_{backend}"
203
+ save_decoded_frames(imgs_dir, save_dir, frames, timestamps, fps)
204
+
205
+ return result
206
+
207
+ load_times_video_ms = []
208
+ load_times_images_ms = []
209
+ mse_values = []
210
+ psnr_values = []
211
+ ssim_values = []
212
+
213
+ # A sample is a single set of decoded frames specified by timestamps_mode (e.g. a single frame, 2 frames, etc.).
214
+ # For each sample, we record metrics (loading time and quality metrics) which are then averaged over all samples.
215
+ # As these samples are independent, we run them in parallel threads to speed up the benchmark.
216
+ with ThreadPoolExecutor(max_workers=num_workers) as executor:
217
+ futures = [executor.submit(process_sample, i) for i in range(num_samples)]
218
+ for future in tqdm(as_completed(futures), total=num_samples, desc="samples", leave=False):
219
+ result = future.result()
220
+ load_times_video_ms.append(result["load_time_video_ms"])
221
+ load_times_images_ms.append(result["load_time_images_ms"])
222
+ psnr_values.extend(result["psnr_values"])
223
+ ssim_values.extend(result["ssim_values"])
224
+ mse_values.extend(result["mse_values"])
225
+
226
+ avg_load_time_video_ms = float(np.array(load_times_video_ms).mean())
227
+ avg_load_time_images_ms = float(np.array(load_times_images_ms).mean())
228
+ video_images_load_time_ratio = avg_load_time_video_ms / avg_load_time_images_ms
229
+
230
+ return {
231
+ "avg_load_time_video_ms": avg_load_time_video_ms,
232
+ "avg_load_time_images_ms": avg_load_time_images_ms,
233
+ "video_images_load_time_ratio": video_images_load_time_ratio,
234
+ "avg_mse": float(np.mean(mse_values)),
235
+ "avg_psnr": float(np.mean(psnr_values)),
236
+ "avg_ssim": float(np.mean(ssim_values)),
237
+ }
238
+
239
+
240
+ def benchmark_encoding_decoding(
241
+ dataset: LeRobotDataset,
242
+ video_path: Path,
243
+ imgs_dir: Path,
244
+ encoding_cfg: dict,
245
+ decoding_cfg: dict,
246
+ num_samples: int,
247
+ num_workers: int,
248
+ save_frames: bool,
249
+ overwrite: bool = False,
250
+ seed: int = 1337,
251
+ ) -> list[dict]:
252
+ fps = dataset.fps
253
+
254
+ if overwrite or not video_path.is_file():
255
+ tqdm.write(f"encoding {video_path}")
256
+ encode_video_frames(
257
+ imgs_dir=imgs_dir,
258
+ video_path=video_path,
259
+ fps=fps,
260
+ vcodec=encoding_cfg["vcodec"],
261
+ pix_fmt=encoding_cfg["pix_fmt"],
262
+ g=encoding_cfg.get("g"),
263
+ crf=encoding_cfg.get("crf"),
264
+ # fast_decode=encoding_cfg.get("fastdecode"),
265
+ overwrite=True,
266
+ )
267
+
268
+ ep_num_images = dataset.episode_data_index["to"][0].item()
269
+ width, height = tuple(dataset[0][dataset.meta.camera_keys[0]].shape[-2:])
270
+ num_pixels = width * height
271
+ video_size_bytes = video_path.stat().st_size
272
+ images_size_bytes = get_directory_size(imgs_dir)
273
+ video_images_size_ratio = video_size_bytes / images_size_bytes
274
+
275
+ random.seed(seed)
276
+ benchmark_table = []
277
+ for timestamps_mode in tqdm(
278
+ decoding_cfg["timestamps_modes"], desc="decodings (timestamps_modes)", leave=False
279
+ ):
280
+ for backend in tqdm(decoding_cfg["backends"], desc="decodings (backends)", leave=False):
281
+ benchmark_row = benchmark_decoding(
282
+ imgs_dir,
283
+ video_path,
284
+ timestamps_mode,
285
+ backend,
286
+ ep_num_images,
287
+ fps,
288
+ num_samples,
289
+ num_workers,
290
+ save_frames,
291
+ )
292
+ benchmark_row.update(
293
+ **{
294
+ "repo_id": dataset.repo_id,
295
+ "resolution": f"{width} x {height}",
296
+ "num_pixels": num_pixels,
297
+ "video_size_bytes": video_size_bytes,
298
+ "images_size_bytes": images_size_bytes,
299
+ "video_images_size_ratio": video_images_size_ratio,
300
+ "timestamps_mode": timestamps_mode,
301
+ "backend": backend,
302
+ },
303
+ **encoding_cfg,
304
+ )
305
+ benchmark_table.append(benchmark_row)
306
+
307
+ return benchmark_table
308
+
309
+
310
+ def main(
311
+ output_dir: Path,
312
+ repo_ids: list[str],
313
+ vcodec: list[str],
314
+ pix_fmt: list[str],
315
+ g: list[int],
316
+ crf: list[int],
317
+ # fastdecode: list[int],
318
+ timestamps_modes: list[str],
319
+ backends: list[str],
320
+ num_samples: int,
321
+ num_workers: int,
322
+ save_frames: bool,
323
+ ):
324
+ check_datasets_formats(repo_ids)
325
+ encoding_benchmarks = {
326
+ "g": g,
327
+ "crf": crf,
328
+ # "fastdecode": fastdecode,
329
+ }
330
+ decoding_benchmarks = {
331
+ "timestamps_modes": timestamps_modes,
332
+ "backends": backends,
333
+ }
334
+ headers = ["repo_id", "resolution", "num_pixels"]
335
+ headers += list(BASE_ENCODING.keys())
336
+ headers += [
337
+ "timestamps_mode",
338
+ "backend",
339
+ "video_size_bytes",
340
+ "images_size_bytes",
341
+ "video_images_size_ratio",
342
+ "avg_load_time_video_ms",
343
+ "avg_load_time_images_ms",
344
+ "video_images_load_time_ratio",
345
+ "avg_mse",
346
+ "avg_psnr",
347
+ "avg_ssim",
348
+ ]
349
+ file_paths = []
350
+ for video_codec in tqdm(vcodec, desc="encodings (vcodec)"):
351
+ for pixel_format in tqdm(pix_fmt, desc="encodings (pix_fmt)", leave=False):
352
+ benchmark_table = []
353
+ for repo_id in tqdm(repo_ids, desc="encodings (datasets)", leave=False):
354
+ dataset = LeRobotDataset(repo_id)
355
+ imgs_dir = output_dir / "images" / dataset.repo_id.replace("/", "_")
356
+ # We only use the first episode
357
+ save_first_episode(imgs_dir, dataset)
358
+ for key, values in tqdm(encoding_benchmarks.items(), desc="encodings (g, crf)", leave=False):
359
+ for value in tqdm(values, desc=f"encodings ({key})", leave=False):
360
+ encoding_cfg = BASE_ENCODING.copy()
361
+ encoding_cfg["vcodec"] = video_codec
362
+ encoding_cfg["pix_fmt"] = pixel_format
363
+ encoding_cfg[key] = value
364
+ args_path = Path("_".join(str(value) for value in encoding_cfg.values()))
365
+ video_path = output_dir / "videos" / args_path / f"{repo_id.replace('/', '_')}.mp4"
366
+ benchmark_table += benchmark_encoding_decoding(
367
+ dataset,
368
+ video_path,
369
+ imgs_dir,
370
+ encoding_cfg,
371
+ decoding_benchmarks,
372
+ num_samples,
373
+ num_workers,
374
+ save_frames,
375
+ )
376
+
377
+ # Save intermediate results
378
+ benchmark_df = pd.DataFrame(benchmark_table, columns=headers)
379
+ now = dt.datetime.now()
380
+ csv_path = (
381
+ output_dir
382
+ / f"{now:%Y-%m-%d}_{now:%H-%M-%S}_{video_codec}_{pixel_format}_{num_samples}-samples.csv"
383
+ )
384
+ benchmark_df.to_csv(csv_path, header=True, index=False)
385
+ file_paths.append(csv_path)
386
+ del benchmark_df
387
+
388
+ # Concatenate all results
389
+ df_list = [pd.read_csv(csv_path) for csv_path in file_paths]
390
+ concatenated_df = pd.concat(df_list, ignore_index=True)
391
+ concatenated_path = output_dir / f"{now:%Y-%m-%d}_{now:%H-%M-%S}_all_{num_samples}-samples.csv"
392
+ concatenated_df.to_csv(concatenated_path, header=True, index=False)
393
+
394
+
395
+ if __name__ == "__main__":
396
+ parser = argparse.ArgumentParser()
397
+ parser.add_argument(
398
+ "--output-dir",
399
+ type=Path,
400
+ default=Path("outputs/video_benchmark"),
401
+ help="Directory where the video benchmark outputs are written.",
402
+ )
403
+ parser.add_argument(
404
+ "--repo-ids",
405
+ type=str,
406
+ nargs="*",
407
+ default=[
408
+ "lerobot/pusht_image",
409
+ "aliberts/aloha_mobile_shrimp_image",
410
+ "aliberts/paris_street",
411
+ "aliberts/kitchen",
412
+ ],
413
+ help="Datasets repo-ids to test against. First episodes only are used. Must be images.",
414
+ )
415
+ parser.add_argument(
416
+ "--vcodec",
417
+ type=str,
418
+ nargs="*",
419
+ default=["libx264", "hevc", "libsvtav1"],
420
+ help="Video codecs to be tested",
421
+ )
422
+ parser.add_argument(
423
+ "--pix-fmt",
424
+ type=str,
425
+ nargs="*",
426
+ default=["yuv444p", "yuv420p"],
427
+ help="Pixel formats (chroma subsampling) to be tested",
428
+ )
429
+ parser.add_argument(
430
+ "--g",
431
+ type=parse_int_or_none,
432
+ nargs="*",
433
+ default=[1, 2, 3, 4, 5, 6, 10, 15, 20, 40, 100, None],
434
+ help="Group of pictures sizes to be tested.",
435
+ )
436
+ parser.add_argument(
437
+ "--crf",
438
+ type=parse_int_or_none,
439
+ nargs="*",
440
+ default=[0, 5, 10, 15, 20, 25, 30, 40, 50, None],
441
+ help="Constant rate factors to be tested.",
442
+ )
443
+ # parser.add_argument(
444
+ # "--fastdecode",
445
+ # type=int,
446
+ # nargs="*",
447
+ # default=[0, 1],
448
+ # help="Use the fastdecode tuning option. 0 disables it. "
449
+ # "For libx264 and libx265/hevc, only 1 is possible. "
450
+ # "For libsvtav1, 1, 2 or 3 are possible values with a higher number meaning a faster decoding optimization",
451
+ # )
452
+ parser.add_argument(
453
+ "--timestamps-modes",
454
+ type=str,
455
+ nargs="*",
456
+ default=[
457
+ "1_frame",
458
+ "2_frames",
459
+ "2_frames_4_space",
460
+ "6_frames",
461
+ ],
462
+ help="Timestamps scenarios to be tested.",
463
+ )
464
+ parser.add_argument(
465
+ "--backends",
466
+ type=str,
467
+ nargs="*",
468
+ default=["pyav", "video_reader"],
469
+ help="Torchvision decoding backend to be tested.",
470
+ )
471
+ parser.add_argument(
472
+ "--num-samples",
473
+ type=int,
474
+ default=50,
475
+ help="Number of samples for each encoding x decoding config.",
476
+ )
477
+ parser.add_argument(
478
+ "--num-workers",
479
+ type=int,
480
+ default=10,
481
+ help="Number of processes for parallelized sample processing.",
482
+ )
483
+ parser.add_argument(
484
+ "--save-frames",
485
+ type=int,
486
+ default=0,
487
+ help="Whether to save decoded frames or not. Enter a non-zero number for true.",
488
+ )
489
+ args = parser.parse_args()
490
+ main(**vars(args))
docker/lerobot-cpu/Dockerfile ADDED
@@ -0,0 +1,29 @@
1
+ # Configure image
2
+ ARG PYTHON_VERSION=3.10
3
+ FROM python:${PYTHON_VERSION}-slim
4
+
5
+ # Configure environment variables
6
+ ARG PYTHON_VERSION
7
+ ENV DEBIAN_FRONTEND=noninteractive
8
+ ENV MUJOCO_GL="egl"
9
+ ENV PATH="/opt/venv/bin:$PATH"
10
+
11
+ # Install dependencies and set up Python in a single layer
12
+ RUN apt-get update && apt-get install -y --no-install-recommends \
13
+ build-essential cmake git \
14
+ libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
15
+ speech-dispatcher libgeos-dev \
16
+ && ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python \
17
+ && python -m venv /opt/venv \
18
+ && apt-get clean && rm -rf /var/lib/apt/lists/* \
19
+ && echo "source /opt/venv/bin/activate" >> /root/.bashrc
20
+
21
+ # Clone repository and install LeRobot in a single layer
22
+ COPY . /lerobot
23
+ WORKDIR /lerobot
24
+ RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
25
+ && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht]" \
26
+ --extra-index-url https://download.pytorch.org/whl/cpu
27
+
28
+ # Execute in bash shell rather than python
29
+ CMD ["/bin/bash"]
docker/lerobot-gpu-dev/Dockerfile ADDED
@@ -0,0 +1,68 @@
1
+ FROM nvidia/cuda:12.2.2-devel-ubuntu22.04
2
+
3
+ # Configure image
4
+ ARG PYTHON_VERSION=3.10
5
+ ARG DEBIAN_FRONTEND=noninteractive
6
+
7
+ # Install apt dependencies
8
+ RUN apt-get update && apt-get install -y --no-install-recommends \
9
+ build-essential cmake \
10
+ git git-lfs openssh-client \
11
+ nano vim less util-linux tree \
12
+ htop atop nvtop \
13
+ sed gawk grep curl wget zip unzip \
14
+ tcpdump sysstat screen tmux \
15
+ libglib2.0-0 libgl1-mesa-glx libegl1-mesa \
16
+ speech-dispatcher portaudio19-dev libgeos-dev \
17
+ python${PYTHON_VERSION} python${PYTHON_VERSION}-venv python${PYTHON_VERSION}-dev \
18
+ && apt-get clean && rm -rf /var/lib/apt/lists/*
19
+
20
+ # Install ffmpeg build dependencies. See:
21
+ # https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu
22
+ # TODO(aliberts): create image to build dependencies from source instead
23
+ RUN apt-get update && apt-get install -y --no-install-recommends \
24
+ autoconf automake yasm \
25
+ libass-dev \
26
+ libfreetype6-dev \
27
+ libgnutls28-dev \
28
+ libunistring-dev \
29
+ libmp3lame-dev \
30
+ libtool \
31
+ libvorbis-dev \
32
+ meson \
33
+ ninja-build \
34
+ pkg-config \
35
+ texinfo \
36
+ yasm \
37
+ zlib1g-dev \
38
+ nasm \
39
+ libx264-dev \
40
+ libx265-dev libnuma-dev \
41
+ libvpx-dev \
42
+ libfdk-aac-dev \
43
+ libopus-dev \
44
+ libsvtav1-dev libsvtav1enc-dev libsvtav1dec-dev \
45
+ libdav1d-dev
46
+
47
+ # Install gh cli tool
48
+ RUN (type -p wget >/dev/null || (apt update && apt-get install wget -y)) \
49
+ && mkdir -p -m 755 /etc/apt/keyrings \
50
+ && wget -qO- https://cli.github.com/packages/githubcli-archive-keyring.gpg | tee /etc/apt/keyrings/githubcli-archive-keyring.gpg > /dev/null \
51
+ && chmod go+r /etc/apt/keyrings/githubcli-archive-keyring.gpg \
52
+ && echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
53
+ && apt update \
54
+ && apt install gh -y \
55
+ && apt clean && rm -rf /var/lib/apt/lists/*
56
+
57
+ # Setup `python`
58
+ RUN ln -s /usr/bin/python3 /usr/bin/python
59
+
60
+ # Install poetry
61
+ RUN curl -sSL https://install.python-poetry.org | python -
62
+ ENV PATH="/root/.local/bin:$PATH"
63
+ RUN echo 'if [ "$HOME" != "/root" ]; then ln -sf /root/.local/bin/poetry $HOME/.local/bin/poetry; fi' >> /root/.bashrc
64
+ RUN poetry config virtualenvs.create false
65
+ RUN poetry config virtualenvs.in-project true
66
+
67
+ # Set EGL as the rendering backend for MuJoCo
68
+ ENV MUJOCO_GL="egl"
docker/lerobot-gpu/Dockerfile ADDED
@@ -0,0 +1,24 @@
1
+ FROM nvidia/cuda:12.4.1-base-ubuntu22.04
2
+
3
+ # Configure environment variables
4
+ ARG PYTHON_VERSION=3.10
5
+ ENV DEBIAN_FRONTEND=noninteractive
6
+ ENV MUJOCO_GL="egl"
7
+ ENV PATH="/opt/venv/bin:$PATH"
8
+
9
+ # Install dependencies and set up Python in a single layer
10
+ RUN apt-get update && apt-get install -y --no-install-recommends \
11
+ build-essential cmake git \
12
+ libglib2.0-0 libgl1-mesa-glx libegl1-mesa ffmpeg \
13
+ speech-dispatcher libgeos-dev \
14
+ python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \
15
+ && ln -s /usr/bin/python${PYTHON_VERSION} /usr/bin/python \
16
+ && python -m venv /opt/venv \
17
+ && apt-get clean && rm -rf /var/lib/apt/lists/* \
18
+ && echo "source /opt/venv/bin/activate" >> /root/.bashrc
19
+
20
+ # Clone repository and install LeRobot in a single layer
21
+ COPY . /lerobot
22
+ WORKDIR /lerobot
23
+ RUN /opt/venv/bin/pip install --upgrade --no-cache-dir pip \
24
+ && /opt/venv/bin/pip install --no-cache-dir ".[test, aloha, xarm, pusht, dynamixel]"
docs/README.md ADDED
@@ -0,0 +1,137 @@
1
+ <!---
2
+ Copyright 2020 The HuggingFace Team. All rights reserved.
3
+
4
+ Licensed under the Apache License, Version 2.0 (the "License");
5
+ you may not use this file except in compliance with the License.
6
+ You may obtain a copy of the License at
7
+
8
+ http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ Unless required by applicable law or agreed to in writing, software
11
+ distributed under the License is distributed on an "AS IS" BASIS,
12
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ See the License for the specific language governing permissions and
14
+ limitations under the License.
15
+ -->
16
+
17
+ # Generating the documentation
18
+
19
+ To generate the documentation, you first have to build it. Several packages are necessary to build the doc;
20
+ you can install them with the following command, at the root of the code repository:
21
+
22
+ ```bash
23
+ pip install -e ".[docs]"
24
+ ```
25
+
26
+ You will also need `nodejs`. Please refer to its [installation page](https://nodejs.org/en/download).
27
+
28
+ ---
29
+ **NOTE**
30
+
31
+ You only need to generate the documentation to inspect it locally (if you're planning changes and want to
32
+ check how they look before committing for instance). You don't have to `git commit` the built documentation.
33
+
34
+ ---
35
+
36
+ ## Building the documentation
37
+
38
+ Once you have set up `doc-builder` and the additional packages, you can generate the documentation by
39
+ typing the following command:
40
+
41
+ ```bash
42
+ doc-builder build lerobot docs/source/ --build_dir ~/tmp/test-build
43
+ ```
44
+
45
+ You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
46
+ the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
47
+ Markdown editor.
48
+
49
+ ## Previewing the documentation
50
+
51
+ To preview the docs, first install the `watchdog` module with:
52
+
53
+ ```bash
54
+ pip install watchdog
55
+ ```
56
+
57
+ Then run the following command:
58
+
59
+ ```bash
60
+ doc-builder preview lerobot docs/source/
61
+ ```
62
+
63
+ The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR: a bot will add a comment with a link to where the documentation with your changes lives.
64
+
65
+ ---
66
+ **NOTE**
67
+
68
+ The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).
69
+
70
+ ---
71
+
72
+ ## Adding a new element to the navigation bar
73
+
74
+ Accepted files are Markdown (.md).
75
+
76
+ Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
77
+ the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/lerobot/blob/main/docs/source/_toctree.yml) file.
78
+
79
+ ## Renaming section headers and moving sections
80
+
81
+ It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in issues, forums, and social media, and it makes for a much better user experience if users reading those months later can still easily navigate to the originally intended information.
82
+
83
+ Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
84
+
85
+ So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
86
+
87
+ ```
88
+ Sections that were moved:
89
+
90
+ [ <a href="#section-b">Section A</a><a id="section-a"></a> ]
91
+ ```
92
+ and of course, if you moved it to another file, then:
93
+
94
+ ```
95
+ Sections that were moved:
96
+
97
+ [ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
98
+ ```
99
+
100
+ Use the relative style to link to the new file so that the versioned docs continue to work.
101
+
102
+ For an example of a rich moved sections set please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).
103
+
104
+ ### Adding a new tutorial
105
+
106
+ Adding a new tutorial or section is done in two steps:
107
+
108
+ - Add a new file under `./source`. This file should be in Markdown (.md).
109
+ - Link that file in `./source/_toctree.yml` on the correct toc-tree.
110
+
111
+ Make sure to put your new file under the proper section. If in doubt, feel free to ask in a GitHub issue or PR.
112
+
113
+ ### Writing source documentation
114
+
115
+ Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names
116
+ and objects like True, None or any strings should usually be put in `code`.
117
+
118
+ #### Writing a multi-line code block
119
+
120
+ Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:
121
+
122
+
123
+ ````
124
+ ```
125
+ # first line of code
126
+ # second line
127
+ # etc
128
+ ```
129
+ ````
130
+
131
+ #### Adding an image
132
+
133
+ Because the repository is growing rapidly, it is important to make sure that no files that would significantly weigh it down are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
134
+ the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
135
+ them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
136
+ If you are making an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
137
+ to this dataset.
docs/source/_toctree.yml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ - sections:
2
+ - local: index
3
+ title: LeRobot
4
+ - local: installation
5
+ title: Installation
6
+ title: Get started
7
+ - sections:
8
+ - local: getting_started_real_world_robot
9
+ title: Getting Started with Real-World Robots
10
+ - local: cameras
11
+ title: Cameras
12
+ title: "Tutorials"
13
+ - sections:
14
+ - local: so101
15
+ title: SO-101
16
+ - local: so100
17
+ title: SO-100
18
+ - local: koch
19
+ title: Koch v1.1
20
+ - local: lekiwi
21
+ title: LeKiwi
22
+ title: "Robots"
23
+ - sections:
24
+ - local: contributing
25
+ title: Contribute to LeRobot
26
+ - local: backwardcomp
27
+ title: Backward compatibility
28
+ title: "About"
docs/source/backwardcomp.mdx ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Backward compatibility
2
+
3
+ ## Hardware API redesign
4
+
5
+ PR [#777](https://github.com/huggingface/lerobot/pull/777) improves the LeRobot calibration but is **not backward-compatible**. Below is an overview of what changed and how you can continue to work with datasets created before this pull request.
6
+
7
+ ### What changed?
8
+
9
+ | | Before PR #777 | After PR #777 |
10
+ | --------------------------------- | ------------------------------------------------- | --------------------------------------------------------------------------- |
11
+ | **Joint range** | Degrees `-180...180°` | **Normalised range** Joints: `-100...100` Gripper: `0...100` |
12
+ | **Zero position (SO100 / SO101)** | Arm fully extended horizontally | **In middle of the range for each joint** |
13
+ | **Boundary handling** | Software safeguards to detect ±180° wrap-arounds | No wrap-around logic needed due to mid-range zero |
14
+
15
+ ---
16
+
17
+ ### Impact on existing datasets
18
+
19
+ * Recorded trajectories created **before** PR #777 will replay incorrectly if loaded directly:
20
+ * Joint angles are offset and incorrectly normalized.
21
+ * Any models directly finetuned or trained on the old data will need their inputs and outputs converted.
22
+
23
+ ### Using datasets made with the previous calibration system
24
+ We provide a migration example script for replaying an episode recorded with the previous calibration here: `examples/backward_compatibility/replay.py`.
25
+ Below we walk through the modifications made in the example script so that datasets recorded with the previous calibration work correctly.
26
+
27
+ ```diff
28
+ + key = f"{name.removeprefix('main_')}.pos"
29
+ action[key] = action_array[i].item()
30
+ + action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
31
+ + action["elbow_flex.pos"] -= 90
32
+ ```
33
+
34
+ Let's break this down.
35
+ The new codebase uses the `.pos` suffix for position observations and drops the `main_` prefix:
36
+ ```python
37
+ key = f"{name.removeprefix('main_')}.pos"
38
+ ```
39
+
40
+ For `"shoulder_lift"` (id = 2), the 0 position is shifted by -90 degrees and the direction is reversed compared to the old calibration/code.
41
+ ```python
42
+ action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
43
+ ```
44
+ For `"elbow_flex"` (id = 3), the 0 position is shifted by -90 degrees compared to the old calibration/code.
45
+ ```python
46
+ action["elbow_flex.pos"] -= 90
47
+ ```
48
+
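+ For convenience, the same steps can be wrapped in a small helper. This is a minimal sketch, not part of the LeRobot API: the function name is illustrative, `action_array` stands for one frame's `action` tensor from the old dataset, and `motor_names` for the corresponding motor names.
+ ```python
+ def convert_old_action(action_array, motor_names):
+     """Convert an action recorded with the old calibration to the new convention."""
+     action = {}
+     for i, name in enumerate(motor_names):
+         # New key format: drop the "main_" prefix and add the ".pos" suffix.
+         key = f"{name.removeprefix('main_')}.pos"
+         action[key] = action_array[i].item()
+     # Per-joint offsets for SO100/SO101, as explained above.
+     action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
+     action["elbow_flex.pos"] -= 90
+     return action
+ ```
+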
49
+ To use degrees normalization, we then set the `--robot.use_degrees` option to `true`:
50
+ ```diff
51
+ python examples/backward_compatibility/replay.py \
52
+ --robot.type=so101_follower \
53
+ --robot.port=/dev/tty.usbmodem5A460814411 \
54
+ --robot.id=blue \
55
+ + --robot.use_degrees=true \
56
+ --dataset.repo_id=my_dataset_id \
57
+ --dataset.episode=0
58
+ ```
59
+
60
+ ### Using policies trained with the previous calibration system
61
+
62
+ Policies output actions in the same format as the datasets (`torch.Tensors`). Therefore, the same transformations should be applied.
63
+
64
+ To find these transformations, we recommend first replaying an episode of the dataset your policy was trained on, following the section above.
65
+ Then, add the same transformations to your inference script (shown here in the `record.py` script):
66
+ ```diff
67
+ action_values = predict_action(
68
+ observation_frame,
69
+ policy,
70
+ get_safe_torch_device(policy.config.device),
71
+ policy.config.use_amp,
72
+ task=single_task,
73
+ robot_type=robot.robot_type,
74
+ )
75
+ action = {key: action_values[i].item() for i, key in enumerate(robot.action_features)}
76
+
77
+ + action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
78
+ + action["elbow_flex.pos"] -= 90
79
+ robot.send_action(action)
80
+ ```
81
+
82
+ If you have questions or run into migration issues, feel free to ask them on [Discord](https://discord.gg/s3KuuzsPFb).
docs/source/cameras.mdx ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Cameras
2
+
3
+ LeRobot offers multiple options for video capture, including phone cameras, built-in laptop cameras, external webcams, and Intel RealSense cameras. To efficiently record frames from most cameras, you can use either the `OpenCVCamera` or `RealSenseCamera` class. For additional compatibility details on the `OpenCVCamera` class, refer to the [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).
4
+
5
+ ### Finding your camera
6
+
7
+ To instantiate a camera, you need a camera identifier. This identifier might change if you reboot your computer or re-plug your camera, a behavior mostly dependent on your operating system.
8
+
9
+ To find the camera indices of the cameras plugged into your system, run the following script:
10
+ ```bash
11
+ python lerobot/find_cameras.py opencv # or realsense for Intel Realsense cameras
12
+ ```
13
+
14
+ The output will look something like this if you have two cameras connected:
15
+ ```
16
+ --- Detected Cameras ---
17
+ Camera #0:
18
+ Name: OpenCV Camera @ 0
19
+ Type: OpenCV
20
+ Id: 0
21
+ Backend api: AVFOUNDATION
22
+ Default stream profile:
23
+ Format: 16.0
24
+ Width: 1920
25
+ Height: 1080
26
+ Fps: 15.0
27
+ --------------------
28
+ (more cameras ...)
29
+ ```
30
+
31
+ > [!WARNING]
32
+ > When using Intel RealSense cameras on `macOS`, you could get this [error](https://github.com/IntelRealSense/librealsense/issues/12307): `Error finding RealSense cameras: failed to set power state`. This can be solved by running the same command with `sudo` permissions. Note that using RealSense cameras on `macOS` is unstable.
33
+
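+ If you prefer to probe camera indices directly from Python, the sketch below uses plain OpenCV. This is an illustrative snippet, not part of the LeRobot API; it assumes `opencv-python` is installed and only checks the first few indices.
+ ```python
+ import cv2
+
+ # Try the first few indices and report which ones deliver frames.
+ for index in range(4):
+     cap = cv2.VideoCapture(index)
+     if cap.isOpened():
+         ok, frame = cap.read()
+         if ok:
+             print(f"Camera index {index}: frame shape {frame.shape}")
+     cap.release()
+ ```
+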
34
+
35
+ ## Use Cameras
36
+
37
+ Below are two examples demonstrating how to work with the API:
38
+
39
+ - **Asynchronous frame capture** using an OpenCV-based camera
40
+ - **Color and depth capture** using an Intel RealSense camera
41
+
42
+
43
+ <hfoptions id="shell_restart">
44
+ <hfoption id="Open CV Camera">
45
+
46
+ ```python
47
+ from lerobot.common.cameras.opencv.configuration_opencv import OpenCVCameraConfig
48
+ from lerobot.common.cameras.opencv.camera_opencv import OpenCVCamera
49
+ from lerobot.common.cameras.configs import ColorMode, Cv2Rotation
50
+
51
+ # Construct an `OpenCVCameraConfig` with your desired FPS, resolution, color mode, and rotation.
52
+ config = OpenCVCameraConfig(
53
+ index_or_path=0,
54
+ fps=15,
55
+ width=1920,
56
+ height=1080,
57
+ color_mode=ColorMode.RGB,
58
+ rotation=Cv2Rotation.NO_ROTATION
59
+ )
60
+
61
+ # Instantiate and connect an `OpenCVCamera`, performing a warm-up read (default).
62
+ camera = OpenCVCamera(config)
63
+ camera.connect()
64
+
65
+ # Read frames asynchronously in a loop via `async_read(timeout_ms)`
66
+ try:
67
+ for i in range(10):
68
+ frame = camera.async_read(timeout_ms=200)
69
+ print(f"Async frame {i} shape:", frame.shape)
70
+ finally:
71
+ camera.disconnect()
72
+ ```
73
+
74
+ </hfoption>
75
+ <hfoption id="Intel Realsense Camera">
76
+
77
+ ```python
78
+ from lerobot.common.cameras.realsense.configuration_realsense import RealSenseCameraConfig
79
+ from lerobot.common.cameras.realsense.camera_realsense import RealSenseCamera
80
+ from lerobot.common.cameras.configs import ColorMode, Cv2Rotation
81
+
82
+ # Create a `RealSenseCameraConfig` specifying your camera’s serial number and enabling depth.
83
+ config = RealSenseCameraConfig(
84
+ serial_number_or_name="233522074606",
85
+ fps=15,
86
+ width=640,
87
+ height=480,
88
+ color_mode=ColorMode.RGB,
89
+ use_depth=True,
90
+ rotation=Cv2Rotation.NO_ROTATION
91
+ )
92
+
93
+ # Instantiate and connect a `RealSenseCamera` with warm-up read (default).
94
+ camera = RealSenseCamera(config)
95
+ camera.connect()
96
+
97
+ # Capture a color frame via `read()` and a depth map via `read_depth()`.
98
+ try:
99
+ color_frame = camera.read()
100
+ depth_map = camera.read_depth()
101
+ print("Color frame shape:", color_frame.shape)
102
+ print("Depth map shape:", depth_map.shape)
103
+ finally:
104
+ camera.disconnect()
105
+ ```
106
+ </hfoption>
107
+ </hfoptions>
108
+
109
+
110
+ ## Use your phone
111
+ <hfoptions id="use phone">
112
+ <hfoption id="Mac">
113
+
114
+ To use your iPhone as a camera on macOS, enable the Continuity Camera feature:
115
+ - Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later.
116
+ - Sign in both devices with the same Apple ID.
117
+ - Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection.
118
+
119
+ For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac).
120
+
121
+ Your iPhone should be detected automatically when running the camera setup script in the next section.
122
+
123
+ </hfoption>
124
+ <hfoption id="Linux">
125
+
126
+ If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera:
127
+
128
+ 1. *Install `v4l2loopback-dkms` and `v4l-utils`*. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using:
129
+ ```bash
130
+ sudo apt install v4l2loopback-dkms v4l-utils
131
+ ```
132
+ 2. *Install [DroidCam](https://droidcam.app) on your phone*. This app is available for both iOS and Android.
133
+ 3. *Install [OBS Studio](https://obsproject.com)*. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org):
134
+ ```bash
135
+ flatpak install flathub com.obsproject.Studio
136
+ ```
137
+ 4. *Install the DroidCam OBS plugin*. This plugin integrates DroidCam with OBS Studio. Install it with:
138
+ ```bash
139
+ flatpak install flathub com.obsproject.Studio.Plugin.DroidCam
140
+ ```
141
+ 5. *Start OBS Studio*. Launch with:
142
+ ```bash
143
+ flatpak run com.obsproject.Studio
144
+ ```
145
+ 6. *Add your phone as a source*. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`.
146
+ 7. *Adjust resolution settings*. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in.
147
+ 8. *Start virtual camera*. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide).
148
+ 9. *Verify the virtual camera setup*. Use `v4l2-ctl` to list the devices:
149
+ ```bash
150
+ v4l2-ctl --list-devices
151
+ ```
152
+ You should see an entry like:
153
+ ```
154
+ VirtualCam (platform:v4l2loopback-000):
155
+ /dev/video1
156
+ ```
157
+ 10. *Check the camera resolution*. Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Change `/dev/video1` to the port of your virtual camera from the output of `v4l2-ctl --list-devices`.
158
+ ```bash
159
+ v4l2-ctl -d /dev/video1 --get-fmt-video
160
+ ```
161
+ You should see an entry like:
162
+ ```
163
+ Format Video Capture:
164
+ Width/Height : 640/480
165
+ Pixel Format : 'YUYV' (YUYV 4:2:2)
166
+ ```
167
+
168
+ Troubleshooting: If the resolution is not correct, you will have to delete the virtual camera device and try again, as it cannot be changed once created.
169
+
170
+ If everything is set up correctly, you can proceed with the rest of the tutorial.
171
+
172
+ </hfoption>
173
+ </hfoptions>
docs/source/contributing.md ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../CONTRIBUTING.md
docs/source/getting_started_real_world_robot.mdx ADDED
@@ -0,0 +1,311 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Getting Started with Real-World Robots
2
+
3
+ This tutorial will explain how to train a neural network to control a real robot autonomously.
4
+
5
+ **You'll learn:**
6
+ 1. How to record and visualize your dataset.
7
+ 2. How to train a policy using your data and prepare it for evaluation.
8
+ 3. How to evaluate your policy and visualize the results.
9
+
10
+ By following these steps, you'll be able to replicate tasks, such as picking up a Lego block and placing it in a bin with a high success rate, as shown in the video below.
11
+
12
+ <details>
13
+ <summary><strong>Video: pickup lego block task</strong></summary>
14
+
15
+ <div class="video-container">
16
+ <video controls width="600">
17
+ <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/lerobot_task.mp4" type="video/mp4" />
18
+ </video>
19
+ </div>
20
+
21
+ </details>
22
+
23
+ This tutorial isn’t tied to a specific robot: we walk you through the commands and API snippets you can adapt for any supported platform.
24
+
25
+ During data collection, you’ll use a teleoperation device, such as a leader arm or a keyboard, to teleoperate the robot and record its motion trajectories.
26
+
27
+ Once you’ve gathered enough trajectories, you’ll train a neural network to imitate these trajectories and deploy the trained model so your robot can perform the task autonomously.
28
+
29
+ If you run into any issues at any point, jump into our [Discord community](https://discord.com/invite/s3KuuzsPFb) for support.
30
+
31
+ ## Set up and Calibrate
32
+
33
+ If you haven't yet set up and calibrated your robot and teleop device, please do so by following the robot-specific tutorial.
34
+
35
+ ## Teleoperate
36
+
37
+ In this example, we’ll demonstrate how to teleoperate the SO101 robot. For each command, we also provide a corresponding API example.
38
+
39
+ Note that the `id` associated with a robot is used to store the calibration file. It's important to use the same `id` when teleoperating, recording, and evaluating when using the same setup.
40
+
41
+ <hfoptions id="teleoperate_so101">
42
+ <hfoption id="Command">
43
+ ```bash
44
+ python -m lerobot.teleoperate \
45
+ --robot.type=so101_follower \
46
+ --robot.port=/dev/tty.usbmodem58760431541 \
47
+ --robot.id=my_awesome_follower_arm \
48
+ --teleop.type=so101_leader \
49
+ --teleop.port=/dev/tty.usbmodem58760431551 \
50
+ --teleop.id=my_awesome_leader_arm
51
+ ```
52
+ </hfoption>
53
+ <hfoption id="API example">
54
+ ```python
55
+ from lerobot.common.teleoperators.so101_leader import SO101LeaderConfig, SO101Leader
56
+ from lerobot.common.robots.so101_follower import SO101FollowerConfig, SO101Follower
57
+
58
+ robot_config = SO101FollowerConfig(
59
+ port="/dev/tty.usbmodem58760431541",
60
+ id="my_red_robot_arm",
61
+ )
62
+
63
+ teleop_config = SO101LeaderConfig(
64
+ port="/dev/tty.usbmodem58760431551",
65
+ id="my_blue_leader_arm",
66
+ )
67
+
68
+ robot = SO101Follower(robot_config)
69
+ teleop_device = SO101Leader(teleop_config)
70
+ robot.connect()
71
+ teleop_device.connect()
72
+
73
+ while True:
74
+ action = teleop_device.get_action()
75
+ robot.send_action(action)
76
+ ```
77
+ </hfoption>
78
+ </hfoptions>
79
+
80
+ The teleoperate command will automatically:
81
+ 1. Identify any missing calibrations and initiate the calibration procedure.
82
+ 2. Connect the robot and teleop device and start teleoperation.
83
+
84
+ ## Cameras
85
+
86
+ To add cameras to your setup, follow this [Guide](./cameras).
87
+
88
+ ## Teleoperate with cameras
89
+
90
+ With `rerun`, you can teleoperate again while simultaneously visualizing the camera feeds and joint positions. In this example, we’re using the Koch arm.
91
+
92
+ <hfoptions id="teleoperate_koch_camera">
93
+ <hfoption id="Command">
94
+ ```bash
95
+ python -m lerobot.teleoperate \
96
+ --robot.type=koch_follower \
97
+ --robot.port=/dev/tty.usbmodem58760431541 \
98
+ --robot.id=my_awesome_follower_arm \
99
+ --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
100
+ --teleop.type=koch_leader \
101
+ --teleop.port=/dev/tty.usbmodem58760431551 \
102
+ --teleop.id=my_awesome_leader_arm \
103
+ --display_data=true
104
+ ```
105
+ </hfoption>
106
+ <hfoption id="API example">
107
+ ```python
108
+ from lerobot.common.cameras.opencv.configuration_opencv import OpenCVCameraConfig
109
+ from lerobot.common.teleoperators.koch_leader import KochLeaderConfig, KochLeader
110
+ from lerobot.common.robots.koch_follower import KochFollowerConfig, KochFollower
111
+
112
+ camera_config = {
113
+ "front": OpenCVCameraConfig(index_or_path=0, width=1920, height=1080, fps=30)
114
+ }
115
+
116
+ robot_config = KochFollowerConfig(
117
+ port="/dev/tty.usbmodem585A0076841",
118
+ id="my_red_robot_arm",
119
+ cameras=camera_config
120
+ )
121
+
122
+ teleop_config = KochLeaderConfig(
123
+ port="/dev/tty.usbmodem58760431551",
124
+ id="my_blue_leader_arm",
125
+ )
126
+
127
+ robot = KochFollower(robot_config)
128
+ teleop_device = KochLeader(teleop_config)
129
+ robot.connect()
130
+ teleop_device.connect()
131
+
132
+ while True:
133
+ observation = robot.get_observation()
134
+ action = teleop_device.get_action()
135
+ robot.send_action(action)
136
+ ```
137
+ </hfoption>
138
+ </hfoptions>
139
+
140
+ ## Record a dataset
141
+
142
+ Once you're familiar with teleoperation, you can record your first dataset.
143
+
144
+ We use the Hugging Face Hub features to upload your dataset. If you haven't previously used the Hub, make sure you can log in via the CLI using a write-access token; this token can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens).
145
+
146
+ Add your token to the CLI by running this command:
147
+ ```bash
148
+ huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
149
+ ```
150
+
151
+ Then store your Hugging Face repository name in a variable:
152
+ ```bash
153
+ HF_USER=$(huggingface-cli whoami | head -n 1)
154
+ echo $HF_USER
155
+ ```
156
+
157
+ Now you can record a dataset. To record 2 episodes and upload your dataset to the hub, execute this command tailored to the SO101:
158
+ ```bash
159
+ python -m lerobot.record \
160
+ --robot.type=so101_follower \
161
+ --robot.port=/dev/tty.usbmodem585A0076841 \
162
+ --robot.id=my_awesome_follower_arm \
163
+ --robot.cameras="{ front: {type: opencv, index_or_path: 0, width: 1920, height: 1080, fps: 30}}" \
164
+ --teleop.type=so101_leader \
165
+ --teleop.port=/dev/tty.usbmodem58760431551 \
166
+ --teleop.id=my_awesome_leader_arm \
167
+ --display_data=true \
168
+ --dataset.repo_id=${HF_USER}/record-test \
169
+ --dataset.num_episodes=2 \
170
+ --dataset.single_task="Grab the black cube"
171
+ ```
172
+
173
+ #### Dataset upload
174
+ Locally, your dataset is stored in this folder: `~/.cache/huggingface/lerobot/{repo-id}`. At the end of data recording, your dataset will be uploaded to your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/so101_test), whose URL you can obtain by running:
175
+ ```bash
176
+ echo https://huggingface.co/datasets/${HF_USER}/so101_test
177
+ ```
178
+ Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
179
+
180
+ You can look for other LeRobot datasets on the hub by searching for `LeRobot` [tags](https://huggingface.co/datasets?other=LeRobot).
181
+
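+ You can also query these tags programmatically with the Hub API, as shown in `examples/1_load_lerobot_dataset.py`:
+ ```python
+ from huggingface_hub import HfApi
+
+ # List the repo ids of robotics datasets tagged with "LeRobot" on the hub.
+ hub_api = HfApi()
+ repo_ids = [info.id for info in hub_api.list_datasets(task_categories="robotics", tags=["LeRobot"])]
+ print(repo_ids[:10])
+ ```
+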
182
+ #### Record function
183
+
184
+ The `record` function provides a suite of tools for capturing and managing data during robot operation:
185
+
186
+ ##### 1. Data Storage
187
+ - Data is stored using the `LeRobotDataset` format and saved to disk during recording.
188
+ - By default, the dataset is pushed to your Hugging Face page after recording.
189
+ - To disable uploading, use `--dataset.push_to_hub=False`.
190
+
191
+ ##### 2. Checkpointing and Resuming
192
+ - Checkpoints are automatically created during recording.
193
+ - If an issue occurs, you can resume by re-running the same command with `--control.resume=true`.
194
+ - To start recording from scratch, **manually delete** the dataset directory.
195
+
196
+ ##### 3. Recording Parameters
197
+ Set the flow of data recording using command-line arguments:
198
+ - `--dataset.episode_time_s=60`
199
+ Duration of each data recording episode (default: **60 seconds**).
200
+ - `--dataset.reset_time_s=60`
201
+ Duration for resetting the environment after each episode (default: **60 seconds**).
202
+ - `--dataset.num_episodes=50`
203
+ Total number of episodes to record (default: **50**).
204
+
205
+ ##### 4. Keyboard Controls During Recording
206
+ Control the data recording flow using keyboard shortcuts:
207
+ - Press **Right Arrow (`→`)**: Stop the current episode (or the reset phase) early and move to the next.
208
+ - Press **Left Arrow (`←`)**: Cancel the current episode and re-record it.
209
+ - Press **Escape (`ESC`)**: Immediately stop the session, encode videos, and upload the dataset.
210
+
211
+ #### Tips for gathering data
212
+
213
+ Once you're comfortable with data recording, you can create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings. Also make sure the object you are manipulating is visible in the cameras. A good rule of thumb is that you should be able to do the task yourself by only looking at the camera images.
214
+
215
+ In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions.
216
+
217
+ Avoid adding too much variation too quickly, as it may hinder your results.
218
+
219
+ If you want to dive deeper into this important topic, you can check out the [blog post](https://huggingface.co/blog/lerobot-datasets#what-makes-a-good-dataset) we wrote on what makes a good dataset.
220
+
221
+
222
+ #### Troubleshooting:
223
+ - On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
224
+
225
+ ## Visualize a dataset
226
+
227
+ If you uploaded your dataset to the hub with `--dataset.push_to_hub=true` (the default), you can [visualize your dataset online](https://huggingface.co/spaces/lerobot/visualize_dataset) by copy-pasting your repo id given by:
228
+ ```bash
229
+ echo ${HF_USER}/so101_test
230
+ ```
231
+
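+ You can also load the recorded dataset with the Python API to sanity-check its contents. This is a minimal sketch; replace the repo id with the one you used when recording (e.g. `${HF_USER}/record-test`):
+ ```python
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+
+ dataset = LeRobotDataset("your-hf-username/record-test")  # hypothetical repo id, use your own
+ print(f"Number of episodes: {dataset.num_episodes}")
+ print(f"Number of frames: {dataset.num_frames}")
+ print(dataset.meta)
+ ```
+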
232
+ ## Replay an episode
233
+
234
+ A useful feature is the `replay` function, which allows you to replay any episode that you've recorded or episodes from any dataset out there. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model.
235
+
236
+ You can replay the first episode on your robot with:
237
+ ```bash
238
+ python -m lerobot.replay \
239
+ --robot.type=so101_follower \
240
+ --robot.port=/dev/tty.usbmodem58760431541 \
241
+ --robot.id=my_awesome_follower_arm \
242
+ --dataset.repo_id=${HF_USER}/record-test \
243
+ --dataset.episode=0 # choose the episode you want to replay
244
+ ```
245
+
246
+ Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on an Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com).
247
+
248
+ ## Train a policy
249
+
250
+ To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
251
+ ```bash
252
+ python lerobot/scripts/train.py \
253
+ --dataset.repo_id=${HF_USER}/so101_test \
254
+ --policy.type=act \
255
+ --output_dir=outputs/train/act_so101_test \
256
+ --job_name=act_so101_test \
257
+ --policy.device=cuda \
258
+ --wandb.enable=true
259
+ ```
260
+
261
+ Let's explain the command:
262
+ 1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/so101_test`.
263
+ 2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
264
+ 3. We provided `policy.device=cuda` since we are training on an Nvidia GPU, but you could use `policy.device=mps` to train on Apple silicon.
265
+ 4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional, but if you use it, make sure you are logged in by running `wandb login`.
266
+
267
+ Training should take several hours. You will find checkpoints in `outputs/train/act_so101_test/checkpoints`.
268
+
269
+ To resume training from a checkpoint, below is an example command to resume from `last` checkpoint of the `act_so101_test` policy:
270
+ ```bash
271
+ python lerobot/scripts/train.py \
272
+ --config_path=outputs/train/act_so101_test/checkpoints/last/pretrained_model/train_config.json \
273
+ --resume=true
274
+ ```
275
+
276
+ #### Upload policy checkpoints
277
+
278
+ Once training is done, upload the latest checkpoint with:
279
+ ```bash
280
+ huggingface-cli upload ${HF_USER}/act_so101_test \
281
+ outputs/train/act_so101_test/checkpoints/last/pretrained_model
282
+ ```
283
+
284
+ You can also upload intermediate checkpoints with:
285
+ ```bash
286
+ CKPT=010000
287
+ huggingface-cli upload ${HF_USER}/act_so101_test${CKPT} \
288
+ outputs/train/act_so101_test/checkpoints/${CKPT}/pretrained_model
289
+ ```
290
+
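+ If you prefer Python over the CLI, the same upload can be done with `huggingface_hub`. This is a minimal sketch; adjust the repo id and checkpoint path to your run:
+ ```python
+ from huggingface_hub import HfApi
+
+ api = HfApi()
+ api.upload_folder(
+     folder_path="outputs/train/act_so101_test/checkpoints/last/pretrained_model",
+     repo_id="your-hf-username/act_so101_test",  # i.e. ${HF_USER}/act_so101_test
+     repo_type="model",
+ )
+ ```
+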
291
+ ## Evaluate your policy
292
+
293
+ You can use the `record` script from [`lerobot/record.py`](https://github.com/huggingface/lerobot/blob/main/lerobot/record.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
294
+ ```bash
295
+ python -m lerobot.record \
296
+ --robot.type=so100_follower \
297
+ --robot.port=/dev/ttyACM1 \
298
+ --robot.cameras="{ up: {type: opencv, index_or_path: /dev/video10, width: 640, height: 480, fps: 30}, side: {type: intelrealsense, serial_number_or_name: 233522074606, width: 640, height: 480, fps: 30}}" \
299
+ --robot.id=my_awesome_follower_arm \
300
+ --teleop.type=so100_leader \
301
+ --teleop.port=/dev/ttyACM0 \
302
+ --teleop.id=my_awesome_leader_arm \
303
+ --display_data=false \
304
+ --dataset.repo_id=$HF_USER/eval_so100 \
305
+ --dataset.single_task="Put lego brick into the transparent box" \
306
+ --policy.path=${HF_USER}/my_policy
307
+ ```
308
+
309
+ As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
310
+ 1. There is an additional `--policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/eval_act_so101_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_so101_test`).
311
+ 2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_so101_test`).
docs/source/index.mdx ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <div class="flex justify-center">
2
+ <a target="_blank" href="https://huggingface.co/lerobot">
3
+ <img alt="HuggingFace Expert Acceleration Program" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/lerobot/lerobot-logo-thumbnail.png" style="width: 100%"></img>
4
+ </a>
5
+ </div>
6
+
7
+ # LeRobot
8
+
9
+ **State-of-the-art machine learning for real-world robotics**
10
+
11
+ 🤗 LeRobot aims to provide models, datasets, and tools for real-world robotics in PyTorch. The goal is to lower the barrier for entry to robotics so that everyone can contribute and benefit from sharing datasets and pretrained models.
12
+
13
+ 🤗 LeRobot contains state-of-the-art approaches that have been shown to transfer to the real-world with a focus on imitation learning and reinforcement learning.
14
+
15
+ 🤗 LeRobot already provides a set of pretrained models, datasets with human collected demonstrations, and simulated environments so that everyone can get started.
16
+
17
+ 🤗 LeRobot hosts pretrained models and datasets on the LeRobot HuggingFace page.
18
+
19
+ Join the LeRobot community on [Discord](https://discord.gg/s3KuuzsPFb)
docs/source/installation.mdx ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Installation
2
+
3
+ ## Install LeRobot
4
+
5
+ Currently only available from source.
6
+
7
+ Download our source code:
8
+ ```bash
9
+ git clone https://github.com/huggingface/lerobot.git
10
+ cd lerobot
11
+ ```
12
+
13
+ Create a virtual environment with Python 3.10 using [`Miniconda`](https://docs.anaconda.com/miniconda/install/#quick-command-line-install):
14
+ ```bash
15
+ conda create -y -n lerobot python=3.10
16
+ ```
17
+
18
+ Then activate your conda environment (you have to do this each time you open a shell to use lerobot):
19
+ ```bash
20
+ conda activate lerobot
21
+ ```
22
+
23
+ When using `miniconda`, install `ffmpeg` in your environment:
24
+ ```bash
25
+ conda install ffmpeg -c conda-forge
26
+ ```
27
+
28
+ > [!TIP]
29
+ > This usually installs `ffmpeg 7.X` for your platform compiled with the `libsvtav1` encoder. If `libsvtav1` is not supported (check supported encoders with `ffmpeg -encoders`), you can:
30
+ > - _[On any platform]_ Explicitly install `ffmpeg 7.X` using:
31
+ > ```bash
32
+ > conda install ffmpeg=7.1.1 -c conda-forge
33
+ > ```
34
+ > - _[On Linux only]_ If you want to bring your own ffmpeg: Install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1), and make sure the ffmpeg binary picked up by your install (check with `which ffmpeg`) is the one you compiled.
35
+
36
+ Install 🤗 LeRobot:
37
+ ```bash
38
+ pip install -e .
39
+ ```
40
+
41
+ ### Troubleshooting
42
+ If you encounter build errors, you may need to install additional dependencies: `cmake`, `build-essential`, and the `ffmpeg` libraries.
43
+ To install these on Linux, run:
44
+ ```bash
45
+ sudo apt-get install cmake build-essential python-dev pkg-config libavformat-dev libavcodec-dev libavdevice-dev libavutil-dev libswscale-dev libswresample-dev libavfilter-dev
46
+ ```
47
+ For other systems, see: [Compiling PyAV](https://pyav.org/docs/develop/overview/installation.html#bring-your-own-ffmpeg)
48
+
49
+ ## Optional dependencies
50
+
51
+ LeRobot provides optional extras for specific functionalities. Multiple extras can be combined (e.g., `.[aloha,feetech]`). For all available extras, refer to `pyproject.toml`.
52
+
53
+ ### Simulations
54
+ Install environment packages: `aloha` ([gym-aloha](https://github.com/huggingface/gym-aloha)), `xarm` ([gym-xarm](https://github.com/huggingface/gym-xarm)), or `pusht` ([gym-pusht](https://github.com/huggingface/gym-pusht))
55
+ Example:
56
+ ```bash
57
+ pip install -e ".[aloha]" # or "[pusht]" for example
58
+ ```
59
+
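+ Once an environment extra is installed, you can instantiate it with `gymnasium`. Below is a minimal sketch for the `pusht` extra, mirroring `examples/2_evaluate_pretrained_policy.py`:
+ ```python
+ import gym_pusht  # noqa: F401  (importing registers the environment)
+ import gymnasium as gym
+
+ env = gym.make("gym_pusht/PushT-v0", obs_type="pixels_agent_pos", max_episode_steps=300)
+ observation, info = env.reset(seed=42)
+ print(env.observation_space)
+ print(env.action_space)
+ ```
+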
60
+ ### Motor Control
61
+ For Koch v1.1, install the Dynamixel SDK; for SO100/SO101/Moss, install the Feetech SDK.
62
+ ```bash
63
+ pip install -e ".[feetech]" # or "[dynamixel]" for example
64
+ ```
65
+
66
+ ### Experiment Tracking
67
+ To use [Weights and Biases](https://docs.wandb.ai/quickstart) for experiment tracking, log in with:
68
+ ```bash
69
+ wandb login
70
+ ```
docs/source/koch.mdx ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../lerobot/common/robots/koch_follower/koch.mdx
docs/source/lekiwi.mdx ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../lerobot/common/robots/lekiwi/lekiwi.mdx
docs/source/so100.mdx ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../lerobot/common/robots/so100_follower/so100.mdx
docs/source/so101.mdx ADDED
@@ -0,0 +1 @@
 
 
1
+ ../../lerobot/common/robots/so101_follower/so101.mdx
examples/1_load_lerobot_dataset.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ This script demonstrates the use of `LeRobotDataset` class for handling and processing robotic datasets from Hugging Face.
17
+ It illustrates how to load datasets, manipulate them, and apply transformations suitable for machine learning tasks in PyTorch.
18
+
19
+ Features included in this script:
20
+ - Viewing a dataset's metadata and exploring its properties.
21
+ - Loading an existing dataset from the hub or a subset of it.
22
+ - Accessing frames by episode number.
23
+ - Using advanced dataset features like timestamp-based frame selection.
24
+ - Demonstrating compatibility with PyTorch DataLoader for batch processing.
25
+
26
+ The script ends with examples of how to batch process data using PyTorch's DataLoader.
27
+ """
28
+
29
+ from pprint import pprint
30
+
31
+ import torch
32
+ from huggingface_hub import HfApi
33
+
34
+ import lerobot
35
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
36
+
37
+ # We ported a number of existing datasets ourselves; use this to see the list:
38
+ print("List of available datasets:")
39
+ pprint(lerobot.available_datasets)
40
+
41
+ # You can also browse through the datasets created/ported by the community on the hub using the hub api:
42
+ hub_api = HfApi()
43
+ repo_ids = [info.id for info in hub_api.list_datasets(task_categories="robotics", tags=["LeRobot"])]
44
+ pprint(repo_ids)
45
+
46
+ # Or simply explore them in your web browser directly at:
47
+ # https://huggingface.co/datasets?other=LeRobot
48
+
49
+ # Let's take this one for this example
50
+ repo_id = "lerobot/aloha_mobile_cabinet"
51
+ # We can have a look and fetch its metadata to know more about it:
52
+ ds_meta = LeRobotDatasetMetadata(repo_id)
53
+
54
+ # By instantiating just this class, you can quickly access useful information about the content and the
55
+ # structure of the dataset without downloading the actual data yet (only metadata files — which are
56
+ # lightweight).
57
+ print(f"Total number of episodes: {ds_meta.total_episodes}")
58
+ print(f"Average number of frames per episode: {ds_meta.total_frames / ds_meta.total_episodes:.3f}")
59
+ print(f"Frames per second used during data collection: {ds_meta.fps}")
60
+ print(f"Robot type: {ds_meta.robot_type}")
61
+ print(f"keys to access images from cameras: {ds_meta.camera_keys=}\n")
62
+
63
+ print("Tasks:")
64
+ print(ds_meta.tasks)
65
+ print("Features:")
66
+ pprint(ds_meta.features)
67
+
68
+ # You can also get a short summary by simply printing the object:
69
+ print(ds_meta)
70
+
71
+ # You can then load the actual dataset from the hub.
72
+ # Either load any subset of episodes:
73
+ dataset = LeRobotDataset(repo_id, episodes=[0, 10, 11, 23])
74
+
75
+ # And see how many frames you have:
76
+ print(f"Selected episodes: {dataset.episodes}")
77
+ print(f"Number of episodes selected: {dataset.num_episodes}")
78
+ print(f"Number of frames selected: {dataset.num_frames}")
79
+
80
+ # Or simply load the entire dataset:
81
+ dataset = LeRobotDataset(repo_id)
82
+ print(f"Number of episodes selected: {dataset.num_episodes}")
83
+ print(f"Number of frames selected: {dataset.num_frames}")
84
+
85
+ # The previous metadata class is contained in the 'meta' attribute of the dataset:
86
+ print(dataset.meta)
87
+
88
+ # LeRobotDataset actually wraps an underlying Hugging Face dataset
89
+ # (see https://huggingface.co/docs/datasets for more information).
90
+ print(dataset.hf_dataset)
91
+
92
+ # LeRobot datasets also subclass PyTorch datasets, so you can do everything you know and love from working
93
+ # with the latter, like iterating through the dataset.
94
+ # The __getitem__ iterates over the frames of the dataset. Since our datasets are also structured by
95
+ # episodes, you can access the frame indices of any episode using the episode_data_index. Here, we access
96
+ # frame indices associated with the first episode:
97
+ episode_index = 0
98
+ from_idx = dataset.episode_data_index["from"][episode_index].item()
99
+ to_idx = dataset.episode_data_index["to"][episode_index].item()
100
+
101
+ # Then we grab all the image frames from the first camera:
102
+ camera_key = dataset.meta.camera_keys[0]
103
+ frames = [dataset[idx][camera_key] for idx in range(from_idx, to_idx)]
104
+
105
+ # The objects returned by the dataset are all torch.Tensors
106
+ print(type(frames[0]))
107
+ print(frames[0].shape)
108
+
109
+ # Since we're using PyTorch, the shape follows the PyTorch channel-first convention (c, h, w).
110
+ # We can compare this shape with the information available for that feature
111
+ pprint(dataset.features[camera_key])
112
+ # In particular:
113
+ print(dataset.features[camera_key]["shape"])
114
+ # The shape is in (h, w, c) which is a more universal format.
115
+
116
+ # For many machine learning applications we need to load the history of past observations or trajectories of
117
+ # future actions. Our datasets can load previous and future frames for each key/modality, using timestamp
118
+ # differences with the currently loaded frame. For instance:
119
+ delta_timestamps = {
120
+ # loads 4 images: 1 second before current frame, 500 ms before, 200 ms before, and current frame
121
+ camera_key: [-1, -0.5, -0.20, 0],
122
+ # loads 6 state vectors: 1.5 seconds before, 1 second before, ... 200 ms, 100 ms, and current frame
123
+ "observation.state": [-1.5, -1, -0.5, -0.20, -0.10, 0],
124
+ # loads 64 action vectors: current frame, 1 frame in the future, 2 frames, ... 63 frames in the future
125
+ "action": [t / dataset.fps for t in range(64)],
126
+ }
127
+ # Note that in any case, these delta_timestamps values need to be multiples of (1/fps) so that added to any
128
+ # timestamp, you still get a valid timestamp.
129
+
130
+ dataset = LeRobotDataset(repo_id, delta_timestamps=delta_timestamps)
131
+ print(f"\n{dataset[0][camera_key].shape=}") # (4, c, h, w)
132
+ print(f"{dataset[0]['observation.state'].shape=}") # (6, c)
133
+ print(f"{dataset[0]['action'].shape=}\n") # (64, c)
134
+
135
+ # Finally, our datasets are fully compatible with PyTorch dataloaders and samplers because they are just
136
+ # PyTorch datasets.
137
+ dataloader = torch.utils.data.DataLoader(
138
+ dataset,
139
+ num_workers=0,
140
+ batch_size=32,
141
+ shuffle=True,
142
+ )
143
+
144
+ for batch in dataloader:
145
+ print(f"{batch[camera_key].shape=}") # (32, 4, c, h, w)
146
+ print(f"{batch['observation.state'].shape=}") # (32, 6, c)
147
+ print(f"{batch['action'].shape=}") # (32, 64, c)
148
+ break
examples/2_evaluate_pretrained_policy.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ This script demonstrates how to evaluate a pretrained policy from the HuggingFace Hub or from your local
17
+ training outputs directory. In the latter case, you might want to run examples/3_train_policy.py first.
18
+
19
+ It requires the installation of the 'gym_pusht' simulation environment. Install it by running:
20
+ ```bash
21
+ pip install -e ".[pusht]"
22
+ ```
23
+ """
24
+
25
+ from pathlib import Path
26
+
27
+ import gym_pusht # noqa: F401
28
+ import gymnasium as gym
29
+ import imageio
30
+ import numpy
31
+ import torch
32
+
33
+ from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
34
+
35
+ # Create a directory to store the video of the evaluation
36
+ output_directory = Path("outputs/eval/example_pusht_diffusion")
37
+ output_directory.mkdir(parents=True, exist_ok=True)
38
+
39
+ # Select your device
40
+ device = "cuda"
41
+
42
+ # Provide the [hugging face repo id](https://huggingface.co/lerobot/diffusion_pusht):
43
+ pretrained_policy_path = "lerobot/diffusion_pusht"
44
+ # OR a path to a local outputs/train folder.
45
+ # pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")
46
+
47
+ policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
48
+
49
+ # Initialize evaluation environment to render two observation types:
50
+ # an image of the scene and state/position of the agent. The environment
51
+ # also automatically stops running after 300 interactions/steps.
52
+ env = gym.make(
53
+ "gym_pusht/PushT-v0",
54
+ obs_type="pixels_agent_pos",
55
+ max_episode_steps=300,
56
+ )
57
+
58
+ # We can verify that the shapes of the features expected by the policy match the ones from the observations
59
+ # produced by the environment
60
+ print(policy.config.input_features)
61
+ print(env.observation_space)
62
+
63
+ # Similarly, we can check that the actions produced by the policy will match the actions expected by the
64
+ # environment
65
+ print(policy.config.output_features)
66
+ print(env.action_space)
67
+
68
+ # Reset the policy and environments to prepare for rollout
69
+ policy.reset()
70
+ numpy_observation, info = env.reset(seed=42)
71
+
72
+ # Prepare to collect every rewards and all the frames of the episode,
73
+ # from initial state to final state.
74
+ rewards = []
75
+ frames = []
76
+
77
+ # Render frame of the initial state
78
+ frames.append(env.render())
79
+
80
+ step = 0
81
+ done = False
82
+ while not done:
83
+ # Prepare observation for the policy running in Pytorch
84
+ state = torch.from_numpy(numpy_observation["agent_pos"])
85
+ image = torch.from_numpy(numpy_observation["pixels"])
86
+
87
+ # Convert to float32, scale the image from [0, 255] to [0, 1],
88
+ # and permute it from channel-last (h, w, c) to channel-first (c, h, w)
89
+ state = state.to(torch.float32)
90
+ image = image.to(torch.float32) / 255
91
+ image = image.permute(2, 0, 1)
92
+
93
+ # Send data tensors from CPU to GPU
94
+ state = state.to(device, non_blocking=True)
95
+ image = image.to(device, non_blocking=True)
96
+
97
+ # Add extra (empty) batch dimension, required to forward the policy
98
+ state = state.unsqueeze(0)
99
+ image = image.unsqueeze(0)
100
+
101
+ # Create the policy input dictionary
102
+ observation = {
103
+ "observation.state": state,
104
+ "observation.image": image,
105
+ }
106
+
107
+ # Predict the next action with respect to the current observation
108
+ with torch.inference_mode():
109
+ action = policy.select_action(observation)
110
+
111
+ # Prepare the action for the environment
112
+ numpy_action = action.squeeze(0).to("cpu").numpy()
113
+
114
+ # Step through the environment and receive a new observation
115
+ numpy_observation, reward, terminated, truncated, info = env.step(numpy_action)
116
+ print(f"{step=} {reward=} {terminated=}")
117
+
118
+ # Keep track of all the rewards and frames
119
+ rewards.append(reward)
120
+ frames.append(env.render())
121
+
122
+ # The rollout is considered done when the success state is reached (i.e. terminated is True),
123
+ # or the maximum number of iterations is reached (i.e. truncated is True)
124
+ done = terminated | truncated | done
125
+ step += 1
126
+
127
+ if terminated:
128
+ print("Success!")
129
+ else:
130
+ print("Failure!")
131
+
132
+ # Get the speed of environment (i.e. its number of frames per second).
133
+ fps = env.metadata["render_fps"]
134
+
135
+ # Encode all frames into an mp4 video.
136
+ video_path = output_directory / "rollout.mp4"
137
+ imageio.mimsave(str(video_path), numpy.stack(frames), fps=fps)
138
+
139
+ print(f"Video of the evaluation is available in '{video_path}'.")
examples/3_train_policy.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """This script demonstrates how to train Diffusion Policy on the PushT environment.
16
+
17
+ Once you have trained a model with this script, you can try to evaluate it on
18
+ examples/2_evaluate_pretrained_policy.py
19
+ """
20
+
21
+ from pathlib import Path
22
+
23
+ import torch
24
+
25
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
26
+ from lerobot.common.datasets.utils import dataset_to_policy_features
27
+ from lerobot.common.policies.diffusion.configuration_diffusion import DiffusionConfig
28
+ from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
29
+ from lerobot.configs.types import FeatureType
30
+
31
+
32
+ def main():
33
+ # Create a directory to store the training checkpoint.
34
+ output_directory = Path("outputs/train/example_pusht_diffusion")
35
+ output_directory.mkdir(parents=True, exist_ok=True)
36
+
37
+ # Select your device
38
+ device = torch.device("cuda")
39
+
40
+ # Number of offline training steps (we'll only do offline training for this example.)
41
+ # Adjust as you prefer. 5000 steps are needed to get something worth evaluating.
42
+ training_steps = 5000
43
+ log_freq = 1
44
+
45
+ # When starting from scratch (i.e. not from a pretrained policy), we need to specify 2 things before
46
+ # creating the policy:
47
+ # - input/output shapes: to properly size the policy
48
+ # - dataset stats: for normalization and denormalization of input/outputs
49
+ dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht")
50
+ features = dataset_to_policy_features(dataset_metadata.features)
51
+ output_features = {key: ft for key, ft in features.items() if ft.type is FeatureType.ACTION}
52
+ input_features = {key: ft for key, ft in features.items() if key not in output_features}
53
+
54
+ # Policies are initialized with a configuration class, in this case `DiffusionConfig`. For this example,
55
+ # we'll just use the defaults and so no arguments other than input/output features need to be passed.
56
+ cfg = DiffusionConfig(input_features=input_features, output_features=output_features)
57
+
58
+ # We can now instantiate our policy with this config and the dataset stats.
59
+ policy = DiffusionPolicy(cfg, dataset_stats=dataset_metadata.stats)
60
+ policy.train()
61
+ policy.to(device)
62
+
63
+ # Another policy-dataset interaction is with the delta_timestamps. Each policy expects a given number frames
64
+ # which can differ for inputs, outputs and rewards (if there are some).
65
+ delta_timestamps = {
66
+ "observation.image": [i / dataset_metadata.fps for i in cfg.observation_delta_indices],
67
+ "observation.state": [i / dataset_metadata.fps for i in cfg.observation_delta_indices],
68
+ "action": [i / dataset_metadata.fps for i in cfg.action_delta_indices],
69
+ }
70
+
71
+ # In this case with the standard configuration for Diffusion Policy, it is equivalent to this:
72
+ delta_timestamps = {
73
+ # Load the previous image and state at -0.1 seconds before current frame,
74
+ # then load current image and state corresponding to 0.0 second.
75
+ "observation.image": [-0.1, 0.0],
76
+ "observation.state": [-0.1, 0.0],
77
+ # Load the previous action (-0.1), the next action to be executed (0.0),
78
+ # and 14 future actions with a 0.1 seconds spacing. All these actions will be
79
+ # used to supervise the policy.
80
+ "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
81
+ }
82
+
83
+ # We can then instantiate the dataset with these delta_timestamps configuration.
84
+ dataset = LeRobotDataset("lerobot/pusht", delta_timestamps=delta_timestamps)
85
+
86
+ # Then we create our optimizer and dataloader for offline training.
87
+ optimizer = torch.optim.Adam(policy.parameters(), lr=1e-4)
88
+ dataloader = torch.utils.data.DataLoader(
89
+ dataset,
90
+ num_workers=4,
91
+ batch_size=64,
92
+ shuffle=True,
93
+ pin_memory=device.type != "cpu",
94
+ drop_last=True,
95
+ )
96
+
97
+ # Run training loop.
98
+ step = 0
99
+ done = False
100
+ while not done:
101
+ for batch in dataloader:
102
+ batch = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
103
+ loss, _ = policy.forward(batch)
104
+ loss.backward()
105
+ optimizer.step()
106
+ optimizer.zero_grad()
107
+
108
+ if step % log_freq == 0:
109
+ print(f"step: {step} loss: {loss.item():.3f}")
110
+ step += 1
111
+ if step >= training_steps:
112
+ done = True
113
+ break
114
+
115
+ # Save a policy checkpoint.
116
+ policy.save_pretrained(output_directory)
117
+
118
+
119
+ if __name__ == "__main__":
120
+ main()
examples/4_train_policy_with_script.md ADDED
@@ -0,0 +1,274 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This tutorial will explain the training script, how to use it, and particularly how to configure everything needed for the training run.
2
+ > **Note:** The following assumes you're running these commands on a machine equipped with a CUDA GPU. If you don't have one (or if you're using a Mac), you can add `--policy.device=cpu` (`--policy.device=mps` respectively). However, be advised that the code executes much slower on CPU.
3
+
4
+
5
+ ## The training script
6
+
7
+ LeRobot offers a training script at [`lerobot/scripts/train.py`](../lerobot/scripts/train.py). At a high level it does the following:
8
+
9
+ - Initializes/loads a configuration for the following steps.
10
+ - Instantiates a dataset.
11
+ - (Optional) Instantiates a simulation environment corresponding to that dataset.
12
+ - Instantiates a policy.
13
+ - Runs a standard training loop with forward pass, backward pass, optimization step, and occasional logging, evaluation (of the policy on the environment), and checkpointing.
14
+
15
+ ## Overview of the configuration system
16
+
17
+ In the training script, the main function `train` expects a `TrainPipelineConfig` object:
18
+ ```python
19
+ # train.py
20
+ @parser.wrap()
21
+ def train(cfg: TrainPipelineConfig):
22
+ ```
23
+
24
+ You can inspect the `TrainPipelineConfig` defined in [`lerobot/configs/train.py`](../lerobot/configs/train.py) (which is heavily commented and meant to be a reference to understand any option).
25
+
26
+ When running the script, command-line inputs are parsed thanks to the `@parser.wrap()` decorator, and an instance of this class is automatically generated. Under the hood, this is done with [Draccus](https://github.com/dlwh/draccus), a tool dedicated to this purpose. If you're familiar with Hydra, Draccus can similarly load configurations from config files (.json, .yaml) and override their values through command-line inputs. Unlike Hydra, these configurations are pre-defined in the code through dataclasses rather than being defined entirely in config files. This allows for more rigorous serialization/deserialization and typing, and lets you manipulate configurations as objects directly in the code rather than as dictionaries or namespaces (which enables nice IDE features such as autocomplete, jump-to-def, etc.).
27
+
28
+ Let's have a look at a simplified example. Amongst other attributes, the training config has the following attributes:
29
+ ```python
30
+ @dataclass
31
+ class TrainPipelineConfig:
32
+ dataset: DatasetConfig
33
+ env: envs.EnvConfig | None = None
34
+ policy: PreTrainedConfig | None = None
35
+ ```
36
+ in which `DatasetConfig` for example is defined as such:
37
+ ```python
38
+ @dataclass
39
+ class DatasetConfig:
40
+ repo_id: str
41
+ episodes: list[int] | None = None
42
+ video_backend: str = "pyav"
43
+ ```
44
+
45
+ This creates a hierarchical relationship: assuming, for example, that we have a `cfg` instance of `TrainPipelineConfig`, we can access the `repo_id` value with `cfg.dataset.repo_id`.
46
+ From the command line, we can specify this value by using a very similar syntax `--dataset.repo_id=repo/id`.
47
+
48
+ By default, every field takes the default value specified in its dataclass. If a field doesn't have a default value, it needs to be specified either from the command line or from a config file – whose path is also given on the command line (more on this below). In the example above, the `dataset` field doesn't have a default value, which means it must be specified.
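+
+ To make this concrete, here is a minimal, self-contained sketch (not part of LeRobot; the file name `minimal_train_cli.py` and the trimmed-down dataclasses are purely illustrative) of how a nested dataclass config maps to CLI overrides with Draccus:
+ ```python
+ # minimal_train_cli.py -- toy illustration of nested configs parsed from the CLI with Draccus
+ from dataclasses import dataclass
+
+ import draccus
+
+
+ @dataclass
+ class DatasetConfig:
+     repo_id: str  # no default -> must be provided on the CLI or via a config file
+     video_backend: str = "pyav"
+
+
+ @dataclass
+ class TrainPipelineConfig:
+     dataset: DatasetConfig
+
+
+ @draccus.wrap()
+ def main(cfg: TrainPipelineConfig):
+     # `--dataset.repo_id=lerobot/pusht` on the command line ends up here:
+     print(cfg.dataset.repo_id, cfg.dataset.video_backend)
+
+
+ if __name__ == "__main__":
+     main()
+
+ # Usage: python minimal_train_cli.py --dataset.repo_id=lerobot/pusht
+ ```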
49
+
50
+
51
+ ## Specifying values from the CLI
52
+
53
+ Let's say that we want to train [Diffusion Policy](../lerobot/common/policies/diffusion) on the [pusht](https://huggingface.co/datasets/lerobot/pusht) dataset, using the [gym_pusht](https://github.com/huggingface/gym-pusht) environment for evaluation. The command to do so would look like this:
54
+ ```bash
55
+ python lerobot/scripts/train.py \
56
+ --dataset.repo_id=lerobot/pusht \
57
+ --policy.type=diffusion \
58
+ --env.type=pusht
59
+ ```
60
+
61
+ Let's break this down:
62
+ - To specify the dataset, we just need to specify its `repo_id` on the hub, which is the only required argument in `DatasetConfig`. The rest of the fields have default values that are fine for our case, so we can just add the option `--dataset.repo_id=lerobot/pusht`.
63
+ - To specify the policy, we can just select the diffusion policy using `--policy` appended with `.type`. Here, `.type` is a special argument which allows us to select config classes that inherit from `draccus.ChoiceRegistry` and have been decorated with the `register_subclass()` method (a simplified sketch is shown right after this list). For a better explanation of this feature, have a look at this [Draccus demo](https://github.com/dlwh/draccus?tab=readme-ov-file#more-flexible-configuration-with-choice-types). In our code, we use this mechanism mainly to select policies, environments, robots, and some other components like optimizers. The policies available to select are located in [lerobot/common/policies](../lerobot/common/policies).
64
+ - Similarly, we select the environment with `--env.type=pusht`. The different environment configs are available in [`lerobot/common/envs/configs.py`](../lerobot/common/envs/configs.py).
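+
+ As a rough sketch of this mechanism (simplified toy classes, not LeRobot's actual config hierarchy), a choice type looks like this:
+ ```python
+ from dataclasses import dataclass
+
+ import draccus
+
+
+ @dataclass
+ class PolicyConfig(draccus.ChoiceRegistry):
+     pass
+
+
+ @PolicyConfig.register_subclass("diffusion")
+ @dataclass
+ class DiffusionConfig(PolicyConfig):
+     horizon: int = 16
+
+
+ # With a field `policy: PolicyConfig | None = None` in the training config,
+ # `--policy.type=diffusion` selects DiffusionConfig, whose fields can then be
+ # overridden (e.g. `--policy.horizon=32` in this toy example).
+ ```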
65
+
66
+ Let's see another example. Let's say you've been training [ACT](../lerobot/common/policies/act) on [lerobot/aloha_sim_insertion_human](https://huggingface.co/datasets/lerobot/aloha_sim_insertion_human) using the [gym-aloha](https://github.com/huggingface/gym-aloha) environment for evaluation with:
67
+ ```bash
68
+ python lerobot/scripts/train.py \
69
+ --policy.type=act \
70
+ --dataset.repo_id=lerobot/aloha_sim_insertion_human \
71
+ --env.type=aloha \
72
+ --output_dir=outputs/train/act_aloha_insertion
73
+ ```
74
+ > Notice we added `--output_dir` to explicitly specify where to write the outputs of this run (checkpoints, training state, configs, etc.). This is not mandatory: if you don't specify it, a default directory is created from the current date and time, `env.type` and `policy.type`, typically looking like `outputs/train/2025-01-24/16-10-05_aloha_act`.
75
+
76
+ We now want to train a different policy for aloha on another task. We'll change the dataset and use [lerobot/aloha_sim_transfer_cube_human](https://huggingface.co/datasets/lerobot/aloha_sim_transfer_cube_human) instead. Of course, we also need to change the environment's task to match.
77
+ Looking at the [`AlohaEnv`](../lerobot/common/envs/configs.py) config, the task is `"AlohaInsertion-v0"` by default, which corresponds to the task we trained on in the command above. The [gym-aloha](https://github.com/huggingface/gym-aloha?tab=readme-ov-file#description) environment also has the `AlohaTransferCube-v0` task which corresponds to this other task we want to train on. Putting this together, we can train this new policy on this different task using:
78
+ ```bash
79
+ python lerobot/scripts/train.py \
80
+ --policy.type=act \
81
+ --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
82
+ --env.type=aloha \
83
+ --env.task=AlohaTransferCube-v0 \
84
+ --output_dir=outputs/train/act_aloha_transfer
85
+ ```
86
+
87
+ ## Loading from a config file
88
+
89
+ Now, let's assume that we want to reproduce the run just above. That run has produced a `train_config.json` file in its checkpoints, which serializes the `TrainPipelineConfig` instance it used:
90
+ ```json
91
+ {
92
+ "dataset": {
93
+ "repo_id": "lerobot/aloha_sim_transfer_cube_human",
94
+ "episodes": null,
95
+ ...
96
+ },
97
+ "env": {
98
+ "type": "aloha",
99
+ "task": "AlohaTransferCube-v0",
100
+ "fps": 50,
101
+ ...
102
+ },
103
+ "policy": {
104
+ "type": "act",
105
+ "n_obs_steps": 1,
106
+ ...
107
+ },
108
+ ...
109
+ }
110
+ ```
111
+
112
+ We can then simply load the config values from this file using:
113
+ ```bash
114
+ python lerobot/scripts/train.py \
115
+ --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \
116
+ --output_dir=outputs/train/act_aloha_transfer_2
117
+ ```
118
+ `--config_path` is also a special argument which allows us to initialize the config from a local config file. It can point either to a directory that contains `train_config.json` or directly to the config file itself.
119
+
120
+ Similarly to Hydra, we can still override some parameters in the CLI if we want to, e.g.:
121
+ ```bash
122
+ python lerobot/scripts/train.py \
123
+ --config_path=outputs/train/act_aloha_transfer/checkpoints/last/pretrained_model/ \
124
+ --output_dir=outputs/train/act_aloha_transfer_2 \
125
+ --policy.n_action_steps=80
126
+ ```
127
+ > Note: While `--output_dir` is not required in general, in this case we need to specify it since it would otherwise take its value from `train_config.json` (which is `outputs/train/act_aloha_transfer`). To prevent accidental deletion of checkpoints from a previous run, we raise an error if you try to write to an existing directory. This is not the case when resuming a run, which is what you'll learn next.
128
+
129
+ `--config_path` can also accept the repo_id of a repo on the hub that contains a `train_config.json` file, e.g. running:
130
+ ```bash
131
+ python lerobot/scripts/train.py --config_path=lerobot/diffusion_pusht
132
+ ```
133
+ will start a training run with the same configuration used for training [lerobot/diffusion_pusht](https://huggingface.co/lerobot/diffusion_pusht)
134
+
135
+
136
+ ## Resume training
137
+
138
+ Being able to resume a training run is important in case it crashes or is aborted for any reason. We'll demonstrate how to do that here.
139
+
140
+ Let's reuse the command from the previous run and add a few more options:
141
+ ```bash
142
+ python lerobot/scripts/train.py \
143
+ --policy.type=act \
144
+ --dataset.repo_id=lerobot/aloha_sim_transfer_cube_human \
145
+ --env.type=aloha \
146
+ --env.task=AlohaTransferCube-v0 \
147
+ --log_freq=25 \
148
+ --save_freq=100 \
149
+ --output_dir=outputs/train/run_resumption
150
+ ```
151
+
152
+ Here we've set the logging and checkpointing frequencies to low values so we can showcase resumption. You should see some logging and get a first checkpoint within 1 minute (depending on your hardware). Wait for the first checkpoint to be written; you should see a line like this in your terminal:
153
+ ```
154
+ INFO 2025-01-24 16:10:56 ts/train.py:263 Checkpoint policy after step 100
155
+ ```
156
+ Now let's simulate a crash by killing the process (hit `ctrl`+`c`). We can then simply resume this run from the last checkpoint available with:
157
+ ```bash
158
+ python lerobot/scripts/train.py \
159
+ --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \
160
+ --resume=true
161
+ ```
162
+ You should see from the logging that your training picks up from where it left off.
163
+
164
+ Another reason for which you might want to resume a run is simply to extend training and add more training steps. The number of training steps is set by the option `--steps`, which is 100 000 by default.
165
+ You could double the number of steps of the previous run with:
166
+ ```bash
167
+ python lerobot/scripts/train.py \
168
+ --config_path=outputs/train/run_resumption/checkpoints/last/pretrained_model/ \
169
+ --resume=true \
170
+ --steps=200000
171
+ ```
172
+
173
+ ## Outputs of a run
174
+ In the output directory, there will be a folder called `checkpoints` with the following structure:
175
+ ```bash
176
+ outputs/train/run_resumption/checkpoints
177
+ ├── 000100 # checkpoint_dir for training step 100
178
+ │ ├── pretrained_model/
179
+ │ │ ├── config.json # policy config
180
+ │ │ ├── model.safetensors # policy weights
181
+ │ │ └── train_config.json # train config
182
+ │ └── training_state/
183
+ │ ├── optimizer_param_groups.json # optimizer param groups
184
+ │ ├── optimizer_state.safetensors # optimizer state
185
+ │ ├── rng_state.safetensors # rng states
186
+ │ ├── scheduler_state.json # scheduler state
187
+ │ └── training_step.json # training step
188
+ ├── 000200
189
+ └── last -> 000200 # symlink to the last available checkpoint
190
+ ```
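+
+ As a side note, the `pretrained_model/` folder of a checkpoint can be loaded back in Python with the policy's `from_pretrained()` method (the sketch below assumes an ACT run; substitute the class matching your policy):
+ ```python
+ from lerobot.common.policies.act.modeling_act import ACTPolicy
+
+ policy = ACTPolicy.from_pretrained("outputs/train/run_resumption/checkpoints/last/pretrained_model")
+ ```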
191
+
192
+ ## Fine-tuning a pre-trained policy
193
+
194
+ In addition to the features currently in Draccus, we've added a special `.path` argument for the policy, which allows to load a policy as you would with `PreTrainedPolicy.from_pretrained()`. In that case, `path` can be a local directory that contains a checkpoint or a repo_id pointing to a pretrained policy on the hub.
195
+
196
+ For example, we could fine-tune a [policy pre-trained on the aloha transfer task](https://huggingface.co/lerobot/act_aloha_sim_transfer_cube_human) on the aloha insertion task. We can achieve this with:
197
+ ```bash
198
+ python lerobot/scripts/train.py \
199
+ --policy.path=lerobot/act_aloha_sim_transfer_cube_human \
200
+ --dataset.repo_id=lerobot/aloha_sim_insertion_human \
201
+ --env.type=aloha \
202
+ --env.task=AlohaInsertion-v0
203
+ ```
204
+
205
+ When doing so, keep in mind that the features of the fine-tuning dataset must match the input/output features of the pretrained policy.
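+
+ A quick way to eyeball this is to compare the dataset's feature keys with the pretrained policy's expected input/output features. The sketch below is only an illustration (the `input_features`/`output_features` attribute names are an assumption and may differ across LeRobot versions, hence the defensive `getattr`):
+ ```python
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
+ from lerobot.common.policies.act.modeling_act import ACTPolicy
+
+ dataset = LeRobotDataset("lerobot/aloha_sim_insertion_human", episodes=[0])
+ policy = ACTPolicy.from_pretrained("lerobot/act_aloha_sim_transfer_cube_human")
+
+ print("dataset features:", sorted(dataset.features))
+ print("policy inputs:   ", sorted(getattr(policy.config, "input_features", {})))
+ print("policy outputs:  ", sorted(getattr(policy.config, "output_features", {})))
+ ```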
206
+
207
+ ## Typical logs and metrics
208
+
209
+ When you start the training process, you will first see your full configuration being printed in the terminal. You can check it to make sure that you configured your run correctly. The final configuration will also be saved with the checkpoint.
210
+
211
+ After that, you will see training log like this one:
212
+ ```
213
+ INFO 2024-08-14 13:35:12 ts/train.py:192 step:0 smpl:64 ep:1 epch:0.00 loss:1.112 grdn:15.387 lr:2.0e-07 updt_s:1.738 data_s:4.774
214
+ ```
215
+ or evaluation log:
216
+ ```
217
+ INFO 2024-08-14 13:38:45 ts/train.py:226 step:100 smpl:6K ep:52 epch:0.25 ∑rwrd:20.693 success:0.0% eval_s:120.266
218
+ ```
219
+
220
+ These logs will also be saved in wandb if `wandb.enable` is set to `true`. Here is the meaning of some abbreviations:
221
+ - `smpl`: number of samples seen during training.
222
+ - `ep`: number of episodes seen during training. An episode contains multiple samples in a complete manipulation task.
223
+ - `epch`: number of times all unique samples have been seen (epochs).
224
+ - `grdn`: gradient norm.
225
+ - `∑rwrd`: sum of rewards over each evaluation episode, averaged over all evaluation episodes.
226
+ - `success`: average success rate of eval episodes. Reward and success are usually different, except in the sparse-reward setting where reward=1 only when the task is completed successfully.
227
+ - `eval_s`: time to evaluate the policy in the environment, in seconds.
228
+ - `updt_s`: time to update the network parameters, in seconds.
229
+ - `data_s`: time to load a batch of data, in seconds.
230
+
231
+ Some metrics are useful for initial performance profiling. For example, if you find via the `nvidia-smi` command that GPU utilization is low and `data_s` is sometimes too high, you may need to increase the batch size or the number of dataloading workers to accelerate dataloading. We also recommend the [PyTorch profiler](https://github.com/huggingface/lerobot?tab=readme-ov-file#improve-your-code-with-profiling) for detailed performance probing.
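+
+ For instance, here is a rough sketch of profiling a few training iterations with the PyTorch profiler (reusing the `policy`, `dataloader` and `device` variables from the training example in `examples/3_train_policy.py`, and assuming a CUDA machine; adapt it to wherever your loop lives):
+ ```python
+ import torch
+ from torch.profiler import ProfilerActivity, profile
+
+ data_iter = iter(dataloader)
+ with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
+     for _ in range(5):
+         # Time both data loading and the forward/backward pass for a handful of steps.
+         batch = next(data_iter)
+         batch = {k: (v.to(device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
+         loss, _ = policy.forward(batch)
+         loss.backward()
+
+ print(prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=10))
+ ```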
232
+
233
+ ## In short
234
+
235
+ We'll summarize here the main use cases to remember from this tutorial.
236
+
237
+ #### Train a policy from scratch – CLI
238
+ ```bash
239
+ python lerobot/scripts/train.py \
240
+ --policy.type=act \ # <- select 'act' policy
241
+ --env.type=pusht \ # <- select 'pusht' environment
242
+ --dataset.repo_id=lerobot/pusht # <- train on this dataset
243
+ ```
244
+
245
+ #### Train a policy from scratch - config file + CLI
246
+ ```bash
247
+ python lerobot/scripts/train.py \
248
+ --config_path=path/to/pretrained_model \ # <- can also be a repo_id
249
+ --policy.n_action_steps=80 # <- you may still override values
250
+ ```
251
+
252
+ #### Resume/continue a training run
253
+ ```bash
254
+ python lerobot/scripts/train.py \
255
+ --config_path=checkpoint/pretrained_model/ \
256
+ --resume=true \
257
+ --steps=200000 # <- you can change some training parameters
258
+ ```
259
+
260
+ #### Fine-tuning
261
+ ```bash
262
+ python lerobot/scripts/train.py \
263
+ --policy.path=lerobot/act_aloha_sim_transfer_cube_human \ # <- can also be a local path to a checkpoint
264
+ --dataset.repo_id=lerobot/aloha_sim_insertion_human \
265
+ --env.type=aloha \
266
+ --env.task=AlohaInsertion-v0
267
+ ```
268
+
269
+ ---
270
+
271
+ Now that you know the basics of how to train a policy, you might want to apply this knowledge to actual robots, or record your own datasets and train policies on your own task.
272
+ If that's the case, head over to the next tutorial [`7_get_started_with_real_robot.md`](./7_get_started_with_real_robot.md).
273
+
274
+ Or in the meantime, happy training! 🤗
examples/advanced/1_add_image_transforms.py ADDED
@@ -0,0 +1,67 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ This script demonstrates how to use torchvision's image transforms with LeRobotDataset for data
17
+ augmentation purposes. The transformations are passed to the dataset as an argument upon creation, and
18
+ transforms are applied to the observation images before they are returned in the dataset's __getitem__.
19
+ """
20
+
21
+ from pathlib import Path
22
+
23
+ from torchvision.transforms import ToPILImage, v2
24
+
25
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
26
+
27
+ dataset_repo_id = "lerobot/aloha_static_screw_driver"
28
+
29
+ # Create a LeRobotDataset with no transformations
30
+ dataset = LeRobotDataset(dataset_repo_id, episodes=[0])
31
+ # This is equivalent to `dataset = LeRobotDataset(dataset_repo_id, image_transforms=None)`
32
+
33
+ # Get the index of the first observation in the first episode
34
+ first_idx = dataset.episode_data_index["from"][0].item()
35
+
36
+ # Get the frame corresponding to the first camera
37
+ frame = dataset[first_idx][dataset.meta.camera_keys[0]]
38
+
39
+
40
+ # Define the transformations
41
+ transforms = v2.Compose(
42
+ [
43
+ v2.ColorJitter(brightness=(0.5, 1.5)),
44
+ v2.ColorJitter(contrast=(0.5, 1.5)),
45
+ v2.ColorJitter(hue=(-0.1, 0.1)),
46
+ v2.RandomAdjustSharpness(sharpness_factor=2, p=1),
47
+ ]
48
+ )
49
+
50
+ # Create another LeRobotDataset with the defined transformations
51
+ transformed_dataset = LeRobotDataset(dataset_repo_id, episodes=[0], image_transforms=transforms)
52
+
53
+ # Get a frame from the transformed dataset
54
+ transformed_frame = transformed_dataset[first_idx][transformed_dataset.meta.camera_keys[0]]
55
+
56
+ # Create a directory to store output images
57
+ output_dir = Path("outputs/image_transforms")
58
+ output_dir.mkdir(parents=True, exist_ok=True)
59
+
60
+ # Save the original frame
61
+ to_pil = ToPILImage()
62
+ to_pil(frame).save(output_dir / "original_frame.png", quality=100)
63
+ print(f"Original frame saved to {output_dir / 'original_frame.png'}.")
64
+
65
+ # Save the transformed frame
66
+ to_pil(transformed_frame).save(output_dir / "transformed_frame.png", quality=100)
67
+ print(f"Transformed frame saved to {output_dir / 'transformed_frame.png'}.")
examples/advanced/2_calculate_validation_loss.py ADDED
@@ -0,0 +1,104 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """This script demonstrates how to slice a dataset and calculate the loss on a subset of the data.
16
+
17
+ This technique can be useful for debugging and testing purposes, as well as identifying whether a policy
18
+ is learning effectively.
19
+
20
+ Furthermore, relying on validation loss to evaluate performance is generally not considered a good practice,
21
+ especially in the context of imitation learning. The most reliable approach is to evaluate the policy directly
22
+ on the target environment, whether that be in simulation or the real world.
23
+ """
24
+
25
+ import math
26
+
27
+ import torch
28
+
29
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset, LeRobotDatasetMetadata
30
+ from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy
31
+
32
+
33
+ def main():
34
+ device = torch.device("cuda")
35
+
36
+ # Download the diffusion policy for pusht environment
37
+ pretrained_policy_path = "lerobot/diffusion_pusht"
38
+ # OR uncomment the following to evaluate a policy from the local outputs/train folder.
39
+ # pretrained_policy_path = Path("outputs/train/example_pusht_diffusion")
40
+
41
+ policy = DiffusionPolicy.from_pretrained(pretrained_policy_path)
42
+ policy.eval()
43
+ policy.to(device)
44
+
45
+ # Set up the dataset.
46
+ delta_timestamps = {
47
+ # Load the previous image and state at -0.1 seconds before current frame,
48
+ # then load current image and state corresponding to 0.0 second.
49
+ "observation.image": [-0.1, 0.0],
50
+ "observation.state": [-0.1, 0.0],
51
+ # Load the previous action (-0.1), the next action to be executed (0.0),
52
+ # and 14 future actions with a 0.1 seconds spacing. All these actions will be
53
+ # used to calculate the loss.
54
+ "action": [-0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4],
55
+ }
56
+
57
+ # Load the last 10% of episodes of the dataset as a validation set.
58
+ # - Load dataset metadata
59
+ dataset_metadata = LeRobotDatasetMetadata("lerobot/pusht")
60
+ # - Calculate train and val episodes
61
+ total_episodes = dataset_metadata.total_episodes
62
+ episodes = list(range(dataset_metadata.total_episodes))
63
+ num_train_episodes = math.floor(total_episodes * 90 / 100)
64
+ train_episodes = episodes[:num_train_episodes]
65
+ val_episodes = episodes[num_train_episodes:]
66
+ print(f"Number of episodes in full dataset: {total_episodes}")
67
+ print(f"Number of episodes in training dataset (90% subset): {len(train_episodes)}")
68
+ print(f"Number of episodes in validation dataset (10% subset): {len(val_episodes)}")
69
+ # - Load train and val datasets
70
+ train_dataset = LeRobotDataset(
71
+ "lerobot/pusht", episodes=train_episodes, delta_timestamps=delta_timestamps
72
+ )
73
+ val_dataset = LeRobotDataset("lerobot/pusht", episodes=val_episodes, delta_timestamps=delta_timestamps)
74
+ print(f"Number of frames in training dataset (90% subset): {len(train_dataset)}")
75
+ print(f"Number of frames in validation dataset (10% subset): {len(val_dataset)}")
76
+
77
+ # Create dataloader for evaluation.
78
+ val_dataloader = torch.utils.data.DataLoader(
79
+ val_dataset,
80
+ num_workers=4,
81
+ batch_size=64,
82
+ shuffle=False,
83
+ pin_memory=device != torch.device("cpu"),
84
+ drop_last=False,
85
+ )
86
+
87
+ # Run validation loop.
88
+ loss_cumsum = 0
89
+ n_examples_evaluated = 0
90
+ for batch in val_dataloader:
91
+ batch = {k: (v.to(device, non_blocking=True) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()}
92
+ loss, _ = policy.forward(batch)
93
+
94
+ loss_cumsum += loss.item()
95
+ n_examples_evaluated += batch["index"].shape[0]
96
+
97
+ # Calculate the average loss over the validation set.
98
+ average_loss = loss_cumsum / n_examples_evaluated
99
+
100
+ print(f"Average loss on validation set: {average_loss:.4f}")
101
+
102
+
103
+ if __name__ == "__main__":
104
+ main()
examples/backward_compatibility/replay.py ADDED
@@ -0,0 +1,105 @@
1
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ Replays the actions of an episode from a dataset on a robot.
17
+
18
+ Example:
19
+
20
+ ```shell
21
+ python -m lerobot.replay \
22
+ --robot.type=so100_follower \
23
+ --robot.port=/dev/tty.usbmodem58760431541 \
24
+ --robot.id=black \
25
+ --dataset.repo_id=aliberts/record-test \
26
+ --dataset.episode=2
27
+ ```
28
+ """
29
+
30
+ import logging
31
+ import time
32
+ from dataclasses import asdict, dataclass
33
+ from pathlib import Path
34
+ from pprint import pformat
35
+
36
+ import draccus
37
+
38
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
39
+ from lerobot.common.robots import ( # noqa: F401
40
+ Robot,
41
+ RobotConfig,
42
+ koch_follower,
43
+ make_robot_from_config,
44
+ so100_follower,
45
+ so101_follower,
46
+ )
47
+ from lerobot.common.utils.robot_utils import busy_wait
48
+ from lerobot.common.utils.utils import (
49
+ init_logging,
50
+ log_say,
51
+ )
52
+
53
+
54
+ @dataclass
55
+ class DatasetReplayConfig:
56
+ # Dataset identifier. By convention it should match '{hf_username}/{dataset_name}' (e.g. `lerobot/test`).
57
+ repo_id: str
58
+ # Episode to replay.
59
+ episode: int
60
+ # Root directory where the dataset will be stored (e.g. 'dataset/path').
61
+ root: str | Path | None = None
62
+ # Limit the frames per second (note: the replay loop below paces actions using the dataset fps).
63
+ fps: int = 30
64
+
65
+
66
+ @dataclass
67
+ class ReplayConfig:
68
+ robot: RobotConfig
69
+ dataset: DatasetReplayConfig
70
+ # Use vocal synthesis to read events.
71
+ play_sounds: bool = True
72
+
73
+
74
+ @draccus.wrap()
75
+ def replay(cfg: ReplayConfig):
76
+ init_logging()
77
+ logging.info(pformat(asdict(cfg)))
78
+
79
+ robot = make_robot_from_config(cfg.robot)
80
+ dataset = LeRobotDataset(cfg.dataset.repo_id, root=cfg.dataset.root, episodes=[cfg.dataset.episode])
81
+ actions = dataset.hf_dataset.select_columns("action")
82
+ robot.connect()
83
+
84
+ log_say("Replaying episode", cfg.play_sounds, blocking=True)
85
+ for idx in range(dataset.num_frames):
86
+ start_episode_t = time.perf_counter()
87
+
88
+ action_array = actions[idx]["action"]
89
+ action = {}
90
+ for i, name in enumerate(dataset.features["action"]["names"]):
91
+ key = f"{name.removeprefix('main_')}.pos"
92
+ action[key] = action_array[i].item()
93
+
94
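+ # Offset/flip shoulder_lift and elbow_flex so the recorded joint values match the current convention (the backward-compatibility adjustment this example demonstrates).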
+ action["shoulder_lift.pos"] = -(action["shoulder_lift.pos"] - 90)
95
+ action["elbow_flex.pos"] -= 90
96
+ robot.send_action(action)
97
+
98
+ dt_s = time.perf_counter() - start_episode_t
99
+ busy_wait(1 / dataset.fps - dt_s)
100
+
101
+ robot.disconnect()
102
+
103
+
104
+ if __name__ == "__main__":
105
+ replay()
examples/lekiwi/evaluate.py ADDED
@@ -0,0 +1,32 @@
1
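+ # Evaluate a pretrained ACT policy on a LeKiwi robot client: fetch observations, run inference locally,
+ # and send the predicted actions to the robot for a fixed number of control cycles.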
+ from lerobot.common.datasets.utils import build_dataset_frame, hw_to_dataset_features
2
+ from lerobot.common.policies.act.modeling_act import ACTPolicy
3
+ from lerobot.common.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
4
+ from lerobot.common.utils.control_utils import predict_action
5
+ from lerobot.common.utils.utils import get_safe_torch_device
6
+
7
+ NB_CYCLES_CLIENT_CONNECTION = 1000
8
+
9
+ robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
10
+ robot = LeKiwiClient(robot_config)
11
+
12
+ robot.connect()
13
+
14
+ policy = ACTPolicy.from_pretrained("pepijn223/act_lekiwi_circle")
15
+ policy.reset()
16
+
17
+ obs_features = hw_to_dataset_features(robot.observation_features, "observation")
18
+
19
+ print("Running inference")
20
+ i = 0
21
+ while i < NB_CYCLES_CLIENT_CONNECTION:
22
+ obs = robot.get_observation()
23
+
24
+ observation_frame = build_dataset_frame(obs_features, obs, prefix="observation")
25
+ action_values = predict_action(
26
+ observation_frame, policy, get_safe_torch_device(policy.config.device), policy.config.use_amp
27
+ )
28
+ action = {key: action_values[i].item() for i, key in enumerate(robot.action_features)}
29
+ robot.send_action(action)
30
+ i += 1
31
+
32
+ robot.disconnect()
examples/lekiwi/record.py ADDED
@@ -0,0 +1,67 @@
1
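+ # Record a LeKiwi dataset: an SO100 leader arm teleoperates the arm and the keyboard drives the base,
+ # while the sent actions and observations are logged into a LeRobotDataset and pushed to the Hub.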
+ import time
2
+
3
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
4
+ from lerobot.common.datasets.utils import hw_to_dataset_features
5
+ from lerobot.common.robots.lekiwi.config_lekiwi import LeKiwiClientConfig
6
+ from lerobot.common.robots.lekiwi.lekiwi_client import LeKiwiClient
7
+ from lerobot.common.teleoperators.keyboard import KeyboardTeleop, KeyboardTeleopConfig
8
+ from lerobot.common.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig
9
+
10
+ NB_CYCLES_CLIENT_CONNECTION = 250
11
+
12
+ leader_arm_config = SO100LeaderConfig(port="/dev/tty.usbmodem58760431551")
13
+ leader_arm = SO100Leader(leader_arm_config)
14
+
15
+ keyboard_config = KeyboardTeleopConfig()
16
+ keyboard = KeyboardTeleop(keyboard_config)
17
+
18
+ robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
19
+ robot = LeKiwiClient(robot_config)
20
+
21
+ action_features = hw_to_dataset_features(robot.action_features, "action")
22
+ obs_features = hw_to_dataset_features(robot.observation_features, "observation")
23
+ dataset_features = {**action_features, **obs_features}
24
+
25
+ dataset = LeRobotDataset.create(
26
+ repo_id="pepijn223/lekiwi" + str(int(time.time())),
27
+ fps=10,
28
+ features=dataset_features,
29
+ robot_type=robot.name,
30
+ )
31
+
32
+ leader_arm.connect()
33
+ keyboard.connect()
34
+ robot.connect()
35
+
36
+ if not robot.is_connected or not leader_arm.is_connected or not keyboard.is_connected:
37
+ exit()
38
+
39
+ print("Starting LeKiwi recording")
40
+ i = 0
41
+ while i < NB_CYCLES_CLIENT_CONNECTION:
42
+ arm_action = leader_arm.get_action()
43
+ arm_action = {f"arm_{k}": v for k, v in arm_action.items()}
44
+
45
+ keyboard_keys = keyboard.get_action()
46
+
47
+ base_action = robot._from_keyboard_to_base_action(keyboard_keys)
48
+
49
+ action = {**arm_action, **base_action} if len(base_action) > 0 else arm_action
50
+
51
+ action_sent = robot.send_action(action)
52
+ observation = robot.get_observation()
53
+
54
+ frame = {**action_sent, **observation}
55
+ task = "Dummy Example Task Dataset"
56
+
57
+ dataset.add_frame(frame, task)
58
+ i += 1
59
+
60
+ print("Disconnecting Teleop Devices and LeKiwi Client")
61
+ robot.disconnect()
62
+ leader_arm.disconnect()
63
+ keyboard.disconnect()
64
+
65
+ print("Uploading dataset to the hub")
66
+ dataset.save_episode()
67
+ dataset.push_to_hub()
examples/lekiwi/replay.py ADDED
@@ -0,0 +1,25 @@
1
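+ # Replay a previously recorded LeKiwi episode by sending its recorded actions back to the robot at the dataset fps.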
+ import time
2
+
3
+ from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
4
+ from lerobot.common.robots.lekiwi.config_lekiwi import LeKiwiClientConfig
5
+ from lerobot.common.robots.lekiwi.lekiwi_client import LeKiwiClient
6
+ from lerobot.common.utils.robot_utils import busy_wait
7
+
8
+ robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="lekiwi")
9
+ robot = LeKiwiClient(robot_config)
10
+
11
+ dataset = LeRobotDataset("pepijn223/lekiwi1749025613", episodes=[0])
12
+
13
+ robot.connect()
14
+
15
+ print("Replaying episode…")
16
+ for action_array in dataset.hf_dataset["action"]:
17
+ t0 = time.perf_counter()
18
+
19
+ action = {name: float(action_array[i]) for i, name in enumerate(dataset.features["action"]["names"])}
20
+ robot.send_action(action)
21
+
22
+ busy_wait(max(1.0 / dataset.fps - (time.perf_counter() - t0), 0.0))
23
+
24
+ print("Disconnecting LeKiwi Client")
25
+ robot.disconnect()
examples/lekiwi/teleoperate.py ADDED
@@ -0,0 +1,32 @@
1
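+ # Teleoperate LeKiwi: an SO100 leader arm drives the arm joints while the keyboard drives the mobile base.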
+ from lerobot.common.robots.lekiwi import LeKiwiClient, LeKiwiClientConfig
2
+ from lerobot.common.teleoperators.keyboard.teleop_keyboard import KeyboardTeleop, KeyboardTeleopConfig
3
+ from lerobot.common.teleoperators.so100_leader import SO100Leader, SO100LeaderConfig
4
+
5
+ robot_config = LeKiwiClientConfig(remote_ip="172.18.134.136", id="my_lekiwi")
6
+
7
+ teleop_arm_config = SO100LeaderConfig(
8
+ port="/dev/tty.usbmodem58760431551",
9
+ id="my_awesome_leader_arm",
10
+ )
11
+
12
+ teleop_keyboard_config = KeyboardTeleopConfig(
13
+ id="my_laptop_keyboard",
14
+ )
15
+
16
+ robot = LeKiwiClient(robot_config)
17
+ teleop_arm = SO100Leader(teleop_arm_config)
18
+ teleop_keyboard = KeyboardTeleop(teleop_keyboard_config)
19
+ robot.connect()
20
+ teleop_arm.connect()
21
+ teleop_keyboard.connect()
22
+
23
+ while True:
24
+ observation = robot.get_observation()
25
+
26
+ arm_action = teleop_arm.get_action()
27
+ arm_action = {f"arm_{k}": v for k, v in arm_action.items()}
28
+
29
+ keyboard_keys = teleop_keyboard.get_action()
30
+ base_action = robot._from_keyboard_to_base_action(keyboard_keys)
31
+
32
+ robot.send_action(arm_action | base_action)
lerobot/__init__.py ADDED
@@ -0,0 +1,212 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ This file contains lists of available environments, datasets and policies to reflect the current state of the LeRobot library.
18
+ We do not want to import all the dependencies, but instead we keep it lightweight to ensure fast access to these variables.
19
+
20
+ Example:
21
+ ```python
22
+ import lerobot
23
+ print(lerobot.available_envs)
24
+ print(lerobot.available_tasks_per_env)
25
+ print(lerobot.available_datasets)
26
+ print(lerobot.available_datasets_per_env)
27
+ print(lerobot.available_real_world_datasets)
28
+ print(lerobot.available_policies)
29
+ print(lerobot.available_policies_per_env)
30
+ print(lerobot.available_robots)
31
+ print(lerobot.available_cameras)
32
+ print(lerobot.available_motors)
33
+ ```
34
+
35
+ When implementing a new dataset loadable with LeRobotDataset follow these steps:
36
+ - Update `available_datasets_per_env` in `lerobot/__init__.py`
37
+
38
+ When implementing a new environment (e.g. `gym_aloha`), follow these steps:
39
+ - Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py`
40
+
41
+ When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps:
42
+ - Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py`
43
+ - Set the required `name` class attribute.
44
+ - Update variables in `tests/test_available.py` by importing your new Policy class
45
+ """
46
+
47
+ import itertools
48
+
49
+ from lerobot.__version__ import __version__ # noqa: F401
50
+
51
+ # TODO(rcadene): Improve policies and envs. As of now, an item in `available_policies`
52
+ # refers to a yaml file AND a modeling name. Same for `available_envs` which refers to
53
+ # a yaml file AND an environment name. The difference should be more obvious.
54
+ available_tasks_per_env = {
55
+ "aloha": [
56
+ "AlohaInsertion-v0",
57
+ "AlohaTransferCube-v0",
58
+ ],
59
+ "pusht": ["PushT-v0"],
60
+ "xarm": ["XarmLift-v0"],
61
+ }
62
+ available_envs = list(available_tasks_per_env.keys())
63
+
64
+ available_datasets_per_env = {
65
+ "aloha": [
66
+ "lerobot/aloha_sim_insertion_human",
67
+ "lerobot/aloha_sim_insertion_scripted",
68
+ "lerobot/aloha_sim_transfer_cube_human",
69
+ "lerobot/aloha_sim_transfer_cube_scripted",
70
+ "lerobot/aloha_sim_insertion_human_image",
71
+ "lerobot/aloha_sim_insertion_scripted_image",
72
+ "lerobot/aloha_sim_transfer_cube_human_image",
73
+ "lerobot/aloha_sim_transfer_cube_scripted_image",
74
+ ],
75
+ # TODO(alexander-soare): Add "lerobot/pusht_keypoints". Right now we can't because this is too tightly
76
+ # coupled with tests.
77
+ "pusht": ["lerobot/pusht", "lerobot/pusht_image"],
78
+ "xarm": [
79
+ "lerobot/xarm_lift_medium",
80
+ "lerobot/xarm_lift_medium_replay",
81
+ "lerobot/xarm_push_medium",
82
+ "lerobot/xarm_push_medium_replay",
83
+ "lerobot/xarm_lift_medium_image",
84
+ "lerobot/xarm_lift_medium_replay_image",
85
+ "lerobot/xarm_push_medium_image",
86
+ "lerobot/xarm_push_medium_replay_image",
87
+ ],
88
+ }
89
+
90
+ available_real_world_datasets = [
91
+ "lerobot/aloha_mobile_cabinet",
92
+ "lerobot/aloha_mobile_chair",
93
+ "lerobot/aloha_mobile_elevator",
94
+ "lerobot/aloha_mobile_shrimp",
95
+ "lerobot/aloha_mobile_wash_pan",
96
+ "lerobot/aloha_mobile_wipe_wine",
97
+ "lerobot/aloha_static_battery",
98
+ "lerobot/aloha_static_candy",
99
+ "lerobot/aloha_static_coffee",
100
+ "lerobot/aloha_static_coffee_new",
101
+ "lerobot/aloha_static_cups_open",
102
+ "lerobot/aloha_static_fork_pick_up",
103
+ "lerobot/aloha_static_pingpong_test",
104
+ "lerobot/aloha_static_pro_pencil",
105
+ "lerobot/aloha_static_screw_driver",
106
+ "lerobot/aloha_static_tape",
107
+ "lerobot/aloha_static_thread_velcro",
108
+ "lerobot/aloha_static_towel",
109
+ "lerobot/aloha_static_vinh_cup",
110
+ "lerobot/aloha_static_vinh_cup_left",
111
+ "lerobot/aloha_static_ziploc_slide",
112
+ "lerobot/umi_cup_in_the_wild",
113
+ "lerobot/unitreeh1_fold_clothes",
114
+ "lerobot/unitreeh1_rearrange_objects",
115
+ "lerobot/unitreeh1_two_robot_greeting",
116
+ "lerobot/unitreeh1_warehouse",
117
+ "lerobot/nyu_rot_dataset",
118
+ "lerobot/utokyo_saytap",
119
+ "lerobot/imperialcollege_sawyer_wrist_cam",
120
+ "lerobot/utokyo_xarm_bimanual",
121
+ "lerobot/tokyo_u_lsmo",
122
+ "lerobot/utokyo_pr2_opening_fridge",
123
+ "lerobot/cmu_franka_exploration_dataset",
124
+ "lerobot/cmu_stretch",
125
+ "lerobot/asu_table_top",
126
+ "lerobot/utokyo_pr2_tabletop_manipulation",
127
+ "lerobot/utokyo_xarm_pick_and_place",
128
+ "lerobot/ucsd_kitchen_dataset",
129
+ "lerobot/austin_buds_dataset",
130
+ "lerobot/dlr_sara_grid_clamp",
131
+ "lerobot/conq_hose_manipulation",
132
+ "lerobot/columbia_cairlab_pusht_real",
133
+ "lerobot/dlr_sara_pour",
134
+ "lerobot/dlr_edan_shared_control",
135
+ "lerobot/ucsd_pick_and_place_dataset",
136
+ "lerobot/berkeley_cable_routing",
137
+ "lerobot/nyu_franka_play_dataset",
138
+ "lerobot/austin_sirius_dataset",
139
+ "lerobot/cmu_play_fusion",
140
+ "lerobot/berkeley_gnm_sac_son",
141
+ "lerobot/nyu_door_opening_surprising_effectiveness",
142
+ "lerobot/berkeley_fanuc_manipulation",
143
+ "lerobot/jaco_play",
144
+ "lerobot/viola",
145
+ "lerobot/kaist_nonprehensile",
146
+ "lerobot/berkeley_mvp",
147
+ "lerobot/uiuc_d3field",
148
+ "lerobot/berkeley_gnm_recon",
149
+ "lerobot/austin_sailor_dataset",
150
+ "lerobot/utaustin_mutex",
151
+ "lerobot/roboturk",
152
+ "lerobot/stanford_hydra_dataset",
153
+ "lerobot/berkeley_autolab_ur5",
154
+ "lerobot/stanford_robocook",
155
+ "lerobot/toto",
156
+ "lerobot/fmb",
157
+ "lerobot/droid_100",
158
+ "lerobot/berkeley_rpt",
159
+ "lerobot/stanford_kuka_multimodal_dataset",
160
+ "lerobot/iamlab_cmu_pickup_insert",
161
+ "lerobot/taco_play",
162
+ "lerobot/berkeley_gnm_cory_hall",
163
+ "lerobot/usc_cloth_sim",
164
+ ]
165
+
166
+ available_datasets = sorted(
167
+ set(itertools.chain(*available_datasets_per_env.values(), available_real_world_datasets))
168
+ )
169
+
170
+ # lists all available policies from `lerobot/common/policies`
171
+ available_policies = ["act", "diffusion", "tdmpc", "vqbet"]
172
+
173
+ # lists all available robots from `lerobot/common/robot_devices/robots`
174
+ available_robots = [
175
+ "koch",
176
+ "koch_bimanual",
177
+ "aloha",
178
+ "so100",
179
+ "so101",
180
+ ]
181
+
182
+ # lists all available cameras from `lerobot/common/robot_devices/cameras`
183
+ available_cameras = [
184
+ "opencv",
185
+ "intelrealsense",
186
+ ]
187
+
188
+ # lists all available motors from `lerobot/common/robot_devices/motors`
189
+ available_motors = [
190
+ "dynamixel",
191
+ "feetech",
192
+ ]
193
+
194
+ # keys and values refer to yaml files
195
+ available_policies_per_env = {
196
+ "aloha": ["act"],
197
+ "pusht": ["diffusion", "vqbet"],
198
+ "xarm": ["tdmpc"],
199
+ "koch_real": ["act_koch_real"],
200
+ "aloha_real": ["act_aloha_real"],
201
+ }
202
+
203
+ env_task_pairs = [(env, task) for env, tasks in available_tasks_per_env.items() for task in tasks]
204
+ env_dataset_pairs = [
205
+ (env, dataset) for env, datasets in available_datasets_per_env.items() for dataset in datasets
206
+ ]
207
+ env_dataset_policy_triplets = [
208
+ (env, dataset, policy)
209
+ for env, datasets in available_datasets_per_env.items()
210
+ for dataset in datasets
211
+ for policy in available_policies_per_env[env]
212
+ ]