lucid-hf committed on
Commit 98a3af2 · verified · 1 Parent(s): 5bf7ea9

CI: deploy Docker/PDM Space

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +12 -0
  2. .gitignore +139 -0
  3. Dockerfile +68 -0
  4. README.md +47 -7
  5. deploy-development.yaml +78 -0
  6. deploy-main.yaml +78 -0
  7. pdm.lock +1339 -0
  8. pyproject.toml +148 -0
  9. services/app_service/.streamlit/config.toml +10 -0
  10. services/app_service/app.py +148 -0
  11. services/app_service/app_old.py +204 -0
  12. services/app_service/deim_model.py +228 -0
  13. services/app_service/model.py +90 -0
  14. services/app_service/models/model_deimhgnetV2m_cpu_v0.json +4 -0
  15. services/app_service/models/model_deimhgnetV2m_cpu_v0.pt +3 -0
  16. services/app_service/models/model_deimhgnetV2m_cpu_v0.pt.backup +3 -0
  17. services/app_service/models/model_deimhgnetV2m_cpu_v2.json +4 -0
  18. services/app_service/models/model_deimhgnetV2m_cpu_v2.pt +3 -0
  19. services/app_service/models/model_deimhgnetV2m_cuda_v2.json +4 -0
  20. services/app_service/models/model_deimhgnetV2m_cuda_v2.pt +3 -0
  21. services/app_service/models/yolov8n.pt +3 -0
  22. services/app_service/pages/bushland_beacon.py +116 -0
  23. services/app_service/pages/lost_at_sea.py +118 -0
  24. services/app_service/pages/signal_watch.py +255 -0
  25. services/app_service/resources/images/rescue.png +3 -0
  26. services/app_service/resources/images/rescue1.png +3 -0
  27. services/app_service/resources/images/rescue2.jpg +3 -0
  28. services/app_service/resources/images/test.jpg +3 -0
  29. services/app_service/utils/__init__.py +0 -0
  30. services/app_service/utils/cache.py +6 -0
  31. services/app_service/utils/inference.py +30 -0
  32. services/app_service/utils/ir_sim.py +7 -0
  33. services/app_service/utils/overlays.py +20 -0
  34. services/app_service/utils/video_io.py +25 -0
  35. services/training_service/LICENSE +208 -0
  36. services/training_service/README.md +449 -0
  37. services/training_service/configs/base/dataloader.yml +40 -0
  38. services/training_service/configs/base/deim.yml +49 -0
  39. services/training_service/configs/base/dfine_hgnetv2.yml +90 -0
  40. services/training_service/configs/base/optimizer.yml +39 -0
  41. services/training_service/configs/base/rt_deim.yml +49 -0
  42. services/training_service/configs/base/rt_optimizer.yml +37 -0
  43. services/training_service/configs/base/rtdetrv2_r50vd.yml +90 -0
  44. services/training_service/configs/base/wandb.yml +22 -0
  45. services/training_service/configs/dataset/coco_detection.yml +41 -0
  46. services/training_service/configs/dataset/crowdhuman_detection.yml +41 -0
  47. services/training_service/configs/dataset/custom_detection.yml +41 -0
  48. services/training_service/configs/dataset/drone_detection.yml +41 -0
  49. services/training_service/configs/dataset/obj365_detection.yml +41 -0
  50. services/training_service/configs/dataset/voc_detection.yml +40 -0
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+models/*.pt filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.mov filter=lfs diff=lfs merge=lfs -text
+*.webm filter=lfs diff=lfs merge=lfs -text
+annotated_video/* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
+*.avi filter=lfs diff=lfs merge=lfs -text
+*.json filter=lfs diff=lfs merge=lfs -text
+services/app_service/models/model_deimhgnetV2m_cpu_v0.pt.backup filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,139 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# PyCharm
+training/DEIM/.idea
+
+*.onnx
+*.zip
+*.html
+.DS_Store
+
+# idea
+.idea
Dockerfile ADDED
@@ -0,0 +1,68 @@
+FROM python:3.11-slim
+
+WORKDIR /services/app_service
+
+# System packages for video/image processing and OpenCV runtime
+RUN apt-get update && apt-get install -y \
+    ffmpeg \
+    curl \
+    git \
+    git-lfs \
+    libgl1 \
+    libglib2.0-0 \
+    && rm -rf /var/lib/apt/lists/* \
+    && git lfs install
+
+# Reduce build-time and run-time memory usage and set writable config dirs
+ENV PIP_NO_CACHE_DIR=1 \
+    PIP_ROOT_USER_ACTION=ignore \
+    OMP_NUM_THREADS=1 \
+    MKL_NUM_THREADS=1 \
+    HOME=/tmp \
+    XDG_CACHE_HOME=/tmp/.cache \
+    MPLCONFIGDIR=/tmp/matplotlib \
+    YOLO_CONFIG_DIR=/tmp/Ultralytics \
+    STREAMLIT_BROWSER_GATHER_USAGE_STATS=false
+
+# Create config/cache dirs to avoid permission issues
+RUN mkdir -p /tmp/.streamlit /tmp/.cache /tmp/matplotlib /tmp/Ultralytics \
+    && chmod -R 777 /tmp/.streamlit /tmp/.cache /tmp/matplotlib /tmp/Ultralytics
+
+# Python deps (CPU-only PyTorch wheels)
+RUN python -m pip install --upgrade pip setuptools wheel && \
+    pip install --no-cache-dir \
+    streamlit==1.49.1 \
+    moviepy==2.2.1 \
+    "pillow>=10.0.0" \
+    ultralytics==8.0.196 \
+    torch==2.5.1 torchvision==0.20.1 --extra-index-url https://download.pytorch.org/whl/cpu
+
+# Copy entire repository and download script
+COPY . /tmp/repo
+WORKDIR /tmp/repo
+
+# Pull LFS files and copy app service
+RUN git lfs pull && cp -r services/app_service/* /services/app_service/
+
+WORKDIR /services/app_service
+
+# Ensure app directory is writable at runtime
+RUN chmod -R 777 /services/app_service
+
+# Streamlit config via env
+ENV STREAMLIT_SERVER_ENABLECORS=false \
+    STREAMLIT_SERVER_ENABLE_XSRF_PROTECTION=false \
+    STREAMLIT_SERVER_ADDRESS=0.0.0.0 \
+    STREAMLIT_SERVER_HEADLESS=true \
+    STREAMLIT_SERVER_MAX_UPLOAD_SIZE=200
+
+# Hugging Face Spaces will set $PORT (default 7860); listen on that
+ENV PORT=7860
+EXPOSE 7860
+
+# Healthcheck should also use $PORT
+HEALTHCHECK CMD curl --fail http://localhost:${PORT}/_stcore/health || exit 1
+
+# Use $PORT instead of hard-coding 8501
+ENTRYPOINT ["sh", "-c", "streamlit run app.py --server.port=$PORT --server.address=0.0.0.0"]
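
The HEALTHCHECK above probes Streamlit's built-in `/_stcore/health` endpoint. For debugging the same check outside the container, a rough Python equivalent might look like this (a minimal sketch, assuming the app is reachable on `localhost` and `PORT` defaults to 7860):

```python
# Minimal sketch of the same probe the Dockerfile HEALTHCHECK performs;
# assumes the container is running locally and PORT (default 7860) applies.
import os
import urllib.request

port = int(os.environ.get("PORT", "7860"))
url = f"http://localhost:{port}/_stcore/health"

try:
    with urllib.request.urlopen(url, timeout=5) as resp:
        # Streamlit answers 200 with body "ok" when the server is healthy
        print(url, "->", resp.status)
except OSError as exc:
    print("health check failed:", exc)
    raise SystemExit(1)
```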
README.md CHANGED
@@ -1,12 +1,52 @@
 ---
-title: Lucid Natsar Dev
-emoji: 🚀
-colorFrom: pink
-colorTo: blue
+title: Natsar Demo
+emoji: "🚀"
+colorFrom: red
+colorTo: red
 sdk: docker
 pinned: false
-license: apache-2.0
-short_description: natsar project development environment
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# Introduction
+
+Computer Vision object detection for National Search and Rescue (NATSAR)
+
+## Prerequisites
+
+1. Install conda (environment management) using the terminal in VS Code
+
+   - for macOS users: [https://www.anaconda.com/docs/getting-started/miniconda/main]
+   - for Windows users: [https://www.anaconda.com/docs/getting-started/miniconda/main]
+
+2. Create an environment with conda using the following command:
+   `conda create --name natsar python=3.11`
+   This creates the `natsar` environment (any other name works) for this project.
+   It is also good practice to create a separate environment for each project.
+
+3. Activate the environment:
+   `conda activate natsar`
+
+4. Install PDM (package and dependency manager) to avoid dependency conflicts:
+   `pip install pdm`
+   Sometimes `conda` doesn't support a library; in that case `pip` is fine, but use it within the `natsar` env.
+
+5. Install packages and dependencies:
+   `pdm install`
+
+## Running the project locally
+
+After installing the dependencies, make sure the environment is activated.
+
+1. Go to the `src` folder with `cd src` in the terminal
+2. Run `app.py` with `pdm run streamlit run app.py`
+
+**Note:** if cloning from Hugging Face, you may need to fetch the large files with Git LFS:
+`pip install git-lfs`, then `git lfs install`,
+then `git lfs pull` to pull the files locally, and `pdm run streamlit run app.py` to run.
+
+## Build and Test
+
+- Main `app.py` file is to be placed at the root of the NATSAR-DEMO repo.
+- The app points to the different models that sit within the nominated sub-folders (see the sketch below).
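
As a sketch of that model layout (not the app's actual loader; the helper name is hypothetical, and whether the `.pt` files are TorchScript exports is an assumption), pairing a weight file under `services/app_service/models/` with its sibling JSON metadata could look like:

```python
# Hypothetical loader sketch: pairs model_*.pt weights with their
# model_*.json metadata as listed under services/app_service/models/.
# That the .pt files are TorchScript exports is an assumption here.
import json
from pathlib import Path

import torch

MODELS_DIR = Path("services/app_service/models")

def load_model(stem: str):
    """Load weights plus sibling JSON metadata by file stem."""
    meta = json.loads((MODELS_DIR / f"{stem}.json").read_text())
    model = torch.jit.load(MODELS_DIR / f"{stem}.pt", map_location="cpu").eval()
    return model, meta

model, meta = load_model("model_deimhgnetV2m_cpu_v2")
```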
deploy-development.yaml ADDED
@@ -0,0 +1,78 @@
+# azure-pipelines.yml
+# CI/CD: Deploy entire repo to Hugging Face Space (Docker + PDM)
+# Space will build from your Dockerfile and run `src/app.py`.
+
+trigger:
+  branches:
+    include:
+      - development
+
+pool:
+  vmImage: "ubuntu-latest"
+
+variables:
+  - group: HF_TOKEN_NATSAR
+  - name: PYTHON_VERSION
+    value: "3.10"
+  - name: HF_SPACE_ID_DEV
+    value: "lucid-hf/lucid-natsar-dev"
+
+steps:
+  - checkout: self
+    lfs: true
+
+  - script: |
+      git lfs install --local
+      git lfs pull
+    displayName: "Fetch Git LFS assets"
+
+  - task: UsePythonVersion@0
+    inputs:
+      versionSpec: "$(PYTHON_VERSION)"
+
+  - script: |
+      python -m pip install --upgrade pip
+      pip install huggingface_hub==0.25.*
+      python - <<'PY'
+      import os
+      from huggingface_hub import HfApi, upload_folder
+
+      token = os.environ["HF_TOKEN"]        # provided via Pipeline variable (Secret)
+      space_id = os.environ["HF_SPACE_ID"]  # from variables above
+
+      api = HfApi(token=token)
+
+      # Ensure Space exists and uses Docker
+      api.create_repo(
+          repo_id=space_id,
+          repo_type="space",
+          exist_ok=True,
+          space_sdk="docker"
+      )
+
+      # Upload repo contents (respect ignore patterns to speed builds)
+      upload_folder(
+          folder_path=".",      # whole repo: Dockerfile, pyproject.toml, src/, models/, etc.
+          repo_id=space_id,
+          repo_type="space",
+          path_in_repo=".",     # put at Space root
+          token=token,
+          commit_message="CI: deploy Docker/PDM Space",
+          ignore_patterns=[
+              ".git/*",
+              "__pycache__/*",
+              "*.mp4", "*.avi", "*.mov",
+              "*.zip", "*.tar", "*.tar.gz",
+              "*.ipynb", "*.ipynb_checkpoints/*",
+              "venv/*", ".venv/*",
+              "dist/*", "build/*",
+              ".mypy_cache/*", ".pytest_cache/*",
+              "annotated_video/*", "annotated_images/*",
+              "training_model/*"
+          ]
+      )
+      PY
+    displayName: "Deploy to Hugging Face Space (Docker/PDM)"
+    env:
+      HF_TOKEN: $(HF_TOKEN_DEV)        # Add this as a secret variable in Pipeline settings
+      HF_SPACE_ID: $(HF_SPACE_ID_DEV)
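
Note that `upload_folder` only pushes the files; the Space then rebuilds asynchronously, so the pipeline finishing does not mean the app is live. A possible follow-up check (a sketch, not part of the pipeline above; it assumes the same `HF_TOKEN`/`HF_SPACE_ID` variables are set) would poll the Space's build stage via `huggingface_hub`:

```python
# Sketch of a post-deploy status check; not part of the pipeline above.
# Assumes HF_TOKEN and HF_SPACE_ID are exported as in the deploy step.
import os
import time

from huggingface_hub import HfApi

api = HfApi(token=os.environ["HF_TOKEN"])
space_id = os.environ["HF_SPACE_ID"]

for _ in range(30):  # poll for up to ~5 minutes
    stage = api.get_space_runtime(space_id).stage
    print("space stage:", stage)
    if stage in ("RUNNING", "RUNTIME_ERROR", "BUILD_ERROR"):
        break
    time.sleep(10)
```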
deploy-main.yaml ADDED
@@ -0,0 +1,78 @@
+# azure-pipelines.yml
+# CI/CD: Deploy entire repo to Hugging Face Space (Docker + PDM)
+# Space will build from your Dockerfile and run `src/app.py`.
+
+trigger:
+  branches:
+    include:
+      - main
+
+pool:
+  vmImage: "ubuntu-latest"
+
+variables:
+  - group: HF_TOKEN_NATSAR
+  - name: PYTHON_VERSION
+    value: "3.10"
+  - name: HF_SPACE_ID_PROD
+    value: "lucid-hf/lucid-natsar"
+
+steps:
+  - checkout: self
+    lfs: true
+
+  - script: |
+      git lfs install --local
+      git lfs pull
+    displayName: "Fetch Git LFS assets"
+
+  - task: UsePythonVersion@0
+    inputs:
+      versionSpec: "$(PYTHON_VERSION)"
+
+  - script: |
+      python -m pip install --upgrade pip
+      pip install huggingface_hub==0.25.*
+      python - <<'PY'
+      import os
+      from huggingface_hub import HfApi, upload_folder
+
+      token = os.environ["HF_TOKEN"]        # provided via Pipeline variable (Secret)
+      space_id = os.environ["HF_SPACE_ID"]  # from variables above
+
+      api = HfApi(token=token)
+
+      # Ensure Space exists and uses Docker
+      api.create_repo(
+          repo_id=space_id,
+          repo_type="space",
+          exist_ok=True,
+          space_sdk="docker"
+      )
+
+      # Upload repo contents (respect ignore patterns to speed builds)
+      upload_folder(
+          folder_path=".",      # whole repo: Dockerfile, pyproject.toml, src/, models/, etc.
+          repo_id=space_id,
+          repo_type="space",
+          path_in_repo=".",     # put at Space root
+          token=token,
+          commit_message="CI: deploy Docker/PDM Space",
+          ignore_patterns=[
+              ".git/*",
+              "__pycache__/*",
+              "*.mp4", "*.avi", "*.mov",
+              "*.zip", "*.tar", "*.tar.gz",
+              "*.ipynb", "*.ipynb_checkpoints/*",
+              "venv/*", ".venv/*",
+              "dist/*", "build/*",
+              ".mypy_cache/*", ".pytest_cache/*",
+              "annotated_video/*", "annotated_images/*",
+              "training_model/*"
+          ]
+      )
+      PY
+    displayName: "Deploy to Hugging Face Space (Docker/PDM)"
+    env:
+      HF_TOKEN: $(HF_TOKEN_PROD)       # Add this as a secret variable in Pipeline settings
+      HF_SPACE_ID: $(HF_SPACE_ID_PROD)
pdm.lock ADDED
@@ -0,0 +1,1339 @@
+# This file is @generated by PDM.
+# It is not intended for manual editing.
+
+[metadata]
+groups = ["default", "dev"]
+strategy = ["inherit_metadata"]
+lock_version = "4.5.0"
+content_hash = "sha256:41dd8a8c5e0a214f7540666a09b18cdbb412b540a64ee20a349849fb951b8ae4"
+
+[[metadata.targets]]
+requires_python = "==3.11.*"
+
+[[package]]
+name = "altair"
+version = "5.5.0"
+requires_python = ">=3.9"
+summary = "Vega-Altair: A declarative statistical visualization library for Python."
+groups = ["default"]
+dependencies = [
+    "jinja2",
+    "jsonschema>=3.0",
+    "narwhals>=1.14.2",
+    "packaging",
+    "typing-extensions>=4.10.0; python_version < \"3.14\"",
+]
+files = [
+    {file = "altair-5.5.0-py3-none-any.whl", hash = "sha256:91a310b926508d560fe0148d02a194f38b824122641ef528113d029fcd129f8c"},
+    {file = "altair-5.5.0.tar.gz", hash = "sha256:d960ebe6178c56de3855a68c47b516be38640b73fb3b5111c2a9ca90546dd73d"},
+]
+
+[[package]]
+name = "attrs"
+version = "25.3.0"
+requires_python = ">=3.8"
+summary = "Classes Without Boilerplate"
+groups = ["default"]
+files = [
+    {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"},
+    {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"},
+]
+
+[[package]]
+name = "blinker"
+version = "1.9.0"
+requires_python = ">=3.9"
+summary = "Fast, simple object-to-object and broadcast signaling"
+groups = ["default"]
+files = [
+    {file = "blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc"},
+    {file = "blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf"},
+]
+
+[[package]]
+name = "cachetools"
+version = "6.2.0"
+requires_python = ">=3.9"
+summary = "Extensible memoizing collections and decorators"
+groups = ["default"]
+files = [
+    {file = "cachetools-6.2.0-py3-none-any.whl", hash = "sha256:1c76a8960c0041fcc21097e357f882197c79da0dbff766e7317890a65d7d8ba6"},
+    {file = "cachetools-6.2.0.tar.gz", hash = "sha256:38b328c0889450f05f5e120f56ab68c8abaf424e1275522b138ffc93253f7e32"},
+]
+
+[[package]]
+name = "certifi"
+version = "2025.8.3"
+requires_python = ">=3.7"
+summary = "Python package for providing Mozilla's CA Bundle."
+groups = ["default"]
+files = [
+    {file = "certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5"},
+    {file = "certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.4.3"
+requires_python = ">=3.7"
+summary = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+groups = ["default"]
+files = [
+    {file = "charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b"},
+    {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64"},
+    {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91"},
+    {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f"},
+    {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07"},
+    {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30"},
+    {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14"},
+    {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c"},
+    {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae"},
+    {file = "charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849"},
+    {file = "charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c"},
+    {file = "charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a"},
+    {file = "charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14"},
+]
+
+[[package]]
+name = "click"
+version = "8.2.1"
+requires_python = ">=3.10"
+summary = "Composable command line interface toolkit"
+groups = ["default"]
+dependencies = [
+    "colorama; platform_system == \"Windows\"",
+]
+files = [
+    {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"},
+    {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"},
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+summary = "Cross-platform colored terminal text."
+groups = ["default"]
+marker = "platform_system == \"Windows\""
+files = [
+    {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+    {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "contourpy"
+version = "1.3.3"
+requires_python = ">=3.11"
+summary = "Python library for calculating contours of 2D quadrilateral grids"
+groups = ["default"]
+dependencies = [
+    "numpy>=1.25",
+]
+files = [
+    {file = "contourpy-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:709a48ef9a690e1343202916450bc48b9e51c049b089c7f79a267b46cffcdaa1"},
+    {file = "contourpy-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:23416f38bfd74d5d28ab8429cc4d63fa67d5068bd711a85edb1c3fb0c3e2f381"},
+    {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:929ddf8c4c7f348e4c0a5a3a714b5c8542ffaa8c22954862a46ca1813b667ee7"},
+    {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9e999574eddae35f1312c2b4b717b7885d4edd6cb46700e04f7f02db454e67c1"},
+    {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0bf67e0e3f482cb69779dd3061b534eb35ac9b17f163d851e2a547d56dba0a3a"},
+    {file = "contourpy-1.3.3-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:51e79c1f7470158e838808d4a996fa9bac72c498e93d8ebe5119bc1e6becb0db"},
+    {file = "contourpy-1.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:598c3aaece21c503615fd59c92a3598b428b2f01bfb4b8ca9c4edeecc2438620"},
+    {file = "contourpy-1.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:322ab1c99b008dad206d406bb61d014cf0174df491ae9d9d0fac6a6fda4f977f"},
+    {file = "contourpy-1.3.3-cp311-cp311-win32.whl", hash = "sha256:fd907ae12cd483cd83e414b12941c632a969171bf90fc937d0c9f268a31cafff"},
+    {file = "contourpy-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:3519428f6be58431c56581f1694ba8e50626f2dd550af225f82fb5f5814d2a42"},
+    {file = "contourpy-1.3.3-cp311-cp311-win_arm64.whl", hash = "sha256:15ff10bfada4bf92ec8b31c62bf7c1834c244019b4a33095a68000d7075df470"},
+    {file = "contourpy-1.3.3-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:cd5dfcaeb10f7b7f9dc8941717c6c2ade08f587be2226222c12b25f0483ed497"},
+    {file = "contourpy-1.3.3-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:0c1fc238306b35f246d61a1d416a627348b5cf0648648a031e14bb8705fcdfe8"},
+    {file = "contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70f9aad7de812d6541d29d2bbf8feb22ff7e1c299523db288004e3157ff4674e"},
+    {file = "contourpy-1.3.3-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5ed3657edf08512fc3fe81b510e35c2012fbd3081d2e26160f27ca28affec989"},
+    {file = "contourpy-1.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:3d1a3799d62d45c18bafd41c5fa05120b96a28079f2393af559b843d1a966a77"},
+    {file = "contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880"},
+]
+
+[[package]]
+name = "cycler"
+version = "0.12.1"
+requires_python = ">=3.8"
+summary = "Composable style cycles"
+groups = ["default"]
+files = [
+    {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"},
+    {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"},
+]
+
+[[package]]
+name = "decorator"
+version = "5.2.1"
+requires_python = ">=3.8"
+summary = "Decorators for Humans"
+groups = ["default"]
+files = [
+    {file = "decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a"},
+    {file = "decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360"},
+]
+
+[[package]]
+name = "filelock"
+version = "3.19.1"
+requires_python = ">=3.9"
+summary = "A platform independent file lock."
+groups = ["default"]
+files = [
+    {file = "filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d"},
+    {file = "filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58"},
+]
+
+[[package]]
+name = "fonttools"
+version = "4.59.2"
+requires_python = ">=3.9"
+summary = "Tools to manipulate font files"
+groups = ["default"]
+files = [
+    {file = "fonttools-4.59.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:511946e8d7ea5c0d6c7a53c4cb3ee48eda9ab9797cd9bf5d95829a398400354f"},
+    {file = "fonttools-4.59.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8e5e2682cf7be766d84f462ba8828d01e00c8751a8e8e7ce12d7784ccb69a30d"},
+    {file = "fonttools-4.59.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5729e12a982dba3eeae650de48b06f3b9ddb51e9aee2fcaf195b7d09a96250e2"},
+    {file = "fonttools-4.59.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c52694eae5d652361d59ecdb5a2246bff7cff13b6367a12da8499e9df56d148d"},
+    {file = "fonttools-4.59.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f1f1bbc23ba1312bd8959896f46f667753b90216852d2a8cfa2d07e0cb234144"},
+    {file = "fonttools-4.59.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1a1bfe5378962825dabe741720885e8b9ae9745ec7ecc4a5ec1f1ce59a6062bf"},
+    {file = "fonttools-4.59.2-cp311-cp311-win32.whl", hash = "sha256:e937790f3c2c18a1cbc7da101550a84319eb48023a715914477d2e7faeaba570"},
+    {file = "fonttools-4.59.2-cp311-cp311-win_amd64.whl", hash = "sha256:9836394e2f4ce5f9c0a7690ee93bd90aa1adc6b054f1a57b562c5d242c903104"},
+    {file = "fonttools-4.59.2-py3-none-any.whl", hash = "sha256:8bd0f759020e87bb5d323e6283914d9bf4ae35a7307dafb2cbd1e379e720ad37"},
+    {file = "fonttools-4.59.2.tar.gz", hash = "sha256:e72c0749b06113f50bcb80332364c6be83a9582d6e3db3fe0b280f996dc2ef22"},
+]
+
+[[package]]
+name = "fsspec"
+version = "2025.9.0"
+requires_python = ">=3.9"
+summary = "File-system specification"
+groups = ["default"]
+files = [
+    {file = "fsspec-2025.9.0-py3-none-any.whl", hash = "sha256:530dc2a2af60a414a832059574df4a6e10cce927f6f4a78209390fe38955cfb7"},
+    {file = "fsspec-2025.9.0.tar.gz", hash = "sha256:19fd429483d25d28b65ec68f9f4adc16c17ea2c7c7bf54ec61360d478fb19c19"},
+]
+
+[[package]]
+name = "gitdb"
+version = "4.0.12"
+requires_python = ">=3.7"
+summary = "Git Object Database"
+groups = ["default"]
+dependencies = [
+    "smmap<6,>=3.0.1",
+]
+files = [
+    {file = "gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf"},
+    {file = "gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571"},
+]
+
+[[package]]
+name = "gitpython"
+version = "3.1.45"
+requires_python = ">=3.7"
+summary = "GitPython is a Python library used to interact with Git repositories"
+groups = ["default"]
+dependencies = [
+    "gitdb<5,>=4.0.1",
+    "typing-extensions>=3.10.0.2; python_version < \"3.10\"",
+]
+files = [
+    {file = "gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77"},
+    {file = "gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c"},
+]
+
+[[package]]
+name = "idna"
+version = "3.10"
+requires_python = ">=3.6"
+summary = "Internationalized Domain Names in Applications (IDNA)"
+groups = ["default"]
+files = [
+    {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
+    {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
+]
+
+[[package]]
+name = "imageio"
+version = "2.37.0"
+requires_python = ">=3.9"
+summary = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats."
+groups = ["default"]
+dependencies = [
+    "numpy",
+    "pillow>=8.3.2",
+]
+files = [
+    {file = "imageio-2.37.0-py3-none-any.whl", hash = "sha256:11efa15b87bc7871b61590326b2d635439acc321cf7f8ce996f812543ce10eed"},
+    {file = "imageio-2.37.0.tar.gz", hash = "sha256:71b57b3669666272c818497aebba2b4c5f20d5b37c81720e5e1a56d59c492996"},
+]
+
+[[package]]
+name = "imageio-ffmpeg"
+version = "0.6.0"
+requires_python = ">=3.9"
+summary = "FFMPEG wrapper for Python"
+groups = ["default"]
+files = [
+    {file = "imageio_ffmpeg-0.6.0-py3-none-macosx_10_9_intel.macosx_10_9_x86_64.whl", hash = "sha256:9d2baaf867088508d4a3458e61eeb30e945c4ad8016025545f66c4b5aaef0a61"},
+    {file = "imageio_ffmpeg-0.6.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b1ae3173414b5fc5f538a726c4e48ea97edc0d2cdc11f103afee655c463fa742"},
+    {file = "imageio_ffmpeg-0.6.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:1d47bebd83d2c5fc770720d211855f208af8a596c82d17730aa51e815cdee6dc"},
+    {file = "imageio_ffmpeg-0.6.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:c7e46fcec401dd990405049d2e2f475e2b397779df2519b544b8aab515195282"},
+    {file = "imageio_ffmpeg-0.6.0-py3-none-win32.whl", hash = "sha256:196faa79366b4a82f95c0f4053191d2013f4714a715780f0ad2a68ff37483cc2"},
+    {file = "imageio_ffmpeg-0.6.0-py3-none-win_amd64.whl", hash = "sha256:02fa47c83703c37df6bfe4896aab339013f62bf02c5ebf2dce6da56af04ffc0a"},
+    {file = "imageio_ffmpeg-0.6.0.tar.gz", hash = "sha256:e2556bed8e005564a9f925bb7afa4002d82770d6b08825078b7697ab88ba1755"},
+]
+
+[[package]]
+name = "jinja2"
+version = "3.1.6"
+requires_python = ">=3.7"
+summary = "A very fast and expressive template engine."
+groups = ["default"]
+dependencies = [
+    "MarkupSafe>=2.0",
+]
+files = [
+    {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"},
+    {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"},
+]
+
+[[package]]
+name = "jsonschema"
+version = "4.25.1"
+requires_python = ">=3.9"
+summary = "An implementation of JSON Schema validation for Python"
+groups = ["default"]
+dependencies = [
+    "attrs>=22.2.0",
+    "jsonschema-specifications>=2023.03.6",
+    "referencing>=0.28.4",
+    "rpds-py>=0.7.1",
+]
+files = [
+    {file = "jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63"},
+    {file = "jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85"},
+]
+
+[[package]]
+name = "jsonschema-specifications"
+version = "2025.9.1"
+requires_python = ">=3.9"
+summary = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
+groups = ["default"]
+dependencies = [
+    "referencing>=0.31.0",
+]
+files = [
+    {file = "jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe"},
+    {file = "jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d"},
+]
+
+[[package]]
+name = "kiwisolver"
+version = "1.4.9"
+requires_python = ">=3.10"
+summary = "A fast implementation of the Cassowary constraint solver"
+groups = ["default"]
+files = [
+    {file = "kiwisolver-1.4.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eb14a5da6dc7642b0f3a18f13654847cd8b7a2550e2645a5bda677862b03ba16"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39a219e1c81ae3b103643d2aedb90f1ef22650deb266ff12a19e7773f3e5f089"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2405a7d98604b87f3fc28b1716783534b1b4b8510d8142adca34ee0bc3c87543"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dc1ae486f9abcef254b5618dfb4113dd49f94c68e3e027d03cf0143f3f772b61"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a1f570ce4d62d718dce3f179ee78dac3b545ac16c0c04bb363b7607a949c0d1"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb27e7b78d716c591e88e0a09a2139c6577865d7f2e152488c2cc6257f460872"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:15163165efc2f627eb9687ea5f3a28137217d217ac4024893d753f46bce9de26"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bdee92c56a71d2b24c33a7d4c2856bd6419d017e08caa7802d2963870e315028"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:412f287c55a6f54b0650bd9b6dce5aceddb95864a1a90c87af16979d37c89771"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2c93f00dcba2eea70af2be5f11a830a742fe6b579a1d4e00f47760ef13be247a"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f117e1a089d9411663a3207ba874f31be9ac8eaa5b533787024dc07aeb74f464"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-win_amd64.whl", hash = "sha256:be6a04e6c79819c9a8c2373317d19a96048e5a3f90bec587787e86a1153883c2"},
+    {file = "kiwisolver-1.4.9-cp311-cp311-win_arm64.whl", hash = "sha256:0ae37737256ba2de764ddc12aed4956460277f00c4996d51a197e72f62f5eec7"},
+    {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:720e05574713db64c356e86732c0f3c5252818d05f9df320f0ad8380641acea5"},
+    {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:17680d737d5335b552994a2008fab4c851bcd7de33094a82067ef3a576ff02fa"},
+    {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:85b5352f94e490c028926ea567fc569c52ec79ce131dadb968d3853e809518c2"},
+    {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:464415881e4801295659462c49461a24fb107c140de781d55518c4b80cb6790f"},
+    {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:fb940820c63a9590d31d88b815e7a3aa5915cad3ce735ab45f0c730b39547de1"},
+    {file = "kiwisolver-1.4.9.tar.gz", hash = "sha256:c3b22c26c6fd6811b0ae8363b95ca8ce4ea3c202d3d0975b2914310ceb1bcc4d"},
+]
+
+[[package]]
+name = "markupsafe"
+version = "3.0.2"
+requires_python = ">=3.9"
+summary = "Safely add untrusted strings to HTML/XML markup."
+groups = ["default"]
+files = [
+    {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"},
+    {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"},
+    {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"},
+]
+
+[[package]]
+name = "matplotlib"
+version = "3.10.6"
+requires_python = ">=3.10"
+summary = "Python plotting package"
+groups = ["default"]
+dependencies = [
+    "contourpy>=1.0.1",
+    "cycler>=0.10",
+    "fonttools>=4.22.0",
+    "kiwisolver>=1.3.1",
+    "numpy>=1.23",
+    "packaging>=20.0",
+    "pillow>=8",
+    "pyparsing>=2.3.1",
+    "python-dateutil>=2.7",
+]
+files = [
+    {file = "matplotlib-3.10.6-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:905b60d1cb0ee604ce65b297b61cf8be9f4e6cfecf95a3fe1c388b5266bc8f4f"},
+    {file = "matplotlib-3.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7bac38d816637343e53d7185d0c66677ff30ffb131044a81898b5792c956ba76"},
+    {file = "matplotlib-3.10.6-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:942a8de2b5bfff1de31d95722f702e2966b8a7e31f4e68f7cd963c7cd8861cf6"},
+    {file = "matplotlib-3.10.6-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a3276c85370bc0dfca051ec65c5817d1e0f8f5ce1b7787528ec8ed2d524bbc2f"},
+    {file = "matplotlib-3.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9df5851b219225731f564e4b9e7f2ac1e13c9e6481f941b5631a0f8e2d9387ce"},
+    {file = "matplotlib-3.10.6-cp311-cp311-win_amd64.whl", hash = "sha256:abb5d9478625dd9c9eb51a06d39aae71eda749ae9b3138afb23eb38824026c7e"},
+    {file = "matplotlib-3.10.6-cp311-cp311-win_arm64.whl", hash = "sha256:886f989ccfae63659183173bb3fced7fd65e9eb793c3cc21c273add368536951"},
+    {file = "matplotlib-3.10.6-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f2d684c3204fa62421bbf770ddfebc6b50130f9cad65531eeba19236d73bb488"},
+    {file = "matplotlib-3.10.6-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:6f4a69196e663a41d12a728fab8751177215357906436804217d6d9cf0d4d6cf"},
+    {file = "matplotlib-3.10.6-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d6ca6ef03dfd269f4ead566ec6f3fb9becf8dab146fb999022ed85ee9f6b3eb"},
+    {file = "matplotlib-3.10.6.tar.gz", hash = "sha256:ec01b645840dd1996df21ee37f208cd8ba57644779fa20464010638013d3203c"},
+]
+
+[[package]]
+name = "moviepy"
+version = "2.2.1"
+summary = "Video editing with Python"
+groups = ["default"]
+dependencies = [
+    "decorator<6.0,>=4.0.2",
+    "imageio-ffmpeg>=0.2.0",
+    "imageio<3.0,>=2.5",
+    "numpy>=1.25.0",
+    "pillow<12.0,>=9.2.0",
+    "proglog<=1.0.0",
+    "python-dotenv>=0.10",
+]
+files = [
+    {file = "moviepy-2.2.1-py3-none-any.whl", hash = "sha256:6b56803fec2ac54b557404126ac1160e65448e03798fa282bd23e8fab3795060"},
+    {file = "moviepy-2.2.1.tar.gz", hash = "sha256:c80cb56815ece94e5e3e2d361aa40070eeb30a09d23a24c4e684d03e16deacb1"},
+]
+
+[[package]]
+name = "mpmath"
+version = "1.3.0"
+summary = "Python library for arbitrary-precision floating-point arithmetic"
+groups = ["default"]
+files = [
+    {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"},
+    {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"},
+]
+
+[[package]]
+name = "narwhals"
+version = "2.4.0"
+requires_python = ">=3.9"
+summary = "Extremely lightweight compatibility layer between dataframe libraries"
+groups = ["default"]
+files = [
+    {file = "narwhals-2.4.0-py3-none-any.whl", hash = "sha256:06d958b03e3e3725ae16feee6737b4970991bb52e8465ef75f388c574732ac59"},
+    {file = "narwhals-2.4.0.tar.gz", hash = "sha256:a71931f7fb3c8e082cbe18ef0740644d87d60eba841ddfa9ba9394de1d43062f"},
+]
+
+[[package]]
+name = "networkx"
+version = "3.5"
+requires_python = ">=3.11"
+summary = "Python package for creating and manipulating graphs and networks"
+groups = ["default"]
+files = [
+    {file = "networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec"},
+    {file = "networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037"},
+]
+
+[[package]]
+name = "numpy"
+version = "2.2.6"
+requires_python = ">=3.10"
+summary = "Fundamental package for array computing in Python"
+groups = ["default"]
+files = [
+    {file = "numpy-2.2.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f9f1adb22318e121c5c69a09142811a201ef17ab257a1e66ca3025065b7f53ae"},
+    {file = "numpy-2.2.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c820a93b0255bc360f53eca31a0e676fd1101f673dda8da93454a12e23fc5f7a"},
+    {file = "numpy-2.2.6-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3d70692235e759f260c3d837193090014aebdf026dfd167834bcba43e30c2a42"},
+    {file = "numpy-2.2.6-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:481b49095335f8eed42e39e8041327c05b0f6f4780488f61286ed3c01368d491"},
+    {file = "numpy-2.2.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b64d8d4d17135e00c8e346e0a738deb17e754230d7e0810ac5012750bbd85a5a"},
+    {file = "numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba10f8411898fc418a521833e014a77d3ca01c15b0c6cdcce6a0d2897e6dbbdf"},
+    {file = "numpy-2.2.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bd48227a919f1bafbdda0583705e547892342c26fb127219d60a5c36882609d1"},
+    {file = "numpy-2.2.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9551a499bf125c1d4f9e250377c1ee2eddd02e01eac6644c080162c0c51778ab"},
+    {file = "numpy-2.2.6-cp311-cp311-win32.whl", hash = "sha256:0678000bb9ac1475cd454c6b8c799206af8107e310843532b04d49649c717a47"},
+    {file = "numpy-2.2.6-cp311-cp311-win_amd64.whl", hash = "sha256:e8213002e427c69c45a52bbd94163084025f533a55a59d6f9c5b820774ef3303"},
+    {file = "numpy-2.2.6.tar.gz", hash = "sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd"},
+]
+
+[[package]]
+name = "nvidia-cublas-cu12"
+version = "12.8.4.1"
+requires_python = ">=3"
+summary = "CUBLAS native runtime libraries"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+files = [
+    {file = "nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:b86f6dd8935884615a0683b663891d43781b819ac4f2ba2b0c9604676af346d0"},
+    {file = "nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142"},
+    {file = "nvidia_cublas_cu12-12.8.4.1-py3-none-win_amd64.whl", hash = "sha256:47e9b82132fa8d2b4944e708049229601448aaad7e6f296f630f2d1a32de35af"},
+]
+
+[[package]]
+name = "nvidia-cuda-cupti-cu12"
+version = "12.8.90"
+requires_python = ">=3"
+summary = "CUDA profiling tools runtime libs."
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+files = [
+    {file = "nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4412396548808ddfed3f17a467b104ba7751e6b58678a4b840675c56d21cf7ed"},
+    {file = "nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182"},
+    {file = "nvidia_cuda_cupti_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:bb479dcdf7e6d4f8b0b01b115260399bf34154a1a2e9fe11c85c517d87efd98e"},
+]
+
+[[package]]
+name = "nvidia-cuda-nvrtc-cu12"
+version = "12.8.93"
+requires_python = ">=3"
+summary = "NVRTC native runtime libraries"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+files = [
+    {file = "nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994"},
+    {file = "nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc1fec1e1637854b4c0a65fb9a8346b51dd9ee69e61ebaccc82058441f15bce8"},
+    {file = "nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-win_amd64.whl", hash = "sha256:7a4b6b2904850fe78e0bd179c4b655c404d4bb799ef03ddc60804247099ae909"},
+]
+
+[[package]]
+name = "nvidia-cuda-runtime-cu12"
+version = "12.8.90"
+requires_python = ">=3"
+summary = "CUDA Runtime native Libraries"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+files = [
+    {file = "nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:52bf7bbee900262ffefe5e9d5a2a69a30d97e2bc5bb6cc866688caa976966e3d"},
+    {file = "nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90"},
+    {file = "nvidia_cuda_runtime_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:c0c6027f01505bfed6c3b21ec546f69c687689aad5f1a377554bc6ca4aa993a8"},
+]
+
+[[package]]
+name = "nvidia-cudnn-cu12"
+version = "9.10.2.21"
+requires_python = ">=3"
+summary = "cuDNN runtime libraries"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+dependencies = [
+    "nvidia-cublas-cu12",
+]
+files = [
+    {file = "nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:c9132cc3f8958447b4910a1720036d9eff5928cc3179b0a51fb6d167c6cc87d8"},
+    {file = "nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8"},
+    {file = "nvidia_cudnn_cu12-9.10.2.21-py3-none-win_amd64.whl", hash = "sha256:c6288de7d63e6cf62988f0923f96dc339cea362decb1bf5b3141883392a7d65e"},
+]
+
+[[package]]
+name = "nvidia-cufft-cu12"
+version = "11.3.3.83"
+requires_python = ">=3"
+summary = "CUFFT native runtime libraries"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+dependencies = [
+    "nvidia-nvjitlink-cu12",
+]
+files = [
+    {file = "nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:848ef7224d6305cdb2a4df928759dca7b1201874787083b6e7550dd6765ce69a"},
+    {file = "nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74"},
+    {file = "nvidia_cufft_cu12-11.3.3.83-py3-none-win_amd64.whl", hash = "sha256:7a64a98ef2a7c47f905aaf8931b69a3a43f27c55530c698bb2ed7c75c0b42cb7"},
+]
+
+[[package]]
+name = "nvidia-cufile-cu12"
+version = "1.13.1.3"
+requires_python = ">=3"
+summary = "cuFile GPUDirect libraries"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+files = [
+    {file = "nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc"},
+    {file = "nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:4beb6d4cce47c1a0f1013d72e02b0994730359e17801d395bdcbf20cfb3bb00a"},
+]
+
+[[package]]
+name = "nvidia-curand-cu12"
+version = "10.3.9.90"
+requires_python = ">=3"
+summary = "CURAND native runtime libraries"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+files = [
+    {file = "nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:dfab99248034673b779bc6decafdc3404a8a6f502462201f2f31f11354204acd"},
+    {file = "nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9"},
+    {file = "nvidia_curand_cu12-10.3.9.90-py3-none-win_amd64.whl", hash = "sha256:f149a8ca457277da854f89cf282d6ef43176861926c7ac85b2a0fbd237c587ec"},
+]
+
+[[package]]
+name = "nvidia-cusolver-cu12"
+version = "11.7.3.90"
+requires_python = ">=3"
+summary = "CUDA solver native runtime libraries"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+dependencies = [
+    "nvidia-cublas-cu12",
+    "nvidia-cusparse-cu12",
+    "nvidia-nvjitlink-cu12",
+]
+files = [
+    {file = "nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:db9ed69dbef9715071232caa9b69c52ac7de3a95773c2db65bdba85916e4e5c0"},
+    {file = "nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450"},
+    {file = "nvidia_cusolver_cu12-11.7.3.90-py3-none-win_amd64.whl", hash = "sha256:4a550db115fcabc4d495eb7d39ac8b58d4ab5d8e63274d3754df1c0ad6a22d34"},
+]
+
+[[package]]
+name = "nvidia-cusparse-cu12"
+version = "12.5.8.93"
+requires_python = ">=3"
+summary = "CUSPARSE native runtime libraries"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+dependencies = [
+    "nvidia-nvjitlink-cu12",
+]
+files = [
+    {file = "nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b6c161cb130be1a07a27ea6923df8141f3c295852f4b260c65f18f3e0a091dc"},
+    {file = "nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b"},
+    {file = "nvidia_cusparse_cu12-12.5.8.93-py3-none-win_amd64.whl", hash = "sha256:9a33604331cb2cac199f2e7f5104dfbb8a5a898c367a53dfda9ff2acb6b6b4dd"},
+]
+
+[[package]]
+name = "nvidia-cusparselt-cu12"
+version = "0.7.1"
+summary = "NVIDIA cuSPARSELt"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+files = [
+    {file = "nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8878dce784d0fac90131b6817b607e803c36e629ba34dc5b433471382196b6a5"},
+    {file = "nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623"},
+    {file = "nvidia_cusparselt_cu12-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f67fbb5831940ec829c9117b7f33807db9f9678dc2a617fbe781cac17b4e1075"},
+]
+
+[[package]]
+name = "nvidia-nccl-cu12"
+version = "2.27.3"
+requires_python = ">=3"
+summary = "NVIDIA Collective Communication Library (NCCL) Runtime"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+files = [
+    {file = "nvidia_nccl_cu12-2.27.3-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9ddf1a245abc36c550870f26d537a9b6087fb2e2e3d6e0ef03374c6fd19d984f"},
+    {file = "nvidia_nccl_cu12-2.27.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adf27ccf4238253e0b826bce3ff5fa532d65fc42322c8bfdfaf28024c0fbe039"},
+]
+
+[[package]]
+name = "nvidia-nvjitlink-cu12"
+version = "12.8.93"
+requires_python = ">=3"
+summary = "Nvidia JIT LTO Library"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+files = [
+    {file = "nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88"},
+    {file = "nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:adccd7161ace7261e01bb91e44e88da350895c270d23f744f0820c818b7229e7"},
+    {file = "nvidia_nvjitlink_cu12-12.8.93-py3-none-win_amd64.whl", hash = "sha256:bd93fbeeee850917903583587f4fc3a4eafa022e34572251368238ab5e6bd67f"},
+]
+
+[[package]]
+name = "nvidia-nvtx-cu12"
+version = "12.8.90"
+requires_python = ">=3"
+summary = "NVIDIA Tools Extension"
+groups = ["default"]
+marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
+files = [
+    {file = "nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d7ad891da111ebafbf7e015d34879f7112832fc239ff0d7d776b6cb685274615"},
+    {file = "nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f"},
+    {file = "nvidia_nvtx_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:619c8304aedc69f02ea82dd244541a83c3d9d40993381b3b590f1adaed3db41e"},
+]
+
+[[package]]
+name = "opencv-python"
+version = "4.12.0.88"
+requires_python = ">=3.6"
+summary = "Wrapper package for OpenCV python bindings."
+groups = ["default"]
+dependencies = [
+    "numpy<2.0; python_version < \"3.9\"",
+    "numpy<2.3.0,>=2; python_version >= \"3.9\"",
+]
+files = [
+    {file = "opencv-python-4.12.0.88.tar.gz", hash = "sha256:8b738389cede219405f6f3880b851efa3415ccd674752219377353f017d2994d"},
+    {file = "opencv_python-4.12.0.88-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:f9a1f08883257b95a5764bf517a32d75aec325319c8ed0f89739a57fae9e92a5"},
687
+ {file = "opencv_python-4.12.0.88-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:812eb116ad2b4de43ee116fcd8991c3a687f099ada0b04e68f64899c09448e81"},
688
+ {file = "opencv_python-4.12.0.88-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:51fd981c7df6af3e8f70b1556696b05224c4e6b6777bdd2a46b3d4fb09de1a92"},
689
+ {file = "opencv_python-4.12.0.88-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:092c16da4c5a163a818f120c22c5e4a2f96e0db4f24e659c701f1fe629a690f9"},
690
+ {file = "opencv_python-4.12.0.88-cp37-abi3-win32.whl", hash = "sha256:ff554d3f725b39878ac6a2e1fa232ec509c36130927afc18a1719ebf4fbf4357"},
691
+ {file = "opencv_python-4.12.0.88-cp37-abi3-win_amd64.whl", hash = "sha256:d98edb20aa932fd8ebd276a72627dad9dc097695b3d435a4257557bbb49a79d2"},
692
+ ]
693
+
694
+ [[package]]
695
+ name = "packaging"
696
+ version = "25.0"
697
+ requires_python = ">=3.8"
698
+ summary = "Core utilities for Python packages"
699
+ groups = ["default"]
700
+ files = [
701
+ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"},
702
+ {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
703
+ ]
704
+
705
+ [[package]]
706
+ name = "pandas"
707
+ version = "2.3.2"
708
+ requires_python = ">=3.9"
709
+ summary = "Powerful data structures for data analysis, time series, and statistics"
710
+ groups = ["default"]
711
+ dependencies = [
712
+ "numpy>=1.22.4; python_version < \"3.11\"",
713
+ "numpy>=1.23.2; python_version == \"3.11\"",
714
+ "numpy>=1.26.0; python_version >= \"3.12\"",
715
+ "python-dateutil>=2.8.2",
716
+ "pytz>=2020.1",
717
+ "tzdata>=2022.7",
718
+ ]
719
+ files = [
720
+ {file = "pandas-2.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1333e9c299adcbb68ee89a9bb568fc3f20f9cbb419f1dd5225071e6cddb2a743"},
721
+ {file = "pandas-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:76972bcbd7de8e91ad5f0ca884a9f2c477a2125354af624e022c49e5bd0dfff4"},
722
+ {file = "pandas-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b98bdd7c456a05eef7cd21fd6b29e3ca243591fe531c62be94a2cc987efb5ac2"},
723
+ {file = "pandas-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d81573b3f7db40d020983f78721e9bfc425f411e616ef019a10ebf597aedb2e"},
724
+ {file = "pandas-2.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e190b738675a73b581736cc8ec71ae113d6c3768d0bd18bffa5b9a0927b0b6ea"},
725
+ {file = "pandas-2.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c253828cb08f47488d60f43c5fc95114c771bbfff085da54bfc79cb4f9e3a372"},
726
+ {file = "pandas-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:9467697b8083f9667b212633ad6aa4ab32436dcbaf4cd57325debb0ddef2012f"},
727
+ {file = "pandas-2.3.2.tar.gz", hash = "sha256:ab7b58f8f82706890924ccdfb5f48002b83d2b5a3845976a9fb705d36c34dcdb"},
728
+ ]
729
+
730
+ [[package]]
731
+ name = "pillow"
732
+ version = "11.3.0"
733
+ requires_python = ">=3.9"
734
+ summary = "Python Imaging Library (Fork)"
735
+ groups = ["default"]
736
+ files = [
737
+ {file = "pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722"},
738
+ {file = "pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288"},
739
+ {file = "pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d"},
740
+ {file = "pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494"},
741
+ {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58"},
742
+ {file = "pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f"},
743
+ {file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e"},
744
+ {file = "pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94"},
745
+ {file = "pillow-11.3.0-cp311-cp311-win32.whl", hash = "sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0"},
746
+ {file = "pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac"},
747
+ {file = "pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd"},
748
+ {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6"},
749
+ {file = "pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438"},
750
+ {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3"},
751
+ {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c"},
752
+ {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361"},
753
+ {file = "pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7"},
754
+ {file = "pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8"},
755
+ {file = "pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523"},
756
+ ]
757
+
758
+ [[package]]
759
+ name = "polars"
760
+ version = "1.33.1"
761
+ requires_python = ">=3.9"
762
+ summary = "Blazingly fast DataFrame library"
763
+ groups = ["default"]
764
+ files = [
765
+ {file = "polars-1.33.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:3881c444b0f14778ba94232f077a709d435977879c1b7d7bd566b55bd1830bb5"},
766
+ {file = "polars-1.33.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:29200b89c9a461e6f06fc1660bc9c848407640ee30fe0e5ef4947cfd49d55337"},
767
+ {file = "polars-1.33.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:444940646e76342abaa47f126c70e3e40b56e8e02a9e89e5c5d1c24b086db58a"},
768
+ {file = "polars-1.33.1-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:094a37d06789286649f654f229ec4efb9376630645ba8963b70cb9c0b008b3e1"},
769
+ {file = "polars-1.33.1-cp39-abi3-win_amd64.whl", hash = "sha256:c9781c704432a2276a185ee25898aa427f39a904fbe8fde4ae779596cdbd7a9e"},
770
+ {file = "polars-1.33.1-cp39-abi3-win_arm64.whl", hash = "sha256:c3cfddb3b78eae01a218222bdba8048529fef7e14889a71e33a5198644427642"},
771
+ {file = "polars-1.33.1.tar.gz", hash = "sha256:fa3fdc34eab52a71498264d6ff9b0aa6955eb4b0ae8add5d3cb43e4b84644007"},
772
+ ]
773
+
774
+ [[package]]
775
+ name = "proglog"
776
+ version = "0.1.12"
777
+ summary = "Log and progress bar manager for console, notebooks, web..."
778
+ groups = ["default"]
779
+ dependencies = [
780
+ "tqdm",
781
+ ]
782
+ files = [
783
+ {file = "proglog-0.1.12-py3-none-any.whl", hash = "sha256:ccaafce51e80a81c65dc907a460c07ccb8ec1f78dc660cfd8f9ec3a22f01b84c"},
784
+ {file = "proglog-0.1.12.tar.gz", hash = "sha256:361ee074721c277b89b75c061336cb8c5f287c92b043efa562ccf7866cda931c"},
785
+ ]
786
+
787
+ [[package]]
788
+ name = "protobuf"
789
+ version = "6.32.0"
790
+ requires_python = ">=3.9"
791
+ summary = ""
792
+ groups = ["default"]
793
+ files = [
794
+ {file = "protobuf-6.32.0-cp310-abi3-win32.whl", hash = "sha256:84f9e3c1ff6fb0308dbacb0950d8aa90694b0d0ee68e75719cb044b7078fe741"},
795
+ {file = "protobuf-6.32.0-cp310-abi3-win_amd64.whl", hash = "sha256:a8bdbb2f009cfc22a36d031f22a625a38b615b5e19e558a7b756b3279723e68e"},
796
+ {file = "protobuf-6.32.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d52691e5bee6c860fff9a1c86ad26a13afbeb4b168cd4445c922b7e2cf85aaf0"},
797
+ {file = "protobuf-6.32.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:501fe6372fd1c8ea2a30b4d9be8f87955a64d6be9c88a973996cef5ef6f0abf1"},
798
+ {file = "protobuf-6.32.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:75a2aab2bd1aeb1f5dc7c5f33bcb11d82ea8c055c9becbb41c26a8c43fd7092c"},
799
+ {file = "protobuf-6.32.0-py3-none-any.whl", hash = "sha256:ba377e5b67b908c8f3072a57b63e2c6a4cbd18aea4ed98d2584350dbf46f2783"},
800
+ {file = "protobuf-6.32.0.tar.gz", hash = "sha256:a81439049127067fc49ec1d36e25c6ee1d1a2b7be930675f919258d03c04e7d2"},
801
+ ]
802
+
803
+ [[package]]
804
+ name = "psutil"
805
+ version = "7.0.0"
806
+ requires_python = ">=3.6"
807
+ summary = "Cross-platform lib for process and system monitoring in Python. NOTE: the syntax of this script MUST be kept compatible with Python 2.7."
808
+ groups = ["default"]
809
+ files = [
810
+ {file = "psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25"},
811
+ {file = "psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da"},
812
+ {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91"},
813
+ {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34"},
814
+ {file = "psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993"},
815
+ {file = "psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99"},
816
+ {file = "psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553"},
817
+ {file = "psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456"},
818
+ ]
819
+
820
+ [[package]]
821
+ name = "pyarrow"
822
+ version = "21.0.0"
823
+ requires_python = ">=3.9"
824
+ summary = "Python library for Apache Arrow"
825
+ groups = ["default"]
826
+ files = [
827
+ {file = "pyarrow-21.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c077f48aab61738c237802836fc3844f85409a46015635198761b0d6a688f87b"},
828
+ {file = "pyarrow-21.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:689f448066781856237eca8d1975b98cace19b8dd2ab6145bf49475478bcaa10"},
829
+ {file = "pyarrow-21.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:479ee41399fcddc46159a551705b89c05f11e8b8cb8e968f7fec64f62d91985e"},
830
+ {file = "pyarrow-21.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:40ebfcb54a4f11bcde86bc586cbd0272bac0d516cfa539c799c2453768477569"},
831
+ {file = "pyarrow-21.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8d58d8497814274d3d20214fbb24abcad2f7e351474357d552a8d53bce70c70e"},
832
+ {file = "pyarrow-21.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:585e7224f21124dd57836b1530ac8f2df2afc43c861d7bf3d58a4870c42ae36c"},
833
+ {file = "pyarrow-21.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:555ca6935b2cbca2c0e932bedd853e9bc523098c39636de9ad4693b5b1df86d6"},
834
+ {file = "pyarrow-21.0.0.tar.gz", hash = "sha256:5051f2dccf0e283ff56335760cbc8622cf52264d67e359d5569541ac11b6d5bc"},
835
+ ]
836
+
837
+ [[package]]
838
+ name = "pydeck"
839
+ version = "0.9.1"
840
+ requires_python = ">=3.8"
841
+ summary = "Widget for deck.gl maps"
842
+ groups = ["default"]
843
+ dependencies = [
844
+ "jinja2>=2.10.1",
845
+ "numpy>=1.16.4",
846
+ ]
847
+ files = [
848
+ {file = "pydeck-0.9.1-py2.py3-none-any.whl", hash = "sha256:b3f75ba0d273fc917094fa61224f3f6076ca8752b93d46faf3bcfd9f9d59b038"},
849
+ {file = "pydeck-0.9.1.tar.gz", hash = "sha256:f74475ae637951d63f2ee58326757f8d4f9cd9f2a457cf42950715003e2cb605"},
850
+ ]
851
+
852
+ [[package]]
853
+ name = "pyparsing"
854
+ version = "3.2.3"
855
+ requires_python = ">=3.9"
856
+ summary = "pyparsing module - Classes and methods to define and execute parsing grammars"
857
+ groups = ["default"]
858
+ files = [
859
+ {file = "pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf"},
860
+ {file = "pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be"},
861
+ ]
862
+
863
+ [[package]]
864
+ name = "python-dateutil"
865
+ version = "2.9.0.post0"
866
+ requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
867
+ summary = "Extensions to the standard Python datetime module"
868
+ groups = ["default"]
869
+ dependencies = [
870
+ "six>=1.5",
871
+ ]
872
+ files = [
873
+ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
874
+ {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
875
+ ]
876
+
877
+ [[package]]
878
+ name = "python-dotenv"
879
+ version = "1.1.1"
880
+ requires_python = ">=3.9"
881
+ summary = "Read key-value pairs from a .env file and set them as environment variables"
882
+ groups = ["default"]
883
+ files = [
884
+ {file = "python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc"},
885
+ {file = "python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab"},
886
+ ]
887
+
888
+ [[package]]
889
+ name = "pytz"
890
+ version = "2025.2"
891
+ summary = "World timezone definitions, modern and historical"
892
+ groups = ["default"]
893
+ files = [
894
+ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
895
+ {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
896
+ ]
897
+
898
+ [[package]]
899
+ name = "pyyaml"
900
+ version = "6.0.2"
901
+ requires_python = ">=3.8"
902
+ summary = "YAML parser and emitter for Python"
903
+ groups = ["default"]
904
+ files = [
905
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
906
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
907
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
908
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
909
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
910
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
911
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
912
+ {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
913
+ {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
914
+ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
915
+ ]
916
+
917
+ [[package]]
918
+ name = "referencing"
919
+ version = "0.36.2"
920
+ requires_python = ">=3.9"
921
+ summary = "JSON Referencing + Python"
922
+ groups = ["default"]
923
+ dependencies = [
924
+ "attrs>=22.2.0",
925
+ "rpds-py>=0.7.0",
926
+ "typing-extensions>=4.4.0; python_version < \"3.13\"",
927
+ ]
928
+ files = [
929
+ {file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"},
930
+ {file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"},
931
+ ]
932
+
933
+ [[package]]
934
+ name = "requests"
935
+ version = "2.32.5"
936
+ requires_python = ">=3.9"
937
+ summary = "Python HTTP for Humans."
938
+ groups = ["default"]
939
+ dependencies = [
940
+ "certifi>=2017.4.17",
941
+ "charset-normalizer<4,>=2",
942
+ "idna<4,>=2.5",
943
+ "urllib3<3,>=1.21.1",
944
+ ]
945
+ files = [
946
+ {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"},
947
+ {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"},
948
+ ]
949
+
950
+ [[package]]
951
+ name = "rpds-py"
952
+ version = "0.27.1"
953
+ requires_python = ">=3.9"
954
+ summary = "Python bindings to Rust's persistent data structures (rpds)"
955
+ groups = ["default"]
956
+ files = [
957
+ {file = "rpds_py-0.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:be898f271f851f68b318872ce6ebebbc62f303b654e43bf72683dbdc25b7c881"},
958
+ {file = "rpds_py-0.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:62ac3d4e3e07b58ee0ddecd71d6ce3b1637de2d373501412df395a0ec5f9beb5"},
959
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4708c5c0ceb2d034f9991623631d3d23cb16e65c83736ea020cdbe28d57c0a0e"},
960
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:abfa1171a9952d2e0002aba2ad3780820b00cc3d9c98c6630f2e93271501f66c"},
961
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b507d19f817ebaca79574b16eb2ae412e5c0835542c93fe9983f1e432aca195"},
962
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168b025f8fd8d8d10957405f3fdcef3dc20f5982d398f90851f4abc58c566c52"},
963
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb56c6210ef77caa58e16e8c17d35c63fe3f5b60fd9ba9d424470c3400bcf9ed"},
964
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:d252f2d8ca0195faa707f8eb9368955760880b2b42a8ee16d382bf5dd807f89a"},
965
+ {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6e5e54da1e74b91dbc7996b56640f79b195d5925c2b78efaa8c5d53e1d88edde"},
966
+ {file = "rpds_py-0.27.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ffce0481cc6e95e5b3f0a47ee17ffbd234399e6d532f394c8dce320c3b089c21"},
967
+ {file = "rpds_py-0.27.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a205fdfe55c90c2cd8e540ca9ceba65cbe6629b443bc05db1f590a3db8189ff9"},
968
+ {file = "rpds_py-0.27.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:689fb5200a749db0415b092972e8eba85847c23885c8543a8b0f5c009b1a5948"},
969
+ {file = "rpds_py-0.27.1-cp311-cp311-win32.whl", hash = "sha256:3182af66048c00a075010bc7f4860f33913528a4b6fc09094a6e7598e462fe39"},
970
+ {file = "rpds_py-0.27.1-cp311-cp311-win_amd64.whl", hash = "sha256:b4938466c6b257b2f5c4ff98acd8128ec36b5059e5c8f8372d79316b1c36bb15"},
971
+ {file = "rpds_py-0.27.1-cp311-cp311-win_arm64.whl", hash = "sha256:2f57af9b4d0793e53266ee4325535a31ba48e2f875da81a9177c9926dfa60746"},
972
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cdfe4bb2f9fe7458b7453ad3c33e726d6d1c7c0a72960bcc23800d77384e42df"},
973
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:8fabb8fd848a5f75a2324e4a84501ee3a5e3c78d8603f83475441866e60b94a3"},
974
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda8719d598f2f7f3e0f885cba8646644b55a187762bec091fa14a2b819746a9"},
975
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c64d07e95606ec402a0a1c511fe003873fa6af630bda59bac77fac8b4318ebc"},
976
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93a2ed40de81bcff59aabebb626562d48332f3d028ca2036f1d23cbb52750be4"},
977
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:387ce8c44ae94e0ec50532d9cb0edce17311024c9794eb196b90e1058aadeb66"},
978
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf94f812c95b5e60ebaf8bfb1898a7d7cb9c1af5744d4a67fa47796e0465d4e"},
979
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:4848ca84d6ded9b58e474dfdbad4b8bfb450344c0551ddc8d958bf4b36aa837c"},
980
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2bde09cbcf2248b73c7c323be49b280180ff39fadcfe04e7b6f54a678d02a7cf"},
981
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:94c44ee01fd21c9058f124d2d4f0c9dc7634bec93cd4b38eefc385dabe71acbf"},
982
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:df8b74962e35c9249425d90144e721eed198e6555a0e22a563d29fe4486b51f6"},
983
+ {file = "rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:dc23e6820e3b40847e2f4a7726462ba0cf53089512abe9ee16318c366494c17a"},
984
+ {file = "rpds_py-0.27.1.tar.gz", hash = "sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8"},
985
+ ]
986
+
987
+ [[package]]
988
+ name = "ruff"
989
+ version = "0.13.1"
990
+ requires_python = ">=3.7"
991
+ summary = "An extremely fast Python linter and code formatter, written in Rust."
992
+ groups = ["dev"]
993
+ files = [
994
+ {file = "ruff-0.13.1-py3-none-linux_armv6l.whl", hash = "sha256:b2abff595cc3cbfa55e509d89439b5a09a6ee3c252d92020bd2de240836cf45b"},
995
+ {file = "ruff-0.13.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:4ee9f4249bf7f8bb3984c41bfaf6a658162cdb1b22e3103eabc7dd1dc5579334"},
996
+ {file = "ruff-0.13.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5c5da4af5f6418c07d75e6f3224e08147441f5d1eac2e6ce10dcce5e616a3bae"},
997
+ {file = "ruff-0.13.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80524f84a01355a59a93cef98d804e2137639823bcee2931f5028e71134a954e"},
998
+ {file = "ruff-0.13.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff7f5ce8d7988767dd46a148192a14d0f48d1baea733f055d9064875c7d50389"},
999
+ {file = "ruff-0.13.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c55d84715061f8b05469cdc9a446aa6c7294cd4bd55e86a89e572dba14374f8c"},
1000
+ {file = "ruff-0.13.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:ac57fed932d90fa1624c946dc67a0a3388d65a7edc7d2d8e4ca7bddaa789b3b0"},
1001
+ {file = "ruff-0.13.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c366a71d5b4f41f86a008694f7a0d75fe409ec298685ff72dc882f882d532e36"},
1002
+ {file = "ruff-0.13.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4ea9d1b5ad3e7a83ee8ebb1229c33e5fe771e833d6d3dcfca7b77d95b060d38"},
1003
+ {file = "ruff-0.13.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0f70202996055b555d3d74b626406476cc692f37b13bac8828acff058c9966a"},
1004
+ {file = "ruff-0.13.1-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:f8cff7a105dad631085d9505b491db33848007d6b487c3c1979dd8d9b2963783"},
1005
+ {file = "ruff-0.13.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:9761e84255443316a258dd7dfbd9bfb59c756e52237ed42494917b2577697c6a"},
1006
+ {file = "ruff-0.13.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:3d376a88c3102ef228b102211ef4a6d13df330cb0f5ca56fdac04ccec2a99700"},
1007
+ {file = "ruff-0.13.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cbefd60082b517a82c6ec8836989775ac05f8991715d228b3c1d86ccc7df7dae"},
1008
+ {file = "ruff-0.13.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:dd16b9a5a499fe73f3c2ef09a7885cb1d97058614d601809d37c422ed1525317"},
1009
+ {file = "ruff-0.13.1-py3-none-win32.whl", hash = "sha256:55e9efa692d7cb18580279f1fbb525146adc401f40735edf0aaeabd93099f9a0"},
1010
+ {file = "ruff-0.13.1-py3-none-win_amd64.whl", hash = "sha256:3a3fb595287ee556de947183489f636b9f76a72f0fa9c028bdcabf5bab2cc5e5"},
1011
+ {file = "ruff-0.13.1-py3-none-win_arm64.whl", hash = "sha256:c0bae9ffd92d54e03c2bf266f466da0a65e145f298ee5b5846ed435f6a00518a"},
1012
+ {file = "ruff-0.13.1.tar.gz", hash = "sha256:88074c3849087f153d4bb22e92243ad4c1b366d7055f98726bc19aa08dc12d51"},
1013
+ ]
1014
+
1015
+ [[package]]
1016
+ name = "scipy"
1017
+ version = "1.16.1"
1018
+ requires_python = ">=3.11"
1019
+ summary = "Fundamental algorithms for scientific computing in Python"
1020
+ groups = ["default"]
1021
+ dependencies = [
1022
+ "numpy<2.6,>=1.25.2",
1023
+ ]
1024
+ files = [
1025
+ {file = "scipy-1.16.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:c033fa32bab91dc98ca59d0cf23bb876454e2bb02cbe592d5023138778f70030"},
1026
+ {file = "scipy-1.16.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:6e5c2f74e5df33479b5cd4e97a9104c511518fbd979aa9b8f6aec18b2e9ecae7"},
1027
+ {file = "scipy-1.16.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:0a55ffe0ba0f59666e90951971a884d1ff6f4ec3275a48f472cfb64175570f77"},
1028
+ {file = "scipy-1.16.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:f8a5d6cd147acecc2603fbd382fed6c46f474cccfcf69ea32582e033fb54dcfe"},
1029
+ {file = "scipy-1.16.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cb18899127278058bcc09e7b9966d41a5a43740b5bb8dcba401bd983f82e885b"},
1030
+ {file = "scipy-1.16.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adccd93a2fa937a27aae826d33e3bfa5edf9aa672376a4852d23a7cd67a2e5b7"},
1031
+ {file = "scipy-1.16.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:18aca1646a29ee9a0625a1be5637fa798d4d81fdf426481f06d69af828f16958"},
1032
+ {file = "scipy-1.16.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d85495cef541729a70cdddbbf3e6b903421bc1af3e8e3a9a72a06751f33b7c39"},
1033
+ {file = "scipy-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:226652fca853008119c03a8ce71ffe1b3f6d2844cc1686e8f9806edafae68596"},
1034
+ {file = "scipy-1.16.1.tar.gz", hash = "sha256:44c76f9e8b6e8e488a586190ab38016e4ed2f8a038af7cd3defa903c0a2238b3"},
1035
+ ]
1036
+
1037
+ [[package]]
1038
+ name = "setuptools"
1039
+ version = "80.9.0"
1040
+ requires_python = ">=3.9"
1041
+ summary = "Easily download, build, install, upgrade, and uninstall Python packages"
1042
+ groups = ["default"]
1043
+ marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
1044
+ files = [
1045
+ {file = "setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922"},
1046
+ {file = "setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c"},
1047
+ ]
1048
+
1049
+ [[package]]
1050
+ name = "six"
1051
+ version = "1.17.0"
1052
+ requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
1053
+ summary = "Python 2 and 3 compatibility utilities"
1054
+ groups = ["default"]
1055
+ files = [
1056
+ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
1057
+ {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
1058
+ ]
1059
+
1060
+ [[package]]
1061
+ name = "smmap"
1062
+ version = "5.0.2"
1063
+ requires_python = ">=3.7"
1064
+ summary = "A pure Python implementation of a sliding window memory map manager"
1065
+ groups = ["default"]
1066
+ files = [
1067
+ {file = "smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e"},
1068
+ {file = "smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5"},
1069
+ ]
1070
+
1071
+ [[package]]
1072
+ name = "streamlit"
1073
+ version = "1.49.1"
1074
+ requires_python = "!=3.9.7,>=3.9"
1075
+ summary = "A faster way to build and share data apps"
1076
+ groups = ["default"]
1077
+ dependencies = [
1078
+ "altair!=5.4.0,!=5.4.1,<6,>=4.0",
1079
+ "blinker<2,>=1.5.0",
1080
+ "cachetools<7,>=4.0",
1081
+ "click<9,>=7.0",
1082
+ "gitpython!=3.1.19,<4,>=3.0.7",
1083
+ "numpy<3,>=1.23",
1084
+ "packaging<26,>=20",
1085
+ "pandas<3,>=1.4.0",
1086
+ "pillow<12,>=7.1.0",
1087
+ "protobuf<7,>=3.20",
1088
+ "pyarrow>=7.0",
1089
+ "pydeck<1,>=0.8.0b4",
1090
+ "requests<3,>=2.27",
1091
+ "tenacity<10,>=8.1.0",
1092
+ "toml<2,>=0.10.1",
1093
+ "tornado!=6.5.0,<7,>=6.0.3",
1094
+ "typing-extensions<5,>=4.4.0",
1095
+ "watchdog<7,>=2.1.5; platform_system != \"Darwin\"",
1096
+ ]
1097
+ files = [
1098
+ {file = "streamlit-1.49.1-py3-none-any.whl", hash = "sha256:ad7b6d0dc35db168587acf96f80378249467fc057ed739a41c511f6bf5aa173b"},
1099
+ {file = "streamlit-1.49.1.tar.gz", hash = "sha256:6f213f1e43f035143a56f58ad50068d8a09482f0a2dad1050d7e7e99a9689818"},
1100
+ ]
1101
+
1102
+ [[package]]
1103
+ name = "sympy"
1104
+ version = "1.14.0"
1105
+ requires_python = ">=3.9"
1106
+ summary = "Computer algebra system (CAS) in Python"
1107
+ groups = ["default"]
1108
+ dependencies = [
1109
+ "mpmath<1.4,>=1.1.0",
1110
+ ]
1111
+ files = [
1112
+ {file = "sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5"},
1113
+ {file = "sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517"},
1114
+ ]
1115
+
1116
+ [[package]]
1117
+ name = "tenacity"
1118
+ version = "9.1.2"
1119
+ requires_python = ">=3.9"
1120
+ summary = "Retry code until it succeeds"
1121
+ groups = ["default"]
1122
+ files = [
1123
+ {file = "tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138"},
1124
+ {file = "tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb"},
1125
+ ]
1126
+
1127
+ [[package]]
1128
+ name = "toml"
1129
+ version = "0.10.2"
1130
+ requires_python = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
1131
+ summary = "Python Library for Tom's Obvious, Minimal Language"
1132
+ groups = ["default"]
1133
+ files = [
1134
+ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
1135
+ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
1136
+ ]
1137
+
1138
+ [[package]]
1139
+ name = "torch"
1140
+ version = "2.8.0"
1141
+ requires_python = ">=3.9.0"
1142
+ summary = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
1143
+ groups = ["default"]
1144
+ dependencies = [
1145
+ "filelock",
1146
+ "fsspec",
1147
+ "jinja2",
1148
+ "networkx",
1149
+ "nvidia-cublas-cu12==12.8.4.1; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1150
+ "nvidia-cuda-cupti-cu12==12.8.90; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1151
+ "nvidia-cuda-nvrtc-cu12==12.8.93; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1152
+ "nvidia-cuda-runtime-cu12==12.8.90; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1153
+ "nvidia-cudnn-cu12==9.10.2.21; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1154
+ "nvidia-cufft-cu12==11.3.3.83; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1155
+ "nvidia-cufile-cu12==1.13.1.3; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1156
+ "nvidia-curand-cu12==10.3.9.90; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1157
+ "nvidia-cusolver-cu12==11.7.3.90; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1158
+ "nvidia-cusparse-cu12==12.5.8.93; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1159
+ "nvidia-cusparselt-cu12==0.7.1; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1160
+ "nvidia-nccl-cu12==2.27.3; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1161
+ "nvidia-nvjitlink-cu12==12.8.93; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1162
+ "nvidia-nvtx-cu12==12.8.90; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1163
+ "setuptools; python_version >= \"3.12\"",
1164
+ "sympy>=1.13.3",
1165
+ "triton==3.4.0; platform_system == \"Linux\" and platform_machine == \"x86_64\"",
1166
+ "typing-extensions>=4.10.0",
1167
+ ]
1168
+ files = [
1169
+ {file = "torch-2.8.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:220a06fd7af8b653c35d359dfe1aaf32f65aa85befa342629f716acb134b9710"},
1170
+ {file = "torch-2.8.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:c12fa219f51a933d5f80eeb3a7a5d0cbe9168c0a14bbb4055f1979431660879b"},
1171
+ {file = "torch-2.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:8c7ef765e27551b2fbfc0f41bcf270e1292d9bf79f8e0724848b1682be6e80aa"},
1172
+ {file = "torch-2.8.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:5ae0524688fb6707c57a530c2325e13bb0090b745ba7b4a2cd6a3ce262572916"},
1173
+ ]
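+ # Note: the nvidia-*-cu12, cusparselt, nccl and triton entries above are reachable
+ # only through these Linux/x86_64 markers, so other platforms (e.g. the macOS
+ # arm64 wheel here) install torch without the CUDA stack.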
1174
+
1175
+ [[package]]
1176
+ name = "torchvision"
1177
+ version = "0.23.0"
1178
+ requires_python = ">=3.9"
1179
+ summary = "image and video datasets and models for torch deep learning"
1180
+ groups = ["default"]
1181
+ dependencies = [
1182
+ "numpy",
1183
+ "pillow!=8.3.*,>=5.3.0",
1184
+ "torch==2.8.0",
1185
+ ]
1186
+ files = [
1187
+ {file = "torchvision-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:49aa20e21f0c2bd458c71d7b449776cbd5f16693dd5807195a820612b8a229b7"},
1188
+ {file = "torchvision-0.23.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:01dc33ee24c79148aee7cdbcf34ae8a3c9da1674a591e781577b716d233b1fa6"},
1189
+ {file = "torchvision-0.23.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:35c27941831b653f5101edfe62c03d196c13f32139310519e8228f35eae0e96a"},
1190
+ {file = "torchvision-0.23.0-cp311-cp311-win_amd64.whl", hash = "sha256:09bfde260e7963a15b80c9e442faa9f021c7e7f877ac0a36ca6561b367185013"},
1191
+ ]
1192
+
1193
+ [[package]]
1194
+ name = "tornado"
1195
+ version = "6.5.2"
1196
+ requires_python = ">=3.9"
1197
+ summary = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
1198
+ groups = ["default"]
1199
+ files = [
1200
+ {file = "tornado-6.5.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6"},
1201
+ {file = "tornado-6.5.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef"},
1202
+ {file = "tornado-6.5.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e"},
1203
+ {file = "tornado-6.5.2-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882"},
1204
+ {file = "tornado-6.5.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108"},
1205
+ {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c"},
1206
+ {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4"},
1207
+ {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04"},
1208
+ {file = "tornado-6.5.2-cp39-abi3-win32.whl", hash = "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0"},
1209
+ {file = "tornado-6.5.2-cp39-abi3-win_amd64.whl", hash = "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f"},
1210
+ {file = "tornado-6.5.2-cp39-abi3-win_arm64.whl", hash = "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af"},
1211
+ {file = "tornado-6.5.2.tar.gz", hash = "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0"},
1212
+ ]
1213
+
1214
+ [[package]]
1215
+ name = "tqdm"
1216
+ version = "4.67.1"
1217
+ requires_python = ">=3.7"
1218
+ summary = "Fast, Extensible Progress Meter"
1219
+ groups = ["default"]
1220
+ dependencies = [
1221
+ "colorama; platform_system == \"Windows\"",
1222
+ ]
1223
+ files = [
1224
+ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
1225
+ {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
1226
+ ]
1227
+
1228
+ [[package]]
1229
+ name = "triton"
1230
+ version = "3.4.0"
1231
+ requires_python = "<3.14,>=3.9"
1232
+ summary = "A language and compiler for custom Deep Learning operations"
1233
+ groups = ["default"]
1234
+ marker = "platform_system == \"Linux\" and platform_machine == \"x86_64\""
1235
+ dependencies = [
1236
+ "importlib-metadata; python_version < \"3.10\"",
1237
+ "setuptools>=40.8.0",
1238
+ ]
1239
+ files = [
1240
+ {file = "triton-3.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b70f5e6a41e52e48cfc087436c8a28c17ff98db369447bcaff3b887a3ab4467"},
1241
+ ]
1242
+
1243
+ [[package]]
1244
+ name = "typing-extensions"
1245
+ version = "4.15.0"
1246
+ requires_python = ">=3.9"
1247
+ summary = "Backported and Experimental Type Hints for Python 3.9+"
1248
+ groups = ["default"]
1249
+ files = [
1250
+ {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"},
1251
+ {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"},
1252
+ ]
1253
+
1254
+ [[package]]
1255
+ name = "tzdata"
1256
+ version = "2025.2"
1257
+ requires_python = ">=2"
1258
+ summary = "Provider of IANA time zone data"
1259
+ groups = ["default"]
1260
+ files = [
1261
+ {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"},
1262
+ {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"},
1263
+ ]
1264
+
1265
+ [[package]]
1266
+ name = "ultralytics"
1267
+ version = "8.3.197"
1268
+ requires_python = ">=3.8"
1269
+ summary = "Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification."
1270
+ groups = ["default"]
1271
+ dependencies = [
1272
+ "matplotlib>=3.3.0",
1273
+ "numpy>=1.23.0",
1274
+ "opencv-python>=4.6.0",
1275
+ "pillow>=7.1.2",
1276
+ "polars",
1277
+ "psutil",
1278
+ "pyyaml>=5.3.1",
1279
+ "requests>=2.23.0",
1280
+ "scipy>=1.4.1",
1281
+ "torch!=2.4.0,>=1.8.0; sys_platform == \"win32\"",
1282
+ "torch>=1.8.0",
1283
+ "torchvision>=0.9.0",
1284
+ "ultralytics-thop>=2.0.0",
1285
+ ]
1286
+ files = [
1287
+ {file = "ultralytics-8.3.197-py3-none-any.whl", hash = "sha256:5ee4c3608787b9fe95c39bd80bc5689bcee00ff9530e62c9b58535672e6bd65a"},
1288
+ {file = "ultralytics-8.3.197.tar.gz", hash = "sha256:6fdf8554d609d485463353b060470a56a0ef736c7591c57fb8b648642e4b1b48"},
1289
+ ]
1290
+
1291
+ [[package]]
1292
+ name = "ultralytics-thop"
1293
+ version = "2.0.17"
1294
+ requires_python = ">=3.8"
1295
+ summary = "Ultralytics THOP package for fast computation of PyTorch model FLOPs and parameters."
1296
+ groups = ["default"]
1297
+ dependencies = [
1298
+ "numpy",
1299
+ "torch",
1300
+ ]
1301
+ files = [
1302
+ {file = "ultralytics_thop-2.0.17-py3-none-any.whl", hash = "sha256:36ba7bd297b26cfd193531f4b8f42075ecf2059d9c0f04907521fee1db94e8c7"},
1303
+ {file = "ultralytics_thop-2.0.17.tar.gz", hash = "sha256:f4572aeb7236939f35c72f966e4e0c3d42fd433ae2974d816865d43e29dc981b"},
1304
+ ]
1305
+
1306
+ [[package]]
1307
+ name = "urllib3"
1308
+ version = "2.5.0"
1309
+ requires_python = ">=3.9"
1310
+ summary = "HTTP library with thread-safe connection pooling, file post, and more."
1311
+ groups = ["default"]
1312
+ files = [
1313
+ {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"},
1314
+ {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"},
1315
+ ]
1316
+
1317
+ [[package]]
1318
+ name = "watchdog"
1319
+ version = "6.0.0"
1320
+ requires_python = ">=3.9"
1321
+ summary = "Filesystem events monitoring"
1322
+ groups = ["default"]
1323
+ marker = "platform_system != \"Darwin\""
1324
+ files = [
1325
+ {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"},
1326
+ {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"},
1327
+ {file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"},
1328
+ {file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"},
1329
+ {file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"},
1330
+ {file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"},
1331
+ {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"},
1332
+ {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"},
1333
+ {file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"},
1334
+ {file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"},
1335
+ {file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"},
1336
+ {file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"},
1337
+ {file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"},
1338
+ {file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"},
1339
+ ]
pyproject.toml ADDED
@@ -0,0 +1,148 @@
1
+ [project]
2
+ name = "object_detection_project"
3
+ version = "0.1.0"
4
+ description = "Default template for PDM package"
5
+ authors = [
6
+ {name = "", email = ""},
7
+ ]
8
+ dependencies = [
9
+ "streamlit>=1.49.1",
10
+ "moviepy>=2.2.1",
11
+ "pillow>=10.0.0",
12
+ "ultralytics>=8.3.0",
13
+ "numpy>=2.2.6",
14
+ "torch>=2.8.0",
15
+ "pandas>=2.3.2",
16
+ ]
17
+ requires-python = "==3.11.*"
18
+ readme = "README.md"
19
+ license = {text = "MIT"}
20
+
21
+
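+ # Typical PDM workflow against this manifest (assumed standard PDM commands):
+ #   pdm lock            # resolve the requirements above into pdm.lock
+ #   pdm install --prod  # install the locked set, skipping the dev group
+ # The cp311-only wheels recorded in pdm.lock match the ==3.11.* pin above.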
22
+ [tool.pdm]
23
+ distribution = false
24
+
25
+
26
+
27
+ [tool.ruff]
28
+ ignore = [
29
+
30
+ # (missing public docstrings) These rules work from the Python sense of "public" rather than
31
+ # our bespoke definition based on `@public`. Once ruff supports custom plugins, we can write
32
+ # appropriate rules to require docstrings for `@public`.
33
+ "D100",
34
+ "D101",
35
+ "D102",
36
+ "D103",
37
+ "D104",
38
+ "D105",
39
+ "D106",
40
+ "D107",
41
+
42
+ # (docstring imperative mood) Overly restrictive.
43
+ "D401",
44
+
45
+ # (module level import not at top) There are several places where we use e.g.
46
+ # warnings.filterwarnings calls before imports.
47
+ "E402",
48
+
49
+ # (line too long): This fires for comments, which black won't wrap.
50
+ # Disabling until there is an autoformat solution available for comments.
51
+ "E501",
52
+
53
+ # Pandas sometime need `==` for comparison instead of `is`
54
+ # Comparision to `None` should be done with `is` rather than `==`.
55
+ "E712",
56
+
57
+ # (no type comparison): There are a few places where we use `== type(None)` which are more clear
58
+ # than the equivalent `isinstance` check.
59
+ 'E721',
60
+
61
+ # (bare exception): There are many places where we want to catch a maximally generic exception.
62
+ 'E722',
63
+
64
+ # (no assign lambda): existing code assigns lambdas in a few places. With black formatting
65
+ # requiring extra empty lines between defs, disallowing lambda assignment can make code less
66
+ # readable.
67
+ "E731",
68
+
69
+ # (try-except-in-loop) We use this pattern in many places, and the performance impact is negligible.
70
+ "PERF203",
71
+
72
+ # (no concatenation) The existing codebase has many concatenations; no reason to disallow them.
73
+ "RUF005",
74
+
75
+ # (use ClassVar for attr declarations with defaults) This is a good rule for vanilla Python, but
76
+ # triggers false positives for many libs that have DSLs that make use of attr defaults.
77
+ "RUF012",
78
+
79
+
80
+ ##### TEMPORARY DISABLES
81
+
82
+ # (assorted docstring rules) There are too many violations of these to enable
83
+ # right now, but we should enable them after fixing the violations.
84
+ "D200", # (one-line docstring should fit)
85
+ "D205", # (blank line after summary)
86
+ "D417", # (missing arg in docstring)
87
+ # (assorted perf rules) We have a lot of violations, enable when autofix is available
88
+ "PERF401", # (manual-list-comprehension)
89
+ "PERF402", # (manual-list-copy)
90
+ ]
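+ # For example, the E712 ignore above keeps pandas boolean masks legal:
+ #   adults = df[df["is_adult"] == True]  # elementwise; `is True` would not broadcast
+ # (`df` being a hypothetical DataFrame with a boolean "is_adult" column.)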
91
+ # By default, ruff only uses all "E" (pycodestyle) and "F" (pyflakes) rules.
92
+ # Here we append to the defaults.
93
+ select = [
94
+ # (flake8-builtins) detect shadowing of python builtin symbols by variables and arguments.
95
+ # Attributes are OK (which is why A003 is not included here).
96
+ "A001",
97
+ "A002",
98
+
99
+ # (useless expression): Expressions that aren't assigned to anything are typically bugs.
100
+ "B018",
101
+
102
+ # (pydocstyle) Docstring-related rules. A large subset of these are ignored by the
103
+ # "convention=google" setting, we set under tool.ruff.pydocstyle.
104
+ "D",
105
+
106
+ # (pycodestyle) pycodestyle rules
107
+ "E",
108
+
109
+ # (pyflakes) pyflakes rules
110
+ "F",
111
+
112
+ # (isort) detect improperly sorted imports
113
+ "I001",
114
+
115
+ # (performance) perflint rules
116
+ "PERF",
117
+
118
+ # (pylint) use all pylint rules from categories "Convention", "Error", and "Warning" (ruff
119
+ # currently implements only a subset of pylint's rules)
120
+ "PLE",
121
+ "PLW",
122
+
123
+ # (no commented out code) keep commented out code blocks out of the codebase
124
+ # "ERA001",
125
+
126
+ # (ruff-specific) Enable all ruff-specific checks (i.e. not ports of
127
+ # functionality from an existing linter).
128
+ "RUF",
129
+
130
+ # (private member access) Flag access to `_`-prefixed symbols. By default the various special
131
+ # methods on `NamedTuple` are ignored (e.g. `_replace`).
132
+ "SLF001",
133
+
134
+ # (flake8-type-checking) Auto-sort imports into TYPE_CHECKING blocks depending on whether
135
+ # they are runtime or type-only imports.
136
+ "TCH",
137
+
138
+ # (f-strings) use f-strings instead of .format()
139
+ "UP032",
140
+
141
+ # (invalid escape sequence) flag errant backslashes
142
+ "W605",
143
+ ]
144
+
145
+ [dependency-groups]
146
+ dev = [
147
+ "ruff>=0.13.1",
148
+ ]
services/app_service/.streamlit/config.toml ADDED
@@ -0,0 +1,10 @@
1
+ [theme]
2
+ base="dark"
3
+ primaryColor="#f63366"
4
+ backgroundColor="#1e1e1e"
5
+ secondaryBackgroundColor="#31333F"
6
+ textColor="#FAFAFA"
7
+ font="sans serif"
8
+
9
+ [client]
10
+ showSidebarNavigation = false
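+ # Streamlit picks this file up from the app's .streamlit/ directory at startup.
+ # Default multipage navigation is disabled above because app.py builds its own
+ # sidebar menu with st.page_link.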
services/app_service/app.py ADDED
@@ -0,0 +1,148 @@
1
+ import base64
2
+ from pathlib import Path
3
+
4
+ import streamlit as st
5
+
6
+
7
+ def local_image_to_data_url(path: str | Path) -> str:
8
+ """Convert a local image into a base64 data URL for inlined CSS backgrounds."""
9
+ p = Path(path)
10
+ if not p.is_absolute():
11
+ p = Path(__file__).parent / p
12
+ mime = "image/png" if p.suffix.lower() == ".png" else "image/jpeg"
13
+ b64 = base64.b64encode(p.read_bytes()).decode()
14
+ return f"data:{mime};base64,{b64}"
15
+
16
+
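+ # Note: the data URL holds the whole image in memory and inflates the page
+ # payload (base64 adds roughly a third), so this suits small hero/background
+ # images; main() below inlines resources/images/rescue.png this way.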
17
+ def main() -> None:
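+ # st.set_page_config must be the first Streamlit call in the script run,
+ # so it leads main().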
18
+ st.set_page_config(
19
+ page_title="Home",
20
+ layout="wide",
21
+ initial_sidebar_state="expanded",
22
+ )
23
+
24
+ with st.sidebar:
25
+ st.header("Menu")
26
+ st.page_link("app.py", label="Home")
27
+ st.page_link("pages/bushland_beacon.py", label="Bushland beacon")
28
+ st.page_link("pages/lost_at_sea.py", label="Lost at sea")
29
+ st.page_link("pages/signal_watch.py", label="Detection")
30
+
31
+ hide_default_css = """
32
+ <style>
33
+ #MainMenu {visibility: hidden;}
34
+ footer {visibility: hidden;}
35
+ .block-container {padding-top: 0rem; padding-bottom: 0rem;}
36
+ </style>
37
+ """
38
+ st.markdown(hide_default_css, unsafe_allow_html=True)
39
+
40
+ bg_image = local_image_to_data_url("resources/images/rescue.png")
41
+
42
+ st.markdown(
43
+ f"""
44
+ <style>
45
+ :root {{
46
+ --text: #ffffff;
47
+ --text-dim: rgba(255,255,255,0.85);
48
+ --cta: #3e6ae1;
49
+ --cta-alt: #f4f4f4;
50
+ }}
51
+
52
+ .hero {{
53
+ position: relative;
54
+ min-height: 92vh;
55
+ width: 100%;
56
+ display: grid;
57
+ place-items: center;
58
+ text-align: center;
59
+ color: var(--text);
60
+ background-image:
61
+ linear-gradient(180deg, rgba(0,0,0,0.10) 0%, rgba(0,0,0,0.20) 35%, rgba(0,0,0,0.35) 100%),
62
+ url('{bg_image}');
63
+ background-size: cover;
64
+ background-position: center;
65
+ background-repeat: no-repeat;
66
+ }}
67
+
68
+ .hero .content {{
69
+ max-width: 1100px;
70
+ padding: 2rem 1rem 5rem;
71
+ }}
72
+
73
+ .hero h1 {{
74
+ font-size: clamp(2.6rem, 6vw, 5rem);
75
+ font-weight: 700;
76
+ letter-spacing: 0.3px;
77
+ margin: 0 0 0.7rem 0;
78
+ }}
79
+
80
+ .hero .subtitle {{
81
+ font-size: clamp(1rem, 2.1vw, 1.3rem);
82
+ color: var(--text-dim);
83
+ margin-bottom: 1.4rem;
84
+ }}
85
+
86
+ .hero .ctas {{
87
+ display: flex;
88
+ gap: 0.8rem;
89
+ justify-content: center;
90
+ flex-wrap: wrap;
91
+ }}
92
+
93
+ .hero .btn {{
94
+ border: 0;
95
+ border-radius: 4px;
96
+ padding: 0.9rem 1.4rem;
97
+ font-weight: 600;
98
+ cursor: pointer;
99
+ transition: transform 120ms ease, opacity 120ms ease;
100
+ }}
101
+
102
+ .hero .btn.primary {{ background: var(--cta); color: white; }}
103
+ .hero .btn.secondary {{ background: var(--cta-alt); color: #171a20; }}
104
+
105
+ .hero .btn:hover {{ transform: translateY(-1px); }}
106
+ .nav {{
107
+ position: fixed;
108
+ top: 0; left: 0; right: 0;
109
+ height: 54px;
110
+ display: flex;
111
+ align-items: center;
112
+ justify-content: center;
113
+ gap: 1.2rem;
114
+ backdrop-filter: saturate(180%) blur(10px);
115
+ background: rgba(255,255,255,0.2);
116
+ color: white;
117
+ z-index: 9999;
118
+ }}
119
+ .nav a {{
120
+ color: white;
121
+ text-decoration: none;
122
+ font-weight: 600;
123
+ font-size: 0.95rem;
124
+ opacity: 0.95;
125
+ }}
126
+ .nav a:hover {{ opacity: 1; }}
127
+ </style>
128
+ """,
129
+ unsafe_allow_html=True,
130
+ )
131
+
132
+ st.markdown(
133
+ """
134
+ <section class="hero">
135
+ <div class="content">
136
+ <h1>NATSAR Search & Rescue Simulation Hub</h1>
137
+ <div class="subtitle">
138
+ This web application helps search and rescue teams and researchers detect critical objects from images captured via drones, bodycams, or other camera systems.
139
+ </div>
140
+ </div>
141
+ </section>
142
+ """,
143
+ unsafe_allow_html=True,
144
+ )
145
+
146
+
147
+ if __name__ == "__main__":
148
+ main()
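
For reference, the `local_image_to_data_url` helper above can be checked in isolation. A minimal sketch, assuming it is run from `services/app_service/` with the app's dependencies installed (the image is one of the bundled resources):

```python
from app import local_image_to_data_url

# Relative paths are resolved against the app.py directory by the helper.
url = local_image_to_data_url("resources/images/rescue.png")
assert url.startswith("data:image/png;base64,")
print(f"data URL length: {len(url)} chars")
```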
services/app_service/app_old.py ADDED
@@ -0,0 +1,204 @@
1
+ import os
2
+ import tempfile
3
+ import time
4
+ from glob import glob
5
+ from pathlib import Path
6
+ from shutil import which
7
+
8
+ import streamlit as st
9
+ from deim_model import DeimHgnetV2MDrone
10
+ from model import *
11
+ from PIL import Image
12
+
13
+
14
+ def main():
15
+ model = DeimHgnetV2MDrone()
16
+ minimum_confidence_threshold = 0.5
17
+
18
+ st.set_page_config(page_title="SatSense Demo")
19
+ st.title(":satellite: SatSense Demo")
20
+ st.markdown(
21
+ """
22
+ The SatSense demo app simplifies annotating images and videos taken by satellites.
23
+ It employs cutting-edge object detection models to automatically analyze and recognize
24
+ various objects in satellite imagery, including vehicles and ships.
25
+
26
+ #### How to get started
27
+
28
+ 1. **Upload Satellite Imagery:** Use the sidebar to upload your satellite imagery media
29
+ files for analysis.
30
+ 2. **Review Identified Objects:** Explore the annotated objects marked by the model.
31
+
32
+ #### Tips for usage
33
+
34
+ 1. Please clear any existing uploads in the sidebar before uploading a new file.
35
+ 2. For optimal results, please upload clear and high-resolution satellite media files.
36
+ 3. [Location SA Map Viewer](https://location.sa.gov.au/viewer/) provides satellite imagery that can be used as image input.
37
+
38
+ SatSense simplifies the process of annotating satellite imagery and allows you to
39
+ export the annotated media files. Start annotating and discovering objects of interest
40
+ effortlessly!
41
+
42
+ ***Note:** In its current MVP stage, the SatSense demo offers a glimpse into the
43
+ world of automatic object detection in satellite imagery. Your feedback can help shape
44
+ its future improvements!*
45
+ """
46
+ )
47
+
48
+ # Sidebar to set minimum confidence threshold
49
+ st.sidebar.header("Parameters")
50
+ minimum_confidence_threshold = st.sidebar.slider(
51
+ "Minimum confidence threshold",
52
+ min_value=0.0,
53
+ max_value=1.0,
54
+ step=0.1,
55
+ value=minimum_confidence_threshold,
56
+ format="%.1f",
57
+ )
58
+ st.sidebar.markdown("---")
59
+
60
+ # Sidebar for image detection
61
+ st.sidebar.header("Image Detection")
62
+ uploaded_image = st.sidebar.file_uploader(
63
+ "Upload an image", type=["jpg", "jpeg", "png"]
64
+ )
65
+
66
+ st.sidebar.markdown("---")
67
+
68
+ # Sidebar for video detection
69
+ st.sidebar.header("Video Detection")
70
+ uploaded_video = st.sidebar.file_uploader(
71
+ "Upload a video", type=["mp4", "avi", "mov"]
72
+ )
73
+
74
+ if uploaded_image:
75
+ st.markdown("---")
76
+ st.write("")
77
+
78
+ st.markdown("#### Uploaded image")
79
+ image = Image.open(uploaded_image)
80
+ st.image(image, use_column_width=True)
81
+
82
+ st.write("")
83
+ st.write("")
84
+
85
+ with st.spinner("Processing..."):
86
+ annotated_image = model.predict_image(
87
+ image, min_confidence=minimum_confidence_threshold
88
+ )
89
+
90
+ st.markdown("#### Annotated image")
91
+ st.image(annotated_image, use_column_width=True)
92
+
93
+ if uploaded_video:
94
+ st.markdown("---")
95
+ st.write("")
96
+
97
+ temp_dir = tempfile.mkdtemp()
98
+ # Preserve uploaded extension to maximize compatibility with OpenCV/YOLO
99
+ uploaded_ext = Path(uploaded_video.name).suffix.lower() or ".mp4"
100
+ temp_video_path = os.path.join(temp_dir, f"temp_video{uploaded_ext}")
101
+ annotated_dir = "./annotated_video"
102
+ os.makedirs(annotated_dir, exist_ok=True)
103
+ annotated_video_path_input_ext = os.path.join(
104
+ annotated_dir, f"temp_video{uploaded_ext}"
105
+ )
106
+ annotated_video_path_mp4 = os.path.join(annotated_dir, "temp_video.mp4")
107
+
108
+ st.markdown("#### Uploaded video")
109
+ uploaded_video_bytes = uploaded_video.getvalue()
110
+ st.video(uploaded_video_bytes)
111
+
112
+ st.write("")
113
+ st.write("")
114
+
115
+ progress_bar = st.progress(0.3, text="Performing object detection...")
116
+
117
+ with open(temp_video_path, "wb") as video_file:
118
+ video_file.write(uploaded_video.getvalue())
119
+
120
+ model.predict_video(
121
+ temp_video_path,
122
+ min_confidence=minimum_confidence_threshold,
123
+ target_dir_name="annotated_video",
124
+ )
125
+
126
+ # Resolve the actual saved annotated video. Ultralytics may write .avi even if input is .mp4
127
+ final_video_path = None
128
+ preferred_candidates = [
129
+ annotated_video_path_input_ext,
130
+ os.path.join(annotated_dir, "temp_video.mp4"),
131
+ os.path.join(annotated_dir, "temp_video.avi"),
132
+ ]
133
+ for cand in preferred_candidates:
134
+ if os.path.exists(cand):
135
+ final_video_path = cand
136
+ break
137
+
138
+ if final_video_path is None:
139
+ candidates = []
140
+ for pattern in ("*.mp4", "*.avi", "*.mov", "*.mkv", "*.webm"):
141
+ candidates.extend(glob(os.path.join(annotated_dir, pattern)))
142
+ if candidates:
143
+ final_video_path = max(candidates, key=os.path.getmtime)
144
+ else:
145
+ progress_bar.empty()
146
+ st.error(
147
+ "Annotated video not found after detection. Please try again or check logs."
148
+ )
149
+ return
150
+
151
+ # If the annotated output isn't mp4, try converting with ffmpeg if available
152
+ if Path(final_video_path).suffix.lower() != ".mp4":
153
+ progress_bar.progress(0.67, text="Converting video format...")
154
+ if which("ffmpeg"):
155
+ import subprocess
156
+
157
+ try:
158
+ subprocess.run(
159
+ [
160
+ "ffmpeg",
161
+ "-y",
162
+ "-i",
163
+ final_video_path,
164
+ "-c:v",
165
+ "libx264",
166
+ "-pix_fmt",
167
+ "yuv420p",
168
+ "-crf",
169
+ "23",
170
+ "-preset",
171
+ "veryfast",
172
+ "-an",
173
+ annotated_video_path_mp4,
174
+ ],
175
+ check=True,
176
+ stdout=subprocess.DEVNULL,
177
+ stderr=subprocess.STDOUT,
178
+ )
179
+ final_video_path = annotated_video_path_mp4
180
+ except Exception:
181
+ st.warning(
182
+ "ffmpeg failed to convert the video. Attempting to display original format."
183
+ )
184
+ else:
185
+ st.info(
186
+ "Install ffmpeg to enable conversion to mp4 (e.g. `brew install ffmpeg` on macOS) or use the provided Dockerfile."
187
+ )
188
+
189
+ progress_bar.progress(1.0, text="Done!")
190
+ time.sleep(1)
191
+ progress_bar.empty()
192
+
193
+ st.markdown("#### Annotated video")
194
+ annotated_video_file = open(final_video_path, "rb")
195
+ annotated_video_bytes = annotated_video_file.read()
196
+ # Let Streamlit infer format from the file when possible
197
+ st.video(annotated_video_bytes)
198
+
199
+ st.markdown("---")
200
+ st.markdown("Demo built by [Lucid Insights Pty Ltd](https://lucidinsights.com.au).")
201
+
202
+
203
+ if __name__ == "__main__":
204
+ main()
services/app_service/deim_model.py ADDED
@@ -0,0 +1,228 @@
1
+ import json
2
+ import os
3
+ from pathlib import Path
4
+ from typing import Tuple
5
+
6
+ import cv2
7
+ import numpy as np
8
+ import torch
9
+ import torchvision.transforms as T
10
+ from model import BaseModel
11
+ from PIL import Image
12
+
13
+ _TORCH_MIN_VERSION = (2, 5)
14
+
15
+
16
+ def _parse_version(version_str: str) -> Tuple[int, ...]:
17
+ parts = []
18
+ for piece in version_str.split("+")[0].split("."):
19
+ try:
20
+ parts.append(int(piece))
21
+ except ValueError:
22
+ break
23
+ return tuple(parts)
24
+
25
+
26
+ class DeimHgnetV2MDrone(BaseModel):
27
+ def __init__(self, device: str = "cpu", version: str = "v2"):
28
+ repo_root = Path(__file__).resolve().parents[1]
29
+ default_rel = (
30
+ Path("app_service") / "models" / f"model_deimhgnetV2m_{device}_{version}.pt"
31
+ )
32
+ # Allow explicit override via env var
33
+ override = (
34
+ Path(os.environ["DEIM_WEIGHTS_PATH"])
35
+ if "DEIM_WEIGHTS_PATH" in os.environ
36
+ else None
37
+ )
38
+
39
+ candidate_paths = [
40
+ override,
41
+ repo_root / default_rel,
42
+ Path(__file__).resolve().parent
43
+ / "models"
44
+ / f"model_deimhgnetV2m_{device}_{version}.pt",
45
+ Path.cwd() / "services" / default_rel,
46
+ Path("/app") / "services" / default_rel,
47
+ ]
48
+ weights_path = next((p for p in candidate_paths if p and p.exists()), None)
49
+ if weights_path is None:
50
+ models_dir = Path(__file__).resolve().parent / "models"
51
+ alt_models_dir = repo_root / "app_service" / "models"
52
+ available = []
53
+ for d in [models_dir, alt_models_dir]:
54
+ try:
55
+ if d.exists():
56
+ available.extend(str(p.name) for p in d.glob("*.pt"))
57
+ except Exception:
58
+ pass
59
+ searched = [str(p) for p in candidate_paths if p]
60
+ raise FileNotFoundError(
61
+ "Model weights not found. Looked in: "
62
+ + "; ".join(searched)
63
+ + ". Available .pt files: "
64
+ + (", ".join(sorted(set(available))) or "<none>")
65
+ )
66
+
67
+ self.device = device
68
+ cfg_path = weights_path.with_suffix(".json")
69
+ if not cfg_path.exists():
70
+ raise FileNotFoundError(
71
+ f"Config JSON not found next to weights: {cfg_path}"
72
+ )
73
+ version_tuple = _parse_version(torch.__version__)
74
+ if version_tuple < _TORCH_MIN_VERSION:
75
+ raise RuntimeError(
76
+ "PyTorch {} is too old for these weights. "
77
+ "Please upgrade to >= {}.{} (e.g. set torch==2.5.1 in Dockerfile).".format(
78
+ torch.__version__, *_TORCH_MIN_VERSION
79
+ )
80
+ )
81
+
82
+ size_bytes = weights_path.stat().st_size
83
+ if size_bytes < 1_000_000:
84
+ raise RuntimeError(
85
+ f"Weights file at {weights_path} is only {size_bytes} bytes. "
86
+ "This usually means Git LFS pointers were copied instead of the binary file. "
87
+ "Run `git lfs pull` before building the container to fetch the real weights."
88
+ )
89
+
90
+         with open(cfg_path, "r") as cfg_file:
+             self.cfg = json.load(cfg_file)
91
+ self.model = torch.jit.load(weights_path, map_location=device).eval()
92
+
93
+ def _preprocess_image(self, image: Image):
94
+ transforms = T.Compose(
95
+ [
96
+ T.Resize((self.cfg["target_size"][0], self.cfg["target_size"][1])),
97
+ T.ToTensor(),
98
+ ]
99
+ )
100
+ return transforms(image).unsqueeze(0).to(self.device)
101
+
102
+ def _postprocess_detections(
103
+ self, scores, bboxes, min_confidence: float, wh: Tuple[int, int]
104
+ ):
105
+ w, h = wh
106
+ b_np = bboxes[0].cpu().numpy()
107
+ s_np = scores.sigmoid()[0].cpu().numpy()
108
+ mask = (s_np >= min_confidence).squeeze()
109
+ if not mask.any():
110
+ return np.zeros((0, 5), dtype=np.float32)
111
+ valid = b_np[mask]
112
+ cx, cy, box_w, box_h = valid[:, 0], valid[:, 1], valid[:, 2], valid[:, 3]
113
+ x1 = cx - box_w / 2
114
+ y1 = cy - box_h / 2
115
+ x2 = cx + box_w / 2
116
+ y2 = cy + box_h / 2
117
+ valid_xyxy = np.stack([x1, y1, x2, y2], axis=1) * [w, h, w, h]
118
+ return np.concatenate([valid_xyxy, s_np[mask]], axis=1)
119
+
120
+ def _nms(self, dets):
121
+ if dets.shape[0] == 0 or self.cfg["nms_iou_thr"] <= 0:
122
+ return dets
123
+ x1 = dets[:, 0]
124
+ y1 = dets[:, 1]
125
+ x2 = dets[:, 2]
126
+ y2 = dets[:, 3]
127
+ scores = dets[:, 4]
128
+ areas = (x2 - x1 + 1) * (y2 - y1 + 1)
129
+ order = scores.argsort()[::-1]
130
+
131
+ keep = []
132
+ while order.size > 0:
133
+ i = order[0]
134
+ keep.append(i)
135
+ xx1 = np.maximum(x1[i], x1[order[1:]])
136
+ yy1 = np.maximum(y1[i], y1[order[1:]])
137
+ xx2 = np.minimum(x2[i], x2[order[1:]])
138
+ yy2 = np.minimum(y2[i], y2[order[1:]])
139
+ w = np.maximum(0.0, xx2 - xx1 + 1)
140
+ h = np.maximum(0.0, yy2 - yy1 + 1)
141
+ inter = w * h
142
+ iou = inter / (areas[i] + areas[order[1:]] - inter)
143
+ inds = np.where(iou <= self.cfg["nms_iou_thr"])[0]
144
+ order = order[inds + 1]
145
+ return dets[keep]
146
+
147
+ def _draw_detections_on_np(
148
+ self, image_np: np.ndarray, dets: np.ndarray
149
+ ) -> np.ndarray:
150
+ for bbox in dets:
151
+ x1, y1, x2, y2, confidence = bbox
152
+ x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
153
+ cv2.rectangle(image_np, (x1, y1), (x2, y2), (0, 255, 0), 2)
154
+ label = f"{confidence:.2f}"
155
+ label_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
156
+ cv2.rectangle(
157
+ image_np,
158
+ (x1, y1 - label_size[1] - 10),
159
+ (x1 + label_size[0], y1),
160
+ (0, 255, 0),
161
+ -1,
162
+ )
163
+ cv2.putText(
164
+ image_np,
165
+ label,
166
+ (x1, y1 - 5),
167
+ cv2.FONT_HERSHEY_SIMPLEX,
168
+ 0.5,
169
+ (0, 0, 0),
170
+ 1,
171
+ )
172
+ return image_np
173
+
174
+ def predict_image(self, image: Image, min_confidence: float) -> Image:
175
+ tensor = self._preprocess_image(image.copy())
176
+ with torch.no_grad():
177
+ labels, bboxes = self.model(tensor)
178
+ dets = self._postprocess_detections(labels, bboxes, min_confidence, image.size)
179
+ dets = self._nms(dets)
180
+ image_np: np.ndarray = np.array(image)
181
+ image_np = self._draw_detections_on_np(image_np, dets)
182
+ return Image.fromarray(image_np)
183
+
184
+ def predict_video(
185
+ self, video, min_confidence: float, target_dir_name="annotated_video"
186
+ ):
187
+ input_path = str(video)
188
+ cap = cv2.VideoCapture(input_path)
189
+ if not cap.isOpened():
190
+ raise ValueError(f"Cannot open video: {input_path}")
191
+
192
+         width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+         height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+         fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
+
+         input_p = Path(input_path)
+         out_dir = Path(target_dir_name)
+         out_dir.mkdir(parents=True, exist_ok=True)
+         out_path = out_dir / f"{input_p.stem}_annotated.mp4"
+         # Write annotated frames to disk; cv2.imshow fails on headless
+         # servers, and previously the returned path was never created.
+         writer = cv2.VideoWriter(
+             str(out_path), cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height)
+         )
+
+         try:
+             while True:
+                 ret, frame_bgr = cap.read()
+                 if not ret:
+                     break
+                 frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
+                 pil_img = Image.fromarray(frame_rgb)
+
+                 tensor = self._preprocess_image(pil_img.copy())
+                 with torch.no_grad():
+                     labels, bboxes = self.model(tensor)
+                 dets = self._postprocess_detections(
+                     labels, bboxes, min_confidence, (width, height)
+                 )
+                 dets = self._nms(dets)
+
+                 frame_bgr = self._draw_detections_on_np(frame_bgr, dets)
+                 writer.write(frame_bgr)
+         finally:
+             cap.release()
+             writer.release()
+
+         return str(out_path)
223
+
224
+
225
+ if __name__ == "__main__":
+     model = DeimHgnetV2MDrone(version="v2")
+     out_path = model.predict_video("./resources/videos/raw/sample1.mp4", 0.3)
+     print(f"Annotated video written to {out_path}")
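
A minimal sketch of driving `DeimHgnetV2MDrone` on a single frame, assuming the Git LFS weights have been pulled and the script runs from `services/app_service/`. `DEIM_WEIGHTS_PATH` is the override hook the constructor reads; the path below points at the bundled CPU checkpoint, whose `.json` sidecar must sit next to it:

```python
import os

from PIL import Image

from deim_model import DeimHgnetV2MDrone

# Optional override; otherwise the constructor searches its candidate paths.
os.environ.setdefault("DEIM_WEIGHTS_PATH", "models/model_deimhgnetV2m_cpu_v2.pt")

model = DeimHgnetV2MDrone(device="cpu", version="v2")
frame = Image.open("resources/images/test.jpg").convert("RGB")
annotated = model.predict_image(frame, min_confidence=0.5)
annotated.save("annotated_test.jpg")
```

Note the constructor also enforces `torch >= 2.5`, matching the TorchScript export.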
services/app_service/model.py ADDED
@@ -0,0 +1,90 @@
1
+ from abc import ABC, abstractmethod
2
+ from pathlib import Path
3
+
4
+ from PIL import Image
5
+ from ultralytics import YOLO
6
+
7
+
8
+ class BaseModel(ABC):
9
+ @abstractmethod
10
+ def __init__(self):
11
+ pass
12
+
13
+ @abstractmethod
14
+ def predict_image(self, image):
15
+ pass
16
+
17
+ @abstractmethod
18
+ def predict_video(self, video):
19
+ pass
20
+
21
+
22
+ class YOLOModelv1(BaseModel):
23
+ """Model: modelYOLOv8n_datasetDOTAv2_epochs5_batch1.pt"""
24
+
25
+ def __init__(self):
26
+ repo_root = Path(__file__).resolve().parents[1]
27
+ weights_path = (
28
+ repo_root / "models" / "modelYOLOv8n_datasetDOTAv2_epochs5_batch1.pt"
29
+ )
30
+ self.model = YOLO(str(weights_path), task="detect")
31
+
32
+ def predict_image(self, image, min_confidence):
33
+ results = self.model.predict(image, save=False, imgsz=640, conf=min_confidence)
34
+ annotated_image_filename = "annotated_image.png"
35
+ last_im = None
36
+ for result in results:
37
+ im_array = result.plot()
38
+ last_im = Image.fromarray(im_array[..., ::-1]) # RGB PIL image
39
+ last_im.save(annotated_image_filename)
40
+ # Return PIL Image for robust display in Streamlit
41
+ return last_im if last_im is not None else Image.open(annotated_image_filename)
42
+
43
+ def predict_video(self, video, min_confidence, target_dir_name="annotated_video"):
44
+ self.model.predict(
45
+ video,
46
+ save=True,
47
+ project=".",
48
+ name=target_dir_name,
49
+ exist_ok=True,
50
+ imgsz=640,
51
+ conf=min_confidence,
52
+ )
53
+
54
+
55
+ class YOLOModelv2(BaseModel):
56
+ """Model: modelYOLOv8n_datasetDIOR_epochs50_batch16.pt"""
57
+
58
+ def __init__(self):
59
+ repo_root = Path(__file__).resolve().parents[1]
60
+ weights_path = (
61
+ repo_root / "models" / "modelYOLOv8n_datasetDIOR_epochs50_batch16.pt"
62
+ )
63
+ self.model = YOLO(str(weights_path), task="detect")
64
+
65
+ def predict_image(self, image, min_confidence, classes=None):
66
+ results = self.model.predict(
67
+ image, save=False, imgsz=800, conf=min_confidence, classes=classes
68
+ )
69
+ annotated_image_filename = "annotated_image.png"
70
+ last_im = None
71
+ for result in results:
72
+ im_array = result.plot()
73
+ last_im = Image.fromarray(im_array[..., ::-1]) # RGB PIL image
74
+ last_im.save(annotated_image_filename)
75
+ # Return PIL Image for robust display in Streamlit
76
+ return last_im if last_im is not None else Image.open(annotated_image_filename)
77
+
78
+ def predict_video(
79
+ self, video, min_confidence, target_dir_name="annotated_video", classes=None
80
+ ):
81
+ self.model.predict(
82
+ video,
83
+ save=True,
84
+ project=".",
85
+ name=target_dir_name,
86
+ exist_ok=True,
87
+ imgsz=800,
88
+ conf=min_confidence,
89
+ classes=classes,
90
+ )
services/app_service/models/model_deimhgnetV2m_cpu_v0.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "target_size": [960, 1280],
3
+ "nms_iou_thr": 0.8
4
+ }
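
This sidecar carries the two inference knobs `DeimHgnetV2MDrone` reads: `target_size` (the `[height, width]` passed to the resize transform) and `nms_iou_thr` (the IoU cutoff for the NumPy NMS pass). A sketch of loading it the same way the model class does:

```python
import json
from pathlib import Path

weights = Path("models/model_deimhgnetV2m_cpu_v0.pt")
cfg = json.loads(weights.with_suffix(".json").read_text())
h, w = cfg["target_size"]      # frames are resized to 960x1280 before inference
iou_thr = cfg["nms_iou_thr"]   # overlapping boxes above this IoU are suppressed
print(f"resize to {h}x{w}, NMS IoU threshold {iou_thr}")
```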
services/app_service/models/model_deimhgnetV2m_cpu_v0.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d7cf84155a58af2e964b1818c7b648be5e4de12129fc92f78c2c222648d70db
3
+ size 78217733
services/app_service/models/model_deimhgnetV2m_cpu_v0.pt.backup ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d7cf84155a58af2e964b1818c7b648be5e4de12129fc92f78c2c222648d70db
3
+ size 78217733
services/app_service/models/model_deimhgnetV2m_cpu_v2.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "target_size": [960, 1280],
3
+ "nms_iou_thr": 0.8
4
+ }
services/app_service/models/model_deimhgnetV2m_cpu_v2.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04c4f81c686928daee97697ace4545f833c8e02fb0c248e4133cfb7e20539b9a
3
+ size 78311936
services/app_service/models/model_deimhgnetV2m_cuda_v2.json ADDED
@@ -0,0 +1,4 @@
1
+ {
2
+ "target_size": [960, 1280],
3
+ "nms_iou_thr": 0.8
4
+ }
services/app_service/models/model_deimhgnetV2m_cuda_v2.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d2812bfe5e796c8389b447d772bf0c1403556d1eca2d40d4cd5b0dbe8ee83401
3
+ size 78254179
services/app_service/models/yolov8n.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f59b3d833e2ff32e194b5bb8e08d211dc7c5bdf144b90d2c8412c47ccfc83b36
3
+ size 6549796
services/app_service/pages/bushland_beacon.py ADDED
@@ -0,0 +1,116 @@
1
+ # pages/bushland_beacon.py
2
+ import tempfile
3
+ import time
4
+ from pathlib import Path
5
+
6
+ import streamlit as st
7
+ from PIL import Image
8
+
9
+ from deim_model import DeimHgnetV2MDrone
10
+
11
+ # =============== Page setup ===============
12
+ st.set_page_config(
13
+ page_title="Bushland Beacon ", layout="wide", initial_sidebar_state="expanded"
14
+ )
15
+ st.markdown(
16
+ "<h2 style='text-align:center;margin-top:0'>S A R - X ai\u200b</h2>"
17
+ "<h2 style='text-align:center;margin-top:0'>Bushland Beacon 🚨 </h2>",
18
+ unsafe_allow_html=True,
19
+ )
20
+ # =============== Sidebar: custom menu + cards ===============
21
+ with st.sidebar:
22
+ st.header("Menu")
23
+ st.page_link("app.py", label="Home")
24
+ st.page_link("pages/lost_at_sea.py", label="Lost at Sea")
25
+ st.page_link("pages/bushland_beacon.py", label="Bushland Beacon")
26
+ st.page_link("pages/signal_watch.py", label="Signal Watch")
27
+
28
+ st.markdown("---")
29
+
30
+ # Simple "card" styling in the sidebar
31
+ st.markdown(
32
+ """
33
+ <style>
34
+ .sb-card {border:1px solid rgba(255,255,255,0.15); padding:14px; border-radius:8px; margin-bottom:16px;}
35
+ .sb-card h4 {margin:0 0 10px 0; font-weight:700;}
36
+ </style>
37
+ """,
38
+ unsafe_allow_html=True,
39
+ )
40
+
41
+ # Image Detection card
42
+ st.markdown("<h4>Image Detection</h4>", unsafe_allow_html=True)
43
+ img_file = st.file_uploader(
44
+ "Upload an image", type=["jpg", "jpeg", "png"], key="img_up"
45
+ )
46
+ run_img = st.button("🔍 Run Image Detection", use_container_width=True)
47
+
48
+ # Video Detection card
49
+ st.markdown("<h4>Video Detection</h4>", unsafe_allow_html=True)
50
+ vid_file = st.file_uploader(
51
+ "Upload a video", type=["mp4", "mov", "avi", "mkv"], key="vid_up"
52
+ )
53
+ run_vid = st.button("🎥 Run Video Detection", use_container_width=True)
54
+
55
+ # Parameters card (shared)
56
+ st.markdown("<h4>Parameters</h4>", unsafe_allow_html=True)
57
+ conf_thr = st.slider("Minimum confidence threshold", 0.05, 0.95, 0.50, 0.01)
58
+
59
+ @st.cache_resource
60
+ def load_model(device: str = "cpu"):
61
+ return DeimHgnetV2MDrone(device=device)
62
+
63
+
64
+ # =============== Detection helpers ===============
65
+ def run_image_detection(uploaded_file, conf_thr: float = 0.5):
66
+ img = Image.open(uploaded_file).convert("RGB")
67
+ st.image(img, caption="Uploaded Image", use_container_width=True)
68
+
69
+ model = load_model()
70
+ with st.spinner("Running detection..."):
71
+ annotated = model.predict_image(img, min_confidence=conf_thr)
72
+
73
+ st.subheader("🎯 Detection Results")
74
+ st.image(annotated, caption="Detections", use_container_width=True)
75
+
76
+
77
+ def run_video_detection(vid_bytes, conf_thr: float = 0.5):
78
+ tmp_in = Path(tempfile.gettempdir()) / f"in_{int(time.time())}.mp4"
79
+ with open(tmp_in, "wb") as f:
80
+ f.write(vid_bytes)
81
+
82
+ model = load_model()
83
+ with st.spinner("Processing video…"):
84
+ try:
85
+ out_path = model.predict_video(
86
+ tmp_in,
87
+ min_confidence=conf_thr,
88
+ target_dir_name="annotated_video",
89
+ )
90
+ except Exception as exc: # pragma: no cover - surface message to UI
91
+ st.error(f"Video detection failed: {exc}")
92
+ return
93
+
94
+ st.success("Done!")
95
+ st.video(str(out_path))
96
+ with open(out_path, "rb") as f:
97
+ st.download_button(
98
+ "Download processed video",
99
+ data=f.read(),
100
+ file_name=Path(out_path).name,
101
+ mime="video/mp4",
102
+ )
103
+
104
+
105
+ # =============== Main: hook up actions ===============
106
+ if run_img:
107
+ if img_file is None:
108
+ st.warning("Please upload an image first.")
109
+ else:
110
+ run_image_detection(img_file, conf_thr=conf_thr)
111
+
112
+ if run_vid:
113
+ if vid_file is None:
114
+ st.warning("Please upload a video first.")
115
+ else:
116
+ run_video_detection(vid_bytes=vid_file.read(), conf_thr=conf_thr)
services/app_service/pages/lost_at_sea.py ADDED
@@ -0,0 +1,118 @@
1
+ # pages/lost_at_sea.py
2
+ import tempfile
3
+ import time
4
+ from pathlib import Path
5
+
6
+ import streamlit as st
7
+ from deim_model import DeimHgnetV2MDrone
8
+ from PIL import Image
9
+
10
+ # =============== Page setup ===============
11
+ st.set_page_config(
12
+ page_title="Lost at Sea", layout="wide", initial_sidebar_state="expanded"
13
+ )
14
+
15
+ st.markdown(
16
+ "<h2 style='text-align:center;margin-top:0'>S A R - X ai\u200b</h2>"
17
+ "<h2 style='text-align:center;margin-top:0'>Lost at Sea 🌊</h2>",
18
+ unsafe_allow_html=True,
19
+ )
20
+
21
+ # =============== Sidebar: custom menu + cards ===============
22
+ with st.sidebar:
23
+ st.header("Menu")
24
+ st.page_link("app.py", label="Home")
25
+ st.page_link("pages/lost_at_sea.py", label="Lost at Sea")
26
+ st.page_link("pages/bushland_beacon.py", label="Bushland Beacon")
27
+ st.page_link("pages/signal_watch.py", label="Signal Watch")
28
+
29
+ st.markdown("---")
30
+
31
+ # Simple "card" styling in the sidebar
32
+ st.markdown(
33
+ """
34
+ <style>
35
+ .sb-card {border:1px solid rgba(255,255,255,0.15); padding:14px; border-radius:8px; margin-bottom:16px;}
36
+ .sb-card h4 {margin:0 0 10px 0; font-weight:700;}
37
+ </style>
38
+ """,
39
+ unsafe_allow_html=True,
40
+ )
41
+
42
+ # Image Detection card
43
+ st.markdown("<h4>Image Detection</h4>", unsafe_allow_html=True)
44
+ img_file = st.file_uploader(
45
+ "Upload an image", type=["jpg", "jpeg", "png"], key="img_up"
46
+ )
47
+ run_img = st.button("🔍 Run Image Detection", use_container_width=True)
48
+
49
+ # Video Detection card
50
+ st.markdown("<h4>Video Detection</h4>", unsafe_allow_html=True)
51
+ vid_file = st.file_uploader(
52
+ "Upload a video", type=["mp4", "mov", "avi", "mkv"], key="vid_up"
53
+ )
54
+ run_vid = st.button("🎥 Run Video Detection", use_container_width=True)
55
+
56
+ # Parameters card (shared)
57
+ st.markdown("<h4>Parameters</h4>", unsafe_allow_html=True)
58
+ conf_thr = st.slider("Minimum confidence threshold", 0.05, 0.95, 0.50, 0.01)
59
+
60
+
61
+ @st.cache_resource
62
+ def load_model(device: str = "cpu"):
63
+ return DeimHgnetV2MDrone(device=device)
64
+
65
+
66
+ # =============== Detection helpers ===============
67
+ def run_image_detection(uploaded_file, conf_thr: float = 0.5):
68
+ img = Image.open(uploaded_file).convert("RGB")
69
+ st.image(img, caption="Uploaded Image", use_container_width=True)
70
+
71
+ model = load_model()
72
+ with st.spinner("Running detection..."):
73
+ annotated = model.predict_image(img, min_confidence=conf_thr)
74
+
75
+ st.subheader("🎯 Detection Results")
76
+ st.image(annotated, caption="Detections", use_container_width=True)
77
+
78
+
79
+ def run_video_detection(vid_bytes, conf_thr: float = 0.5):
80
+ tmp_in = Path(tempfile.gettempdir()) / f"in_{int(time.time())}.mp4"
81
+ with open(tmp_in, "wb") as f:
82
+ f.write(vid_bytes)
83
+
84
+ model = load_model()
85
+ with st.spinner("Processing video…"):
86
+ try:
87
+ out_path = model.predict_video(
88
+ tmp_in,
89
+ min_confidence=conf_thr,
90
+ target_dir_name="annotated_video",
91
+ )
92
+ except Exception as exc: # pragma: no cover - surface message to UI
93
+ st.error(f"Video detection failed: {exc}")
94
+ return
95
+
96
+ st.success("Done!")
97
+ st.video(str(out_path))
98
+ with open(out_path, "rb") as f:
99
+ st.download_button(
100
+ "Download processed video",
101
+ data=f.read(),
102
+ file_name=Path(out_path).name,
103
+ mime="video/mp4",
104
+ )
105
+
106
+
107
+ # =============== Main: hook up actions ===============
108
+ if run_img:
109
+ if img_file is None:
110
+ st.warning("Please upload an image first.")
111
+ else:
112
+ run_image_detection(img_file, conf_thr=conf_thr)
113
+
114
+ if run_vid:
115
+ if vid_file is None:
116
+ st.warning("Please upload a video first.")
117
+ else:
118
+ run_video_detection(vid_bytes=vid_file.read(), conf_thr=conf_thr)
services/app_service/pages/signal_watch.py ADDED
@@ -0,0 +1,255 @@
1
+ import tempfile
2
+ import time
3
+ from pathlib import Path
4
+
5
+ import cv2
6
+ import numpy as np
7
+ import streamlit as st
8
+ from deim_model import DeimHgnetV2MDrone
9
+ from PIL import Image
10
+
11
+ # --- Page setup ---
12
+ st.set_page_config(page_title="signal watch", layout="wide")
13
+
14
+ st.markdown(
15
+ "<h2 style='text-align:center;margin-top:0'>S A R - X ai\u200b</h2>"
16
+ "<h2 style='text-align:center;margin-top:0'>Signal Watch 👁️</h2>",
17
+ unsafe_allow_html=True,
18
+ )
19
+
20
+ # =============== Sidebar: custom menu + cards ===============
21
+ with st.sidebar:
22
+ st.header("Menu")
23
+ st.page_link("app.py", label="Home")
24
+ st.page_link("pages/lost_at_sea.py", label="Lost at Sea")
25
+ st.page_link("pages/bushland_beacon.py", label="Bushland Beacon")
26
+ st.page_link("pages/signal_watch.py", label="Signal Watch")
27
+
28
+ st.markdown("---")
29
+ # ---------- Model loading ----------
30
+ try:
31
+ from ultralytics import YOLO
32
+
33
+ YOLO_AVAILABLE = True
34
+ except Exception:
35
+ YOLO = None
36
+ YOLO_AVAILABLE = False
37
+
38
+
39
+ MODELS_DIR = Path(__file__).resolve().parents[1] / "models"
40
+
41
+
42
+ def _discover_model_entries() -> list[tuple[str, str]]:
43
+ entries: list[tuple[str, str]] = [("DEIM (NATSAR bespoke)", "deim")]
44
+
45
+ if MODELS_DIR.exists():
46
+ for pt_path in sorted(MODELS_DIR.glob("*.pt")):
47
+ name_lower = pt_path.name.lower()
48
+ if "deimhgnet" in name_lower:
49
+ continue
50
+ entries.append((pt_path.name, f"yolo:{pt_path.resolve()}"))
51
+
52
+ return entries
53
+
54
+
55
+ MODEL_ENTRIES = _discover_model_entries()
56
+
57
+
58
+ @st.cache_resource
59
+ def load_model(model_key: str, device: str = "cpu"):
60
+ if model_key == "deim":
61
+ return DeimHgnetV2MDrone(device=device)
62
+ if not model_key.startswith("yolo:"):
63
+ raise ValueError(f"Unknown model key: {model_key}")
64
+ if not YOLO_AVAILABLE:
65
+ raise RuntimeError(
66
+ "Ultralytics YOLO weights requested but the package is not installed."
67
+ )
68
+
69
+ weight_path = Path(model_key.split(":", 1)[1])
70
+ if not weight_path.exists():
71
+ raise FileNotFoundError(
72
+ f"YOLO weights not found at {weight_path}. Place the .pt file there or "
73
+ "choose the DEIM model."
74
+ )
75
+ return YOLO(str(weight_path))
76
+
77
+
78
+ def draw_yolo_dets(frame_bgr, result, show_score=True):
79
+ out = frame_bgr.copy()
80
+ boxes = getattr(result, "boxes", None)
81
+ if boxes is None:
82
+ return out
83
+ names = result.names
84
+ cls_ids = boxes.cls.cpu().numpy().astype(int)
85
+ confs = boxes.conf.cpu().numpy()
86
+ xyxy = boxes.xyxy.cpu().numpy()
87
+ for (x1, y1, x2, y2), cls, score in zip(xyxy, cls_ids, confs):
88
+ x1, y1, x2, y2 = map(int, (x1, y1, x2, y2))
89
+ label = names.get(int(cls), str(int(cls)))
90
+ if show_score:
91
+ label = f"{label} {score:.2f}"
92
+ cv2.rectangle(out, (x1, y1), (x2, y2), (0, 255, 0), 2)
93
+ (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
94
+ cv2.rectangle(out, (x1, y1 - th - 8), (x1 + tw + 6, y1), (0, 255, 0), -1)
95
+ cv2.putText(
96
+ out,
97
+ label,
98
+ (x1 + 3, max(0, y1 - 6)),
99
+ cv2.FONT_HERSHEY_SIMPLEX,
100
+ 0.6,
101
+ (0, 0, 0),
102
+ 2,
103
+ cv2.LINE_AA,
104
+ )
105
+ return out
106
+
107
+
108
+ # ---------- Sidebar: Video feed dropdown ----------
109
+ st.sidebar.header("Video feed")
110
+
111
+ VIDEO_DIR = Path(__file__).resolve().parents[1] / "resources" / "videos" / "raw"
112
+
113
+ video_map = {}
114
+ if VIDEO_DIR.exists():
115
+ for p in sorted(VIDEO_DIR.glob("*")):
116
+ if p.suffix.lower() in {".mp4", ".mov", ".avi", ".mkv"}:
117
+ video_map[p.name] = str(p)
118
+
119
+ source_options = ["Webcam (0)"] + list(video_map.keys()) + ["Custom URL…"]
120
+ src_choice = st.sidebar.selectbox("Source", source_options, index=0)
121
+
122
+ custom_url = None
123
+ if src_choice == "Custom URL…":
124
+ custom_url = st.sidebar.text_input(
125
+ "RTSP/HTTP URL", placeholder="rtsp://... or https://..."
126
+ )
127
+
128
+ st.sidebar.header("Parameters")
129
+ label_to_key: dict[str, str] = {label: key for label, key in MODEL_ENTRIES}
130
+ available_model_labels = []
131
+ for label, key in MODEL_ENTRIES:
132
+ if key == "deim":
133
+ available_model_labels.append(label)
134
+ continue
135
+ if not key.startswith("yolo:"):
136
+ continue
137
+ weight_path = Path(key.split(":", 1)[1])
138
+ if YOLO_AVAILABLE and weight_path.exists():
139
+ available_model_labels.append(label)
140
+
141
+ if not available_model_labels:
142
+ available_model_labels = ["DEIM (NATSAR bespoke)"]
143
+
144
+ model_label = st.sidebar.selectbox("Model", available_model_labels, index=0)
145
+ model_key = label_to_key.get(model_label, "deim")
146
+ confidence_threshold = st.sidebar.slider("Minimum Confidence", 0.0, 1.0, 0.5, 0.01)
147
+ frame_stride = st.sidebar.slider("Process every Nth frame", 1, 5, 2, 1)
148
+ max_seconds = st.sidebar.slider("Max seconds (webcam/live)", 3, 60, 12, 1)
149
+ run_detection = st.sidebar.button("🎥 Run Detection", use_container_width=True)
150
+
151
+
152
+ # ---------- Main UI logic ----------
153
+
154
+
155
+ def resolve_source():
156
+ if src_choice == "Webcam (0)":
157
+ return 0
158
+ if src_choice == "Custom URL…":
159
+ return custom_url.strip() if custom_url else None
160
+ return video_map.get(src_choice)
161
+
162
+
163
+ def run_video_feed_detection(cap_source, conf_thr: float, stride: int, model_key: str):
164
+ try:
165
+ model = load_model(model_key)
166
+ except Exception as exc:
167
+ st.error(str(exc))
168
+ return
169
+
170
+ cap = cv2.VideoCapture(cap_source)
171
+ if not cap.isOpened():
172
+ st.error("Failed to open the selected source.")
173
+ return
174
+
175
+ is_file = isinstance(cap_source, str) and Path(str(cap_source)).exists()
176
+ fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
177
+ total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT) or 0)
178
+
179
+ frame_ph = st.empty()
180
+ prog = st.progress(0.0, text="Processing…")
181
+ start_t = time.time()
182
+ i = 0
183
+
184
+ writer = None
185
+ out_path = None
186
+ if is_file:
187
+ W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
188
+ H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
189
+ out_path = Path(tempfile.gettempdir()) / f"out_{int(time.time())}.mp4"
190
+ writer = cv2.VideoWriter(
191
+ str(out_path), cv2.VideoWriter_fourcc(*"mp4v"), fps, (W, H)
192
+ )
193
+
194
+ while True:
195
+ ok, frame = cap.read()
196
+ if not ok:
197
+ break
198
+
199
+ if i % max(1, stride) == 0:
200
+ if model_key == "deim":
201
+ frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
202
+ annotated_pil = model.predict_image( # type: ignore[call-arg]
203
+ Image.fromarray(frame_rgb), min_confidence=conf_thr
204
+ )
205
+ vis = cv2.cvtColor(np.array(annotated_pil), cv2.COLOR_RGB2BGR)
206
+ else:
207
+ results = model.predict(frame, conf=conf_thr, iou=0.5, verbose=False)
208
+ vis = draw_yolo_dets(frame, results[0], show_score=True)
209
+ frame_ph.image(
210
+ cv2.cvtColor(vis, cv2.COLOR_BGR2RGB), use_container_width=True
211
+ )
212
+ if writer is not None:
213
+ writer.write(vis)
214
+
215
+ i += 1
216
+
217
+ if is_file and total:
218
+ prog.progress(min(i / total, 1.0), text=f"Processing… {i}/{total}")
219
+ else:
220
+ elapsed = time.time() - start_t
221
+ prog.progress(
222
+ min(elapsed / max(1, max_seconds), 1.0), text=f"Live… {int(elapsed)}s"
223
+ )
224
+ if elapsed >= max_seconds:
225
+ break
226
+
227
+ cap.release()
228
+ if writer is not None:
229
+ writer.release()
230
+
231
+ st.success("Done!")
232
+ if out_path and out_path.exists():
233
+ st.video(str(out_path))
234
+ with open(out_path, "rb") as f:
235
+ st.download_button(
236
+ "Download processed video",
237
+ data=f.read(),
238
+ file_name="detections.mp4",
239
+ mime="video/mp4",
240
+ )
241
+
242
+
243
+ if run_detection:
244
+ src = resolve_source()
245
+     if src is None or src == "":  # webcam index 0 is falsy but a valid source
246
+ st.warning("Please select a valid source (pick a file or enter a URL).")
247
+ else:
248
+ run_video_feed_detection(
249
+ cap_source=src,
250
+ conf_thr=confidence_threshold,
251
+ stride=frame_stride,
252
+ model_key=model_key,
253
+ )
254
+ else:
255
+ st.info("Pick a video source from the sidebar and click **Run Detection**.")
services/app_service/resources/images/rescue.png ADDED

Git LFS Details

  • SHA256: 4bf831b4480aa9f7fbc18f4504831d6ead8e05fac2e9d7482202445413c8dcb2
  • Pointer size: 132 Bytes
  • Size of remote file: 2.83 MB
services/app_service/resources/images/rescue1.png ADDED

Git LFS Details

  • SHA256: 4ef52b4d27f661d63eb444195b6512a5127b9c79516f7e2262bf369acc89bb81
  • Pointer size: 132 Bytes
  • Size of remote file: 3.22 MB
services/app_service/resources/images/rescue2.jpg ADDED

Git LFS Details

  • SHA256: 7bf56d5bac1bed94b7d93af2ef5701a41c72b6b3849eea355405700ad20c44dd
  • Pointer size: 132 Bytes
  • Size of remote file: 1.79 MB
services/app_service/resources/images/test.jpg ADDED

Git LFS Details

  • SHA256: cca620cf3e89d91bd2f6f52816355dbb938b00e3d2c724d9b897d5007f8a0500
  • Pointer size: 131 Bytes
  • Size of remote file: 363 kB
services/app_service/utils/__init__.py ADDED
File without changes
services/app_service/utils/cache.py ADDED
@@ -0,0 +1,6 @@
1
+ import hashlib
+ import json
2
+
3
+ def hash_config(*args, **kwargs):
4
+ payload = {"args": args, "kwargs": kwargs}
5
+ s = json.dumps(payload, sort_keys=True, default=str).encode("utf-8")
6
+ return hashlib.md5(s).hexdigest()
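
`hash_config` yields a stable cache key for any mix of positional and keyword arguments (non-JSON values fall back to `str()` via `default=str`). A quick sketch:

```python
from utils.cache import hash_config

key_a = hash_config("yolov8n.pt", conf=0.5, stride=2)
key_b = hash_config("yolov8n.pt", stride=2, conf=0.5)
assert key_a == key_b  # sort_keys=True makes kwargs ordering irrelevant
```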
services/app_service/utils/inference.py ADDED
@@ -0,0 +1,30 @@
+ from ultralytics import YOLO
+
+
+ # Simple in-memory cache keyed by (key, frame_idx)
+ class CachedDetections:
+     def __init__(self):
+         self._m = {}
+
+     def get(self, key, i):
+         return self._m.get((key, i))
+
+     def put(self, key, i, dets):
+         self._m[(key, i)] = dets
+
+
+ class Detector:
+     def __init__(self, model_name, conf_thr=0.35, iou_thr=0.5):
+         self.model = YOLO(model_name)
+         self.conf_thr = conf_thr
+         self.iou_thr = iou_thr
+
+     def predict(self, frame_bgr):
+         # Ultralytics treats numpy arrays as BGR (the OpenCV convention),
+         # so frames can be passed through without a channel swap.
+         results = self.model.predict(
+             frame_bgr, conf=self.conf_thr, iou=self.iou_thr, verbose=False
+         )
+         dets = []
+         if not results:
+             return dets
+         r = results[0]
+         if r.boxes is None:
+             return dets
+         for b in r.boxes:
+             cls_id = int(b.cls[0])
+             conf = float(b.conf[0])
+             x1, y1, x2, y2 = map(float, b.xyxy[0].tolist())
+             label = r.names.get(cls_id, str(cls_id))
+             dets.append((label, conf, (x1, y1, x2, y2)))
+         return dets
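
The cache and detector are meant to compose: detections for a frame index are computed once per configuration and reused afterwards. A minimal sketch wiring them to `frame_generator` from `utils/video_io.py` (the video path is hypothetical; run from `services/app_service/`):

```python
from utils.cache import hash_config
from utils.inference import CachedDetections, Detector
from utils.video_io import frame_generator

detector = Detector("yolov8n.pt", conf_thr=0.35)
cache = CachedDetections()
key = hash_config("yolov8n.pt", conf=0.35, stride=2)

for i, frame in enumerate(frame_generator("sample.mp4", stride=2)):
    dets = cache.get(key, i)
    if dets is None:
        dets = detector.predict(frame)
        cache.put(key, i, dets)
    print(i, [(label, round(conf, 2)) for label, conf, _ in dets])
```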
services/app_service/utils/ir_sim.py ADDED
@@ -0,0 +1,7 @@
1
+ import cv2
2
+
3
+ def to_ir(frame_bgr):
4
+ gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
5
+ gray = cv2.equalizeHist(gray)
6
+ ir = cv2.applyColorMap(gray, cv2.COLORMAP_INFERNO)
7
+ return ir
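
`to_ir` fakes a thermal look with grayscale conversion, histogram equalisation, and the INFERNO colormap. A one-frame sketch (the image path is hypothetical):

```python
import cv2

from utils.ir_sim import to_ir

frame = cv2.imread("resources/images/test.jpg")  # BGR, as OpenCV loads it
assert frame is not None, "image not found"
cv2.imwrite("test_ir.png", to_ir(frame))
```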
services/app_service/utils/overlays.py ADDED
@@ -0,0 +1,20 @@
1
+ import cv2
2
+ import time
3
+
4
+ COLOR = (0, 255, 0)
5
+
6
+ def draw_boxes(img, detections, show_scores=True):
7
+ out = img.copy()
8
+ for cls, conf, (x1,y1,x2,y2) in detections:
9
+ cv2.rectangle(out, (int(x1),int(y1)), (int(x2),int(y2)), COLOR, 2)
10
+ if show_scores:
11
+ label = f"{cls} {conf:.2f}"
12
+ cv2.putText(out, label, (int(x1), int(y1)-6),
13
+ cv2.FONT_HERSHEY_SIMPLEX, 0.6, COLOR, 2, cv2.LINE_AA)
14
+ return out
15
+
16
+ def draw_hud(img, fps=0.0, model="yolov8n.pt"):
17
+ out = img.copy()
18
+ hud = f"{model} | {fps:.1f} FPS | {time.strftime('%H:%M:%S')}"
19
+ cv2.putText(out, hud, (12, 28), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2, cv2.LINE_AA)
20
+ return out
services/app_service/utils/video_io.py ADDED
@@ -0,0 +1,25 @@
1
+ import cv2
2
+
3
+ def frame_generator(video_path, stride=1):
4
+ cap = cv2.VideoCapture(video_path)
5
+ if not cap.isOpened():
6
+ raise FileNotFoundError(f"Cannot open video: {video_path}")
7
+ i = 0
8
+ while True:
9
+ ok, frame = cap.read()
10
+ if not ok:
11
+ break
12
+ if i % stride == 0:
13
+ yield frame
14
+ i += 1
15
+ cap.release()
16
+
17
+ class VideoWriter:
18
+ def __init__(self, out_path, fps, width, height, fourcc="mp4v"):
19
+ self._writer = cv2.VideoWriter(
20
+ out_path, cv2.VideoWriter_fourcc(*fourcc), fps, (width, height)
21
+ )
22
+ def write(self, frame):
23
+ self._writer.write(frame)
24
+ def release(self):
25
+ self._writer.release()
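
A sketch tying the I/O helpers to the overlay utilities: read frames, stamp the HUD, and write a clip back out. Paths are hypothetical; the writer's frame size must match the incoming frames:

```python
import cv2

from utils.overlays import draw_hud
from utils.video_io import VideoWriter, frame_generator

src = "sample.mp4"
cap = cv2.VideoCapture(src)
fps = cap.get(cv2.CAP_PROP_FPS) or 25.0
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
cap.release()

writer = VideoWriter("hud_out.mp4", fps, w, h)
for frame in frame_generator(src, stride=1):
    writer.write(draw_hud(frame, fps=fps))
writer.release()
```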
services/training_service/LICENSE ADDED
@@ -0,0 +1,208 @@
1
+ Copyright (C) INTELLINDUST INFORMATION TECHNOLOGY (SHENZHEN) CO., LTD. and all its affiliates.
2
+
3
+ DEIM is licensed under the Apache License.
4
+
5
+ A copy of the Apache License License is included in this file.
6
+
7
+
8
+ Apache License
9
+ Version 2.0, January 2004
10
+ http://www.apache.org/licenses/
11
+
12
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
13
+
14
+ 1. Definitions.
15
+
16
+ "License" shall mean the terms and conditions for use, reproduction,
17
+ and distribution as defined by Sections 1 through 9 of this document.
18
+
19
+ "Licensor" shall mean the copyright owner or entity authorized by
20
+ the copyright owner that is granting the License.
21
+
22
+ "Legal Entity" shall mean the union of the acting entity and all
23
+ other entities that control, are controlled by, or are under common
24
+ control with that entity. For the purposes of this definition,
25
+ "control" means (i) the power, direct or indirect, to cause the
26
+ direction or management of such entity, whether by contract or
27
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
28
+ outstanding shares, or (iii) beneficial ownership of such entity.
29
+
30
+ "You" (or "Your") shall mean an individual or Legal Entity
31
+ exercising permissions granted by this License.
32
+
33
+ "Source" form shall mean the preferred form for making modifications,
34
+ including but not limited to software source code, documentation
35
+ source, and configuration files.
36
+
37
+ "Object" form shall mean any form resulting from mechanical
38
+ transformation or translation of a Source form, including but
39
+ not limited to compiled object code, generated documentation,
40
+ and conversions to other media types.
41
+
42
+ "Work" shall mean the work of authorship, whether in Source or
43
+ Object form, made available under the License, as indicated by a
44
+ copyright notice that is included in or attached to the work
45
+ (an example is provided in the Appendix below).
46
+
47
+ "Derivative Works" shall mean any work, whether in Source or Object
48
+ form, that is based on (or derived from) the Work and for which the
49
+ editorial revisions, annotations, elaborations, or other modifications
50
+ represent, as a whole, an original work of authorship. For the purposes
51
+ of this License, Derivative Works shall not include works that remain
52
+ separable from, or merely link (or bind by name) to the interfaces of,
53
+ the Work and Derivative Works thereof.
54
+
55
+ "Contribution" shall mean any work of authorship, including
56
+ the original version of the Work and any modifications or additions
57
+ to that Work or Derivative Works thereof, that is intentionally
58
+ submitted to Licensor for inclusion in the Work by the copyright owner
59
+ or by an individual or Legal Entity authorized to submit on behalf of
60
+ the copyright owner. For the purposes of this definition, "submitted"
61
+ means any form of electronic, verbal, or written communication sent
62
+ to the Licensor or its representatives, including but not limited to
63
+ communication on electronic mailing lists, source code control systems,
64
+ and issue tracking systems that are managed by, or on behalf of, the
65
+ Licensor for the purpose of discussing and improving the Work, but
66
+ excluding communication that is conspicuously marked or otherwise
67
+ designated in writing by the copyright owner as "Not a Contribution."
68
+
69
+ "Contributor" shall mean Licensor and any individual or Legal Entity
70
+ on behalf of whom a Contribution has been received by Licensor and
71
+ subsequently incorporated within the Work.
72
+
73
+ 2. Grant of Copyright License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ copyright license to reproduce, prepare Derivative Works of,
77
+ publicly display, publicly perform, sublicense, and distribute the
78
+ Work and such Derivative Works in Source or Object form.
79
+
80
+ 3. Grant of Patent License. Subject to the terms and conditions of
81
+ this License, each Contributor hereby grants to You a perpetual,
82
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
83
+ (except as stated in this section) patent license to make, have made,
84
+ use, offer to sell, sell, import, and otherwise transfer the Work,
85
+ where such license applies only to those patent claims licensable
86
+ by such Contributor that are necessarily infringed by their
87
+ Contribution(s) alone or by combination of their Contribution(s)
88
+ with the Work to which such Contribution(s) was submitted. If You
89
+ institute patent litigation against any entity (including a
90
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
91
+ or a Contribution incorporated within the Work constitutes direct
92
+ or contributory patent infringement, then any patent licenses
93
+ granted to You under this License for that Work shall terminate
94
+ as of the date such litigation is filed.
95
+
96
+ 4. Redistribution. You may reproduce and distribute copies of the
97
+ Work or Derivative Works thereof in any medium, with or without
98
+ modifications, and in Source or Object form, provided that You
99
+ meet the following conditions:
100
+
101
+ (a) You must give any other recipients of the Work or
102
+ Derivative Works a copy of this License; and
103
+
104
+ (b) You must cause any modified files to carry prominent notices
105
+ stating that You changed the files; and
106
+
107
+ (c) You must retain, in the Source form of any Derivative Works
108
+ that You distribute, all copyright, patent, trademark, and
109
+ attribution notices from the Source form of the Work,
110
+ excluding those notices that do not pertain to any part of
111
+ the Derivative Works; and
112
+
113
+ (d) If the Work includes a "NOTICE" text file as part of its
114
+ distribution, then any Derivative Works that You distribute must
115
+ include a readable copy of the attribution notices contained
116
+ within such NOTICE file, excluding those notices that do not
117
+ pertain to any part of the Derivative Works, in at least one
118
+ of the following places: within a NOTICE text file distributed
119
+ as part of the Derivative Works; within the Source form or
120
+ documentation, if provided along with the Derivative Works; or,
121
+ within a display generated by the Derivative Works, if and
122
+ wherever such third-party notices normally appear. The contents
123
+ of the NOTICE file are for informational purposes only and
124
+ do not modify the License. You may add Your own attribution
125
+ notices within Derivative Works that You distribute, alongside
126
+ or as an addendum to the NOTICE text from the Work, provided
127
+ that such additional attribution notices cannot be construed
128
+ as modifying the License.
129
+
130
+ You may add Your own copyright statement to Your modifications and
131
+ may provide additional or different license terms and conditions
132
+ for use, reproduction, or distribution of Your modifications, or
133
+ for any such Derivative Works as a whole, provided Your use,
134
+ reproduction, and distribution of the Work otherwise complies with
135
+ the conditions stated in this License.
136
+
137
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
138
+ any Contribution intentionally submitted for inclusion in the Work
139
+ by You to the Licensor shall be under the terms and conditions of
140
+ this License, without any additional terms or conditions.
141
+ Notwithstanding the above, nothing herein shall supersede or modify
142
+ the terms of any separate license agreement you may have executed
143
+ with Licensor regarding such Contributions.
144
+
145
+ 6. Trademarks. This License does not grant permission to use the trade
146
+ names, trademarks, service marks, or product names of the Licensor,
147
+ except as required for reasonable and customary use in describing the
148
+ origin of the Work and reproducing the content of the NOTICE file.
149
+
150
+ 7. Disclaimer of Warranty. Unless required by applicable law or
151
+ agreed to in writing, Licensor provides the Work (and each
152
+ Contributor provides its Contributions) on an "AS IS" BASIS,
153
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
154
+ implied, including, without limitation, any warranties or conditions
155
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
156
+ PARTICULAR PURPOSE. You are solely responsible for determining the
157
+ appropriateness of using or redistributing the Work and assume any
158
+ risks associated with Your exercise of permissions under this License.
159
+
160
+ 8. Limitation of Liability. In no event and under no legal theory,
161
+ whether in tort (including negligence), contract, or otherwise,
162
+ unless required by applicable law (such as deliberate and grossly
163
+ negligent acts) or agreed to in writing, shall any Contributor be
164
+ liable to You for damages, including any direct, indirect, special,
165
+ incidental, or consequential damages of any character arising as a
166
+ result of this License or out of the use or inability to use the
167
+ Work (including but not limited to damages for loss of goodwill,
168
+ work stoppage, computer failure or malfunction, or any and all
169
+ other commercial damages or losses), even if such Contributor
170
+ has been advised of the possibility of such damages.
171
+
172
+ 9. Accepting Warranty or Additional Liability. While redistributing
173
+ the Work or Derivative Works thereof, You may choose to offer,
174
+ and charge a fee for, acceptance of support, warranty, indemnity,
175
+ or other liability obligations and/or rights consistent with this
176
+ License. However, in accepting such obligations, You may act only
177
+ on Your own behalf and on Your sole responsibility, not on behalf
178
+ of any other Contributor, and only if You agree to indemnify,
179
+ defend, and hold each Contributor harmless for any liability
180
+ incurred by, or claims asserted against, such Contributor by reason
181
+ of your accepting any such warranty or additional liability.
182
+
183
+ END OF TERMS AND CONDITIONS
184
+
185
+ APPENDIX: How to apply the Apache License to your work.
186
+
187
+ To apply the Apache License to your work, attach the following
188
+ boilerplate notice, with the fields enclosed by brackets "[]"
189
+ replaced with your own identifying information. (Don't include
190
+ the brackets!) The text should be enclosed in the appropriate
191
+ comment syntax for the file format. We also recommend that a
192
+ file or class name and description of purpose be included on the
193
+ same "printed page" as the copyright notice for easier
194
+ identification within third-party archives.
195
+
196
+ Copyright [yyyy] [name of copyright owner]
197
+
198
+ Licensed under the Apache License, Version 2.0 (the "License");
199
+ you may not use this file except in compliance with the License.
200
+ You may obtain a copy of the License at
201
+
202
+ http://www.apache.org/licenses/LICENSE-2.0
203
+
204
+ Unless required by applicable law or agreed to in writing, software
205
+ distributed under the License is distributed on an "AS IS" BASIS,
206
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
207
+ See the License for the specific language governing permissions and
208
+ limitations under the License.
services/training_service/README.md ADDED
@@ -0,0 +1,449 @@
1
+ <h2 align="center">
2
+ DEIM: DETR with Improved Matching for Fast Convergence
3
+ </h2>
4
+
5
+ <p align="center">
6
+ <a href="https://github.com/ShihuaHuang95/DEIM/blob/master/LICENSE">
7
+ <img alt="license" src="https://img.shields.io/badge/LICENSE-Apache%202.0-blue">
8
+ </a>
9
+ <a href="https://arxiv.org/abs/2412.04234">
10
+ <img alt="arXiv" src="https://img.shields.io/badge/arXiv-2412.04234-red">
11
+ </a>
12
+ <a href="https://www.shihuahuang.cn/DEIM/">
13
+ <img alt="project webpage" src="https://img.shields.io/badge/Webpage-DEIM-purple">
14
+ </a>
15
+ <a href="https://github.com/ShihuaHuang95/DEIM/pulls">
16
+ <img alt="prs" src="https://img.shields.io/github/issues-pr/ShihuaHuang95/DEIM">
17
+ </a>
18
+ <a href="https://github.com/ShihuaHuang95/DEIM/issues">
19
+ <img alt="issues" src="https://img.shields.io/github/issues/ShihuaHuang95/DEIM?color=olive">
20
+ </a>
21
+ <a href="https://github.com/ShihuaHuang95/DEIM">
22
+ <img alt="stars" src="https://img.shields.io/github/stars/ShihuaHuang95/DEIM">
23
+ </a>
24
+ <a href="mailto:shihuahuang95@gmail.com">
25
+ <img alt="Contact Us" src="https://img.shields.io/badge/Contact-Email-yellow">
26
+ </a>
27
+ </p>
28
+
29
+ <p align="center">
30
+ DEIM is an advanced training framework designed to enhance the matching mechanism in DETRs, enabling faster convergence and improved accuracy. It serves as a robust foundation for future research and applications in the field of real-time object detection.
31
+ </p>
32
+
33
+ ---
34
+
35
+
36
+ <div align="center">
37
+ <a href="http://www.shihuahuang.cn">Shihua Huang</a><sup>1</sup>,
38
+ <a href="https://scholar.google.com/citations?user=tIFWBcQAAAAJ&hl=en">Zhichao Lu</a><sup>2</sup>,
39
+ <a href="https://vinthony.github.io/academic/">Xiaodong Cun</a><sup>3</sup>,
40
+ Yongjun Yu<sup>1</sup>,
41
+ Xiao Zhou<sup>4</sup>,
42
+ <a href="https://xishen0220.github.io">Xi Shen</a><sup>1*</sup>
43
+ </div>
44
+
45
+
46
+ <p align="center">
47
+ <i>
48
+ 1. Intellindust AI Lab &nbsp; 2. City University of Hong Kong &nbsp; 3. Great Bay University &nbsp; 4. Hefei Normal University
49
+ </i>
50
+ </p>
51
+
52
+ <p align="center">
53
+   <strong>📧 Corresponding author:</strong> <a href="mailto:shenxiluc@gmail.com">shenxiluc@gmail.com</a>
54
+ </p>
55
+
56
+ <p align="center">
57
+ <a href="https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=deim-detr-with-improved-matching-for-fast">
58
+ <img alt="sota" src="https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/deim-detr-with-improved-matching-for-fast/real-time-object-detection-on-coco">
59
+ </a>
60
+ </p>
61
+
62
+ <p align="center">
63
+ <strong>If you like our work, please give us a ⭐!</strong>
64
+ </p>
65
+
66
+
67
+ <p align="center">
68
+ <img src="./figures/teaser_a.png" alt="Image 1" width="49%">
69
+ <img src="./figures/teaser_b.png" alt="Image 2" width="49%">
70
+ </p>
71
+
73
+
74
+
75
+
76
+ ## 🚀 Updates
77
+ - [x] **\[2025.06.24\]** DEIMv2 is coming soon: our next-gen detection series, along with three ultra-light variants: Pico (1.5M), Femto (0.96M), and Atto (0.49M), all delivering SoTA performance. Atto, in particular, is tailored for mobile devices, achieving 23.8 AP on COCO at 320×320 resolution.
78
+ - [x] **\[2025.03.12\]** The Objects365-pretrained [DEIM-D-FINE-X](https://drive.google.com/file/d/1RMNrHh3bYN0FfT5ZlWhXtQxkG23xb2xj/view?usp=drive_link) model is released; it reaches 59.5% AP after fine-tuning on COCO for 24 epochs.
79
+ - [x] **\[2025.03.05\]** The Nano DEIM model is released.
80
+ - [x] **\[2025.02.27\]** The DEIM paper is accepted to CVPR 2025. Thanks to all co-authors.
81
+ - [x] **\[2024.12.26\]** A more efficient implementation of Dense O2O, achieving nearly a 30% improvement in loading speed (See [the pull request](https://github.com/ShihuaHuang95/DEIM/pull/13) for more details). Huge thanks to my colleague [Longfei Liu](https://github.com/capsule2077).
82
+ - [x] **\[2024.12.03\]** Released the DEIM series. This repo also supports re-implementations of [D-FINE](https://arxiv.org/abs/2410.13842) and [RT-DETR](https://arxiv.org/abs/2407.17140).
83
+
84
+ ## Table of Contents
85
+ * [1. Model Zoo](https://github.com/ShihuaHuang95/DEIM?tab=readme-ov-file#1-model-zoo)
86
+ * [2. Quick start](https://github.com/ShihuaHuang95/DEIM?tab=readme-ov-file#2-quick-start)
87
+ * [3. Usage](https://github.com/ShihuaHuang95/DEIM?tab=readme-ov-file#3-usage)
88
+ * [4. Tools](https://github.com/ShihuaHuang95/DEIM?tab=readme-ov-file#4-tools)
89
+ * [5. Citation](https://github.com/ShihuaHuang95/DEIM?tab=readme-ov-file#5-citation)
90
+ * [6. Acknowledgement](https://github.com/ShihuaHuang95/DEIM?tab=readme-ov-file#6-acknowledgement)
91
+
92
+
93
+ ## 1. Model Zoo
94
+
95
+ ### DEIM-D-FINE
96
+ | Model | Dataset | AP<sup>D-FINE</sup> | AP<sup>DEIM</sup> | #Params | Latency | GFLOPs | config | checkpoint
97
+ | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---:
98
+ **N** | COCO | **42.8** | **43.0** | 4M | 2.12ms | 7 | [yml](./configs/deim_dfine/deim_hgnetv2_n_coco.yml) | [ckpt](https://drive.google.com/file/d/1ZPEhiU9nhW4M5jLnYOFwTSLQC1Ugf62e/view?usp=sharing) |
99
+ **S** | COCO | **48.7** | **49.0** | 10M | 3.49ms | 25 | [yml](./configs/deim_dfine/deim_hgnetv2_s_coco.yml) | [ckpt](https://drive.google.com/file/d/1tB8gVJNrfb6dhFvoHJECKOF5VpkthhfC/view?usp=drive_link) |
100
+ **M** | COCO | **52.3** | **52.7** | 19M | 5.62ms | 57 | [yml](./configs/deim_dfine/deim_hgnetv2_m_coco.yml) | [ckpt](https://drive.google.com/file/d/18Lj2a6UN6k_n_UzqnJyiaiLGpDzQQit8/view?usp=drive_link) |
101
+ **L** | COCO | **54.0** | **54.7** | 31M | 8.07ms | 91 | [yml](./configs/deim_dfine/deim_hgnetv2_l_coco.yml) | [ckpt](https://drive.google.com/file/d/1PIRf02XkrA2xAD3wEiKE2FaamZgSGTAr/view?usp=drive_link) |
102
+ **X** | COCO | **55.8** | **56.5** | 62M | 12.89ms | 202 | [yml](./configs/deim_dfine/deim_hgnetv2_x_coco.yml) | [ckpt](https://drive.google.com/file/d/1dPtbgtGgq1Oa7k_LgH1GXPelg1IVeu0j/view?usp=drive_link) |
103
+
104
+
105
+ ### DEIM-RT-DETRv2
106
+ | Model | Dataset | AP<sup>RT-DETRv2</sup> | AP<sup>DEIM</sup> | #Params | Latency | GFLOPs | config | checkpoint
107
+ | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---:
108
+ **S** | COCO | **47.9** | **49.0** | 20M | 4.59ms | 60 | [yml](./configs/deim_rtdetrv2/deim_r18vd_120e_coco.yml) | [ckpt](https://drive.google.com/file/d/153_JKff6EpFgiLKaqkJsoDcLal_0ux_F/view?usp=drive_link) |
109
+ **M** | COCO | **49.9** | **50.9** | 31M | 6.40ms | 92 | [yml](./configs/deim_rtdetrv2/deim_r34vd_120e_coco.yml) | [ckpt](https://drive.google.com/file/d/1O9RjZF6kdFWGv1Etn1Toml4r-YfdMDMM/view?usp=drive_link) |
110
+ **M*** | COCO | **51.9** | **53.2** | 33M | 6.90ms | 100 | [yml](./configs/deim_rtdetrv2/deim_r50vd_m_60e_coco.yml) | [ckpt](https://drive.google.com/file/d/10dLuqdBZ6H5ip9BbBiE6S7ZcmHkRbD0E/view?usp=drive_link) |
111
+ **L** | COCO | **53.4** | **54.3** | 42M | 9.15ms | 136 | [yml](./configs/deim_rtdetrv2/deim_r50vd_60e_coco.yml) | [ckpt](https://drive.google.com/file/d/1mWknAXD5JYknUQ94WCEvPfXz13jcNOTI/view?usp=drive_link) |
112
+ **X** | COCO | **54.3** | **55.5** | 76M | 13.66ms | 259 | [yml](./configs/deim_rtdetrv2/deim_r101vd_60e_coco.yml) | [ckpt](https://drive.google.com/file/d/1BIevZijOcBO17llTyDX32F_pYppBfnzu/view?usp=drive_link) |
113
+
114
+
115
+ ## 2. Quick start
116
+
117
+ ### Setup
118
+
119
+ ```shell
120
+ conda create -n deim python=3.11.9
121
+ conda activate deim
122
+ pip install -r requirements.txt
123
+ ```
124
+
125
+
126
+ ### Data Preparation
127
+
128
+ <details>
129
+ <summary> COCO2017 Dataset </summary>
130
+
131
+ 1. Download COCO2017 from [OpenDataLab](https://opendatalab.com/OpenDataLab/COCO_2017) or [COCO](https://cocodataset.org/#download).
132
+ 2. Modify paths in [coco_detection.yml](./configs/dataset/coco_detection.yml)
133
+
134
+ ```yaml
135
+ train_dataloader:
136
+ img_folder: /data/COCO2017/train2017/
137
+ ann_file: /data/COCO2017/annotations/instances_train2017.json
138
+ val_dataloader:
139
+ img_folder: /data/COCO2017/val2017/
140
+ ann_file: /data/COCO2017/annotations/instances_val2017.json
141
+ ```
142
+
143
+ </details>
144
+
145
+ <details>
146
+ <summary>Custom Dataset</summary>
147
+
148
+ To train on your custom dataset, you need to organize it in the COCO format. Follow the steps below to prepare your dataset:
149
+
150
+ 1. **Set `remap_mscoco_category` to `False`:**
151
+
152
+ This prevents the automatic remapping of category IDs to match the MSCOCO categories.
153
+
154
+ ```yaml
155
+ remap_mscoco_category: False
156
+ ```
157
+
158
+ 2. **Organize Images:**
159
+
160
+ Structure your dataset directories as follows:
161
+
162
+ ```shell
163
+ dataset/
164
+ ├── images/
165
+ │ ├── train/
166
+ │ │ ├── image1.jpg
167
+ │ │ ├── image2.jpg
168
+ │ │ └── ...
169
+ │ ├── val/
170
+ │ │ ├── image1.jpg
171
+ │ │ ├── image2.jpg
172
+ │ │ └── ...
173
+ └── annotations/
174
+ ├── instances_train.json
175
+ ├── instances_val.json
176
+ └── ...
177
+ ```
178
+
179
+ - **`images/train/`**: Contains all training images.
180
+ - **`images/val/`**: Contains all validation images.
181
+ - **`annotations/`**: Contains COCO-formatted annotation files.
182
+
183
+ 3. **Convert Annotations to COCO Format:**
184
+
185
+ If your annotations are not already in COCO format, you'll need to convert them. You can use the following skeleton as a reference (a fleshed-out sketch follows it below) or use an existing tool:
186
+
187
+ ```python
188
+ import json
189
+
190
+ def convert_to_coco(input_annotations, output_annotations):
191
+ # Implement conversion logic here
192
+ pass
193
+
194
+ if __name__ == "__main__":
195
+ convert_to_coco('path/to/your_annotations.json', 'dataset/annotations/instances_train.json')
196
+ ```
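+ For concreteness, here is a minimal sketch of such a converter. It assumes a hypothetical
+ source format (a JSON list of records with `file_name`, `width`, `height`, and an
+ `objects` list holding a `label` and an `[x, y, w, h]` pixel box); adapt the loading
+ side to whatever format your labels are actually in.
+
+ ```python
+ import json
+
+ def convert_to_coco(input_annotations, output_annotations):
+     with open(input_annotations) as f:
+         records = json.load(f)
+     # collect category names and assign stable, 1-based COCO category ids
+     names = sorted({o["label"] for r in records for o in r["objects"]})
+     cat_ids = {n: i + 1 for i, n in enumerate(names)}
+     coco = {
+         "images": [],
+         "annotations": [],
+         "categories": [{"id": i, "name": n} for n, i in cat_ids.items()],
+     }
+     ann_id = 1
+     for img_id, r in enumerate(records, start=1):
+         coco["images"].append({"id": img_id, "file_name": r["file_name"],
+                                "width": r["width"], "height": r["height"]})
+         for o in r["objects"]:
+             x, y, w, h = o["bbox"]  # COCO boxes are [x, y, width, height] in pixels
+             coco["annotations"].append({
+                 "id": ann_id, "image_id": img_id,
+                 "category_id": cat_ids[o["label"]],
+                 "bbox": [x, y, w, h], "area": w * h, "iscrowd": 0,
+             })
+             ann_id += 1
+     with open(output_annotations, "w") as f:
+         json.dump(coco, f)
+
+ if __name__ == "__main__":
+     convert_to_coco('path/to/your_annotations.json', 'dataset/annotations/instances_train.json')
+ ```
+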
197
+
198
+ 4. **Update Configuration Files:**
199
+
200
+ Modify your [custom_detection.yml](./configs/dataset/custom_detection.yml).
201
+
202
+ ```yaml
203
+ task: detection
204
+
205
+ evaluator:
206
+ type: CocoEvaluator
207
+ iou_types: ['bbox', ]
208
+
209
+ num_classes: 777 # your dataset classes
210
+ remap_mscoco_category: False
211
+
212
+ train_dataloader:
213
+ type: DataLoader
214
+ dataset:
215
+ type: CocoDetection
216
+ img_folder: /data/yourdataset/train
217
+ ann_file: /data/yourdataset/train/train.json
218
+ return_masks: False
219
+ transforms:
220
+ type: Compose
221
+ ops: ~
222
+ shuffle: True
223
+ num_workers: 4
224
+ drop_last: True
225
+ collate_fn:
226
+ type: BatchImageCollateFunction
227
+
228
+ val_dataloader:
229
+ type: DataLoader
230
+ dataset:
231
+ type: CocoDetection
232
+ img_folder: /data/yourdataset/val
233
+ ann_file: /data/yourdataset/val/ann.json
234
+ return_masks: False
235
+ transforms:
236
+ type: Compose
237
+ ops: ~
238
+ shuffle: False
239
+ num_workers: 4
240
+ drop_last: False
241
+ collate_fn:
242
+ type: BatchImageCollateFunction
243
+ ```
244
+
245
+ </details>
246
+
247
+
248
+ ## 3. Usage
249
+ <details open>
250
+ <summary> COCO2017 </summary>
251
+
252
+ 1. Training
253
+ ```shell
254
+ CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --master_port=7777 --nproc_per_node=4 train.py -c configs/deim_dfine/deim_hgnetv2_${model}_coco.yml --use-amp --seed=0
255
+ ```
256
+
258
+ 2. Testing
259
+ ```shell
260
+ CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --master_port=7777 --nproc_per_node=4 train.py -c configs/deim_dfine/deim_hgnetv2_${model}_coco.yml --test-only -r model.pth
261
+ ```
262
+
264
+ 3. Tuning
265
+ ```shell
266
+ CUDA_VISIBLE_DEVICES=0,1,2,3 torchrun --master_port=7777 --nproc_per_node=4 train.py -c configs/deim_dfine/deim_hgnetv2_${model}_coco.yml --use-amp --seed=0 -t model.pth
267
+ ```
268
+ </details>
269
+
270
+ <details>
271
+ <summary> Customizing Batch Size </summary>
272
+
273
+ For example, if you want to double the total batch size when training D-FINE-L on COCO2017, here are the steps you should follow:
274
+
275
+ 1. **Modify your [dataloader.yml](./configs/base/dataloader.yml)** to increase the `total_batch_size`:
276
+
277
+ ```yaml
278
+ train_dataloader:
279
+ total_batch_size: 64 # Previously it was 32, now doubled
280
+ ```
281
+
282
+ 2. **Modify your [deim_hgnetv2_l_coco.yml](./configs/deim_dfine/deim_hgnetv2_l_coco.yml)**. Here is how the key parameters should be adjusted (a quick arithmetic check follows the YAML):
283
+
284
+ ```yaml
285
+ optimizer:
286
+ type: AdamW
287
+ params:
288
+ -
289
+ params: '^(?=.*backbone)(?!.*norm|bn).*$'
290
+ lr: 0.000025 # doubled, linear scaling law
291
+ -
292
+ params: '^(?=.*(?:encoder|decoder))(?=.*(?:norm|bn)).*$'
293
+ weight_decay: 0.
294
+
295
+ lr: 0.0005 # doubled, linear scaling law
296
+ betas: [0.9, 0.999]
297
+ weight_decay: 0.0001 # needs a grid search
298
+
299
+ ema: # added EMA settings
300
+ decay: 0.9998 # adjusted by 1 - (1 - decay) * 2
301
+ warmups: 500 # halved
302
+
303
+ lr_warmup_scheduler:
304
+ warmup_duration: 250 # halved
305
+ ```
306
+
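+ As a quick sanity check, the linear-scaling arithmetic above can be written out in a few
+ lines (a sketch; `k` is the ratio of new to old total batch size, here 2):
+
+ ```python
+ def rescale(lr, ema_decay, warmup_steps, k):
+     """Linear scaling law: lr scales by k, EMA decay by 1 - (1 - d) * k, warmup by 1 / k."""
+     return lr * k, 1 - (1 - ema_decay) * k, int(warmup_steps / k)
+
+ lr, decay, warmup = rescale(0.00025, 0.9999, 500, k=2)
+ print(lr, round(decay, 6), warmup)  # 0.0005 0.9998 250
+ ```
+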
307
+ </details>
308
+
309
+
310
+ <details>
311
+ <summary> Customizing Input Size </summary>
312
+
313
+ If you'd like to train **DEIM** on COCO2017 with an input size of 320x320, follow these steps:
314
+
315
+ 1. **Modify your [dataloader.yml](./configs/base/dataloader.yml)**:
316
+
317
+ ```yaml
318
+
319
+ train_dataloader:
320
+ dataset:
321
+ transforms:
322
+ ops:
323
+ - {type: Resize, size: [320, 320], }
324
+ collate_fn:
325
+ base_size: 320
326
+
+ val_dataloader:
+ dataset:
327
+ transforms:
328
+ ops:
329
+ - {type: Resize, size: [320, 320], }
330
+ ```
331
+
332
+ 2. **Modify your [dfine_hgnetv2.yml](./configs/base/dfine_hgnetv2.yml)**:
333
+
334
+ ```yaml
335
+ eval_spatial_size: [320, 320]
336
+ ```
337
+
338
+ </details>
339
+
340
+ ## 4. Tools
341
+ <details>
342
+ <summary> Deployment </summary>
343
+
345
+ 1. Setup
346
+ ```shell
347
+ pip install onnx onnxsim
348
+ ```
349
+
350
+ 2. Export ONNX
351
+ ```shell
352
+ python tools/deployment/export_onnx.py --check -c configs/deim_dfine/deim_hgnetv2_${model}_coco.yml -r model.pth
353
+ ```
354
+
355
+ 3. Export [TensorRT](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html)
356
+ ```shell
357
+ trtexec --onnx="model.onnx" --saveEngine="model.engine" --fp16
358
+ ```
359
+
360
+ </details>
361
+
362
+ <details>
363
+ <summary> Inference (Visualization) </summary>
364
+
365
+
366
+ 1. Setup
367
+ ```shell
368
+ pip install -r tools/inference/requirements.txt
369
+ ```
370
+
371
+
373
+ 2. Inference (onnxruntime / tensorrt / torch)
374
+
375
+ Inference on images and videos is now supported.
376
+ ```shell
377
+ python tools/inference/onnx_inf.py --onnx model.onnx --input image.jpg # video.mp4
378
+ python tools/inference/trt_inf.py --trt model.engine --input image.jpg
379
+ python tools/inference/torch_inf.py -c configs/deim_dfine/deim_hgnetv2_${model}_coco.yml -r model.pth --input image.jpg --device cuda:0
380
+ ```
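+
+ For reference, a minimal onnxruntime sketch of what `onnx_inf.py` does. The input names
+ (`images`, `orig_target_sizes`), the labels/boxes/scores output order, and the plain
+ [0, 1] preprocessing are assumptions based on the export script; verify them with
+ `sess.get_inputs()` / `sess.get_outputs()`.
+
+ ```python
+ import numpy as np
+ import onnxruntime as ort
+ from PIL import Image
+
+ sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
+
+ im = Image.open("image.jpg").convert("RGB")
+ # 640x640 matches the default eval_spatial_size; scale pixels to [0, 1], NCHW layout
+ data = np.asarray(im.resize((640, 640)), dtype=np.float32).transpose(2, 0, 1)[None] / 255.0
+ sizes = np.array([[im.width, im.height]], dtype=np.int64)  # original size for box rescaling
+
+ labels, boxes, scores = sess.run(None, {"images": data, "orig_target_sizes": sizes})
+ print(labels.shape, boxes.shape, scores.shape)
+ ```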
381
+ </details>
382
+
383
+ <details>
384
+ <summary> Benchmark </summary>
385
+
386
+ 1. Setup
387
+ ```shell
388
+ pip install -r tools/benchmark/requirements.txt
389
+ ```
390
+
392
+ 2. Model FLOPs, MACs, and Params
393
+ ```shell
394
+ python tools/benchmark/get_info.py -c configs/deim_dfine/deim_hgnetv2_${model}_coco.yml
395
+ ```
396
+
397
+ 3. TensorRT Latency
398
+ ```shell
399
+ python tools/benchmark/trt_benchmark.py --COCO_dir path/to/COCO2017 --engine_dir model.engine
400
+ ```
401
+ </details>
402
+
403
+ <details>
404
+ <summary> FiftyOne Visualization </summary>
405
+
406
+ 1. Setup
407
+ ```shell
408
+ pip install fiftyone
409
+ ```
410
+ 2. Voxel51 FiftyOne visualization ([fiftyone](https://github.com/voxel51/fiftyone))
411
+ ```shell
412
+ python tools/visualization/fiftyone_vis.py -c configs/deim_dfine/deim_hgnetv2_${model}_coco.yml -r model.pth
413
+ ```
414
+ </details>
415
+
416
+ <details>
417
+ <summary> Others </summary>
418
+
419
+ 1. Auto Resume Training
420
+ ```shell
421
+ bash reference/safe_training.sh
422
+ ```
423
+
424
+ 2. Converting Model Weights
425
+ ```shell
426
+ python reference/convert_weight.py model.pth
427
+ ```
428
+ </details>
429
+
430
+
431
+ ## 5. Citation
432
+ If you use `DEIM` or its methods in your work, please cite the following BibTeX entry:
433
+ <details open>
434
+ <summary> bibtex </summary>
435
+
436
+ ```latex
437
+ @inproceedings{huang2024deim,
438
+ title={DEIM: DETR with Improved Matching for Fast Convergence},
439
+ author={Shihua Huang and Zhichao Lu and Xiaodong Cun and Yongjun Yu and Xiao Zhou and Xi Shen},
440
+ booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
441
+ year={2025},
442
+ }
443
+ ```
444
+ </details>
445
+
446
+ ## 6. Acknowledgement
447
+ Our work is built upon [D-FINE](https://github.com/Peterande/D-FINE) and [RT-DETR](https://github.com/lyuwenyu/RT-DETR).
448
+
449
+ ✨ Feel free to contribute and reach out if you have any questions! ✨
services/training_service/configs/base/dataloader.yml ADDED
@@ -0,0 +1,40 @@
1
+
2
+ train_dataloader:
3
+ dataset:
4
+ transforms:
5
+ ops:
6
+ - {type: RandomPhotometricDistort, p: 0.5}
7
+ - {type: RandomZoomOut, fill: 0}
8
+ - {type: RandomIoUCrop, p: 0.8}
9
+ - {type: SanitizeBoundingBoxes, min_size: 1}
10
+ - {type: RandomHorizontalFlip}
11
+ - {type: RandomVerticalFlip}
12
+ - {type: Resize, size: [1024, 800], }
13
+ - {type: SanitizeBoundingBoxes, min_size: 1}
14
+ - {type: ConvertPILImage, dtype: 'float32', scale: True}
15
+ - {type: ConvertBoxes, fmt: 'cxcywh', normalize: True}
16
+ policy:
17
+ name: stop_epoch
18
+ epoch: 72 # epochs in [72, ~) stop `ops`
19
+ ops: ['Mosaic', 'RandomPhotometricDistort', 'RandomZoomOut', 'RandomIoUCrop']
20
+
21
+ collate_fn:
22
+ type: BatchImageCollateFunction
23
+ base_size: 1024
24
+ base_size_repeat: 3
25
+ stop_epoch: 72 # epoch in [72, ~) stop `multiscales`
26
+
27
+ shuffle: True
28
+ total_batch_size: 32 # total batch size is 32 (4 GPUs * 8 per GPU)
29
+ num_workers: 16
30
+
31
+
32
+ val_dataloader:
33
+ dataset:
34
+ transforms:
35
+ ops:
36
+ - {type: Resize, size: [1024, 800], }
37
+ - {type: ConvertPILImage, dtype: 'float32', scale: True}
38
+ shuffle: False
39
+ total_batch_size: 64
40
+ num_workers: 16
services/training_service/configs/base/deim.yml ADDED
@@ -0,0 +1,49 @@
1
+ # Dense O2O
2
+ train_dataloader:
3
+ dataset:
4
+ transforms:
5
+ ops:
6
+ - {type: Mosaic, output_size: 320, rotation_range: 10, translation_range: [0.1, 0.1], scaling_range: [0.5, 1.5],
7
+ probability: 1.0, fill_value: 0, use_cache: False, max_cached_images: 50, random_pop: True}
8
+ - {type: RandomPhotometricDistort, p: 0.5}
9
+ - {type: RandomZoomOut, fill: 0}
10
+ - {type: RandomIoUCrop, p: 0.8}
11
+ - {type: SanitizeBoundingBoxes, min_size: 1}
12
+ - {type: RandomHorizontalFlip}
13
+ - {type: RandomVerticalFlip}
14
+ - {type: Resize, size: [1024, 800], }
15
+ - {type: SanitizeBoundingBoxes, min_size: 1}
16
+ - {type: ConvertPILImage, dtype: 'float32', scale: True}
17
+ - {type: ConvertBoxes, fmt: 'cxcywh', normalize: True}
18
+ policy:
19
+ epoch: [4, 29, 50] # list
20
+ ops: ['Mosaic', 'RandomPhotometricDistort', 'RandomZoomOut', 'RandomIoUCrop']
21
+ mosaic_prob: 0.5
22
+
23
+ collate_fn:
24
+ mixup_prob: 0.5
25
+ mixup_epochs: [4, 29]
26
+ stop_epoch: 50 # epochs in [50, ~) stop `multiscales`
27
+
28
+ # Unfreezing BN
29
+ HGNetv2:
30
+ freeze_at: -1 # 0 default
31
+ freeze_norm: False # True default
32
+
33
+ # Activation
34
+ DFINETransformer:
35
+ activation: silu
36
+ mlp_act: silu
37
+
38
+ ## Our LR-Scheduler
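+ # (schedule sketch, as we use it here: linear warmup for `warmup_iter` steps, a flat LR
+ # until `flat_epoch`, then cosine decay; the last `no_aug_epoch` epochs additionally
+ # run with the strong augmentations above switched off)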
39
+ lrsheduler: flatcosine
40
+ lr_gamma: 0.5
41
+ warmup_iter: 2000
42
+ flat_epoch: 29 # 4 + total_epochs // 2, e.g., 29 = 4 + 50 // 2
43
+ no_aug_epoch: 8
44
+
45
+ ## Our Loss
46
+ DEIMCriterion:
47
+ weight_dict: {loss_mal: 1, loss_bbox: 5, loss_giou: 2, loss_fgl: 0.15, loss_ddf: 1.5}
48
+ losses: ['mal', 'boxes', 'local']
49
+ gamma: 1.5
services/training_service/configs/base/dfine_hgnetv2.yml ADDED
@@ -0,0 +1,90 @@
1
+ task: detection
2
+
3
+ model: DEIM
4
+ criterion: DEIMCriterion
5
+ postprocessor: PostProcessor
6
+
7
+ use_focal_loss: True
8
+ eval_spatial_size: [640, 640] # h w
9
+ checkpoint_freq: 4 # save freq
10
+
11
+ DEIM:
12
+ backbone: HGNetv2
13
+ encoder: HybridEncoder
14
+ decoder: DFINETransformer
15
+
16
+ # Add, default for step lr scheduler
17
+ lrsheduler: flatcosine
18
+ lr_gamma: 1
19
+ warmup_iter: 500
20
+ flat_epoch: 4000000
21
+ no_aug_epoch: 0
22
+
23
+ HGNetv2:
24
+ pretrained: True
25
+ local_model_dir: ../RT-DETR-main/D-FINE/weight/hgnetv2/
26
+
27
+ HybridEncoder:
28
+ in_channels: [512, 1024, 2048]
29
+ feat_strides: [8, 16, 32]
30
+
31
+ # intra
32
+ hidden_dim: 256
33
+ use_encoder_idx: [2]
34
+ num_encoder_layers: 1
35
+ nhead: 8
36
+ dim_feedforward: 1024
37
+ dropout: 0.
38
+ enc_act: 'gelu'
39
+
40
+ # cross
41
+ expansion: 1.0
42
+ depth_mult: 1
43
+ act: 'silu'
44
+
45
+
46
+ DFINETransformer:
47
+ feat_channels: [256, 256, 256]
48
+ feat_strides: [8, 16, 32]
49
+ hidden_dim: 256
50
+ num_levels: 3
51
+
52
+ num_layers: 6
53
+ eval_idx: -1
54
+ num_queries: 300
55
+
56
+ num_denoising: 100
57
+ label_noise_ratio: 0.5
58
+ box_noise_scale: 1.0
59
+
60
+ # NEW
61
+ reg_max: 32
62
+ reg_scale: 4
63
+
64
+ # Auxiliary decoder layers dimension scaling
65
+ # "eg. If num_layers: 6 eval_idx: -4,
66
+ # then layer 3, 4, 5 are auxiliary decoder layers."
67
+ layer_scale: 1 # 2
68
+
69
+
70
+ num_points: [3, 6, 3] # [4, 4, 4] [3, 6, 3]
71
+ cross_attn_method: default # default, discrete
72
+ query_select_method: default # default, agnostic
73
+
74
+
75
+ PostProcessor:
76
+ num_top_queries: 300
77
+
78
+
79
+ DEIMCriterion:
80
+ weight_dict: {loss_vfl: 1, loss_bbox: 5, loss_giou: 2, loss_fgl: 0.15, loss_ddf: 1.5}
81
+ losses: ['vfl', 'boxes', 'local']
82
+ alpha: 0.75
83
+ gamma: 2.0
84
+ reg_max: 32
85
+
86
+ matcher:
87
+ type: HungarianMatcher
88
+ weight_dict: {cost_class: 2, cost_bbox: 5, cost_giou: 2}
89
+ alpha: 0.25
90
+ gamma: 2.0
services/training_service/configs/base/optimizer.yml ADDED
@@ -0,0 +1,39 @@
1
+ use_amp: True
2
+ use_ema: True
3
+ ema:
4
+ type: ModelEMA
5
+ decay: 0.9999
6
+ warmups: 1000
7
+ start: 0
8
+
9
+ epoches: 72
10
+ clip_max_norm: 0.1
11
+
12
+ # Gradient Accumulation - set to 1 to disable, >1 to enable accumulation
13
+ # Effective batch size = batch_size * gradient_accumulation_steps
14
+ gradient_accumulation_steps: 1
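+ # e.g., total_batch_size 32 with gradient_accumulation_steps 2 behaves like an
+ # effective batch of 64, while peak memory stays at the 32-sample level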
15
+
16
+
17
+ optimizer:
18
+ type: AdamW
19
+ params:
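+ # groups below are selected by regex over parameter names:
+ # 1) backbone weights (names containing 'norm' excluded) at a 20x lower LR than the global 0.00025
+ # 2) encoder/decoder norm/bn parameters, with weight decay disabled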
20
+ -
21
+ params: '^(?=.*backbone)(?!.*norm).*$'
22
+ lr: 0.0000125
23
+ -
24
+ params: '^(?=.*(?:encoder|decoder))(?=.*(?:norm|bn)).*$'
25
+ weight_decay: 0.
26
+
27
+ lr: 0.00025
28
+ betas: [0.9, 0.999]
29
+ weight_decay: 0.000125
30
+
31
+
32
+ lr_scheduler:
33
+ type: MultiStepLR
34
+ milestones: [500]
35
+ gamma: 0.1
36
+
37
+ lr_warmup_scheduler:
38
+ type: LinearWarmup
39
+ warmup_duration: 500
services/training_service/configs/base/rt_deim.yml ADDED
@@ -0,0 +1,49 @@
1
+ # Dense O2O
2
+ train_dataloader:
3
+ dataset:
4
+ transforms:
5
+ ops:
6
+ - {type: Mosaic, output_size: 320, rotation_range: 10, translation_range: [0.1, 0.1], scaling_range: [0.5, 1.5],
7
+ probability: 1.0, fill_value: 0, use_cache: False, max_cached_images: 50, random_pop: True}
8
+ - {type: RandomPhotometricDistort, p: 0.5}
9
+ - {type: RandomZoomOut, fill: 0}
10
+ - {type: RandomIoUCrop, p: 0.8}
11
+ - {type: SanitizeBoundingBoxes, min_size: 1}
12
+ - {type: RandomHorizontalFlip}
13
+ - {type: Resize, size: [640, 640], }
14
+ - {type: SanitizeBoundingBoxes, min_size: 1}
15
+ - {type: ConvertPILImage, dtype: 'float32', scale: True}
16
+ - {type: ConvertBoxes, fmt: 'cxcywh', normalize: True}
17
+ policy:
18
+ epoch: [4, 29, 50] # list
19
+ ops: ['Mosaic', 'RandomPhotometricDistort', 'RandomZoomOut', 'RandomIoUCrop']
20
+ mosaic_prob: 0.5
21
+
22
+ collate_fn:
23
+ mixup_prob: 0.5
24
+ mixup_epochs: [4, 29]
25
+ stop_epoch: 50 # epochs in [50, ~) stop `multiscales`
26
+
27
+ # Unfreezing BN
28
+ PResNet:
29
+ freeze_at: -1 # default 0
30
+ freeze_norm: False # default True
31
+
32
+ # Activation
33
+ RTDETRTransformerv2:
34
+ query_pos_method: as_reg
35
+ activation: silu
36
+ mlp_act: silu
37
+
38
+ ## Our LR-Scheduler
39
+ lrsheduler: flatcosine
40
+ lr_gamma: 0.5
41
+ warmup_iter: 2000
42
+ flat_epoch: 29 # 4 + total_epochs // 2, e.g., 29 = 4 + 50 // 2
43
+ no_aug_epoch: 8
44
+
45
+ ## Our Loss
46
+ DEIMCriterion:
47
+ weight_dict: {loss_mal: 1, loss_bbox: 5, loss_giou: 2}
48
+ losses: ['mal', 'boxes', ]
49
+ gamma: 1.5
services/training_service/configs/base/rt_optimizer.yml ADDED
@@ -0,0 +1,37 @@
1
+ use_amp: True
2
+ use_ema: True
3
+ ema:
4
+ type: ModelEMA
5
+ decay: 0.9999
6
+ warmups: 2000
7
+ start: 0
8
+
9
+ epoches: 72
10
+ clip_max_norm: 0.1
11
+
12
+ train_dataloader:
13
+ total_batch_size: 16
14
+
15
+ optimizer:
16
+ type: AdamW
17
+ params:
18
+ -
19
+ params: '^(?=.*backbone)(?!.*norm).*$'
20
+ lr: 0.00001
21
+ -
22
+ params: '^(?=.*(?:encoder|decoder))(?=.*(?:norm|bn)).*$'
23
+ weight_decay: 0.
24
+
25
+ lr: 0.0001
26
+ betas: [0.9, 0.999]
27
+ weight_decay: 0.0001
28
+
29
+ lr_scheduler:
30
+ type: MultiStepLR
31
+ milestones: [1000]
32
+ gamma: 0.1
33
+
34
+
35
+ lr_warmup_scheduler:
36
+ type: LinearWarmup
37
+ warmup_duration: 2000
services/training_service/configs/base/rtdetrv2_r50vd.yml ADDED
@@ -0,0 +1,90 @@
1
+ task: detection
2
+
3
+ model: DEIM
4
+ criterion: DEIMCriterion
5
+ postprocessor: PostProcessor
6
+
7
+ use_focal_loss: True
8
+ eval_spatial_size: [640, 640] # h w
9
+ checkpoint_freq: 4 # save freq
10
+
11
+ DEIM:
12
+ backbone: PResNet
13
+ encoder: HybridEncoder
14
+ decoder: RTDETRTransformerv2
15
+
16
+
17
+ # Add, default for step lr scheduler
18
+ lrsheduler: flatcosine
19
+ lr_gamma: 1
20
+ warmup_iter: 2000
21
+ flat_epoch: 4000000
22
+ no_aug_epoch: 0
23
+
24
+ PResNet:
25
+ depth: 50
26
+ variant: d
27
+ freeze_at: 0
28
+ return_idx: [1, 2, 3]
29
+ num_stages: 4
30
+ freeze_norm: True
31
+ pretrained: True
32
+ local_model_dir: ../RT-DETR-main/rtdetrv2_pytorch/INK1k/
33
+
34
+
35
+ HybridEncoder:
36
+ in_channels: [512, 1024, 2048]
37
+ feat_strides: [8, 16, 32]
38
+
39
+ # intra
40
+ hidden_dim: 256
41
+ use_encoder_idx: [2]
42
+ num_encoder_layers: 1
43
+ nhead: 8
44
+ dim_feedforward: 1024
45
+ dropout: 0.
46
+ enc_act: 'gelu'
47
+
48
+ # cross
49
+ expansion: 1.0
50
+ depth_mult: 1
51
+ act: 'silu'
52
+ version: rt_detrv2 # pay attention to this
53
+
54
+
55
+ RTDETRTransformerv2:
56
+ feat_channels: [256, 256, 256]
57
+ feat_strides: [8, 16, 32]
58
+ hidden_dim: 256
59
+ num_levels: 3
60
+
61
+ num_layers: 6
62
+ num_queries: 300
63
+
64
+ num_denoising: 100
65
+ label_noise_ratio: 0.5
66
+ box_noise_scale: 1.0 # 1.0 0.4
67
+
68
+ eval_idx: -1
69
+
70
+ # NEW, can be chosen
71
+ num_points: [4, 4, 4] # [3,3,3] [2,2,2]
72
+ cross_attn_method: default # default, discrete
73
+ query_select_method: default # default, agnostic
74
+
75
+
76
+ PostProcessor:
77
+ num_top_queries: 300
78
+
79
+ DEIMCriterion:
80
+ weight_dict: {loss_vfl: 1, loss_bbox: 5, loss_giou: 2,}
81
+ losses: ['vfl', 'boxes', ]
82
+ alpha: 0.75
83
+ gamma: 2.0
84
+ use_uni_set: False
85
+
86
+ matcher:
87
+ type: HungarianMatcher
88
+ weight_dict: {cost_class: 2, cost_bbox: 5, cost_giou: 2}
89
+ alpha: 0.25
90
+ gamma: 2.0
services/training_service/configs/base/wandb.yml ADDED
@@ -0,0 +1,22 @@
1
+ # Weights & Biases Configuration
2
+ wandb:
3
+ enabled: False # can be overridden by --use-wandb
4
+ project: "DEIM-detection"
5
+ entity: null # your wandb team/username
6
+ name: null # auto-generated if null
7
+ id: null # for resuming runs
8
+ tags: ["deim", "object-detection"]
9
+ notes: "DEIM training run"
10
+ group: null # for grouping related runs
11
+ job_type: "training"
12
+
13
+ # Logging settings
14
+ watch_model: True
15
+ log_gradients: False
16
+ log_frequency: 10 # log every N steps
17
+ save_artifacts: True # save best model as artifact
18
+
19
+ # Advanced settings
20
+ save_code: True # save code snapshot
21
+ anonymous: null # "allow", "never", "must"
22
+ mode: "online" # "online", "offline", "disabled"
services/training_service/configs/dataset/coco_detection.yml ADDED
@@ -0,0 +1,41 @@
1
+ task: detection
2
+
3
+ evaluator:
4
+ type: CocoEvaluator
5
+ iou_types: ['bbox', ]
6
+
7
+ num_classes: 80
8
+ remap_mscoco_category: True
9
+
10
+ train_dataloader:
11
+ type: DataLoader
12
+ dataset:
13
+ type: CocoDetection
14
+ img_folder: /datassd/COCO/train2017/
15
+ ann_file: /datassd/COCO/annotations/instances_train2017.json
16
+ return_masks: False
17
+ transforms:
18
+ type: Compose
19
+ ops: ~
20
+ shuffle: True
21
+ num_workers: 4
22
+ drop_last: True
23
+ collate_fn:
24
+ type: BatchImageCollateFunction
25
+
26
+
27
+ val_dataloader:
28
+ type: DataLoader
29
+ dataset:
30
+ type: CocoDetection
31
+ img_folder: /datassd/COCO/val2017/
32
+ ann_file: /datassd/COCO/annotations/instances_val2017.json
33
+ return_masks: False
34
+ transforms:
35
+ type: Compose
36
+ ops: ~
37
+ shuffle: False
38
+ num_workers: 4
39
+ drop_last: False
40
+ collate_fn:
41
+ type: BatchImageCollateFunction
services/training_service/configs/dataset/crowdhuman_detection.yml ADDED
@@ -0,0 +1,41 @@
1
+ task: detection
2
+
3
+ evaluator:
4
+ type: CocoEvaluator
5
+ iou_types: ['bbox', ]
6
+
7
+ num_classes: 2 # your dataset classes
8
+ remap_mscoco_category: False
9
+
10
+ train_dataloader:
11
+ type: DataLoader
12
+ dataset:
13
+ type: CocoDetection
14
+ img_folder: /datassd/coco/crowd_human_coco/CrowdHuman_train
15
+ ann_file: /datassd/coco/crowd_human_coco/Chuman-train.json
16
+ return_masks: False
17
+ transforms:
18
+ type: Compose
19
+ ops: ~
20
+ shuffle: True
21
+ num_workers: 4
22
+ drop_last: True
23
+ collate_fn:
24
+ type: BatchImageCollateFunction
25
+
26
+
27
+ val_dataloader:
28
+ type: DataLoader
29
+ dataset:
30
+ type: CocoDetection
31
+ img_folder: /datassd/coco/crowd_human_coco/CrowdHuman_val
32
+ ann_file: /datassd/coco/crowd_human_coco/Chuman-val.json
33
+ return_masks: False
34
+ transforms:
35
+ type: Compose
36
+ ops: ~
37
+ shuffle: False
38
+ num_workers: 4
39
+ drop_last: False
40
+ collate_fn:
41
+ type: BatchImageCollateFunction
services/training_service/configs/dataset/custom_detection.yml ADDED
@@ -0,0 +1,41 @@
1
+ task: detection
2
+
3
+ evaluator:
4
+ type: CocoEvaluator
5
+ iou_types: ['bbox', ]
6
+
7
+ num_classes: 777 # your dataset classes
8
+ remap_mscoco_category: False
9
+
10
+ train_dataloader:
11
+ type: DataLoader
12
+ dataset:
13
+ type: CocoDetection
14
+ img_folder: /data/yourdataset/train
15
+ ann_file: /data/yourdataset/train/train.json
16
+ return_masks: False
17
+ transforms:
18
+ type: Compose
19
+ ops: ~
20
+ shuffle: True
21
+ num_workers: 4
22
+ drop_last: True
23
+ collate_fn:
24
+ type: BatchImageCollateFunction
25
+
26
+
27
+ val_dataloader:
28
+ type: DataLoader
29
+ dataset:
30
+ type: CocoDetection
31
+ img_folder: /data/yourdataset/val
32
+ ann_file: /data/yourdataset/val/val.json
33
+ return_masks: False
34
+ transforms:
35
+ type: Compose
36
+ ops: ~
37
+ shuffle: False
38
+ num_workers: 4
39
+ drop_last: False
40
+ collate_fn:
41
+ type: BatchImageCollateFunction
services/training_service/configs/dataset/drone_detection.yml ADDED
@@ -0,0 +1,41 @@
1
+ task: detection
2
+
3
+ evaluator:
4
+ type: CocoEvaluator
5
+ iou_types: ['bbox', ]
6
+
7
+ num_classes: 1
8
+ remap_mscoco_category: False
9
+
10
+ train_dataloader:
11
+ type: DataLoader
12
+ dataset:
13
+ type: CocoDetection
14
+ img_folder: /media/fast/drone_train/drone_ds/images
15
+ ann_file: /media/fast/drone_train/drone_ds/train.json
16
+ return_masks: False
17
+ transforms:
18
+ type: Compose
19
+ ops: ~
20
+ shuffle: True
21
+ num_workers: 8
22
+ drop_last: True
23
+ collate_fn:
24
+ type: BatchImageCollateFunction
25
+
26
+
27
+ val_dataloader:
28
+ type: DataLoader
29
+ dataset:
30
+ type: CocoDetection
31
+ img_folder: /media/fast/drone_train/drone_ds/images
32
+ ann_file: /media/fast/drone_train/drone_ds/val.json
33
+ return_masks: False
34
+ transforms:
35
+ type: Compose
36
+ ops: ~
37
+ shuffle: False
38
+ num_workers: 4
39
+ drop_last: False
40
+ collate_fn:
41
+ type: BatchImageCollateFunction
services/training_service/configs/dataset/obj365_detection.yml ADDED
@@ -0,0 +1,41 @@
1
+ task: detection
2
+
3
+ evaluator:
4
+ type: CocoEvaluator
5
+ iou_types: ['bbox', ]
6
+
7
+ num_classes: 366
8
+ remap_mscoco_category: False
9
+
10
+ train_dataloader:
11
+ type: DataLoader
12
+ dataset:
13
+ type: CocoDetection
14
+ img_folder: /home/Dataset/objects365/train
15
+ ann_file: /home/Dataset/objects365/train/new_zhiyuan_objv2_train_resized640.json
16
+ return_masks: False
17
+ transforms:
18
+ type: Compose
19
+ ops: ~
20
+ shuffle: True
21
+ num_workers: 4
22
+ drop_last: True
23
+ collate_fn:
24
+ type: BatchImageCollateFunction
25
+
26
+
27
+ val_dataloader:
28
+ type: DataLoader
29
+ dataset:
30
+ type: CocoDetection
31
+ img_folder: /home/Dataset/objects365/val
32
+ ann_file: /home/Dataset/objects365/val/new_zhiyuan_objv2_val_resized640.json
33
+ return_masks: False
34
+ transforms:
35
+ type: Compose
36
+ ops: ~
37
+ shuffle: False
38
+ num_workers: 4
39
+ drop_last: False
40
+ collate_fn:
41
+ type: BatchImageCollateFunction
services/training_service/configs/dataset/voc_detection.yml ADDED
@@ -0,0 +1,40 @@
1
+ task: detection
2
+
3
+ evaluator:
4
+ type: CocoEvaluator
5
+ iou_types: ['bbox', ]
6
+
7
+ num_classes: 20
8
+
9
+ train_dataloader:
10
+ type: DataLoader
11
+ dataset:
12
+ type: VOCDetection
13
+ root: ./dataset/voc/
14
+ ann_file: trainval.txt
15
+ label_file: label_list.txt
16
+ transforms:
17
+ type: Compose
18
+ ops: ~
19
+ shuffle: True
20
+ num_workers: 4
21
+ drop_last: True
22
+ collate_fn:
23
+ type: BatchImageCollateFunction
24
+
25
+
26
+ val_dataloader:
27
+ type: DataLoader
28
+ dataset:
29
+ type: VOCDetection
30
+ root: ./dataset/voc/
31
+ ann_file: test.txt
32
+ label_file: label_list.txt
33
+ transforms:
34
+ type: Compose
35
+ ops: ~
36
+ shuffle: False
37
+ num_workers: 4
38
+ drop_last: False
39
+ collate_fn:
40
+ type: BatchImageCollateFunction